source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
atomic_action.py | from abc import ABCMeta, abstractmethod
from threading import Thread, Lock
class AtomicAction(object):
    """Abstract base class for a named action executed on its own thread.

    Subclasses implement run() plus the succeeded/preempted/failed outcome
    properties. NOTE: this is Python 2 code (print statements, __metaclass__).
    """
    __metaclass__ = ABCMeta
    # The three possible terminal outcomes of an action.
    OUTCOMES = ("succeeded", "preempted", "failed")
    SUCCEEDED, PREEMPTED, FAILED = OUTCOMES

    def __init__(self, name, params=None):
        # name: human-readable action identifier (used in the log prints below).
        # params: optional dict of action parameters; a fresh empty dict is
        # created per instance (avoids the shared mutable-default pitfall).
        self.name = name
        self.params = params if params is not None else {}
        self.__mutex__ = Lock()  # NOTE(review): not used anywhere in this class as shown
        self.__event = None
        self.__monitor_thread = None

    def start(self, kb, external_kb, event):
        # Launch run() on a worker thread; `event` is set by trigger_event()
        # once the worker finishes.
        # NOTE(review): calling monitor()/trigger_event() before start() raises
        # AttributeError because self.__t does not exist yet — confirm callers
        # always start() first.
        self.__event = event
        self.__t = Thread(target=self.run, args=(kb, external_kb))
        self.__t.start()

    @abstractmethod
    def run(self, kb, external_kb):
        # Subclass hook: perform the action using the given knowledge bases.
        return

    def trigger_event(self):
        # Block until the worker thread ends, then signal completion.
        print "I AM WAITING FOR", self.name
        self.__t.join()
        print "IT IS DONE", self.name
        self.__event.set()
        print "EVENT", self.__event, self.__event.is_set()

    def monitor(self):
        # Ensure a single monitor thread is waiting on the worker; return it.
        if self.__monitor_thread is None or not self.__monitor_thread.is_alive():
            self.__monitor_thread = Thread(target=self.trigger_event)
            self.__monitor_thread.start()
        return self.__monitor_thread

    @property
    @abstractmethod
    def succeeded(self):
        # Subclass-defined: True when the action completed successfully.
        return True

    @property
    @abstractmethod
    def preempted(self):
        # Subclass-defined: True when the action was preempted.
        return True

    @property
    @abstractmethod
    def failed(self):
        # Subclass-defined: True when the action failed.
        return True
|
app.py | import requests, os, json
from threading import Thread
from time import sleep, time
import random, os
# create the wallet
# Pick a random shard and create a local wallet file via the dexm binary.
shard = random.randint(1, 5)
print("Using shard ", str(shard))
os.system("./dexmd mw wal.json " + str(shard))
# Ask the coordinator which validator slot (0 = ordinary node) we are.
validator = requests.get("http://35.211.241.218:5000/start_validator").text
validator = int(validator)
wallet = ""
if validator == 0:
    wallet = json.loads(open('wal.json').read())
else:
    # Validators use a pre-provisioned wallet file named wallet<N>.
    wallet = json.loads(open('wallet'+str(validator)).read())
address = wallet["Address"]
# Register our address with the coordinator; it answers with the agreed
# network start timestamp (unix seconds).
timestamp = requests.get("http://35.211.241.218:5000/submit_addr", params={
    "wallet": address
}).text
timestamp = int(timestamp)
def send_dexmpos():
    """Wait until 30s after the coordinated start time, ask the faucet for
    funds, then (once the balance is provable) forward 20 coins to DexmPoS.
    """
    # Sleep until 30 seconds past the globally agreed start timestamp.
    sleep(timestamp-time()+30)
    req = requests.get("http://35.211.241.218:5000/send_money", params={
        "wallet": address
    })
    # wait for merkle proof to actually have the balance to send money
    sleep(250)
    # BUG FIX: `req` is a requests.Response object, so `req == "Sent"` was
    # always False and the transfer below could never run; compare the
    # response body instead.
    if req.text == "Sent":
        print("SENDING TO DEXMPOS")
        os.system("./dexmd mkt wal.json DexmPoS 20 2")
    else:
        print("REQUEST NOT SENT")
# Kick off the funding/transfer task in the background while the node starts.
thread = Thread(target=send_dexmpos)
thread.start()
# check if you are a validator
if validator != 0:
    # Start the node as validator <N> using the pre-provisioned wallet.
    os.system("./dexmd sn wallet" + str(validator) + " " + str(timestamp) + " hackney true")
else:
    # wait for all the other validator to start
    sleep(60)
    os.system("./dexmd sn wal.json " + str(timestamp))
thread.join()
|
gui.py | import external.PySimpleGUI as sg
from external.phue import Bridge
from data import images
import acc
import iracing
import threading
import json
import time
# Global Variables
SAVE_FILE_PATH = './phue-rf-save.json'
# Single source of truth for the user's configuration; persisted as JSON.
HUE_CONNECTION = {
    'ip': '',
    'lights': [],
    'brightness': 255,
    'sim': 'ACC'
}
# When True the background sync loop in start_sync() stops/stays stopped.
STOP_SYNC = True
# Hue Colors — CIE xy chromaticity pairs; None means "turn the lights off".
HUE_COLOR_NO_FLAG = None
HUE_COLOR_BLUE_FLAG = [0.1532, 0.0475]
HUE_COLOR_YELLOW_FLAG = [0.4787, 0.4681]
HUE_COLOR_BLACK_FLAG = None
HUE_COLOR_WHITE_FLAG = [0.3089, 0.3269]
HUE_COLOR_CHECKERED_FLAG = None
HUE_COLOR_PENALTY_FLAG = [0.6897, 0.3074]
HUE_COLOR_GREEN_FLAG = [0.17, 0.7]
HUE_COLOR_ORANGE_FLAG = [0.633, 0.3522]
# GUI Theme Customization — hex colors used for the flag preview canvas.
sg.theme('Black')
GUI_COLOR_NO_FLAG = '#000000'
GUI_COLOR_BLUE_FLAG = '#0D47A1'
GUI_COLOR_YELLOW_FLAG = '#FFEB3B'
GUI_COLOR_BLACK_FLAG = '#000000'
GUI_COLOR_WHITE_FLAG = '#ffffff'
GUI_COLOR_CHECKERED_FLAG = '#000000'
GUI_COLOR_PENALTY_FLAG = '#b71c1c'
GUI_COLOR_GREEN_FLAG = '#388E3C'
GUI_COLOR_ORANGE_FLAG = '#FF6F00'
def open_window():
    """Build the main window, restore saved settings, and run the event loop.

    Blocks until the window is closed. All configuration lives in the
    module-level HUE_CONNECTION dict and is persisted on every change.
    """
    global HUE_CONNECTION
    disable_lights_menu = True
    show_30_seconds_info = False
    msg_bridge = ''
    bridge: Bridge
    light_options = []
    load_hue_connection_from_file()
    # Try the saved bridge IP first so the UI starts fully enabled if possible.
    if bridge_connection_works():
        disable_lights_menu = False
        msg_bridge = 'Connection established.'
        bridge = Bridge(HUE_CONNECTION['ip'])
        light_options = get_lights_from_bridge(bridge)
        show_30_seconds_info = False
    else:
        # NOTE(review): `bridge` stays unbound on this path; the flag handlers
        # below would raise UnboundLocalError if triggered, but they are
        # disabled until a connection succeeds — confirm no enabled path
        # reaches them without a bridge.
        disable_lights_menu = True
        msg_bridge = 'Connection failed.'
        show_30_seconds_info = True
    # GUI Frames
    flag_frame_layout = [
        [sg.Graph(canvas_size=(875, 100), graph_bottom_left=(0, 0), graph_top_right=(875, 100),
                  background_color=GUI_COLOR_NO_FLAG, key='CANVAS_FLAG')]
    ]
    bridge_ip_frame_layout = [
        [sg.Input(key='INPUT_IP', default_text=HUE_CONNECTION['ip'], font=('Helvetica', 24), size=(15, 1)),
         sg.Button('Connect', key='BTN_BRIDGE', font=('Helvetica', 24))]
    ]
    bridge_status_frame_layout = [
        [sg.Text(size=(22, 1), key='MSG_BRIDGE', text=msg_bridge, font=('Helvetica', 24))]
    ]
    light_menu_frame_layout = [
        [sg.Listbox(values=light_options, key='MENU_LIGHT', disabled=disable_lights_menu,
                    default_values=HUE_CONNECTION['lights'], enable_events=True, font=('Helvetica', 24), size=(23, 4),
                    select_mode='multiple')]
    ]
    brightness_menu_frame_layout = [
        [sg.Slider(range=(1, 255), default_value=int(HUE_CONNECTION['brightness']), size=(20, 20),
                   orientation='horizontal', font=('Helvetica', 24), enable_events=True, key='SLIDER_BRIGHTNESS')]
    ]
    sim_select_frame_layout = [
        [sg.Radio('Assetto Corsa Competizione', 'SIM_SELECT', font=('Helvetica', 24), disabled=disable_lights_menu,
                  key='SIM_SELECT_ACC', enable_events=True, default=HUE_CONNECTION['sim'] == 'ACC'),
         sg.Radio('iRacing', 'SIM_SELECT', font=('Helvetica', 24), size=(21, 1), disabled=disable_lights_menu,
                  key='SIM_SELECT_IRACING', enable_events=True, default=HUE_CONNECTION['sim'] == 'iRacing')]
    ]
    acc_color_test_frame_layout = [
        [sg.Button('No Flag', key='BTN_ACC_NO_FLAG', button_color=('#ffffff', GUI_COLOR_NO_FLAG),
                   disabled=disable_lights_menu, font=('Helvetica', 24)),
         sg.Button('Blue Flag', key='BTN_ACC_BLUE_FLAG', button_color=('#ffffff', GUI_COLOR_BLUE_FLAG),
                   disabled=disable_lights_menu, font=('Helvetica', 24)),
         sg.Button('Yellow Flag', key='BTN_ACC_YELLOW_FLAG', button_color=('#000000', GUI_COLOR_YELLOW_FLAG),
                   disabled=disable_lights_menu, font=('Helvetica', 24)),
         sg.Button('Black Flag', key='BTN_ACC_BLACK_FLAG', button_color=('#ffffff', GUI_COLOR_BLACK_FLAG),
                   disabled=disable_lights_menu, font=('Helvetica', 24)),
         sg.Button('White Flag', key='BTN_ACC_WHITE_FLAG', button_color=('#000000', GUI_COLOR_WHITE_FLAG),
                   disabled=disable_lights_menu, font=('Helvetica', 24))],
        [sg.Button('Checkered Flag', key='BTN_ACC_CHECKERED_FLAG', button_color=('#ffffff', GUI_COLOR_CHECKERED_FLAG),
                   disabled=disable_lights_menu, font=('Helvetica', 24)),
         sg.Button('Penalty Flag', key='BTN_ACC_PENALTY_FLAG', button_color=('#ffffff', GUI_COLOR_PENALTY_FLAG),
                   disabled=disable_lights_menu, font=('Helvetica', 24)),
         sg.Button('Green Flag', key='BTN_ACC_GREEN_FLAG', button_color=('#ffffff', GUI_COLOR_GREEN_FLAG),
                   disabled=disable_lights_menu, font=('Helvetica', 24)),
         sg.Button('Orange Flag', key='BTN_ACC_ORANGE_FLAG', button_color=('#ffffff', GUI_COLOR_ORANGE_FLAG),
                   disabled=disable_lights_menu, font=('Helvetica', 24))]
    ]
    iracing_color_test_frame_layout = [
        [sg.Button('No Flag', key='BTN_IRACING_NO_FLAG', button_color=('#ffffff', GUI_COLOR_NO_FLAG),
                   disabled=disable_lights_menu, font=('Helvetica', 24)),
         sg.Button('Blue Flag', key='BTN_IRACING_BLUE_FLAG', button_color=('#ffffff', GUI_COLOR_BLUE_FLAG),
                   disabled=disable_lights_menu, font=('Helvetica', 24)),
         sg.Button('Yellow Flag', key='BTN_IRACING_YELLOW_FLAG', button_color=('#000000', GUI_COLOR_YELLOW_FLAG),
                   disabled=disable_lights_menu, font=('Helvetica', 24)),
         sg.Button('Black Flag', key='BTN_IRACING_BLACK_FLAG', button_color=('#ffffff', GUI_COLOR_BLACK_FLAG),
                   disabled=disable_lights_menu, font=('Helvetica', 24)),
         sg.Button('White Flag', key='BTN_IRACING_WHITE_FLAG', button_color=('#000000', GUI_COLOR_WHITE_FLAG),
                   disabled=disable_lights_menu, font=('Helvetica', 24))],
        [sg.Button('Checkered Flag', key='BTN_IRACING_CHEQUERED_FLAG',
                   button_color=('#ffffff', GUI_COLOR_CHECKERED_FLAG),
                   disabled=disable_lights_menu, font=('Helvetica', 24)),
         sg.Button('Red Flag', key='BTN_IRACING_RED_FLAG', button_color=('#ffffff', GUI_COLOR_PENALTY_FLAG),
                   disabled=disable_lights_menu, font=('Helvetica', 24)),
         sg.Button('Green Flag', key='BTN_IRACING_GREEN_FLAG', button_color=('#ffffff', GUI_COLOR_GREEN_FLAG),
                   disabled=disable_lights_menu, font=('Helvetica', 24)),
         sg.Button('Meatball Flag', key='BTN_IRACING_MEATBALL_FLAG', button_color=('#ffffff', GUI_COLOR_ORANGE_FLAG),
                   disabled=disable_lights_menu, font=('Helvetica', 24))]
    ]
    sync_controls_frame_layout = [
        [sg.Button('Start', key='BTN_SYNC_START', disabled=disable_lights_menu, font=('Helvetica', 24)),
         sg.Button('Stop', key='BTN_SYNC_STOP', disabled=disable_lights_menu, font=('Helvetica', 24))]
    ]
    sync_status_frame_layout = [
        [sg.Text(size=(34, 1), key='MSG_SYNC_STATUS', text='Stopped.', font=('Helvetica', 24))]
    ]
    # Window Layout — only one of the two flag-test frames is visible at a
    # time, toggled by the sim radio buttons.
    layout = [
        [sg.Frame('flag', flag_frame_layout, font=('Helvetica', 10), title_color='#ffffff')],
        [sg.Frame('bridge ip', bridge_ip_frame_layout, font=('Helvetica', 10), title_color='#ffffff'),
         sg.Frame('bridge status', bridge_status_frame_layout, font=('Helvetica', 10), title_color='#ffffff')],
        [sg.Frame('lights', light_menu_frame_layout, font=('Helvetica', 10), title_color='#ffffff'),
         sg.Frame('brightness', brightness_menu_frame_layout, font=('Helvetica', 10), title_color='#ffffff')],
        [sg.Frame('sim', sim_select_frame_layout, font=('Helvetica', 10), title_color='#ffffff')],
        [sg.pin(sg.Frame('flag test', acc_color_test_frame_layout, font=('Helvetica', 10), title_color='#ffffff',
                         visible=HUE_CONNECTION['sim'] == 'ACC', key='FRAME_ACC_FLAGS'))],
        [sg.pin(sg.Frame('flag test', iracing_color_test_frame_layout, font=('Helvetica', 10), title_color='#ffffff',
                         visible=HUE_CONNECTION['sim'] == 'iRacing', key='FRAME_IRACING_FLAGS'))],
        [sg.Frame('live sync', sync_controls_frame_layout, font=('Helvetica', 10), title_color='#ffffff'),
         sg.Frame('sync status', sync_status_frame_layout, font=('Helvetica', 10), title_color='#ffffff')],
        [sg.Text(
            'If you are connecting this app to your Bridge for the first time, you have to press the Pairing Button on your Bridge and then connect within 30 seconds.',
            size=(80, 2), text_color='#b71c1c', key='MSG_30_SECONDS', visible=show_30_seconds_info)],
    ]
    window = sg.Window('phue-racing-flags', layout, icon=images, font='Helvetica', finalize=True)
    # Event loop: every settings change is persisted immediately.
    while True:
        event, values = window.read()
        if event == 'BTN_BRIDGE':
            HUE_CONNECTION['ip'] = values['INPUT_IP']
            window['MENU_LIGHT'].update([])
            if (bridge_connection_works()):
                save_hue_connection_to_file()
                bridge = Bridge(HUE_CONNECTION['ip'])
                enable_interface(bridge, window)
            else:
                disable_interface(window)
        if event == 'MENU_LIGHT':
            HUE_CONNECTION['lights'] = values['MENU_LIGHT']
            save_hue_connection_to_file()
        if event == 'SLIDER_BRIGHTNESS':
            HUE_CONNECTION['brightness'] = values['SLIDER_BRIGHTNESS']
            save_hue_connection_to_file()
        if event == 'SIM_SELECT_ACC':
            # Switching sims stops any running sync and swaps the flag frame.
            stop_sync()
            HUE_CONNECTION['sim'] = 'ACC'
            window['FRAME_ACC_FLAGS'].update(visible=True)
            window['FRAME_IRACING_FLAGS'].update(visible=False)
            save_hue_connection_to_file()
        if event == 'SIM_SELECT_IRACING':
            stop_sync()
            HUE_CONNECTION['sim'] = 'iRacing'
            window['FRAME_ACC_FLAGS'].update(visible=False)
            window['FRAME_IRACING_FLAGS'].update(visible=True)
            save_hue_connection_to_file()
        # ACC Flag Buttons
        if event == 'BTN_ACC_NO_FLAG':
            raise_acc_flag(acc.ACCFlagType.ACC_NO_FLAG, bridge, window)
        if event == 'BTN_ACC_BLUE_FLAG':
            raise_acc_flag(acc.ACCFlagType.ACC_BLUE_FLAG, bridge, window)
        if event == 'BTN_ACC_YELLOW_FLAG':
            raise_acc_flag(acc.ACCFlagType.ACC_YELLOW_FLAG, bridge, window)
        if event == 'BTN_ACC_BLACK_FLAG':
            raise_acc_flag(acc.ACCFlagType.ACC_BLACK_FLAG, bridge, window)
        if event == 'BTN_ACC_WHITE_FLAG':
            raise_acc_flag(acc.ACCFlagType.ACC_WHITE_FLAG, bridge, window)
        if event == 'BTN_ACC_CHECKERED_FLAG':
            raise_acc_flag(acc.ACCFlagType.ACC_CHECKERED_FLAG, bridge, window)
        if event == 'BTN_ACC_PENALTY_FLAG':
            raise_acc_flag(acc.ACCFlagType.ACC_PENALTY_FLAG, bridge, window)
        if event == 'BTN_ACC_GREEN_FLAG':
            raise_acc_flag(acc.ACCFlagType.ACC_GREEN_FLAG, bridge, window)
        if event == 'BTN_ACC_ORANGE_FLAG':
            raise_acc_flag(acc.ACCFlagType.ACC_ORANGE_FLAG, bridge, window)
        # iRacing Flag Buttons
        if event == 'BTN_IRACING_NO_FLAG':
            raise_iracing_flag(iracing.IRacingGUIFlagType.IRACING_NO_FLAG, bridge, window)
        if event == 'BTN_IRACING_BLUE_FLAG':
            raise_iracing_flag(iracing.IRacingGUIFlagType.IRACING_BLUE_FLAG, bridge, window)
        if event == 'BTN_IRACING_YELLOW_FLAG':
            raise_iracing_flag(iracing.IRacingGUIFlagType.IRACING_YELLOW_FLAG, bridge, window)
        if event == 'BTN_IRACING_BLACK_FLAG':
            raise_iracing_flag(iracing.IRacingGUIFlagType.IRACING_BLACK_FLAG, bridge, window)
        if event == 'BTN_IRACING_WHITE_FLAG':
            raise_iracing_flag(iracing.IRacingGUIFlagType.IRACING_WHITE_FLAG, bridge, window)
        if event == 'BTN_IRACING_CHEQUERED_FLAG':
            raise_iracing_flag(iracing.IRacingGUIFlagType.IRACING_CHEQUERED_FLAG, bridge, window)
        if event == 'BTN_IRACING_RED_FLAG':
            raise_iracing_flag(iracing.IRacingGUIFlagType.IRACING_RED_FLAG, bridge, window)
        if event == 'BTN_IRACING_GREEN_FLAG':
            raise_iracing_flag(iracing.IRacingGUIFlagType.IRACING_GREEN_FLAG, bridge, window)
        if event == 'BTN_IRACING_MEATBALL_FLAG':
            raise_iracing_flag(iracing.IRacingGUIFlagType.IRACING_MEATBALL_FLAG, bridge, window)
        if event == 'BTN_SYNC_START':
            # Sync runs on a worker thread so the GUI stays responsive.
            window['MSG_SYNC_STATUS'].update('Running.')
            thread = threading.Thread(target=start_sync, args=(bridge, window,))
            thread.start()
        if event == 'BTN_SYNC_STOP':
            stop_sync()
        if event == sg.WINDOW_CLOSED:
            stop_sync()
            window.close()
            break
def enable_interface(bridge: Bridge, window: sg.Window):
    """Enable every control that needs a live bridge and refresh the light list.

    Called after a successful connection; mirror image of disable_interface().
    """
    window['MSG_BRIDGE'].update('Connection established.')
    window['MENU_LIGHT'].update(disabled=False)
    bridge_dependent_keys = (
        # ACC Flag Buttons
        'BTN_ACC_NO_FLAG', 'BTN_ACC_BLUE_FLAG', 'BTN_ACC_YELLOW_FLAG',
        'BTN_ACC_BLACK_FLAG', 'BTN_ACC_WHITE_FLAG', 'BTN_ACC_CHECKERED_FLAG',
        'BTN_ACC_PENALTY_FLAG', 'BTN_ACC_GREEN_FLAG', 'BTN_ACC_ORANGE_FLAG',
        # iRacing Flag Buttons
        'BTN_IRACING_NO_FLAG', 'BTN_IRACING_BLUE_FLAG', 'BTN_IRACING_YELLOW_FLAG',
        'BTN_IRACING_BLACK_FLAG', 'BTN_IRACING_WHITE_FLAG', 'BTN_IRACING_CHEQUERED_FLAG',
        'BTN_IRACING_RED_FLAG', 'BTN_IRACING_GREEN_FLAG', 'BTN_IRACING_MEATBALL_FLAG',
        # Sync controls and sim selection
        'BTN_SYNC_START', 'BTN_SYNC_STOP', 'SIM_SELECT_ACC', 'SIM_SELECT_IRACING',
    )
    for element_key in bridge_dependent_keys:
        window[element_key].update(disabled=False)
    window['MENU_LIGHT'].update(values=get_lights_from_bridge(bridge))
    window['MSG_30_SECONDS'].update(visible=False)
def disable_interface(window: sg.Window):
    """Grey out every control that requires a working bridge connection.

    Mirror image of enable_interface(); also clears the light list and shows
    the 30-second pairing hint. Fix: the original updated
    BTN_ACC_CHECKERED_FLAG twice; the duplicate call is removed.
    """
    window['MSG_BRIDGE'].update('Connection failed.')
    window['MENU_LIGHT'].update(disabled=True)
    bridge_dependent_keys = (
        # ACC Flag Buttons
        'BTN_ACC_NO_FLAG', 'BTN_ACC_BLUE_FLAG', 'BTN_ACC_YELLOW_FLAG',
        'BTN_ACC_BLACK_FLAG', 'BTN_ACC_WHITE_FLAG', 'BTN_ACC_CHECKERED_FLAG',
        'BTN_ACC_PENALTY_FLAG', 'BTN_ACC_GREEN_FLAG', 'BTN_ACC_ORANGE_FLAG',
        # iRacing Flag Buttons
        'BTN_IRACING_NO_FLAG', 'BTN_IRACING_BLUE_FLAG', 'BTN_IRACING_YELLOW_FLAG',
        'BTN_IRACING_BLACK_FLAG', 'BTN_IRACING_WHITE_FLAG', 'BTN_IRACING_CHEQUERED_FLAG',
        'BTN_IRACING_RED_FLAG', 'BTN_IRACING_GREEN_FLAG', 'BTN_IRACING_MEATBALL_FLAG',
        # Sync controls and sim selection
        'BTN_SYNC_START', 'BTN_SYNC_STOP', 'SIM_SELECT_ACC', 'SIM_SELECT_IRACING',
    )
    for element_key in bridge_dependent_keys:
        window[element_key].update(disabled=True)
    window['MENU_LIGHT'].update(values=[])
    window['MSG_30_SECONDS'].update(visible=True)
def load_hue_connection_from_file():
    """Populate HUE_CONNECTION from the JSON save file.

    Any missing file, missing key, or corrupt JSON resets the connection to
    the default (unconfigured) state.
    """
    try:
        # `with` guarantees the handle is closed (the original leaked it).
        with open(SAVE_FILE_PATH, 'r') as save_file:
            data = json.load(save_file)
        HUE_CONNECTION['ip'] = data['ip']
        HUE_CONNECTION['lights'] = data['lights']
        HUE_CONNECTION['brightness'] = data['brightness']
        # An empty/None saved sim falls back to ACC.
        HUE_CONNECTION['sim'] = data['sim'] or 'ACC'
    except (FileNotFoundError, KeyError, json.JSONDecodeError) as error:
        print(error)
        HUE_CONNECTION['ip'] = ''
        # Fix: was reset to '' — 'lights' is a list everywhere else in the file.
        HUE_CONNECTION['lights'] = []
        HUE_CONNECTION['brightness'] = 255
        HUE_CONNECTION['sim'] = 'ACC'
def save_hue_connection_to_file():
    """Persist HUE_CONNECTION to SAVE_FILE_PATH as JSON."""
    # `with` flushes and closes the handle (the original leaked it, so a
    # crash could leave a truncated save file).
    with open(SAVE_FILE_PATH, 'w') as save_file:
        json.dump(HUE_CONNECTION, save_file)
def bridge_connection_works() -> bool:
    """Return True when a phue Bridge can be constructed for the stored IP."""
    if HUE_CONNECTION['ip'] == '':
        return False
    try:
        # Constructing Bridge performs the connection/registration handshake.
        Bridge(HUE_CONNECTION['ip'])
        return True
    except Exception:
        # Fix: was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.
        return False
def get_lights_from_bridge(bridge: 'Bridge') -> list:
    """Return the names of all lights known to *bridge*.

    Fixes the return annotation (`[]` was a list *instance*, not a type) and
    uses a forward-reference string for Bridge so the module's import order
    does not matter.
    """
    return [light.name for light in bridge.get_light_objects()]
def sync_acc_color(bridge: Bridge, window: sg.Window):
    """Poll ACC once and mirror its current flag onto lights and GUI."""
    current_flag = acc.get_flag()
    raise_acc_flag(current_flag, bridge, window)
def sync_iracing_color(bridge: Bridge, window: sg.Window):
    """Poll iRacing once and mirror its current flag onto lights and GUI."""
    current_flag = iracing.get_flag()
    raise_iracing_flag(current_flag, bridge, window)
def raise_acc_flag(flag: acc.ACCFlagType, bridge: Bridge, window: sg.Window):
    """Apply an ACC *flag* to the configured Hue lights and the GUI canvas.

    Replaces the nine copy-pasted if-blocks with a dispatch table. Flags whose
    Hue color is None (no/black/checkered) switch the lights off, exactly as
    before; an unknown flag does nothing (matching the original if-chain).
    """
    # flag -> (Hue xy color or None for "lights off", GUI canvas color)
    flag_map = {
        acc.ACCFlagType.ACC_NO_FLAG: (HUE_COLOR_NO_FLAG, GUI_COLOR_NO_FLAG),
        acc.ACCFlagType.ACC_BLUE_FLAG: (HUE_COLOR_BLUE_FLAG, GUI_COLOR_BLUE_FLAG),
        acc.ACCFlagType.ACC_YELLOW_FLAG: (HUE_COLOR_YELLOW_FLAG, GUI_COLOR_YELLOW_FLAG),
        acc.ACCFlagType.ACC_BLACK_FLAG: (HUE_COLOR_BLACK_FLAG, GUI_COLOR_BLACK_FLAG),
        acc.ACCFlagType.ACC_WHITE_FLAG: (HUE_COLOR_WHITE_FLAG, GUI_COLOR_WHITE_FLAG),
        acc.ACCFlagType.ACC_CHECKERED_FLAG: (HUE_COLOR_CHECKERED_FLAG, GUI_COLOR_CHECKERED_FLAG),
        acc.ACCFlagType.ACC_PENALTY_FLAG: (HUE_COLOR_PENALTY_FLAG, GUI_COLOR_PENALTY_FLAG),
        acc.ACCFlagType.ACC_GREEN_FLAG: (HUE_COLOR_GREEN_FLAG, GUI_COLOR_GREEN_FLAG),
        acc.ACCFlagType.ACC_ORANGE_FLAG: (HUE_COLOR_ORANGE_FLAG, GUI_COLOR_ORANGE_FLAG),
    }
    if flag not in flag_map:
        return
    hue_xy, gui_color = flag_map[flag]
    if hue_xy is None:
        light_state = {'transitiontime': 0, 'on': False}
    else:
        light_state = {'transitiontime': 0, 'on': True,
                       'bri': int(HUE_CONNECTION['brightness']), 'xy': hue_xy}
    for light in HUE_CONNECTION['lights']:
        bridge.set_light(light, light_state)
    window['CANVAS_FLAG'].update(background_color=gui_color)
def raise_iracing_flag(flag: iracing.IRacingGUIFlagType, bridge: Bridge, window: sg.Window):
    """Apply an iRacing *flag* to the configured Hue lights and the GUI canvas.

    Replaces the nine copy-pasted if-blocks with a dispatch table. The red
    flag reuses the penalty colors and the meatball flag the orange colors,
    exactly as in the original; flags whose Hue color is None turn the lights
    off; an unknown flag does nothing.
    """
    # flag -> (Hue xy color or None for "lights off", GUI canvas color)
    flag_map = {
        iracing.IRacingGUIFlagType.IRACING_NO_FLAG: (HUE_COLOR_NO_FLAG, GUI_COLOR_NO_FLAG),
        iracing.IRacingGUIFlagType.IRACING_BLUE_FLAG: (HUE_COLOR_BLUE_FLAG, GUI_COLOR_BLUE_FLAG),
        iracing.IRacingGUIFlagType.IRACING_YELLOW_FLAG: (HUE_COLOR_YELLOW_FLAG, GUI_COLOR_YELLOW_FLAG),
        iracing.IRacingGUIFlagType.IRACING_BLACK_FLAG: (HUE_COLOR_BLACK_FLAG, GUI_COLOR_BLACK_FLAG),
        iracing.IRacingGUIFlagType.IRACING_WHITE_FLAG: (HUE_COLOR_WHITE_FLAG, GUI_COLOR_WHITE_FLAG),
        iracing.IRacingGUIFlagType.IRACING_CHEQUERED_FLAG: (HUE_COLOR_CHECKERED_FLAG, GUI_COLOR_CHECKERED_FLAG),
        iracing.IRacingGUIFlagType.IRACING_RED_FLAG: (HUE_COLOR_PENALTY_FLAG, GUI_COLOR_PENALTY_FLAG),
        iracing.IRacingGUIFlagType.IRACING_GREEN_FLAG: (HUE_COLOR_GREEN_FLAG, GUI_COLOR_GREEN_FLAG),
        iracing.IRacingGUIFlagType.IRACING_MEATBALL_FLAG: (HUE_COLOR_ORANGE_FLAG, GUI_COLOR_ORANGE_FLAG),
    }
    if flag not in flag_map:
        return
    hue_xy, gui_color = flag_map[flag]
    if hue_xy is None:
        light_state = {'transitiontime': 0, 'on': False}
    else:
        light_state = {'transitiontime': 0, 'on': True,
                       'bri': int(HUE_CONNECTION['brightness']), 'xy': hue_xy}
    for light in HUE_CONNECTION['lights']:
        bridge.set_light(light, light_state)
    window['CANVAS_FLAG'].update(background_color=gui_color)
def start_sync(bridge: Bridge, window: sg.Window):
    """Poll the selected sim in a loop and mirror its flag onto the lights.

    Runs on a worker thread (started from the BTN_SYNC_START handler) and
    exits when stop_sync() raises the STOP_SYNC flag.
    """
    global STOP_SYNC
    # Only start a loop if none is running (STOP_SYNC is True when stopped);
    # a second press of Start while syncing is therefore a no-op.
    if STOP_SYNC:
        STOP_SYNC = False
        while True:
            if HUE_CONNECTION['sim'] == 'ACC':
                sync_acc_color(bridge, window)
                time.sleep(0.1)
            if HUE_CONNECTION['sim'] == 'iRacing':
                sync_iracing_color(bridge, window)
                time.sleep(0.1)
            # Checked once per iteration, so stopping takes up to ~0.2s.
            if STOP_SYNC:
                window['MSG_SYNC_STATUS'].update('Stopped.')
                break
def stop_sync():
    """Signal the loop in start_sync() to finish its current iteration."""
    global STOP_SYNC
    STOP_SYNC = True
# Module entry point: build the window and block in its event loop.
open_window()
|
qualys.py | from requests.auth import HTTPBasicAuth
import requests
import xmltodict, json
import ast
import time
from multiprocessing import Process
import boto3
import os
# NOTE(review): `username`, `password`, `option`, `network_id`, `scanner_name`,
# `queue_name`, `bucket` and (later) `template_id` are not defined anywhere in
# this file — presumably injected by a config/templating step before the
# script runs. Confirm before deploying; as-is this module raises NameError.
auth = HTTPBasicAuth(username, password)
option_title = option
ip_network_id = network_id
iscanner_name = scanner_name
# SQS queue used both to receive scan requests and to publish results.
mis_sqs_secret = os.environ.get('MIS_SQS_SECRET')
mis_sqs_key = os.environ.get('MIS_SQS_KEY')
sqs_queue_name = queue_name
session_sqs = boto3.Session(aws_access_key_id = mis_sqs_key, aws_secret_access_key = mis_sqs_secret, region_name = 'us-east-1')
sqs = session_sqs.resource('sqs')
queue = sqs.get_queue_by_name(QueueName = sqs_queue_name)
# S3 bucket where the finished CSV/PDF reports are uploaded.
bucket_name = bucket
mis_s3_secret = os.environ.get('MIS_S3_SECRET')
mis_s3_key = os.environ.get('MIS_S3_KEY')
session_s3 = boto3.Session(aws_access_key_id = mis_s3_key, aws_secret_access_key = mis_s3_secret, region_name = 'us-east-1')
s3 = session_s3.resource('s3')
bucket = s3.Bucket(bucket_name)
def qualys_scan(ip, project, version,message):
    """Run one Qualys scan end-to-end in a worker process (Python 2).

    Launches a scan for `ip`, polls until it finishes, generates + downloads
    CSV and PDF reports, uploads both to S3, deletes the SQS `message`, and
    publishes a result message on the queue. Any exception is reported back
    on the queue instead of crashing the worker.
    """
    try:
        index = project + version
        print "Launch scan for " + index
        #launch scan
        url_scan = "https://qualysapi.qualys.com/api/2.0/fo/scan/"
        # NOTE(review): the ' ip_network_id' key has a leading space — the API
        # will not match that parameter name; confirm whether network
        # targeting ever took effect.
        params_scan = {'action': 'launch', 'option_title': option_title, ' ip_network_id': ip_network_id, 'iscanner_name': iscanner_name}
        params_scan['ip'] = ip
        params_scan['scan_title'] = "scan_" + index
        header_scan = {'X-Requested-With': 'Curl'}
        scan = requests.post(url_scan, auth = auth, params = params_scan, headers = header_scan)
        # xmltodict -> json round-trip turns OrderedDicts into plain dicts.
        scan_response = json.loads(json.dumps(xmltodict.parse(scan.text)))
        print scan_response['SIMPLE_RETURN']['RESPONSE']['ITEM_LIST']['ITEM']
        # Extract the scan reference id from the launch response.
        for item in scan_response['SIMPLE_RETURN']['RESPONSE']['ITEM_LIST']['ITEM']:
            if item['KEY'] == 'REFERENCE':
                scan_ID = item['VALUE']
        print scan_ID
        #get scan status and wait it finish
        time.sleep(2)
        params_scan_status = {'action': 'list'}
        params_scan_status['scan_ref'] = scan_ID
        scan_status_cmd = requests.get(url_scan, auth = auth, params = params_scan_status, headers = header_scan)
        scan_status_response = json.loads(json.dumps(xmltodict.parse(scan_status_cmd.text)))
        scan_status = scan_status_response['SCAN_LIST_OUTPUT']['RESPONSE']['SCAN_LIST']['SCAN']['STATUS']['STATE']
        print index + scan_status
        # Poll once a minute until the scan reaches the 'Finished' state.
        while scan_status != 'Finished':
            time.sleep(60)
            scan_status_cmd = requests.get(url_scan, auth = auth, params = params_scan_status, headers = header_scan)
            scan_status_response = json.loads(json.dumps(xmltodict.parse(scan_status_cmd.text)))
            scan_status = scan_status_response['SCAN_LIST_OUTPUT']['RESPONSE']['SCAN_LIST']['SCAN']['STATUS']['STATE']
            print index + scan_status
        print index + " scan finished, starting to get reports..."
        #when scan finished, launch report, here we need pdf and csv these two formats
        #csv
        print "Waiting CSV report for " + index
        url_report = "https://qualysapi.qualys.com/api/2.0/fo/report/"
        # NOTE(review): `template_id` is not defined in this file — see module
        # header note.
        params_report_csv = {'action': 'launch', 'template_id': template_id, 'report_type': 'Scan', 'output_format': 'csv'}
        params_report_csv['report_refs'] = scan_ID
        params_report_csv['report_title'] = "report_" + index
        header_report = {'X-Requested-With': 'Curl Sample'}
        report_csv = requests.post(url_report, auth = auth, params = params_report_csv, headers = header_report)
        report_csv_response = json.loads(json.dumps(xmltodict.parse(report_csv.text)))
        csv_ID = report_csv_response['SIMPLE_RETURN']['RESPONSE']['ITEM_LIST']['ITEM']['VALUE']
        #wait csv report finish
        params_query_csv = {'action': 'list'}
        params_query_csv['id'] = csv_ID
        query_csv = requests.get(url_report, auth = auth, params = params_query_csv, headers = header_report)
        query_csv_response = json.loads(json.dumps(xmltodict.parse(query_csv.text)))
        # Retry until the freshly launched report appears in the listing
        # (the STATUS keys are missing until then, raising KeyError).
        while True:
            try:
                query_csv = requests.get(url_report, auth = auth, params = params_query_csv, headers = header_report)
                query_csv_response = json.loads(json.dumps(xmltodict.parse(query_csv.text)))
                status_csv = query_csv_response['REPORT_LIST_OUTPUT']['RESPONSE']['REPORT_LIST']['REPORT']['STATUS']['STATE']
                break
            except Exception as e:
                time.sleep(2)
        print index + status_csv
        # Poll every 10s until the CSV report is generated.
        while status_csv != 'Finished':
            time.sleep(10)
            query_csv = requests.get(url_report, auth = auth, params = params_query_csv, headers = header_report)
            query_csv_response = json.loads(json.dumps(xmltodict.parse(query_csv.text)))
            status_csv = query_csv_response['REPORT_LIST_OUTPUT']['RESPONSE']['REPORT_LIST']['REPORT']['STATUS']['STATE']
            print index + status_csv
        #when csv rpeort finished, download it
        params_download_csv = {'action': 'fetch'}
        params_download_csv['id'] = csv_ID
        download_csv = requests.get(url_report, auth = auth, params = params_download_csv, headers = header_report)
        report_csv = download_csv.content
        f_csv = open('report_' + index + '.csv', 'wb')
        f_csv.write(report_csv)
        f_csv.close()
        print "CSV for " + index + " downloaded!!"
        #PDF — same launch/poll/fetch dance as the CSV above.
        print "Waiting PDF report for " + index
        params_report_pdf = {'action': 'launch', 'template_id': template_id, 'report_type': 'Scan', 'output_format': 'pdf'}
        params_report_pdf['report_refs'] = scan_ID
        params_report_pdf['report_title'] = "report_" + index
        header_report = {'X-Requested-With': 'Curl Sample'}
        report_pdf = requests.post(url_report, auth = auth, params = params_report_pdf, headers = header_report)
        report_pdf_response = json.loads(json.dumps(xmltodict.parse(report_pdf.text)))
        pdf_ID = report_pdf_response['SIMPLE_RETURN']['RESPONSE']['ITEM_LIST']['ITEM']['VALUE']
        #wait pdf report finish
        time.sleep(2)
        params_query_pdf = {'action': 'list'}
        params_query_pdf['id'] = pdf_ID
        query_pdf = requests.get(url_report, auth = auth, params = params_query_pdf, headers = header_report)
        query_pdf_response = json.loads(json.dumps(xmltodict.parse(query_pdf.text)))
        while True:
            try:
                query_pdf = requests.get(url_report, auth = auth, params = params_query_pdf, headers = header_report)
                query_pdf_response = json.loads(json.dumps(xmltodict.parse(query_pdf.text)))
                status_pdf = query_pdf_response['REPORT_LIST_OUTPUT']['RESPONSE']['REPORT_LIST']['REPORT']['STATUS']['STATE']
                break
            except Exception as e:
                time.sleep(2)
        print index + status_pdf
        while status_pdf != 'Finished':
            time.sleep(10)
            query_pdf = requests.get(url_report, auth = auth, params = params_query_pdf, headers = header_report)
            query_pdf_response = json.loads(json.dumps(xmltodict.parse(query_pdf.text)))
            status_pdf = query_pdf_response['REPORT_LIST_OUTPUT']['RESPONSE']['REPORT_LIST']['REPORT']['STATUS']['STATE']
            print index + status_pdf
        #when pdf rpeort finished, download it
        params_download_pdf = {'action': 'fetch'}
        params_download_pdf['id'] = pdf_ID
        download_pdf = requests.get(url_report, auth = auth, params = params_download_pdf, headers = header_report)
        report_pdf = download_pdf.content
        f_pdf = open('report_' + index + '.pdf', 'wb')
        f_pdf.write(report_pdf)
        f_pdf.close()
        print "PDF for " + index + " downloaded!!"
        #once two reports finished, upload to S3
        file_csv = "report_" + index + ".csv"
        file_pdf = "report_" + index + ".pdf"
        # NOTE(review): index = project + version, so splitting on '_' assumes
        # `project` itself ends with an underscore — confirm the message
        # format upstream.
        project_name = index.split('_')[0]
        version = index.split('_')[1]
        directory = project_name + "/" + version + "/"
        bucket.upload_file(file_csv, directory + file_csv)
        bucket.upload_file(file_pdf, directory + file_pdf)
        #delete the message and return a new msg to main portal
        os.remove("report_" + index + ".csv")
        os.remove("report_" + index + ".pdf")
        message.delete()
        msg_body = "{\"projectname\": \"" + project + "\", \"releaseno\": " + version + ", \"path_for_report\": \"" + directory + file_csv +"\"}"
        response = queue.send_messages(
            Entries=[
                {
                    'Id': 'string',
                    'MessageBody': msg_body,
                    'DelaySeconds': 0,
                    'MessageAttributes': {
                        'Title': {
                            'StringValue': 'qualys_response',
                            'DataType': 'String'
                        }
                    },
                    'MessageDeduplicationId': 'string',
                    'MessageGroupId': 'qualys'
                },
            ]
        )
        print response
    except Exception as e:
        # Report the failure back on the queue so the portal is not left waiting.
        print str(e)
        msg_body = "{\"projectname\": \"" + project + "\", \"releaseno\": " + version + ", \"path_for_report\": \"null\", \"error\": \"" + str(e) + "\"}"
        response = queue.send_messages(
            Entries=[
                {
                    'Id': 'string',
                    'MessageBody': msg_body,
                    'DelaySeconds': 0,
                    'MessageAttributes': {
                        'Title': {
                            'StringValue': 'qualys_response',
                            'DataType': 'String'
                        }
                    },
                    'MessageDeduplicationId': 'string',
                    'MessageGroupId': 'qualys'
                },
            ]
        )
        print response
if __name__ == "__main__":
while True:
messages = queue.receive_messages(MaxNumberOfMessages = 10, MessageAttributeNames=['Title'])
for message in list(set(messages)):
print message.body
if message.message_attributes is not None and message.message_attributes.get('Title').get('StringValue') == "qualys":
data = ast.literal_eval(message.body)
ips = data['params']
project_name = data['projectname']
version = str(data['releaseno'])
p = Process(target = qualys_scan, args = (ips, project_name, version, message))
p.start()
else:
time.sleep(2)
print "no msg"
|
apschedule_demo.py | # 定时任务demo
from subprocess import call
from apscheduler.schedulers.background import BackgroundScheduler
import time
import os
from multiprocessing import Process, Queue
# This notification works, but it is not great: it also disappears on its own.
def mac_time():
    """Pop up a macOS notification showing the current local time."""
    formatted_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    notification_title = "报警时间"
    body_text = formatted_now
    # display notification "Lorem ipsum dolor sit amet" with title "Title"
    osa_script = ('display notification "' + body_text
                  + '" with title "' + notification_title + '"')
    print(osa_script)
    call(["osascript", "-e", osa_script])
# BackgroundScheduler maintains its own loop. It is usable but feels a bit awkward;
# it could live in a separate process so it does not interfere with the main work.
def run_background_scheduler():
    """Fire mac_time every 2 minutes via a BackgroundScheduler.

    BackgroundScheduler runs jobs in its own thread, so this function
    keeps the calling thread alive with a sleep loop until interrupted,
    then shuts the scheduler down.
    """
    sched = BackgroundScheduler()
    sched.add_job(mac_time, 'interval', minutes=2)
    sched.start()
    exit_key = 'Break' if os.name == 'nt' else 'C'
    print('Press Ctrl+{0} to exit'.format(exit_key))
    try:
        # Simulate application activity (keeps the main thread alive).
        while True:
            time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        # Not strictly necessary in daemonic mode, but clean shutdown
        # is preferred when possible.
        sched.shutdown()
if __name__ == "__main__":
    # Bug fix: the original passed target=run_background_scheduler()
    # (with parentheses), which ran the scheduler loop in THIS process
    # and never actually created/started the child Process. Pass the
    # callable itself instead.
    process = Process(target=run_background_scheduler)
    process.start()
    # Wait for the worker so the parent does not exit immediately.
    process.join()
|
tornado_app.py | #*********************************************************************
# ESP8266ARS: Python project for WebTornadoDS
#
# Author(s): Daniel Roldan <droldan@cells.es>
#
# Copyright (C) 2017, CELLS / ALBA Synchrotron
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import fandango as fn
import json
import os
import threading
import tornado
from tornado.web import Application, RequestHandler
from tornado.websocket import WebSocketHandler
class MainHandler(RequestHandler):
def initialize(self, path):
self.path= path
def get(self, url=None):
to_render = "index.html"
if url != '':
if not url.endswith('json') and not url.endswith('html'):
if not url.endswith('/'):
self.redirect(url+'/')
to_render = os.path.join(self.path, url, 'index.html')
else:
to_render = os.path.join(self.path, url)
print "Rendering %s" % to_render
self.render(to_render)
class TangoDSSocketHandler(WebSocketHandler):
    """WebSocket endpoint: tracks connected clients and forwards
    configuration messages to the parent device server."""
    # Class-level set shared by all handler instances: one entry per
    # currently-connected client.
    waiters = set()
    def initialize(self, parent):
        # parent is the device-server object that consumes client events.
        self._parent = parent
    def open(self):
        # Add client to the shared waiters list and notify the parent.
        self.waiters.add(self)
        client = self.request.remote_ip
        print "Socket open with client " + str(client)
        print "Main data waiters list lenght %s" % len(self.waiters)
        self._parent.newClient(self)
    def on_close(self):
        # Remove client from the clients list.
        client = self.request.remote_ip
        self.waiters.remove(self)
        print "Socket closed client " + str(client)
        print "Main waiters list lenght %s" % len(self.waiters)
    def unicode2python(self, obj):
        # Recursively convert unicode strings in a decoded JSON structure
        # to plain (byte) strings; dict items arrive as (key, value)
        # tuples, which recurse through the isSequence branch.
        if fn.isMapping(obj):
            n = dict(self.unicode2python(t) for t in obj.items())
        elif fn.isSequence(obj):
            n = list(self.unicode2python(t) for t in obj)
        elif fn.isString(obj):
            n = str(obj)
        else:
            n = obj
        return n
    def on_message(self, jsondata):
        # Process a message from a client: currently only the
        # 'SaveNewConfig' command is handled.
        client = self.request.remote_ip
        try:
            jsondata = json.loads(jsondata, object_hook=self.unicode2python)
            jsondata = dict(jsondata)
            # SaveNewConfig is sent when the Web page saves or deletes a
            # configuration.
            if 'SaveNewConfig' in jsondata.keys():
                conf = jsondata['SaveNewConfig']
                # Re-serialise so the device server receives a JSON string.
                conf = json.dumps(conf)
                # Call the DS in order to save the config in the properties.
                self._parent.setStructureConfig(conf)
                print "Client=", client, " DATA=", conf
        except Exception, e:
            # Malformed JSON or unknown command: log and keep the socket open.
            response = "IP Client:" + client + " ERROR!!: Invalid Command!! " + str(
                e)
            print response
class TornadoManagement(object):
def __init__(self, port=8888, parent=None,):
self._webport = port
path = os.path.dirname(os.path.abspath(__file__))
try:
webfilespath = self.parent.WebFilesPath
if webfilespath != "":
path = webfilespath
except:
pass
self.template_path = path + "/templates"
self.static_path = path + "/static"
self.Json_static = path + "/JSONfiles/"
self.handlers = [
#(r"/index.html", MainHandler),
(r"/service/*", TangoDSSocketHandler,
{"parent": parent}),
# (r"/*/(.*json*)", tornado.web.StaticFileHandler,
# {'path': self.Json_static}),
(r"/(.*)", MainHandler, {'path': self.Json_static}),]
self.server = None
self.started = False
def start(self):
# Starting Tornado async
self.thread = threading.Thread(target=self._startTornado)
self.thread.daemon = True
self.thread.start()
def _startTornado(self):
application = Application(self.handlers,
static_path=self.static_path,
template_path=self.template_path,
debug=True)
# Created a simple gHHTPServer to manage the server
# and close it on Stop, else the port is in use
from tornado.httpserver import HTTPServer
self.started = False
try:
self.server = HTTPServer(application)
self.started = True
self.server.listen(self._webport)
tornado.ioloop.IOLoop.current().start()
except Exception as e:
print e
def stop(self):
# Close the HTPServer use the port
if self.started:
self.server.stop()
# stop the tornado Service
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.add_callback(ioloop.stop)
self.thread.join()
self.started = False
def isRunning(self):
return self.started
|
server.py | #!/usr/bin/env python3
import socket
import os
import sys
import traceback
import random
import time
from threading import Thread
# TCP port to listen on; overridable via the PORT environment variable.
PORT = int(os.environ.get("PORT", 6969))
# Read the flag once at startup. Bug fix: use a context manager so the
# file handle is closed (the original open() was never closed).
with open('flag.txt', 'r') as _flag_file:
    flag = _flag_file.readline()
def client_thread(conn, ip, port, MAX_BUFFER_SIZE = 4096):
    """Play one number-guessing game with a connected client.

    The client has 5 seconds and 30 guesses to find RANDOM_NUMBER; a
    correct guess is rewarded with the module-level flag. The socket is
    always closed on exit.
    """
    RANDOM_NUMBER = random.randint(0, 1000000)
    tries = 0
    conn.sendall(b"I'm am guessing a number between 0 and 1000000\n")
    conn.sendall(b"You have 5 seconds and 30 tries to guess it correctly :)\n")
    max_time = 5
    start = time.time()
    while True:
        input_from_client_bytes = conn.recv(MAX_BUFFER_SIZE)
        # NOTE(review): getsizeof measures the bytes object including
        # interpreter overhead, not the payload length; len() would be
        # the usual check — kept as-is to preserve the warning behavior.
        siz = sys.getsizeof(input_from_client_bytes)
        if siz >= MAX_BUFFER_SIZE:
            print("The length of input is probably too long: {}".format(siz))
        try:
            guess = int(input_from_client_bytes.decode("utf8").rstrip())
        except ValueError:
            # Non-numeric input ends the game.
            conn.sendall(b"That's not an int!\n")
            break
        else:
            tries += 1
            if guess < RANDOM_NUMBER:
                conn.sendall(f"Your guess is lesser than actual number; {30 - tries} tries left\n".encode())
            elif guess > RANDOM_NUMBER:
                conn.sendall(f"Your guess is greater than the actual number; {30 - tries} tries left\n".encode())
            else:
                conn.sendall(f"\n Congratulations! Here is the flag\n{flag}\n".encode())
                break
            # Time limit is only checked after a guess because recv() blocks.
            if time.time()-start > max_time:
                conn.sendall(f"\nTime Limit Exceeded !!!!\n".encode())
                break
            # Bug fixes: the message was sent as str (sendall requires
            # bytes, so this line crashed the thread), and 'tries > 30'
            # permitted a 31st guess despite the advertised limit of 30.
            if tries >= 30:
                conn.sendall(f"\n No. of tries Exceeded \n".encode())
                break
    conn.sendall(b"Bye! \n")
    conn.close()
    print('Connection ' + ip + ':' + port + " ended")
def start_server():
    """Bind on PORT and spawn one client_thread per accepted connection."""
    soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # SO_REUSEADDR allows a quick restart without waiting for TIME_WAIT.
    soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    print('Socket created')
    try:
        soc.bind(("0.0.0.0", PORT))
        print('Socket bind complete')
    except socket.error as msg:
        print('Bind failed. Error : ' + str(sys.exc_info()))
        sys.exit()
    soc.listen(10)
    print('Socket now listening')
    try:
        while True:
            conn, addr = soc.accept()
            ip, port = str(addr[0]), str(addr[1])
            print('Accepting connection from ' + ip + ':' + port)
            try:
                Thread(target=client_thread, args=(conn, ip, port)).start()
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are not swallowed.
            except Exception:
                print("Terrible error!")
                traceback.print_exc()
    finally:
        # Previously dead code after the infinite loop; the finally block
        # guarantees the listening socket is released on interrupt.
        soc.close()


if __name__ == "__main__":
    start_server()
|
test_capi.py | # Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
from collections import OrderedDict
import _thread
import importlib.machinery
import importlib.util
import os
import pickle
import random
import re
import subprocess
import sys
import textwrap
import threading
import time
import unittest
import weakref
from test import support
from test.support import MISSING_C_DOCSTRINGS
from test.support import import_helper
from test.support import threading_helper
from test.support import warnings_helper
from test.support.script_helper import assert_python_failure, assert_python_ok
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
try:
import _testmultiphase
except ImportError:
_testmultiphase = None
# Skip this test if the _testcapi module isn't available.
_testcapi = import_helper.import_module('_testcapi')
import _testinternalcapi
# Were we compiled --with-pydebug or with #define Py_DEBUG?
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
def decode_stderr(err):
    """Decode captured stderr bytes, normalising Windows line endings.

    Undecodable bytes are replaced with U+FFFD rather than raising.
    """
    text = err.decode('utf-8', 'replace')
    return text.replace('\r', '')
def testfunction(self):
"""some doc"""
return self
class InstanceMethod:
    """Holder whose attributes are instancemethod-wrapped callables.

    _testcapi.instancemethod wraps a plain callable so that attribute
    access on an instance binds it like a method; the wrapping itself is
    what the tests below exercise.
    """
    id = _testcapi.instancemethod(id)
    testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
def test_instancemethod(self):
inst = InstanceMethod()
self.assertEqual(id(inst), inst.id())
self.assertTrue(inst.testfunction() is inst)
self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)
InstanceMethod.testfunction.attribute = "test"
self.assertEqual(testfunction.attribute, "test")
self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")
@support.requires_subprocess()
def test_no_FatalError_infinite_loop(self):
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import _testcapi;'
'_testcapi.crash_no_current_thread()'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
self.assertEqual(out, b'')
# This used to cause an infinite loop.
self.assertTrue(err.rstrip().startswith(
b'Fatal Python error: '
b'PyThreadState_Get: '
b'the function must be called with the GIL held, '
b'but the GIL is released '
b'(the current Python thread state is NULL)'),
err)
def test_memoryview_from_NULL_pointer(self):
self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)
def test_exc_info(self):
raised_exception = ValueError("5")
new_exc = TypeError("TEST")
try:
raise raised_exception
except ValueError as e:
tb = e.__traceback__
orig_sys_exc_info = sys.exc_info()
orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
new_sys_exc_info = sys.exc_info()
new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
reset_sys_exc_info = sys.exc_info()
self.assertEqual(orig_exc_info[1], e)
self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
else:
self.assertTrue(False)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_seq_bytes_to_charp_array(self):
# Issue #15732: crash in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return 1
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
# Issue #15736: overflow in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return sys.maxsize
def __getitem__(self, i):
return b'x'
self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_subprocess_fork_exec(self):
class Z(object):
def __len__(self):
return 1
# Issue #15738: crash in subprocess_fork_exec()
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
Z(),[b'1'],3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
@unittest.skipIf(MISSING_C_DOCSTRINGS,
"Signature information for builtins requires docstrings")
def test_docstring_signature_parsing(self):
self.assertEqual(_testcapi.no_docstring.__doc__, None)
self.assertEqual(_testcapi.no_docstring.__text_signature__, None)
self.assertEqual(_testcapi.docstring_empty.__doc__, None)
self.assertEqual(_testcapi.docstring_empty.__text_signature__, None)
self.assertEqual(_testcapi.docstring_no_signature.__doc__,
"This docstring has no signature.")
self.assertEqual(_testcapi.docstring_no_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__doc__,
"docstring_with_invalid_signature($module, /, boo)\n"
"\n"
"This docstring has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__doc__,
"docstring_with_invalid_signature2($module, /, boo)\n"
"\n"
"--\n"
"\n"
"This docstring also has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_signature.__doc__,
"This docstring has a valid signature.")
self.assertEqual(_testcapi.docstring_with_signature.__text_signature__, "($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__doc__, None)
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__text_signature__,
"($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__doc__,
"\nThis docstring has a valid signature and some extra newlines.")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__text_signature__,
"($module, /, parameter)")
def test_c_type_with_matrix_multiplication(self):
M = _testcapi.matmulType
m1 = M()
m2 = M()
self.assertEqual(m1 @ m2, ("matmul", m1, m2))
self.assertEqual(m1 @ 42, ("matmul", m1, 42))
self.assertEqual(42 @ m1, ("matmul", 42, m1))
o = m1
o @= m2
self.assertEqual(o, ("imatmul", m1, m2))
o = m1
o @= 42
self.assertEqual(o, ("imatmul", m1, 42))
o = 42
o @= m1
self.assertEqual(o, ("matmul", 42, m1))
def test_c_type_with_ipow(self):
# When the __ipow__ method of a type was implemented in C, using the
# modulo param would cause segfaults.
o = _testcapi.ipowType()
self.assertEqual(o.__ipow__(1), (1, None))
self.assertEqual(o.__ipow__(2, 2), (2, 2))
def test_return_null_without_error(self):
# Issue #23571: A function must not return NULL without setting an
# error
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_null_without_error()
""")
rc, out, err = assert_python_failure('-c', code)
err = decode_stderr(err)
self.assertRegex(err,
r'Fatal Python error: _Py_CheckFunctionResult: '
r'a function returned NULL without setting an exception\n'
r'Python runtime state: initialized\n'
r'SystemError: <built-in function return_null_without_error> '
r'returned NULL without setting an exception\n'
r'\n'
r'Current thread.*:\n'
r' File .*", line 6 in <module>\n')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_null_without_error()
self.assertRegex(str(cm.exception),
'return_null_without_error.* '
'returned NULL without setting an exception')
def test_return_result_with_error(self):
# Issue #23571: A function must not return a result with an error set
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_result_with_error()
""")
rc, out, err = assert_python_failure('-c', code)
err = decode_stderr(err)
self.assertRegex(err,
r'Fatal Python error: _Py_CheckFunctionResult: '
r'a function returned a result with an exception set\n'
r'Python runtime state: initialized\n'
r'ValueError\n'
r'\n'
r'The above exception was the direct cause '
r'of the following exception:\n'
r'\n'
r'SystemError: <built-in '
r'function return_result_with_error> '
r'returned a result with an exception set\n'
r'\n'
r'Current thread.*:\n'
r' File .*, line 6 in <module>\n')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_result_with_error()
self.assertRegex(str(cm.exception),
'return_result_with_error.* '
'returned a result with an exception set')
def test_getitem_with_error(self):
# Test _Py_CheckSlotResult(). Raise an exception and then calls
# PyObject_GetItem(): check that the assertion catches the bug.
# PyObject_GetItem() must not be called with an exception set.
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.getitem_with_error({1: 2}, 1)
""")
rc, out, err = assert_python_failure('-c', code)
err = decode_stderr(err)
if 'SystemError: ' not in err:
self.assertRegex(err,
r'Fatal Python error: _Py_CheckSlotResult: '
r'Slot __getitem__ of type dict succeeded '
r'with an exception set\n'
r'Python runtime state: initialized\n'
r'ValueError: bug\n'
r'\n'
r'Current thread .* \(most recent call first\):\n'
r' File .*, line 6 in <module>\n'
r'\n'
r'Extension modules: _testcapi \(total: 1\)\n')
else:
# Python built with NDEBUG macro defined:
# test _Py_CheckFunctionResult() instead.
self.assertIn('returned a result with an exception set', err)
def test_buildvalue_N(self):
_testcapi.test_buildvalue_N()
def test_set_nomemory(self):
code = """if 1:
import _testcapi
class C(): pass
# The first loop tests both functions and that remove_mem_hooks()
# can be called twice in a row. The second loop checks a call to
# set_nomemory() after a call to remove_mem_hooks(). The third
# loop checks the start and stop arguments of set_nomemory().
for outer_cnt in range(1, 4):
start = 10 * outer_cnt
for j in range(100):
if j == 0:
if outer_cnt != 3:
_testcapi.set_nomemory(start)
else:
_testcapi.set_nomemory(start, start + 1)
try:
C()
except MemoryError as e:
if outer_cnt != 3:
_testcapi.remove_mem_hooks()
print('MemoryError', outer_cnt, j)
_testcapi.remove_mem_hooks()
break
"""
rc, out, err = assert_python_ok('-c', code)
lines = out.splitlines()
for i, line in enumerate(lines, 1):
self.assertIn(b'MemoryError', out)
*_, count = line.split(b' ')
count = int(count)
self.assertLessEqual(count, i*5)
self.assertGreaterEqual(count, i*5-2)
def test_mapping_keys_values_items(self):
class Mapping1(dict):
def keys(self):
return list(super().keys())
def values(self):
return list(super().values())
def items(self):
return list(super().items())
class Mapping2(dict):
def keys(self):
return tuple(super().keys())
def values(self):
return tuple(super().values())
def items(self):
return tuple(super().items())
dict_obj = {'foo': 1, 'bar': 2, 'spam': 3}
for mapping in [{}, OrderedDict(), Mapping1(), Mapping2(),
dict_obj, OrderedDict(dict_obj),
Mapping1(dict_obj), Mapping2(dict_obj)]:
self.assertListEqual(_testcapi.get_mapping_keys(mapping),
list(mapping.keys()))
self.assertListEqual(_testcapi.get_mapping_values(mapping),
list(mapping.values()))
self.assertListEqual(_testcapi.get_mapping_items(mapping),
list(mapping.items()))
def test_mapping_keys_values_items_bad_arg(self):
self.assertRaises(AttributeError, _testcapi.get_mapping_keys, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_values, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_items, None)
class BadMapping:
def keys(self):
return None
def values(self):
return None
def items(self):
return None
bad_mapping = BadMapping()
self.assertRaises(TypeError, _testcapi.get_mapping_keys, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_values, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_items, bad_mapping)
@unittest.skipUnless(hasattr(_testcapi, 'negative_refcount'),
'need _testcapi.negative_refcount')
def test_negative_refcount(self):
# bpo-35059: Check that Py_DECREF() reports the correct filename
# when calling _Py_NegativeRefcount() to abort Python.
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.negative_refcount()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err,
br'_testcapimodule\.c:[0-9]+: '
br'_Py_NegativeRefcount: Assertion failed: '
br'object has negative ref count')
def test_trashcan_subclass(self):
# bpo-35983: Check that the trashcan mechanism for "list" is NOT
# activated when its tp_dealloc is being called by a subclass
from _testcapi import MyList
L = None
for i in range(1000):
L = MyList((L,))
@support.requires_resource('cpu')
def test_trashcan_python_class1(self):
self.do_test_trashcan_python_class(list)
@support.requires_resource('cpu')
def test_trashcan_python_class2(self):
from _testcapi import MyList
self.do_test_trashcan_python_class(MyList)
def do_test_trashcan_python_class(self, base):
# Check that the trashcan mechanism works properly for a Python
# subclass of a class using the trashcan (this specific test assumes
# that the base class "base" behaves like list)
class PyList(base):
# Count the number of PyList instances to verify that there is
# no memory leak
num = 0
def __init__(self, *args):
__class__.num += 1
super().__init__(*args)
def __del__(self):
__class__.num -= 1
for parity in (0, 1):
L = None
# We need in the order of 2**20 iterations here such that a
# typical 8MB stack would overflow without the trashcan.
for i in range(2**20):
L = PyList((L,))
L.attr = i
if parity:
# Add one additional nesting layer
L = (L,)
self.assertGreater(PyList.num, 0)
del L
self.assertEqual(PyList.num, 0)
def test_heap_ctype_doc_and_text_signature(self):
self.assertEqual(_testcapi.HeapDocCType.__doc__, "somedoc")
self.assertEqual(_testcapi.HeapDocCType.__text_signature__, "(arg1, arg2)")
def test_null_type_doc(self):
self.assertEqual(_testcapi.NullTpDocType.__doc__, None)
def test_subclass_of_heap_gc_ctype_with_tpdealloc_decrefs_once(self):
class HeapGcCTypeSubclass(_testcapi.HeapGcCType):
def __init__(self):
self.value2 = 20
super().__init__()
subclass_instance = HeapGcCTypeSubclass()
type_refcnt = sys.getrefcount(HeapGcCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# Test that the type reference count is only decremented once
del subclass_instance
self.assertEqual(type_refcnt - 1, sys.getrefcount(HeapGcCTypeSubclass))
def test_subclass_of_heap_gc_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
class A(_testcapi.HeapGcCType):
def __init__(self):
self.value2 = 20
super().__init__()
class B(A):
def __init__(self):
super().__init__()
def __del__(self):
self.__class__ = A
A.refcnt_in_del = sys.getrefcount(A)
B.refcnt_in_del = sys.getrefcount(B)
subclass_instance = B()
type_refcnt = sys.getrefcount(B)
new_type_refcnt = sys.getrefcount(A)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
del subclass_instance
# Test that setting __class__ modified the reference counts of the types
self.assertEqual(type_refcnt - 1, B.refcnt_in_del)
self.assertEqual(new_type_refcnt + 1, A.refcnt_in_del)
# Test that the original type already has decreased its refcnt
self.assertEqual(type_refcnt - 1, sys.getrefcount(B))
# Test that subtype_dealloc decref the newly assigned __class__ only once
self.assertEqual(new_type_refcnt, sys.getrefcount(A))
def test_heaptype_with_dict(self):
inst = _testcapi.HeapCTypeWithDict()
inst.foo = 42
self.assertEqual(inst.foo, 42)
self.assertEqual(inst.dictobj, inst.__dict__)
self.assertEqual(inst.dictobj, {"foo": 42})
inst = _testcapi.HeapCTypeWithDict()
self.assertEqual({}, inst.__dict__)
def test_heaptype_with_negative_dict(self):
inst = _testcapi.HeapCTypeWithNegativeDict()
inst.foo = 42
self.assertEqual(inst.foo, 42)
self.assertEqual(inst.dictobj, inst.__dict__)
self.assertEqual(inst.dictobj, {"foo": 42})
inst = _testcapi.HeapCTypeWithNegativeDict()
self.assertEqual({}, inst.__dict__)
def test_heaptype_with_weakref(self):
inst = _testcapi.HeapCTypeWithWeakref()
ref = weakref.ref(inst)
self.assertEqual(ref(), inst)
self.assertEqual(inst.weakreflist, ref)
def test_heaptype_with_buffer(self):
inst = _testcapi.HeapCTypeWithBuffer()
b = bytes(inst)
self.assertEqual(b, b"1234")
def test_c_subclass_of_heap_ctype_with_tpdealloc_decrefs_once(self):
subclass_instance = _testcapi.HeapCTypeSubclass()
type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# Test that the type reference count is only decremented once
del subclass_instance
self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclass))
def test_c_subclass_of_heap_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
subclass_instance = _testcapi.HeapCTypeSubclassWithFinalizer()
type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer)
new_type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# The tp_finalize slot will set __class__ to HeapCTypeSubclass
del subclass_instance
# Test that setting __class__ modified the reference counts of the types
self.assertEqual(type_refcnt - 1, _testcapi.HeapCTypeSubclassWithFinalizer.refcnt_in_del)
self.assertEqual(new_type_refcnt + 1, _testcapi.HeapCTypeSubclass.refcnt_in_del)
# Test that the original type already has decreased its refcnt
self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer))
# Test that subtype_dealloc decref the newly assigned __class__ only once
self.assertEqual(new_type_refcnt, sys.getrefcount(_testcapi.HeapCTypeSubclass))
def test_heaptype_with_setattro(self):
obj = _testcapi.HeapCTypeSetattr()
self.assertEqual(obj.pvalue, 10)
obj.value = 12
self.assertEqual(obj.pvalue, 12)
del obj.value
self.assertEqual(obj.pvalue, 0)
def test_pynumber_tobase(self):
from _testcapi import pynumber_tobase
self.assertEqual(pynumber_tobase(123, 2), '0b1111011')
self.assertEqual(pynumber_tobase(123, 8), '0o173')
self.assertEqual(pynumber_tobase(123, 10), '123')
self.assertEqual(pynumber_tobase(123, 16), '0x7b')
self.assertEqual(pynumber_tobase(-123, 2), '-0b1111011')
self.assertEqual(pynumber_tobase(-123, 8), '-0o173')
self.assertEqual(pynumber_tobase(-123, 10), '-123')
self.assertEqual(pynumber_tobase(-123, 16), '-0x7b')
self.assertRaises(TypeError, pynumber_tobase, 123.0, 10)
self.assertRaises(TypeError, pynumber_tobase, '123', 10)
self.assertRaises(SystemError, pynumber_tobase, 123, 0)
def check_fatal_error(self, code, expected, not_expected=()):
with support.SuppressCrashReport():
rc, out, err = assert_python_failure('-sSI', '-c', code)
err = decode_stderr(err)
self.assertIn('Fatal Python error: test_fatal_error: MESSAGE\n',
err)
match = re.search(r'^Extension modules:(.*) \(total: ([0-9]+)\)$',
err, re.MULTILINE)
if not match:
self.fail(f"Cannot find 'Extension modules:' in {err!r}")
modules = set(match.group(1).strip().split(', '))
total = int(match.group(2))
for name in expected:
self.assertIn(name, modules)
for name in not_expected:
self.assertNotIn(name, modules)
self.assertEqual(len(modules), total)
@support.requires_subprocess()
def test_fatal_error(self):
# By default, stdlib extension modules are ignored,
# but not test modules.
expected = ('_testcapi',)
not_expected = ('sys',)
code = 'import _testcapi, sys; _testcapi.fatal_error(b"MESSAGE")'
self.check_fatal_error(code, expected, not_expected)
# Mark _testcapi as stdlib module, but not sys
expected = ('sys',)
not_expected = ('_testcapi',)
code = textwrap.dedent('''
import _testcapi, sys
sys.stdlib_module_names = frozenset({"_testcapi"})
_testcapi.fatal_error(b"MESSAGE")
''')
self.check_fatal_error(code, expected)
def test_pyobject_repr_from_null(self):
s = _testcapi.pyobject_repr_from_null()
self.assertEqual(s, '<NULL>')
def test_pyobject_str_from_null(self):
s = _testcapi.pyobject_str_from_null()
self.assertEqual(s, '<NULL>')
def test_pyobject_bytes_from_null(self):
s = _testcapi.pyobject_bytes_from_null()
self.assertEqual(s, b'<NULL>')
def test_Py_CompileString(self):
# Check that Py_CompileString respects the coding cookie
_compile = _testcapi.Py_CompileString
code = b"# -*- coding: latin1 -*-\nprint('\xc2\xa4')\n"
result = _compile(code)
expected = compile(code, "<string>", "exec")
self.assertEqual(result.co_consts, expected.co_consts)
def test_export_symbols(self):
# bpo-44133: Ensure that the "Py_FrozenMain" and
# "PyThread_get_thread_native_id" symbols are exported by the Python
# (directly by the binary, or via by the Python dynamic library).
ctypes = import_helper.import_module('ctypes')
names = []
# Test if the PY_HAVE_THREAD_NATIVE_ID macro is defined
if hasattr(_thread, 'get_native_id'):
names.append('PyThread_get_thread_native_id')
# Python/frozenmain.c fails to build on Windows when the symbols are
# missing:
# - PyWinFreeze_ExeInit
# - PyWinFreeze_ExeTerm
# - PyInitFrozenExtensions
if os.name != 'nt':
names.append('Py_FrozenMain')
for name in names:
with self.subTest(name=name):
self.assertTrue(hasattr(ctypes.pythonapi, name))
class TestPendingCalls(unittest.TestCase):
    """Exercise Py_AddPendingCall via _testcapi._pending_threadfunc,
    both from worker threads and from the main thread."""
    def pendingcalls_submit(self, l, n):
        # Submit n pending-call callbacks; each appends to the shared list l.
        def callback():
            #this function can be interrupted by thread switching so let's
            #use an atomic operation
            l.append(None)
        for i in range(n):
            time.sleep(random.random()*0.02) #0.01 secs on average
            #try submitting callback until successful.
            #rely on regular interrupt to flush queue if we are
            #unsuccessful.
            while True:
                if _testcapi._pending_threadfunc(callback):
                    break
    def pendingcalls_wait(self, l, n, context = None):
        #now, stick around until l[0] has grown to 10
        count = 0
        while len(l) != n:
            #this busy loop is where we expect to be interrupted to
            #run our callbacks. Note that callbacks are only run on the
            #main thread
            if False and support.verbose:
                print("(%i)"%(len(l),),)
            # Burn a little CPU so the interpreter has opportunities to
            # service the pending-call queue.
            for i in range(1000):
                a = i*i
            # Don't count timeout iterations until all submitters finished.
            if context and not context.event.is_set():
                continue
            count += 1
            self.assertTrue(count < 10000,
                "timeout waiting for %i callbacks, got %i"%(n, len(l)))
        if False and support.verbose:
            print("(%i)"%(len(l),))
    @threading_helper.requires_working_threading()
    def test_pendingcalls_threaded(self):
        #do every callback on a separate thread
        n = 32 #total callbacks
        threads = []
        class foo(object):pass
        context = foo()
        context.l = []
        context.n = 2 #submits per thread
        context.nThreads = n // context.n
        context.nFinished = 0
        context.lock = threading.Lock()
        context.event = threading.Event()
        threads = [threading.Thread(target=self.pendingcalls_thread,
                                    args=(context,))
                   for i in range(context.nThreads)]
        with threading_helper.start_threads(threads):
            self.pendingcalls_wait(context.l, n, context)
    def pendingcalls_thread(self, context):
        # Worker body: submit callbacks, then signal the event once the
        # last thread finishes so pendingcalls_wait can start its timer.
        try:
            self.pendingcalls_submit(context.l, context.n)
        finally:
            with context.lock:
                context.nFinished += 1
                nFinished = context.nFinished
                if False and support.verbose:
                    print("finished threads: ", nFinished)
            if nFinished == context.nThreads:
                context.event.set()
    def test_pendingcalls_non_threaded(self):
        #again, just using the main thread, likely they will all be dispatched at
        #once. It is ok to ask for too many, because we loop until we find a slot.
        #the loop can be interrupted to dispatch.
        #there are only 32 dispatch slots, so we go for twice that!
        l = []
        n = 64
        self.pendingcalls_submit(l, n)
        self.pendingcalls_wait(l, n)
class SubinterpreterTest(unittest.TestCase):
    """Tests for subinterpreter isolation via support.run_in_subinterp."""
    def test_subinterps(self):
        # A subinterpreter must get its own sys.modules and builtins
        # objects; compare object ids across the pipe to prove isolation.
        import builtins
        r, w = os.pipe()
        code = """if 1:
            import sys, builtins, pickle
            with open({:d}, "wb") as f:
                pickle.dump(id(sys.modules), f)
                pickle.dump(id(builtins), f)
            """.format(w)
        with open(r, "rb") as f:
            ret = support.run_in_subinterp(code)
            self.assertEqual(ret, 0)
            self.assertNotEqual(pickle.load(f), id(sys.modules))
            self.assertNotEqual(pickle.load(f), id(builtins))
    def test_subinterps_recent_language_features(self):
        # Compile/run newer syntax inside a subinterpreter and check the
        # computed values round-trip through the pipe.
        r, w = os.pipe()
        code = """if 1:
            import pickle
            with open({:d}, "wb") as f:
                @(lambda x:x)  # Py 3.9
                def noop(x): return x
                a = (b := f'1{{2}}3') + noop('x')  # Py 3.8 (:=) / 3.6 (f'')
                async def foo(arg): return await arg  # Py 3.5
                pickle.dump(dict(a=a, b=b), f)
            """.format(w)
        with open(r, "rb") as f:
            ret = support.run_in_subinterp(code)
            self.assertEqual(ret, 0)
            self.assertEqual(pickle.load(f), {'a': '123x', 'b': '123'})
    def test_mutate_exception(self):
        """
        Exceptions saved in global module state get shared between
        individual module instances. This test checks whether or not
        a change in one interpreter's module gets reflected into the
        other ones.
        """
        import binascii
        support.run_in_subinterp("import binascii; binascii.Error.foobar = 'foobar'")
        self.assertFalse(hasattr(binascii.Error, "foobar"))
    @unittest.skipIf(_testmultiphase is None, "test requires _testmultiphase module")
    def test_module_state_shared_in_global(self):
        """
        bpo-44050: Extension module state should be shared between interpreters
        when it doesn't support sub-interpreters.
        """
        r, w = os.pipe()
        self.addCleanup(os.close, r)
        self.addCleanup(os.close, w)
        # The same script runs in the main interpreter and a
        # subinterpreter; both write id(module.Error) into the pipe and
        # the ids must match because module state is shared.
        script = textwrap.dedent(f"""
            import importlib.machinery
            import importlib.util
            import os
            fullname = '_test_module_state_shared'
            origin = importlib.util.find_spec('_testmultiphase').origin
            loader = importlib.machinery.ExtensionFileLoader(fullname, origin)
            spec = importlib.util.spec_from_loader(fullname, loader)
            module = importlib.util.module_from_spec(spec)
            attr_id = str(id(module.Error)).encode()
            os.write({w}, attr_id)
            """)
        exec(script)
        main_attr_id = os.read(r, 100)
        ret = support.run_in_subinterp(script)
        self.assertEqual(ret, 0)
        subinterp_attr_id = os.read(r, 100)
        self.assertEqual(main_attr_id, subinterp_attr_id)
class TestThreadState(unittest.TestCase):
    """Thread-state tests driven via _testcapi._test_thread_state()."""

    @threading_helper.reap_threads
    @threading_helper.requires_working_threading()
    def test_thread_state(self):
        # some extra thread-state tests driven via _testcapi
        def target():
            idents = []

            def callback():
                idents.append(threading.get_ident())

            _testcapi._test_thread_state(callback)
            # Keep extra references to the callback alive while the C side
            # may still be using it.
            a = b = callback
            time.sleep(1)
            # Check our main thread is in the list exactly 3 times.
            self.assertEqual(idents.count(threading.get_ident()), 3,
                             "Couldn't find main thread correctly in the list")

        # Run once from this thread and once from a fresh thread.
        target()
        t = threading.Thread(target=target)
        t.start()
        t.join()
class Test_testcapi(unittest.TestCase):
    # Pull every test_* function exported by _testcapi into this class so
    # unittest discovers and runs each C-implemented test as a method.
    # *_code variants are excluded (they need special handling).
    locals().update((name, getattr(_testcapi, name))
                    for name in dir(_testcapi)
                    if name.startswith('test_') and not name.endswith('_code'))

    # Suppress warning from PyUnicode_FromUnicode().
    @warnings_helper.ignore_warnings(category=DeprecationWarning)
    def test_widechar(self):
        _testcapi.test_widechar()

    def test_version_api_data(self):
        # Py_Version (C API) must match sys.hexversion exactly.
        self.assertEqual(_testcapi.Py_Version, sys.hexversion)
class Test_testinternalcapi(unittest.TestCase):
    # Pull every test_* function exported by _testinternalcapi into this
    # class so each C-implemented internal-API test runs as a test method.
    locals().update((name, getattr(_testinternalcapi, name))
                    for name in dir(_testinternalcapi)
                    if name.startswith('test_'))
@support.requires_subprocess()
class PyMemDebugTests(unittest.TestCase):
    """Checks for the debug hooks installed by PYTHONMALLOC=debug."""
    PYTHONMALLOC = 'debug'
    # '0x04c06e0' or '04C06E0'
    PTR_REGEX = r'(?:0x)?[0-9a-fA-F]+'

    def check(self, code):
        """Run *code* in a child Python expected to fail; return decoded stderr."""
        with support.SuppressCrashReport():
            out = assert_python_failure(
                '-c', code,
                PYTHONMALLOC=self.PYTHONMALLOC,
                # FreeBSD: instruct jemalloc to not fill freed() memory
                # with junk byte 0x5a, see JEMALLOC(3)
                MALLOC_CONF="junk:false",
            )
        stderr = out.err
        return stderr.decode('ascii', 'replace')

    def test_buffer_overflow(self):
        # Writing past the end of a block must be reported as a corrupted
        # trailing pad byte by the debug allocator.
        out = self.check('import _testcapi; _testcapi.pymem_buffer_overflow()')
        regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
                 r"    16 bytes originally requested\n"
                 r"    The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
                 r"    The [0-9] pad bytes at tail={ptr} are not all FORBIDDENBYTE \(0x[0-9a-f]{{2}}\):\n"
                 r"        at tail\+0: 0x78 \*\*\* OUCH\n"
                 r"        at tail\+1: 0xfd\n"
                 r"        at tail\+2: 0xfd\n"
                 r"        .*\n"
                 r"(    The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
                 r"    Data at p: cd cd cd .*\n"
                 r"\n"
                 r"Enable tracemalloc to get the memory block allocation traceback\n"
                 r"\n"
                 r"Fatal Python error: _PyMem_DebugRawFree: bad trailing pad byte")
        regex = regex.format(ptr=self.PTR_REGEX)
        regex = re.compile(regex, flags=re.DOTALL)
        self.assertRegex(out, regex)

    def test_api_misuse(self):
        # Freeing a block through the wrong allocator family ('m' vs 'r')
        # must be detected via the block's API id byte.
        out = self.check('import _testcapi; _testcapi.pymem_api_misuse()')
        regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
                 r"    16 bytes originally requested\n"
                 r"    The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
                 r"    The [0-9] pad bytes at tail={ptr} are FORBIDDENBYTE, as expected.\n"
                 r"(    The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
                 r"    Data at p: cd cd cd .*\n"
                 r"\n"
                 r"Enable tracemalloc to get the memory block allocation traceback\n"
                 r"\n"
                 r"Fatal Python error: _PyMem_DebugRawFree: bad ID: Allocated using API 'm', verified using API 'r'\n")
        regex = regex.format(ptr=self.PTR_REGEX)
        self.assertRegex(out, regex)

    def check_malloc_without_gil(self, code):
        """Helper: *code* must die with the 'called without holding the GIL' error."""
        out = self.check(code)
        expected = ('Fatal Python error: _PyMem_DebugMalloc: '
                    'Python memory allocator called without holding the GIL')
        self.assertIn(expected, out)

    def test_pymem_malloc_without_gil(self):
        # Debug hooks must raise an error if PyMem_Malloc() is called
        # without holding the GIL
        code = 'import _testcapi; _testcapi.pymem_malloc_without_gil()'
        self.check_malloc_without_gil(code)

    def test_pyobject_malloc_without_gil(self):
        # Debug hooks must raise an error if PyObject_Malloc() is called
        # without holding the GIL
        code = 'import _testcapi; _testcapi.pyobject_malloc_without_gil()'
        self.check_malloc_without_gil(code)

    def check_pyobject_is_freed(self, func_name):
        """Helper: freeing the object produced by _testcapi.{func_name} must not crash."""
        code = textwrap.dedent(f'''
            import gc, os, sys, _testcapi
            # Disable the GC to avoid crash on GC collection
            gc.disable()
            try:
                _testcapi.{func_name}()
                # Exit immediately to avoid a crash while deallocating
                # the invalid object
                os._exit(0)
            except _testcapi.error:
                os._exit(1)
            ''')
        assert_python_ok(
            '-c', code,
            PYTHONMALLOC=self.PYTHONMALLOC,
            MALLOC_CONF="junk:false",
        )

    def test_pyobject_null_is_freed(self):
        self.check_pyobject_is_freed('check_pyobject_null_is_freed')

    def test_pyobject_uninitialized_is_freed(self):
        self.check_pyobject_is_freed('check_pyobject_uninitialized_is_freed')

    def test_pyobject_forbidden_bytes_is_freed(self):
        self.check_pyobject_is_freed('check_pyobject_forbidden_bytes_is_freed')

    def test_pyobject_freed_is_freed(self):
        self.check_pyobject_is_freed('check_pyobject_freed_is_freed')
class PyMemMallocDebugTests(PyMemDebugTests):
    # Same checks, with debug hooks on top of the raw malloc allocator.
    PYTHONMALLOC = 'malloc_debug'
@unittest.skipUnless(support.with_pymalloc(), 'need pymalloc')
class PyMemPymallocDebugTests(PyMemDebugTests):
    # Same checks, with debug hooks on top of the pymalloc allocator.
    PYTHONMALLOC = 'pymalloc_debug'
@unittest.skipUnless(Py_DEBUG, 'need Py_DEBUG')
class PyMemDefaultTests(PyMemDebugTests):
    # test default allocator of Python compiled in debug mode
    # (PYTHONMALLOC unset: debug builds enable the debug hooks by default)
    PYTHONMALLOC = ''
@unittest.skipIf(_testmultiphase is None, "test requires _testmultiphase module")
class Test_ModuleStateAccess(unittest.TestCase):
    """Test access to module state (PEP 573)"""
    # The C part of the tests lives in _testmultiphase, in a module called
    # _testmultiphase_meth_state_access.
    # This module has multi-phase initialization, unlike _testcapi.

    def setUp(self):
        # Load the extension under a distinct name so each test gets its
        # own fresh module instance (multi-phase init allows this).
        fullname = '_testmultiphase_meth_state_access'  # XXX
        origin = importlib.util.find_spec('_testmultiphase').origin
        loader = importlib.machinery.ExtensionFileLoader(fullname, origin)
        spec = importlib.util.spec_from_loader(fullname, loader)
        module = importlib.util.module_from_spec(spec)
        loader.exec_module(module)
        self.module = module

    def test_subclass_get_module(self):
        """PyType_GetModule for defining_class"""
        class StateAccessType_Subclass(self.module.StateAccessType):
            pass

        instance = StateAccessType_Subclass()
        self.assertIs(instance.get_defining_module(), self.module)

    def test_subclass_get_module_with_super(self):
        # Same lookup, but routed through super() in a subclass override.
        class StateAccessType_Subclass(self.module.StateAccessType):
            def get_defining_module(self):
                return super().get_defining_module()

        instance = StateAccessType_Subclass()
        self.assertIs(instance.get_defining_module(), self.module)

    def test_state_access(self):
        """Checks methods defined with and without argument clinic

        This tests a no-arg method (get_count) and a method with
        both a positional and keyword argument.
        """
        a = self.module.StateAccessType()
        b = self.module.StateAccessType()

        methods = {
            'clinic': a.increment_count_clinic,
            'noclinic': a.increment_count_noclinic,
        }

        for name, increment_count in methods.items():
            with self.subTest(name):
                # Counter lives in module state, so it is shared by a and b.
                self.assertEqual(a.get_count(), b.get_count())
                self.assertEqual(a.get_count(), 0)

                increment_count()
                self.assertEqual(a.get_count(), b.get_count())
                self.assertEqual(a.get_count(), 1)

                increment_count(3)
                self.assertEqual(a.get_count(), b.get_count())
                self.assertEqual(a.get_count(), 4)

                # twice=True applies the (negative) delta twice: 4 - 2*2 = 0.
                increment_count(-2, twice=True)
                self.assertEqual(a.get_count(), b.get_count())
                self.assertEqual(a.get_count(), 0)

                with self.assertRaises(TypeError):
                    increment_count(thrice=3)

                with self.assertRaises(TypeError):
                    increment_count(1, 2, 3)

    def test_get_module_bad_def(self):
        # PyType_GetModuleByDef fails gracefully if it doesn't
        # find what it's looking for.
        # see bpo-46433
        instance = self.module.StateAccessType()
        with self.assertRaises(TypeError):
            instance.getmodulebydef_bad_def()

    def test_get_module_static_in_mro(self):
        # Here, the class PyType_GetModuleByDef is looking for
        # appears in the MRO after a static type (Exception).
        # see bpo-46433
        class Subclass(BaseException, self.module.StateAccessType):
            pass
        self.assertIs(Subclass().get_defining_module(), self.module)
class Test_FrameAPI(unittest.TestCase):
    """Tests for the PyFrame_* accessor functions exposed via _testcapi."""

    def getframe(self):
        # Frame of a regular function call.
        return sys._getframe()

    def getgenframe(self):
        # Frame of a suspended generator.
        yield sys._getframe()

    def test_frame_getters(self):
        frame = self.getframe()
        self.assertEqual(frame.f_locals, _testcapi.frame_getlocals(frame))
        self.assertIs(frame.f_globals, _testcapi.frame_getglobals(frame))
        self.assertIs(frame.f_builtins, _testcapi.frame_getbuiltins(frame))
        self.assertEqual(frame.f_lasti, _testcapi.frame_getlasti(frame))

    def test_frame_get_generator(self):
        gen = self.getgenframe()
        frame = next(gen)
        # PyFrame_GetGenerator must return the generator owning the frame.
        self.assertIs(gen, _testcapi.frame_getgenerator(frame))
if __name__ == "__main__":
    # Allow running this test file directly.
    unittest.main()
|
config_vios.py | #!/usr/bin/env python3
# scripts/config_vios.py
#
# Import/Export script for vIOS.
#
# @author Andrea Dainese <andrea.dainese@gmail.com>
# @copyright 2014-2016 Andrea Dainese
# @license BSD-3-Clause https://github.com/dainok/unetlab/blob/master/LICENSE
# @link http://www.unetlab.com/
# @version 20160719
import getopt, multiprocessing, os, pexpect, re, sys, time
# Console credentials expected on the vIOS node.
username = 'cisco'
password = 'cisco'
secret = 'cisco'
conntimeout = 3     # Maximum time for console connection
expctimeout = 3     # Maximum time for each short expect
longtimeout = 30    # Maximum time for each long expect
timeout = 60        # Maximum run time (conntimeout is included)
def node_login(handler):
    """Drive the console to a privileged ('#') prompt.

    Handles every state a vIOS console can be in: username login, config
    mode, user EXEC ('>'), privileged EXEC ('#'), and the first-boot
    setup dialog.

    :param handler: pexpect spawn attached to the node console
    :returns: True on success; False otherwise (handler is closed on failure)
    """
    # Send an empty line, and wait for the login prompt
    i = -1
    while i == -1:
        try:
            handler.sendline('\r\n')
            i = handler.expect([
                'Username:',
                '\(config',
                '>',
                '#',
                'Would you like to enter the'], timeout = 5)
        except:
            # Keep prodding the console until one of the prompts appears.
            i = -1
    if i == 0:
        # Need to send username and password
        handler.sendline(username)
        try:
            handler.expect('Password:', timeout = expctimeout)
        except:
            print('ERROR: error waiting for "Password:" prompt.')
            node_quit(handler)
            return False
        handler.sendline(password)
        try:
            j = handler.expect(['>', '#'], timeout = expctimeout)
        except:
            print('ERROR: error waiting for [">", "#"] prompt.')
            node_quit(handler)
            return False
        if j == 0:
            # Landed in user EXEC: enable secret required
            handler.sendline(secret)
            try:
                handler.expect('#', timeout = expctimeout)
            except:
                print('ERROR: error waiting for "#" prompt.')
                node_quit(handler)
                return False
            return True
        elif j == 1:
            # Already privileged; nothing to do
            return True
        else:
            # Unexpected output
            node_quit(handler)
            return False
    elif i == 1:
        # Config mode detected, need to exit
        handler.sendline('end')
        try:
            handler.expect('#', timeout = expctimeout)
        except:
            print('ERROR: error waiting for "#" prompt.')
            node_quit(handler)
            return False
        return True
    elif i == 2:
        # User EXEC prompt: need higher privilege
        handler.sendline('enable')
        try:
            j = handler.expect(['Password:', '#'])
        except:
            print('ERROR: error waiting for ["Password:", "#"] prompt.')
            node_quit(handler)
            return False
        if j == 0:
            # Need to provide secret
            handler.sendline(secret)
            try:
                handler.expect('#', timeout = expctimeout)
            except:
                print('ERROR: error waiting for "#" prompt.')
                node_quit(handler)
                return False
            return True
        elif j == 1:
            # Nothing to do
            return True
        else:
            # Unexpected output
            node_quit(handler)
            return False
    elif i == 3:
        # Already at a privileged prompt; nothing to do
        return True
    elif i == 4:
        # First boot detected: decline the setup dialog and log in fresh
        handler.sendline('no')
        try:
            handler.expect('Press RETURN to get started', timeout = longtimeout)
        except:
            print('ERROR: error waiting for "Press RETURN to get started" prompt.')
            node_quit(handler)
            return False
        handler.sendline('\r\n')
        try:
            handler.expect('Router>', timeout = expctimeout)
        except:
            print('ERROR: error waiting for "Router> prompt.')
            node_quit(handler)
            return False
        handler.sendline('enable')
        try:
            handler.expect('Router#', timeout = expctimeout)
        except:
            print('ERROR: error waiting for "Router# prompt.')
            node_quit(handler)
            return False
        return True
    else:
        # Unexpected output
        node_quit(handler)
        return False
def node_quit(handler):
    """Send 'quit' to the console and close the pexpect handler, if still alive."""
    if handler.isalive():
        handler.sendline('quit\n')
        handler.close()
def config_get(handler):
    """Return the node's running configuration as a string, or False on failure."""
    # Clearing all "expect" buffer
    while True:
        try:
            handler.expect('#', timeout = 0.1)
        except:
            # Buffer drained (expect timed out): proceed.
            break
    # Disable paging
    handler.sendline('terminal length 0')
    try:
        handler.expect('#', timeout = expctimeout)
    except:
        print('ERROR: error waiting for "#" prompt.')
        node_quit(handler)
        return False
    # Getting the config
    handler.sendline('more system:running-config')
    try:
        handler.expect('#', timeout = longtimeout)
    except:
        print('ERROR: error waiting for "#" prompt.')
        node_quit(handler)
        return False
    # Everything echoed before the prompt is the config dump.
    config = handler.before.decode()
    # Manipulating the config
    config = re.sub('\r', '', config, flags=re.DOTALL)  # Unix style
    config = re.sub('.*Using [0-9]+ out of [0-9]+ bytes\n', '', config, flags=re.DOTALL)  # Header
    config = re.sub('.*more system:running-config\n', '', config, flags=re.DOTALL)  # Header
    config = re.sub('!\nend.*', '!\nend\n', config, flags=re.DOTALL)  # Footer
    return config
def config_put(handler):
    """Wait for IOS to report that the injected startup config was applied.

    vIOS loads the bootstrap configuration via CVAC; success is signalled
    on the console by a "CVAC-4-CONFIG_DONE" log line.

    :param handler: pexpect spawn attached to the node console
    :returns: True when the marker was seen within `timeout`, else False.
    """
    # NOTE: the original wrapped this in `while True`, but both branches
    # returned on the first iteration, so the loop (and the unused `i`
    # result) are dropped.
    try:
        handler.expect('CVAC-4-CONFIG_DONE', timeout)
    except Exception:
        # pexpect TIMEOUT/EOF: the marker never appeared.
        return False
    return True
def usage():
    """Print command-line help for this script to stdout."""
    help_lines = (
        'Usage: %s <standard options>' % (sys.argv[0]),
        'Standard Options:',
        '-a <s> *Action can be:',
        ' - get: get the startup-configuration and push it to a file',
        ' - put: put the file as startup-configuration',
        '-f <s> *File',
        '-p <n> *Console port',
        '-t <n> Timeout (default = %i)' % (timeout),
        '* Mandatory option',
    )
    print('\n'.join(help_lines))
def now():
    """Return the current UNIX time in whole milliseconds."""
    milliseconds = time.time() * 1000
    return int(round(milliseconds))
def main(action, filename, port):
    """Worker-process entry point: connect to the node console and run *action*.

    :param action: 'get' (dump running-config to *filename*) or 'put'
                   (wait for the injected config to be applied)
    :param filename: destination file (get) / source file (put)
    :param port: local TCP port of the node's telnet console

    BUG FIX: the parameter was misspelled ``fiename`` while the body used
    ``filename``; it only worked because an identically-named global
    happened to exist when spawned from ``__main__``.
    """
    try:
        # Connect to the device, retrying for up to conntimeout seconds.
        tmp = conntimeout
        while (tmp > 0):
            handler = pexpect.spawn('telnet 127.0.0.1 %i' % (port))
            time.sleep(0.1)
            tmp = tmp - 0.1
            if handler.isalive() == True:
                break
        if (handler.isalive() != True):
            print('ERROR: cannot connect to port "%i".' % (port))
            node_quit(handler)
            sys.exit(1)
        if action == 'get':
            # Log in, dump the running config and append it to the file.
            rc = node_login(handler)
            if rc != True:
                print('ERROR: failed to login.')
                node_quit(handler)
                sys.exit(1)
            config = config_get(handler)
            if config in [False, None]:
                print('ERROR: failed to retrieve config.')
                node_quit(handler)
                sys.exit(1)
            try:
                fd = open(filename, 'a')
                fd.write(config)
                fd.close()
            except:
                print('ERROR: cannot write config to file.')
                node_quit(handler)
                sys.exit(1)
        elif action == 'put':
            # Wait for the CVAC bootstrap config to be applied, then mark
            # the lab directory as configured.
            rc = config_put(handler)
            if rc != True:
                print('ERROR: failed to push config.')
                node_quit(handler)
                sys.exit(1)
            # Remove lock file
            lock = '%s/.lock' % (os.path.dirname(filename))
            if os.path.exists(lock):
                os.remove(lock)
            # Mark as configured
            configured = '%s/.configured' % (os.path.dirname(filename))
            if not os.path.exists(configured):
                open(configured, 'a').close()
        node_quit(handler)
        sys.exit(0)
    except Exception as e:
        print('ERROR: got an exception')
        print(type(e))   # the exception instance
        print(e.args)    # arguments stored in .args
        print(e)         # __str__ allows args to be printed directly,
        # NOTE(review): if pexpect.spawn itself raised, `handler` is unbound
        # here and node_quit() will raise NameError — confirm and guard.
        node_quit(handler)
        return False
if __name__ == "__main__":
    action = None
    filename = None
    port = None

    # Getting parameters from command line
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'a:p:t:f:', ['action=', 'port=', 'timeout=', 'file='])
    except getopt.GetoptError as e:
        usage()
        sys.exit(3)
    for o, a in opts:
        if o in ('-a', '--action'):
            action = a
        elif o in ('-f', '--file'):
            filename = a
        elif o in ('-p', '--port'):
            try:
                port = int(a)
            except:
                port = -1
        elif o in ('-t', '--timeout'):
            # NOTE(review): -t is converted from seconds to milliseconds
            # here, but the module default (timeout = 60) is never scaled,
            # so the watchdog below waits only ~60 ms by default — confirm.
            try:
                timeout = int(a) * 1000
            except:
                timeout = -1
        else:
            print('ERROR: invalid parameter.')

    # Checking mandatory parameters
    if action == None or port == None or filename == None:
        usage()
        print('ERROR: missing mandatory parameters.')
        sys.exit(1)
    if action not in ['get', 'put']:
        usage()
        print('ERROR: invalid action.')
        sys.exit(1)
    if timeout < 0:
        usage()
        print('ERROR: timeout must be 0 or higher.')
        sys.exit(1)
    if port < 0:
        usage()
        print('ERROR: port must be 32768 or higher.')
        sys.exit(1)
    if action == 'get' and os.path.exists(filename):
        usage()
        print('ERROR: destination file already exists.')
        sys.exit(1)
    if action == 'put' and not os.path.exists(filename):
        usage()
        print('ERROR: source file does not already exist.')
        sys.exit(1)
    if action == 'put':
        # Read the file only to validate it is readable; the content itself
        # is injected by the hypervisor, not pushed over the console.
        try:
            fd = open(filename, 'r')
            config = fd.read()
            fd.close()
        except:
            usage()
            print('ERROR: cannot read from file.')
            sys.exit(1)

    # Backgrounding the script: run main() in a child process watched
    # against a millisecond deadline.
    end_before = now() + timeout
    p = multiprocessing.Process(target=main, name="Main", args=(action, filename, port))
    p.start()
    while (p.is_alive() and now() < end_before):
        # Waiting for the child process to end
        time.sleep(1)
    if p.is_alive():
        # Timeout occurred
        print('ERROR: timeout occurred.')
        p.terminate()
        sys.exit(127)
    if p.exitcode != 0:
        sys.exit(127)
    sys.exit(0)
|
find_commit_with_best_gold_results.py | #! /usr/bin/env python
# Copyright 2019 Google LLC.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import re
import subprocess
import sys
import threading
import urllib
import urllib2
# Sanity check: the path arithmetic below joins with '/' directly.
assert '/' in [os.sep, os.altsep]
# Repository root (this script lives two directories below it).
skia_directory = os.path.abspath(os.path.dirname(__file__) + '/../..')
def get_jobs():
    """Yield per-job (key, value) lists parsed from infra/bots/jobs.json.

    Only 'Test-...-GPU-...' job names are considered; each match yields a
    list of (key, value) tuples suitable for building a Gold query.
    """
    path = skia_directory + '/infra/bots/jobs.json'
    job_re = re.compile('Test-(?P<os>[A-Za-z0-9_]+)-'
                        '(?P<compiler>[A-Za-z0-9_]+)-'
                        '(?P<model>[A-Za-z0-9_]+)-GPU-'
                        '(?P<cpu_or_gpu_value>[A-Za-z0-9_]+)-'
                        '(?P<arch>[A-Za-z0-9_]+)-'
                        '(?P<configuration>[A-Za-z0-9_]+)-'
                        'All(-(?P<extra_config>[A-Za-z0-9_]+)|)')
    keys = ['os', 'compiler', 'model', 'cpu_or_gpu_value', 'arch',
            'configuration', 'extra_config']

    def fmt(s):
        # Missing optional groups become ''. (Py2-era: values are utf-8 encoded.)
        return s.encode('utf-8') if s is not None else ''

    with open(path) as f:
        jobs = json.load(f)
        for job in jobs:
            match = job_re.match(job)
            if match is None:
                continue
            yield [(k, fmt(match.group(k))) for k in keys]
def gold_export_url(job, config, first_commit, last_commit):
    """Build a public-gold.skia.org JSON export URL for one job and config."""
    inner_query = urllib.urlencode(
        [('source_type', 'gm'), ('config', config)] + job)
    outer_query = urllib.urlencode([
        ('fbegin', first_commit),
        ('fend', last_commit),
        ('query', inner_query),
        ('pos', 'true'),
        ('neg', 'false'),
        ('unt', 'false'),
        ('head', 'true'),
    ])
    return 'https://public-gold.skia.org/json/export?' + outer_query
def urlopen(url):
    """GET *url*, attaching the Gold auth cookie from the environment."""
    cookie = os.environ.get('SKIA_GOLD_COOKIE', '')
    request = urllib2.Request(url, headers={'Cookie': cookie})
    return urllib2.urlopen(request)
def get_results_for_commit(commit, jobs):
    """Count passing GM tests at *commit* across all jobs and configs.

    One worker thread is spawned per export URL; each appends its per-job
    pass count to a shared list.  Progress dots and the final total go to
    stderr; the total is returned.
    """
    sys.stderr.write('%s\n' % commit)
    sys.stderr.flush()
    configs = ['gles', 'vk']
    per_job_counts = []

    def worker(url):
        try:
            results = json.load(urlopen(url))
        except urllib2.URLError:
            sys.stderr.write('\nerror "%s":\n' % url)
            return
        sys.stderr.write('.')
        sys.stderr.flush()
        count = 0
        for entry in results:
            assert entry['digests']
            count += 1
        per_job_counts.append(count)

    urls = [gold_export_url(job, cfg, commit, commit)
            for job in jobs for cfg in configs]
    workers = [threading.Thread(target=worker, args=(u,)) for u in urls]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    total = sum(per_job_counts)
    sys.stderr.write('\n%d\n' % total)
    sys.stderr.flush()
    return total
def find_best_commit(commits):
    """Return the hash of the commit with the highest passing-test count.

    Ties are broken in favour of the commit listed first in *commits*.
    """
    jobs = list(get_jobs())
    scored = []
    for name in commits:
        commit_hash = subprocess.check_output(
            ['git', 'rev-parse', name]).strip()
        scored.append((commit_hash, get_results_for_commit(commit_hash, jobs)))
    best_score = max(score for _, score in scored)
    for commit_hash, score in scored:
        if score == best_score:
            return commit_hash
    return None
def generate_commit_list(args):
    """List commit hashes (newest first) selected by ``git log`` *args*."""
    cmd = ['git', 'log', '--format=%H'] + args
    return subprocess.check_output(cmd).splitlines()
def main(args):
    """Fetch origin, score the selected commits, and print the best hash."""
    os.chdir(skia_directory)
    subprocess.check_call(['git', 'fetch', 'origin'])
    sys.stderr.write('%s\n' % ' '.join(args))
    commits = generate_commit_list(args)
    sys.stderr.write('%d\n' % len(commits))
    best = find_best_commit(commits)
    sys.stderr.write('DONE:\n')
    sys.stderr.flush()
    sys.stdout.write('%s\n' % best)
usage = '''Example usage:
python %s origin/master ^origin/skqp/dev < /dev/null > LOG 2>&1 & disown
'''
if __name__ == '__main__':
    # Require at least one `git log` selector argument.
    if len(sys.argv) < 2:
        sys.stderr.write(usage % sys.argv[0])
        sys.exit(1)
    main(sys.argv[1:])
|
engine.py | """
Event-driven framework of vn.py framework.
"""
import re
from collections import defaultdict
from queue import Empty, Queue
from threading import Thread
from time import sleep, time
from typing import Any, Callable, List
from numpy import array
from vnpy.trader.setting import get_settings
from kafka import KafkaProducer
import json
# Event type emitted by the engine's timer thread every `interval` seconds.
EVENT_TIMER = "eTimer"
class Event:
    """
    Lightweight message passed through the event engine.

    ``type`` tells the engine which handlers to dispatch to, while
    ``data`` carries the actual payload.
    """

    def __init__(self, type: str, data: Any = None):
        """Store the event type string and its optional payload."""
        self.type: str = type
        self.data: Any = data

    def __str__(self):
        return f"type: {self.type}, data: {self.data}"
# Defines handler function to be used in event engine.
# A handler receives the Event and returns nothing.
HandlerType = Callable[[Event], None]
class EventEngine:
    """
    Event engine distributes event object based on its type
    to those handlers registered.

    It also generates timer event by every interval seconds,
    which can be used for timing purpose.
    """

    def __init__(self, interval: int = 1):
        """
        Timer event is generated every 1 second by default, if
        interval not specified.
        """
        self._interval: int = interval
        self._queue: Queue = Queue()
        self._active: bool = False
        # Two daemon-less worker threads: one drains the queue, one ticks.
        self._thread: Thread = Thread(target=self._run)
        self._timer: Thread = Thread(target=self._run_timer)
        self._handlers: defaultdict = defaultdict(list)
        self._general_handlers: List = []
        # Debug event logging, driven by vnpy settings; events whose type
        # matches any regex in the exclude list are not logged.
        self._log_debug = get_settings()["log_debug"]
        self._log_debug_exclude_events = get_settings()["log_debug_exclude_events"].split(",")
        # Producer used to mirror processed events to a local Kafka topic.
        self._kafka_producer = KafkaProducer(
            bootstrap_servers='localhost:19092',
            # security_protocol="SASL_SSL",
            # ssl_context=context,
            value_serializer=lambda x: json.dumps(x).encode('utf-8')
        )

    def _run(self) -> None:
        """
        Get event from queue and then process it.
        """
        while self._active:
            try:
                # Timeout keeps the loop responsive to stop().
                event = self._queue.get(block=True, timeout=1)
                self._process(event)
            except Empty:
                pass

    def _process(self, event: Event) -> None:
        """
        First distribute event to those handlers registered listening
        to this type.

        Then distribute event to those general handlers which listen
        to all types.
        """
        if event.type in self._handlers:
            [handler(event) for handler in self._handlers[event.type]]

        if self._general_handlers:
            [handler(event) for handler in self._general_handlers]

        # print("log debug ex: {}", self._log_debug_exclude_events)
        if self._log_debug:
            # Skip events whose type matches any exclusion pattern.
            skip = False
            for val in self._log_debug_exclude_events:
                if val != "" and re.match(val, event.type):
                    skip = True
                    break
            if not skip:
                event_msg = "{}, {}".format(time(), event)
                self._kafka_producer.send("EVENTLOGGG", event_msg)
                print(event_msg)

    def _run_timer(self) -> None:
        """
        Sleep by interval second(s) and then generate a timer event.
        """
        while self._active:
            sleep(self._interval)
            event = Event(EVENT_TIMER)
            self.put(event)

    def start(self) -> None:
        """
        Start event engine to process events and generate timer events.
        """
        self._active = True
        self._thread.start()
        self._timer.start()

    def stop(self) -> None:
        """
        Stop event engine.
        """
        self._active = False
        self._timer.join()
        self._thread.join()

    def put(self, event: Event) -> None:
        """
        Put an event object into event queue.
        """
        self._queue.put(event)

    def register(self, type: str, handler: HandlerType) -> None:
        """
        Register a new handler function for a specific event type. Every
        function can only be registered once for each event type.
        """
        handler_list = self._handlers[type]
        if handler not in handler_list:
            handler_list.append(handler)

    def unregister(self, type: str, handler: HandlerType) -> None:
        """
        Unregister an existing handler function from event engine.
        """
        handler_list = self._handlers[type]

        if handler in handler_list:
            handler_list.remove(handler)

        # Drop the key entirely once no handlers remain for this type.
        if not handler_list:
            self._handlers.pop(type)

    def register_general(self, handler: HandlerType) -> None:
        """
        Register a new handler function for all event types. Every
        function can only be registered once for each event type.
        """
        if handler not in self._general_handlers:
            self._general_handlers.append(handler)

    def unregister_general(self, handler: HandlerType) -> None:
        """
        Unregister an existing general handler function.
        """
        if handler in self._general_handlers:
            self._general_handlers.remove(handler)
|
main.py | import os
import re
import socket
import threading
from librespot.audio.decoders import AudioQuality
from librespot.core import Session
from librespot.metadata import TrackId
from librespot.player.codecs import VorbisOnlyAudioQuality
# Global librespot session and listening server socket, created in main().
session: Session
sock: socket.socket  # annotation fixed: bare `socket` names the module, not the class
def handler(client: socket.socket, address: str):
    """Parse one HTTP request from *client* and send back a response.

    Reads a single request, splits it into request line / headers / body,
    delegates to response(), and writes the reply unless response() already
    sent it itself (``manually`` is True).  The connection is closed last.
    """
    req_raw = client.recv(1024 * 1024)
    if len(req_raw) == 0:
        return
    req_arr = req_raw.split(b"\r\n")
    req_http_raw = req_arr[0]
    # NOTE(review): assumes a well-formed request containing b"\r\n\r\n";
    # a malformed request raises IndexError here — confirm acceptable.
    req_header_str = req_raw.split(b"\r\n\r\n")[0]
    req_body_str = req_raw.split(b"\r\n\r\n")[1]
    req_http_arr = req_http_raw.split(b" ")
    req_method = req_http_arr[0]
    req_uri = req_http_arr[1]
    req_http_version = req_http_arr[2]
    req_header = {}
    for header in req_header_str.split(b"\r\n"):
        try:
            key, value = header.split(b": ")
        except ValueError:
            # Skip the request line and any malformed header lines.
            continue
        else:
            req_header[key.decode().lower()] = value.decode()
    status, headers, content, manually = response(client, req_uri.decode(),
                                                  req_header, req_body_str)
    if not manually:
        client.send(req_http_version + b" " + status.encode() + b"\r\n")
        client.send(b"Access-Control-Allow-Origin: *\r\n")
        for header in headers:
            # BUG FIX: terminator must be bytes — `header.encode() + "\r\n"`
            # raised TypeError (bytes + str) whenever headers were present.
            client.send(header.encode() + b"\r\n")
        client.send(b"\r\n")
        client.send(content)
    client.close()
class HttpCode:
    # HTTP status lines used when building responses by hand.
    http_200 = "200 OK"
    http_204 = "204 No Content"
    http_400 = "400 Bad Request"
    http_403 = "403 Forbidden"
    http_404 = "404 Not Found"
    http_500 = "500 Internal Server Error"
def main():
    """Create (or restore) a librespot session and serve HTTP on 127.0.0.1:8080."""
    global session, sock
    session = None
    # Try to reuse stored credentials first.
    if os.path.isfile("credentials.json"):
        try:
            session = Session.Builder().stored_file().create()
        except RuntimeError:
            pass
    if session is None or not session.is_valid():
        # Fall back to interactive username/password login.
        username = input("Username: ")
        password = input("Password: ")
        session = Session.Builder().user_pass(username, password).create()
        if not session.is_valid():
            return
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(("127.0.0.1", 8080))
    sock.listen(5)
    while True:
        # accept() returns (conn, addr), matching handler(client, address).
        threading.Thread(target=handler, args=sock.accept()).start()
def response(client: socket.socket, uri: str, header: dict,
             body: bytes) -> tuple[str, list, bytes, bool]:
    """Route *uri*; stream track audio directly or return response parts.

    :returns: (status line, extra headers, body, handled-manually flag);
              when the flag is True the reply was already written to *client*.
    """
    if re.search(r"^/audio/track/([0-9a-zA-Z]{22})$", uri) is not None:
        # NOTE(review): the pattern is evaluated twice; the first search
        # result could be reused instead of re-matching here.
        track_id_search = re.search(
            r"^/audio/track/(?P<TrackID>[0-9a-zA-Z]{22})$", uri)
        track_id_str = track_id_search.group("TrackID")
        track_id = TrackId.from_base62(track_id_str)
        stream = session.content_feeder().load(
            track_id, VorbisOnlyAudioQuality(AudioQuality.VERY_HIGH), False,
            None)
        start = 0
        end = stream.input_stream.stream().size()
        # Honour a byte-range request if present (clamped to stream size).
        if header.get("range") is not None:
            range_search = re.search(
                "^bytes=(?P<start>[0-9]+?)-(?P<end>[0-9]+?)$",
                header.get("range"))
            if range_search is not None:
                start = int(range_search.group("start"))
                end = (int(range_search.group("end"))
                       if int(range_search.group("end")) <=
                       stream.input_stream.stream().size() else
                       stream.input_stream.stream().size())
                stream.input_stream.stream().skip(start)
        client.send(b"HTTP/1.0 200 OK\r\n")
        client.send(b"Access-Control-Allow-Origin: *\r\n")
        client.send(b"Content-Length: " +
                    (str(stream.input_stream.stream().size()).encode() if
                     stream.input_stream.stream().size() == end else "{}-{}/{}"
                     .format(start, end,
                             stream.input_stream.stream().size()).encode()) +
                    b"\r\n")
        client.send(b"Content-Type: audio/ogg\r\n")
        client.send(b"\r\n")
        # Stream the track one byte at a time until the stream is exhausted.
        while True:
            if (stream.input_stream.stream().pos() >=
                    stream.input_stream.stream().size()):
                break
            byte = stream.input_stream.stream().read()
            client.send(bytes([byte]))
        return "", [], b"", True
    else:
        return HttpCode.http_404, [], HttpCode.http_404.encode(), False
if __name__ == "__main__":
    # Run the streaming HTTP server when executed as a script.
    main()
|
GenerativeDetector.py | import cv2
import numpy as np
import time
from abc import ABC, abstractmethod
from threading import Thread
from cv2 import VideoCapture
from multiprocessing import Queue, Process
from multiprocessing import Value
from OpenPersonDetector import OpenPersonDetector
from newgen.TrackJoin import TrackJoin
class PersonDetection:
    '''
    A single detected (or tracker-generated) person within a frame.
    '''

    def __init__(self, person_bound):
        '''
        :param person_bound: (left, top, right, bottom) bounding box
        '''
        left, top, right, bottom = (person_bound[0], person_bound[1],
                                    person_bound[2], person_bound[3])
        self.person_bound = person_bound
        # Geometric centre of the bounding box, in integer pixels.
        self.central_point = (int((left + right) / 2), int((top + bottom) / 2))
        # Identifiers assigned later by the tracking stage.
        self.track_index = None
        self.short_track_index = None
        # Link to the detector-produced detection this one was generated from.
        self.head = None
class Frame:
    '''
    A processed video frame handed to the frame processor; it may come
    straight from the detector or be generated by trackers.
    '''

    def __init__(self, raw_frame, time_frame, detections=None):
        '''
        :param raw_frame: numpy array of raw pixels
        :param time_frame: capture timestamp of the frame
        :param detections: PersonDetection list, or None if not yet known
        '''
        self.raw_frame = raw_frame
        self.time_frame = time_frame
        self.detections = detections
        # True only for frames the real detector processed (set externally).
        self.is_detected = False
class _FrameBulk:
def __init__(self, head_frame, tail_frames):
self.head_frame = head_frame
self.tail_frames = tail_frames
class AbstractInputFeeder(ABC):
    '''
    Base class for input video frames feeder.
    '''

    def __init__(self):
        super().__init__()

    @abstractmethod
    def init(self):
        '''
        Invoked once at the initiation of the input process.
        :return: void
        '''
        pass

    @abstractmethod
    def feed_input(self):
        '''
        Called by the input process to request the next video frame.
        :return: (boolean : frame_present, 2D numpy array: frame, float: time_frame)
        '''
        pass
class AbstractFrameProcessor(ABC):
    '''
    Abstract class for processing output frames.
    '''

    def __init__(self):
        super().__init__()

    @abstractmethod
    def init(self):
        '''
        Called by main process, during the call to start_sync, following
        initiation of the system.
        :return: None
        '''
        pass

    @abstractmethod
    def process_frame(self, processed_frame):
        '''
        Called by main process, providing a detected/generated frame to the user.
        :param processed_frame: detected or generated Frame
        :return: None
        '''
        pass
class AbstractDetectorGenerator(ABC):
    '''
    Generates and provides a person detector.
    '''

    def __init__(self):
        super().__init__()

    @abstractmethod
    def generate_detector(self):
        '''
        Invoked at the initiation of the person detection thread.
        :return: instance of the person detector to be used
        '''
        pass
class GenerativeDetector:
    '''
    Analyses a realtime video frame stream using the provided person detector.
    The detections in missed frames by detector (due to time taken for
    processing a single frame for detection) are generated using trackers.
    '''

    def __init__(self):
        # Inter-process queues: frames awaiting detection, detector output
        # bulks, and the final (detected + generated) frame stream.
        self.schedule_queue = Queue()
        self.detector_results_queue = Queue()
        self.all_frames_queue = Queue()
        # All buffer capacities scale together via `mul`.
        mul = 3
        self.schedule_queue_capacity = mul * 3
        self.detector_results_queue_capacity = mul * 5
        self.results_queue_capacity = mul * 3
        # Back-pressure limit on frames accepted but not yet emitted.
        self.max_hidden_frame_count = mul * 5
        # Shared (cross-process) timing averages and in-flight counter.
        self.detection_frame_time = Value("d", 0.0)
        self.tracking_frame_time = Value("d", 0.0)
        self.hidden_frame_count = Value("i", 0)
        self.frame_generator_process = None
        self.detector_process = None
def _input_feed_thread(self, input_feeder):
    """Pull frames from *input_feeder* and schedule them while buffers have room."""
    input_feeder.init()
    while True:
        # Only request new frames while every buffer is below capacity,
        # providing back-pressure on the input source.
        while self.schedule_queue.qsize() < self.schedule_queue_capacity and self.detector_results_queue.qsize() < self.detector_results_queue_capacity and self.hidden_frame_count.value < self.max_hidden_frame_count:
            r, frame, time_frame = input_feeder.feed_input()
            if r:
                # schedule() is defined elsewhere in this class (outside this view).
                self.schedule(frame, time_frame)
def _frame_generator_thread(self, _detector_results_queue, _all_results_queue, tracking_frame_time,
hidden_frame_count):
track_join = TrackJoin()
while True:
frame_bulk = _detector_results_queue.get()
track_join.process_bulk(frame_bulk)
head_frame = frame_bulk.head_frame
hidden_frame_count.value -= 1
_all_results_queue.put(head_frame)
trackers = []
track_indices = []
head_links = []
for detection in head_frame.detections:
detection.head = detection
tracker = cv2.TrackerMedianFlow_create()
# tracker = cv2.TrackerKCF_create()
person_bound = tuple(map(int, detection.person_bound))
person_bound = (
person_bound[0], person_bound[1], person_bound[2] - person_bound[0],
person_bound[3] - person_bound[1])
ok = tracker.init(head_frame.raw_frame, person_bound)
if ok:
trackers.append(tracker)
track_indices.append(detection.track_index)
head_links.append(detection)
for tail_frame in frame_bulk.tail_frames:
tail_frame.detections = []
tracking_start_time = time.time()
for _i, tracker in enumerate(trackers):
ok, bbox = tracker.update(tail_frame.raw_frame)
if ok:
gen_person_bound = (int(bbox[0]), int(bbox[1]), int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
generated_detection = PersonDetection(gen_person_bound)
generated_detection.track_index = track_indices[_i]
generated_detection.head = head_links[_i]
tail_frame.detections.append(generated_detection)
# else:
# print("Track Lost", track_indices[_i])
tracking_end_time = time.time()
tracking_frame_time.value = tracking_frame_time.value * 0.5 + (
tracking_end_time - tracking_start_time) * 0.5
hidden_frame_count.value -= 1
_all_results_queue.put(tail_frame)
def print_stats(self):
'''
Print buffer status to standard output
:return:
'''
print("Input Queue Size:", self.schedule_queue.qsize())
print("Detection Results Queue Size:", self.detector_results_queue.qsize())
print("Final Results Queue Size:", self.all_frames_queue.qsize())
def _detector_thread(self, _schedule_queue, _results_queue, detector_generator, detection_frame_time,
hidden_frame_count):
from trinet_reid.trinet import TriNetReID
detector = detector_generator.generate_detector()
re_id = TriNetReID()
while True:
frame = _schedule_queue.get()
hidden_frame_count.value += 1
detection_start_time = time.time()
detections = detector.detectPersons(frame.raw_frame, None)
person_crops = []
_detections = []
for __i, detection in enumerate(detections):
minx, miny, maxx, maxy = detection.upper_body_bound
person_crop = frame.raw_frame[
int(max(0, miny - (maxy - miny) / 1.8 - 5)):int(min(frame.raw_frame.shape[0], maxy + 5)),
int(max(0, minx - 5)):int(min(frame.raw_frame.shape[1], maxx + 5))]
if person_crop.shape[0] > 0 and person_crop.shape[1] > 0:
# detection.re_id_encoding = re_id.embed(person_crop)
person_crops.append(person_crop)
_detections.append(detection)
# cv2.waitKey(1)
embeddings = re_id.embed(person_crops)
for i in range(len(_detections)):
_detections[i].re_id_encoding = embeddings[i]
detection_end_time = time.time()
detection_frame_time.value = detection_frame_time.value * 0.5 + (
detection_end_time - detection_start_time) * 0.5
frame.detections = detections
frame.is_detected = True
backlog = []
while _schedule_queue.qsize() > 0:
backlog.append(_schedule_queue.get())
hidden_frame_count.value += 1
frame_bulk = _FrameBulk(frame, backlog)
_results_queue.put(frame_bulk)
def schedule(self, frame, time_frame):
'''
Manually schedule a frame to input queue.
:param frame: numpy array
:param time_frame: time in seconds
:return:
'''
self.schedule_queue.put(Frame(frame, time_frame))
def has_results(self):
'''
Check whether any processed frames are present in the output buffer.
:return:
'''
return self.all_frames_queue.qsize() > 0
def get_result(self):
'''
Obtain a frame from output buffer
:return:
'''
return self.all_frames_queue.get()
def start_sync(self, input_feeder, frame_processor, detector_generator):
'''
Start processing input to generate detections on frames
:param input_feeder:
:param frame_processor:
:param detector_generator:
:return:
'''
self.frame_generator_process = Process(target=self._frame_generator_thread, args=(
self.detector_results_queue, self.all_frames_queue, self.tracking_frame_time, self.hidden_frame_count))
self.detector_process = Process(target=self._detector_thread, args=(
self.schedule_queue, self.detector_results_queue, detector_generator, self.detection_frame_time,
self.hidden_frame_count))
self.frame_generator_process.daemon = True
self.detector_process.daemon = True
self.frame_generator_process.start()
self.detector_process.start()
self.input_feed_thread = Thread(target=self._input_feed_thread, args=(input_feeder,))
self.input_feed_thread.daemon = True
self.input_feed_thread.start()
frame_processor.init()
while True:
processed_frame = self.get_result()
r = frame_processor.process_frame(processed_frame)
if not r:
return
if __name__ == "__main__":
    # Demo wiring: detect and track people in a test video and display the
    # annotated stream in a cv2 window. Press 'q' to quit.

    class DetectorGenerator(AbstractDetectorGenerator):
        def generate_detector(self):
            # OpenPersonDetector is a project class imported elsewhere in this file.
            return OpenPersonDetector(preview=False)

    class FrameProcessor(AbstractFrameProcessor):
        def init(self):
            # Palette cycled by track index when drawing annotations.
            self.colour_set = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255),
                               (255, 125, 125), (125, 255, 125), (125, 125, 255), (255, 255, 125),
                               (255, 125, 255), (255, 255, 125)]
            cv2.namedWindow("preview", cv2.WINDOW_FREERATIO)
            # track_index -> first time (seconds) the track was seen.
            self.start_time_map = {}

        def process_frame(self, processed_frame):
            """Draw track id, elapsed time and bounding box for every detection."""
            frame = processed_frame.raw_frame
            time_frame = processed_frame.time_frame
            detections = processed_frame.detections
            for i, detection in enumerate(detections):
                colour = self.colour_set[detection.track_index % len(self.colour_set)]
                if detection.track_index not in self.start_time_map:
                    self.start_time_map[detection.track_index] = time_frame
                cv2.putText(frame, str(detection.track_index),
                            (int(detection.person_bound[0]), int(detection.person_bound[1])), cv2.FONT_HERSHEY_COMPLEX,
                            1, colour)
                time_elapsed = time_frame - self.start_time_map[detection.track_index]
                cv2.putText(frame, str(int(time_elapsed)) + " sec",
                            (int(detection.person_bound[0]), int(detection.person_bound[1] + 20)),
                            cv2.FONT_HERSHEY_COMPLEX, 1, colour)
                cv2.rectangle(frame, (int(detection.person_bound[0]), int(detection.person_bound[1])),
                              (int(detection.person_bound[2]), int(detection.person_bound[3])), colour)
            cv2.imshow("preview", frame)
            k = cv2.waitKey(1)
            # Returning False stops start_sync's result loop.
            if k & 0xFF == ord("q"):
                return False
            return True

    class InputFeeder(AbstractInputFeeder):
        def init(self):
            self.cap = VideoCapture("test_videos/ntb/head_office/Cash_Counter_1-1.dav")
            self.total_input_frames = 0

        def feed_input(self):
            # TODO: Add flow rate logic here
            r, frame = self.cap.read()
            if r:
                # Halve the resolution to speed up detection.
                frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
                frame = np.array(frame, copy=True)
                time_frame = self.cap.get(cv2.CAP_PROP_POS_MSEC) / 1000
                # time_frame = cap.get_time()
                self.total_input_frames += 1
                return r, frame, time_frame
            else:
                return False, None, None

    generative_detector = GenerativeDetector()
    generative_detector.start_sync(input_feeder=InputFeeder(), frame_processor=FrameProcessor(),
                                   detector_generator=DetectorGenerator())
|
main.py | import os
import time
import threading
from dotenv import load_dotenv
from azure.iot.device import Message
from generator import Humidity, Temperature
import listener
# Get variables from dotenv
load_dotenv()
DEVICE_ID = os.getenv("DEVICE_ID")
# Default simulation values
LIMIT_GROWING_DAYS = 40  # The maximum number of days before harvesting
LIMIT_GROWING_REWARD = 40.0  # The maximum number of rewarding scores before harvesting

# JSON telemetry payload template, filled in via str.format in start_planting.
MESSAGE = '{{ "deviceId": "{deviceId}", "plant_id": {plant_id}, "temperature": {temperature}, "humidity": {humidity}, "growth_state": "{state}", "reward": {reward} }}'

# Growth stages reported in the telemetry payload.
GROWTH_STATE = {
    "bRoot": "bRoot",  # Root begins
    "dRoot": "dRoot",  # Root develops
    "growing": "growing",  # Start growing
}

# Reward score per humidity condition band (passed to the Humidity generator).
HUMIDITY_STATUS = {
    "Perfect": 0.0,  # Within the ideal condition
    "Normal": 5.0,  # 5% lower or higher than the ideal condition
    "Not well": 10.0  # 10% lower or higher than the ideal condition
}

# Reward score per temperature condition band (passed to the Temperature generator).
TEMPERATURE_STATUS = {
    "Perfect": 0.0,  # Within the ideal condition
    "Normal": 3.0,  # 3 degrees Celsius lower or higher than the ideal condition
    "Not well": 4.0  # NOTE(review): value is 4.0 but the original comment said 5 degrees — confirm intended band
}
def generate_daily_msg(temperature, humidity, days):
    """Configure the simulators for the current growth stage and produce one
    day's readings.

    :param temperature: Temperature generator (set_init_value / set_range /
        generate_info interface)
    :param humidity: Humidity generator (same interface)
    :param days: current number of grown days, 1-based
    :return: tuple (temp_info, humi_info, growth_state)
    """
    # Stage table: (last day of stage, temp init, humi init,
    #               temp range, humi range, growth-state key).
    stages = (
        (3, 20.0, 85.0, (22.0, 25.0), (90.0, 100.0), "bRoot"),
        (13, 16.0, 65.0, (18.0, 22.0), (70.0, 75.0), "dRoot"),
        (float("inf"), 13.0, 65.0, (15.0, 22.0), (70.0, 75.0), "growing"),
    )
    for last_day, t_init, h_init, t_range, h_range, state_key in stages:
        if days <= last_day:
            temperature.set_init_value(t_init)
            humidity.set_init_value(h_init)
            temperature.set_range(*t_range)
            humidity.set_range(*h_range)
            growth_state = GROWTH_STATE[state_key]
            break
    # Generate condition information for the day.
    temp_info = temperature.generate_info()
    humi_info = humidity.generate_info()
    return temp_info, humi_info, growth_state
def start_planting():
    """Run the simulated plant device: send one telemetry message per interval
    to IoT Hub until interrupted with Ctrl-C.
    """
    try:
        client = listener.iothub_client_init()
        print("IoT Hub device sending periodic messages, press Ctrl-C to exit")
        # Start a background thread to listen for direct-method calls.
        device_method_thread = threading.Thread(target=listener.device_method, args=(client,))
        device_method_thread.daemon = True
        device_method_thread.start()
        # Initialise simulation state.
        temperature = Temperature(20.0, TEMPERATURE_STATUS)
        humidity = Humidity(85.0, HUMIDITY_STATUS)
        plant_id = 1
        days = 1
        curr_reward = 0
        # Start sending messages
        while True:
            # Build the message with simulated telemetry values
            temp_info, humi_info, growth_state = generate_daily_msg(temperature, humidity, days)
            temp = temp_info["temperature"]
            humi = humi_info["humidity"]
            reward = temp_info["status"] + humi_info["status"]
            msg_txt_formatted = MESSAGE.format(deviceId=DEVICE_ID, plant_id=plant_id, temperature=temp, humidity=humi, state=growth_state, reward=reward)
            message = Message(msg_txt_formatted)
            # Advance to the next day (message above still carries today's values).
            curr_reward += reward
            days += 1
            # Plant a new mint after harvesting the old one
            if days > LIMIT_GROWING_DAYS or curr_reward >= LIMIT_GROWING_REWARD:
                days = 1
                plant_id += 1
                curr_reward = 0
            # Send message
            print("Sending message: {}".format(message))
            client.send_message(message)
            print("Message sent")
            time.sleep(listener.INTERVAL)
    except KeyboardInterrupt:
        print("Stopped planting")
if __name__ == "__main__":
    # Entry point: print a banner, then run the device loop until Ctrl-C.
    # Fix: removed a stray trailing "|" extraction artifact after the call,
    # which made this line a syntax error.
    print("IoT Mint Growth")
    print("Press Ctrl-C to exit.")
    start_planting()
hiksound.py | import urllib.request
import xml.etree.ElementTree as ET
import threading
import queue
import http.client
import time
import socket
# A horrible hack, so as to allow us to recover the socket we still need from urllib
class SocketGrabber:
    """Context manager that hijacks socket.socket.close while active.

    A horrible hack: inside the ``with`` block every socket "closed" is
    intercepted instead of really closed, and the most recent one is kept in
    ``self.sock`` so it can be recovered after urllib would have discarded it.
    On exit the real ``close`` is restored; if the block raised, the captured
    socket reference is dropped.
    """

    def __init__(self):
        self.sock = None

    def __enter__(self):
        # Remember the genuine close() and install our interceptor.
        self._temp = socket.socket.close
        socket.socket.close = lambda s: self._close(s)
        return self

    def __exit__(self, type, value, tb):
        socket.socket.close = self._temp
        if tb is not None:
            self.sock = None

    def _close(self, sock):
        # Ignore sockets already closed, and repeated closes of the one we
        # have already captured.
        if sock._closed or self.sock == sock:
            return
        if self.sock is not None:
            # A newer socket supersedes the captured one: really close the old.
            self._temp(self.sock)
        self.sock = sock
class Hikvision:
    """Minimal client for a Hikvision camera's ISAPI two-way-audio endpoints."""

    class _AudioChannel:
        """One open two-way audio channel.

        Holds a raw output socket (fed by a pacing thread) and a raw input
        socket (drained by a reader thread). Audio is raw u-law byte chunks.
        """

        # Bytes per audio chunk sent to the camera.
        chunksize = 128

        def __init__(self, owner, index):
            # Ask the camera to open the two-way audio channel.
            req = urllib.request.Request(f"{owner._base}/ISAPI/System/TwoWayAudio/channels/{index}/open", method='PUT')
            resp = owner._opener.open(req)
            audiopath = f"{owner._base}/ISAPI/System/TwoWayAudio/channels/{index}/audioData"
            # Recover the underlying socket urllib would otherwise close
            # (see SocketGrabber) — this PUT stream is where we send audio.
            with SocketGrabber() as sockgrab:
                req = urllib.request.Request(audiopath, method='PUT')
                resp = owner._opener.open(req)
            self._output = sockgrab.sock
            self._tosend = queue.Queue()
            timer = threading.Thread(target=self._send128)
            self.sent = 0   # chunks sent from the play queue
            self.blank = 0  # silence (0xff) filler chunks sent
            timer.start()
            # Same trick for the GET stream we receive audio from.
            with SocketGrabber() as sockgrab:
                req = urllib.request.Request(audiopath, method='GET')
                resp = owner._opener.open(req)
            self._input = sockgrab.sock
            capture = threading.Thread(target=self._reader)
            self._received = queue.Queue()
            capture.start()

        def _reader(self):
            """Thread body: forward incoming audio bytes into the receive queue.

            NOTE(review): recv() returning b'' (peer closed) leaves this loop
            spinning — confirm whether a disconnect should terminate it.
            """
            while True:
                data = self._input.recv(4096)
                if len(data):
                    self._received.put(data)

        def _send128(self):
            """Thread body: pace output at ~64 chunks/second.

            Sends the next queued chunk if one is pending, otherwise a
            chunk of u-law silence (0xff) to keep the stream alive. Exits
            (and prints stats) when the socket send fails.
            """
            try:
                while True:
                    time.sleep(1.0 / 64)
                    if not self._tosend.empty():
                        self._output.send(self._tosend.get())
                        self.sent += 1
                    else:
                        self._output.send(b'\xff' * self.chunksize)
                        self.blank += 1
            except Exception as e:
                print(f"Sent {self.sent} blank {self.blank} [{e}]")

        def play(self, ulaw_data):
            """Queue u-law audio bytes for transmission, padded to chunksize with silence."""
            for x in [ulaw_data[i:i + self.chunksize] for i in range(0, len(ulaw_data), self.chunksize)]:
                self._tosend.put(x + (b'\xff' * (self.chunksize - len(x))))

        def read(self):
            """Drain up to 100 pending received chunks and return them concatenated (may be b'')."""
            res = b''
            max_count = 100
            while max_count and not self._received.empty():
                res += self._received.get()
                max_count -= 1
            return res

    def __init__(self, ip, username, password):
        """Set up an opener with HTTP digest auth for the camera at *ip*."""
        self._base = f"http://{ip}"
        mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
        mgr.add_password(None, [self._base], username, password)
        self._auth = urllib.request.HTTPDigestAuthHandler(mgr)
        self._opener = urllib.request.build_opener(self._auth)

    def getAudioChannels(self):
        """Return the list of two-way audio channel ids advertised by the camera.

        :raises ValueError: if the response XML is not the expected schema.
        """
        req = self._opener.open(f"{self._base}/ISAPI/System/TwoWayAudio/channels")
        data = ET.XML(req.read())
        if data.tag != '{http://www.hikvision.com/ver20/XMLSchema}TwoWayAudioChannelList':
            raise ValueError(f"Didn't expect {data.tag}")
        res = []
        for channel in data:
            if channel.tag != '{http://www.hikvision.com/ver20/XMLSchema}TwoWayAudioChannel':
                raise ValueError(f"Didn't expect {channel.tag}")
            # First child element is presumed to be the channel id — TODO confirm.
            res.append(int(channel[0].text))
        return res

    def openAudioChannel(self, channel):
        """Open two-way audio channel *channel* and return an _AudioChannel handle."""
        return self._AudioChannel(self, channel)
|
imageme.py | #!/usr/bin/python
"""
imageMe is a super simple image gallery server.
Run imageme.py from the top level of an image directory to generate gallery
index HTML and run a SimpleHTTPServer on the localhost.
Imported as a module, use imageme.serve_dir(your_path) to do the same for any
directory programmatically. When run as entry point, imageme.serve_dir('.') is
what's called.
"""
# Dependencies
import base64, io, os, re, sys, threading, SimpleHTTPServer, SocketServer
# Attempt to import PIL - if it doesn't exist we won't be able to make use of
# some performance enhancing goodness, but imageMe will still work fine
# Pillow is optional: without it thumbnails are served full-size, but the
# gallery still works.
PIL_ENABLED = False
try:
    print('Attempting to import from PIL...')
    from PIL import Image
    PIL_ENABLED = True
    print('Success! Enjoy your supercharged imageMe.')
except ImportError:
    print(
        'WARNING: \'PIL\' module not found, so you won\'t get all the ' +\
        'performance you could out of imageMe. Install Pillow (' +\
        'https://github.com/python-pillow/Pillow) to enable support.'
    )
# Constants / configuration

## Filename of the generated index files
INDEX_FILE_NAME = 'imageme.html'

## Regex for matching only image files. Raw string so the backslash in \. is
## a literal regex escape rather than a (deprecated) string escape.
IMAGE_FILE_REGEX = r'^.+\.(png|jpg|jpeg|tif|tiff|gif|bmp)$'

## Images per row of the gallery tables
IMAGES_PER_ROW = 3

## Resampling mode to use when thumbnailing (requires PIL)
RESAMPLE = None if not PIL_ENABLED else Image.NEAREST

## Width in pixels of thumbnails generated with PIL
THUMBNAIL_WIDTH = 800
## Base64 data for an image notifying user of an unsupported image type
UNSUPPORTED_IMAGE_TYPE_DATA = 'data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAMgAyADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD36iiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAoooo
AKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAoooo
AKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAoorn/+E68If9DXof8A4MYv/iqAOgopM1n6lrukaKYv7V1axsPNz5f2q5SLfjGcbiM4yPzoA0aKytP8SaFrFw1vpet6bfTqhdo7W7jkYKMDJCknGSOfetUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRSc0A0ALRRRQAUUUUAFFFFABRRXg/xL+M/iLwv4z1DQNMtdPWK28vE0qM7ndGr/3gB970oA94orkfhhrmoeJPh3perarMJr2487zJAgTO2Z1HAAA4AFddQAUVieL/ABCnhXwnqOtvF5v2SLcsecbmJCqCewLEV89eF/jv4qh8Q2/9t3MN7pssoSWPyEjaNWONyFQDkdcHOenGc0AfUFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRSUDpzQAtFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXwDX39XwDQB9/8Aavn/APaa6+F/+3v/ANo19Adq+f8A9prr4X/7e/8A2jQBz/7OP/JQ7/8A7BUn/o2Kvp+vmD9nH/kod/8A9gqT/wBGxV6D+0b/AMk9sP8AsKx/+ipaAPSrrxHoVgxW81rTrdh2mukT+Zptl4n8P6lKIrHXdMupD0SC7jcn8Aa+LvD3hrWPFeoPYaJZtd3SRmVow6rhAQCcsQOrD86n8R+DfEXhJo11zS5rMS/cclXRj6BlJGfbNAH3BRXgv7Pvja6u5LnwrqE7TLFF59m0jZKqCA0Y9uQQO3P4dJ8dvGF54a8LWtjpszwXWpyMhmQ4ZYlA3bT2J3KM+hNAHol/4h0XSn2ajrGn2b/3bi5SM/8AjxFOsdc0jU2xp+qWV2fS3uFk/kTXx34M8A6348urmDSBbqtsqtNLcSbUXdnaOASScHoO1dTL8A/GVrfW0bw2lzbvKiyy2twDsUkAthwp4HoKAPquiha+AaAPv6il718AUAff1VbzUbKwXdeXlvbL6zSqg/UirVfAVAH3DH4z8L
SyCOPxLo7yHoq30RP/AKFW2DkA5B96+Ob34R+PLC0a5n8OTmNBk+VLHK2P91GJP5VneDPG+r+CdXS806ZjCzD7Ras37uZfQjsfRuo/SgD7YqG5u7azj8y6uIoE/vSuFH606CaO5gjnhkWSKRQ6OpyGBGQR+Br4H4oA+3x408KmTZ/wk2jbz/D9viz+W6tuN0kRXRw6MMhlOQR618ey/B3x/FbmdvDspQDOEniZvwUOT+lc74d8R6t4V1dNQ0m7e3nQ4YfwyD+669x9f0oA+5qKoaJq1rr2iWeq2T7re6iEqZ6gHsfccj8K8H+OPxLuzqUvhPR7loYIRi/mjbBkYjPlgjoAMZ9Tx2OQD3K+8TaBpc3k3+uaZaS/3Li7jjb8iRV+1u7a9gWe0uIriFvuyROHU/iK+LPD3gDxT4qtnudG0ea5t1JHmlljQkdQC5AJ+lZsU2r+GNcDRtc6dqdnJ3BR42HYj+h6igD7rpCQoJJAA6k9qwfBnie28YeFbLWrZdhmUrLHnJjkXhl/Pp7EGvmH42MT8XNbBJIAgAB7fuIzQB9RTeMfC9u+yfxJpEbZxte+iU/+hVpWl/Z6hF5tldwXMf8AehkDj8wa+N9D+GfjDxJpcWp6Tor3FnLu2S+fEgbBKnG5geoI/CvY/gL4L1fw5qGu3et6bPZTFIoIfNTG4ZLPg9xwnSgDxLx9LJL8QvEfmOz7dTuVXcc4AlbAHtX154E/5J54a/7BVr/6KWvkDx3/AMlD8S/9hW6/9GtUcHgvxVdW8Vxb+GtZmglQPHJHYSsrqRkEELggjnNAH3FRXlnwD0nUtH8CX1tqmn3djO2pSOsV1C0TFfKiGQGAOMgjPsa8q+MXxLu/Eeu3GiabctHoto5jYRtgXLg8s3qoPQdOM9+AD6QufFnhuyuDBdeINJgmBwY5b2NGB+hatWKWKeJZYZFkjYZV0YEEexFfGemfDDxprOlrqVjoFxJaOu5WZkQsvqqsQSPTAOayPDviHU/Cmtw6lplw8FxE2GXnbIueUYdwfT+tAH3NQeOar2V5BqFhb3tq/mQXESyxOP4lYAg/ka+T/i18Qbnxf4kntLedl0azkKW8SH5ZWHBkOOue3oPqcgH063jHwxHP5D+JNIWbOPLN9EGz9N2a+W/jWQ3xc1xlIIItyCO/+jx1m6N8MvGev6et/pugzy2rjckjukQceq72BYe4rndR0680m/lsdQtZbW7hOJIZkKsvGRkH1BBHsQaAPrT4Jf8AJINC/wC3j/0okrv64D4Jf8kg0L/t4/8ASiSu/oAp6tplprWk3Wm38QltLmMxypnGQffsfftXk/hr9n7SdF1+DUr/AFebUY7eQSRW3kCJSwORvO47hntxnvxwfRfHX/JPPEv/AGCrr/0U1fIHgT/kofhr/sK2v/o1aAPt6qd7qunacM32oWtr3/fzKn8yKy/HY/4t94l/7BV1/wCimr4qsrOfUb63sbWMyXNxKsUUYIy7scKPzIH40Afbdv4v8M3cohtvEWkzSngJHexsxPsA2a2WZUQsxAUDJJPAFfFPiH4feKvCtot3rWjS21szBfNDpIoJ6AlGIH41jwLqWryWmmW5uryQEx21spZ8ZOSEXtnqcUAfaieMfDEk/kp4k0h5c42LfRFvyzW3XxP4g+H/AIq8LWaXms6PNbWzEL5odJFBPQEoSB+Ndr8CfGN1pPi6LQJZmbTtSyqxseI5gCVYemcbT65HpQB9R1n3uu6PprFb/VbG0I6ie4WM/qRWhnvXxnYfCnx1qUYkt/Dd2qnp55WE/k5FAH1xY+JdB1SXytP1vTruQ9Et7qOQ/kCa1K+Gtf8AC2t+FrpLfW9Oms5JASm/BVwOuGBIOMjoa99/Z/8AGV3rOlXvh+/naaTT1WS2dzlvKPBXPopxj2bHQCgD2iqt7qNlpsXm395b2sX9+eVY1/MkVxHxa8ev4G8Mo1kV/tW+JjttwzsAHzSY74yPxI+lfKenaZqviPVBbafa3F
/eyksVRS7H1J/xNAH2nB4w8M3LbbfxHpErekd9Gx/Rq2QwZQVIIIyCK+KfEfw+8UeErCO81zSmtLaSQRI/nRuC5BIHysccA/lXc/s5f8lCv/8AsFSf+jYqAPp6q19qNjpsHnX97b2kX9+eVUX8zXK/E3xuvgbwo97EEe/uG8m0jbpvI5Y+yjn64HevkrTdJ1rxXq7QWFvc6lfzEyOQSzHnlmY9Oe5NAH2vp/iDRdWcppur2F6w5ItrlJD/AOOk1o18O+IfCWveE544tc0yazMmfLZiGV/XDKSCfxr6B+BfxAuvEmnXGg6rcNNf2KCSGZ2y8sOcfNnqVJAz3DDvzQB7DRRRQB8SePpZJPiF4j3yM+3VLlV3EnAErYFfXngX/knnhn/sFWv/AKKWvkDx3/yUPxL/ANhW6/8ARrVHB4M8VXVvFcW/hrWZoJUDxyR2ErK6kZBBC4II5zQB9xUV5Z8A9J1HRvAt7b6pp93YztqcjrHdQtExXyohkBgDjIIz7GvmrxNosnh3xPqWkS5LWlw0YYjllz8rfiMH8aAPuiisDwRrq+JPBWkatuDPPbL5p/6aL8r/APjwNfM3xu1n+2PidfIjborBEtE+qjLf+PMw/CgD64qreajZWC7ry8t7ZfWaVUH6kV5x8BNDOlfDpLyRNs2pTtcHIwdg+RR9PlJ/4FXynQB9wx+M/C0sgjj8S6O8h6Kt9ET/AOhVtg5AOQfevjm9+EfjywtGuZ/Dk5jQZPlSxytj/dRiT+VZ3gzxvq/gnV0vNOmYwsw+0WrN+7mX0I7H0bqP0oA+2KKjhmjuLeOeGQSRSKHR1OQykZBH4Gvlz4xfEu78R63c6Hp1yY9FtJDGwjOPtLrwWYjqoPQdOM88YAPo658WeG7KcwXXiHSoJgcGOW9jRvyJzWrDNFcRLLDIkkbDKujZB/GvjTTPhj401jS11Kx0C4ktXXejs6IXX1VWYEg9sA5rI8PeItU8J65DqWmXDwXETYZedrrnlGHcH0/rQB9zUVi6rqUtz4JvdT0VjLLJp0lxZMgzvYxlkx+OK+II5ZIpUlid0kQhlZSQVI5yDQB980UfjRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXwDX39XwfrGlXOiaxeaZeIUuLWVonHuD1Hsev40AfeAYMoIOQRkV4B+0yQT4XAPOLo/8AomtfwL8cvDo8MWln4juZbO/tYlhaQwvKswUYDZUE5IHOQOc15D8UfHY8e+J1u7aKSGwto/Jtkk+8RnJZh2JPb0AoA6j9nEH/AIWFfnHH9lSf+jYq7/8AaO/5J5p//YVj/wDRU1Y37OPh6WG11bxBNGVjn22tuxGNwUkuR7Z2jPqD6Vs/tHf8k8sP+wrH/wCipaAPP/2cf+Sh3/8A2CpP/RsVeufHGFJPhLq7sATE8Dr7HzkX+RNeR/s4/wDJQ7//ALBUn/o2KvYPjb/ySHXP+3f/ANHx0AeA/BFivxc0UA8MJwff9xIf6V7X8cPBd74q8K211pkLT32mSM4hUZaSNgN4X1Pyqcd8HvivE/gl/wAle0L/ALeP/SeSvprxX450PwWbE63PJCl47LG6Rl9u0DJIHOOR0B60AfGulazqWhXq3mlX09ncKMCSFypI9D6j2PFex+E/2h7+G4itvFFnFcW5IVru2XZIv+0V6N+GK9P1bXPhj4rsturar4eu4yvBnuI1kX/dJIZT9MV8k6nFaQareQ6fM09lHO628rjBeMMQrH3IwfxoA+8hXwDX3N4Sjmh8GaFFcA+emn26yZ67hGoP618OywyQTPDKpSRGKsrDlSOCDQB9918AV9Z6d8d/A11YRzXeoTWU5Ub7eS2kcqe43IpB+ua+TMUAff1fANff1fANAH38a+NPivp0GlfFDXrW3VVj88ShVGADIiuR+bGvoM/HbwF9jM/9pXBkxn7P9kk3n2zjb+tfLniHWrjxH4hv9YuVCy3kzSlAchQeij6DA/CgD60+D1
xJdfCfQJJCSwikjGfRZXUfoBXxxX3F4M0VvDngzSNIfAltrZFlwcjeeWx/wImvh2gD79PWvlX4/adBY/Eoywqqm9so7iTaMfPlkz+SCvYbb47eA57TzpNRubaTGfIltJC+fTKgr+tfOPjvxZJ418W3WstEYYn2xwRMclI1GAD7nkn3JoA+gP2ebmSf4byxuSVg1CWNM9htRv5sa+Yb65kvL65upiTLNK0jk9SScnP519d/B7QZfD/w102C4jKXFzuupFIwRvOVz77Qua+afiT4al8LeO9TsGTbA8hntjjhonJK4+nK/VTQB9jaZZQ6bpdpY26hYLeFIowOyqAB/KvAf2lLKCPVfD98qjzp4ZonI6lUKEf+htW/4C+Ofh9fDNrZeJLiWzv7WIRGbyXkSYKMBvlBIJA5BHWvIvih46HjvxSL23jkisLaPybZJPvYySWI7Ent6AUAeofs03MjWPiO1JPlxywSKPdhID+iLXm/xt/5K9rv/bv/AOiI69r+AnhyXRfAjX9zGUn1ObzlUjBEQGEz9fmYezCvFPjb/wAle13/ALd//REdAH0B8Ev+SQ6F/wBvH/o+Su/rgPgl/wAkh0L/ALeP/R8ld/QB8Q+O/wDkofiX/sK3X/o1q+v/AAJ/yTzw1/2CrX/0UtfJvxO0+TT/AImeIYpFwXvXnA9pDvB/Jq9n+Hnxo8LWXgzTtN128ksryxhW3/1DusioMKQUB7AZzjmgD2qvgIV9t+EfGuj+N7O6utHaZorabyWMsewscA5Az05746GvjTWtHutB1u80q9QpcWsrRv74PBHsRgj2NAHp/wDw0Z4uH/MN0Pn/AKYy/wDxyvNPEeu3HibxBd6zdQ28M90waRLdSEBAAyASTzjJ56k19H+Hfj/4Wv7CP+2jNpl4BiUeS0sZPqpQE49iPz61Pqfx+8F2cZNpJe379hDblB+JfbxQBs/BuZpvhNoLvkkRypz6LK4H6AV8eCvv32r4T13Rbvw9rt7pN6pWe0laNuCA2OjD2IwR9aAPusEYwMfSvkL42/8AJXtd/wC3f/0RHXsHhb4+eGr3SYR4hmk0/UUULMRA8kcjf3l2AkZ64I46V4d8T9csPEnxE1XV9LmM1lceV5cjIULbYkU8HB6qaAPpD4Jf8kg0L/t4/wDSiSu/rgPgn/ySHQx/18f+lEld/QBgeOv+SeeJf+wVdf8Aopq+QPAn/JQ/DX/YVtf/AEatfX/jr/knniX/ALBV1/6KavkDwJ/yUPw1/wBhW1/9GrQB9f8Ajv8A5J54l/7BV1/6KavkDwJ/yUPw1/2FbX/0atfX/jv/AJJ54l/7BV1/6KavkDwJ/wAlD8Nf9hW1/wDRq0AfXXxBgSf4c+JEkG5Rptw4B9VjYj9QK+R/ATlPiH4aI76pbD85VH9a+vfHf/JPPEv/AGCrr/0U1fIHgT/koXhr/sK2v/o1aAPrr4gQJP8ADrxIkiggaZcOB7rGWH6gV8jeA2K/EPw0Qcf8TS2H5yrX1946/wCSe+Jf+wVdf+imr5A8Cf8AJQvDX/YVtf8A0atAH2fresWXh/RrnVdRlEVpbJvkbGT7ADuScAD1NfNGs/tAeL76dzpotNMgz8ipEJXA92fIJ/AV6f8AtCxTSfDeJos7I9QiaXH93a45/wCBFa8S+FHivS/BvjUalq8TvbNbvCJETc0LEqd+PoCOOcMfpQBR8SfEXxP4u0mLT9cvkuoI5hMh+zxowYBl6qo4wxrtP2cSf+FhagM8HSpD/wCRYq1/jT8RfC/i3wdZ6foepG6uY79JnX7PJGAgjkXOWUd2FZH7OI/4uHqH/YKk/wDRsVAEn7SDSf8ACdaYpz5Y01Sv182TP9K7L9m6O2HhHVpV2/amv9snrsEa7fwyX/WtP44+BrrxV4et9R0yIzahppZvJUfNLE2NwHqRgED6454r5r0PxDq/hq/+26PfTWdxjaWjPDD0IPBHQ4IoA+iv2jv+Se6f/wBhWP8A9FS15/8As4
/8lCv/APsFSf8Ao2KuC8R+N/EviyKNNb1aa7iRt6xYVEDYI3bVAGcE84zzXe/s4/8AJQr/AP7BUn/o2GgCx+0hcyN400q1JPlx6cJFHu0jg/ogrkPBHxP1nwFY3VrpVjpsouZRJJJcxuz8DAGVdeByenc16h+0b4dmns9L8QwR7o7fdbXBH8IYgoT7Z3D6ketcJ8J/ikvgOW5sNRgkn0q6cSExYLwyYxuAOMggAEZ7DHoQCv4s+Mmv+MfD82i6lYaSkEjK2+GGRXUqwOQTIQDxjp0Jo+BszRfFnSkUnEsc6Ng9vKc/zAr3V/jj4ASLcNakdv7i2c2f1UD9a3PBHjfTvHmm3WoabDPDDBcm3KzgB2wqtnAJ4+b17UAdRRRRQB8Q+O/+Sh+Jf+wrdf8Ao1q+v/Av/JPPDP8A2CrX/wBFLXyb8TrCTTviZ4hhlUqXvZJx7iQ7wfyavZ/h58aPC1n4M03TNcu5LG8sYFtzmB3WRUG1SCgP8IGc45zQB7Sa+e/2jPDJjutN8TQR/LKPslwQOjDLIT9RuH/ARXsfhHxro/jezurvR2maG2m8ljKmwk4ByBnpz3x0NXfE2hW/iXw3f6NdYEV3CU3YzsbqrfgQD+FAHiv7OfiRI4dY8P3MoVY8X0O44wOFk/8AZD+deESyXGo37yuWmubmUseOXdj/ADJNJDcXFlM7RSSQybXibHB2sCrKfqCQfrXpHwJ8N/238QYr6VN1tpSG4YkceYeIx9ckt/wCgD6osrSGwsbezt0CQW8axRr6KoAA/IV8EV9/V8A0Affxr40+K+nQaV8UNetbdVWPzxKFUYAMiK5H5sa+gz8dvAX2Mz/2lcGTGfs/2STefbONv618ueIdauPEfiG/1i5ULLeTNKUByFB6KPoMD8KAPrP4PXEl18J9AkkJLCKSMZ9FldR+gFfHY6E+9fb/AIM0VvDng3SNIfAltrZVlxyN5GWx/wACJr4u1rSLvQdavNKvYylzaStG4PfHQj2IwQe4IoA9Q/4aN8Xf9A7Q/wDvxL/8crzbxHr1z4n8QXmtXcNvBcXbB5EtlKoCFAyASTzjJ56k19GeHvj/AOFr6xj/ALaM2mXgAEg8lpYye+0rk4+o/PrVjUvj94Ls0P2SW9v37LDblB+JfbQBsfBuZp/hLoLuSSElTn0WZwP0ArqYdA0W31E6jDpFhFfNyblLZBIf+BAZrRxXgP8Aw0x/1KP/AJUv/tVAHv2aWgDFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFch44+HOh+O7VBqEbQ3kS7YbuHAkQf3T2Zc9j+GK6+igD5ouv2cPEqSkWmsaTLH2aUyRnH0CsP1rd8P/s4RxXCTeIdZE0anLW9khXd9XbnH0X8RXvVFAEcEEVtBHBBGkcMahERBhVUDAAHYVynxH8Df8LA8PW+k/wBo/YPJu1ufN8jzc4R1243L/fznPauvooA8w+HHwf8A+Ff+IbjVv7d+3+baNbeV9k8rGXRt2d7f3MYx3rsPG/hn/hMfCF9oP2v7H9q8v9/5fmbdsiv93Iznbjr3rfooA8g8E/Az/hDvF9jr/wDwkf2z7L5n7j7F5e7dGyfe8w4xuz07Vs/Ez4Vj4gz2l0mrvZT2sRjRGiEiHJznqCD789OlejUUAfMj/s5eKw/yapopX1MkoP5COuv8Kfs86dp15FeeIdQGomMhhaxR7IiR/eJOWHtx75r2zFFABXmvj/4N6T41u21O3uG03VWGHmRNyS46b1yOfcEe+a9KooA+ZG/Zx8WB8DVNFKepllB/Ly61tP8A2a7kuDqXiOFFHVba2Lk/8CYjH5V9C0UAFfANff1fIP8AwpP4h/8AQv8A/k7b/wDxygDqLv8AZv8AEaTlbTWNKlhzw0xkjbH0CsP1rvfAfwN0vwtfQ6pqt0NT1CIh4V8vbFE3rg5LEdicY9M4NesZzS0AAr4Br7+zjtXyD/wpT4g/9C//AOTtv/8AF0AdZq
H7N2uxzkabrenTw54NyrxN+Shv510/g79n2w0u9jvvEd6motGQy2kSEQ5H98nlh7YA9c9K9r60UAJisTxT4S0jxhpLadrFt5sed0bocSRN/eVux/Q9xW5RQB82X37N2vpOw07WtMnhzw1wJIm/IK3866bwn+zzYafdR3fiO/GoFCGFrApWIkf3ieWHtgV7bRQADivIPG3wM/4THxffa9/wkf2P7V5f7j7D5m3bGqfe8wZztz0716/RQBz/AIJ8M/8ACHeELHQftn2z7L5n7/yvL3bpGf7uTjG7HXtXQUUUAcV8QPhnpPj+1ia5ke11CBSsN3EoJA/usv8AEuecZBz0Iyc+NP8As4+KRKRHqujtHnhmklDflsP86+mqKAPPvhd8Npfh5bagJtVF7JfGMuqRbEj2bumSc/ePPHatjxn8P9C8c2ax6pCy3EQIhuoTtkj9s9CPYg/nXU0UAfNV3+zd4ijmIsta0uaLPDTeZEfyCt/Ormm/s2agzqdT8QWsS91toWkJ/FiuPyr6JooAK5bxp4A0Px1ZJDqkLLPECILqEgSR+2ehHsf0PNdTRQB81Xn7N/iFJiLHWtLmizw03mRHH0Ct/OprL9m3WJGH27X7GBe5gieX9Dtr6PxRQBieEPDUHhDwtZaFbzyXEVqHxLIAGbc7Oc492P4Vt0UUAZ+u6Z/bXh7U9J87yft1pLbebt3bN6Fd2MjOM5xkV5BoX7PP9ieIdM1b/hKPO+w3cVz5X9n7d+xw23PmHGcYzg17fRQBn69pn9t+HtT0nzvJ+3Wktt5u3ds3oV3YyM4znGRXkGhfs8/2J4h0zVv+Eo877DdxXPlf2ft37HDbc+YcZxjODXt9FAGfrumf234d1PSfO8n7day23m7d2zehXdjIzjOcZFeQaF+zz/YniHTNV/4SjzvsN3Fc+V/Z+3fscNtz5hxnGM4Ne30UAZ+u6Z/bXh7U9K87yfttpLbebt3bN6Fd2MjOM5xkV5BoX7PP9ieIdM1X/hKPO+w3cVz5X9n7d+xw23PmHGcYzg17fRQBBe2dtqNlNZXkKTW06GOWNxkMp4INeAaz+zdeC4ZtE16B4SfljvUKso9Cy5z9cCvoaigD5li/Zx8Ulh52q6Oi+qSSsf1QV6b8NvhCngDWJ9VfWWvZ5rY25RYPLVQWVs53En7nt1r02igDI8SeIdP8LaHNq2qO6WkTKrFF3HLMFGB365+grnZfFHwz8RKJr3UvDl3kcfbvK3flJz+lL8U/BeoeOvC8Wl6dewW0kdws588Ha+FYBcjJHLZ6HpXgFx8DPH0EhWPSYbhRxviu4gD/AN9MD+lAG78Z/GPhnUNNsfDfhWO0NtDcfabiS0iCRbgpVVXAAbhmJPTp15xe/Zt0uR9c1rVtp8qK2W2DY6s7BsD6bP1FZGjfs++Lb6Zf7Tls9Mgz8xaTzXx7BeD+LCvpDw/oGn+GdFt9J0yARWsC4A6lm7sx7knnP8qANCWJJomilRXjcFWVhkMDwQR3FeF+I/2cree4efw7q/2ZGORbXalwv0cc4+oJ9693ooA+Zof2cPFDOPP1bR0T1jeVz+RQfzr2H4bfDmP4eWF5Aupy30l2yM5MYjRSoP3Rknv3PYV3FFABRRRQBxXxA+GekePraNrl2tdRgUrDdxqCQOu1h/EueccY5weTnxp/2cfFXm4j1XR2jzwzSSg4+mw/zr6aooA8++F/w3l+HltfibVBeyXvll1SLYibN3TJOc7uvHQVtL8RPBsiO3/CTaYuwkMr3Ko2R/snB/Sunr5JufgZ4+t5THFpcNyvaSK7jA/8eYH9KAOZ8ca3aeJfGmq6vY232e3upt0aYwTwBuI9WxuPuTX1D8IvCjeE/ANrDPGUvrsm6uQRyrMBtU/RQvHrmvPvAXwCntNSh1PxXLA6QsHSwhO8OR08xsYx7DOfXtXvlABXwDX39XyD/wAKT+If/Qv/APk7b/8AxygDqLv9m/xGk5W01jSpYc8NMZI2x9ArD9
a73wH8DdL8LX0OqardDU9QiIeFfL2xRN64OSxHYnGPTODXrGc0tACYrlvGnw+0LxzZLFqkLLcRAiG6hOJI/bPcexzXVUUAfNV3+zf4iSU/YtZ0qaPs03mRn8grfzq5p37NmoPIp1TxDbRJnkW0LSE/i23H5GvonFFADXdY0Z3IVVGSSeAK+CrW1lvbuG1t1LzTyLHGo/iZiAB+Zr7j8R6Odf8ADt/pIu5LQXkJhaaNQWVTwRg+oyPoa8v+H/wSk8I+MxrF/qNvfQ28bfZQkZRhIeNzA5AwM45PJ9qAPZOlFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUVBeXkGn2Vxe3UgitreNpZZG6KijJP5A0AT0V53B8avBt5rVlpdjdXd5PeXEdvG0duyqGdgoJ37eMn/61eiUAFFFFABRRRQAUUUUAFFFFABTfrinV8a/Fa7v7n4ma6NQZy8VyYolY8LEPuAD0K4P4570AfZI/KlrzX4E3V/dfDK3N8zssc8kdsznJMQxjn0DbwPpXpVABSYA6ClooAKKKKACiiigAorJ8QeJdI8Laet/rV6tpbNII1cqzZYgnGFBJ4B7dqwfCvxP8O+Mtdm0nRmupZYrdrhpJIdibQyrgZOc5YdqAO0ooooAKKKKACkOQcUtfIfxvlkb4ta1GzsUQQBVJ4X9xGeB2oA+u+aM8cHmuB+CX/JIdC/7eP/SiSvIv2jpZB4+sIhI3l/2XG2zPGfNl5x60AfTYOR0xQTXkP7OX/JPL/wD7Csn/AKKirlP2kbq/GtaNalnGnfZmkQA4VpdxDfUhdv50AfRINLXzr+zddX51jWrQM5077OsjDPyrLuwuPQld312j0FfRVABRRRQAUUUUAFFFFABRRRQAUUV5f4z+Nmk+D/EV1oj6Xe3V3a7PMYMqRncgcYOSejDsO9AHqFFeBN+0wobC+EmK+p1HB/8ARVbWk/tE+G7yVY9T0++08k8yDEyL9cYb8lNAHsdITUVpd21/aRXVpPHPbyrujliYMrD1BHWvnv8AaRur8a1o1qWcad9maRADhWl3EN9SF2/nQB9Eg0Hivnb9m66vzrGtWgZzp32dZGGflWXdhcehK7vrtHoKvftMSyJH4ZjV2VH+1FlBwGx5OMigD3vmjP514D+zL/zNP/bp/wC1q6P9ouWSL4e2PluybtUjVtpxkeVLwfagD1sc8EH60tfMP7OP/JQtQ/7BUn/o2Kvp6gAooooAKKKKACiiigAooooAKKKKADFFUdY1iw0HS59T1S5W2soMeZKwJC5IUdOepA/GuR0T4u+FfEXia00LSpru4ubovsk8gpGNqMxzuweinse34AHeUYFFFAB0ooooAKKKKACiiigAowKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKwPHX/JPPE3/YKuv/RTVv1geOv+SeeJv+wVdf8AopqAPjXw1qMOj+KdI1S4V3gs72G4kWMZYqjhiBkjnA9a+iv+GjfB/wD0Ddc/78Rf/Ha+cdB0wa14h0zSvO8k3t3Fb+bt3bN7hd2MjOM5xkV7d/wzP/1N3/lN/wDttAHu17eQadYXF9dSCK2t4mmlcg/KijLHjngAmvI9U/aM8OWzsmnaXqF7g4DvthVvpyT+YFejeO/+SeeJf+wVdf8Aopq+MvDumx6z4m0nS5XaOO9vIbd3TqodwpIz35oA9zg/aWtmkxceF5Y09Y70OfyKCvUte8b2HhzwfbeJruyv5LKZYmMcKKZIxIMjcCwHUgHBPJrz+6/Zw8ONCRZ6xqsUuPvS+XIPyCr/ADr1PX9Gh17w3qGjzALHd27wg4+4SOCPocH8KAOM8L/Gnw14t8Q22i2VrqcFzc7vLe5ijVCVUsRkOTnAPavRq+DtL1GfSdXs9Stjie0mSePP95WyP5V9ueJtYTQPC2p6uSv+i2zypnozAfKPxOB+NA
Hm7/tFeEFdl+wa22CRuEEWD9P3ter2dyL2xt7oRSRCaNZBHKAGTIzhgCRkZ55NfDOg6TLruv2GlQZ8y7uEhBAzjcQCfoBk/hX3bQAV5T46+J/gbSvEU+heIvD9xqVxZFTuazhmQbkVxt3uD0YdhzXq1fIPxt/5K9rv/bv/AOiI6APqXwt4hsvFnhq01vT4porW53+Wk6hXG12Q5AJHVT3rlPF3xl8N+D9Yn0i6hv7m+gC+YkES7V3KGGWZh2YdM1L8Ev8AkkOhf9vH/pRJXgHxs/5K9rv/AGw/9ER0AeiTftLQKx8nwrI69i98F/lGa3fDv7QPhvVrtLfU7S50p3IAlkYSRA/7TDBH1xj1IrlPh58FvD/i3wNp2uX9/qcdxdeYGSCRFQbZGQdUJ6KO9eXeOvCU3grxXc6NNN56IFkhm27fMRuQcdu4+ooA+2a5nxh460LwRZpcavcsJJM+VbRDdLLj0HAA9yQKwfglrUus/DSy89zJLZSPaFjySFwVH4Kyj8K+cfiXeXF98SvEMtyxLpfSQrk9ERiqj/vkCgD1iT9paES4j8KyNH/ea/AP5CMj9a7fwL8XdH8c6qdMtrC+tbxYjKRIFZNowD8wOe47d64/4cfDv4a+JfCdjK4XUdTaFWuka8dJIpMfMNisuADwCRyO5r0Dwb8NND8D6ne3ukNdE3UaxlJ3DiMA5wpwDzx1z0oA5P8AaO/5J7p//YVj/wDRUteN/CbxrpvgTxTdapqkF3NBLZPbqtqiswYujZO5lGMIe/pXsn7R3/JPdP8A+wrH/wCipa8Q+HHgb/hYHiG40r+0fsHk2jXPm+R5ucOi7cbl/v5zntQB71pPx78LazrNjpdtYayk95cR28bSQxBQzsFBJEhOMnng16B4i1608MaBd6zfJM1taqGdYVDOcsBwMjufWvJNC/Z6/sTxDpmq/wDCUed9iu4rjyv7P279jhsZ8w4zjGcGvW/EekL4g8N6lpDuIxeW7wiQru2EjAbHfBwce1AHjt1+0rZo5Fr4ZnlXsZbsJ/JWrS0L9ofw9qFykGq6fdaZvOPNDiaNfqQAw/AGm2/7OPhlYR9o1fV5Hx1jaNB+RQ/zrxP4heDJfAvil9Jef7RE0SzwTbdpeMkgZHYgqR+FAH2kCCMg8H0r5B+Nv/JXtd/7d/8A0RHXuPwG1WXU/hlBFKxY2NzJaqScnaMOB+AfH0FeHfG3/kr2u/8Abv8A+iI6APf/AIJf8kh0L/t4/wDSiSvIP2jv+Sh2H/YKj/8ARstev/BL/kkOhf8Abx/6USV5B+0d/wAlDsP+wVH/AOjZaAO//Zx/5J5f/wDYVk/9FRV1HxF8c+HvB9tZW3iHTbi/g1DzAsUcEcqfJtzuDsB/GMda5f8AZx/5J5f/APYVk/8ARUVc/wDtM9PC/wD29/8AtGgD0j4deOfD3jG2vLbw9ptxYQaf5YaKSCOJBv3Y2hGI/hOenarnjT4gaL4DgtZNX+0s91v8mO3j3M2zbu6kAfeHUjrXlf7MvXxT/wBun/taj9pr/mV/+3v/ANo0AWbn9pWyR8Wvhm4lX1lu1jP5BW/nU+mftI6RPMq6noN3aITgvBMs2Pcghf0rz74Q/DnS/iAdY/tO6vIFsfJ2C1ZVLb9+c7lP9wfnTPit8L18ANZXVjdzXWnXTNGDMo3xuOcEjAORkjgdDQB9V2V7bajZw3lnPHcW0yB45Y23KwPcGpzjBzjGO9fPv7N2tS/atZ0N5CYTGt3En90g7XI+uU/KtD9onxRPZ2On+G7aQoLwG4utpwWjU4RfoW3H/gIoA1/EH7QPhnSrh7fTLa51Z16yR4jiJ9mPJ+u3HuawoP2lrdmH2jwtKi9yl6GP6oP515z8M/hpcfEG+uHe5NpptptE0yruZmPRVB4zjPPbj1ro/ix8J9F8C+GLXVNMvdQmllvFtmS6dGGCjtkbVHdBQB7J4K+Kfh/x1fS2OmR3sN3FCZmiuIgPkDAEggkdWFdxXzB+zj
/yUO//AOwVJ/6Nir6foAK+Qfjb/wAle1z/ALd//REdfX1fIPxs/wCSv65/27/+iI6AOj+E/wAKND8d+FrrVdTu9QhlivXt1S1dFUqERsncrc5Y1ifFD4WSeADa3ltem8025cxqzqFeNwMhWxwcgEgjHQ5Hr2HwW+Inhbwl4NvLDXNU+y3UmoPMsf2eV8oY4wDlVI6qfyrA+MPxSsfG8NnpWjwzCxtpfOeaZdplfBAwOwAZuvJz0GOQCT9n3xBPYeOX0UyE2uowv+7zwJEG4N/3yGH417V8RfHPh7wfbWVt4h024v7fUPMCxRwRyp8m3O5XYD+MY614Z8AdHmvviPHqCo3kadbySO+OAzqUUfU7mP8AwE11f7TX/Mr/APb3/wC0aAPSPh1458PeMba8tvD2m3FhBp/lhopII4kG/djaEYj+E56dq83/AGmenhb/ALe//aNH7MvXxT/26f8Ataj9pr/mV/8At7/9o0AH7Mv/ADNP/bp/7WroP2jv+Se6f/2FY/8A0VLXP/sy/wDM0/8Abp/7WroP2jv+Se6f/wBhWP8A9FS0Aef/ALOP/JQtQ/7BUn/o2Kvp48CvmH9nH/koWof9gqT/ANGxV7v8RPEx8I+BtS1aMj7SqCO3/wCujHap/DO76CgDL8ZfFrw14Mna0uJZLzUVxutbUAlPTcScL9OvtXB2/wC0raNOFufDE0cOeXivA7Y/3SgH614l4X8O3vizxHaaNY48+4bBdukagZZj7AD+nevb9e/Z0sE0WWTQ9UvX1GNCyx3WwpKR/CMKNufU5oA9f8O+JtH8V6aL/Rr1LmDO1gMhoz6MDyDXP+Nvido3gK+trXVrHUpTcxmSOS1jRkODgglnBz07dxXyt4P8TXXhHxRZaxauw8lwJkU/62I/eQ/UfkcHtX0X8f8AQ/7T+Hv9oRpum0y4WUnHPlt8jD8yp/4DQB1ngjx5pXjzT7q80qK6iW3l8p47lVVskAg4VmGDkjr2NS+NPGmmeBdHi1PVEuJIpZhAqWyqzliCc4ZlGMKe/pXg37O2sfY/G95pbvhL+0O0eskZ3D/x0vVj9o3W/tXinTdGR8pY25lkAPR5D0P/AAFVP40Ae2eCPHmk+PLC5u9KiuoltpBE8dyqq2cZBG1mGPx7Gjxt480nwFp9tearFdTLcy+Ukdqis2cZJIZlGPx7ivCv2d9Z+xeN7vS3fCahanaPWSM7h/46Xo/aJ1j7Z44s9MR8pYWg3D+7JIdx/wDHQlAHs/gn4oaN49vrq10qx1KI20Qkkkuo0VeTgAFXY5PPbsea7ccivK/gFoX9l/DwX8iYm1Odpskc7F+RR+jH/gVeqUAcB8bf+SQa7/27/wDpRHXzT8O/EVn4S8dabrl/HPJbW3m70gALndE6DAJA6sO9fS3xt/5JBrv/AG7/APpRHXzD4I8NDxh4vsdBF39k+1eZ+/8AL8zbtjZ/u5Gc7cdR1oA9+/4aN8H/APQN1z/vxF/8dr1+vAP+GZ/+pu/8pv8A9truvjP4sm8K+BZBZSmO+1CQW0TqcMgIJdh+Axn1YGgCHxd8bPDPhe7ksYfN1S9jJWSO2I2RnuGc8Z9hn04rlrH9pPTpbhV1Dw3c28JPLwXKykfgVT+deP8AgHwVd+O/EaaZbSiCFEMtxcFc+Wg44HckkAD/AAr03xx8ArbSPDtzqnh6/vLiW1jMsttdbWLoBklSoHIGTgjnFAHvOkavp+u6bFqGl3cV1aS/ckjOR9D3B9jzV6vjv4UeLJvCvjqwYyEWV7IttdJngqxwGP8Aukhs9eo719iYxxQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFYHjrn4e+JR/wBQq6/9FNW/VbULOPUdNurGb/V3MLwv9GBB/nQB8VeBePiD4aJOANVtcn/tqtfbxr4JkiuLC7eKaOSC5gk2srAqyMD0x1BBr1F/2hfGbW3kiDSUfGPOW3
Yt9eXK/pQB9C+O/wDknniX/sFXX/opq+QfAn/JQvDX/YVtf/Rq19Y+JL9dU+Eer6gn3brQppxj/agLf1r5O8Cf8lC8Nf8AYVtf/Rq0Afb1FFFAHyD8Y/C58NfEK9MabbTUP9MgwOBuJ3j8GDcehFbHiX4grq3wO0LQxNm+8829yuefLgAK5+oaPn1U17R8XvCX/CWeBbkQR77+xzdW3HLYHzJ+K549QtfIHtQB7R+zr4dN54nvtelTMVhF5URI/wCWsnBI+ihv++hX0rXKfDfwz/wiXgXTdNkQLclPOueOfNfkg/Thf+AiuroAK+Qfjb/yV7Xf+3f/ANER19fV8g/G0f8AF3tc/wC3f/0RHQB7/wDBL/kkOhf9vH/pRJXz/wDG3/kr2u/9u/8A6Ijr6A+CX/JIdC/7eP8A0fJXgHxsGfi9rn/bv/6IjoA9/wDgn/ySHQv+3j/0okrx/wDaNGPiHYf9gqP/ANGy17B8E/8AkkOhf9vH/o+SvH/2jv8AkoVh/wBgqP8A9Gy0Aegfs4/8k91D/sKyf+ioqb8Vfg3L4s1Btd0GWKLUmULcQSnak+BgMG7NgAc8HA6d3fs48fD2/wD+wrJ/6KirktS+PuvaT4t1a2jtbG+02G8kjgDgo4RWIHzA45x3FAHkGseH9X8PXP2fV9NubKU/dE0ZUN7qejD3Ga9O+C/xH1TT/E9l4d1C8ludMvG8iJZWLGBz93aT2JwMdOc9uZvE/wAf5/EPhu/0iLw5FbfbIWheWS6Mu1WGCQuxefQ544rk/hD4du9f+IulvDGxt7CZbueUDhAh3KCfUsAB+PpQB7J+0d/yT2w/7Csf/oqWvP8A9nIgfEO/BPXSpAP+/sVey/F3w5P4m+HV9a2kZkurcrdQxqMlinUD1O0tgdzivk/w/r2o+GNat9W0qfybuEnaduQQRggg9QR/nNAH3VmvHPjr8QL/AMN2lnoWj3D215eoZZriM4eOLOAFPYkg89Rj3rg4f2gPFl3qVks66da2onj8/wAiA5ZNw3DLs2MjPTmtz9o7w9dNeaX4ijjZrYQ/ZJmHSMhiy5+u5vyoA8z8GfDfxB47WeXSkgS3hbY9xcyFU3YztGASTjngdx61T8Z+DdT8DaxFpeqSW0k8kAnVrZyy7SzKOSAeqmr3gv4l+IPAkdxBpTW8ltO294LmMsm7GNwwQQcY79hVLxp401Hx1rMWqapFbRzxW626rbIyrtDM3QsTnLHvQB73+zj/AMk91D/sKyf+ioq8g+Nv/JXtd/7d/wD0RHXr/wCzjx8PdQ/7Csn/AKKirzb4/aJcWHxDfVGjb7NqUCMkmONyKEZfqAqn/gQoA9p+CR/4tFoY7/6R/wCj5K8g/aO/5KHYf9gqP/0bLVP4O+OfEWm+ItN8MWcsUmm3l0N8U0ZbyweXKEEEHAJ9M9q6L9pDQ7garpGupGWtmtzaO4HCMrFlB+u5sfQ0AdT+zkcfDzUP+wrJ/wCioq5/9pnp4W/7e/8A2jXmnw+8ceIvCmqLZ6JJE0d9Mkb286b0ZicA8EEHnsa9L/aYHHhfp/y9/wDtGgA/Zl6+Kf8At0/9rUftNf8AMr/9vf8A7Ro/Zm4/4Sj/ALdP/a1L+01z/wAIv/29/wDtGgBP2Zf+Zp/7dP8A2tXQftHD/i3unn/qKx/+ipa5/wDZm4/4Sn/t0/8Aa1dB+0dz8PbD/sKx/wDoqWgDgP2cf+Shah/2CpP/AEbFVn9pGzkTxhpF6QfLmsPJUnplJGJ/9GCq37OQ/wCLg6h0/wCQVJ/6Nir3rxv4NsfHHhyXSrxjE+RJBcKMtDIM4IHcc4I7g9utAHlH7O/inTLXStQ8PXVzFBdvdfaYRIwXzQyqpC56kbBx159jWp+0ZfWjeCdPshdQm7/tJJfIEg37BFIC23rjLDn3FeN6t8K/G2kXLxS+Hry4APElmnnqw9RsyfwIB9qzNV8GeI9C0dNU1bSbiys5JlhU3GEZnIY42k7uinnFAHf/AL
OP/JQ7/wD7BUn/AKNir6fr5h/ZxB/4WFqBxwNKkB/7+xV9PUAFfIPxt/5K9rn/AG7/APoiOvr6vkH42/8AJXtc/wC3f/0RHQAzwV8J9d8eaNNqml3enQwRXBt2F1I6sWCqxICoRjDjv613ulfs2XBlVtZ1+NYwfmjs4Sxb6M2Mf98mul/Zy/5J5qH/AGFZP/RUVev0AZ2h6FpvhzSotN0m0S1tY+Qi9z3JPUk+prxH9pnp4X/7e/8A2jXv9eA/tMjI8L/9vf8A7RoAT9mXr4p/7dP/AGtR+01/zK//AG9/+0aP2ZuP+Eo/7dP/AGtWv+0ZolxfeGtL1aCNnTT53WbA+6sgUbj7ZRR/wIUAZH7Mpx/wlHv9k/8Aa1dB+0d/yT2w/wCwrH/6KlrwHwl4y1nwVqbX2jzqjyJsljkXcki9cMPr6YPvX0n8ctDuNZ+Gs7WsZkksbhLsooySqhlY/grE/QUAeVfs4/8AJQr/AP7BUn/o2KvRP2iI3k+HNsyAlY9SiZ/YeXIP5kV84+HvEOp+F9Yh1XSZ/JuosgHAIYHqpB6g19j3+j/8Jh4COm6sqRy6hZJ52xeI5SobKg/3XwQD6CgD58/Z5nhi+JEySEBptPlSPPdt6Nx/wFWr6lyP618JMmqeG9b2ss9jqdlKDg5V43HSu41343+MNe0aTTJXs7SKVCkslpEVkkUjkEljjPtigDzevvXULKHUtOurG4XdBcxNFIvqrAg/oa+Rfhb4DuvGnimBpYG/si1kEl5KR8rAc+WPUt0x2GTX2FQB8NeGNWk8OeLNM1TDA2d0kjr3Kg/Mv4jIrUmab4hfFBiu7/ibaiAvqkRbA/75T+VbPxs8Of2B8RryaNNttqQ+2R46bmJDj/voE/iK6f8AZz8PfavEOo6/KmY7KEQQkj/lo/Uj6KCP+B0AedQmfwD8S137t2k6lhvV0V+fwZf51n+JtWk8SeLNS1UBmN7dPJGvUhSflX8BgV6l+0X4f+x+JtP12JMR30JilI/56R4wT9VKj/gNc98EvDZ1/wCIlrcSR7rbTF+1yZHG4HCD67iD/wABNAH1bY2cOnadbWNsuyC2iWGNfRVGAPyFWKKKAOA+NnPwh13/ALYf+j46+f8A4J8fF3QiTgfvx/5Akr6h8a6G3iTwXq+jx4825t2EWTgeYPmXJ7DcBXxVp99daTqNvfWcjQ3dtIJI5B1VgcjigD7zzivBf2mInNt4alGfLV7lW+pEZH8jXJXf7QnjS4tzHFHpdq3TzYbdi3/j7MP0r3z4ieEV8beDrrSlKpdAia1kbosq9M+gIJU/71AHkv7NM8K3viSBiPOeO3dB32qZA36sv6V79eXENrY3FxOQIYo2eQnptAyf0r4c0rVdU8L63HfWEstnf2rkdMFT0KsD+IINdX4r+L/inxhpbabePa2tm/8ArY7SMp5uOQGLMTj2zQBwtvFJNdRRRZMjuFXHXJPFffNfLnwQ8BXOt+JYPEN5AV0vT38yNmHE0w+6F9Qp5PuAK+o6ACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigApCM0tFAGNrvhPQfE0aprOlW15tGFeRfnUezD5h+BrAtfg94BtJRJH4dhZgc/vZpJB+TMRXcUUAQNZWr2LWLW0LWjRmJoCgMZQjBUr0xjjHTFZUHgvwta3EVxb+GtGhnicPHJHYRKyMDkEELkEHnNblFABXN+PPE7eD/Bt9rccSTSwbBHE5IDszhccfUn8K6SsbxN4X0rxfpH9maxA8ttvEihJGQqwBAOQfQnrxQB4D4j/AGhtU1jRZ7DTdHj02WdDG9z9pMrKp67RtXBx35x9a4/4YeCpfGvjCCB4idNtSJr18cbAeE+rEY+mT2r26L9njwbHMJHudXlXOfLe4TafyQH9a9N0zSrDRtPisNNtIrW0iGEiiXAHv7n36mgC5RRRQAVjX3hLw5qd495f6BpV1dSY3z
T2ccjtgYGWIJPAA/CtmigCvY2FnplnHZ2FrBaWsedkMEYjRcnJwowBkkn8azr7wl4c1O8e8v8AQNKurmTG+a4s45HbAAGWIyeAB+FbNFAFexsLPTLNLSwtYLW2jzshgjEaLk5OFAAGSSfxqnqXhrQtZuFuNU0TTb6dUCLJdWqSsFyTgFgTjJJx7mtSigClpukabo1u1vpen2ljAz+YYrWFYlLYA3YUAZwAM+wrI1f4f+E9dkaXUdAsZZWOWlWPY7fVlwT+ddJRQBwsHwb8AW8gdPDsZIOcSXEzj8mciu0tbS3srdLe1giggQYSKJAqqPYCpqKACuX1v4c+EfEVy9zqehWstw/LyoDE7H1LIQSfrXUUUAchpvwt8EaTMJrTw5Z+YpyDPumx9N5NdcQCCMDFLRQBxV78JPAeoStLP4ctlYnJ8h3hH5IwFJa/CLwFaOHj8OW7Ef8APWSSQfkzEV21FAFax0+z0y3FvYWlvawA5EUEYRc/QcdqfcWsF5bPb3UMc8Eg2vHKgZXHoQeoqaigDldK+G/hHQ9ai1jTNFitr6LdskSSTC7lKnC7tvQkdK6h0WRGR1DIwIZWGQQe1OooA5CL4XeC4NWt9Ug0GCG8t5VmieF3QK6nIO0MF4I9K39U0HSNb8r+1dKsb/yc+X9qt0l2ZxnG4HGcDOPQVoUUAZ2maDpGieb/AGVpVjYGbHmfZbdIt+M4ztAzjJ/M0anoOka35X9raVY3/k58v7VbpLszjONwOM4HT0FaNFAGfpehaRonm/2VpVjYedjzPstukW/GcZ2gZxk4z6mn6lpGm6zbLb6pp9pfQK4dY7mFZFDAEAgMCM4JGfertFAGXpvhrQtGuWuNL0XTrGdk2NJbWqRMVJBxlQDjIHHsK574qeLLrwb4Il1OwaNb1p4oYTIu5clstkd/lVq7WsnX/DOjeKLEWWtWEd5bhtyq5KlT0yCCCDye/egDw61/aWvEjAu/DEEsndorwxj8ijfzrzzx98SNV+IF3A15FFbWdtnybaIkgE9SSfvHgDPHH1Ofcrj9nnwZM+6OfVrcf3YrhCP/AB5Ca1NG+CPgfR5lmOny38iHKm9l8xfxUAKfxBoA5T9nfwvcWGl6j4huoigvtsNruGC0aklm+hOAP9017fRiigArGvvCXhzU7uS7v9A0u6upMb5p7OOR2wMDLMpJ4AHPpWzRQBT03SdO0a3a30uwtbGBnLtHawrEpYgDJCgDOABn2FXKKKACs/VNB0jW/K/tXSrG/wDJz5f2q3SXZnGcbgcZwM49BWhRQBnaZoOkaJ5v9laVY2Bmx5n2W3SLfjOM7QM4yfzNaGOOtLRQBxt98KPAuoXHnz+HLRXzn9yXhH5IQP0rsqKKAOS1X4Y+C9anae98PWjSucs8W6EsfUlCM11uKKKAM/VtC0rXrX7Nq2n217COQs8YbafUE9D7iuWt/g74BtZxNH4dhZwc4kmlkX/vlmI/Su5ooAYkSRIqRqqooCqoGAo9AKfRRQBn6noek60sa6rpllfLGSYxdW6yhCeuNwOM1Jp2lafpFsbbTbG2soCxYxW0KxLuPU4UAZ4H5VcooAp6jpOnaxbrb6nYWt7ArBxFcwrKoYd8MCM8nn3pmmaJpWipImlaZZ2KyEF1tYFiDEdM7QM1fooAKKKKAEIyc1ga94G8M+J5PN1jRra6lxjzSCkmPTepBx+NdBRQBxdj8JPAenyiSDw5bMw/57u8w/J2IrswABgcD0FLRQBj654V0LxJEses6Va3oThGlT51Hsw5H4GsKx+EfgPTpxNB4ctmYHOJ3kmX/vl2IrtaKAEAwMCloooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAgvbuDT7G4vbqURW9vG0ssjdEVQSSfYAVwUHxq8G3mtWWl2Fzd3c95cR28bR2zKu52Cgkvt4yfeuk8d/8k88Tf9gq6/8ART
V8g+BP+SheGv8AsK2v/o1aAPt6iiigArzTUPjv4Gss+TfXN6R2t7Zv5vtFemCvgCgD7+ooooAKKKKACiiigArF8VeJrDwj4fuNY1F8QxDCoD80rn7qL7n/AOv0Brar5B+L3jF/FvjW4WGUtp2nsba2AOVOD8zj/eI6+gFAH074T8aaJ4004XmkXQcqB5sD/LLCfRl/qMg9jWNrXxg8FaFeXFnc6q0l3byNHLBDbuxVlOCM425BHrVP4N+DV8K+CoZ54wNR1JVuJyRyqkfIn4A5+rGvmfx3/wAlD8S/9hW6/wDRrUAfaOk6lFrGj2OqW6usF5bx3EayABgrqGGcEgHB9T9au1z/AIE/5J54Z/7BVr/6KWugoAKKK+cfGHx28U6d4l1bSLC306CKzvJrdJTEzuQjlcnLY7elAH0dRWR4UvrnU/B2iahePvurqwgmmfaBudo1YnA4HJNa9ABRXzj4v+O3irTvEmraTYW2nQRWd5NbpKYmd2COVBOWx29K938KX1xqfg7Q7+7k8y5utPgmmfaBudo1LHA4HJPSgDXooooAKKKKACiiigAooooAKKKKACszxFrUHhzw7qGsXI3RWcDS7c43kDhc+pOB+NadeGftG+IvI0zTPDsT/PcSG6nAP8C8KD7Elv8AvmgBNL/aIuNW1az0228IZnupkhj/AOJj3YgD/ll717pXzL+zz4d+3+L7vW5UzFpsO2Mn/nrICB+Sh/zFfTVAGX4j1uDw54d1DWLgborOBpducbyB8q57ZOB+NeP6V+0Pcavq1np1r4R3T3UyQxj+0e7EAf8ALL3pf2jvEPkaVpfh6J8PcObqcA/wL8qg+xJY/wDAK5j9nfw99v8AF15rcqZi02HbGT/z1kyP/QQ/5igD6aooooAKhurqCxtJru6lSG3hQySSOcKigZJJ+lTVzvjvQJvFHgjVdGt5FjnuYh5TMcDerBgD7EqB+NAHN+HvjX4R8Ra1HpUMl3azzPsge7iVElbsAQxwT2zjnjrXo1fH/hb4VeLdW8R21tcaNe2NskqtPcXUTRqiA8lSR8x9AM5+nNfV3iLUpNH8MatqkSK8llZzXCo3RiiFsH8qANKivmjQvjd4x17xlodg8llbWt1qEEMsdvb/AHkaRVYZcsRwTyK+l6AIL27g0+xuL26lEVvbxtLLI3RFUEkn2AFcFB8avBt5rVlpdhc3d3PeXEdvG0dsyrudgoJL7eMn3rpPHf8AyTzxN/2Crr/0U1fIPgT/AJKF4a/7Ctr/AOjVoA+3qKKKACiiigAooooAgvbuDT7G4vbqURW9vG0ssjdEVQSSfYAVwUHxq8G3mtWWl2Fzd3c95cR28bR2zKu52Cgkvt4yfeuk8d/8k88Tf9gq6/8ARTV8g+BP+SheGv8AsK2v/o1aAPt6iiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAIri4itLaW5ncRwxIZJHPRVAyT+VeDL+0uzMFXwgWJOABqPX/yFXV/HvxD/ZHgA6fG+LjVJRCBnkRr8zn/ANBX/gVeMfBXw9/b3xJsXkTdb6eDeSZHGV+5/wCPlT+BoA+uh0560UUUAFFFFABRRRQAUUUUAFFFFABXlXxS+L0Xg2Q6RpMcd1rJXdIZOY7cEcbgOrEc4/E+h9Vr4Iubqa8u5rq5kMk8zmSR26sxOST+NAHQ3PxG8aXdwZpPFGqq2c4iumjX/vlSB+ld74F+O+s6ffw2fieX7fpzsFNyVAmhHrx98DuDz6HtXo+l/Er4TaLpSaZp+owQWipsMa6fN8477v3fzE9yetfN3ixNFj8U6gPDtx52kNLutn2MuFIB24YBuCSvPpQB9x18g6l8Z/Heou3/ABOjaof+WdrCiY/4Fjd+tfSHwt1GTVfhloF1K26QWwiLE5J8slOf++a+L6AOjPj7xiX3HxVrWfa+kx+W7Fd/4H+O2t6dqMFr4lnGoaa7BXnZQJoR/eyMbgO4Iz6GvpWW0tprdr
aW2ikgZdpiaMFSPQg8V8M6/p40jxFqemhiwtLuWAE9Tscr/SgD7s7Zr51+Mnj7xf4f8b3Gk6fq8lpp5hjliSKNA3K8/NjP3g3evdPCt2994P0S8kOZLiwglYnuWjUmsLxd4p8DeGNQS719rE6oIwkYFuJbjZkkDgEqMk4zgcmgD5ZX4geMVk8weKdZJ972Qj8s4r2L4VfGjUNW1m38P+JnjlkuTstr1VCsX7I4HHPQEAc+uci1rPxo+HnibTptL1TSdTe3mQp5klrGdmR95TvJBHByBXzrbztbXUU6Eq8Th1I6gg5oA+2fHf8AyTzxL/2Crr/0U1fFOn31xpepWuoWjhLm1mSeJiAdrqQynB4PIFfa3jv/AJJ54l/7BV1/6KavjzwZBDdeOvD9vcRJLDLqVskkcihldTKoIIPUEdqALUnxD8Zyz+a3inV9+c4S7dV/75Bx+lerfDT44X1xqdvoviqWOWO4YRw6hgIyOeAJMcEHgbsDHfPb3W90nTtRsHsbywt57VlKmGSMFcfTHFfDerWD6VrF7p0hy9pcSQMcYyUYqT+lAH3iOpr4Br7x0S//ALU0HTtQIx9qto58em5Q39a+DqAPv6vKvin8XYvBkv8AZGkxxXOssoLmTmO3BHG4d2Pp6cntn1U18EXNzLe3ctzO5eaZ2kkc9WZjkn8yaAOhufiN40u5zNJ4o1ZWPaK5aNf++VIH6V3vgb476zp9/FaeJ5ft+nOwU3OwCaEdM/LjePUHn0PGK9G0r4l/CfRNKj0zT9SghtETYUXT5/m4xlv3fzE9yetfN/ixNFj8UaiPDs/naQZd1s21lwpAO3DAH5SSvPXH40AfcQ7dc0tcj8LtRk1X4Y6BdSsWcW3kkk5J8tjHz7/LXXUAZviDVBonhzU9UbH+iWsk4B7lVJA/EiviTQtLfWfEGnaWhIa8uY4AR23MBn8M19Z/GWVofhLrzKcEpEv4GZAf0NfO3wahWb4taCj4IDyv+KwuR+ooA+w6+IfHf/JQvEv/AGFbr/0a1fb1fEPjv/koXiX/ALCt1/6NagCQePfFqWltaw+I9TggtoliijguWiVUUAKMKR2Heur8K/G/xToV5GNTu31bTycSRXGDIB6q/XP1yP519E+ArS2T4eeHglvCol0y2Z8IPmJiUkn1zXyl8StFt/DvxE1rTLRBHbRzB4ox0RXUOFHsA2B9KAPtCKSOaNJYnDxuoZXU5DA8gj1r4l8df8lD8Tf9hW6/9GtX1N8H7uS9+FGgSyEllieLJ9EkdB+iivlnx1/yUPxN/wBhW6/9GtQBr2Pxe8daZp1rYWeu+XbWsSwwp9kgbaigBRkoScADrXv/AMFPFGs+LfB13f65efa7qPUHhV/KSPCCOMgYQAdWP51c8GeDfC114F8P3Fz4a0eaeXTbZ5JJLGJmdjEpJJK5JPrXX6bpOm6Pbtb6Xp9pYwM5do7WFYlLEAZIUAZwAM+woA+LfHf/ACULxL/2Fbr/ANGtWxYfF7x3pmnW1hZ675draxJDCn2SA7UUAKMlMnAA61j+Ov8AkoXiX/sK3X/o1q+ovBng3wrdeBfD9xceGdGmnl0y2eSWSwiZnYxKSSSuSSec0AU/gr4o1nxb4NvL/W7z7Xcx6g8Kv5aR4QRxkDCgDqxPTPNeReOPi740g8WazplpqotLW0vZreNYYEDbUdlGWIJzgetfTGm6Tp2j2zW2mafa2MDOXMdtCsaljjJwoAzwOfavi7x3/wAlD8S/9hW6/wDRrUASP4/8Yu+4+KtZz14vZAPyBxXaeE/jx4k0e5jj1xxq1gWAfeoWZB6qwxuPs2c+or6C8CwxH4deHFMSFW0q2LAqMHMS5zXzp8bfBdn4T8V28+mQrBYajG0iwr92N1OHCjsOVOO2T0GKAPqi1u4L20iu7WVJoJkDxyIcq6kZBB9MV458WPjHdeG9Rk8P+HfLF/GB9ou3UOISRkKgPBbGCSQQM4+j/wBnLWJbvwpqmlyMWWxuVe
PJ+6sgJx9Nysf+BGvFfiZYT6d8SvEMVwrK0l9JOpPdXJdSPwagCvJ8QfGUkpkbxTrAbOcLeSKPyBxXr3wP8a+LfE3ia6sdU1V7zT7e1MrCWNS27coUb8Z7k8ntVbwF8eNO0jQrDRdd0yaNLOFYEubXDBlUYBZDjBx1IJz6V7jouuaR4hs/t+j30F5C2FMkTZI9mHUHnoaAPM/jN8T9T8G3Vno2iCOO9uIftElxKgby0LFVCg8Ekq2cg4wOOeD4M/E/U/GdzeaRrYjkvbeH7RHcRoE8xNwVgwHGQWXpjg9OOen+IXwz0z4gwW7XE8lnfWwKxXMahvlPVWXjIzyORgnryRR8Pfhnpnw/gneCeS8vrgBZbmRQvyjnaqjO0Z56nPHPAoA7ivin4ieIf+Eo8eatqavugaYxQY6eWnyrj6gZ/Gvrnxnr6eGPB2qawWAe3gYxA95D8qD8WIr4w8P6PNr/AIh0/SYMiS7nSLI/hBPLfgMn8KAPrX4SeHv+Ec+HGlwOm24ul+1zf70mCAfcLtH4V29ArC8Z68vhjwdqusFgHt4GMWe8h+VB/wB9EUAfI3xD8Q/8JR471bU1fdA0xjg548tPlXH1Az+NfT3wk8Pf8I58ONMgdAtxdL9sn7EtJyM+4XaPwr5M8P6PN4g8Q6fpEGfMu50i3AZ2gnlvwGT+FfXXxV1KXSfhhr11AxWTyBCGHBHmMsZx7/NQB5H47+PmoT3s1j4S2W1ohK/bpEDSSkcZUHIVfTIJ6HjpXmTfEDxk0nmHxVrIY9heyAflnFL4B8Px+KfHOk6NOxWC4lJlwcEoqlmAPuFIr7QgsbS2t1toLWGK3AwIo4wqgemBxQB8k6b8ZvHenOv/ABO2uUHVLmJHB/HG79a+p/Fl7cab4O1y+s5PKurXT55oXwDtdY2KnB4OCB14r4aNfb3jv/knniX/ALBV1/6KagD538KfF3x1qfjLQ7C713zLa61CCGZPskA3I0ihhkJkcHHFfQ/jr/knnib/ALBV1/6KavkHwJ/yULw1/wBhW1/9GrX1/wCO/wDknniX/sFXX/opqAPimwvbjTtQtb+0k8q5tpVmhkwDtdSCpweDyO/Fdv8A8Ls+If8A0MP/AJJW/wD8brnPBcMV1458PW9xEk0MmpWyPHIoZXUyqCpB4IIPSvsH/hBfCH/QqaH/AOC6L/4mgBfHf/JPPEv/AGCrr/0U1fE9leT6ffW97aymK5t5VlikHVXU5B/AgV9seO/+SeeJf+wVdf8Aopq+PfBEaTePvDkciK8b6pbKysMhgZVyCO9AFhPiF4yWbzf+Ep1jdnODeSEflnFe2fCj4yXfiHU4vD/iPy2vJQRbXaAJ5jAZ2uBwGIzgjGcYxzXqXibw3p/iTw9d6Xd2kUiyRMsZKDMbkcMp7EHFfEthdvY6ja3cRxJbzLKpHYqQR/KgD7d8WXtxpng3XL+zk8q5trCeaF9oO11jYqcHIOCB1r5x8J/F7x1qXjLQ7C71zzLW51CCGaP7JANyNIoYZCZ6E9K+h/Hf/JPfEv8A2Crr/wBFNXyB4E/5KH4a/wCwra/+jVoA+3elfIX/AAur4hH/AJmH/wAk7f8A+Ir6/r4AoA+3/Hf/ACTzxL/2Crr/ANFNXxTp99caXqVrqFo4S5tZkniYgHa6kMpweDyBX2t47/5J54l/7BV1/wCimr488GQQ3Xjrw/b3ESSwy6lbJJHIoZXUyqCCD1BHagC1J8Q/Gcs/mt4p1ffnOEu3Vf8AvkHH6V6t8NPjhfXGp2+i+KpY5Y7hhHDqGAjI54AkxwQeBuwMd89vdb3SdO1GwexvLC3ntWUqYZIwVx9McV8N6tYPpWsXunSHL2lxJAxxjJRipP6UAfeA6mlqjot//amg6dqBGPtVtHPj03KG/rV6gDhfiR8SrDwBp8YMYutVuFJt7UNjj++57L+pPA7kfNWpfFDxtqk5lm8S6hFnottKYFH4JisrxZrcviLxXqmrTOXNzcMyZ7IDhB
+CgCvq/wCHngPTPB/hyzRbKJtTeJXurlkBdnIyQCeijOABjpnqSaAPFPhZ4+8a6n460vRn1ye6tJ5SZ0ugJT5aqWbDEbgcA9+pr6E8WeKdO8HaDNq+pyERIdqRpgtK56KoPfg/gCa0E0uwTUBfrY2wvApQXAiUSbTjI3YzjgflXzJ+0DrMt98Qhppc+Rp1uiKnYM4DsfqQVH4CgDE174v+NNcuXkGsS6fCT8sFiTEqD03D5j+JpuhfF7xrodykn9szX0QPzQ3zecGH1PzD8DXrXwC8G6fF4Y/4Sa5tY5r66mdYHkUN5Uanb8uehLA8+mKT4+eDNPl8Mf8ACS2trFDe2kqLO8ahfNjc7fmx1IYrg+maAPT/AAl4q07xj4fh1fTXJjf5ZI3xuicdVbHf+Ywa4L42+O9e8Fw6MmhzxQNe+f5sjxByNnl4xnIH3z29K8x/Z/1iWx+In9nBz5Oo20iMnYsg3g/UAMPxrqP2menhb/t7/wDaNAHlV18SfGt25aTxRqin/plcNH+i4rQ0P4u+NNDuUlGtXF9GD88N85mVx6ZPzD8CK9D/AGaEVm8TsUBZfsuDjkf67/Csn9ojw9ZaV4g0rU7O3jgbUIpFmEagBnjK/MfchwPwFAHOeIfjR4z12d/K1I6ZbE/LDZfIQP8Af+8T+OPYVnaT8VPG2k3Cyx+Iry4APMd3IZlYenzZI/DBr0z9myK2nTX2ktoGngeApMYwXUMHyA3UD5elY/7RHh6y0rxDpep2dvHA2oQyLMI1ADPGV+bA7kOPyoA958HeKbPxj4ZtdZsgUWUFZYicmKQfeUn+vcEHvW9Xg/7NFyzWHiO1LHZHLbyAdgWDg/8AoI/Kvb9QvoNL026v7ptsFtC00jeiqMn+VAHyl8bvEP8AbnxIu4I33W+mqLNMH+Icv+O4kf8AARXr/wAAfD39leBG1OVMT6pKZMnr5SZVB+e4/Q18vRpNeXaRoGlnmcKB1Z2J/mSa+8rW1hsrOC0t0CQwRrHGo/hVRgD8hQBNRRRQAUUUUAFFFFABRRRQAUUUUAFfAZBRirKQwPIPBBr78rwz4r/Bi61jUZ/EPhmNGuZjvurIsF3t3dCeMnuDjnnvQBa0/wCAngPV7CG+sNa1i4tplDJJHcQkEf8Afrj6VO/7O3g2JGeTVNaVFGSzXEIA/wDIVfNt3p19p8pivbK4tpAcFJomQj8CKuab4Z13WHCabo9/dFjjMUDMPxOMD8aAPtHwx4cs/Cfh200SweZ7W1DhGmILncxY5IAHVj2FfDFff1fEP/CCeL/+hV1z/wAF8v8A8TQB9v18QeO/+Sh+Jf8AsK3X/o1q+3sivj3xp4L8U3XjrxDc2/hrWJYJdTuXjkjsZWV1MrEEELggjkGgD6j8C/8AJPPDX/YKtf8A0UtfEs00lxNJPM7ySyMWd3bJZjyST6k19u+DIJrXwL4et7iKSGeLTbZJI5FKsjCJQQQeQQeK+X/G/wAJfEPhXUrh7WwuNQ0osTDcwIZCqdhIBypA74wexoA91i+BngGK2ET6VNNIBjznu5dxPrgMFz+FfJFSGNw+wo27ptxzWvp/g/xJqxH2DQdRuFP8SWzlR9WxgUAfYnjv/knniX/sFXX/AKKavkDwJ/yULw1/2FbX/wBGrX2L4vtJ9Q8Fa7ZWsRlubjT7iKKMdXdo2AH4kivi++8M67pb4v8ARtQtiP8AntbOv8xQB9yXFzBZ20lzczJDBEpeSSRsKoHUk9q+FNY1B9W1q/1KRdr3dzJOwHYuxbH61Bb2lzdTCO2t5ZpM4CxoWbP0Fe3fDL4Jag+pQaz4rg+z20LCSKxfBeVgcgyD+Ff9k8nvjuAe8eH7BtK8N6Xpz/etLSKA/VEC/wBK+EsV9+9K+O/GXwt8R+E9RnAsLi800OTDeQRl1Kdt+PunHUH9aAPsTcCMgg18ClWQlWUhhwQR0NfSH7OGn+T4X1m/ZSGnvFh9OI0B/wDah/Kq/wAWPg1c6zqM/iHwyi
PdTfPdWZIXzGxy6E8ZPcHqee9AFnTvgL4C1ewhvtP1vWLm1mXdHLHcQkEf9+vzHbvViT9nfwZDG0kuq60iKMszXEIAHufLr5uvNPvdOm8q9s57aQcFJomRh+BxVvTvDGu6u6rp+j39yW7x27EficYH1oA+0fC/h2z8J+HbTRLB53trUNsadgXO5yxyQAOrHtWvRRQByPxSsW1H4YeIYFBJFqZcD/YIf/2Wvl34XXy6d8TvD87HAN2sOT28wFP/AGavs6SNJYnjkUMjgqynoQe1fB17ZXWkapcWdwpiurSZo3HdXU4P6igD7zr4h8d/8lC8S/8AYVuv/RrV9p6ZqNvq+l2mo2j77e6iWaNv9lgCP518keM/Bfiq68deIbi38M6zNBLqdy8ckdhKyuplYgghcEEc5oA+o/An/JPPDX/YKtf/AEUtfMHxt/5K9rv/AG7/APoiOvqLwZBNa+BfD1tcRSQzxabbJJHIpVkYRqCCDyCCMYr51+L/AIT8R6n8UtZvLDw/qt1ayeTsmgs5HRsQxg4IGDyCPwoA9n+CX/JINC/7eP8A0okr5j8fxPH8RfEqspBOp3Dc+hkYj+Yr6k+ENheaZ8LNGs7+0ntbqPz98NxGUdczyEZU4IyCD+Ned/Gn4V6nqmrv4n0C2a6aVALy2j5k3KMB1H8XAAIHOeecnAB6n8OL6C/+G3h2W3kV0SwhhYg9GRQjD8CprqK+B5IJo5fKkhkSQcbGUg/livo79nix1fTtJ1mHUNOurW2lkilt3niKCQ4YNjP0XmgDw3x9E0XxE8SKwwTqly34GRiP0NfWfw3voL/4b+HZYJFdY7CGFiD0eNQjD81NeXfGn4WanqmrN4n0C2a6eVALy2j/ANZuUYDqP4uAAQOcjPOTjwF4JopfKkhkSQHGxlIP5UAfe+RjPaviLx3/AMlD8S/9hW6/9GtXun7PFjq+naRrMWo6ddWtrLJFLbvPEUEhIYNjPXoteF+O/wDkofiX/sK3X/o1qAPr7wIQfh54aGf+YVa/+ilrwj9orXrXUPE2maTbypI+nQyGYqc7HkK/KfcBAfxrye70jUrCGCe70+6gimRZYpJYWVZFYZDKSMEEEVr+GfAfiTxZdxw6ZpkxiJ+a5lUpEg9Sx4/AZNAHs/7NlhJFoWu6iy4Se4jhU46+WpJ/9GCvUfFPg7QvGNitrrVkswQny5VO2SIn+6w6fTkHjir2gaJZeG9BtNIsEK21qmxc9WPUsfcnJPua+SPGGl+LLDxhqur3WmanYPcXcsyzLG4ADMSMOvHTHQ0AdT44+BOpeG9MutW0q/TULG2RpZY5F2SxoOSfRgByTwfauS+Fuq3Ok/ErQpbd2X7RdJayAHhkkIQg/mD9QKy59f8AE+twGxuNW1i/jf8A5YSXEsob/gJJr174PfCXVbLXYPEniG1azS2y9rayD9474wGYfwgZyAeScenIB7frviPR/DNiLzWtQhs4CdqtITlj6KByx+go0LxHo/iaxN7o2oRXkAO1mjyCp9GU4IPsRXh37RPhzVbjVtO123t5Z9PjtPs8pjUsIWDs2Wx0BDdeny0v7Ovh3VbfVdQ12e3lh0+S1+zxNIpAmYurZX1ACkZ6fN9aAJP2kPEPOk+HIn9bycfmqf8As/6VR/Zz8O/aNb1LxBMmUtIxbwEj+N+WI9wox/wOvLfGevN4n8Y6rrDElbmdjHntGPlQf98gV9a/Dbw7/wAIv8P9J05023Bi864BHPmP8zA/TO3/AIDQB1deAftH+Iv+QT4cif1vJwPxVP8A2f8ASvf6+H/GWvN4m8Y6rrDElbmcmLPaMfKg/BQKAPUf2cvD32nXNS8Qyx/u7SIW8BI/5aPyxHuFGP8Agde2ePdEfxF4E1nSoRmae3JiHq64ZR+agVX+G3h3/hGPAGk6c6bLgxCa4BHPmP8AMQfpnH4V1ZoA+FfD+tXXhvX7HWbPHn2kokUHow7qfYgkH619Az/tIeH1st1vompyXWP9XI
Y0jz/vgk/+O1veMPgp4a8VXst/E02mX0pLSSW4GyRj1LIe/uMZOc5rjx+zvpGlQzX2seJ7iSyt0aWTyrZYSFUZOWLN2HpQB8+n2r7e8d/8k88S/wDYKuv/AEU1fE1vC1zdQwIpZ5HVFUdyTivtzxnBNdeBfENvbwyTTzabcxxxRqWZ2MTAAAckk9qAPj3wJ/yULw1/2FbX/wBGrX2F42jabwF4iiQZd9LuVUDuTE1fLPgzwX4ptfHXh64uPDesQwRalbPJJJYyqqKJFJJJXAAAzX2AwDqQwBGMEHvQB8OeE7yHTvGOh31ywSC21CCaRj0CrIpJ/IGvuXvXxr4x+GXiLwffypNYzXVhuPlXkEZdGXtux90+x98ZGDXKW1ld3switbWaeVjgJFGWJ/ACgD7V8d/8k88S/wDYKuv/AEU1fIPgT/koXhr/ALCtr/6NWvq7UI7+8+Dt1BNaz/2nNoLxvb7CZDMbcgrtHJO7tXzb4M8F+KrXx14euLjw1rMMEWp2zySSWEqqiiVSSSVwABzmgD7Cr4Br7+zjmviH/hBPF/8A0Kut/wDgvl/+JoA+xfGMDXXgjX7dAS8um3CKB3JjYV8X+Gr9NJ8U6RqM3+qtL2Gd/ojhj/KvuknJxXyL46+EniDwpqM72djPf6SWJhuIELlF9HA5BHr0P6AA+usjFfANSCORn2+W5fONuOa3NN8D+KdXYCx8P6jKp/j+zsqf99EAfrQB9f8Ajv8A5J54l/7BV1/6KavkDwJ/yULw1/2FbX/0atfYvi+0n1DwVrtlaxGW5uNPuIoox1d2jYAfiSK+L77wzrulvi/0bULYj/ntbOv8xQB9yXFzBZ20lzczJDBEpeSSRsKoHUk9q+FNY1B9W1q/1KRdr3dzJOwHYuxbH61Bb2lzdTCO2t5ZpM4CxoWbP0Fe3fDL4Jag+pQaz4rg+z20LCSKxfBeVgcgyD+Ff9k8nvjuAe12k9j4O8F6eurXkNpb2FnDBJLKwABVAuPc8dB1p3h3xj4e8WRSPoeqQ3flY8xQCrrnplWAIHvivNv2g/DmqavoelX+nwS3EVhJL9ojiUsQHC4fA7Dafpn61wHwH8OardePbfWo7aVNOs45fMnYEI5ZCgQHucnPHTH0oA811vT30nXtQ06RSHtbmSEg/wCyxH9K+0fB/iWz8V+GLLVbWZHMkaiZQeY5ABuU+hB/TB6GuB+LfwkfxfINb0QxprCIElic7VuVHTnoGA454IwOMV816ho+p6TIYtS066tJF42zwsh/UUAfd2QOpFfKXx90yWy+Js92yny7+2imRscHavlkf+OfqK5LwnpWuTeIdOvNK0m9u3tbmKYeRAzAbWDZJAwOnevq/wCIHgaz8eeHjp1w4huYm8y1uNuTE+MdO6noR9D1AoA5H9n/AMQ2t/4EGi+aovNOlfMWeTG7Fw30yzD8PpR8f/ENrY+BDovmqbzUZkAiz8wRGDlsemVUfjXzvrvhLX/DV09vq2lXVuVON5QmNvdWHB/Om6J4T17xJdJBpGlXVyWON6xkIvuzn5QPqaAO1+Ammy3nxPtrpVOyxt5pmbsMqYx/6H+ldh+0z08Lf9vf/tGvUvh74Gs/Afh0WELia7mPmXVxjHmP2x6KBwPxPevPf2htB1jWx4c/snSr6/8AJ+0+Z9lt3l2Z8rGdoOM4OPpQBn/sy9fFP/bp/wC1qP2mv+ZX/wC3v/2jWh+zzoWr6J/wkf8Aa2lX1h532byvtVu0W/Hm5xuAzjI/Oj9obQtX1v8A4Rz+ydKvr/yftPm/Zbd5dmfKxnaDjOD+VAGf+zL/AMzT/wBun/taj9prr4X/AO3v/wBo1ofs86FrGif8JH/a2lX1h532Xy/tVu8W/Hm5xuAzjIz9RR+0NoWr63/wjn9laVfX/lfafM+y27y7M+VjO0HGcH8jQBn/ALM3TxTn/p0/9rVv/tCeIv7O8G2+ixPibU5vnH/TKPDH/wAe2frVD9nnQtX0T/hI/wC1tK
vrDzvs3lfard4t+3zc43AZxkfnXlfxg8Q/8JD8SNSkR91vZkWcP0ThvzcsfxoA1fgT4e/tn4hRXsibrfS4zcHPI3n5UH1ySw/3a+rq81+Bvh3+xPh1b3UibbjU3N0+Rzs+6g+m0Bv+BGvSh70AFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAJj3oxS0UAFIBjvS0UAJijbS0UAJilxmiigBMe9GKWigBMUtFFAARmkAxS0UAFJilooATFLiiigAoxRRQAmKWiigArwv4+fD97yAeLtNiLTQIEv0UctGPuyf8B6H2x6GvdKKAPGv2fIvEdr4euotQs3i0SRhNYyynaxY/e2r1KHg54Gc4zk49lpMUtACY560Y9TmlooAQDFKeaKKACkxzmlooATHuaWiigBMV8R+Ov+SheJT/ANRW6/8ARrV9u0mKAMDwL/yT3w1z/wAwq1/9FLW/ijApaAExS4oooAKTFLRQAm0GuX+IniD/AIRbwHqupq+24WExW5HXzH+VSPoTn8DXU0mKAPh3wjoL+JvFml6MgbF1OquR1EY5cj6KGP4V9x0YooA5f4ieIf8AhF/Aeramj7Z1hMcB7+Y/yqfwJz+FfInhDQX8TeLtL0dASt1OqyEdVjHLn8FBNfcZGaTHOaAFpsjiONnIOFBJwMninUUAfCFnqmqaPKwsr68sZAfm8mVojn3wRT9R17V9YCjU9Vvr4KcqLm4eTB/4ETX3XiloA+cPgz8LNQm1q28S65aPbWdqfMtYZ1w80nZtp5CjqCepxjIr6P8A0oxRQAm3ilHFJmlFACY5paKKACiiigApMdaWigBMUEZpaKADHvSYpaKAExS0UUABGaQDFLRQAUgUClooATHHWloooATFLRSZwcUAGKXHvRRQAUhGaWigBMe9GPelooATFGKWigDP1vVYdC0K+1W4P7mzgeZh0ztBOB7npXwzZWc+o6hb2Vsu+e4lWKNfVmIAH5mvvbqMUmM0AJHGkUaxRqERAFVQMAAdAKdRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFVrvULKwXfeXlvbL6zSqg/WsyPxl4WllEUfiXR3lPARb6Ik/huoA3KKQHP0o780ALRSUtABSHijPHXtXkf7RMskfw8svLkZd+qRq20kZHlS8H24oA9c5yRS18wfs5f8lCv/APsFSf8Ao2Kvp7nk0AcP8VfGt/4D8LW+p6fbW9xPNeLbbbgMVAKO2cAgn7g7964n4P8AxM8S+NvG15ZavcQG0j095lhhhCAOJIwDnlujHv3rQ/aO/wCSe6f/ANhWP/0VLXnX7Pl3bWPjvUZru4hghGlSZklcIo/exdzQB9SUVkWfinw9qMvlWOvaXcyE4CQXcbnP0DVr0AFFFQ3V1b2cJnuriKCFfvSSuFUfUnigCaismx8TaBqc/kWGuabdzf8APOC7SRvyBJrWoAKKSqF5rukacxW+1WxtiOonuUTH5mgDQorGt/Fvhu8lEVr4i0meQnASO9jYk+mA1bIoAKKKKACiioLm8trOLzbm5hgj/vSuFH5mgCeisL/hNPCu/Z/wk+jb/wC79viz/wChVto6yIro6srDIKnINADq+XfEvx58Vz+IJ20W4hstNjlKxR+QkhkUHq5YE5PoMYz+NfUR6V414j/Z80nWNem1Gw1aXTop5DJLbiASKGJydh3DaD6c4+nFAHpfhLxBH4p8KadrccXlC7i3NHnOxgSGGe+CDXhnjD47eKdO8SatpFhbabBFZXk1skpiZ3IRyuTlsdvSvoLSdMtNG0m102wiEVpbRiOJAc4A9+59TXxZ47/5KF4l/wCwrdf+jWoA+x/Cl9can4O0PULuTzLm60+CaZ9oG52jVmOBwOSeleP/ABK+M/iLwv4z1DQNLtNPWK2EeJpo2dzujV/7wHVsdK9Y8C/8k98Nf9gq1/8ARS18wfGz/kr2uf8Abv8A+k8dAH0h8MNc1DxJ8O9L1f
VZhNe3HneZIECg7ZnUcAAdABXXVwHwS/5JDoX/AG8f+j5K7+gAoqlf6xpmlKG1HUrSzU9DcTrGD/30aTT9Y0zVkL6bqVneqOrW06yAfkTQBeopOaOvQ9aAFooHSk/GgBaKKTPPFAC1Fc3EVpaTXNw4jhhQySOeiqBkk/gKk9a8r+PniL+yPAX9mxPi41SURYzz5a/Mx/PaP+BUAfPB8deMZpePFGtlnPAW/lHJ7ABv0r7UgjSw0+GJ7hmSGJUMs75ZsDGWY9T7mvk74KeHf7e+JFlJIm6304G8kyOMqcIP++yp/A161+0d/wAk8sP+wrH/AOipaAPRZfGHhm3OJ/EekRH0e9jH82qWz8TaDqDhLLW9NuWPRYbtHP6GvibRNG1DxDq0Gl6Vbm4vZ9wjiDqu7apY8sQBgAnr2ra134c+LfDdm13quiXEFsv3pUZZVX/eKEgfjigD7T5xWL4v8Qp4U8J6jrckXm/ZItyx5wGYkKoJ9CxAr5T+HfxF1PwRrMGJ5JdIkcC5tCSV2k8so7MOvHXGK+utV0uz1rSrrTL+LzbS5jMciZ6g+nofQ9qAPmzwz8ePFUPiG3Ot3EN7p00oSWPyEQxqTyVKgHI9Dn+tfRviHU5NG8M6rqsUaySWVnNcKjHhiiFgD+VeXeHP2ftI0XXodSvtWl1GKCQSw23kCNSQcrvO47hnnAxn6cV6J46/5J54m/7BV1/6KagDwjQvjf4x17xlolhJJZW1rdahBDLHb2/3kaRQwyxYjgnkV9L18Q+Bf+SheGv+wra/+jVr7fNAHypqH7QPja7yLc6fYjsYbfcR/wB9lv5V9VV8A19/UAFFIT3rHufFnhuynNvdeIdKgmHBjlvY1YfgWzQBs0VHFNHcRLLDIkkbchkYEH6EU8nFAC0U3nI9KXPFAC0Un60UALXzP45+NHiuHxe1pZw/2TDplyQbVgGaYqefMPdSOw4wep4NfTFeEftF+FlksrDxRbx/vImFpckDqpyUY/Q5H/AhQB7TpGq2muaRa6pYSiW1uYxJG49D2PoR0I9avV86fs6+KWt9VvvDM7/urlftNuCeBIvDAfVcH/gFfRdABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFHaijtQB8BV2178JPHen2jXM/h2cxKMt5Uscrf98oxP6VxFff1AHxP4M8b6v4J1dLzTp2MJYfaLVmPlzL6Edj6MOR9Mg/U3xG0ePxb8NdTgtiJme3FzasvO5lw64/3gMf8Cr5m+LGmwaV8UNetbZQsXnrMFHQGRFkOPbLGvpP4O3Ml18J9BklJLCKSMZ9EldR+gFAHy14H1z/AIRvxtpGrFtscFwvmn/pm3yv/wCOlq+2Lq5isrOe6nbbDBG0kjeiqMk/kK+IvGPh+Twt4u1LRpAdttMRGT/FGeUP4qQa968e+PF1D4BWmoJKPtWsJHavtPRx/rePT5GH4j1oA+bIo5J50ijUtJIwVVHUk9BX0z+0PDHb/DXS4IlCRx6nEiKOgAhlAFeV/A/w+db+JFpPIm6301Gu3JHG4cJ+O4g/8BNer/tHf8k8sP8AsKx/+ipaAPm3TNJ1HWLhrfS9Pur6dULtFawtKwXIGSFB4yRz7iuv8GeDfFNr468Pz3HhrWIYYtSt5JJJLCVVRRIpJJK8ACuj/Zx/5KHf/wDYKk/9GxV9P0AeQftHf8k90/8A7Csf/oqWvnXQfD2reJ9QNjo1lJeXIjMhRCBhcgZJPA5YfnX0V+0d/wAk90//ALCsf/oqWvP/ANnH/koWof8AYKk/9GxUAefeI/BniHwnJGNc0uWzEpwjlldGPoGUkZ9s17J+z742urmW48KX0zSxxxedZs5yUAIDR/TkEemDXcfHGFJfhLq7sMmJ4HXPY+ci/wAmNeDfBFivxc0QDownB/78SH+lAH0/4y8UW3g/wte61cjf5C4jizgySHhV/E9fQAntXx
lPPq/ijXDJK1zqOp3cmB1d3Y9gB29hwK9z/aXuZEsfDlqCfLklnkYdsqIwP/QzWd+zXYwyar4gvmUGaCGGJD3AcuT/AOgLQB5h4h8A+KfC1ql1rGjzW1u5AEoZZEBPQFlJA/GvT/gb8SrtNUi8J6xcPNbTgixllbLRuBny8nqpAOPQ8Drx79qdlDqWl3dhcKGguIWikUjgqwIP86+FLG5ks7+3uoSRLDKsiEdiCCP1FAH1v8bf+SQ67/27/wDo+OvlDRtHv/EGqw6Zplsbm9n3eXEGALYUseTgdATX1f8AG3/kkOu/9u//AKPjrwD4Jf8AJXtD/wC3j/0RJQBi+IfAHinwrbLda1o01tbsQvmh0kQE9AWQkA/Wu++A3ja70/xKnhi6nZ9Ovt3kKx4hlAJ+X0DYII9ce9e7/ECFJ/h14kRwCBptw4z6rGWH6gV8jeAnKfETw0VOD/alsPwMqg0AfblFeAftNf8AMrf9vf8A7RrwCgD7/FfAFe//ALM3TxR/26f+1q8AoA7qX4O+P4rdpn8OSlAMkJPEzfgoYk/gK57w94k1bwnqyahpN29vOhwy/wAMg7qy9CPr9RivuWvlP4/6dBYfEoywqFN5ZRXEgA/iyyfyQUAfTuiava69o1nqtk5a2u4hKmeoz1B9wePwr4w8d/8AJQ/Ev/YVuv8A0a1fQ37PFzJP8N5o3JKwahLGmewKI382NfPPjv8A5KH4l/7Ct1/6NagD6/8AAn/JPPDX/YKtf/RS18geO/8AkoXiX/sK3X/o1q+v/An/ACT3w1/2CrX/ANFLXyB47/5KF4l/7Ct1/wCjWoA+v/Av/JPfDX/YKtf/AEUtfMHxt/5K9rn/AG7/APoiOvp7wL/yT3w1/wBgq1/9FLXzD8bP+Sva5/27/wDoiOgD3/4Jf8kh0L/t4/8AR8lUfjH8Q5fBWiQ2emOq6vf5ET4B8iMfefB784H4ntir3wS/5JDoX/bx/wCj5K8H+OlzJP8AFjU43JKwRQRp7Dylb+bGgDj9D8Oa14pvmtdHsJr24A3vsxhfdmJwPxIo1zw5rXhe/W11iwmsbj7yb8YYDurDg/UGvp34E2UNr8KrGaNcPdzTSyH1YSMg/RBR8drKG6+FeoTSqDJazQyxH0YyKn8nNAEfwb+IcvjTRJrLU3DavYACR8Y8+M8K+PUdD+B718+/E7RD4f8AiLrVmqbYmnM8XHGyT5wB9M4/Ctj4G3EkHxY0uNCQs8c8b+48pm/morv/ANpHQd9rpHiCNOY2azmb2PzJ/J/zFAHqfw81j+3vh9oeoFt8j2qxyN6unyMf++lNfJfj/Wv+Eh8e61qatujluWWI+safIn/jqivWvgb4vi0jwF4niuWBGlg3yKx+8rIRtH/AkH4tXjOkeH7zWdM1i+tlzFpdsLiXjqC6rj8izfRTQB9ffDnWP7d+Hmh35bc7WqxyHuXT5GP4lSfxr5D8Xav/AG94w1fVA25Lq7kkjP8AsbjtH/fOK9h+CHi+PR/AHimKdhnTFN9GrH7wZMbR/wACQf8AfVeVfD7QD4l8eaRpbLuhecSTD/pmnzN+ikfjQB9i+HNITQPDWm6THjFpbJESO7Acn8Tk/jXy18bfEP8AbnxIvIY33W+nKLNP95eX/wDHyw/AV9XajfwaVpl3qF022C1haaRv9lQSf5V8IxRzXl2kUYaSaZwqjqWYnA/HJoA+o/gF4e/snwCdSkTE+qTGXPfy1+VB+e4/8Cqr+0d/yTyw/wCwrH/6Klr1q1tobO0htbdAkMCLHGg/hUDAH5V5L+0d/wAk8sP+wrH/AOipaAPIPgl/yV7Qv+3j/wBJ5K+rdd1Kx0jQr2+1J0SzhiZpd+MMMfd9yemO+a+ExirVppt9qEgjsrK5uXPRYYmcn8AKAKlfb3jv/knviX/sFXX/AKKavFPht8D9SfVbbWPFUC21rAwkSxYhnlYcjeBwq9OOp6YFe1+O/wDknvib/sFXX/opqAPkHwJ/yULw1/
2FbX/0atfX/jv/AJJ74m/7BV1/6KavkDwJ/wAlC8Nf9hW1/wDRq19f+O/+Se+Jv+wVdf8AopqAPkDwJ/yULw1/2FbX/wBGrX2/2r4g8C/8lC8Nf9hW1/8ARq19vnpQB8AV9/V8A19/etAHyz8Y/iVd+I9cudC064aPRrRzG3ltj7TIpILE91B4A6HGe4xyejfDbxhr+mrqOm6FPNaMMpIzIgceqhiCw+ma5Pmvv1RgADjjigD4a0LX9X8J60l9ptxJaXcLYdTwGAPKOvceoP8AOvsS6Fl478ATC2cNa6tYt5bMPull4z7q36ivnj9oKyhtfiUssSgNdWMU0hHdtzp/JFr1H9ni5kn+G00chJWDUZY4/ZSiN/NjQB85eEtXOg+L9I1UttW2uo5HP+xu+Yfiua+yvGWsjw94M1fVd+17a1doz/00Iwg/76Ir5B+IGg/8I1481jSwmyKO4Lwjt5b/ADJ+SsB+Fet/G3ximq/DnwzFA4B1dVvZVU9FVB8p/wCBP+a0AeMeE9X/ALB8XaTquSEtbqOR/dNw3D8s19f/ABE1f+wvh7rt+G2utq0cbejv8in/AL6YV8f6v4evNF07R7y5XEeqWpuYuMYG9lx9cBW+jCvY/jb4wTWPh74WigcZ1RVvpFU9AqAbT/wJz+K0AeafC/Q/+Eg+I+i2bLuhScTygjI2R/OQfY4A/GvrDxrpiaz4J1rT2UMZrOUJn++Fyp/BgK8l/Zu0HZaav4glTmR1s4GPXC/M/wCeU/75Ne6ylRE5f7oU5+lAHw/4O1JtI8aaLfqcCC9iZv8Ad3AMPyzX3H+Oa+BIgzSoF+8WGD7199mgAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACjtRR2oA+Al69M+3rX1m3x28BfYzONSuDKBkW/wBkk3k+mcbf1r5MBxXs91+zf4jScraazpUsOeGmMkbfkFb+dAHlfiLWrjxH4hv9YuVCzXkzSlF6KD0UewGB+FfZ3gzRW8O+DNI0mQAS21siy46eYRl//Hia4LwH8DdM8LX0WqatdDU9QiO6JRHthib1weWI7E4x6ZANem6lqdno+nS6hqNwlvaQgGSVuignHP4kUAeQftDeFDf6JaeJbaPM1gfJuMd4WPB/Bj/4+fSvnJ7mZ7SK1aRjBG7SJGTwrMAGI+oVfyr688V/FDwdpvh27kGrafqjyRMiWdtOspmJGNrbSdqnPJPbPWvk/QtGuvEGu2Wk2S7p7qVY144XPUn2AyT9KAPpz4E+Gzonw+S+lTbc6rIbg5HPljhB+WWH+9TP2gLCS8+GRmRSRaXsU747Ahk/m4r1FEWONURQqqMADoBTbi3iureW3njSWGVCkkbjKspGCCO4xQB8dfCzxjb+CPGkepXqSNZSwtb3BjGWVWIIIHfDKv4Zr6GHxr8DzX1raWmoz3U1zMkK+XbOoUswUE7wvAz78V55rn7N14Ll20HW4GgJysV8rKyj03KDu/75FQaV+zfrBuFbU9esrdFOc2iPK34bguKAOu/aO/5J7p//AGFY/wD0VLXAfs4jHxCv8/8AQKk/9GxV7d8RvAv/AAsDw9b6V/aP2DyrpbnzfI83OEdduNy/385z2rA+HHwgPgDxDcasdd+3+datb+V9k8rGWRt2d7f3MYx3oA0PjZ/ySHXf+3f/ANHx14B8Ev8Akr2hf9vH/oiSvp7xt4Z/4THwhfaD9s+x/avL/f8AleZt2yK/3cjOduOveuA8E/Az/hDvF1jr3/CRfbPsvmfuPsPl7t0bJ97zDjG7PTtQBd+PPhqbXPAgvrWMvPpcv2hlAyTERh8fT5W+imvCvhf46HgPxSbyeKSawuY/JuY48FgM5DKO5B/QmvsbHNeKeLf2erDUbqS88OX4055CSbWVS0IJ/ukcqPbn2wOKAHePPjl4ePhm7svDdzLeX93E0Ky+S0aQhhgt84BJwTjGef18W+
HHhubxT470uwSMtAkqz3JxkLEhBbPpn7o92Fd/Y/s3+IJLgDUNb0yGHPLQeZK2PoVUfrXu/hXwho/gzSf7P0e28tCd0krnMkrert3/AJDsKAOd+NpB+EOuj/r3/wDR8deAfBP/AJK9of8A28f+iJK+nvG3hj/hMfCF9oP2v7H9q8v9/wCX5m3bIr/dyM5246964DwT8Df+EO8XWOvf8JH9r+y+Z+5+xeXv3Rsn3vMOMbs9D0oA9A8df8k98S/9gq6/9FNXyB4FH/FwvDX/AGFbX/0atfZ2u6Z/bXh7UtK87yfttrLbebt3bN6Fd2MjOM5xkV5BoX7PX9i+IdN1X/hKPO+xXUVz5X2Dbv2MGxnzDjOMZxQB7fnPTmiiigBMHnFfAVff4r4AoA+s7X47+A57QTS6hc20mMmCW0kLfTKgr+tfOXj3xZJ418XXestE0MT4jgiY5KRrwAfc8k+5Nejah+zdrsc5Gm63p08PY3KvE35KG/nXT+Dv2fbHSr2O+8R3qai8ZDJaxIViyP7xPLD2wB656UAdj8H9Bl8P/DbTILhClzchrqRSMEFzkAjt8u38a+W/HfPxD8S/9hW6/wDRrV9ugYrxHXf2ef7a8Q6nqv8AwlHk/bbuW58r7Bu2b3LYz5gzjOM45oA9Q8C/8k88Nf8AYKtf/RS18i/EGB7f4jeJEkUgnUp3/BnLD9CK+ydC0z+xfD2maT53nfYrWK283bt37EC7sZOM4zjJrjPiF8JdK8dypffaHsNURQn2hE3LIo6B1yM47EEH60AZXwj+Juja1o+j+GHM0Or29sLdYzGSkixp94MMgfKvfHNePfHG3eH4s6s7KQsyQOp9R5KL/NTXq3wu+EGq+B/F0+rale2FzD9leGEQM+8MzLyQVAAwCOp612Pj/wCHGk+P7OJbx3tr2DPkXcQyyg/wsP4l7449iMnIB518E/iZo9roWn+EdQ82C+WdorVhGXSXzHLAEjODliOeMY5rD/aK8Ny2viKy8RRRk215EIJWA+7KmcZ+q4x/uGt/wN8DtY8LeO9P1m81HT7mytGdysZcSElGC8FcdSD1r2y+sbbUrKazvYI7i2mQpJFIuVdT2IoA+b/g/wDFfT/CWnzaHr3mpYtKZbe4RC/lE43KQOccZGM8k+tJ8Yvivp3i3T4dD0AyvZCQSz3DoU8wgHaqg845ycgcgVq65+zdc/aXfQNbhMDHKxXylWT23IDn8hTdE/Zuuzco+v63AsAOWisVZmb23MBj8jQBR/Z28NzXXiW88QyRkWtlEYI2I4aV8ZwfZc5/3hXu/jPQR4n8G6ro5AMlzAwiz0Eg+ZD/AN9AVq2Fha6ZYw2Vjbx29tCuyOKNcKo9qsUAfA8U8sMcyRSMqTJskAPDruDYP4qD+Ar6g+B3heO1+GMsl5CCdaeR5FI5MONij6EBj/wKsPU/2b7e81S7ubTxJ9kt5pmkjt/sG/ylJyFz5gzjpnFe329tFaWkVtAgSGFBHGo6KoGAPyFAHwdvubI3NsS8RceVMnTIDBtp/wCBKD+Ar339m/w6Y7XVvEcycyMLO3JHYYZ/wJ2D/gJrV8W/AODxL4ov9Zg1/wCwreSea0H2LzNrEfMd28Zycnp3r1Hw7olv4b8OWGjWpzFaQrGGxjee7Y7EnJ/GgDzH9obxD/Z3g+10WJ8TalPlwP8AnlHgn/x4p+RrzP4D+Hf7Z+IKX8ibrfS4jOcjjzD8qD65JYf7tZPxf8QnxF8SNSlR91vZt9jg9AEyGx9WLH8a96+B3h3+w/hzbXMibbnU3N2+RzsPCD6bQG/4EaAPSa8g/aN/5J5Yf9hWP/0VLXr9cf8AEfwL/wALA8PQaV/aP2DybpbnzfI83OEdduNy4+/nOe1AHzj8Ev8Akruh/wDbf/0RJX19XkHgn4Gnwf4vsdfPiP7Z9l8z9x9i8vdujZPveYcY3Z6dq9foAK5/x3/yT3xL/wBgq6/9FNXQVn67pn9t+HtT0rzvJ+3Wktt5u3
ds3oV3YyM4znGRQB8Y+Bf+SheGv+wra/8Ao1a+xPGUD3XgfxBbxgmSXTbhFA7kxsBXlWhfs8/2J4h03Vf+Eo8/7FdRXPlf2ft37HDbc+YcZxjODXt/86APg/RdRfR9d0/U0Te9ncx3AQnG4owbH6V9reGPFWk+L9I/tPR5nltg5jYvGyFXABIwRz94cjivIvE37OcNzeS3PhzVVtY3JItbpSyofQOOcexBPua9N+HXha48HeCbLRruSGS6iaR5XhJKMzOSMEgHoQOnagD4sHFfa/g3x3ovjmzmuNIkmL2+3z4poirRls4BPQ9D0J6V594y+AFjrWoTajoeoLp08zF5LeSPdEWPUrjlR3xzXVfCfwJe+AvDt3Y6jNazXVxdmbfbMxXZtUAfMoOcg/nQB8peIdDuvDniC+0i8Uia1laMnGAw7MPYjBH1r6S8P/Hvwnd6NFLrNxLYagqATQiB5FZu5QqDwffGP1rrvGngDRfHVgsGqRMk8QPkXUJxJHn3PUeoP6GvErn9m/xIs5FrrGkyxZ4aUyI2PoEb+dAHnfjjxTN408W3mtSRmJJSFhiJz5cajCj69z7k19TfCjw5L4Y+HenWdyhS6mBuZ0IwVZ+dp9wu0H6VzHgn4D6T4eu49R1q5/tW8jYNHHs2woR3IPLn64HtXrg6UAeB/tH+HS0OleI4k5QmznI9Dlk/Xf8AmK8GDXV89ragvMygQwR+mWJ2j6sxP1NfcHiXQoPE3hrUNFuW2x3cJj34zsbqrY74IBx7V5j4Q+AkPhjxTY61Prwv1tHMiwfYvLBbBCnPmHoSD07UAWvjr4Yju/htDcWcIB0Z0ZFUdISAhA/8dP0WvmCSeWaOFJJGZYU2Rg/wKWLYH4sx/GvvG8tIb6yuLO4QPBcRtFIp7qwwR+RrxTSP2crew1ezvLvxH9st4Jklkt/sGzzQpzt3eYcA9OlAHq3g3QV8MeD9L0cAB7aBVkx0Mh5c/ixJqt4/1ZNE8Aa5fs20paOiHP8AG42L/wCPMK6Wvnr9orxWs1zY+FraQHySLq7wejEEIp/Alv8AgS0AeUeA9LbWfHmh2KruEl5Gzjr8inc3/joNfW/jvxjbeB/DE2r3ELTuHWKGFW2+ZIc4GewwCSfQV5N+zr4TZft3im5jwrA2loT35zIw/ID/AL6r1nx34OtfHPhibR7iYwPvWWGYLu8uQZwcdxgkEehoA8y8BfHa717xLa6Pr2n2sIvJBHBPa7lCufuqysTnJ4zngkcV7lXhvgH4D3eg+JbbWNd1G1mFnIJYILTcwZx90szAYwecAdcc17lQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFGe1FGM0AfII+CfxCOP+Kf/wDJyD/4uvr6kxS0AFcV8U/DereLfBUukaO1us8k8bus7lQ6L82AQDzkL1x0rtaTAoA+SIvgb4/kmEb6PFEuceY95EVHvwxP6V738NvhlY+AbKSRpRd6tcACa524Cr12ID0Hqep/AAd7RQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAZr5B/4Ul8Q/8AoX//ACdt/wD45X16RmlxQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABUN1JNFZzyW8JnnSNmjiDBfMYDhcngZPGTU1FAHyGvwR+ILMAdBC5PLG8g49+Hr66jRI41jjUKiAKqgYAA6ClxS0AFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXgHi74A3l74lW80PUhJZ3lxuuhdNmWDccs4P8Y69cHOOvJHv9GOc0AQWdpBYWcNpaRLFbwoI441GAqgYAFT0UUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFF
ABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFF
ABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFF
ABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAH/9k='
class BackgroundIndexFileGenerator:
    """Rebuild index files for a directory tree on a daemon thread."""

    def __init__(self, dir_path):
        """Prepare (but do not start) the worker thread for dir_path."""
        self.dir_path = dir_path
        worker = threading.Thread(target=self._process, args=())
        # Daemonize so a running regeneration never blocks interpreter exit
        worker.daemon = True
        self.thread = worker

    def _process(self):
        """Thread body: regenerate every index file under dir_path."""
        _create_index_files(self.dir_path)

    def run(self):
        """Kick off the background indexing thread."""
        self.thread.start()
def _clean_up(paths):
"""
Clean up after ourselves, removing created files.
@param {[String]} A list of file paths specifying the files we've created
during run. Will all be deleted.
@return {None}
"""
print('Cleaning up')
# Iterate over the given paths, unlinking them
for path in paths:
print('Removing %s' % path)
os.unlink(path)
def _create_index_file(
    root_dir, location, image_files, dirs, force_no_processing=False):
    """
    Create an index file in the given location, supplying known lists of
    present image files and subdirectories.
    @param {String} root_dir - The root directory of the entire crawl. Used to
        ascertain whether the given location is the top level.
    @param {String} location - The current directory of the crawl. The index
        file will be created here.
    @param {[String]} image_files - A list of image file names in the location.
        These will be displayed in the index file's gallery.
    @param {[String]} dirs - The subdirectories of the location directory.
        These will be displayed as links further down the file structure.
    @param {Boolean=False} force_no_processing - If True, do not attempt to
        actually process thumbnails, PIL images or anything. Simply index
        <img> tags with original file src attributes.
    @return {String} The full path (location plus filename) of the newly
        created index file. Intended for usage cleaning up created files.
    """
    # Put together HTML as a list of the lines we'll want to include
    # Issue #2 exists to do this better than HTML in-code
    header_text = \
        'imageMe: ' + location + ' [' + str(len(image_files)) + ' image(s)]'
    html = [
        '<!DOCTYPE html>',
        '<html>',
        '    <head>',
        # BUG FIX: a missing comma here previously fused the <title> and
        # <style> lines into a single string via implicit concatenation
        '        <title>imageMe</title>',
        '        <style>',
        '            html, body {margin: 0;padding: 0;}',
        '            .header {text-align: right;}',
        '            .content {',
        '                padding: 3em;',
        '                padding-left: 4em;',
        '                padding-right: 4em;',
        '            }',
        '            .image {max-width: 100%; border-radius: 0.3em;}',
        '            td {width: ' + str(100.0 / IMAGES_PER_ROW) + '%;}',
        '        </style>',
        '    </head>',
        '    <body>',
        '    <div class="content">',
        '        <h2 class="header">' + header_text + '</h2>'
    ]
    # Populate the present subdirectories - this includes '..' unless we're at
    # the top level
    directories = []
    if root_dir != location:
        directories = ['..']
    directories += dirs
    if len(directories) > 0:
        html.append('<hr>')
    # For each subdirectory, include a link to its index file
    for directory in directories:
        link = directory + '/' + INDEX_FILE_NAME
        html += [
            '    <h3 class="header">',
            '    <a href="' + link + '">' + directory + '</a>',
            '    </h3>'
        ]
    # Populate the image gallery table
    # Counter to cycle down through table rows
    table_row_count = 1
    # BUG FIX: track whether a <tr> is currently open so we never emit an
    # unbalanced </tr> (previously one was appended even when no row was
    # open, and twice when the final row was exactly full)
    row_open = False
    html += ['<hr>', '<table>']
    # For each image file, potentially create a new <tr> and create a new <td>
    for image_file in image_files:
        if table_row_count == 1:
            html.append('<tr>')
            row_open = True
        img_src = _get_thumbnail_src_from_file(
            location, image_file, force_no_processing
        )
        link_target = _get_image_link_target_from_file(
            location, image_file, force_no_processing
        )
        html += [
            '    <td>',
            '    <a href="' + link_target + '">',
            '        <img class="image" src="' + img_src + '">',
            '    </a>',
            '    </td>'
        ]
        if table_row_count == IMAGES_PER_ROW:
            table_row_count = 0
            html.append('</tr>')
            row_open = False
        table_row_count += 1
    # Close any row still open, then the table itself
    if row_open:
        html.append('</tr>')
    html.append('</table>')
    html += [
        '    </div>',
        '    </body>',
        '</html>'
    ]
    # Actually create the file, now we've put together the HTML content.
    # Use a context manager so the handle is closed even if the write fails.
    index_file_path = _get_index_file_path(location)
    print('Creating index file %s' % index_file_path)
    with open(index_file_path, 'w') as index_file:
        index_file.write('\n'.join(html))
    # Return the path for cleaning up later
    return index_file_path
def _create_index_files(root_dir, force_no_processing=False):
    """
    Crawl root_dir downwards, writing an index HTML file into every
    directory encountered on the way.
    @param {String} root_dir - The top level directory to crawl down from. In
        normal usage, this will be '.'.
    @param {Boolean=False} force_no_processing - If True, do not attempt to
        actually process thumbnails, PIL images or anything. Simply index
        <img> tags with original file src attributes.
    @return {[String]} Full file paths of all created files.
    """
    # Paths of the index files generated so far
    created_files = []
    for here, dirs, files in os.walk(root_dir):
        print('Processing %s' % here)
        # Subdirectories and images are both presented in sorted order;
        # images are the files matching IMAGE_FILE_REGEX
        subdirs = sorted(dirs)
        gallery_files = sorted(
            f for f in files if re.match(IMAGE_FILE_REGEX, f)
        )
        created_files.append(
            _create_index_file(
                root_dir, here, gallery_files, subdirs, force_no_processing
            )
        )
    return created_files
def _get_image_from_file(dir_path, image_file):
    """
    Load the given file as a PIL.Image.
    @param {String} dir_path - The directory containing the image file
    @param {String} image_file - The filename of the image file within dir_path
    @return {PIL.Image} The loaded image, or None when PIL is unavailable
        or the file cannot be read as an image.
    """
    # Without PIL there is nothing we can do - bail out immediately
    if not PIL_ENABLED:
        return None
    full_path = os.path.join(dir_path, image_file)
    try:
        return Image.open(full_path)
    except IOError as exptn:
        print('Error loading image file %s: %s' % (full_path, exptn))
        return None
def _get_image_link_target_from_file(dir_path, image_file, force_no_processing=False):
"""
Get the value to be used as the href for links from thumbnail images. For
most image formats this will simply be the image file name itself. However,
some image formats (tif) are not natively displayable by many browsers and
therefore we must link to image data in another format.
@param {String} dir_path - The directory containing the image file
@param {String} image_file - The filename of the image file within dir_path
@param {Boolean=False} force_no_processing - If True, do not attempt to
actually process a thumbnail, PIL image or anything. Simply return the
image filename as src.
@return {String} The href to use.
"""
# If we've specified to force no processing, just return the image filename
if force_no_processing:
return image_file
# First try to get an image
img = _get_image_from_file(dir_path, image_file)
# If format is directly displayable in-browser, just return the filename
# Else, we need to return a full-sized chunk of displayable image data
if img.format.lower() in ['tif', 'tiff']:
return _get_image_src_from_file(
dir_path, image_file, force_no_processing
)
return image_file
def _get_image_src_from_file(dir_path, image_file, force_no_processing=False):
"""
Get base-64 encoded data as a string for the given image file's full image,
for use directly in HTML <img> tags, or a path to the original if image
scaling is not supported.
This is a full-sized version of _get_thumbnail_src_from_file, for use in
image formats which cannot be displayed directly in-browser, and therefore
need processed versions even at full size.
@param {String} dir_path - The directory containing the image file
@param {String} image_file - The filename of the image file within dir_path
@param {Boolean=False} force_no_processing - If True, do not attempt to
actually process a thumbnail, PIL image or anything. Simply return the
image filename as src.
@return {String} The base-64 encoded image data string, or path to the file
itself if not supported.
"""
# If we've specified to force no processing, just return the image filename
if force_no_processing:
if image_file.endswith('tif') or image_file.endswith('tiff'):
return UNSUPPORTED_IMAGE_TYPE_DATA
return image_file
# First try to get an image
img = _get_image_from_file(dir_path, image_file)
return _get_src_from_image(img, image_file)
def _get_index_file_path(location):
    """
    Build the full path of the index file for the given directory.
    @param {String} location - A directory location in which we want to create
        a new index file.
    @return {String} A file path for usage with a new index file.
    """
    # The index filename is constant; only the directory varies
    return os.path.join(location, INDEX_FILE_NAME)
def _get_server_port():
"""
Get the port specified for the server to run on. If given as the first
command line argument, we'll use that. Else we'll default to 8000.
@return {Integer} The port to run the server on. Default 8000, overridden
by first command line argument.
"""
return int(sys.argv[1]) if len(sys.argv) >= 2 else 8000
def _get_src_from_image(img, fallback_image_file):
"""
Get base-64 encoded data as a string for the given image. Fallback to return
fallback_image_file if cannot get the image data or img is None.
@param {Image} img - The PIL Image to get src data for
@param {String} fallback_image_file - The filename of the image file,
to be used when image data capture fails
@return {String} The base-64 encoded image data string, or path to the file
itself if not supported.
"""
# If the image is None, then we can't process, so we should return the
# path to the file itself
if img is None:
return fallback_image_file
# Target format should be the same as the original image format, unless it's
# a TIF/TIFF, which can't be displayed by most browsers; we convert these
# to jpeg
target_format = img.format
if target_format.lower() in ['tif', 'tiff']:
target_format = 'JPEG'
# If we have an actual Image, great - put together the base64 image string
try:
bytesio = io.BytesIO()
img.save(bytesio, target_format)
byte_value = bytesio.getvalue()
b64 = base64.b64encode(byte_value)
return 'data:image/%s;base64,%s' % (target_format.lower(), b64)
except IOError as exptn:
print('IOError while saving image bytes: %s' % exptn)
return fallback_image_file
def _get_thumbnail_image_from_file(dir_path, image_file):
    """
    Load the given image file and scale it down to THUMBNAIL_WIDTH wide.
    @param {String} dir_path - The directory containing the image file
    @param {String} image_file - The filename of the image file within dir_path
    @return {PIL.Image} The thumbnail as a PIL Image, or None if thumbnail
        generation is not possible (see _get_image_from_file). GIFs are also
        skipped - presumably to avoid mangling animated images; confirm.
    """
    img = _get_image_from_file(dir_path, image_file)
    # Bail out when loading failed or the image is a GIF
    if img is None or img.format.lower() == 'gif':
        return None
    img_width, img_height = img.size
    # Scale the height by the same ratio that takes the width down to
    # THUMBNAIL_WIDTH (float division guards against truncation)
    scale_ratio = THUMBNAIL_WIDTH / float(img_width)
    target_height = int(scale_ratio * img_height)
    # Resize in place; a failure here means no thumbnail is available
    try:
        img.thumbnail((THUMBNAIL_WIDTH, target_height), resample=RESAMPLE)
    except IOError as exptn:
        print('WARNING: IOError when thumbnailing %s/%s: %s' % (
            dir_path, image_file, exptn
        ))
        return None
    return img
def _get_thumbnail_src_from_file(dir_path, image_file, force_no_processing=False):
"""
Get base-64 encoded data as a string for the given image file's thumbnail,
for use directly in HTML <img> tags, or a path to the original if image
scaling is not supported.
@param {String} dir_path - The directory containing the image file
@param {String} image_file - The filename of the image file within dir_path
@param {Boolean=False} force_no_processing - If True, do not attempt to
actually process a thumbnail, PIL image or anything. Simply return the
image filename as src.
@return {String} The base-64 encoded image data string, or path to the file
itself if not supported.
"""
# If we've specified to force no processing, just return the image filename
if force_no_processing:
if image_file.endswith('tif') or image_file.endswith('tiff'):
return UNSUPPORTED_IMAGE_TYPE_DATA
return image_file
# First try to get a thumbnail image
img = _get_thumbnail_image_from_file(dir_path, image_file)
return _get_src_from_image(img, image_file)
def _run_server():
    """
    Run the image server. This is blocking. Will handle user KeyboardInterrupt
    and other exceptions appropriately and return control once the server is
    stopped.
    @return {None}
    """
    port = _get_server_port()
    # Allow quick re-runs of the script: without this the OS may keep the
    # address reserved after the previous run and block a new bind
    SocketServer.TCPServer.allow_reuse_address = True
    server = SocketServer.TCPServer(
        ('', port),
        SimpleHTTPServer.SimpleHTTPRequestHandler
    )
    # Announce the URL up front, before the blocking serve call
    print('Your images are at http://127.0.0.1:%d/%s' % (
        port,
        INDEX_FILE_NAME
    ))
    try:
        # Blocks until the server is killed
        server.serve_forever()
    except KeyboardInterrupt:
        # Expected shutdown path for an ad-hoc command line tool
        print('User interrupted, stopping')
    except Exception as exptn:
        # Anything else - startup faults or other signals - is reported and
        # treated as a stop
        print(exptn)
        print('Unhandled exception in server, stopping')
def serve_dir(dir_path):
    """
    Generate indexes and run server from the given directory downwards.
    @param {String} dir_path - The directory path (absolute, or relative to CWD)
    @return {None}
    """
    # Fast first pass: build the indexes without any image processing so
    # serving can start immediately; large originals may be slow to serve
    # until thumbnails exist
    print('Performing first pass index file generation')
    created_files = _create_index_files(dir_path, True)
    # With PIL available, regenerate the indexes with real thumbnails in the
    # background while the server is already running
    if PIL_ENABLED:
        print('Performing PIL-enchanced optimised index file generation in background')
        BackgroundIndexFileGenerator(dir_path).run()
    # Run the server in the current location - this blocks until it's stopped
    _run_server()
    # Remove the index files we created so the image directories are left
    # exactly as we found them
    _clean_up(created_files)
if __name__ == '__main__':
    # Generate indices and serve from the current directory downwards when run
    # as the entry point. An optional first CLI argument selects the port
    # (see _get_server_port); the default is 8000.
    serve_dir('.')
|
wsconnector.py | from threading import Thread, Lock
from websocket import WebSocketApp, setdefaulttimeout, ABNF
from msgpack import packb, unpackb
from ssl import CERT_NONE
class WSConnector:
    """WebSocket client that PUTs msgpack-encoded payloads to a user model path.

    NOTE(review): start() leaves self.lock held until either _ready() or
    _fail() releases it, so send()/stop() block until the connection attempt
    resolves. Lock.release() on an unheld lock raises RuntimeError, so each
    start() must be answered by exactly one of those callbacks - presumably
    guaranteed by websocket-client; verify.
    """

    class REID:
        """Iterator yielding monotonically increasing request IDs: 0, 1, 2, ..."""

        def __init__(self):
            # Next ID to hand out
            self._next = 0

        def __next__(self):
            n = self._next
            self._next += 1
            return n

        def __iter__(self):
            return self

    def __init__(self, username: str, token: str, address: str, on_msg=None, ignore_ssl_cert=False):
        """Store credentials and configuration; no connection is opened until start().

        :param username: account name placed in the AUTH block of every packet
        :param token: auth token sent alongside the username
        :param address: WebSocket URL to connect to
        :param on_msg: optional callback invoked with each decoded message
        :param ignore_ssl_cert: if True, skip SSL certificate verification
        """
        self.username = username
        self.token = token
        self.address = address
        self.on_msg = on_msg
        # Active WebSocketApp, or None while disconnected
        self.ws = None
        # Serializes send()/stop() against connection setup (see class note)
        self.lock = Lock()
        # Per-connector request ID sequence
        self.reid = self.REID()
        self.running = False
        self.ignore_ssl_cert = ignore_ssl_cert
        # Process-wide websocket-client socket timeout, in seconds
        setdefaulttimeout(60)

    def send(self, data):
        """Wrap data in the protocol envelope and send it as one binary frame."""
        with self.lock:
            self.ws.send(packb(self.construct_package(data), use_bin_type=True), opcode=ABNF.OPCODE_BINARY)

    def start(self):
        """(Re)connect: close any existing socket, then run a new one on a thread."""
        self.stop()
        self.ws = WebSocketApp(self.address,
                               on_message=None if self.on_msg is None else self._handle_msg,
                               on_open=self._ready, on_error=self._fail)
        # Hold the lock until _ready/_fail fires so senders wait out the handshake
        self.lock.acquire()
        kwargs = {"sslopt": {"cert_reqs": CERT_NONE}} if self.ignore_ssl_cert else None
        Thread(target=self.ws.run_forever, kwargs=kwargs).start()

    def _fail(self, ws, err):
        """Error callback: unblock waiters, then re-raise the error.

        NOTE(review): the raise happens on the websocket thread, so the
        exception does not surface in the caller of start() - confirm intended.
        """
        self.lock.release()
        raise err

    def stop(self):
        """Close and discard the current connection, if any (no-op otherwise)."""
        if self.ws is not None:
            with self.lock:
                print("Closing the connection.")
                self.running = False
                self.ws.close()
                self.ws = None

    def _ready(self, ws):
        """Open callback: mark the connector running and unblock senders."""
        print(f"Connected to {self.address}.")
        self.running = True
        self.lock.release()

    def _handle_msg(self, ws, msg):
        """Decode binary frames with msgpack and forward the result to on_msg."""
        if isinstance(msg, bytes):
            msg = unpackb(msg)
        self.on_msg(msg)

    def construct_package(self, payload_data):
        """Build the protocol envelope (fresh REID, auth block) around payload_data."""
        return {
            'REID': next(self.reid),
            'AUTH': {'USER': self.username, 'TOKEN': self.token},
            'VERB': 'PUT',
            'PATH': ['user', self.username, 'model'],
            'META': {},
            'PAYL': payload_data
} |
__init__.py | # -*- coding: utf-8 -*-
"""The initialization file for the Pywikibot framework."""
#
# (C) Pywikibot team, 2008-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__release__ = '2.0b3'
__version__ = '$Id: febe9736b00037cc7c4a06ff9bd4ba3cff4ae5bb $'
import datetime
import math
import re
import sys
import threading
import json
if sys.version_info[0] > 2:
from queue import Queue
long = int
else:
from Queue import Queue
from warnings import warn
# Use pywikibot. prefix for all in-package imports; this is to prevent
# confusion with similarly-named modules in version 1 framework, for users
# who want to continue using both
from pywikibot import config2 as config
from pywikibot.bot import (
output, warning, error, critical, debug, stdout, exception,
input, input_choice, input_yn, inputChoice, handle_args, showHelp, ui, log,
calledModuleName, Bot, CurrentPageBot, WikidataBot,
# the following are flagged as deprecated on usage
handleArgs,
)
from pywikibot.exceptions import (
Error, InvalidTitle, BadTitle, NoPage, NoMoveTarget, SectionError,
SiteDefinitionError, NoSuchSite, UnknownSite, UnknownFamily,
UnknownExtension,
NoUsername, UserBlocked,
PageRelatedError, IsRedirectPage, IsNotRedirectPage,
PageSaveRelatedError, PageNotSaved, OtherPageSaveError,
LockedPage, CascadeLockedPage, LockedNoPage, NoCreateError,
EditConflict, PageDeletedConflict, PageCreatedConflict,
ServerError, FatalServerError, Server504Error,
CaptchaError, SpamfilterError, CircularRedirect, InterwikiRedirectPage,
WikiBaseError, CoordinateGlobeUnknownException,
)
from pywikibot.tools import PY2, UnicodeMixin, redirect_func
from pywikibot.i18n import translate
from pywikibot.data.api import UploadWarning
from pywikibot.diff import PatchManager
import pywikibot.textlib as textlib
import pywikibot.tools
# Names from pywikibot.textlib that are re-exported at package level through
# deprecation wrappers (installed by the loop further below).
textlib_methods = (
    'unescape', 'replaceExcept', 'removeDisabledParts', 'removeHTMLParts',
    'isDisabled', 'interwikiFormat', 'interwikiSort',
    'getLanguageLinks', 'replaceLanguageLinks',
    'removeLanguageLinks', 'removeLanguageLinksAndSeparator',
    'getCategoryLinks', 'categoryFormat', 'replaceCategoryLinks',
    'removeCategoryLinks', 'removeCategoryLinksAndSeparator',
    'replaceCategoryInPlace', 'compileLinkR', 'extract_templates_and_params',
    'TimeStripper',
)
# Public API of the pywikibot package.
__all__ = (
    'config', 'ui', 'UnicodeMixin', 'translate',
    'Page', 'FilePage', 'Category', 'Link', 'User',
    'ItemPage', 'PropertyPage', 'Claim',
    'html2unicode', 'url2unicode', 'unicode2html',
    'stdout', 'output', 'warning', 'error', 'critical', 'debug',
    'exception', 'input_choice', 'input', 'input_yn', 'inputChoice',
    'handle_args', 'handleArgs', 'showHelp', 'ui', 'log',
    'calledModuleName', 'Bot', 'CurrentPageBot', 'WikidataBot',
    'Error', 'InvalidTitle', 'BadTitle', 'NoPage', 'NoMoveTarget',
    'SectionError',
    'SiteDefinitionError', 'NoSuchSite', 'UnknownSite', 'UnknownFamily',
    'UnknownExtension',
    'NoUsername', 'UserBlocked', 'UserActionRefuse',
    'PageRelatedError', 'IsRedirectPage', 'IsNotRedirectPage',
    'PageSaveRelatedError', 'PageNotSaved', 'OtherPageSaveError',
    'LockedPage', 'CascadeLockedPage', 'LockedNoPage', 'NoCreateError',
    'EditConflict', 'PageDeletedConflict', 'PageCreatedConflict',
    'UploadWarning',
    'ServerError', 'FatalServerError', 'Server504Error',
    'CaptchaError', 'SpamfilterError', 'CircularRedirect',
    'InterwikiRedirectPage',
    'WikiBaseError', 'CoordinateGlobeUnknownException',
    'QuitKeyboardInterrupt',
)
# The textlib wrappers are part of the public API as well.
__all__ += textlib_methods
if PY2:
    # T111615: Python 2 requires __all__ is bytes
    globals()['__all__'] = tuple(bytes(item) for item in __all__)
# Install a deprecation wrapper for every textlib method into this module's
# namespace, redirecting callers to pywikibot.textlib.
for _name in textlib_methods:
    target = getattr(textlib, _name)
    wrapped_func = redirect_func(target)
    globals()[_name] = wrapped_func
# Package-level aliases for the deprecation helpers from pywikibot.tools.
deprecated = redirect_func(pywikibot.tools.deprecated)
deprecate_arg = redirect_func(pywikibot.tools.deprecate_arg)
class Timestamp(datetime.datetime):

    """A datetime.datetime subclass aware of MediaWiki timestamp formats.

    All datetime methods and arithmetic are available; mixing only
    Timestamp and datetime.timedelta operands guarantees the results stay
    Timestamp instances.

    Use Timestamp.fromISOformat() / Timestamp.fromtimestampformat() to parse
    the MediaWiki string representations; both also accept an existing
    Timestamp, in which case a clone is returned.

    Prefer Site.getcurrenttime() over Timestamp.utcnow() for the current
    wiki time.
    """

    mediawikiTSFormat = "%Y%m%d%H%M%S"
    ISO8601Format = "%Y-%m-%dT%H:%M:%SZ"

    def clone(self):
        """Return a copy of this Timestamp."""
        return self.replace(microsecond=self.microsecond)

    @classmethod
    def fromISOformat(cls, ts):
        """Build a Timestamp from an ISO 8601 string (or clone a Timestamp)."""
        # An inadvertently-passed Timestamp is simply cloned
        if isinstance(ts, cls):
            return ts.clone()
        return cls.strptime(ts, cls.ISO8601Format)

    @classmethod
    def fromtimestampformat(cls, ts):
        """Build a Timestamp from a MediaWiki internal timestamp string."""
        # An inadvertently-passed Timestamp is simply cloned
        if isinstance(ts, cls):
            return ts.clone()
        return cls.strptime(ts, cls.mediawikiTSFormat)

    def isoformat(self):
        """
        Format as an ISO 8601 timestamp accepted by MediaWiki.

        The stock datetime.isoformat omits the trailing 'Z' for naive
        values, which MediaWiki ~1.19 and earlier cannot parse.
        """
        return self.strftime(self.ISO8601Format)

    toISOformat = redirect_func(isoformat, old_name='toISOformat',
                                class_name='Timestamp')

    def totimestampformat(self):
        """Format as a MediaWiki internal timestamp."""
        return self.strftime(self.mediawikiTSFormat)

    def __str__(self):
        """Return a string format recognized by the API."""
        return self.isoformat()

    @staticmethod
    def _rewrap(value):
        """Re-wrap a plain datetime result as a Timestamp; pass others through."""
        if not isinstance(value, datetime.datetime):
            return value
        return Timestamp(value.year, value.month, value.day, value.hour,
                         value.minute, value.second, value.microsecond,
                         value.tzinfo)

    def __add__(self, other):
        """Perform addition, returning a Timestamp instead of datetime."""
        return self._rewrap(super(Timestamp, self).__add__(other))

    def __sub__(self, other):
        """Perform subtraction, returning a Timestamp instead of datetime."""
        return self._rewrap(super(Timestamp, self).__sub__(other))
class Coordinate(object):

    """
    Class for handling and storing Coordinates.

    Currently only used for DataSite; may later back the GeoData
    extension as well.
    """

    def __init__(self, lat, lon, alt=None, precision=None, globe='earth',
                 typ="", name="", dim=None, site=None, entity=''):
        """
        Represent a geo coordinate.

        @param lat: Latitude
        @type lat: float
        @param lon: Longitude
        @type lon: float
        @param alt: Altitude (TODO: semantics unverified in original)
        @param precision: precision
        @type precision: float
        @param globe: Which globe the point is on
        @type globe: str
        @param typ: The type of coordinate point
        @type typ: str
        @param name: The name
        @type name: str
        @param dim: Dimension (in meters)
        @type dim: int
        @param entity: The URL entity of a Wikibase item
        @type entity: str
        """
        self.lat = lat
        self.lon = lon
        self.alt = alt
        self._precision = precision
        # Globe names are normalized to lower case; a falsy globe is kept as-is
        self.globe = globe.lower() if globe else globe
        self._entity = entity
        self.type = typ
        self.name = name
        self._dim = dim
        # Fall back to the configured data repository when no site is given
        self.site = site if site else Site().data_repository()

    def __repr__(self):
        text = 'Coordinate(%s, %s' % (self.lat, self.lon)
        # Only non-default globes are shown in the repr
        if self.globe != 'earth':
            text += ', globe="%s"' % self.globe
        return text + ')'

    @property
    def entity(self):
        """Return the globe's entity URL, preferring an explicitly-set one."""
        if self._entity:
            return self._entity
        return self.site.globes()[self.globe]

    def toWikibase(self):
        """
        Export the data to a JSON object for the Wikibase API.

        FIXME: Should this be in the DataSite object?
        """
        if self.globe not in self.site.globes():
            raise CoordinateGlobeUnknownException(
                u"%s is not supported in Wikibase yet."
                % self.globe)
        return {'latitude': self.lat,
                'longitude': self.lon,
                'altitude': self.alt,
                'globe': self.entity,
                'precision': self.precision,
                }

    @classmethod
    def fromWikibase(cls, data, site):
        """Constructor to create an object from Wikibase's JSON output."""
        # Invert the site's globe mapping: entity URL -> globe name
        inverted = {url: name for name, url in site.globes().items()}
        globekey = data['globe']
        if globekey:
            globe = inverted.get(globekey)
        else:
            # An empty globe entity defaults to earth
            globe = 'earth'
        return cls(data['latitude'], data['longitude'],
                   data['altitude'], data['precision'],
                   globe, site=site, entity=data['globe'])

    @property
    def precision(self):
        u"""
        Return the precision of the geo coordinate.

        The dominant error (in degrees) comes from the longitudinal
        direction, and grows with latitude for a fixed error in meters.
        In small angle approximation, with r_φ = r·cos(φ) the earth radius
        at latitude φ, the longitudinal error is::

            precision = math.degrees(self._dim/(radius*math.cos(math.radians(self.lat))))
        """
        # Lazily derived from dim when no explicit precision was supplied
        if not self._precision:
            radius = 6378137  # TODO: Support other globes
            self._precision = math.degrees(
                self._dim / (radius * math.cos(math.radians(self.lat))))
        return self._precision

    def precisionToDim(self):
        """Convert precision from Wikibase to GeoData's dim."""
        raise NotImplementedError
class WbTime(object):
    """A Wikibase time representation."""
    # Wikibase precision codes: 0 (Gyr) through 14 (second); keys are either
    # the number of years (as string) or a human-readable unit name.
    PRECISION = {'1000000000': 0,
                 '100000000': 1,
                 '10000000': 2,
                 '1000000': 3,
                 '100000': 4,
                 '10000': 5,
                 'millenia': 6,
                 'century': 7,
                 'decade': 8,
                 'year': 9,
                 'month': 10,
                 'day': 11,
                 'hour': 12,
                 'minute': 13,
                 'second': 14
                 }
    # Wikibase time string template: signed, zero-padded year + date/time + Z.
    FORMATSTR = '{0:+012d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}Z'
    def __init__(self, year=None, month=None, day=None,
                 hour=None, minute=None, second=None,
                 precision=None, before=0, after=0,
                 timezone=0, calendarmodel=None, site=None):
        """
        Create a new WbTime object.
        The precision can be set by the Wikibase int value (0-14) or by a human
        readable string, e.g., 'hour'. If no precision is given, it is set
        according to the given time units.

        @raises ValueError: if no year is given, or the precision is invalid.
        """
        if year is None:
            raise ValueError('no year given')
        # Autodetect precision: start at 'second' and coarsen for each
        # missing unit; missing units get the start of their range.
        self.precision = self.PRECISION['second']
        if second is None:
            self.precision = self.PRECISION['minute']
            second = 0
        if minute is None:
            self.precision = self.PRECISION['hour']
            minute = 0
        if hour is None:
            self.precision = self.PRECISION['day']
            hour = 0
        if day is None:
            self.precision = self.PRECISION['month']
            day = 1
        if month is None:
            self.precision = self.PRECISION['year']
            month = 1
        # NOTE: long() is Python 2 only; years may exceed the int range there.
        self.year = long(year)
        self.month = month
        self.day = day
        self.hour = hour
        self.minute = minute
        self.second = second
        self.after = after
        self.before = before
        self.timezone = timezone
        if calendarmodel is None:
            if site is None:
                site = Site().data_repository()
            calendarmodel = site.calendarmodel()
        self.calendarmodel = calendarmodel
        # if precision is given it overwrites the autodetection above
        if precision is not None:
            if (isinstance(precision, int) and
                    precision in self.PRECISION.values()):
                self.precision = precision
            elif precision in self.PRECISION:
                self.precision = self.PRECISION[precision]
            else:
                raise ValueError('Invalid precision: "%s"' % precision)
    @classmethod
    def fromTimestr(cls, datetimestr, precision=14, before=0, after=0,
                    timezone=0, calendarmodel=None, site=None):
        """Create a WbTime from a Wikibase-format time string.

        @raises ValueError: if the string does not match the expected format.
        """
        match = re.match(r'([-+]?\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)Z',
                         datetimestr)
        if not match:
            raise ValueError(u"Invalid format: '%s'" % datetimestr)
        t = match.groups()
        return cls(long(t[0]), int(t[1]), int(t[2]),
                   int(t[3]), int(t[4]), int(t[5]),
                   precision, before, after, timezone, calendarmodel, site)
    def toTimestr(self):
        """
        Convert the data to a UTC date/time string.
        @return: str
        """
        return self.FORMATSTR.format(self.year, self.month, self.day,
                                     self.hour, self.minute, self.second)
    def toWikibase(self):
        """
        Convert the data to a JSON object for the Wikibase API.
        @return: dict
        """
        json = {'time': self.toTimestr(),
                'precision': self.precision,
                'after': self.after,
                'before': self.before,
                'timezone': self.timezone,
                'calendarmodel': self.calendarmodel
                }
        return json
    @classmethod
    def fromWikibase(cls, ts):
        """Create a WbTime from the JSON dict returned by the Wikibase API."""
        return cls.fromTimestr(ts[u'time'], ts[u'precision'],
                               ts[u'before'], ts[u'after'],
                               ts[u'timezone'], ts[u'calendarmodel'])
    def __str__(self):
        return json.dumps(self.toWikibase(), indent=4, sort_keys=True,
                          separators=(',', ': '))
    def __eq__(self, other):
        # Structural equality: all attributes must match.
        return self.__dict__ == other.__dict__
    def __repr__(self):
        return u"WbTime(year=%(year)d, month=%(month)d, day=%(day)d, " \
            u"hour=%(hour)d, minute=%(minute)d, second=%(second)d, " \
            u"precision=%(precision)d, before=%(before)d, after=%(after)d, " \
            u"timezone=%(timezone)d, calendarmodel='%(calendarmodel)s')" \
            % self.__dict__
class WbQuantity(object):
    """A Wikibase quantity representation."""
    @staticmethod
    def _parse_amount(value):
        """Parse a Wikibase numeric string (e.g. '+5', '0.5', '1e2').

        Replaces the original eval() calls, which executed arbitrary
        expressions from the API response (a code-injection hole).
        Preserves eval()'s int-vs-float result for plain numerals.
        """
        try:
            return int(value)
        except ValueError:
            return float(value)
    def __init__(self, amount, unit=None, error=None):
        u"""
        Create a new WbQuantity object.
        @param amount: number representing this quantity
        @type amount: float
        @param unit: not used (only unit-less quantities are supported)
        @param error: the uncertainty of the amount (e.g. ±1)
        @type error: float, or tuple of two floats, where the first value is
            the upper error and the second is the lower error value.
        @raises ValueError: if no amount is given
        """
        if amount is None:
            raise ValueError('no amount given')
        if unit is None:
            unit = '1'  # '1' is the Wikibase marker for "unit-less"
        self.amount = amount
        self.unit = unit
        upperError = lowerError = 0
        if isinstance(error, tuple):
            upperError, lowerError = error
        elif error is not None:
            upperError = lowerError = error
        self.upperBound = self.amount + upperError
        self.lowerBound = self.amount - lowerError
    def toWikibase(self):
        """Convert the data to a JSON object for the Wikibase API."""
        json = {'amount': self.amount,
                'upperBound': self.upperBound,
                'lowerBound': self.lowerBound,
                'unit': self.unit
                }
        return json
    @classmethod
    def fromWikibase(cls, wb):
        """
        Create a WbQuantity from the JSON data given by the Wikibase API.
        @param wb: Wikibase JSON (amount/bounds as numeric strings)
        """
        amount = cls._parse_amount(wb['amount'])
        upperBound = cls._parse_amount(wb['upperBound'])
        lowerBound = cls._parse_amount(wb['lowerBound'])
        error = (upperBound - amount, amount - lowerBound)
        return cls(amount, wb['unit'], error)
    def __str__(self):
        return json.dumps(self.toWikibase(), indent=4, sort_keys=True,
                          separators=(',', ': '))
    def __eq__(self, other):
        # Structural equality: all attributes must match.
        return self.__dict__ == other.__dict__
    def __repr__(self):
        return (u"WbQuantity(amount=%(amount)s, upperBound=%(upperBound)s, "
                u"lowerBound=%(lowerBound)s, unit=%(unit)s)" % self.__dict__)
# Module-level caches shared by Site(): instantiated sites keyed by
# "interface:family:code:user", and resolved (code, family) pairs per URL.
_sites = {}
_url_cache = {}  # The code/fam pair for each URL
def Site(code=None, fam=None, user=None, sysop=None, interface=None, url=None):
    """A factory method to obtain a Site object.
    Site objects are cached and reused by this method.
    By default rely on config settings. These defaults may all be overridden
    using the method parameters.
    @param code: language code (override config.mylang)
    @type code: string
    @param fam: family name or object (override config.family)
    @type fam: string or Family
    @param user: bot user name to use on this site (override config.usernames)
    @type user: unicode
    @param sysop: sysop user to use on this site (override config.sysopnames)
    @type sysop: unicode
    @param interface: site class or name of class in pywikibot.site
        (override config.site_interface)
    @type interface: subclass of L{pywikibot.site.BaseSite} or string
    @param url: Instead of code and fam, does try to get a Site based on the
        URL. Still requires that the family supporting that URL exists.
    @type url: string
    @raises ValueError: if both url and code/fam are given, or the interface
        name cannot be imported.
    @raises SiteDefinitionError: if no family matches the given URL.
    """
    # Either code and fam or only url
    if url and (code or fam):
        raise ValueError('URL to the wiki OR a pair of code and family name '
                         'should be provided')
    _logger = "wiki"
    if url:
        if url not in _url_cache:
            matched_sites = []
            # Iterate through all families and look, which does apply to
            # the given URL
            for fam in config.family_files:
                family = pywikibot.family.Family.load(fam)
                code = family.from_url(url)
                if code is not None:
                    matched_sites += [(code, fam)]
            if matched_sites:
                if len(matched_sites) > 1:
                    pywikibot.warning(
                        'Found multiple matches for URL "{0}": {1} (use first)'
                        .format(url, ', '.join(str(s) for s in matched_sites)))
                _url_cache[url] = matched_sites[0]
            else:
                # TODO: As soon as AutoFamily is ready, try and use an
                # AutoFamily
                _url_cache[url] = None
        cached = _url_cache[url]
        if cached:
            code = cached[0]
            fam = cached[1]
        else:
            raise SiteDefinitionError("Unknown URL '{0}'.".format(url))
    else:
        # Fallback to config defaults
        code = code or config.mylang
        fam = fam or config.family
    interface = interface or config.site_interface
    # config.usernames is initialised with a dict for each family name
    family_name = str(fam)
    if family_name in config.usernames:
        # '*' entries act as family-wide fallbacks for any language code.
        user = user or config.usernames[family_name].get(code) \
            or config.usernames[family_name].get('*')
        sysop = sysop or config.sysopnames[family_name].get(code) \
            or config.sysopnames[family_name].get('*')
    if not isinstance(interface, type):
        # If it isnt a class, assume it is a string
        try:
            tmp = __import__('pywikibot.site', fromlist=[interface])
            interface = getattr(tmp, interface)
        except ImportError:
            raise ValueError("Invalid interface name '%(interface)s'" % locals())
    if not issubclass(interface, pywikibot.site.BaseSite):
        warning('Site called with interface=%s' % interface.__name__)
    user = pywikibot.tools.normalize_username(user)
    key = '%s:%s:%s:%s' % (interface.__name__, fam, code, user)
    if key not in _sites or not isinstance(_sites[key], interface):
        _sites[key] = interface(code=code, fam=fam, user=user, sysop=sysop)
        debug(u"Instantiated %s object '%s'"
              % (interface.__name__, _sites[key]), _logger)
        if _sites[key].code != code:
            # The site normalised/aliased the requested code.
            warn('Site %s instantiated using different code "%s"'
                 % (_sites[key], code), UserWarning, 2)
    return _sites[key]
# alias for backwards-compatibility: emits a deprecation warning on use
getSite = pywikibot.tools.redirect_func(Site, old_name='getSite')
from pywikibot.page import (
Page,
FilePage,
Category,
Link,
User,
ItemPage,
PropertyPage,
Claim,
)
from pywikibot.page import html2unicode, url2unicode, unicode2html
# Matches an internal wiki link: [[title]] or [[title|label]].
link_regex = re.compile(r'\[\[(?P<title>[^\]|[<>{}]*)(\|.*?)?\]\]')
@pywikibot.tools.deprecated("comment parameter for page saving method")
def setAction(s):
    """Set a summary to use for changed page submissions."""
    # Deprecated: pass the summary to the page-save call instead.
    config.default_edit_summary = s
def showDiff(oldtext, newtext, context=0):
    """
    Output a string showing the differences between oldtext and newtext.
    The differences are highlighted (only on compatible systems) to show which
    changes were made.

    @param context: number of unchanged context lines around each hunk
    """
    PatchManager(oldtext, newtext, context=context).print_hunks()
# Throttle and thread handling
stopped = False  # set to True once stopme() has queued the shutdown sentinel
def stopme():
    """Drop this process from the throttle log, after pending threads finish.
    Can be called manually if desired, but if not, will be called automatically
    at Python exit.
    """
    global stopped
    _logger = "wiki"
    if not stopped:
        debug(u"stopme() called", _logger)
        def remaining():
            # Pages still queued, and a rough time estimate from the
            # configured put throttle.
            remainingPages = page_put_queue.qsize() - 1
            # -1 because we added a None element to stop the queue
            remainingSeconds = datetime.timedelta(
                seconds=(remainingPages * config.put_throttle))
            return (remainingPages, remainingSeconds)
        # Sentinel understood by async_manager(): stop consuming.
        page_put_queue.put((None, [], {}))
        stopped = True
        if page_put_queue.qsize() > 1:
            num, sec = remaining()
            format_values = dict(num=num, sec=sec)
            output(u'\03{lightblue}'
                   u'Waiting for %(num)i pages to be put. '
                   u'Estimated time remaining: %(sec)s'
                   u'\03{default}' % format_values)
        # Join in 1s slices so Ctrl-C can interrupt the wait.
        while(_putthread.isAlive()):
            try:
                _putthread.join(1)
            except KeyboardInterrupt:
                if input_yn('There are %i pages remaining in the queue. '
                            'Estimated time remaining: %s\nReally exit?'
                            % remaining(), default=False, automatic_quit=False):
                    return
    # only need one drop() call because all throttles use the same global pid
    try:
        list(_sites.values())[0].throttle.drop()
        log(u"Dropped throttle(s).")
    except IndexError:
        # No sites were ever instantiated; nothing to drop.
        pass
import atexit
# Flush pending page saves and drop the throttle entry at interpreter exit.
atexit.register(stopme)
# Create a separate thread for asynchronous page saves (and other requests)
def async_manager():
    """Daemon; take requests from the queue and execute them in background."""
    while True:
        entry = page_put_queue.get()
        request = entry[0]
        if request is None:
            # Sentinel pushed by stopme(): shut the daemon down.
            break
        request(*entry[1], **entry[2])
        page_put_queue.task_done()
def async_request(request, *args, **kwargs):
    """Put a request on the queue, and start the daemon if necessary."""
    if not _putthread.isAlive():
        try:
            # Hold the queue's internal mutex so two racing callers cannot
            # both attempt to start the thread.
            page_put_queue.mutex.acquire()
            try:
                _putthread.start()
            except (AssertionError, RuntimeError):
                # Thread was already started (or finished) concurrently.
                pass
        finally:
            page_put_queue.mutex.release()
    page_put_queue.put((request, args, kwargs))
# queue to hold pending requests
page_put_queue = Queue(config.max_queue_size)
# set up the background thread
_putthread = threading.Thread(target=async_manager)
# identification for debugging purposes
_putthread.setName('Put-Thread')
# daemon: never block interpreter shutdown waiting for this thread
_putthread.setDaemon(True)
# Register module-level deprecated aliases; accessing them emits a warning.
wrapper = pywikibot.tools.ModuleDeprecationWrapper(__name__)
wrapper._add_deprecated_attr('ImagePage', FilePage)
wrapper._add_deprecated_attr(
    'PageNotFound', pywikibot.exceptions.DeprecatedPageNotFoundError,
    warning_message=('{0}.{1} is deprecated, and no longer '
                     'used by pywikibot; use http.fetch() instead.'))
wrapper._add_deprecated_attr(
    'UserActionRefuse', pywikibot.exceptions._EmailUserError,
    warning_message='UserActionRefuse is deprecated; '
                    'use UserRightsError and/or NotEmailableError')
wrapper._add_deprecated_attr(
    'QuitKeyboardInterrupt', pywikibot.bot.QuitKeyboardInterrupt,
    warning_message='pywikibot.QuitKeyboardInterrupt is deprecated; '
                    'use pywikibot.bot.QuitKeyboardInterrupt instead')
|
multi.py | import torch
import torch.multiprocessing as mp
import subprocess
import time
import numpy as np
from tqdm import tqdm, trange
from triton_inference.measure import ModelMetricsWriter
def monitor():
    """Scrape the Triton metrics endpoint and record GPU metrics.

    Polls every 100 ms until the shared flag ``lock.value`` drops to 0,
    then closes the writer. Fix vs. original: ``nv_energy_consumption``
    was recorded twice per iteration (first and last); it is now recorded
    once per scrape.
    """
    writer = ModelMetricsWriter("/jmain01/home/JAD003/sxr06/lxx22-sxr06/model-inference/tritonserver", "stress")
    metrics = (
        "nv_energy_consumption",
        "nv_gpu_utilization",
        "nv_gpu_memory_used_bytes",
        "nv_gpu_power_usage",
    )
    while lock.value > 0:
        # Fetch the Prometheus-format metrics page exposed by tritonserver.
        r = subprocess.run(['curl', 'http://dgj101:8002/metrics'], capture_output=True, text=True)
        writer.text = r.stdout
        for name in metrics:
            writer.record_gpu_metric(name)
        time.sleep(0.1)
    writer.writer.close()
# Shared flag: 1 while the benchmark runs; set to 0 to stop monitor().
lock = mp.Value('i', 0)
lock.value = 1
job = mp.Process(target=monitor)
job.start()
# Seconds of work per matrix size.
RUN_SEC = 30
for size in trange(1,21):
    # NOTE(review): 2**20 x 2**20 would be ~4 TB per tensor — confirm the
    # intended upper bound of this range fits in GPU memory.
    size_m = 2 ** size
    # NOTE(review): process_time() counts CPU time only (excludes sleep and
    # may not reflect async GPU work) — confirm wall-clock was not intended.
    start_time = time.process_time()
    while True:
        A = torch.rand(size_m, size_m, device="cuda:0")
        B = torch.rand(size_m, size_m, device="cuda:0")
        # Element-wise product; result C is intentionally unused (load only).
        C = torch.multiply(A, B)
        curr_time = time.process_time()
        if curr_time - start_time > RUN_SEC:
            break
    # Idle period between sizes so metrics show a quiet baseline.
    time.sleep(RUN_SEC)
lock.value = 0
job.join()
|
quantize_tinyYOLO_BAC_multiprocess.py | #!/usr/bin/env python
# --------------------------------------------------------
# Quantize Fast R-CNN based Network
# Written by Chia-Chi Tsai
# --------------------------------------------------------
"""Quantize a Fast R-CNN network on an image database."""
import os
os.environ['GLOG_minloglevel'] = '2'
import _init_paths
from fast_rcnn.test import test_net, test_net_silent, im_detect
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from datasets.factory import get_imdb
import caffe
import argparse
import pprint
import time, os, sys
import numpy as np
from caffe.proto import caffe_pb2
import google.protobuf.text_format as txtf
import math
import cv2
from utils.timer import Timer
import multiprocessing
import json
from yolov1.caffe_yolo_test import calculate_tiny_yolo_mAP
import shutil
import warnings
warnings.filterwarnings("ignore")
from datetime import datetime
def parse_args():
    """
    Parse input arguments

    Prints help and exits when invoked with no arguments.
    @return: argparse.Namespace with the options below
    """
    parser = argparse.ArgumentParser(description='Quantize a Fast R-CNN network')
    parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
                        default=0, type=int)
    parser.add_argument('--def', dest='prototxt',
                        help='prototxt file defining the network',
                        default=None, type=str)
    parser.add_argument('--def_quant', dest='prototxt_quantized',
                        help='quantized prototxt file defining the network',
                        default=None, type=str)
    parser.add_argument('--def_quant_BAC', dest='prototxt_quantized_BAC',
                        help='quantized prototxt file defining the network',
                        default=None, type=str)
    parser.add_argument('--act_analysis', dest='act_analysis',
                        help='input and output analysis file',
                        default=None, type=str)
    parser.add_argument('--accumulator_analysis', dest='accumulator_analysis',
                        help='adder and multiplier analysis file',
                        default=None, type=str)
    parser.add_argument('--net', dest='caffemodel',
                        help='model to test',
                        default=None, type=str)
    parser.add_argument('--cfg', dest='cfg_file',
                        help='optional config file', default=None, type=str)
    parser.add_argument('--imdb', dest='imdb_name',
                        help='dataset to test',
                        default='voc_2007_test', type=str)
    parser.add_argument('--comp', dest='comp_mode', help='competition mode',
                        action='store_true')
    parser.add_argument('--set', dest='set_cfgs',
                        help='set config keys', default=None,
                        nargs=argparse.REMAINDER)
    parser.add_argument('--vis', dest='vis', help='visualize detections',
                        action='store_true')
    parser.add_argument('--num_dets', dest='max_per_image',
                        help='max number of detections per image',
                        default=100, type=int)
    parser.add_argument('--error_margin', dest='error_margin',
                        help='tolerance error of quantized network',
                        default=0.1, type=float)
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    return args
def analyze_network(net_proto):
    """Report which layer kinds a network prototxt contains.

    @return: (has_conv, has_deconv, has_fc) tuple of booleans
    """
    present = set(l.type for l in net_proto.layer)
    return ('Convolution' in present,
            'Deconvolution' in present,
            'InnerProduct' in present)
# convert network to quantized network with 32 bit width
# convert network to quantized network with 32 bit width
def convert_net_to_qnet(ori_net_path, q_net_path):
    """Rewrite a prototxt into its quantized (IVS/Ristretto) equivalent.

    In-place layers (top == bottom) are renamed by appending '/t' so every
    blob has a unique name, then Convolution/InnerProduct/Deconvolution
    layers are switched to their quantized types with 32-bit dynamic
    fixed point (16 fractional bits). Result is written to q_net_path.
    """
    net_proto = read_from_prototxt(ori_net_path)
    new_blob_name = {}
    for l in net_proto.layer:
        # Break in-place blob reuse: give each rewritten top a fresh name
        # and redirect later bottoms to the most recent renamed version.
        for i in range(len(l.top)):
            for j in range(len(l.bottom)):
                if l.top[i] == l.bottom[j]:
                    if not l.top[i] in new_blob_name.keys():
                        new_blob_name[l.top[i]]=l.top[i]+'/t'
                    else:
                        l.bottom[j] = new_blob_name[l.bottom[j]]
                        new_blob_name[l.top[i]]=new_blob_name[l.top[i]]+'/t'
                    l.top[i] = new_blob_name[l.top[i]]
                else:
                    for k in range(len(l.bottom)):
                        if l.bottom[k] in new_blob_name.keys():
                            l.bottom[k] = new_blob_name[l.bottom[k]]
        if l.type == 'Convolution':
            l.type = 'ConvolutionIVS'
            l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
            l.quantization_param.bw_layer_in = 32
            l.quantization_param.bw_layer_out = 32
            l.quantization_param.bw_params = 32
            l.quantization_param.fl_layer_in = 16
            l.quantization_param.fl_layer_out= 16
            l.quantization_param.fl_params = 16
            l.quantization_param.rounding_time = 0
        elif l.type =='InnerProduct':
            l.type = 'FcIVS'
            l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
            l.quantization_param.bw_layer_in = 32
            l.quantization_param.bw_layer_out = 32
            l.quantization_param.bw_params = 32
            l.quantization_param.fl_layer_in = 16
            l.quantization_param.fl_layer_out= 16
            l.quantization_param.fl_params = 16
            l.quantization_param.rounding_time = 0
        elif l.type =='Deconvolution':
            l.type = 'DeconvolutionRistretto'
            l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
            l.quantization_param.bw_layer_in = 32
            l.quantization_param.bw_layer_out = 32
            l.quantization_param.bw_params = 32
            l.quantization_param.fl_layer_in = 16
            l.quantization_param.fl_layer_out= 16
            l.quantization_param.fl_params = 16
            l.quantization_param.rounding_time = 0
    write_to_prototxt(net_proto, q_net_path)
# convert network to quantized network with 32 bit width
# convert network to quantized network with 32 bit width
def convert_net_to_qnet_BAC_analysis(ori_net_path, q_net_path):
    """Rewrite a prototxt so conv/fc layers run in BAC analysis mode.

    Same in-place blob renaming as convert_net_to_qnet; additionally sets
    32-bit adder/multiplier widths and analyze_mode = 3 (accumulator
    analysis — meaning defined by the IVS caffe fork; confirm there).
    """
    net_proto = read_from_prototxt(ori_net_path)
    new_blob_name = {}
    for l in net_proto.layer:
        # Break in-place blob reuse (see convert_net_to_qnet).
        for i in range(len(l.top)):
            for j in range(len(l.bottom)):
                if l.top[i] == l.bottom[j]:
                    if not l.top[i] in new_blob_name.keys():
                        new_blob_name[l.top[i]]=l.top[i]+'/t'
                    else:
                        l.bottom[j] = new_blob_name[l.bottom[j]]
                        new_blob_name[l.top[i]]=new_blob_name[l.top[i]]+'/t'
                    l.top[i] = new_blob_name[l.top[i]]
                else:
                    for k in range(len(l.bottom)):
                        if l.bottom[k] in new_blob_name.keys():
                            l.bottom[k] = new_blob_name[l.bottom[k]]
        if l.type == 'Convolution' or l.type == 'ConvolutionIVS':
            l.type = 'ConvolutionIVS'
            l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
            l.quantization_param.bw_add = 32
            l.quantization_param.bw_multiply = 32
            l.quantization_param.fl_add = 16
            l.quantization_param.fl_multiply = 16
            l.quantization_param.rounding_time = 1
            l.quantization_param.analyze_mode = 3
        if l.type == 'InnerProduct' or l.type == 'FcIVS':
            l.type = 'FcIVS'
            l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
            l.quantization_param.bw_add = 32
            l.quantization_param.bw_multiply = 32
            l.quantization_param.fl_add = 16
            l.quantization_param.fl_multiply = 16
            l.quantization_param.rounding_time = 1
            l.quantization_param.analyze_mode = 3
    write_to_prototxt(net_proto, q_net_path)
def convert_net_to_qnet_BAC(ori_net_path, q_net_path):
    """Rewrite a prototxt for final BAC quantization (analysis disabled).

    Same in-place blob renaming as convert_net_to_qnet; conv/fc layers get
    analyze_mode = 0 and rounding_time = 1.
    """
    net_proto = read_from_prototxt(ori_net_path)
    new_blob_name = {}
    for l in net_proto.layer:
        # Break in-place blob reuse (see convert_net_to_qnet).
        for i in range(len(l.top)):
            for j in range(len(l.bottom)):
                if l.top[i] == l.bottom[j]:
                    if not l.top[i] in new_blob_name.keys():
                        new_blob_name[l.top[i]]=l.top[i]+'/t'
                    else:
                        l.bottom[j] = new_blob_name[l.bottom[j]]
                        new_blob_name[l.top[i]]=new_blob_name[l.top[i]]+'/t'
                    l.top[i] = new_blob_name[l.top[i]]
                else:
                    for k in range(len(l.bottom)):
                        if l.bottom[k] in new_blob_name.keys():
                            l.bottom[k] = new_blob_name[l.bottom[k]]
        if l.type == 'Convolution' or l.type == 'ConvolutionIVS':
            l.type = 'ConvolutionIVS'
            l.quantization_param.analyze_mode = 0
            l.quantization_param.rounding_time = 1
        if l.type == 'InnerProduct' or l.type == 'FcIVS':
            l.type = 'FcIVS'
            l.quantization_param.analyze_mode = 0
            l.quantization_param.rounding_time = 1
    write_to_prototxt(net_proto, q_net_path)
#change single layer bit width
def change_layer_bw(net_proto, layer_name,
                    bw_layer_in, fl_layer_in,
                    bw_layer_out, fl_layer_out,
                    bw_params, fl_params,
                    bw_add, fl_add,
                    bw_multiply, fl_multiply):
    """Set all quantization bit-widths/fractional lengths on one layer.

    Applies to every layer whose name matches layer_name; values are cast
    to int. Returns the (mutated) net_proto for chaining.
    """
    for layer in net_proto.layer:
        if layer.name != layer_name:
            continue
        qp = layer.quantization_param
        qp.precision = 0  # DYNAMIC_FIXED_POINT
        qp.bw_layer_in = int(bw_layer_in)
        qp.bw_layer_out = int(bw_layer_out)
        qp.bw_params = int(bw_params)
        qp.bw_add = int(bw_add)
        qp.bw_multiply = int(bw_multiply)
        qp.fl_layer_in = int(fl_layer_in)
        qp.fl_layer_out = int(fl_layer_out)
        qp.fl_params = int(fl_params)
        qp.fl_add = int(fl_add)
        qp.fl_multiply = int(fl_multiply)
    return net_proto
def change_layer_BAC_bw(net_proto, layer_name,
                        bw_add, fl_add,
                        bw_multiply, fl_multiply):
    """Set only the adder/multiplier bit-widths on the named layer.

    Bug fixes vs. the original: the second parameter was misspelled
    'lVayer_name' while the body compared against 'layer_name'
    (NameError on every call), and the multiplier fractional length
    was read from the undefined name 'fw_multiply' instead of
    'fl_multiply'. Returns the (mutated) net_proto.
    """
    for l in net_proto.layer:
        if l.name == layer_name:
            l.quantization_param.bw_add = bw_add
            l.quantization_param.fl_add = fl_add
            l.quantization_param.bw_multiply = bw_multiply
            l.quantization_param.fl_multiply = fl_multiply
    return net_proto
def change_layer_bottom_name(net_proto, layer_name,
                             layer_bottom_name):
    """Replace the bottom (input blob list) of the named layer.

    NOTE(review): plain assignment to a protobuf repeated field raises in
    real protobuf messages — confirm callers pass compatible objects.
    """
    for layer in (l for l in net_proto.layer if l.name == layer_name):
        layer.bottom = layer_bottom_name
    return net_proto
def change_layer_top_name(net_proto, layer_name,
                          layer_top_name):
    """Replace the top (output blob list) of the named layer.

    NOTE(review): plain assignment to a protobuf repeated field raises in
    real protobuf messages — confirm callers pass compatible objects.
    """
    for layer in (l for l in net_proto.layer if l.name == layer_name):
        layer.top = layer_top_name
    return net_proto
#calculate needed Integer Length of layer parameters
def calc_layer_param_IL(net,layer):
    """Integer length (bits) needed for the layer's weights and bias.

    Finds the largest-magnitude parameter value and returns
    ceil(log2(|max|)) + 1 (one extra bit for the sign).
    """
    blobs = net.params[layer.name]
    candidates = [blobs[0].data[...].max(), blobs[0].data[...].min()]
    if layer.convolution_param.bias_term:
        candidates.append(blobs[1].data[...].max())
        candidates.append(blobs[1].data[...].min())
    else:
        candidates.append(0)
    max_param = max(candidates, key=abs)
    return math.ceil(math.log(abs(max_param), 2)) + 1
def analyze_net_param_IL(net, net_proto):
    """Map each quantizable layer name to its needed parameter IL."""
    quantizable = ('ConvolutionIVS', 'FcIVS', 'DeconvolutionRistretto')
    return {layer.name: calc_layer_param_IL(net, layer)
            for layer in net_proto.layer if layer.type in quantizable}
#calculate needed Integer Length of layer output
def calc_layer_inout_IL(net, layer_bottom_name):
    """Integer length (bits) needed for the named blob's extreme value."""
    blob = net.blobs[layer_bottom_name].data
    peak = abs(max(blob.max(), blob.min(), key=abs))
    return math.ceil(math.log(peak, 2)) + 1
def analyze_net_output_IL(net, net_proto, imdb, max_per_image=100, thresh=0.05, vis=False):
    """Run images from ./test.txt through the net and collect, per
    quantizable layer, the max integer length needed for its input and
    output blobs.

    @return: (net_output_IL, net_input_IL) dicts keyed by layer name.
    NOTE(review): imdb, max_per_image, thresh and vis are unused here;
    the _t timers are created but never read; the test.txt handle is
    never closed; -sys.maxint is Python 2 only.
    """
    _t = {'im_preproc': Timer(), 'im_net' : Timer(), 'im_postproc': Timer(), 'misc' : Timer()}
    #if not cfg.TEST.HAS_RPN:
    #    roidb = imdb.roidb
    net_output_IL = dict()
    net_input_IL = dict()
    for layer in net_proto.layer:
        # In-place layers would make bottom/top stats indistinguishable.
        assert layer.top[0] != layer.bottom[0],"bottom name cannot be the same as top name in the same layer, at layer:{} top:{} bottom:{}".format(layer.name,layer.top[0],layer.bottom[0])
        #if layer.top[0] == layer.bottom[0]:
        #    print layer.name, layer.type
        if layer.type == 'ConvolutionIVS' \
            or layer.type == 'FcIVS' \
            or layer.type == 'DeconvolutionRistretto':
            net_output_IL[layer.name] = -sys.maxint - 1
            net_input_IL[layer.name] = -sys.maxint - 1
    f = open('./test.txt',"r")
    for image_path in f:
        image_path = image_path.strip()
        img_filename = image_path
        image_name = img_filename.split("/")[-1]
        image_id = image_name.split(".")[0]
        img = caffe.io.load_image(img_filename)
        inputs = img
        # HWC float image -> CHW blob expected by caffe.
        transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
        transformer.set_transpose('data', (2,0,1))
        start = datetime.now()
        out = net.forward_all(data=np.asarray([transformer.preprocess('data', inputs)]))
        end = datetime.now()
        elapsedTime = end-start
        #print 'total time is " milliseconds', elapsedTime.total_seconds()*1000
        # Track the worst-case IL seen over all images.
        for layer in net_proto.layer:
            if layer.type == 'ConvolutionIVS' \
                or layer.type == 'FcIVS' \
                or layer.type == 'DeconvolutionRistretto':
                net_output_IL[layer.name] = max(calc_layer_inout_IL(net, layer.top[0]), net_output_IL[layer.name])
                net_input_IL[layer.name] = max(calc_layer_inout_IL(net, layer.bottom[0]), net_input_IL[layer.name])
    return net_output_IL, net_input_IL
#calculate needed Integer Length of layer adder
def calc_layer_adder_IL(net, layer_top_name):
    """IL (bits) for the adder, from the first two flattened top entries.

    NOTE(review): relies on the IVS caffe fork exporting adder intermediates
    at flat indices 0 and 1 of the top blob — confirm against that fork.
    """
    data = net.blobs[layer_top_name].data
    flat = data.reshape(data.size)
    peak = abs(max(flat[0], flat[1], key=abs))
    return math.ceil(math.log(peak, 2)) + 1
#calculate needed Integer Length of layer multiplier
def calc_layer_multiplier_IL(net, layer_top_name):
    """IL (bits) for the multiplier, from flattened top entries 2 and 3.

    NOTE(review): relies on the IVS caffe fork exporting multiplier
    intermediates at flat indices 2 and 3 — confirm against that fork.
    """
    data = net.blobs[layer_top_name].data
    flat = data.reshape(data.size)
    peak = abs(max(flat[2], flat[3], key=abs))
    return math.ceil(math.log(peak, 2)) + 1
#analyze adder and multiplier of each layer in network
#analyze adder and multiplier of each layer in network
def analyze_net_adder_multiplier_IL(net, net_proto, max_per_image=100, thresh=0.05, vis=False):
    """Collect, per conv/fc layer, the max integer length needed by the
    adder and multiplier intermediates while forwarding ./test.txt images.

    @return: (net_adder_IL, net_multiplier_IL) dicts keyed by layer name.
    NOTE(review): max_per_image, thresh and vis are unused; the test.txt
    handle is never closed; -sys.maxint is Python 2 only.
    """
    net_adder_IL = dict()
    net_multiplier_IL = dict()
    for layer in net_proto.layer:
        assert layer.top[0] != layer.bottom[0],"bottom name cannot be the same as top name in the same layer, at layer:{} top:{} bottom:{}".format(layer.name,layer.top[0],layer.bottom[0])
        #if layer.top[0] == layer.bottom[0]:
        #    print layer.name, layer.type
        if layer.type == 'ConvolutionIVS' \
            or layer.type == 'FcIVS' :
            net_adder_IL[layer.name] = -sys.maxint - 1
            net_multiplier_IL[layer.name] = -sys.maxint - 1
    f = open('./test.txt',"r")
    for image_path in f:
        image_path = image_path.strip()
        img_filename = image_path
        image_name = img_filename.split("/")[-1]
        image_id = image_name.split(".")[0]
        img = caffe.io.load_image(img_filename)
        inputs = img
        transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
        transformer.set_transpose('data', (2,0,1))
        start = datetime.now()
        out = net.forward_all(data=np.asarray([transformer.preprocess('data', inputs)]))
        end = datetime.now()
        elapsedTime = end-start
        # NOTE(review): a marker constant is written into the first weight of
        # each conv/fc layer before the second forward pass — presumably so
        # the analyze-mode layers expose adder/multiplier extremes in their
        # top blobs; confirm against the IVS caffe fork.
        for layer in net_proto.layer:
            if layer.type == 'ConvolutionIVS':
                net.params[layer.name][0].data[0][0][0][0]=2610214
            elif layer.type == 'FcIVS':
                net.params[layer.name][0].data[0][0]=2610214
        net.forward()
        for layer in net_proto.layer:
            if layer.type == 'ConvolutionIVS' \
                or layer.type == 'FcIVS':
                net_adder_IL[layer.name] = max(calc_layer_adder_IL(net, layer.top[0]),
                    net_adder_IL[layer.name])
                net_multiplier_IL[layer.name] = max(calc_layer_multiplier_IL(net, layer.top[0]),
                    net_multiplier_IL[layer.name])
    return net_adder_IL, net_multiplier_IL
#quantize adder in network
def quantize_net_adder(net_proto, net_adder_IL, adder_bw, extra_IL):
    """Apply adder quantization (bit-width and derived FL) to conv/fc layers."""
    for layer in net_proto.layer:
        if layer.type not in ('ConvolutionIVS', 'FcIVS'):
            continue
        adder_FL = adder_bw - (net_adder_IL[layer.name] + extra_IL)
        qp = layer.quantization_param
        change_layer_bw(net_proto, layer.name,
                        qp.bw_layer_in, qp.fl_layer_in,
                        qp.bw_layer_out, qp.fl_layer_out,
                        qp.bw_params, qp.fl_params,
                        adder_bw, adder_FL,
                        qp.bw_multiply, qp.fl_multiply)
#quantize multiplier in network
def quantize_net_multiplier(net_proto, net_multiplier_IL, multiplier_bw, extra_IL):
    """Apply multiplier quantization (bit-width and derived FL) to conv/fc layers."""
    for layer in net_proto.layer:
        if layer.type not in ('ConvolutionIVS', 'FcIVS'):
            continue
        multiplier_FL = multiplier_bw - (net_multiplier_IL[layer.name] + extra_IL)
        qp = layer.quantization_param
        change_layer_bw(net_proto, layer.name,
                        qp.bw_layer_in, qp.fl_layer_in,
                        qp.bw_layer_out, qp.fl_layer_out,
                        qp.bw_params, qp.fl_params,
                        qp.bw_add, qp.fl_add,
                        multiplier_bw, multiplier_FL)
#quantize input and output of each layer in network
def quantize_net_output(net_proto, net_output_IL, net_input_IL, output_bw, extra_IL):
    """Quantize the input/output data path of every quantizable layer.

    Inputs use the same bit-width as outputs; fractional lengths are
    derived from the per-layer integer lengths plus extra_IL.
    """
    input_bw = output_bw
    targets = ('ConvolutionIVS', 'FcIVS', 'DeconvolutionRistretto')
    for layer in net_proto.layer:
        if layer.type not in targets:
            continue
        output_FL = output_bw - (net_output_IL[layer.name] + extra_IL)
        input_FL = input_bw - (net_input_IL[layer.name] + extra_IL)
        qp = layer.quantization_param
        change_layer_bw(net_proto, layer.name,
                        input_bw, input_FL,
                        output_bw, output_FL,
                        qp.bw_params, qp.fl_params,
                        qp.bw_add, qp.fl_add,
                        qp.bw_multiply, qp.fl_multiply)
#quantize convolution layers in network
def quantize_net_conv(net_proto, net_param_IL, weighting_bw, extra_IL):
    """Quantize the weights of every ConvolutionIVS layer."""
    for layer in net_proto.layer:
        if layer.type != 'ConvolutionIVS':
            continue
        weighting_FL = weighting_bw - (net_param_IL[layer.name] + extra_IL)
        qp = layer.quantization_param
        change_layer_bw(net_proto, layer.name,
                        qp.bw_layer_in, qp.fl_layer_in,
                        qp.bw_layer_out, qp.fl_layer_out,
                        weighting_bw, weighting_FL,
                        qp.bw_add, qp.fl_add,
                        qp.bw_multiply, qp.fl_multiply)
#quantize fully connected layer in network
def quantize_net_fc(net_proto, net_param_IL, weighting_bw, extra_IL):
    """Quantize the weights of every FcIVS (fully connected) layer."""
    for layer in net_proto.layer:
        if layer.type != 'FcIVS':
            continue
        weighting_FL = weighting_bw - (net_param_IL[layer.name] + extra_IL)
        qp = layer.quantization_param
        change_layer_bw(net_proto, layer.name,
                        qp.bw_layer_in, qp.fl_layer_in,
                        qp.bw_layer_out, qp.fl_layer_out,
                        weighting_bw, weighting_FL,
                        qp.bw_add, qp.fl_add,
                        qp.bw_multiply, qp.fl_multiply)
#quantize deconvolution layer in network
def quantize_net_deconv(net_proto, net_param_IL, weighting_bw, extra_IL):
    """Quantize the weights of every DeconvolutionRistretto layer to
    weighting_bw bits; see quantize_net_conv for the IL/FL convention."""
    for deconv_layer in net_proto.layer:
        if deconv_layer.type != 'DeconvolutionRistretto':
            continue
        qp = deconv_layer.quantization_param
        weighting_FL = weighting_bw - (net_param_IL[deconv_layer.name] + extra_IL)
        change_layer_bw(net_proto, deconv_layer.name,
                        qp.bw_layer_in, qp.fl_layer_in,
                        qp.bw_layer_out, qp.fl_layer_out,
                        weighting_bw, weighting_FL,
                        qp.bw_add, qp.fl_add,
                        qp.bw_multiply, qp.fl_multiply)
#read network spec in prototxt
def read_from_prototxt(ori_net_path):
    """Parse a text-format .prototxt network description into a
    caffe NetParameter protobuf message and return it."""
    net_proto = caffe_pb2.NetParameter()
    with open(ori_net_path) as proto_file:
        txtf.Merge(proto_file.read(), net_proto)
    return net_proto
#write network spec to prototxt
def write_to_prototxt(net_proto, out_net_path):
    """Serialize a NetParameter message (via its str() text form) to
    the file at out_net_path, overwriting any existing content."""
    with open(out_net_path, 'w') as out_file:
        out_file.write(str(net_proto))
#test network with no string printed
def test_qnet(net_path, caffemodel_path, imdb):
    """Evaluate a (quantized) network on imdb without console output
    and return its average precision."""
    qnet = caffe.Net(net_path, caffemodel_path, caffe.TEST)
    # Name the net after the weight file so result logs identify it.
    qnet.name = os.path.splitext(os.path.basename(caffemodel_path))[0]
    return test_net_silent(qnet, imdb, max_per_image=args.max_per_image,
                           vis=args.vis)
#print each layer name and spec
def print_net_layer_names(net):
    """Debug helper: dump the name/type/blob-count of each convolution
    layer of a loaded caffe Net, plus introspection output."""
    print("Network layers:")
    for name, layer in zip(net._layer_names, net.layers):
        if layer.type == 'ConvolutionIVS' or layer.type == 'Convolution':
            print("{:<30}: {:22s}({} blobs)".format(name, layer.type, len(layer.blobs)))
            # Raw introspection dumps (Python 2 print statements).
            print dir(layer)
            print layer.reshape
            print layer.convolution_param
    # NOTE(review): caffe.Net exposes `layers`, not `layer`; this line
    # looks like leftover debug code and may raise AttributeError — confirm.
    print net.layer[1].name
def mAP_worker(i, net_path, shared_dict):
    """Subprocess entry point: evaluate the network at net_path and store
    its mAP in shared_dict under key `i` (a bit-width label string)."""
    #caffe.set_mode_cpu()
    #imdb = get_imdb(args.imdb_name)
    #imdb.competition_mode(args.comp_mode)
    #if not cfg.TEST.HAS_RPN:
    #    imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
    #ap = test_qnet(net_path, args.caffemodel, imdb)
    # NOTE(review): hard-coded to GPU 2 although the comment says 0/1 —
    # confirm which device is intended on the target machine.
    GPU_ID = 2 # Switch between 0 and 1 depending on the GPU you want to use.
    caffe.set_device(GPU_ID)
    caffe.set_mode_gpu()
    ap = calculate_tiny_yolo_mAP(net_path, args.caffemodel, './test.txt', i)
    shared_dict[i] = ap
def analyze_net_output_IL_worker(net_output_IL, net_input_IL):
    """Subprocess: analyze integer lengths of every layer's input/output
    activations and copy the results into the shared manager dicts."""
    caffe.set_device(2)  # hard-coded GPU id, same as mAP_worker
    caffe.set_mode_gpu()
    proto = read_from_prototxt(args.prototxt_quantized)
    net = caffe.Net(args.prototxt_quantized, args.caffemodel, caffe.TEST)
    imdb = get_imdb(args.imdb_name)
    imdb.competition_mode(args.comp_mode)
    if not cfg.TEST.HAS_RPN:
        imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
    output_IL, input_IL = analyze_net_output_IL(
        net, proto, imdb, max_per_image=args.max_per_image, vis=args.vis)
    # Copy into the multiprocessing.Manager dict proxies so the parent sees them.
    net_output_IL.update(output_IL)
    net_input_IL.update(input_IL)
def analyze_net_adder_multiplier_IL_worker(net_adder_IL, net_multiplier_IL):
    """Subprocess: analyze integer lengths of the bit-accurate net's
    adders and multipliers and copy the results into the shared dicts."""
    caffe.set_device(2)  # hard-coded GPU id, same as mAP_worker
    caffe.set_mode_gpu()
    proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
    bac_net = caffe.Net(args.prototxt_quantized_BAC, args.caffemodel, caffe.TEST)
    adder_IL, multiplier_IL = analyze_net_adder_multiplier_IL(
        bac_net, proto_BAC, max_per_image=args.max_per_image, vis=args.vis)
    net_adder_IL.update(adder_IL)
    net_multiplier_IL.update(multiplier_IL)
def analyze_net_param_IL_worker(net_param_IL):
    """Subprocess: analyze integer lengths of layer parameters (weights)
    and copy the results into the shared manager dict."""
    caffe.set_device(2)  # hard-coded GPU id, same as mAP_worker
    caffe.set_mode_gpu()
    proto = read_from_prototxt(args.prototxt_quantized)
    net = caffe.Net(args.prototxt_quantized, args.caffemodel, caffe.TEST)
    net_param_IL.update(analyze_net_param_IL(net, proto))
if __name__ == '__main__':
    # Start from a clean results directory so stale detections are not reused.
    if os.path.exists('./results/'):
        shutil.rmtree("./results")
    args = parse_args()
    print('Called with args:')
    print(args)
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    #cfg.GPU_ID = args.gpu_id
    #print('Using config:')
    #pprint.pprint(cfg)
    # Rewrite the original prototxt into its quantizable twin.
    convert_net_to_qnet(args.prototxt, args.prototxt_quantized)
    print 'Create quantized prototxt'
    #net = caffe.Net(args.prototxt_quantized, args.caffemodel, caffe.TEST)
    print 'Testing Full Precision Accuracy'
    # shared_dict keys encode the bit widths under test in the order
    # CONV-FC-activation-adder-multiplier ('FP' = full precision).
    # Evaluation runs in a child process so caffe GPU state is isolated.
    manager = multiprocessing.Manager()
    shared_dict = manager.dict()
    p = multiprocessing.Process(target=mAP_worker, args=('FP-FP-FP-FP-FP', args.prototxt, shared_dict))
    p.start()
    p.join()
    full_ap = shared_dict['FP-FP-FP-FP-FP']
    #sys.exit(0)
    #full_ap = test_qnet(args.prototxt, args.caffemodel, imdb)
    #full_ap = 0.540425
    print 'Full precision accuracy : {}'.format(full_ap)
    # Bit Width for Analyze
    # NOTE(review): the bw_range_* lists below are never read; the sweeps
    # further down use range(3, 11) instead — confirm whether they are dead.
    bw_range_conv = [8, 4] #bit width for convolution layers
    bw_range_deconv = [32, 16, 8, 4, 2] #bit width for deconvolution layers
    bw_range_fc = [32, 16, 8, 7, 6, 5, 4, 2] #bit width for fully connected layers
    bw_range_output = [32, 16, 8, 4, 2] #bit width for layer input and output
    # Chosen bit widths; 0 means "not yet determined by the sweeps below".
    bw_conv = 0 #just initial
    bw_deconv = 0 #just initial
    bw_fc = 0 #just initial
    bw_output = 0 #just initial
    bw_adder = 0 #just initial
    bw_multiplier = 0 #just initial
    print 'Analyzing network'
    net_proto = read_from_prototxt(args.prototxt)
    has_conv, has_deconv, has_fc = analyze_network(net_proto)
    print 'Network Structure'
    print 'CONV:{}, DECONV:{}, FC:{}'.format(has_conv, has_deconv, has_fc)
    print '-----------------------------------'
    net_proto = read_from_prototxt(args.prototxt_quantized)
    print 'Analyzing network parameter IL'
    # Weight integer lengths are computed in a subprocess into a shared dict.
    net_param_IL = manager.dict()
    p = multiprocessing.Process(target=analyze_net_param_IL_worker,
                                args=(net_param_IL,))
    p.start()
    p.join()
    #net_param_IL = analyze_net_param_IL(net, net_proto)
    #net_output_IL = manager.dict()
    #net_input_IL = manager.dict()
    #if args.act_analysis == None:
    #    print 'Analyzing network output IL'
    #    p = multiprocessing.Process(target=analyze_net_output_IL_worker,
    #                                args=(net_output_IL, net_input_IL))
    #    p.start()
    #    p.join()
    #    with open('act_analysis.json', 'w') as outfile:
    #        act_analysis = dict()
    #        act_analysis['net_output_IL'] = dict()
    #        act_analysis['net_input_IL'] = dict()
    #        for t in net_output_IL.keys():
    #            act_analysis['net_output_IL'][t] = net_output_IL[t]
    #        for t in net_input_IL.keys():
    #            act_analysis['net_input_IL'][t] = net_input_IL[t]
    #        json.dump(act_analysis, outfile)
    #else:
    #    print 'Loading network output IL'
    #    with open(args.act_analysis) as json_data:
    #        act_analysis = json.load(json_data)
    #        for t in act_analysis['net_output_IL'].keys():
    #            net_output_IL[t] = act_analysis['net_output_IL'][t]
    #        for t in act_analysis['net_input_IL'].keys():
    #            net_input_IL[t] = act_analysis['net_input_IL'][t]
    # Analyze Convolution and DeConvolution Layers
    # Sweep CONV/DECONV weight widths 3..10 bits in parallel subprocesses,
    # then pick the smallest width whose mAP stays within error_margin of
    # full precision.
    if has_conv:
        print 'Analyzing CONV and DECONV'
        print '\tbit width\t accuracy'
        jobs = []
        for i in range(3, 11):
            net_proto = read_from_prototxt(args.prototxt_quantized)
            quantize_net_conv(net_proto, net_param_IL, i, 0)
            quantize_net_deconv(net_proto, net_param_IL, i, 0)
            write_to_prototxt(net_proto, './temp'+str(i)+'.prototxt')
            p = multiprocessing.Process(target=mAP_worker, args=(str(i)+'-32-32-32-32',
                                                                 './temp'+str(i)+'.prototxt',
                                                                 shared_dict))
            jobs.append(p)
            p.start()
        for proc in jobs:
            proc.join()
        for i in range(3, 11):
            print '\t{}bit:\t\t{}'.format(i,shared_dict[str(i)+'-32-32-32-32'])
        # NOTE(review): if no width meets the margin, bw_conv stays 0 and is
        # later clamped up to 8 by the "< 8" floor below — confirm intended.
        for i in range(3, 11):
            if shared_dict[str(i)+'-32-32-32-32'] > (full_ap - args.error_margin):
                bw_conv = i
                break;
    # Analyze Convolution and DeConvolution Layers
    #if has_conv:
    #    print 'Analyzing CONV and DECONV'
    #    print '\tbit width\t accuracy'
    #    bw_h = 16
    #    bw_l = 0
    #    bw = 16
    #    while True:
    #        net_proto = read_from_prototxt(args.prototxt_quantized)
    #        quantize_net_conv(net, net_proto, net_param_IL, bw)
    #        quantize_net_deconv(net, net_proto, net_param_IL, bw)
    #        write_to_prototxt(net_proto, './temp.prototxt')
    #        ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
    #        print '\t{}bit:\t\t{}'.format(bw,ap)
    #        if ap < (full_ap - args.error_margin):
    #            bw_l = bw
    #        else:
    #            bw_h = bw
    #            bw_conv = bw
    #        if bw_h - bw_l <= 1:
    #            break
    #        bw = bw_l + (bw_h-bw_l)/2
    # Analyze Fully Connected Layers
    # Same parallel sweep as CONV, but only FC weights are quantized
    # (key position 2 of the label string).
    if has_fc:
        print 'Analyzing FC'
        print '\tbit width\t accuracy'
        jobs = []
        for i in range(3, 11):
            net_proto = read_from_prototxt(args.prototxt_quantized)
            quantize_net_fc(net_proto, net_param_IL, i, 0)
            write_to_prototxt(net_proto, './temp'+str(i)+'.prototxt')
            p = multiprocessing.Process(target=mAP_worker, args=('32-'+str(i)+'-32-32-32',
                                                                 './temp'+str(i)+'.prototxt',
                                                                 shared_dict))
            jobs.append(p)
            p.start()
        for proc in jobs:
            proc.join()
        for i in range(3, 11):
            print '\t{}bit:\t\t{}'.format(i,shared_dict['32-'+str(i)+'-32-32-32'])
        for i in range(3, 11):
            if shared_dict['32-'+str(i)+'-32-32-32'] > (full_ap - args.error_margin):
                bw_fc = i
                break;
    # Analyze Fully Connected Layers
    #if has_fc:
    #    print 'Analyzing FC'
    #    print '\tbit width\t accuracy'
    #    bw_h = 16
    #    bw_l = 0
    #    bw = 16
    #    while True:
    #        net_proto = read_from_prototxt(args.prototxt_quantized)
    #        quantize_net_fc(net, net_proto, net_param_IL, bw)
    #        write_to_prototxt(net_proto, './temp.prototxt')
    #        ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
    #        print '\t{}bit:\t\t{}'.format(bw,ap)
    #        if ap < (full_ap - args.error_margin):
    #            bw_l = bw
    #        else:
    #            bw_h = bw
    #            bw_fc = bw
    #        if bw_h - bw_l <=1:
    #            break
    #        bw = bw_l + (bw_h-bw_l)/2
    # Analyze input and output of layers
    #net_proto = read_from_prototxt(args.prototxt_quantized)
    #quantize_net_conv(net, net_proto, net_param_IL, bw_conv, -1)
    #quantize_net_deconv(net, net_proto, net_param_IL, bw_conv, -1)
    #quantize_net_fc(net, net_proto, net_param_IL, bw_fc, -1)
    #write_to_prototxt(net_proto, args.prototxt_quantized)
    # Activation integer lengths: compute in a subprocess and cache to
    # act_analysis.json, or reload a previous analysis if one was supplied.
    net_output_IL = manager.dict()
    net_input_IL = manager.dict()
    if args.act_analysis == None:
        print 'Analyzing network output IL'
        p = multiprocessing.Process(target=analyze_net_output_IL_worker,
                                    args=(net_output_IL, net_input_IL))
        p.start()
        p.join()
        with open('act_analysis.json', 'w') as outfile:
            # Manager dict proxies are not JSON-serializable; copy to plain dicts.
            act_analysis = dict()
            act_analysis['net_output_IL'] = dict()
            act_analysis['net_input_IL'] = dict()
            for t in net_output_IL.keys():
                act_analysis['net_output_IL'][t] = net_output_IL[t]
            for t in net_input_IL.keys():
                act_analysis['net_input_IL'][t] = net_input_IL[t]
            json.dump(act_analysis, outfile)
    else:
        print 'Loading network output IL'
        with open(args.act_analysis) as json_data:
            act_analysis = json.load(json_data)
            for t in act_analysis['net_output_IL'].keys():
                net_output_IL[t] = act_analysis['net_output_IL'][t]
            for t in act_analysis['net_input_IL'].keys():
                net_input_IL[t] = act_analysis['net_input_IL'][t]
    # Sweep activation (layer input/output) widths 3..10 bits.
    print 'Analyzing layer output'
    print '\tbit width\t accuracy'
    jobs = []
    for i in range(3, 11):
        net_proto = read_from_prototxt(args.prototxt_quantized)
        quantize_net_output(net_proto, net_output_IL, net_input_IL, i, 0)
        write_to_prototxt(net_proto, './temp'+str(i)+'.prototxt')
        p = multiprocessing.Process(target=mAP_worker, args=('32-32-'+str(i)+'-32-32',
                                                             './temp'+str(i)+'.prototxt',
                                                             shared_dict))
        jobs.append(p)
        p.start()
    for proc in jobs:
        proc.join()
    for i in range(3, 11):
        print '\t{}bit:\t\t{}'.format(i,shared_dict['32-32-'+str(i)+'-32-32'])
    for i in range(3, 11):
        if shared_dict['32-32-'+str(i)+'-32-32'] > (full_ap - args.error_margin):
            bw_output = i
            break;
    # Analyze input and output of layers
    #bw_h = 16
    #bw_l = 0
    #bw = 16
    #print 'Analyzing layer output'
    #print '\tbit width\t accuracy'
    #while True:
    #    net_proto = read_from_prototxt(args.prototxt_quantized)
    #    quantize_net_output(net, net_proto, net_output_IL, net_input_IL, bw)
    #    write_to_prototxt(net_proto, './temp.prototxt')
    #    ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
    #    print '\t{}bit:\t\t{}'.format(bw,ap)
    #    if ap < (full_ap - args.error_margin):
    #        bw_l = bw
    #    else:
    #        bw_h = bw
    #        bw_output = bw
    #    if bw_h - bw_l <=1:
    #        break
    #    bw = bw_l + (bw_h-bw_l)/2
    #Create 8-bit quantization model
    # Floor every chosen width at 8 bits (also covers the bw_* == 0 case
    # where no sweep value met the error margin).
    if bw_conv < 8:
        bw_conv = 8
    if bw_fc < 8:
        bw_fc = 8
    if bw_output < 8:
        bw_output = 8
    #Make Final Quantized Prototxt
    # Apply all chosen widths together and measure the combined mAP
    # ('DQ' = dynamically quantized field in the label string).
    print 'Final Quantization Testing'
    net_proto = read_from_prototxt(args.prototxt_quantized)
    quantize_net_conv(net_proto, net_param_IL, bw_conv, 0)
    quantize_net_deconv(net_proto, net_param_IL, bw_conv, 0)
    quantize_net_fc(net_proto, net_param_IL, bw_fc, 0)
    quantize_net_output(net_proto, net_output_IL, net_input_IL, bw_output, 0)
    write_to_prototxt(net_proto, './temp.prototxt')
    p = multiprocessing.Process(target=mAP_worker, args=('DQ-DQ-DQ-32-32', './temp.prototxt',
                                                         shared_dict))
    p.start()
    p.join()
    ap = shared_dict['DQ-DQ-DQ-32-32']
    # layer_mAP is the reference for the later adder/multiplier sweeps.
    layer_mAP = ap
    #ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
    print '----------------------------------------'
    print '{}bit CONV, {}bit FC, {}bit layer output'.format(bw_conv, bw_fc, bw_output)
    print 'Accuracy {}'.format(ap)
    print 'Dynamic fixed point net:'
    print '{}bit CONV and DECONV weights'.format(bw_conv)
    print '{}bit FC weights'.format(bw_fc)
    print '{}bit layer activations'.format(bw_output)
    print 'Please fine-tune'
    write_to_prototxt(net_proto, args.prototxt_quantized)
    print 'Quantized Model saved to', args.prototxt_quantized
    # Bit-accurate (BAC) stage: model the finite-width adders/multipliers too.
    print 'Create Bit-Accurate quantized prototxt'
    convert_net_to_qnet_BAC_analysis(args.prototxt_quantized, args.prototxt_quantized_BAC)
    net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
    print 'Loading Bit-Accurate quantized prototxt'
    #net_BAC = caffe.Net(args.prototxt_quantized_BAC, args.caffemodel, caffe.TEST)
    #print 'Analyzing network adder and multiplier'
    net_adder_IL = manager.dict()
    net_multiplier_IL = manager.dict()
    if args.accumulator_analysis == None:
        print 'Analyzing network adder and multiplier'
        p = multiprocessing.Process(target=analyze_net_adder_multiplier_IL_worker,
                                    args=(net_adder_IL, net_multiplier_IL))
        p.start()
        p.join()
        with open('accumulator_analysis.json', 'w') as outfile:
            accumulator_analysis = dict()
            accumulator_analysis['net_adder_IL'] = dict()
            accumulator_analysis['net_multiplier_IL'] = dict()
            for t in net_adder_IL.keys():
                accumulator_analysis['net_adder_IL'][t] = net_adder_IL[t]
            for t in net_multiplier_IL.keys():
                accumulator_analysis['net_multiplier_IL'][t] = net_multiplier_IL[t]
            json.dump(accumulator_analysis, outfile)
    else:
        print 'Loading network adder and multiplier analysis file'
        with open(args.accumulator_analysis) as json_data:
            accumulator_analysis = json.load(json_data)
            for t in accumulator_analysis['net_adder_IL'].keys():
                net_adder_IL[t] = accumulator_analysis['net_adder_IL'][t]
            for t in accumulator_analysis['net_multiplier_IL'].keys():
                net_multiplier_IL[t] = accumulator_analysis['net_multiplier_IL'][t]
    convert_net_to_qnet_BAC(args.prototxt_quantized, args.prototxt_quantized_BAC)
    # Sweep multiplier widths from the activation width upward; accept the
    # first width whose mAP is within 0.005 of the quantized-layer baseline.
    print 'Analyzing layer multiplier'
    print '\tbit width\t accuracy'
    jobs = []
    ap_after_multiplier = 0
    for i in range(bw_output, bw_output+8):
        net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
        quantize_net_multiplier(net_proto_BAC, net_multiplier_IL, i, 0)
        write_to_prototxt(net_proto_BAC, './temp'+str(i)+'.prototxt')
        p = multiprocessing.Process(target=mAP_worker, args=('32-32-32-32-'+str(i),
                                                             './temp'+str(i)+'.prototxt',
                                                             shared_dict))
        jobs.append(p)
        p.start()
    for proc in jobs:
        proc.join()
    for i in range(bw_output, bw_output+8):
        print '\t{}bit:\t\t{}'.format(i,shared_dict['32-32-32-32-'+str(i)])
    # NOTE(review): if no width passes, bw_multiplier stays 0 and the
    # quantize_net_multiplier call below runs with width 0 — confirm.
    for i in range(bw_output, bw_output+8):
        if shared_dict['32-32-32-32-'+str(i)] > (layer_mAP - 0.005):
            bw_multiplier = i
            ap_after_multiplier = shared_dict['32-32-32-32-'+str(i)]
            break;
    # Persist the chosen multiplier width into the BAC prototxt before
    # the adder sweep runs on top of it.
    net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
    quantize_net_multiplier(net_proto_BAC, net_multiplier_IL, bw_multiplier, 0)
    write_to_prototxt(net_proto_BAC, args.prototxt_quantized_BAC)
    #bw_h = 16
    #bw_l = 0
    #bw = 16
    #print 'Analyzing layer multiplier'
    #print '\tbit width\t accuracy'
    #while True:
    #    net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
    #    quantize_net_multiplier(net_BAC, net_proto_BAC, net_multiplier_IL, bw)
    #    write_to_prototxt(net_proto_BAC, './temp.prototxt')
    #    ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
    #    print '\t{}bit:\t\t{}'.format(bw,ap)
    #    if ap < (full_ap - args.error_margin):
    #        bw_l = bw
    #    else:
    #        bw_h = bw
    #        bw_multiplier = bw
    #    if bw_h - bw_l <=1:
    #        break
    #    bw = bw_l + (bw_h-bw_l)/2
    # Sweep adder widths the same way as multipliers.
    print 'Analyzing layer adder'
    print '\tbit width\t accuracy'
    jobs = []
    for i in range(bw_output, bw_output+8):
        net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
        quantize_net_adder(net_proto_BAC, net_adder_IL, i, 0)
        write_to_prototxt(net_proto_BAC, './temp'+str(i)+'.prototxt')
        p = multiprocessing.Process(target=mAP_worker, args=('32-32-32-'+str(i)+'-32',
                                                             './temp'+str(i)+'.prototxt',
                                                             shared_dict))
        jobs.append(p)
        p.start()
    for proc in jobs:
        proc.join()
    for i in range(bw_output, bw_output+8):
        print '\t{}bit:\t\t{}'.format(i,shared_dict['32-32-32-'+str(i)+'-32'])
    # Fallback width if no sweep value passes (beyond the swept range).
    bw_adder = bw_output+12
    for i in range(bw_output, bw_output+8):
        if shared_dict['32-32-32-'+str(i)+'-32'] > (layer_mAP - 0.005):
            bw_adder = i
            break;
    #bw_h = 16
    #bw_l = 0
    #bw = 16
    #print 'Analyzing layer adder'
    #print '\tbit width\t accuracy'
    #while True:
    #    net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
    #    quantize_net_adder(net_BAC, net_proto_BAC, net_adder_IL, bw)
    #    write_to_prototxt(net_proto_BAC, './temp.prototxt')
    #    ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
    #    print '\t{}bit:\t\t{}'.format(bw,ap)
    #    if ap < (full_ap - args.error_margin):
    #        bw_l = bw
    #    else:
    #        bw_h = bw
    #        bw_adder = bw
    #    if bw_h - bw_l <=1:
    #        break
    #    bw = bw_l + (bw_h-bw_l)/2
    # Rebuild the prototxt chain from scratch and apply every chosen width,
    # then evaluate the fully bit-accurate model once.
    print 'Create Final Bit-Accurate quantized prototxt'
    convert_net_to_qnet(args.prototxt, args.prototxt_quantized)
    convert_net_to_qnet_BAC(args.prototxt_quantized, args.prototxt_quantized_BAC)
    net_proto_final = read_from_prototxt(args.prototxt_quantized_BAC)
    print 'Loading Final Bit-Accurate quantized prototxt'
    #net_final = caffe.Net(args.prototxt_quantized_BAC, args.caffemodel, caffe.TEST)
    quantize_net_conv(net_proto_final, net_param_IL, bw_conv, 0)
    quantize_net_deconv(net_proto_final, net_param_IL, bw_conv, 0)
    quantize_net_fc(net_proto_final, net_param_IL, bw_fc, 0)
    quantize_net_output(net_proto_final, net_output_IL, net_input_IL, bw_output, 0)
    quantize_net_multiplier(net_proto_final, net_multiplier_IL, bw_multiplier, 0)
    quantize_net_adder(net_proto_final, net_adder_IL, bw_adder, 0)
    write_to_prototxt(net_proto_final, './temp_f.prototxt')
    p = multiprocessing.Process(target=mAP_worker, args=('DQ-DQ-DQ-DQ-DQ', './temp_f.prototxt',
                                                         shared_dict))
    p.start()
    p.join()
    ap = shared_dict['DQ-DQ-DQ-DQ-DQ']
    #ap = test_qnet('./temp_f.prototxt', args.caffemodel, imdb)
    print '----------------------------------------'
    print '{}bit adder, {}bit multiplier,'.format(bw_adder, bw_multiplier)
    print 'Accuracy {}'.format(ap)
    print 'Dynamic fixed point net:'
    print '{}bit CONV and DECONV weights'.format(bw_conv)
    print '{}bit FC weights'.format(bw_fc)
    print '{}bit layer activations'.format(bw_output)
    print '{}bit adder'.format(bw_adder)
    print '{}bit multiplier'.format(bw_multiplier)
    print 'Please fine-tune'
    write_to_prototxt(net_proto_final, args.prototxt_quantized_BAC)
    print 'Bit-Accurate Quantized Model saved to', args.prototxt_quantized_BAC
|
commands.py | ###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2009-2010,2015, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Includes wrappers for commands.
"""
import time
import getopt
import inspect
import threading
import multiprocessing #python2.6 or later!
try:
import resource
except ImportError: # Windows!
resource = None
from . import callbacks, conf, ircdb, ircmsgs, ircutils, log, \
utils, world
from .utils import minisix
from .i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization()
###
# Non-arg wrappers -- these just change the behavior of a command without
# changing the arguments given to it.
###
# Thread has to be a non-arg wrapper because by the time we're parsing and
# validating arguments, we're inside the function we'd want to thread.
def thread(f):
    """Makes sure a command spawns a thread when called."""
    def newf(self, irc, msg, args, *L, **kwargs):
        if not world.isMainThread():
            # Already off the main thread; just run the command inline.
            f(self, irc, msg, args, *L, **kwargs)
        else:
            t = callbacks.CommandThread(target=self._callCommand,
                                        args=(self.callingCommand,
                                              irc, msg, args) + tuple(L),
                                        kwargs=kwargs)
            t.start()
    return utils.python.changeFunctionName(newf, f.__name__, f.__doc__)
class ProcessTimeoutError(Exception):
    """Raised when a subprocess spawned by process() is killed because it
    exceeded its timeout."""
def _rlimit_min(a, b):
if a == resource.RLIM_INFINITY:
return b
elif b == resource.RLIM_INFINITY:
return a
else:
return min(soft, heap_size)
def process(f, *args, **kwargs):
    """Runs a function <f> in a subprocess.

    Several extra keyword arguments can be supplied.
    <pn>, the pluginname, and <cn>, the command name, are strings used to
    create the process name, for identification purposes.
    <timeout>, if supplied, limits the length of execution of target
    function to <timeout> seconds.
    <heap_size>, if supplied, limits the memory used by the target
    function."""
    timeout = kwargs.pop('timeout', None)
    heap_size = kwargs.pop('heap_size', None)
    if resource and heap_size is None:
        heap_size = resource.RLIM_INFINITY
    if world.disableMultiprocessing:
        # Inline execution path: pop pn/cn so they aren't passed to f.
        pn = kwargs.pop('pn', 'Unknown')
        cn = kwargs.pop('cn', 'unknown')
        try:
            return f(*args, **kwargs)
        except Exception as e:
            # NOTE(review): `raise e` re-raises the same exception; on
            # Python 2 this truncates the traceback — plain `raise` would not.
            raise e
    try:
        q = multiprocessing.Queue()
    except OSError:
        log.error('Using multiprocessing.Queue raised an OSError.\n'
                  'This is probably caused by your system denying semaphore\n'
                  'usage. You should run these two commands:\n'
                  '\tsudo rmdir /dev/shm\n'
                  '\tsudo ln -Tsf /{run,dev}/shm\n'
                  '(See https://github.com/travis-ci/travis-core/issues/187\n'
                  'for more information about this bug.)\n')
        raise
    def newf(f, q, *args, **kwargs):
        # Child-process wrapper: apply the heap limit, run f, and ship the
        # (raised?, value) pair back through the queue.
        if resource:
            rsrc = resource.RLIMIT_DATA
            (soft, hard) = resource.getrlimit(rsrc)
            soft = _rlimit_min(soft, heap_size)
            hard = _rlimit_min(hard, heap_size)
            resource.setrlimit(rsrc, (soft, hard))
        try:
            r = f(*args, **kwargs)
            q.put([False, r])
        except Exception as e:
            q.put([True, e])
    targetArgs = (f, q,) + args
    # Remaining kwargs (including pn/cn) go to CommandProcess, which
    # presumably consumes pn/cn for its process name — TODO confirm.
    p = callbacks.CommandProcess(target=newf,
                                 args=targetArgs, kwargs=kwargs)
    p.start()
    p.join(timeout)
    if p.is_alive():
        # Still running after the timeout: kill it and report.
        p.terminate()
        q.close()
        raise ProcessTimeoutError("%s aborted due to timeout." % (p.name,))
    try:
        raised, v = q.get(block=False)
    except minisix.queue.Empty:
        # Child produced no result (e.g. killed by the rlimit).
        return None
    finally:
        q.close()
    if raised:
        raise v
    else:
        return v
def regexp_wrapper(s, reobj, timeout, plugin_name, fcn_name):
    '''A convenient wrapper to stuff regexp search queries through a subprocess.
    This is used because specially-crafted regexps can use exponential time
    and hang the bot.'''
    def re_bool(subject, pattern):
        """Match objects can't cross the multiprocessing queue, so reduce
        the search result to a plain bool."""
        return pattern.search(subject) is not None
    try:
        return process(re_bool, s, reobj, timeout=timeout,
                       pn=plugin_name, cn=fcn_name)
    except ProcessTimeoutError:
        # Treat a runaway pattern as "no match information".
        return None
class UrlSnarfThread(world.SupyThread):
    """Daemon thread that runs a single URL-snarfing callback.

    The thread name records which URL is being snarfed, for debugging.
    """
    def __init__(self, *args, **kwargs):
        assert 'url' in kwargs
        kwargs['name'] = 'Thread #%s (for snarfing %s)' % \
                         (world.threadsSpawned, kwargs.pop('url'))
        super(UrlSnarfThread, self).__init__(*args, **kwargs)
        # Use the daemon attribute directly: Thread.setDaemon() is
        # deprecated since Python 3.10, and attribute assignment works on
        # every version this code supports.
        self.daemon = True
    def run(self):
        try:
            super(UrlSnarfThread, self).run()
        except utils.web.Error as e:
            # Web errors while snarfing are expected noise; log at debug.
            log.debug('Exception in urlSnarfer: %s', utils.exnToString(e))
class SnarfQueue(ircutils.FloodQueue):
    """Flood queue used to throttle repeated snarfs of the same URL,
    keyed per channel."""
    # Throttle window comes from the supybot.snarfThrottle config value.
    timeout = conf.supybot.snarfThrottle
    def key(self, channel):
        # One queue per channel.
        return channel
# Module-level singleton shared by SnarfIrc and urlSnarfer below.
_snarfed = SnarfQueue()
class SnarfIrc(object):
    """Transparent proxy around an Irc object that records the snarfed URL
    in the flood queue whenever a reply is actually sent."""
    def __init__(self, irc, channel, url):
        self.irc = irc
        self.url = url
        self.channel = channel
    def __getattr__(self, attr):
        # Everything we don't override falls through to the wrapped Irc.
        return getattr(self.irc, attr)
    def reply(self, *args, **kwargs):
        # Only successful replies count against the snarf throttle.
        _snarfed.enqueue(self.channel, self.url)
        return self.irc.reply(*args, **kwargs)
# This lock is used to serialize the calls to snarfers, so
# earlier snarfers are guaranteed to beat out later snarfers.
_snarfLock = threading.Lock()
def urlSnarfer(f):
    """Protects the snarfer from loops (with other bots) and whatnot."""
    def newf(self, irc, msg, match, *L, **kwargs):
        url = match.group(0)
        channel = msg.channel
        if not channel:
            # Don't snarf in private
            return
        if ircmsgs.isCtcp(msg) and not ircmsgs.isAction(msg):
            # Don't snarf CTCPs unless they are a /me
            return
        if ircdb.channels.getChannel(channel).lobotomized:
            self.log.debug('Not snarfing in %s: lobotomized.', channel)
            return
        if _snarfed.has(channel, url):
            self.log.info('Throttling snarf of %s in %s.', url, channel)
            return
        # Wrap irc so a successful reply registers in the throttle queue.
        irc = SnarfIrc(irc, channel, url)
        def doSnarf():
            _snarfLock.acquire()
            try:
                # This has to be *after* we've acquired the lock so we can be
                # sure that all previous urlSnarfers have already run to
                # completion.
                if msg.repliedTo:
                    self.log.debug('Not snarfing, msg is already repliedTo.')
                    return
                f(self, irc, msg, match, *L, **kwargs)
            finally:
                _snarfLock.release()
        # On the main thread, snarf asynchronously so we don't block the
        # bot; on any other thread, snarf inline.
        if threading.currentThread() is not world.mainThread:
            doSnarf()
        else:
            # NOTE(review): L is rebound but never used afterwards —
            # looks like dead code; confirm before removing.
            L = list(L)
            t = UrlSnarfThread(target=doSnarf, url=url)
            t.start()
    newf = utils.python.changeFunctionName(newf, f.__name__, f.__doc__)
    return newf
###
# Converters, which take irc, msg, args, and a state object, and build up the
# validated and converted args for the method in state.args.
###
# This is just so we can centralize this, since it may change.
def _int(s):
base = 10
if s.startswith('0x'):
base = 16
s = s[2:]
elif s.startswith('0b'):
base = 2
s = s[2:]
elif s.startswith('0') and len(s) > 1:
base = 8
s = s[1:]
try:
return int(s, base)
except ValueError:
if base == 10 and '.' not in s:
try:
return int(float(s))
except OverflowError:
raise ValueError('I don\'t understand numbers that large.')
else:
raise
def getInt(irc, msg, args, state, type=_('integer'), p=None):
    """Converter: parse args[0] as an integer (see _int), optionally check
    it with predicate p, and move it into state.args."""
    try:
        value = _int(args[0])
        if p is not None and not p(value):
            state.errorInvalid(type, args[0])
        state.args.append(value)
        del args[0]
    except ValueError:
        state.errorInvalid(type, args[0])
def getNonInt(irc, msg, args, state, type=_('non-integer value')):
    """Converter: accept args[0] only if it does NOT parse as an integer."""
    try:
        _int(args[0])
    except ValueError:
        # Not an integer — exactly what we want.
        state.args.append(args.pop(0))
    else:
        state.errorInvalid(type, args[0])
def getLong(irc, msg, args, state, type='long'):
    """Converter: like getInt, but coerce the result with minisix.long
    (a Python 2 long; plain int on Python 3)."""
    getInt(irc, msg, args, state, type)
    converted = minisix.long(state.args[-1])
    state.args[-1] = converted
def getFloat(irc, msg, args, state, type=_('floating point number')):
    """Converter: consume args[0] as a float."""
    try:
        state.args.append(float(args[0]))
    except ValueError:
        state.errorInvalid(type, args[0])
    else:
        del args[0]
def getPositiveInt(irc, msg, args, state, *L):
    """Converter: an integer strictly greater than zero."""
    getInt(irc, msg, args, state,
           p=lambda i: i > 0, type=_('positive integer'), *L)
def getNonNegativeInt(irc, msg, args, state, *L):
    """Converter: an integer greater than or equal to zero."""
    getInt(irc, msg, args, state,
           p=lambda i: i >= 0, type=_('non-negative integer'), *L)
def getIndex(irc, msg, args, state):
    """Converter: a 1-based index converted to 0-based; non-positive
    values are passed through unchanged."""
    getInt(irc, msg, args, state, type=_('index'))
    idx = state.args[-1]
    if idx > 0:
        state.args[-1] = idx - 1
def getId(irc, msg, args, state, kind=None):
    """Converter: a numeric id, optionally prefixed with '#'. `kind`
    customizes the error label (e.g. 'note id')."""
    if kind is not None and not kind.endswith('id'):
        label = kind + ' id'
    else:
        label = 'id'
    original = args[0]
    try:
        # Strip a leading '#' so '#12' parses as 12.
        args[0] = args[0].lstrip('#')
        getInt(irc, msg, args, state, type=label)
    except Exception:
        # Restore the untouched argument so callers see what the user typed.
        args[0] = original
        raise
def getExpiry(irc, msg, args, state):
    """Converter: seconds-from-now turned into an absolute epoch time;
    a zero expiry (meaning 'never') is left as 0."""
    now = int(time.time())
    try:
        delta = _int(args[0])
    except ValueError:
        state.errorInvalid(_('number of seconds'), args[0])
    else:
        state.args.append(delta + now if delta else delta)
        del args[0]
def getBoolean(irc, msg, args, state):
    """Converter: parse args[0] as a boolean via utils.str.toBool."""
    try:
        flag = utils.str.toBool(args[0])
    except ValueError:
        state.errorInvalid(_('boolean'), args[0])
    else:
        state.args.append(flag)
        del args[0]
def getNetworkIrc(irc, msg, args, state, errorIfNoMatch=False):
    """Converter: consume a network name and append the matching Irc
    object; fall back to the current irc (or raise) when nothing matches."""
    if args:
        wanted = args[0].lower()
        for otherIrc in world.ircs:
            if otherIrc.network.lower() == wanted:
                state.args.append(otherIrc)
                del args[0]
                return
    if errorIfNoMatch:
        raise callbacks.ArgumentError
    else:
        state.args.append(irc)
def getHaveVoice(irc, msg, args, state, action=_('do that')):
    """Converter guard: error out unless the bot is voiced on the channel."""
    getChannel(irc, msg, args, state)
    chan = state.channel
    if chan not in irc.state.channels:
        state.error(_('I\'m not even in %s.') % chan, Raise=True)
    if not irc.state.channels[chan].isVoice(irc.nick):
        state.error(_('I need to be voiced to %s.') % action, Raise=True)
def getHaveVoicePlus(irc, msg, args, state, action=_('do that')):
    """Converter guard: error out unless the bot is at least voiced
    (voice, halfop, op, owner, or protected) on the channel."""
    getChannel(irc, msg, args, state)
    chan = state.channel
    if chan not in irc.state.channels:
        state.error(_('I\'m not even in %s.') % chan, Raise=True)
    if not irc.state.channels[chan].isVoicePlus(irc.nick):
        # isOp includes owners and protected users
        state.error(_('I need to be at least voiced to %s.') % action,
                    Raise=True)
def getHaveHalfop(irc, msg, args, state, action=_('do that')):
    """Converter guard: error out unless the bot is halfopped on the channel."""
    getChannel(irc, msg, args, state)
    chan = state.channel
    if chan not in irc.state.channels:
        state.error(_('I\'m not even in %s.') % chan, Raise=True)
    if not irc.state.channels[chan].isHalfop(irc.nick):
        state.error(_('I need to be halfopped to %s.') % action, Raise=True)
def getHaveHalfopPlus(irc, msg, args, state, action=_('do that')):
    """Converter guard: error out unless the bot is at least halfopped
    (halfop, op, owner, or protected) on the channel."""
    getChannel(irc, msg, args, state)
    chan = state.channel
    if chan not in irc.state.channels:
        state.error(_('I\'m not even in %s.') % chan, Raise=True)
    if not irc.state.channels[chan].isHalfopPlus(irc.nick):
        # isOp includes owners and protected users
        state.error(_('I need to be at least halfopped to %s.') % action,
                    Raise=True)
def getHaveOp(irc, msg, args, state, action=_('do that')):
    """Converter guard: error out unless the bot is opped on the channel."""
    getChannel(irc, msg, args, state)
    chan = state.channel
    if chan not in irc.state.channels:
        state.error(_('I\'m not even in %s.') % chan, Raise=True)
    if not irc.state.channels[chan].isOp(irc.nick):
        state.error(_('I need to be opped to %s.') % action, Raise=True)
def validChannel(irc, msg, args, state):
    """Converter: accept args[0] only if it is a syntactically valid
    channel name for this network."""
    if not irc.isChannel(args[0]):
        state.errorInvalid(_('channel'), args[0])
    else:
        state.args.append(args.pop(0))
def getHostmask(irc, msg, args, state):
    """Converter: a hostmask, an extended ($...) mask when strict RFC
    mode is off, or a nick resolved to its hostmask via server state."""
    candidate = args[0]
    if ircutils.isUserHostmask(candidate) or \
            (not conf.supybot.protocols.irc.strictRfc() and
             candidate.startswith('$')):
        state.args.append(args.pop(0))
        return
    try:
        state.args.append(irc.state.nickToHostmask(candidate))
    except KeyError:
        state.errorInvalid(_('nick or hostmask'), candidate)
    else:
        del args[0]
def getBanmask(irc, msg, args, state):
    """Converter: a hostmask converted to a banmask styled according to
    the channel's configured banmask style."""
    getHostmask(irc, msg, args, state)
    getChannel(irc, msg, args, state)
    style = conf.supybot.protocols.irc.banmask
    state.args[-1] = style.makeBanmask(state.args[-1],
                                       channel=state.channel)
def getUser(irc, msg, args, state):
    """Converter: the registered user record of the message's sender."""
    try:
        sender = ircdb.users.getUser(msg.prefix)
    except KeyError:
        state.errorNotRegistered(Raise=True)
    else:
        state.args.append(sender)
def getOtherUser(irc, msg, args, state):
    """Converter: look up another registered user by name, or via a seen nick."""
    # Although ircdb.users.getUser could accept a hostmask, we're explicitly
    # excluding that from our interface with this check
    if ircutils.isUserHostmask(args[0]):
        state.errorNoUser(args[0])
    try:
        state.args.append(ircdb.users.getUser(args[0]))
        del args[0]
    except KeyError:
        try:
            # Fall back: resolve nick -> hostmask -> registered user.
            getHostmask(irc, msg, [args[0]], state)
            hostmask = state.args.pop()
            state.args.append(ircdb.users.getUser(hostmask))
            del args[0]
        except (KeyError, callbacks.Error):
            state.errorNoUser(name=args[0])
def _getRe(f):
    """Build a converter that greedily joins args until *f* parses them.

    *f* is a parser (e.g. perlReToPythonRe) raising ValueError on invalid
    input.  Space-separated arguments are concatenated until they form a
    valid expression; strings of 512+ characters are rejected.
    """
    def get(irc, msg, args, state, convert=True):
        original = args[:]
        s = args.pop(0)
        def isRe(s):
            try:
                f(s)
                return True
            except ValueError:
                return False
        try:
            # Keep consuming args until the accumulated string parses.
            while len(s) < 512 and not isRe(s):
                s += ' ' + args.pop(0)
            if len(s) < 512:
                if convert:
                    state.args.append(f(s))
                else:
                    state.args.append(s)
            else:
                raise ValueError
        except (ValueError, IndexError):
            # Restore the untouched arg list before reporting the error.
            args[:] = original
            state.errorInvalid(_('regular expression'), s)
    return get
# Converters built from _getRe for the three Perl-style regexp syntaxes.
getMatcher = _getRe(utils.str.perlReToPythonRe)
getMatcherMany = _getRe(utils.str.perlReToFindall)
getReplacer = _getRe(utils.str.perlReToReplacer)
def getNick(irc, msg, args, state):
    """Converter: pop a syntactically valid nick, honoring the server's NICKLEN."""
    if ircutils.isNick(args[0], conf.supybot.protocols.irc.strictRfc()):
        if 'nicklen' in irc.state.supported:
            if len(args[0]) > irc.state.supported['nicklen']:
                state.errorInvalid(_('nick'), args[0],
                                   _('That nick is too long for this server.'))
        state.args.append(args.pop(0))
    else:
        state.errorInvalid(_('nick'), args[0])
def getSeenNick(irc, msg, args, state, errmsg=None):
    """Converter: pop a nick only if the bot has seen a hostmask for it."""
    try:
        irc.state.nickToHostmask(args[0])
        state.args.append(args.pop(0))
    except KeyError:
        if errmsg is None:
            errmsg = _('I haven\'t seen %s.') % args[0]
        state.error(errmsg, Raise=True)
def getChannel(irc, msg, args, state):
    """Converter: determine the channel this command applies to.

    Resolution order: a channel already cached on *state*, an explicit
    channel argument, then the channel the message was sent to.  The result
    is cached on state.channel and appended to state.args.  Raises
    callbacks.ArgumentError when no channel is available.
    """
    if state.channel:
        return
    if args and irc.isChannel(args[0]):
        channel = args.pop(0)
    elif msg.channel:
        channel = msg.channel
    else:
        state.log.debug('Raising ArgumentError because there is no channel.')
        # BUG FIX: removed a stray debug print() that leaked the channel and
        # the full message object to stdout on every failed resolution.
        raise callbacks.ArgumentError
    state.channel = channel
    state.args.append(channel)
def getChannels(irc, msg, args, state):
    """Converter: append a list of channels from a comma-separated argument
    or, failing that, the current channel."""
    if args and all(map(irc.isChannel, args[0].split(','))):
        channels = args.pop(0).split(',')
    elif msg.channel:
        channels = [msg.channel]
    else:
        state.log.debug('Raising ArgumentError because there is no channel.')
        raise callbacks.ArgumentError
    state.args.append(channels)
def getChannelDb(irc, msg, args, state, **kwargs):
    """Converter: resolve the channel whose database should be used.

    Follows supybot.databases.plugins.channelSpecific links; when databases
    are not channel-specific, falls back to the globally configured link
    channel (respecting its allow flag).
    """
    channelSpecific = conf.supybot.databases.plugins.channelSpecific
    try:
        getChannel(irc, msg, args, state, **kwargs)
        channel = channelSpecific.getChannelLink(state.channel)
        state.channel = channel
        state.args[-1] = channel
    except (callbacks.ArgumentError, IndexError):
        if channelSpecific():
            # Databases are channel-specific: a channel is mandatory.
            raise
        channel = channelSpecific.link()
        if not conf.get(channelSpecific.link.allow, channel):
            log.warning('channelSpecific.link is globally set to %s, but '
                        '%s disallowed linking to its db.', channel, channel)
            raise
        else:
            channel = channelSpecific.getChannelLink(channel)
            state.channel = channel
            state.args.append(channel)
def inChannel(irc, msg, args, state):
    """Converter: error out unless the bot is present in the target channel."""
    getChannel(irc, msg, args, state)
    if state.channel not in irc.state.channels:
        state.error(_('I\'m not in %s.') % state.channel, Raise=True)
def onlyInChannel(irc, msg, args, state):
    """Converter: require the command to be given in a channel the bot is in."""
    if not (msg.channel and msg.channel in irc.state.channels):
        state.error(_('This command may only be given in a channel that I am '
                      'in.'), Raise=True)
    else:
        state.channel = msg.channel
        state.args.append(state.channel)
def callerInGivenChannel(irc, msg, args, state):
    """Converter: pop a channel argument; the *caller* must be present in it."""
    channel = args[0]
    if irc.isChannel(channel):
        if channel in irc.state.channels:
            if msg.nick in irc.state.channels[channel].users:
                state.args.append(args.pop(0))
            else:
                state.error(_('You must be in %s.') % channel, Raise=True)
        else:
            state.error(_('I\'m not in %s.') % channel, Raise=True)
    else:
        state.errorInvalid(_('channel'), args[0])
def nickInChannel(irc, msg, args, state):
    """Converter: pop a nick that is present in the target channel."""
    # inChannel appends the channel; save/restore args so only the nick lands.
    originalArgs = state.args[:]
    inChannel(irc, msg, args, state)
    state.args = originalArgs
    if args[0] not in irc.state.channels[state.channel].users:
        state.error(_('%s is not in %s.') % (args[0], state.channel), Raise=True)
    state.args.append(args.pop(0))
def getChannelOrNone(irc, msg, args, state):
    """Converter: like getChannel, but append None instead of erroring."""
    try:
        getChannel(irc, msg, args, state)
    except callbacks.ArgumentError:
        state.args.append(None)
def getChannelOrGlobal(irc, msg, args, state):
    """Converter: like getChannel, but also accepts the literal 'global'.

    When 'global' is given, state.channel is deliberately left unset so the
    value applies network-wide rather than to a single channel.
    """
    if args and args[0] == 'global':
        # BUG FIX: the popped value was assigned and then immediately
        # overwritten; just discard the literal instead.
        args.pop(0)
        channel = 'global'
    elif args and irc.isChannel(args[0]):
        channel = args.pop(0)
        state.channel = channel
    elif msg.channel:
        channel = msg.channel
        state.channel = channel
    else:
        state.log.debug('Raising ArgumentError because there is no channel.')
        raise callbacks.ArgumentError
    state.args.append(channel)
def checkChannelCapability(irc, msg, args, state, cap):
    """Converter: require the caller to have channel capability *cap*."""
    getChannel(irc, msg, args, state)
    cap = ircdb.canonicalCapability(cap)
    cap = ircdb.makeChannelCapability(state.channel, cap)
    if not ircdb.checkCapability(msg.prefix, cap):
        state.errorNoCapability(cap, Raise=True)
def getOp(irc, msg, args, state):
    """Converter: require the caller's channel 'op' capability."""
    checkChannelCapability(irc, msg, args, state, 'op')
def getHalfop(irc, msg, args, state):
    """Converter: require the caller's channel 'halfop' capability."""
    checkChannelCapability(irc, msg, args, state, 'halfop')
def getVoice(irc, msg, args, state):
    """Converter: require the caller's channel 'voice' capability."""
    checkChannelCapability(irc, msg, args, state, 'voice')
def getLowered(irc, msg, args, state):
    """Converter: pop the next argument, lowercased per IRC casemapping."""
    state.args.append(ircutils.toLower(args.pop(0)))
def getSomething(irc, msg, args, state, errorMsg=None, p=None):
    """Converter: pop a non-empty argument that satisfies optional predicate *p*."""
    if p is None:
        p = lambda _: True
    if not args[0] or not p(args[0]):
        if errorMsg is None:
            errorMsg = _('You must not give the empty string as an argument.')
        state.error(errorMsg, Raise=True)
    else:
        state.args.append(args.pop(0))
def getSomethingNoSpaces(irc, msg, args, state, *L):
    """Converter: like getSomething, but reject arguments containing spaces."""
    def p(s):
        # True iff s contains no whitespace-separated parts beyond the first.
        return len(s.split(None, 1)) == 1
    L = L or [_('You must not give a string containing spaces as an argument.')]
    getSomething(irc, msg, args, state, p=p, *L)
def private(irc, msg, args, state):
    """Converter: require the command to be sent in private, not in a channel."""
    if msg.channel:
        state.errorRequiresPrivacy(Raise=True)
def public(irc, msg, args, state, errmsg=None):
    """Converter: require the command to be sent in a channel."""
    if not msg.channel:
        if errmsg is None:
            errmsg = _('This message must be sent in a channel.')
        state.error(errmsg, Raise=True)
def checkCapability(irc, msg, args, state, cap):
    """Converter: require the caller to have global capability *cap*."""
    cap = ircdb.canonicalCapability(cap)
    if not ircdb.checkCapability(msg.prefix, cap):
        state.errorNoCapability(cap, Raise=True)
def checkCapabilityButIgnoreOwner(irc, msg, args, state, cap):
    """Converter: like checkCapability, but the owner gets no automatic pass."""
    cap = ircdb.canonicalCapability(cap)
    if not ircdb.checkCapability(msg.prefix, cap, ignoreOwner=True):
        state.errorNoCapability(cap, Raise=True)
def owner(irc, msg, args, state):
    """Converter: require the 'owner' capability."""
    checkCapability(irc, msg, args, state, 'owner')
def admin(irc, msg, args, state):
    """Converter: require the 'admin' capability."""
    checkCapability(irc, msg, args, state, 'admin')
def anything(irc, msg, args, state):
    """Converter: pop the next argument unconditionally."""
    state.args.append(args.pop(0))
def getGlob(irc, msg, args, state):
    """Converter: pop the next argument as a glob pattern.

    A plain string with no wildcard characters is wrapped in '*'s so it
    matches as a substring.
    """
    pattern = args.pop(0)
    if not any(wildcard in pattern for wildcard in '*?'):
        pattern = '*%s*' % pattern
    state.args.append(pattern)
def getUrl(irc, msg, args, state):
    """Converter: pop the next argument if it looks like a URL."""
    if utils.web.urlRe.match(args[0]):
        state.args.append(args.pop(0))
    else:
        state.errorInvalid(_('url'), args[0])
def getEmail(irc, msg, args, state):
    """Converter: pop the next argument if it looks like an email address."""
    if utils.net.emailRe.match(args[0]):
        state.args.append(args.pop(0))
    else:
        state.errorInvalid(_('email'), args[0])
def getHttpUrl(irc, msg, args, state):
    """Converter: pop an HTTP URL, prepending 'http://' if that makes it valid."""
    if utils.web.httpUrlRe.match(args[0]):
        state.args.append(args.pop(0))
    elif utils.web.httpUrlRe.match('http://' + args[0]):
        state.args.append('http://' + args.pop(0))
    else:
        state.errorInvalid(_('http url'), args[0])
def getNow(irc, msg, args, state):
    """Converter: append the current Unix timestamp (consumes no argument)."""
    state.args.append(int(time.time()))
def getCommandName(irc, msg, args, state):
    """Converter: pop a single-word argument as a canonicalized command name."""
    if ' ' in args[0]:
        state.errorInvalid(_('command name'), args[0])
    else:
        state.args.append(callbacks.canonicalName(args.pop(0)))
def getIp(irc, msg, args, state):
    """Converter: pop the next argument if it is a valid IP address."""
    if utils.net.isIP(args[0]):
        state.args.append(args.pop(0))
    else:
        state.errorInvalid(_('ip'), args[0])
def getLetter(irc, msg, args, state):
    """Converter: pop the next argument if it is exactly one character long."""
    if len(args[0]) != 1:
        state.errorInvalid(_('letter'), args[0])
    else:
        state.args.append(args.pop(0))
def getMatch(irc, msg, args, state, regexp, errmsg):
    """Converter: pop the next argument if *regexp* matches it.

    The match object (not the raw string) is appended to state.args; on
    failure, *errmsg* is raised through state.error.
    """
    match = regexp.search(args[0])
    if match is None:
        state.error(errmsg, Raise=True)
    else:
        state.args.append(match)
        del args[0]
def getLiteral(irc, msg, args, state, literals, errmsg=None):
    """Converter: pop an argument that is one of *literals* (abbreviations allowed).

    The canonical (unabbreviated) literal is what gets appended.
    """
    # ??? Should we allow abbreviations?
    if isinstance(literals, minisix.string_types):
        literals = (literals,)
    abbrevs = utils.abbrev(literals)
    if args[0] in abbrevs:
        state.args.append(abbrevs[args.pop(0)])
    elif errmsg is not None:
        state.error(errmsg, Raise=True)
    else:
        raise callbacks.ArgumentError
def getTo(irc, msg, args, state):
    """Converter: silently discard a leading 'to' keyword (case-insensitive)."""
    head = args[0]
    if head.lower() == 'to':
        del args[0]
def getPlugin(irc, msg, args, state, require=True):
    """Converter: pop a plugin name, appending the loaded callback (or None)."""
    cb = irc.getCallback(args[0])
    if cb is not None:
        state.args.append(cb)
        del args[0]
    elif require:
        state.errorInvalid(_('plugin'), args[0])
    else:
        # Plugin not loaded and not required: append a placeholder.
        state.args.append(None)
def getIrcColor(irc, msg, args, state):
    """Converter: pop a mIRC color name, appending its numeric color code."""
    if args[0] in ircutils.mircColors:
        state.args.append(ircutils.mircColors[args.pop(0)])
    else:
        # NOTE(review): unlike sibling converters, the offending value is not
        # passed to errorInvalid here -- confirm whether that is intentional.
        state.errorInvalid(_('irc color'))
def getText(irc, msg, args, state):
    """Converter: consume every remaining argument as one space-joined string.

    Raises IndexError (caught by the context machinery) when no arguments
    remain.
    """
    if not args:
        raise IndexError
    state.args.append(' '.join(args))
    del args[:]
# Registry mapping converter names (as used in wrap() specs) to the converter
# functions above; looked up case-insensitively via IrcDict.
wrappers = ircutils.IrcDict({
    'admin': admin,
    'anything': anything,
    'banmask': getBanmask,
    'boolean': getBoolean,
    'callerInGivenChannel': callerInGivenChannel,
    'isGranted': getHaveHalfopPlus, # Backward compatibility
    'capability': getSomethingNoSpaces,
    'channel': getChannel,
    'channels': getChannels,
    'channelOrGlobal': getChannelOrGlobal,
    'channelDb': getChannelDb,
    'checkCapability': checkCapability,
    'checkCapabilityButIgnoreOwner': checkCapabilityButIgnoreOwner,
    'checkChannelCapability': checkChannelCapability,
    'color': getIrcColor,
    'commandName': getCommandName,
    'email': getEmail,
    'expiry': getExpiry,
    'filename': getSomething, # XXX Check for validity.
    'float': getFloat,
    'glob': getGlob,
    'halfop': getHalfop,
    'haveHalfop': getHaveHalfop,
    'haveHalfop+': getHaveHalfopPlus,
    'haveOp': getHaveOp,
    'haveOp+': getHaveOp, # We don't handle modes greater than op.
    'haveVoice': getHaveVoice,
    'haveVoice+': getHaveVoicePlus,
    'hostmask': getHostmask,
    'httpUrl': getHttpUrl,
    'id': getId,
    'inChannel': inChannel,
    'index': getIndex,
    'int': getInt,
    'ip': getIp,
    'letter': getLetter,
    'literal': getLiteral,
    'long': getLong,
    'lowered': getLowered,
    'matches': getMatch,
    'networkIrc': getNetworkIrc,
    'nick': getNick,
    'nickInChannel': nickInChannel,
    'nonInt': getNonInt,
    'nonNegativeInt': getNonNegativeInt,
    'now': getNow,
    'onlyInChannel': onlyInChannel,
    'op': getOp,
    'otherUser': getOtherUser,
    'owner': owner,
    'plugin': getPlugin,
    'positiveInt': getPositiveInt,
    'private': private,
    'public': public,
    'regexpMatcher': getMatcher,
    'regexpMatcherMany': getMatcherMany,
    'regexpReplacer': getReplacer,
    'seenNick': getSeenNick,
    'something': getSomething,
    'somethingWithoutSpaces': getSomethingNoSpaces,
    'text': getText,
    'to': getTo,
    'url': getUrl,
    'user': getUser,
    'validChannel': validChannel,
    'voice': getVoice,
    })
def addConverter(name, wrapper):
    """Register a new converter function under *name* (plugins may call this)."""
    wrappers[name] = wrapper
class UnknownConverter(KeyError):
    """Raised by getConverter when a converter name is not registered."""
    pass
def getConverter(name):
    """Return the converter registered as *name*, raising UnknownConverter."""
    try:
        return wrappers[name]
    except KeyError as e:
        raise UnknownConverter(str(e))
def callConverter(name, irc, msg, args, state, *L):
    """Look up converter *name* and invoke it with extra args *L*."""
    getConverter(name)(irc, msg, args, state, *L)
###
# Contexts. These determine what the nature of conversions is; whether they're
# defaulted, or many of them are allowed, etc. Contexts should be reusable;
# i.e., they should not maintain state between calls.
###
def contextify(spec):
    """Return *spec* unchanged if it is already a context; otherwise wrap it."""
    return spec if isinstance(spec, context) else context(spec)
def setDefault(state, default):
    """Append *default* to state.args, calling it first when it is callable."""
    value = default() if callable(default) else default
    state.args.append(value)
class context(object):
    """Compiled wrapper around a converter spec.

    A spec may be: a tuple ('name', extra_args...), a bare converter name,
    None (meaning 'anything'), or an existing context instance.  Calling a
    context runs its converter against (irc, msg, args, state).
    """
    def __init__(self, spec):
        self.args = ()
        self.spec = spec # for repr
        if isinstance(spec, tuple):
            assert spec, 'tuple spec must not be empty.'
            self.args = spec[1:]
            self.converter = getConverter(spec[0])
        elif spec is None:
            self.converter = getConverter('anything')
        elif isinstance(spec, minisix.string_types):
            self.args = ()
            self.converter = getConverter(spec)
        else:
            assert isinstance(spec, context)
            self.converter = spec
    def __call__(self, irc, msg, args, state):
        log.debug('args before %r: %r', self, args)
        self.converter(irc, msg, args, state, *self.args)
        log.debug('args after %r: %r', self, args)
    def __repr__(self):
        return '<%s for %s>' % (self.__class__.__name__, self.spec)
class rest(context):
    """Context that joins all remaining args into one string before converting."""
    def __call__(self, irc, msg, args, state):
        if args:
            original = args[:]
            args[:] = [' '.join(args)]
            try:
                super(rest, self).__call__(irc, msg, args, state)
            except Exception:
                # Restore the original argument list on any failure.
                args[:] = original
        else:
            raise IndexError
# additional means: Look for this (and make sure it's of this type). If
# there are no arguments for us to check, then use our default.
class additional(context):
    """Context for an optional trailing argument with a default value."""
    def __init__(self, spec, default=None):
        self.__parent = super(additional, self)
        self.__parent.__init__(spec)
        self.default = default
    def __call__(self, irc, msg, args, state):
        try:
            self.__parent.__call__(irc, msg, args, state)
        except IndexError:
            # No argument left at all: fall back to the default.
            log.debug('Got IndexError, returning default.')
            setDefault(state, self.default)
# optional means: Look for this, but if it's not the type I'm expecting or
# there are no arguments for us to check, then use the default value.
class optional(additional):
    """Like additional, but also defaults on conversion errors (not just absence)."""
    def __call__(self, irc, msg, args, state):
        try:
            super(optional, self).__call__(irc, msg, args, state)
        except (callbacks.ArgumentError, callbacks.Error) as e:
            log.debug('Got %s, returning default.', utils.exnToString(e))
            # Clear the error flag set by state.error* so the command still runs.
            state.errored = False
            setDefault(state, self.default)
class any(context):
    """Context that applies its converter to zero or more remaining args.

    All converted values are collected (via a scratch state) and appended to
    state.args as a single list.
    """
    def __init__(self, spec, continueOnError=False):
        self.__parent = super(any, self)
        self.__parent.__init__(spec)
        self.continueOnError = continueOnError
    def __call__(self, irc, msg, args, state):
        # Convert into a scratch copy of state so results can be grouped.
        st = state.essence()
        try:
            while args:
                self.__parent.__call__(irc, msg, args, st)
        except IndexError:
            pass
        except (callbacks.ArgumentError, callbacks.Error) as e:
            if not self.continueOnError:
                raise
            else:
                log.debug('Got %s, returning default.', utils.exnToString(e))
                pass
        state.args.append(st.args)
class many(any):
    """Like any, but requires at least one successfully converted argument."""
    def __call__(self, irc, msg, args, state):
        super(many, self).__call__(irc, msg, args, state)
        if not state.args[-1]:
            # Empty result list: undo the append and signal a usage error.
            state.args.pop()
            raise callbacks.ArgumentError
class first(context):
    """Context that tries several specs in order, using the first that succeeds.

    An optional 'default' keyword is appended when every spec fails;
    otherwise the last exception is re-raised.
    """
    def __init__(self, *specs, **kw):
        if 'default' in kw:
            self.default = kw.pop('default')
        assert not kw, 'Bad kwargs for first.__init__'
        self.spec = specs # for __repr__
        self.specs = list(map(contextify, specs))
    def __call__(self, irc, msg, args, state):
        errored = False
        for spec in self.specs:
            try:
                spec(irc, msg, args, state)
                return
            except Exception as e:
                e2 = e # 'e' is local.
                # Remember the error flag but clear it so the next spec runs.
                errored = state.errored
                state.errored = False
                continue
        if hasattr(self, 'default'):
            state.args.append(self.default)
        else:
            state.errored = errored
            raise e2
class reverse(context):
    """Context that runs its converter against the argument list reversed.

    The list is flipped in place, converted, then flipped back so later
    contexts see the original ordering of whatever remains.
    """
    def __call__(self, irc, msg, args, state):
        args.reverse()
        super(reverse, self).__call__(irc, msg, args, state)
        args.reverse()
class commalist(context):
    """Context that converts a comma-separated list of values.

    Consumes arguments as long as each ends with a trailing comma, splits
    them on commas, converts each part, and appends the list of results.
    """
    def __call__(self, irc, msg, args, state):
        original = args[:]
        st = state.essence()
        trailingComma = True
        try:
            while trailingComma:
                arg = args.pop(0)
                if not arg.endswith(','):
                    trailingComma = False
                for part in arg.split(','):
                    if part: # trailing commas
                        super(commalist, self).__call__(irc, msg, [part], st)
            state.args.append(st.args)
        except Exception:
            # Restore the untouched argument list before propagating.
            args[:] = original
            raise
class getopts(context):
    """Parse getopt-style options out of the argument list.

    *getopts* maps option names to converter specs.  The empty string
    indicates that no argument is taken; None indicates that there is no
    converter for the argument.  Single-letter names are also registered as
    short options.  The parsed [(name, value)] list is appended to
    state.args and the non-option remainder replaces *args*.
    """
    def __init__(self, getopts):
        self.spec = getopts # for repr
        self.getopts = {}
        self.getoptL = []   # long-option spec for getopt.getopt
        self.getoptLs = ''  # short-option spec for getopt.getopt
        for (name, spec) in getopts.items():
            if spec == '':
                # Flag option: takes no argument.
                # (Cleanup: the dict assignment was duplicated in each branch.)
                if len(name) == 1:
                    self.getoptLs += name
                self.getoptL.append(name)
                self.getopts[name] = None
            else:
                # Option taking an argument (':'/'=' in getopt syntax).
                if len(name) == 1:
                    self.getoptLs += name + ':'
                self.getoptL.append(name + '=')
                self.getopts[name] = contextify(spec)
        log.debug('getopts: %r', self.getopts)
        log.debug('getoptL: %r', self.getoptL)
    def __call__(self, irc, msg, args, state):
        log.debug('args before %r: %r', self, args)
        (optlist, rest) = getopt.getopt(args, self.getoptLs, self.getoptL)
        getopts = []
        for (opt, arg) in optlist:
            if opt.startswith('--'):
                opt = opt[2:] # Strip --
            else:
                opt = opt[1:]
            log.debug('opt: %r, arg: %r', opt, arg)
            context = self.getopts[opt]
            if context is not None:
                # Run the option's converter in a scratch state so exactly
                # one converted value is produced.
                st = state.essence()
                context(irc, msg, [arg], st)
                assert len(st.args) == 1
                getopts.append((opt, st.args[0]))
            else:
                getopts.append((opt, True))
        state.args.append(getopts)
        args[:] = rest
        log.debug('args after %r: %r', self, args)
###
# This is our state object, passed to converters along with irc, msg, and args.
###
class State(object):
    """Mutable state threaded through converters along with irc, msg, args.

    Attributes:
      args    -- positional values accumulated for the command function
      kwargs  -- keyword values accumulated for the command function
      types   -- remaining contexts to apply
      channel -- channel cached by getChannel (None until resolved)
      errored -- True once any error* method has been invoked
    """
    log = log
    def __init__(self, types):
        self.args = []
        self.kwargs = {}
        self.types = types
        self.channel = None
        self.errored = False
    def __getattr__(self, attr):
        # Proxy error*/reply* helpers to the current dynamic irc object,
        # flagging that an error was reported.
        if attr.startswith('error'):
            self.errored = True
            return getattr(dynamic.irc, attr)
        else:
            raise AttributeError(attr)
    def essence(self):
        """Return a copy of this state with fresh (empty) args/kwargs."""
        st = State(self.types)
        for (attr, value) in self.__dict__.items():
            if attr not in ('args', 'kwargs'):
                setattr(st, attr, value)
        return st
    def __repr__(self):
        return '%s(args=%r, kwargs=%r, channel=%r)' % (self.__class__.__name__,
                                                       self.args, self.kwargs,
                                                       self.channel)
###
# This is a compiled Spec object.
###
class Spec(object):
    """A compiled argument specification: an ordered list of contexts.

    Calling a Spec applies each context in turn to the argument list,
    returning the populated State or raising callbacks.ArgumentError.
    """
    def _state(self, types, attrs={}):
        # NOTE: the mutable default is safe here -- attrs is only read.
        st = State(types)
        st.__dict__.update(attrs)
        st.allowExtra = self.allowExtra
        return st
    def __init__(self, types, allowExtra=False):
        self.types = types
        self.allowExtra = allowExtra
        # Compile every raw spec into a context object, in place.
        utils.seq.mapinto(contextify, self.types)
    def __call__(self, irc, msg, args, stateAttrs={}):
        state = self._state(self.types[:], stateAttrs)
        while state.types:
            context = state.types.pop(0)
            try:
                context(irc, msg, args, state)
            except IndexError:
                raise callbacks.ArgumentError
        if args and not state.allowExtra:
            # Leftover arguments are a usage error unless explicitly allowed.
            log.debug('args and not self.allowExtra: %r', args)
            raise callbacks.ArgumentError
        return state
def _wrap(f, specList=[], name=None, checkDoc=True, **kw):
    """Wrap command function *f* with argument conversion per *specList*.

    Returns a new function (renamed to *name* or f's name) that compiles
    and applies the Spec, then calls *f* with the converted arguments.
    NOTE: the mutable default specList is only read, never mutated.
    """
    name = name or f.__name__
    assert (not checkDoc) or (hasattr(f, '__doc__') and f.__doc__), \
           'Command %r has no docstring.' % name
    spec = Spec(specList, **kw)
    def newf(self, irc, msg, args, **kwargs):
        state = spec(irc, msg, args, stateAttrs={'cb': self, 'log': self.log})
        self.log.debug('State before call: %s', state)
        if state.errored:
            self.log.debug('Refusing to call %s due to state.errored.', f)
        else:
            try:
                f(self, irc, msg, args, *state.args, **state.kwargs)
            except TypeError:
                # Most likely a spec/function-signature mismatch; log enough
                # detail to diagnose before re-raising.
                self.log.error('Spec: %s', specList)
                self.log.error('Received args: %s', args)
                code = f.__code__
                funcArgs = inspect.getargs(code)[0][len(self.commandArgs):]
                self.log.error('Extra args: %s', funcArgs)
                self.log.debug('Make sure you did not wrap a wrapped '
                               'function ;)')
                raise
    newf2 = utils.python.changeFunctionName(newf, name, f.__doc__)
    newf2.__module__ = f.__module__
    return internationalizeDocstring(newf2)
def wrap(f, *args, **kwargs):
    """Public entry point around _wrap, usable directly or as a decorator."""
    if callable(f):
        # Old-style call OR decorator syntax with no converter.
        # f is the command.
        return _wrap(f, *args, **kwargs)
    else:
        # Call with the Python decorator syntax
        assert isinstance(f, list) or isinstance(f, tuple)
        specList = f
        def decorator(f):
            return _wrap(f, specList, *args, **kwargs)
        return decorator
wrap.__doc__ = """Useful wrapper for plugin commands.
Valid converters are: %s.
:param f: A command, taking (self, irc, msg, args, ...) as arguments
:param specList: A list of converters and contexts""" % \
', '.join(sorted(wrappers.keys()))
# Public API of this module.
__all__ = [
    # Contexts.
    'any', 'many',
    'optional', 'additional',
    'rest', 'getopts',
    'first', 'reverse',
    'commalist',
    # Converter helpers.
    'getConverter', 'addConverter', 'callConverter',
    # Decorators.
    'urlSnarfer', 'thread',
    # Functions.
    'wrap', 'process', 'regexp_wrapper',
    # Stuff for testing.
    'Spec',
    ]
# This doesn't work. Suck.
## if world.testing:
## __all__.append('Spec')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
mis_game.py | from mis_util import *
from threading import Thread
import socket
from math import floor, sqrt
def clamp_and_bound(pos, dx, dy, bounds):
    """Clamp a proposed movement (dx, dy) so *pos* stays inside *bounds*.

    bounds is (min_x, max_x, min_y, max_y).  If a delta would carry the
    position past a bound, it is reduced so the position lands exactly on
    that bound; a position already outside the bounds is not pulled back in
    (the delta becomes 0).  Returns the clamped (dx, dy) tuple.

    Raises ValueError if bounds does not have exactly four entries.
    """
    if len(bounds) != 4:
        raise ValueError
    min_x, max_x, min_y, max_y = bounds
    #should dx push the position out of bounds, reduce dx to push to *exactly* the bound
    if pos[0] + dx < min_x:
        dx = min(0, min_x - pos[0])
    elif pos[0] + dx > max_x:
        # BUG FIX: was min(0, ...), which zeroed every legal move toward the
        # upper bound instead of landing exactly on it.
        dx = max(0, max_x - pos[0])
    #should dy ' '
    if pos[1] + dy < min_y:
        dy = min(0, min_y - pos[1])
    elif pos[1] + dy > max_y:
        # BUG FIX: was min(0, max_y - pos[0]) -- wrong sign AND wrong axis
        # (used the x coordinate for the y clamp).
        dy = max(0, max_y - pos[1])
    return (dx, dy)
def detect_and_avoid_collisions(pos, dx, dy, boxes):
    """Zero out movement components that would collide with any box.

    Each box must provide halo_collide(x, y) returning a falsy value or a
    4-sequence of per-side collision flags (presumably in adjacent order
    around the box -- TODO confirm against mis_util).  Returns the adjusted
    (dx, dy) pair.
    """
    for box in boxes:
        #if moving in the x direction causes a collision on the left and right...
        tent_x = pos[0] + dx
        tent_y = pos[1]
        if collide := box.halo_collide(tent_x,tent_y):
            if collide[0] or collide[2]:
                dx = 0
        #if moving in the y direction causes a collision on the top or bottom...
        tent_x = pos[0]
        tent_y = pos[1] + dy
        if collide := box.halo_collide(tent_x,tent_y):
            if collide[1] or collide[3]:
                dy = 0
        #if moving diagonally causes a collision on the corner...
        tent_x = pos[0] + dx
        tent_y = pos[1] + dy
        if collide := box.halo_collide(tent_x,tent_y):
            # Two adjacent side flags set together means a corner hit: stop.
            if any(collide[i] and collide[(i+1)%4] for i in range(4)):
                dx = 0
                dy = 0
    return dx, dy
def mloop(ref, me, others, mypos, myID):
    """Per-client server loop: read input frames and broadcast movement.

    ref    -- owning MIS_GAME (supplies obj_map for collision checks)
    me     -- this client's socket
    others -- the remaining clients' sockets (for broadcasting)
    mypos  -- mutable [x, y] position list shared with MIS_GAME
    myID   -- integer id echoed back in every movement frame

    Frame layout: 4-byte little-endian signed ints (opcode, then payload).
    """
    me.send(myID.to_bytes(4,'little',signed=True))
    #TODO: RATELIMIT SPEED OF LOOP TO 1/4 SECOND
    xspeed = 5
    yspeed = 5
    INV2 = 1 / sqrt(2)
    while True:
        try:
            frame = me.recv(64)
        except socket.timeout:
            continue
        opcode = int.from_bytes(frame[:4],'little',signed=True)
        if opcode == MOVEMENT:
            #extract
            dx = int.from_bytes(frame[4:8] ,'little',signed=True)
            dy = int.from_bytes(frame[8:12],'little',signed=True)
            #calculate
            # Scale diagonal movement by 1/sqrt(2) so speed stays uniform.
            diag = 1 if not (dx and dy) else INV2
            dx_p = floor(xspeed * dx * diag)
            dy_p = floor(yspeed * dy * diag)
            dx_p, dy_p = detect_and_avoid_collisions(mypos,dx_p,dy_p,ref.obj_map)
            #dx_p, dy_p = clamp_and_bound(mypos,dx_p,dy_p,[-250,250,-250,250])
            #move
            mypos[0] += dx_p
            mypos[1] += dy_p
            #send back
            me.sendall( MOVEMENT.to_bytes(4,'little',signed=True) +
                mypos[0].to_bytes(4,'little',signed=True) + mypos[1].to_bytes(4,'little',signed=True) +
                dx_p.to_bytes(4,'little',signed=True) + dy_p.to_bytes(4,'little',signed=True) +
                myID.to_bytes(4,'little',signed=True)
                )
            # Broadcast the same movement frame to every other client.
            for other in others:
                other.sendall( MOVEMENT.to_bytes(4,'little',signed=True) +
                    mypos[0].to_bytes(4,'little',signed=True) + mypos[1].to_bytes(4,'little',signed=True) +
                    dx_p.to_bytes(4,'little',signed=True) + dy_p.to_bytes(4,'little',signed=True) +
                    myID.to_bytes(4,'little',signed=True)
                    )
        elif opcode == MISSILE:
            # Missile handling not implemented yet.
            pass
class MIS_GAME:
    """Game session: owns positions and one daemon mloop thread per client."""
    def __init__(self, socks):
        # obj_map1 comes from mis_util via the star import -- TODO confirm.
        self.obj_map = obj_map1
        self.socks = socks
        self.poss = [[0,0] for sock in socks]
        # Each client thread receives every *other* socket for broadcasting.
        self.threads = [Thread(target=mloop,args=(self, sock, list(filter(lambda x: x != sock, socks)),self.poss[idx],idx))
                        for idx, sock in enumerate(socks)]
        for t in self.threads:
            t.daemon = True
            t.start()
test_zmq.py | import multiprocessing
import pytest
import pythonflow as pf
def run_processor():
    """Build a small y = a * x graph and serve it over ZeroMQ until terminated."""
    with pf.Graph() as graph:
        x = pf.placeholder('x')
        a = pf.placeholder('a')
        y = (a * x).set_name('y')
    with pf.Processor.from_graph('tcp://127.0.0.1:5555', 'tcp://127.0.0.1:5556', graph) as processor:
        processor.run()
@pytest.fixture
def processor_process():
    """Fixture: run the processor in a child process for the test's duration."""
    process = multiprocessing.Process(target=run_processor)
    process.start()
    yield process
    process.terminate()
    process.join()
@pytest.fixture
def consumer(processor_process):
    """Fixture: a Consumer connected to the processor started above."""
    with pf.Consumer('tcp://127.0.0.1:5556', 'tcp://127.0.0.1:5555') as consumer:
        yield consumer
def test_call(consumer):
    """A single remote evaluation returns the expected product."""
    assert consumer('y', {'x': 2}, a=3) == 6
def test_map(consumer):
    """map evaluates the graph for each input context, preserving order."""
    actual = consumer.map('y', [{'x': x} for x in range(5)], a=3)
    expected = (0, 3, 6, 9, 12)
    for a, b in zip(actual, expected):
        assert a == b
def test_map_batch(consumer):
    """map with batch_size groups results into tuples of that size."""
    actual = consumer.map('y', [{'x': x} for x in range(5)], a=3, batch_size=3)
    expected = [(0, 3, 6), (9, 12)]
    for a, b in zip(actual, expected):
        assert a == b
def test_call_invalid(consumer):
    """Requesting an unknown operation raises KeyError on the consumer side."""
    with pytest.raises(KeyError):
        consumer('missing-operation')
|
wsdump.py | #!/Users/yifan/IdeaProjects/tidbbeat/build/ve/darwin/bin/python3.9
import argparse
import code
import sys
import threading
import time
import ssl
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
    """Return the lowercased encoding of stdin, defaulting to utf-8."""
    encoding = getattr(sys.stdin, "encoding", "")
    return encoding.lower() if encoding else "utf-8"
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
    """argparse action implementing -v / -vv / -v2 style verbosity counting.

    A bare flag means level 1; a numeric value is used directly; a run of
    'v's (as in -vvv) counts the letters plus one.
    """

    def __call__(self, parser, args, values, option_string=None):
        raw = "1" if values is None else values
        try:
            level = int(raw)
        except ValueError:
            level = raw.count("v") + 1
        setattr(args, self.dest, level)
def parse_args():
    """Build and run the CLI argument parser for the websocket dump tool."""
    parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
    parser.add_argument("url", metavar="ws_url",
                        help="websocket url. ex. ws://echo.websocket.org/")
    parser.add_argument("-p", "--proxy",
                        help="proxy url. ex. http://127.0.0.1:8080")
    parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
                        dest="verbose",
                        help="set verbose mode. If set to 1, show opcode. "
                        "If set to 2, enable to trace websocket module")
    parser.add_argument("-n", "--nocert", action='store_true',
                        help="Ignore invalid SSL cert")
    parser.add_argument("-r", "--raw", action="store_true",
                        help="raw output")
    parser.add_argument("-s", "--subprotocols", nargs='*',
                        help="Set subprotocols")
    parser.add_argument("-o", "--origin",
                        help="Set origin")
    parser.add_argument("--eof-wait", default=0, type=int,
                        help="wait time(second) after 'EOF' received.")
    parser.add_argument("-t", "--text",
                        help="Send initial text")
    parser.add_argument("--timings", action="store_true",
                        help="Print timings in seconds")
    parser.add_argument("--headers",
                        help="Set custom headers. Use ',' as separator")
    return parser.parse_args()
class RawInput:
    """Mixin providing prompt-based input with consistent utf-8 byte output."""
    def raw_input(self, prompt):
        if six.PY3:
            line = input(prompt)
        else:
            line = raw_input(prompt)
        # Normalize everything to utf-8 encoded bytes regardless of locale.
        if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
            line = line.decode(ENCODING).encode("utf-8")
        elif isinstance(line, six.text_type):
            line = line.encode("utf-8")
        return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
    """Console that colorizes incoming messages and keeps a '> ' prompt."""
    def write(self, data):
        # Clear the current line, print the message in blue, re-show prompt.
        sys.stdout.write("\033[2K\033[E")
        # sys.stdout.write("\n")
        sys.stdout.write("\033[34m< " + data + "\033[39m")
        sys.stdout.write("\n> ")
        sys.stdout.flush()
    def read(self):
        return self.raw_input("> ")
class NonInteractive(RawInput):
    """Console for --raw mode: plain line-oriented stdout, no prompt."""
    def write(self, data):
        sys.stdout.write(data)
        sys.stdout.write("\n")
        sys.stdout.flush()
    def read(self):
        return self.raw_input("")
def main():
    """Connect to the websocket URL from the CLI and run an interactive dump loop.

    A background thread prints everything received; the foreground loop reads
    console input and sends it, until Ctrl+C or EOF.
    """
    start_time = time.time()
    args = parse_args()
    if args.verbose > 1:
        websocket.enableTrace(True)
    options = {}
    if args.proxy:
        p = urlparse(args.proxy)
        options["http_proxy_host"] = p.hostname
        options["http_proxy_port"] = p.port
    if args.origin:
        options["origin"] = args.origin
    if args.subprotocols:
        options["subprotocols"] = args.subprotocols
    opts = {}
    if args.nocert:
        # Opt-in: disable certificate and hostname verification (insecure).
        opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
    if args.headers:
        options['header'] = map(str.strip, args.headers.split(','))
    ws = websocket.create_connection(args.url, sslopt=opts, **options)
    if args.raw:
        console = NonInteractive()
    else:
        console = InteractiveConsole()
        print("Press Ctrl+C to quit")
    def recv():
        # Receive one frame, transparently answering pings and closes.
        try:
            frame = ws.recv_frame()
        except websocket.WebSocketException:
            return websocket.ABNF.OPCODE_CLOSE, None
        if not frame:
            raise websocket.WebSocketException("Not a valid frame %s" % frame)
        elif frame.opcode in OPCODE_DATA:
            return frame.opcode, frame.data
        elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
            ws.send_close()
            return frame.opcode, None
        elif frame.opcode == websocket.ABNF.OPCODE_PING:
            ws.pong(frame.data)
            return frame.opcode, frame.data
        return frame.opcode, frame.data
    def recv_ws():
        # Background loop: print every received message until the peer closes.
        while True:
            opcode, data = recv()
            msg = None
            if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
                data = str(data, "utf-8")
            if not args.verbose and opcode in OPCODE_DATA:
                msg = data
            elif args.verbose:
                msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
            if msg is not None:
                if args.timings:
                    console.write(str(time.time() - start_time) + ": " + msg)
                else:
                    console.write(msg)
            if opcode == websocket.ABNF.OPCODE_CLOSE:
                break
    thread = threading.Thread(target=recv_ws)
    thread.daemon = True
    thread.start()
    if args.text:
        ws.send(args.text)
    while True:
        # Foreground loop: read console input and send it to the server.
        try:
            message = console.read()
            ws.send(message)
        except KeyboardInterrupt:
            return
        except EOFError:
            time.sleep(args.eof_wait)
            return
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # Top-level guard: show the error message instead of a full traceback.
        print(e)
|
remote.py | """
fs.remote
=========
Utilities for interfacing with remote filesystems
This module provides reusable utility functions that can be used to construct
FS subclasses interfacing with a remote filesystem. These include:
* RemoteFileBuffer: a file-like object that locally buffers the contents of
a remote file, writing them back on flush() or close().
* ConnectionManagerFS: a WrapFS subclass that tracks the connection state
of a remote FS, and allows client code to wait for
a connection to be re-established.
* CacheFS: a WrapFS subclass that caches file and directory meta-data in
memory, to speed access to a remote FS.
"""
from __future__ import with_statement
import time
import stat as statinfo
from errno import EINVAL
import fs.utils
from fs.base import threading, FS
from fs.wrapfs import WrapFS, wrap_fs_methods
from fs.wrapfs.lazyfs import LazyFS
from fs.path import *
from fs.errors import *
from fs.local_functools import wraps
from fs.filelike import StringIO, SpooledTemporaryFile, FileWrapper
from fs import SEEK_SET, SEEK_CUR, SEEK_END
_SENTINAL = object()
class RemoteFileBuffer(FileWrapper):
"""File-like object providing buffer for local file operations.
Instances of this class manage a local tempfile buffer corresponding
to the contents of a remote file. All reads and writes happen locally,
with the content being copied to the remote file only on flush() or
close(). Writes to the remote file are performed using the setcontents()
method on the owning FS object.
The intended use-case is for a remote filesystem (e.g. S3FS) to return
instances of this class from its open() method, and to provide the
file-uploading logic in its setcontents() method, as in the following
pseudo-code::
def open(self,path,mode="r"):
rf = self._get_remote_file(path)
return RemoteFileBuffer(self,path,mode,rf)
def setcontents(self,path,file):
self._put_remote_file(path,file)
The contents of the remote file are read into the buffer on-demand.
"""
max_size_in_memory = 1024 * 8
    def __init__(self, fs, path, mode, rfile=None, write_on_flush=True):
        """RemoteFileBuffer constructor.

        The owning filesystem, path and mode must be provided.  If the
        optional argument 'rfile' is provided, it must be a read()-able
        object or a string containing the initial file contents.
        """
        wrapped_file = SpooledTemporaryFile(max_size=self.max_size_in_memory)
        self.fs = fs
        self.path = path
        self.write_on_flush = write_on_flush
        self._changed = False
        self._readlen = 0 # How many bytes already loaded from rfile
        self._rfile = None # Reference to remote file object
        self._eof = False # Reached end of rfile?
        if getattr(fs,"_lock",None) is not None:
            # Use the same lock class as the owning FS when it has one.
            self._lock = fs._lock.__class__()
        else:
            self._lock = threading.RLock()
        if "r" in mode or "+" in mode or "a" in mode:
            if rfile is None:
                # File was just created, force to write anything
                self._changed = True
                self._eof = True
            if not hasattr(rfile, "read"):
                # NOTE(review): when rfile is None this wraps u'None', but
                # _eof is already True so it is never read -- confirm.
                rfile = StringIO(unicode(rfile))
            self._rfile = rfile
        else:
            # Do not use remote file object
            self._eof = True
            self._rfile = None
            if rfile is not None and hasattr(rfile,"close"):
                rfile.close()
        super(RemoteFileBuffer,self).__init__(wrapped_file,mode)
        # FIXME: What if mode with position on eof?
        if "a" in mode:
            # Not good enough...
            self.seek(0, SEEK_END)
    def __del__(self):
        """Best-effort close on garbage collection, swallowing remote-FS errors."""
        # Don't try to close a partially-constructed file
        if "_lock" in self.__dict__:
            if not self.closed:
                try:
                    self.close()
                except FSError:
                    pass
    def _write(self,data,flushing=False):
        """Write *data* at the current position and mark the buffer dirty.

        Ensures the buffer is filled from the remote file up to the end of
        the written region first, so the overwrite lands in the right place.
        """
        with self._lock:
            # Do we need to discard info from the buffer?
            toread = len(data) - (self._readlen - self.wrapped_file.tell())
            if toread > 0:
                if not self._eof:
                    self._fillbuffer(toread)
                else:
                    self._readlen += toread
            self._changed = True
            self.wrapped_file.write(data)
def _read_remote(self, length=None):
    """Read data from the remote file into the local buffer.
    Reads up to 'length' bytes (or to EOF when length is None) in
    fixed-size chunks, appending them at the buffer's current position.
    Sets _eof, closes the remote file, and advances _readlen.
    """
    chunklen = 1024 * 256   # 256KB chunks keep memory usage bounded
    bytes_read = 0
    while True:
        toread = chunklen
        if length is not None and length - bytes_read < chunklen:
            # Last partial chunk of the requested range.
            toread = length - bytes_read
        if not toread:
            break
        data = self._rfile.read(toread)
        datalen = len(data)
        if not datalen:
            self._eof = True
            break
        bytes_read += datalen
        self.wrapped_file.write(data)
        if datalen < toread:
            # We reached EOF,
            # no more reads needed
            self._eof = True
            break
    if self._eof and self._rfile is not None:
        # Remote file fully consumed; release it.
        self._rfile.close()
    self._readlen += bytes_read
def _fillbuffer(self, length=None):
    """Fill the local buffer, leaving file position unchanged.

    This method is used for on-demand loading of data from the remote
    file into the buffer.  It reads 'length' bytes from rfile and writes
    them into the buffer, seeking back to the original file position.
    When 'length' is None the remote file is read through to EOF.
    """
    curpos = self.wrapped_file.tell()
    # Idiom fix: compare to None with 'is', not '==' (PEP 8).
    if length is None:
        if not self._eof:
            # Load everything remaining: append the remote data at the
            # end of the buffer, then restore the caller's position.
            self.wrapped_file.seek(0, SEEK_END)
            self._read_remote()
            self._eof = True
            self.wrapped_file.seek(curpos)
    elif not self._eof:
        if curpos + length > self._readlen:
            # Only the bytes beyond the already-loaded region need to be
            # fetched from the remote file.
            toload = length - (self._readlen - curpos)
            self.wrapped_file.seek(0, SEEK_END)
            self._read_remote(toload)
            self.wrapped_file.seek(curpos)
def _read(self, length=None):
    """Read up to 'length' bytes, loading remote data on demand.

    A negative length means "read to EOF", mirroring file.read().
    Returns None (not an empty string) once the end of file is reached.
    """
    # Explicit None guard: the original 'if length < 0' relied on
    # Python 2 ordering None < 0 as True; on Python 3 it raises
    # TypeError.  This form behaves identically on both.
    if length is not None and length < 0:
        length = None
    with self._lock:
        self._fillbuffer(length)
        data = self.wrapped_file.read(length if length is not None else -1)
        if not data:
            data = None
        return data
def _seek(self,offset,whence=SEEK_SET):
    """Seek within the buffer, fetching remote data up to the target.
    Seeking past the loaded region forces the missing bytes (or, for
    SEEK_END, the entire remainder of the remote file) to be loaded so
    the underlying buffer seek lands on real data.
    """
    with self._lock:
        if not self._eof:
            # Count absolute position of seeking
            if whence == SEEK_SET:
                abspos = offset
            elif whence == SEEK_CUR:
                abspos = offset + self.wrapped_file.tell()
            elif whence == SEEK_END:
                # Relative to EOF: total size unknown until fully loaded.
                abspos = None
            else:
                raise IOError(EINVAL, 'Invalid whence')
            if abspos != None:
                # Load only the gap between the loaded region and the
                # seek target (if any).
                toread = abspos - self._readlen
                if toread > 0:
                    self.wrapped_file.seek(self._readlen)
                    self._fillbuffer(toread)
            else:
                # SEEK_END: must load the whole remote file first.
                self.wrapped_file.seek(self._readlen)
                self._fillbuffer()
        self.wrapped_file.seek(offset, whence)
def _truncate(self,size):
    """Truncate (or extend) the buffer to 'size' bytes.
    Ensures the first 'size' bytes are loaded from the remote file,
    marks the remote as fully consumed, then truncates the local buffer
    and pushes the change back immediately via flush().
    """
    with self._lock:
        if not self._eof and self._readlen < size:
            # Read the rest of file
            self._fillbuffer(size - self._readlen)
            # Lock rfile
            self._eof = True
        elif self._readlen >= size:
            # Crop rfile metadata
            self._readlen = size if size != None else 0
            # Lock rfile
            self._eof = True
        self.wrapped_file.truncate(size)
        self._changed = True
        # Truncation is destructive: write back right away rather than
        # waiting for the next explicit flush()/close().
        self.flush()
        if self._rfile is not None:
            self._rfile.close()
def flush(self):
    """Flush the local buffer, optionally uploading it to the remote FS.

    The upload happens only when write_on_flush is True; otherwise the
    contents are pushed back on close() alone.
    """
    with self._lock:
        self.wrapped_file.flush()
        if not self.write_on_flush:
            return
        self._setcontents()
def _setcontents(self):
    """Upload the buffered contents back to the remote filesystem.
    No-op when nothing has changed, or when the file was opened without
    any write capability.  The buffer is fully populated from the remote
    first so a partially-loaded local copy is never written back.
    """
    if not self._changed:
        # Nothing changed, no need to write data back
        return
    # If not all data loaded, load until eof
    if not self._eof:
        self._fillbuffer()
    if "w" in self.mode or "a" in self.mode or "+" in self.mode:
        # Rewind for the upload, then restore the caller's position.
        pos = self.wrapped_file.tell()
        self.wrapped_file.seek(0)
        self.fs.setcontents(self.path, self.wrapped_file)
        self.wrapped_file.seek(pos)
def close(self):
    """Push any pending changes to the remote file and close the buffer.

    Safe to call more than once; subsequent calls are no-ops.
    """
    with self._lock:
        if self.closed:
            return
        self._setcontents()
        rfile = self._rfile
        if rfile is not None:
            rfile.close()
        super(RemoteFileBuffer, self).close()
class ConnectionManagerFS(LazyFS):
    """FS wrapper providing simple connection management of a remote FS.
    The ConnectionManagerFS class is designed to wrap a remote FS object
    and provide some convenience methods for dealing with its remote
    connection state.
    The boolean attribute 'connected' indicates whether the remote filesystem
    has an active connection, and is initially True.  If any of the remote
    filesystem methods raises a RemoteConnectionError, 'connected' will
    switch to False and remain so until a successful remote method call.
    Application code can use the method 'wait_for_connection' to block
    until the connection is re-established.  Currently this reconnection
    is checked by a simple polling loop; eventually more sophisticated
    operating-system integration may be added.
    Since some remote FS classes can raise RemoteConnectionError during
    initialization, this class makes use of lazy initialization.  The
    remote FS can be specified as an FS instance, an FS subclass, or a
    (class,args) or (class,args,kwds) tuple.  For example::
        >>> fs = ConnectionManagerFS(MyRemoteFS("http://www.example.com/"))
        Traceback (most recent call last):
            ...
        RemoteConnectionError: couldn't connect to "http://www.example.com/"
        >>> fs = ConnectionManagerFS((MyRemoteFS,["http://www.example.com/"]))
        >>> fs.connected
        False
        >>>
    """
    # Seconds to sleep between reconnection probes in _poll_connection.
    poll_interval = 1
    def __init__(self,wrapped_fs,poll_interval=None,connected=True):
        super(ConnectionManagerFS,self).__init__(wrapped_fs)
        if poll_interval is not None:
            self.poll_interval = poll_interval
        # Condition used to wake threads blocked in wait_for_connection().
        self._connection_cond = threading.Condition()
        self._poll_thread = None
        # Event used to interrupt the poll thread's sleep on close().
        self._poll_sleeper = threading.Event()
        self.connected = connected
    def setcontents(self, path, data, chunk_size=64*1024):
        """Pass setcontents straight through to the wrapped filesystem."""
        return self.wrapped_fs.setcontents(path, data, chunk_size=chunk_size)
    def __getstate__(self):
        # Conditions, events and threads cannot be pickled; they are
        # recreated in __setstate__.
        state = super(ConnectionManagerFS,self).__getstate__()
        del state["_connection_cond"]
        del state["_poll_sleeper"]
        state["_poll_thread"] = None
        return state
    def __setstate__(self,state):
        super(ConnectionManagerFS,self).__setstate__(state)
        self._connection_cond = threading.Condition()
        self._poll_sleeper = threading.Event()
    def wait_for_connection(self,timeout=None):
        """Block until the remote connection is re-established.
        Starts a background polling thread if one is not already running,
        then waits (up to 'timeout' seconds) on the condition variable
        that the poller notifies.
        """
        self._connection_cond.acquire()
        try:
            if not self.connected:
                if not self._poll_thread:
                    target = self._poll_connection
                    self._poll_thread = threading.Thread(target=target)
                    self._poll_thread.daemon = True
                    self._poll_thread.start()
                self._connection_cond.wait(timeout)
        finally:
            self._connection_cond.release()
    def _poll_connection(self):
        """Probe the remote FS until it responds, then mark as connected.
        Runs in the daemon thread started by wait_for_connection().
        """
        while not self.connected and not self.closed:
            try:
                # Cheap probe; any response at all proves connectivity.
                self.wrapped_fs.isdir("")
            except RemoteConnectionError:
                # Still down: sleep (interruptibly) and retry.
                self._poll_sleeper.wait(self.poll_interval)
                self._poll_sleeper.clear()
            except FSError:
                # The remote answered with an ordinary error: connected.
                break
            else:
                break
        self._connection_cond.acquire()
        try:
            if not self.closed:
                self.connected = True
            self._poll_thread = None
            self._connection_cond.notifyAll()
        finally:
            self._connection_cond.release()
    def close(self):
        """Close the wrapped FS and shut down any running poll thread."""
        if not self.closed:
            try:
                super(ConnectionManagerFS,self).close()
            except (RemoteConnectionError,):
                # Best-effort: the remote may already be unreachable.
                pass
            if self._poll_thread:
                # Wake the poller so it can observe 'closed' and exit.
                self.connected = True
                self._poll_sleeper.set()
                self._poll_thread.join()
                self._poll_thread = None
def _ConnectionManagerFS_method_wrapper(func):
    """Method wrapper for ConnectionManagerFS.
    This method wrapper keeps an eye out for RemoteConnectionErrors and
    adjusts self.connected accordingly: a RemoteConnectionError flips it
    to False, while any other outcome (success, or an ordinary FSError
    proving the remote answered) flips it back to True.
    """
    @wraps(func)
    def wrapper(self,*args,**kwds):
        try:
            result = func(self,*args,**kwds)
        except RemoteConnectionError:
            self.connected = False
            raise
        except FSError:
            # The remote responded (with an error), so we are connected.
            self.connected = True
            raise
        else:
            self.connected = True
            return result
    return wrapper
# Install the wrapper around every standard FS method on the class.
wrap_fs_methods(_ConnectionManagerFS_method_wrapper,ConnectionManagerFS)
class CachedInfo(object):
    """Info objects stored in cache for CacheFS.

    Each entry records the raw info dict for a path, the time it was
    cached, and whether the info / the list of children is known to be
    complete.
    """
    __slots__ = ("timestamp", "info", "has_full_info", "has_full_children")

    def __init__(self, info=None, has_full_info=True, has_full_children=False):
        # Bug fix: 'info' previously defaulted to a shared mutable dict
        # ({}), aliasing every default-constructed instance onto the same
        # object.  Use a fresh dict per instance instead.
        self.timestamp = time.time()
        self.info = {} if info is None else info
        self.has_full_info = has_full_info
        self.has_full_children = has_full_children

    def clone(self):
        """Return a new CachedInfo carrying the same state as this one."""
        new_ci = self.__class__()
        new_ci.update_from(self)
        return new_ci

    def update_from(self, other):
        """Copy all state (including the timestamp) from 'other'."""
        self.timestamp = other.timestamp
        self.info = other.info
        self.has_full_info = other.has_full_info
        self.has_full_children = other.has_full_children

    @classmethod
    def new_file_stub(cls):
        """Placeholder entry for a file whose full info is not yet known."""
        # 0o700 replaces the Python-2-only literal 0700 (syntax error on
        # Python 3); the value is identical and valid on Python 2.6+.
        info = {"info": 0o700 | statinfo.S_IFREG}
        return cls(info, has_full_info=False)

    @classmethod
    def new_dir_stub(cls):
        """Placeholder entry for a directory whose full info is unknown."""
        info = {"info": 0o700 | statinfo.S_IFDIR}
        return cls(info, has_full_info=False)
class CacheFSMixin(FS):
    """Simple FS mixin to cache meta-data of a remote filesystems.
    This FS mixin implements a simplistic cache that can help speed up
    access to a remote filesystem.  File and directory meta-data is cached
    but the actual file contents are not.
    If you want to add caching to an existing FS object, use the CacheFS
    class instead; it's an easy-to-use wrapper rather than a mixin.
    This mixin class is provided for FS implementors who want to use
    caching internally in their own classes.
    FYI, the implementation of CacheFS is this:
        class CacheFS(CacheFSMixin,WrapFS):
            pass
    """
    def __init__(self,*args,**kwds):
        """CacheFSMixin constructor.
        The optional keyword argument 'cache_timeout' specifies the cache
        timeout in seconds.  The default timeout is 1 second.  To prevent
        cache entries from ever timing out, set it to None.
        The optional keyword argument 'max_cache_size' specifies the maximum
        number of entries to keep in the cache.  To allow the cache to grow
        without bound, set it to None.  The default is 1000.
        """
        self.cache_timeout = kwds.pop("cache_timeout",1)
        self.max_cache_size = kwds.pop("max_cache_size",1000)
        # path -> CachedInfo; PathMap supports prefix iteration/clearing.
        self.__cache = PathMap()
        self.__cache_size = 0
        self.__cache_lock = threading.RLock()
        super(CacheFSMixin,self).__init__(*args,**kwds)
    def clear_cache(self,path=""):
        """Drop all cached entries at or below 'path'."""
        with self.__cache_lock:
            self.__cache.clear(path)
        # Chain to a superclass clear_cache if one exists.
        try:
            scc = super(CacheFSMixin,self).clear_cache
        except AttributeError:
            pass
        else:
            scc()
    def __getstate__(self):
        # The cache and its lock are transient; rebuilt on unpickle.
        state = super(CacheFSMixin,self).__getstate__()
        state.pop("_CacheFSMixin__cache",None)
        state.pop("_CacheFSMixin__cache_size",None)
        state.pop("_CacheFSMixin__cache_lock",None)
        return state
    def __setstate__(self,state):
        super(CacheFSMixin,self).__setstate__(state)
        self.__cache = PathMap()
        self.__cache_size = 0
        self.__cache_lock = threading.RLock()
    def __get_cached_info(self,path,default=_SENTINAL):
        """Return the CachedInfo for 'path', expiring stale entries.
        Raises KeyError (or returns 'default' when given) if the path is
        not cached or its entry has timed out.
        """
        try:
            info = self.__cache[path]
            if self.cache_timeout is not None:
                now = time.time()
                if info.timestamp < (now - self.cache_timeout):
                    # Entry expired: evict it and report a miss.
                    with self.__cache_lock:
                        self.__expire_from_cache(path)
                    raise KeyError
            return info
        except KeyError:
            if default is not _SENTINAL:
                return default
            raise
    def __set_cached_info(self,path,new_ci,old_ci=None):
        """Insert or refresh a cache entry, evicting entries when full.
        Returns False when eviction was needed to make room.
        """
        was_room = True
        with self.__cache_lock:
            # Free up some room in the cache
            if self.max_cache_size is not None and old_ci is None:
                while self.__cache_size >= self.max_cache_size:
                    try:
                        to_del = iter(self.__cache).next()
                    except StopIteration:
                        break
                    else:
                        was_room = False
                        self.__expire_from_cache(to_del)
            # Atomically add to the cache.
            # If there's a race, newest information wins
            ci = self.__cache.setdefault(path,new_ci)
            if ci is new_ci:
                self.__cache_size += 1
            else:
                if old_ci is None or ci is old_ci:
                    if ci.timestamp < new_ci.timestamp:
                        ci.update_from(new_ci)
        return was_room
    def __expire_from_cache(self,path):
        """Remove 'path' from the cache and mark its ancestors incomplete."""
        del self.__cache[path]
        self.__cache_size -= 1
        # Ancestors can no longer claim to know their full child listing.
        for ancestor in recursepath(path):
            try:
                self.__cache[ancestor].has_full_children = False
            except KeyError:
                pass
    def open(self,path,mode="r",**kwds):
        """Open a file, using cached meta-data to fail fast when possible."""
        # Try to validate the entry using the cached info
        try:
            ci = self.__get_cached_info(path)
        except KeyError:
            if path in ("","/"):
                # The root is a directory, never an openable file.
                raise ResourceInvalidError(path)
            try:
                ppath = dirname(path)
                pci = self.__get_cached_info(ppath)
            except KeyError:
                pass
            else:
                if not fs.utils.isdir(super(CacheFSMixin,self),ppath,pci.info):
                    raise ResourceInvalidError(path)
                if pci.has_full_children:
                    # Parent's full listing is cached and 'path' isn't in it.
                    raise ResourceNotFoundError(path)
        else:
            if not fs.utils.isfile(super(CacheFSMixin,self),path,ci.info):
                raise ResourceInvalidError(path)
        f = super(CacheFSMixin,self).open(path,mode,**kwds)
        if "w" in mode or "a" in mode or "+" in mode:
            # Writable handle: cached info goes stale as soon as it is
            # written to, so purge now and invalidate on every mutation.
            with self.__cache_lock:
                self.__cache.clear(path)
            f = self._CacheInvalidatingFile(self,path,f,mode)
        return f
    class _CacheInvalidatingFile(FileWrapper):
        """File wrapper that purges cached info for its path on mutation."""
        def __init__(self,owner,path,wrapped_file,mode=None):
            self.path = path
            sup = super(CacheFSMixin._CacheInvalidatingFile,self)
            sup.__init__(wrapped_file,mode)
            self.owner = owner
        def _write(self,string,flushing=False):
            # Invalidate before delegating the actual write.
            with self.owner._CacheFSMixin__cache_lock:
                self.owner._CacheFSMixin__cache.clear(self.path)
            sup = super(CacheFSMixin._CacheInvalidatingFile,self)
            return sup._write(string,flushing=flushing)
        def _truncate(self,size):
            # Invalidate before delegating the truncation.
            with self.owner._CacheFSMixin__cache_lock:
                self.owner._CacheFSMixin__cache.clear(self.path)
            sup = super(CacheFSMixin._CacheInvalidatingFile,self)
            return sup._truncate(size)
    def exists(self,path):
        """Return True if 'path' exists (may be answered from cache)."""
        try:
            self.getinfo(path)
        except ResourceNotFoundError:
            return False
        else:
            return True
    def isdir(self,path):
        """Return True if 'path' is a directory."""
        # Any cached child implies 'path' is a directory.
        try:
            self.__cache.iternames(path).next()
            return True
        except StopIteration:
            pass
        try:
            info = self.getinfo(path)
        except ResourceNotFoundError:
            return False
        else:
            return fs.utils.isdir(super(CacheFSMixin,self),path,info)
    def isfile(self,path):
        """Return True if 'path' is a file."""
        # Any cached child implies 'path' is a directory, not a file.
        try:
            self.__cache.iternames(path).next()
            return False
        except StopIteration:
            pass
        try:
            info = self.getinfo(path)
        except ResourceNotFoundError:
            return False
        else:
            return fs.utils.isfile(super(CacheFSMixin,self),path,info)
    def getinfo(self,path):
        """Return the info dict for 'path', caching the result."""
        try:
            ci = self.__get_cached_info(path)
            if not ci.has_full_info:
                # Only a stub is cached; fetch the real thing below.
                raise KeyError
            info = ci.info
        except KeyError:
            info = super(CacheFSMixin,self).getinfo(path)
            self.__set_cached_info(path,CachedInfo(info))
        return info
    def listdir(self,path="",*args,**kwds):
        """List entry names in 'path' (delegates to listdirinfo)."""
        return list(nm for (nm, _info) in self.listdirinfo(path,*args,**kwds))
    def ilistdir(self,path="",*args,**kwds):
        """Iterate entry names in 'path' (delegates to ilistdirinfo)."""
        for (nm, _info) in self.ilistdirinfo(path,*args,**kwds):
            yield nm
    def listdirinfo(self,path="",*args,**kwds):
        """List (name, info) pairs in 'path', refreshing the cache."""
        items = super(CacheFSMixin,self).listdirinfo(path,*args,**kwds)
        with self.__cache_lock:
            names = set()
            for (nm,info) in items:
                names.add(basename(nm))
                cpath = pathjoin(path,basename(nm))
                ci = CachedInfo(info)
                self.__set_cached_info(cpath,ci)
            # Drop cached children that no longer appear in the listing.
            to_del = []
            for nm in self.__cache.iternames(path):
                if nm not in names:
                    to_del.append(nm)
            for nm in to_del:
                self.__cache.clear(pathjoin(path,nm))
            #try:
            #    pci = self.__cache[path]
            #except KeyError:
            #    pci = CachedInfo.new_dir_stub()
            #    self.__cache[path] = pci
            #pci.has_full_children = True
        return items
    def ilistdirinfo(self,path="",*args,**kwds):
        """Iterate (name, info) pairs in 'path', caching as they stream."""
        items = super(CacheFSMixin,self).ilistdirinfo(path,*args,**kwds)
        for (nm,info) in items:
            cpath = pathjoin(path,basename(nm))
            ci = CachedInfo(info)
            self.__set_cached_info(cpath,ci)
            yield (nm,info)
    def getsize(self,path):
        """Return the size of 'path' from its (possibly cached) info."""
        return self.getinfo(path)["size"]
    def setcontents(self, path, contents="", chunk_size=64*1024):
        """Write 'contents' to 'path', resetting its cache entry to a stub."""
        supsc = super(CacheFSMixin,self).setcontents
        res = supsc(path, contents, chunk_size=chunk_size)
        with self.__cache_lock:
            self.__cache.clear(path)
            self.__cache[path] = CachedInfo.new_file_stub()
        return res
    def createfile(self, path):
        """Create an empty file and cache a file stub for it."""
        super(CacheFSMixin,self).createfile(path)
        with self.__cache_lock:
            self.__cache.clear(path)
            self.__cache[path] = CachedInfo.new_file_stub()
    def makedir(self,path,*args,**kwds):
        """Create a directory and cache a directory stub for it."""
        super(CacheFSMixin,self).makedir(path,*args,**kwds)
        with self.__cache_lock:
            self.__cache.clear(path)
            self.__cache[path] = CachedInfo.new_dir_stub()
    def remove(self,path):
        """Remove a file and purge its cache entries."""
        super(CacheFSMixin,self).remove(path)
        with self.__cache_lock:
            self.__cache.clear(path)
    def removedir(self,path,**kwds):
        """Remove a directory and purge its cache entries."""
        super(CacheFSMixin,self).removedir(path,**kwds)
        with self.__cache_lock:
            self.__cache.clear(path)
    def rename(self,src,dst):
        """Rename src to dst, moving cached entries to the new prefix."""
        super(CacheFSMixin,self).rename(src,dst)
        with self.__cache_lock:
            for (subpath,ci) in self.__cache.iteritems(src):
                self.__cache[pathjoin(dst,subpath)] = ci.clone()
            self.__cache.clear(src)
    def copy(self,src,dst,**kwds):
        """Copy src to dst, cloning cached entries under the new prefix."""
        super(CacheFSMixin,self).copy(src,dst,**kwds)
        with self.__cache_lock:
            for (subpath,ci) in self.__cache.iteritems(src):
                self.__cache[pathjoin(dst,subpath)] = ci.clone()
    def copydir(self,src,dst,**kwds):
        """Copy a directory tree, cloning cached entries as well."""
        super(CacheFSMixin,self).copydir(src,dst,**kwds)
        with self.__cache_lock:
            for (subpath,ci) in self.__cache.iteritems(src):
                self.__cache[pathjoin(dst,subpath)] = ci.clone()
    def move(self,src,dst,**kwds):
        """Move src to dst, relocating cached entries."""
        super(CacheFSMixin,self).move(src,dst,**kwds)
        with self.__cache_lock:
            for (subpath,ci) in self.__cache.iteritems(src):
                self.__cache[pathjoin(dst,subpath)] = ci.clone()
            self.__cache.clear(src)
    def movedir(self,src,dst,**kwds):
        """Move a directory tree, relocating cached entries."""
        super(CacheFSMixin,self).movedir(src,dst,**kwds)
        with self.__cache_lock:
            for (subpath,ci) in self.__cache.iteritems(src):
                self.__cache[pathjoin(dst,subpath)] = ci.clone()
            self.__cache.clear(src)
    def settimes(self,path,*args,**kwds):
        """Update times on 'path' and drop its cached info."""
        super(CacheFSMixin,self).settimes(path,*args,**kwds)
        with self.__cache_lock:
            self.__cache.pop(path,None)
class CacheFS(CacheFSMixin, WrapFS):
    """Simple FS wrapper to cache meta-data of a remote filesystems.

    Combines CacheFSMixin with WrapFS: file and directory meta-data is
    cached, while actual file contents are always fetched from the
    wrapped filesystem.
    """
    pass
|
supervised_popen.py | # Software License Agreement (BSD License)
#
# Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, absolute_import, print_function, unicode_literals
from python_qt_binding.QtCore import QObject, Signal
import subprocess
import threading
from .detailed_msg_box import MessageBox
class SupervisedPopen(QObject):
    '''
    The class overrides the subprocess.Popen and waits in a thread for its finish.
    If an error is printed out, it will be shown in a message dialog.
    '''
    error = Signal(str, str, str)
    ''':ivar: the signal is emitted if error output was detected (id, description, message)'''
    finished = Signal(str)
    ''':ivar: the signal is emitted on exit (id)'''

    def __init__(self, args, bufsize=0, executable=None, stdin=None, stdout=None,
                 stderr=subprocess.PIPE, preexec_fn=None, close_fds=False,
                 shell=False, cwd=None, env=None, universal_newlines=False,
                 startupinfo=None, creationflags=0, object_id='', description=''):
        '''
        For arguments see https://docs.python.org/2/library/subprocess.html
        Additional arguments:
        :param str object_id: the identification string of this object and title of the
                              error message dialog
        :param str description: the description string used as additional information
                                in the dialog if an error occurred
        '''
        # Fix: removed the no-op 'try: ... except Exception as _: raise'
        # wrapper which only re-raised and hid nothing.
        QObject.__init__(self)
        self._args = args
        self._object_id = object_id
        self._description = description
        self.error.connect(self.on_error)
        # Spawn the child; a supervising thread waits on it to avoid
        # leaving 'defunct' (zombie) processes behind.
        self.popen = subprocess.Popen(args=args, bufsize=bufsize, executable=executable, stdin=stdin, stdout=stdout,
                                      stderr=stderr, preexec_fn=preexec_fn, close_fds=close_fds, shell=shell, cwd=cwd, env=env,
                                      universal_newlines=universal_newlines, startupinfo=startupinfo, creationflags=creationflags)
        thread = threading.Thread(target=self._supervise)
        # 'daemon' attribute instead of the deprecated setDaemon().
        thread.daemon = True
        thread.start()

    @property
    def stdout(self):
        '''Standard output pipe of the supervised process (may be None).'''
        return self.popen.stdout

    @property
    def stderr(self):
        '''Standard error pipe of the supervised process (may be None).'''
        return self.popen.stderr

    @property
    def stdin(self):
        '''Standard input pipe of the supervised process (may be None).'''
        return self.popen.stdin

    def _supervise(self):
        '''
        Wait for the process to finish (avoids 'defunct' processes), emit
        'error' if anything was written to stderr, then emit 'finished'.
        '''
        self.popen.wait()
        result_err = ''
        if self.stderr is not None:
            result_err = self.stderr.read()
        if result_err:
            self.error.emit(self._object_id, self._description, result_err)
        self.finished.emit(self._object_id)

    def on_error(self, object_id, descr, msg):
        '''Show the collected stderr output in a warning dialog.'''
        MessageBox.warning(None, object_id, '%s\n\n'
                           '%s' % (descr, msg), ' '.join(self._args))
|
main.py | #! /usr/bin/env python
# noted by chan
#orion -v --debug hunt -c ./orion_config.yaml -n ppo_test ./algorithms/ppo/main.py --epochs~'fidelity(5000, 10000)' --lr~'uniform(1e-5, 1)'
import os
import torch
import numpy as np
from torch.multiprocessing import SimpleQueue, Process, Value, Event, Barrier
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from algorithms.ppo.core import ActorCritic, PPOBuffer, count_vars
from algorithms.ppo.worker import worker
from algorithms.ppo.learner import learner
from gym_ai2thor.envs.ai2thor_env import AI2ThorEnv
def train_ai2thor(model, args, rank=0, b=None):
    """Train the ActorCritic model on AI2Thor with PPO.

    Spawns one worker process per args.num_workers to collect rollouts
    into a shared PPOBuffer, then runs the learner loop in this process
    and finally joins the workers.
    :param model: the shared ActorCritic network
    :param args: parsed command-line arguments (see __main__ below)
    :param rank: process rank when multiple trainer processes are used
    :param b: optional Barrier shared across trainer processes
    """
    # Distinct seed per rank so parallel trainers diverge.
    seed = args.seed + 10000 *rank
    torch.manual_seed(seed)
    np.random.seed(seed)
    # torch.cuda.set_device(rank)
    # device = torch.device(f'cuda:{rank}')
    device = torch.device('cuda' if torch.cuda.is_available() else "cpu")
    # if torch.cuda.is_available():
    #     os.environ['DISPLAY'] = f':{rank}'
    model = model.to(device)
    # Shared memory so worker processes see parameter updates.
    model.share_memory()
    # Experience buffer
    storage = PPOBuffer(model.obs_shape, args.steps, args.num_workers, args.state_size, args.gamma, device=device)
    storage.share_memory()
    #torch.multiprocessing.set_start_method('spawn')
    # start multiple processes
    ready_to_works = [Event() for _ in range(args.num_workers)]
    exit_flag = Value('i', 0)
    queue = SimpleQueue()
    processes = []
    # task_config_file = "config_files/multiMugTaskTrain.json"
    task_config_file = "config_files/multiMugTaskTrain.json"
    # start workers
    for worker_id in range(args.num_workers):
        print('START>>>>>>>>>>>>>>>>')
        p = Process(target=worker, args=(worker_id, model, storage, ready_to_works[worker_id], queue, exit_flag, args.use_priors, task_config_file))
        p.start()
        processes.append(p)
    # start trainer
    train_params = {"epochs": args.epochs,
                    "steps": args.steps,
                    "world_size": args.world_size,
                    "num_workers": args.num_workers
                    }
    ppo_params = {"clip_param": args.clip_param,
                  "train_iters": args.train_iters,
                  "mini_batch_size": args.mini_batch_size,
                  "value_loss_coef": args.value_loss_coef,
                  "entropy_coef": args.entropy_coef,
                  "rnn_steps": args.rnn_steps,
                  "lr": args.lr,
                  "max_kl": args.max_kl
                  }
    distributed = False
    if args.world_size > 1:
        # NOTE(review): 'distributed' is always False at this point, so
        # the DistributedDataParallel branch below is dead code and the
        # else-message always prints.  Confirm whether DDP was meant to
        # be enabled for world_size > 1.
        if distributed==True:
            distributed = True
            # Initialize Process Group, distributed backend type
            dist_backend = 'nccl'
            # Url used to setup distributed training
            dist_url = "tcp://127.0.0.1:23456"
            print("Initialize Process Group... pid:", os.getpid())
            dist.init_process_group(backend=dist_backend, init_method=dist_url, rank=rank, world_size=args.world_size)
            # Make model DistributedDataParallel
            model = DistributedDataParallel(model, device_ids=[rank], output_device=rank)
        else: print('Distribution is not allowed')
    # Blocks until training completes; exit_flag tells workers to stop.
    learner(model, storage, train_params, ppo_params, ready_to_works, queue, exit_flag, rank, distributed, b)
    for p in processes:
        print("process ", p.pid, " joined")
        p.join()
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', '-s', type=int, default=0)
    parser.add_argument('--world-size', type=int, default=1)
    parser.add_argument('--steps', type=int, default=2048)
    parser.add_argument('--num-workers', type=int, default=1)
    parser.add_argument('--mini-batch-size', type=int, default=128)
    parser.add_argument('--epochs', type=int, default=10000)
    parser.add_argument('--train-iters', type=int, default=10)
    parser.add_argument('--model-path', type=str, default=None)
    parser.add_argument('--state-size', type=int, default=128)
    parser.add_argument('--rnn-steps', type=int, default=16)
    parser.add_argument('--gamma', type=float, default=0.99)
    parser.add_argument('--lr', type=float, default=3e-4)
    parser.add_argument('--clip-param', type=float, default=0.2)
    parser.add_argument('--value_loss_coef', type=float, default=0.2)
    parser.add_argument('--entropy_coef', type=float, default=0.001)
    parser.add_argument('--max-kl', type=float, default=0.01)
    # NOTE(review): argparse's type=bool does NOT parse booleans — any
    # non-empty string (including "False") is truthy, so
    # "--use-priors False" yields True.  Consider action='store_true'.
    parser.add_argument('--use-priors', type=bool, default=False)
    parser.add_argument('--use-attention', type=bool, default=True)
    parser.add_argument('--attention', type=str, default='CBAM')
    args = parser.parse_args()
    # 'spawn' is required for CUDA tensors shared across processes.
    torch.multiprocessing.set_start_method('spawn')
    # get observation dimension
    env = AI2ThorEnv(config_file="config_files/multiMugTaskTrain.json")
    env.reset()
    obs_dim = env.observation_space.shape
    # Share information about action space with policy architecture
    ac_kwargs = dict()
    ac_kwargs['action_space'] = env.action_space
    ac_kwargs['state_size'] = args.state_size
    ac_kwargs['use_attention'] = args.use_attention
    ac_kwargs['attention'] = args.attention
    # The env is only needed to read its spaces; workers open their own.
    env.close()
    # Main model
    print("Initialize Model...")
    # Construct Model
    ac_model = ActorCritic(obs_shape=obs_dim, **ac_kwargs)
    if args.model_path:
        ac_model.load_state_dict(torch.load(args.model_path))
    # Count variables
    var_counts = tuple(count_vars(m) for m in [ac_model.policy, ac_model.value_function, ac_model.feature_base])
    print('\nNumber of parameters: \t pi: %d, \t v: %d \tbase: %d\n' % var_counts)
    if args.world_size > 1:
        # One trainer process per rank, synchronised by a shared Barrier.
        processes = []
        b = Barrier(args.world_size)
        for rank in range(args.world_size):
            p = Process(target=train_ai2thor, args=(ac_model, args, rank, b))
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
            print("process ", p.pid, " joined")
    else:
        train_ai2thor(ac_model, args)
    print("main exits")
|
Cluster.py | __author__ = 'cmantas'
from lib.tiramola_logging import get_logger
from multiprocessing import Process
from lib.persistance_module import get_script_text, home, env_vars
class Cluster(object):
    """Base class modelling a cluster of VMs that scripts can be run on."""
    # the logger for this file (shared by all Cluster instances)
    log = get_logger('CLUSTER', 'DEBUG', logfile=home+'files/logs/Coordinator.log')

    def __init__(self):
        self.all_nodes = []
        self.log = Cluster.log
        # the name of the cluster is used as a prefix for the VM names
        self.cluster_name = "cluster"

    @staticmethod
    def wait_proc(proc, node, timeout, log=None):
        """
        Waits for a process to finish running for a given timeout and throws an exception if not finished
        :param proc: the multiprocessing.Process to wait on
        :param node: the node the process operates on (used in messages)
        :param timeout: seconds to wait before terminating the process
        :param log: optional logger for progress/error messages
        :return: None
        """
        proc.join(timeout)
        # check if it has not finished yet, fail if so
        if proc.is_alive():
            # Idiom fix ('log is not None' instead of 'not log is None').
            if log is not None:
                log.error("Timeout occurred for process")
            proc.terminate()
            raise Exception("Script timed out for "+node.name)
        elif log is not None:
            log.debug(node.name+" DONE")

    @staticmethod
    def run_script(script_content, nodes, serial=True, timeout=600, log=None):
        """
        Runs a script to the specified VMs
        :param script_content: the shell script text to run on each node
        :param nodes: the nodes to run the script on
        :param serial: if True, wait for each node before starting the next
        :param timeout: per-node timeout in seconds
        :param log: optional logger
        :return: None
        """
        if log is not None:
            log.info('Running a script to %d nodes' % len(nodes))
        procs = []
        # start the procs that run the script on each node
        for node in nodes:
            p = Process(target=node.run_command, args=(script_content,))
            procs.append(p)
            p.start()
            if serial:
                # if adding in serial, wait each proc
                if log is not None:
                    log.debug("waiting for node #"+node.name)
                Cluster.wait_proc(p, node, timeout)
        if not serial:
            # wait for all the procs to finish in parallel
            if log is not None:
                log.debug("Waiting for all the procs to finish")
            for i in range(len(nodes)):
                Cluster.wait_proc(procs[i], nodes[i], timeout)
        if log is not None:
            log.info("Finished running script")

    def run_to_all(self, script_content, serial=True, timeout=600):
        """
        Runs a script to all the nodes in the cluster
        :param script_content: the shell script text
        :param serial: if True run node-by-node, else in parallel
        :param timeout: per-node timeout in seconds
        """
        self.run_script(script_content, self.all_nodes, serial, timeout, self.log)

    def wait_everybody(self):
        """
        Waits for all the Nodes in the cluster to be SSH-able
        """
        self.log.info('Waiting for SSH on all nodes')
        for i in self.all_nodes:
            i.wait_ready()

    def bootstrap_cluster(self):
        """
        Runs the necessary bootstrap commands on the Seed Node and the other nodes
        """
        for n in self.all_nodes:
            n.bootstrap()
        self.inject_hosts_files()

    def kill_nodes(self):
        """
        Runs the kill scripts for all the nodes in the cluster
        """
        self.log.info("Killing nodes")
        for n in self.all_nodes:
            n.kill()

    def update_hostfiles(self, servers):
        """
        Pushes an updated /opt/hosts file (used by YCSB clients) to all nodes
        :param servers: mapping of server name --> address
        """
        if not env_vars["update_hostfiles"]:
            self.log.info("Not updtading ycsb client host files")
            return
        self.log.info("updating hostfiles")
        # generate ycsb-specific hosts file text
        host_text = ""
        # the seed node must not appear in the clients' host list
        # (membership test on the dict directly, not on .keys())
        if "cassandra_seednode" in servers:
            del servers["cassandra_seednode"]
        # generate the "hosts" text for YCSB
        for key, value in servers.iteritems():
            host_text += value+"\n"
        host_text = host_text[:-1]  # remove trailing EOL
        # DEBUG keep just one host
        # host_text = servers["cassandra_node_01"]
        command = "echo '%s' > /opt/hosts;" % host_text
        self.run_script(command, self.all_nodes, serial=False)

    def get_hosts(self, string=False, private=False):
        """
        Produces a mapping of hostname-->IP for the nodes in the cluster
        :param string: currently unused; kept for interface compatibility
        :param private: if True return private addresses, else public ones
        :return: a dict of hostnames-->IPs
        """
        hosts = dict()
        for i in self.all_nodes:
            if private:
                hosts[i.name] = i.get_private_addr()
            else:
                hosts[i.name] = i.get_public_addr()
        return hosts

    def node_count(self):
        """Return the number of nodes in the cluster."""
        return len(self.all_nodes)

    def exists(self):
        """Return True if the cluster has at least one node."""
        return len(self.all_nodes) != 0

    def get_monitoring_endpoint(self):
        """
        returns the IP of the node that has the monitoring data we want
        """
        return self.all_nodes[0].get_public_addr()
main_window.py | import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrum
from electrum.bitcoin import TYPE_ADDRESS
from electrum import WalletStorage, Wallet
from electrum_gui.kivy.i18n import _
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword
from electrum.plugins import run_hook
from electrum.util import format_satoshis, format_satoshis_plain
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble
from .uix.dialogs import OutputList, OutputItem
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register widget cache for keeping memory down timeout to forever to cache
# the data
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_gui.kivy.uix.screens')
# Register fonts without this you won't be able to use bold/italic...
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import base_units
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_checkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
for index, b in self.network.blockchains.items():
if name == self.network.get_blockchain_name(b):
self.network.follow_chain(index)
#self.block
names = [self.network.blockchains[b].get_name() for b in chains]
if len(names) >1:
ChoiceDialog(_('Choose your chain'), names, '', cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'fujicoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self._trigger_update_history()
def _get_bu(self):
return self.electrum_config.get('base_unit', 'FJC')
def _set_bu(self, value):
assert value in base_units.keys()
self.electrum_config.set_key('base_unit', value, True)
self._trigger_update_status()
self._trigger_update_history()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''Tries to ascertain the kind of device the app is running on.
Cane be one of `tablet` or `phone`.
:data:`orientation` is a read only `AliasProperty` Defaults to 'landscape'
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Defines tries to ascertain the kind of device the app is running on.
Cane be one of `tablet` or `phone`.
:data:`ui_mode` is a read only `AliasProperty` Defaults to 'phone'
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
self.pause_time = 0
App.__init__(self)#, **kwargs)
title = _('Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host = host
self.server_port = port
self.auto_connect = auto_connect
self.proxy_config = proxy_config if proxy_config else {}
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', False)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so as to minimize updation a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
def on_pr(self, pr):
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('fujicoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.transaction import Transaction
from electrum.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False):
from .uix.dialogs.qr_dialog import QRDialog
popup = QRDialog(title, data, show_text)
popup.open()
    def scan_qr(self, on_complete):
        """Launch the Android QR scanner activity and invoke *on_complete*
        with the scanned text.  No-op on non-Android platforms."""
        if platform != 'android':
            return
        from jnius import autoclass, cast
        from android import activity
        PythonActivity = autoclass('org.kivy.android.PythonActivity')
        SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
        Intent = autoclass('android.content.Intent')
        intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
        def on_qr_result(requestCode, resultCode, intent):
            if resultCode == -1:  # RESULT_OK:
                # this doesn't work due to some bug in jnius:
                # contents = intent.getStringExtra("text")
                String = autoclass("java.lang.String")
                contents = intent.getStringExtra(String("text"))
                on_complete(contents)
        activity.bind(on_activity_result=on_qr_result)
        PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for fujicoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, instance, wallet):
if wallet:
wallet.start_threads(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
    def load_wallet_by_name(self, path):
        """Load the wallet at *path*, prompting for the PIN if it is encrypted,
        or launch the install wizard when no wallet exists there.

        Does nothing if *path* is empty or that wallet is already loaded.
        """
        if not path:
            return
        if self.wallet and self.wallet.storage.path == path:
            return
        wallet = self.daemon.load_wallet(path, None)
        if wallet:
            if wallet.has_password():
                # On a cancelled/failed PIN entry the whole app stops.
                self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
            else:
                self.load_wallet(wallet)
        else:
            Logger.debug('Electrum: Wallet not found. Launching install wizard')
            storage = WalletStorage(path)
            wizard = Factory.InstallWizard(self.electrum_config, storage)
            wizard.bind(on_wizard_complete=self.on_wizard_complete)
            action = wizard.storage.get_action()
            wizard.run(action)
def on_stop(self):
Logger.info('on_stop')
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
else:
popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize The Ux part of electrum. This function performs the basic
tasks of setting up the ui.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_checkpoint = chain.get_checkpoint()
self.blockchain_name = chain.get_name()
if self.network.interface:
self.server_host = self.network.interface.host
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'interfaces':
self._trigger_update_interfaces()
elif event == 'updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
    def update_status(self, *dt):
        """Refresh the status line and the balance labels from the current
        network and wallet state.  Scheduled via a Clock trigger; *dt*
        absorbs the clock's delta-time argument."""
        self.num_blocks = self.network.get_local_height()
        if not self.wallet:
            self.status = _("No Wallet")
            return
        if self.network is None or not self.network.is_running():
            status = _("Offline")
        elif self.network.is_connected():
            server_height = self.network.get_server_height()
            server_lag = self.network.get_local_height() - server_height
            # server_height == 0 means no usable headers from the server yet.
            if not self.wallet.up_to_date or server_height == 0:
                status = _("Synchronizing...")
            elif server_lag > 1:
                status = _("Server lagging")
            else:
                status = ''
        else:
            status = _("Disconnected")
        # Status is rendered with kivy markup appended to the wallet name.
        self.status = self.wallet.basename() + (' [size=15dp](%s)[/size]'%status if status else '')
        # balance
        # c/u/x presumably are confirmed/unconfirmed/unmatured — TODO confirm
        # against get_balance()'s contract.
        c, u, x = self.wallet.get_balance()
        text = self.format_amount(c+x+u)
        self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
        self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def get_max_amount(self):
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [(TYPE_ADDRESS, addr, '!')]
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
amount = tx.output_value()
return format_satoshis_plain(amount, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
Logger.Error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet.has_password and now - self.pause_time > 60:
self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show a error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show a Info Message Bubble.
'''
self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
    def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
        arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
        '''Method to show a Information Bubble
        .. parameters::
            text: Message to be displayed; the special value 'texture' makes
                  `icon` be treated as a texture shown full screen
            pos: position for the bubble
            duration: duration the bubble remains on screen. 0 = click to hide
            width: width of the Bubble
            arrow_pos: arrow position for the bubble
        '''
        # A single InfoBubble instance is lazily created and reused.
        info_bubble = self.info_bubble
        if not info_bubble:
            info_bubble = self.info_bubble = Factory.InfoBubble()
        win = Window
        if info_bubble.parent:
            # Already attached: detach before re-showing.  The modal variant
            # is wrapped in a ModalView, so that wrapper is removed instead.
            win.remove_widget(info_bubble
                              if not info_bubble.modal else
                              info_bubble._modal_view)
        if not arrow_pos:
            info_bubble.show_arrow = False
        else:
            info_bubble.show_arrow = True
            info_bubble.arrow_pos = arrow_pos
        img = info_bubble.ids.img
        if text == 'texture':
            # icon holds a texture not a source image
            # display the texture in full screen
            text = ''
            img.texture = icon
            info_bubble.fs = True
            info_bubble.show_arrow = False
            img.allow_stretch = True
            info_bubble.dim_background = True
            info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
        else:
            info_bubble.fs = False
            info_bubble.icon = icon
            #if img.texture and img._coreimage:
            #    img.reload()
            img.allow_stretch = False
            info_bubble.dim_background = False
            info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
        info_bubble.message = text
        if not pos:
            # Default placement: horizontally centered, nudged down by half
            # the bubble's own height.
            pos = (win.center[0], win.center[1] - (info_bubble.height/2))
        info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def invoices_dialog(self, screen):
from .uix.dialogs.invoices import InvoicesDialog
if len(self.wallet.invoices.sorted_list()) == 0:
self.show_info(' '.join([
_('No saved invoices.'),
_('Signed invoices are saved automatically when you scan them.'),
_('You may also save unsigned requests or contact addresses using the save button.')
]))
return
popup = InvoicesDialog(self, screen, None)
popup.update()
popup.open()
def requests_dialog(self, screen):
from .uix.dialogs.requests import RequestsDialog
if len(self.wallet.get_sorted_requests(self.electrum_config)) == 0:
self.show_info(_('No saved requests.'))
return
popup = RequestsDialog(self, screen, None)
popup.update()
popup.open()
def addresses_dialog(self, screen):
from .uix.dialogs.addresses import AddressesDialog
popup = AddressesDialog(self, screen, None)
popup.update()
popup.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.wallet.has_password():
on_success = lambda pw: f(*(args + (pw,)))
self.password_dialog(self.wallet, msg, on_success, lambda: None)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
d = os.listdir(dirname)
name = 'default_wallet'
new_path = os.path.join(dirname, name)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, wallet, msg, on_success, on_failure)
self._password_dialog.open()
def change_password(self, cb):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.show_info(_("Your PIN code was updated"))
on_failure = lambda: self.show_error(_("PIN codes do not match"))
self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
compound.py | #!/usr/bin/env python3
"""Cyberjunky's 3Commas bot helpers."""
import argparse
import configparser
import json
import logging
import os
import queue
import sqlite3
import sys
import threading
import time
from logging.handlers import TimedRotatingFileHandler as _TimedRotatingFileHandler
from pathlib import Path
import apprise
from py3cw.request import Py3CW
class NotificationHandler:
    """Queue-based Apprise notification sender.

    When disabled (or given no URLs) every call is a silent no-op.
    """

    def __init__(self, enabled=False, notify_urls=None):
        """Set up Apprise targets and the worker thread when enabled."""
        if not (enabled and notify_urls):
            self.enabled = False
            return
        self.apobj = apprise.Apprise()
        for url in json.loads(notify_urls):
            self.apobj.add(url)
        self.queue = queue.Queue()
        self.start_worker()
        self.enabled = True

    def start_worker(self):
        """Start notification worker."""
        worker = threading.Thread(target=self.process_queue, daemon=True)
        worker.start()

    def process_queue(self):
        """Drain the queue forever, sending each queued message."""
        while True:
            message, attachments = self.queue.get()
            kwargs = {"body": message}
            if attachments:
                kwargs["attach"] = attachments
            self.apobj.notify(**kwargs)
            self.queue.task_done()

    def send_notification(self, message, attachments=None):
        """Send a notification if enabled."""
        if not self.enabled:
            return
        msg = f"[3Commas bots helper {program}]\n" + message
        self.queue.put((msg, attachments or []))
class TimedRotatingFileHandler(_TimedRotatingFileHandler):
    """Override original code to fix bug with not deleting old logfiles."""

    def __init__(self, filename="", when="midnight", interval=1, backupCount=7):
        # Defaults: rotate at midnight, keep 7 rotated files.
        super().__init__(
            filename=filename,
            when=when,
            interval=int(interval),
            backupCount=int(backupCount),
        )

    def getFilesToDelete(self):
        """Find all logfiles present."""
        dirname, basename = os.path.split(self.baseFilename)
        filenames = os.listdir(dirname)
        result = []
        # Rotated files look like "<basename>.<suffix>" where the suffix
        # matches the handler's extMatch pattern (a date stamp).
        prefix = basename + "."
        plen = len(prefix)
        for filename in filenames:
            if filename[:plen] == prefix:
                suffix = filename[plen:]
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirname, filename))
        result.sort()
        if len(result) < self.backupCount:
            result = []
        else:
            # Keep the newest backupCount files; return everything older.
            result = result[: len(result) - self.backupCount]
        return result

    def doRollover(self):
        """Delete old logfiles but keep latest backupCount amount."""
        # NOTE(review): super().doRollover() already renames the base file and
        # reopens the stream; the steps below then repeat the remove+rename,
        # replacing the file super() just rotated with the freshly reopened
        # (near-empty) base file.  This matches the class's stated intent of
        # overriding buggy stdlib behavior, but confirm rotated content is
        # not being discarded.
        super().doRollover()
        self.close()
        timetuple = time.localtime(time.time())
        dfn = self.baseFilename + "." + time.strftime(self.suffix, timetuple)
        if os.path.exists(dfn):
            os.remove(dfn)
        os.rename(self.baseFilename, dfn)
        if self.backupCount > 0:
            for oldlog in self.getFilesToDelete():
                os.remove(oldlog)
        self.stream = open(self.baseFilename, "w")
        # Advance rolloverAt past "now" so rotation does not fire again
        # immediately.
        currenttime = int(time.time())
        newrolloverat = self.computeRollover(currenttime)
        while newrolloverat <= currenttime:
            newrolloverat = newrolloverat + self.interval
        self.rolloverAt = newrolloverat
class Logger:
    """Logger class: wraps a stdlib logger with file + console handlers and
    optional push notifications via the supplied notification handler."""

    my_logger = None

    def __init__(self, notificationhandler, debug_enabled, notify_enabled):
        """Logger init."""
        self.my_logger = logging.getLogger(program)
        self.notify_enabled = notify_enabled
        self.notificationhandler = notificationhandler
        if debug_enabled:
            self.my_logger.setLevel(logging.DEBUG)
            self.my_logger.propagate = True
        else:
            self.my_logger.setLevel(logging.INFO)
            self.my_logger.propagate = False
        formatter = logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        )
        # BUG FIX: the log file lives under f"{datadir}/logs", but the old
        # code created a "logs" directory relative to the current working
        # directory, so opening the file handler failed whenever datadir was
        # not the cwd.  Create the directory the handler actually uses.
        os.makedirs(f"{datadir}/logs", exist_ok=True)
        # Log to file and rotate if needed
        logrotate = int(config.get("settings", "logrotate", fallback=7))
        file_handle = TimedRotatingFileHandler(
            filename=f"{datadir}/logs/{program}.log", backupCount=logrotate
        )
        file_handle.setLevel(logging.DEBUG)
        file_handle.setFormatter(formatter)
        self.my_logger.addHandler(file_handle)
        # Log to console
        console_handle = logging.StreamHandler()
        console_handle.setLevel(logging.INFO)
        console_handle.setFormatter(formatter)
        self.my_logger.addHandler(console_handle)

    def log(self, message, level="info"):
        """Call the log levels."""
        if level == "info":
            self.my_logger.info(message)
        elif level == "warning":
            self.my_logger.warning(message)
        elif level == "error":
            self.my_logger.error(message)
        elif level == "debug":
            self.my_logger.debug(message)

    def info(self, message, notify=False):
        """Info level."""
        self.log(message, "info")
        if self.notify_enabled and notify:
            self.notificationhandler.send_notification(message)

    def warning(self, message, notify=True):
        """Warning level."""
        self.log(message, "warning")
        if self.notify_enabled and notify:
            self.notificationhandler.send_notification(message)

    def error(self, message, notify=True):
        """Error level."""
        self.log(message, "error")
        if self.notify_enabled and notify:
            self.notificationhandler.send_notification(message)

    def debug(self, message, notify=False):
        """Debug level."""
        self.log(message, "debug")
        if self.notify_enabled and notify:
            self.notificationhandler.send_notification(message)
def load_config():
    """Create default or load existing config file.

    Returns the parsed ConfigParser when the ini file exists; otherwise
    writes an example configuration and returns None so the caller can
    prompt the user to edit it first.
    """
    parser = configparser.ConfigParser()
    if parser.read(f"{datadir}/{program}.ini"):
        return parser
    # No config yet: write a template with sane defaults.
    parser["settings"] = {
        "timezone": "Europe/Amsterdam",
        "timeinterval": 3600,
        "debug": False,
        "logrotate": 7,
        "botids": [12345, 67890],
        "profittocompound": 1.0,
        "3c-apikey": "Your 3Commas API Key",
        "3c-apisecret": "Your 3Commas API Secret",
        "notifications": False,
        "notify-urls": ["notify-url1", "notify-url2"],
    }
    with open(f"{datadir}/{program}.ini", "w") as inifile:
        parser.write(inifile)
    return None
def init_threecommas_api(cfg):
    """Init the 3commas API client from credentials in *cfg*."""
    api_key = cfg.get("settings", "3c-apikey")
    api_secret = cfg.get("settings", "3c-apisecret")
    # Retry transient gateway errors (HTTP 502) up to three times.
    options = {
        "request_timeout": 10,
        "nr_of_retries": 3,
        "retry_status_codes": [502],
    }
    return Py3CW(key=api_key, secret=api_secret, request_options=options)
def get_threecommas_deals(botid):
    """Fetch up to 100 finished deals of a 3Commas bot.

    Returns whatever the API handed back; on error the error is logged
    and the (possibly empty) data is still returned.
    """
    error, data = api.request(
        entity="deals",
        action="",
        payload={
            "scope": "finished",
            "bot_id": str(botid),
            "limit": 100,
        },
    )
    if error:
        logger.error("Fetching 3Commas deals failed with error: %s" % error)
        return data
    logger.debug("Fetched 3Commas deals '%s' OK" % data)
    return data
def check_deal(dealid):
    """Return True when *dealid* was already logged in the deals table.

    Uses a parameterized query (qmark placeholder) instead of f-string
    interpolation so the deal id can never break or inject into the SQL.
    """
    row = cursor.execute(
        "SELECT * FROM deals WHERE dealid = ?", (dealid,)
    ).fetchone()
    return row is not None
def compound_bot(thebot):
    """Find profit from deals and calculate new SO and BO values.

    Sums the profit of all finished deals of *thebot* not seen before,
    applies the configured 'profittocompound' fraction, splits the result
    over base order (BO) and safety orders (SO) proportionally to the
    funds each currently requires, and pushes the new volumes to 3Commas.
    """
    deals = get_threecommas_deals(thebot["id"])
    bot_name = thebot["name"]
    if deals:
        dealscount = 0
        profitsum = 0.0
        for deal in deals:
            dealid = deal["id"]
            # Register deal in database
            exist = check_deal(dealid)
            if exist:
                logger.debug("Deal with id '%s' already processed, skipping." % dealid)
            else:
                # Deal not processed yet
                profit = float(deal["final_profit"])
                dealscount = dealscount + 1
                profitsum = profitsum + profit
                db.execute(f"INSERT INTO deals (dealid) VALUES ({dealid})")
        logger.info("Finished deals: %s total profit: %s" % (dealscount, profitsum))
        db.commit()
        if profitsum:
            # Bot values to calculate with
            base_order_size = float(thebot["base_order_volume"])
            safety_order_size = float(thebot["safety_order_volume"])
            max_active_deals = thebot["max_active_deals"]
            max_safety_orders = thebot["max_safety_orders"]
            martingale_volume_coefficient = float(
                thebot["martingale_volume_coefficient"]
            )
            # Total funds needed for all SOs of one deal: geometric series
            # with ratio martingale_volume_coefficient.
            fundssoneeded = safety_order_size
            totalsofunds = safety_order_size
            if max_safety_orders > 1:
                for i in range(1, max_safety_orders):
                    fundssoneeded = fundssoneeded * float(martingale_volume_coefficient)
                    totalsofunds += fundssoneeded
            # Share of the first SO in the total SO funds; used later to
            # scale the per-SO profit split.
            ratiofunds = safety_order_size / totalsofunds
            totalorderfunds = totalsofunds + base_order_size
            logger.info("Current bot settings :")
            logger.info("Base order size : %s" % base_order_size)
            logger.info("Safety order size : %s" % safety_order_size)
            logger.info("Max active deals : %s" % max_active_deals)
            logger.info("Max safety orders : %s" % max_safety_orders)
            logger.info("SO volume scale : %s" % martingale_volume_coefficient)
            logger.info("Total funds for BO : %s" % base_order_size)
            logger.info("Total funds for SO(s): %s" % totalsofunds)
            logger.info("Total funds for order: %s" % totalorderfunds)
            logger.info("Funds ratio for SO(s): %s" % ratiofunds)
            # Calculate current order table (cumulative funds after each SO)
            logger.info("Current order table:")
            order_1 = safety_order_size
            logger.info(f"Order BO = {base_order_size}")
            order_x = base_order_size
            i = 0
            while i < max_safety_orders:
                order_x += order_1 * pow(martingale_volume_coefficient, i)
                logger.info(f"Order #{i+1} = {order_x}")
                i += 1
            # Calculate profit part to compound
            profitpercentage = float(
                config.get("settings", "profittocompound", fallback=1.0)
            )
            logger.info("Profit available to compound: %s" % profitsum)
            profitsum = profitsum * profitpercentage
            logger.info(
                "Profit to compound after applying percentage value from settings (%s): %s "
                % (profitpercentage, profitsum)
            )
            # Calculate the BO/SO ratio
            bopercentage = (
                100
                * float(base_order_size)
                / (float(base_order_size) + float(totalsofunds))
            )
            sopercentage = (
                100
                * float(totalsofunds)
                / (float(totalsofunds) + float(base_order_size))
            )
            logger.info("BO percentage: %s" % bopercentage)
            logger.info("SO percentage: %s" % sopercentage)
            # Calculate compound values, spread over all concurrent deals
            # (and, for SOs, over every safety order).
            boprofitsplit = ((profitsum * bopercentage) / 100) / max_active_deals
            soprofitsplit = (
                ((profitsum * sopercentage) / 100)
                / max_active_deals
                / max_safety_orders
                * ratiofunds
            )
            logger.info("BO compound value: %s" % boprofitsplit)
            logger.info("SO compound value: %s" % soprofitsplit)
            # Compound the profits to BO and SO settings
            newbaseordervolume = base_order_size + boprofitsplit
            newsafetyordervolume = safety_order_size + soprofitsplit
            logger.info(
                "Base order size increased from %s to %s"
                % (base_order_size, newbaseordervolume)
            )
            logger.info(
                "Safety order size increased from %s to %s"
                % (safety_order_size, newsafetyordervolume)
            )
            # Calculate new order table
            logger.info("Order table after compounding:")
            order_1 = newsafetyordervolume
            logger.info(f"Order BO = {newbaseordervolume}")
            order_x = newbaseordervolume
            i = 0
            while i < max_safety_orders:
                order_x += order_1 * pow(martingale_volume_coefficient, i)
                logger.info(f"Order #{i+1} = {order_x}")
                i += 1
            # Update bot settings: 3Commas requires the full bot payload,
            # so unchanged fields are echoed back as-is.
            error, data = api.request(
                entity="bots",
                action="update",
                action_id=str(thebot["id"]),
                payload={
                    "bot_id": thebot["id"],
                    "name": thebot["name"],
                    "pairs": thebot["pairs"],
                    "base_order_volume": newbaseordervolume,  # new base order volume
                    "safety_order_volume": newsafetyordervolume,  # new safety order volume
                    "take_profit": thebot["take_profit"],
                    "martingale_volume_coefficient": thebot[
                        "martingale_volume_coefficient"
                    ],
                    "martingale_step_coefficient": thebot[
                        "martingale_step_coefficient"
                    ],
                    "max_active_deals": max_active_deals,
                    "max_safety_orders": max_safety_orders,
                    "safety_order_step_percentage": thebot[
                        "safety_order_step_percentage"
                    ],
                    "take_profit_type": thebot["take_profit_type"],
                    "strategy_list": thebot["strategy_list"],
                    "active_safety_orders_count": thebot["active_safety_orders_count"],
                },
            )
            if data:
                logger.info(
                    f"Compounded ${round(profitsum, 4)} in profit from {dealscount} deal(s) made by '{bot_name}'\nChanged BO from ${round(base_order_size, 4)} to ${round(newbaseordervolume, 4)}\nand SO from ${round(safety_order_size, 4)} to ${round(newsafetyordervolume, 4)}",
                    True,
                )
            else:
                logger.error(
                    "Error occurred updating bot with new BO/SO values: %s"
                    % error["msg"]
                )
        else:
            logger.info(
                f"{bot_name}\nNo (new) profit made, no BO/SO value updates needed!",
                True,
            )
    else:
        logger.info(f"{bot_name}\nNo (new) deals found for this bot!", True)
def init_compound_db():
    """Create or open database to store bot and deals data.

    Tries to open an existing sqlite file in read-write mode first; when
    that fails, the database file and its schema are created.
    """
    dbname = f"{program}.sqlite3"
    try:
        # mode=rw raises OperationalError if the file does not exist yet.
        dbconnection = sqlite3.connect(f"file:{dbname}?mode=rw", uri=True)
        logger.info(f"Database '{dbname}' opened successfully")
    except sqlite3.OperationalError:
        # First run: create the file and the deals table.
        dbconnection = sqlite3.connect(dbname)
        logger.info(f"Database '{dbname}' created successfully")
        dbconnection.cursor().execute("CREATE TABLE deals (dealid int Primary Key)")
        logger.info("Database tables created successfully")
    return dbconnection
# Start application
program = Path(__file__).stem

# Parse and interpret options.
parser = argparse.ArgumentParser(description="Cyberjunky's 3Commas bot helper.")
parser.add_argument("-d", "--datadir", help="data directory to use", type=str)
args = parser.parse_args()

# Data directory defaults to the current working directory.
if args.datadir:
    datadir = args.datadir
else:
    datadir = os.getcwd()

# Create or load configuration file; None means a template was written
# and the user must edit it before the helper can run.
config = load_config()
if not config:
    logger = Logger(None, False, False)
    logger.info(f"3Commas bot helper {program}!")
    logger.info("Started at %s." % time.strftime("%A %H:%M:%S %d-%m-%Y"))
    logger.info(
        f"Created example config file '{datadir}/{program}.ini', edit it and restart the program."
    )
    sys.exit(0)
else:
    # Handle timezone
    os.environ["TZ"] = config.get("settings", "timezone", fallback="Europe/Amsterdam")
    time.tzset()
    # Init notification handler
    notification = NotificationHandler(
        config.getboolean("settings", "notifications"),
        config.get("settings", "notify-urls"),
    )
    # Init logging
    logger = Logger(
        notification,
        config.getboolean("settings", "debug"),
        config.getboolean("settings", "notifications"),
    )
    logger.info(f"3Commas bot helper {program}")
    logger.info("Started at %s" % time.strftime("%A %H:%M:%S %d-%m-%Y"))
    logger.info(f"Loaded configuration from '{datadir}/{program}.ini'")

if notification.enabled:
    logger.info("Notifications are enabled")
else:
    logger.info("Notifications are disabled")

# Initialize 3Commas API
api = init_threecommas_api(config)
# Initialize or open database
db = init_compound_db()
cursor = db.cursor()

if "compound" in program:
    # Auto compound profit by tweaking SO/BO; loops forever, re-reading
    # the configuration on every pass so edits take effect live.
    while True:
        config = load_config()
        logger.info(f"Reloaded configuration from '{datadir}/{program}.ini'")
        botids = json.loads(config.get("settings", "botids"))
        # Walk through all bots specified
        for bot in botids:
            boterror, botdata = api.request(
                entity="bots",
                action="show",
                action_id=str(bot),
            )
            if botdata:
                compound_bot(botdata)
            else:
                logger.error("Error occurred compounding bots: %s" % boterror["msg"])
        # pylint: disable=C0103
        timeint = int(config.get("settings", "timeinterval"))
        if timeint > 0:
            localtime = time.time()
            nexttime = localtime + int(timeint)
            timeresult = time.strftime("%H:%M:%S", time.localtime(nexttime))
            logger.info("Next update in %s Seconds at %s" % (timeint, timeresult), True)
            time.sleep(timeint)
        else:
            # Non-positive interval means "run once".
            break
|
evaluate.py | from TTS.text2speech import tts_class
from multiprocessing import Process
import faiss
import time
import sqlite3
import csv
import random
import copy
import tensorflow_hub as hub
import tensorflow_text
import math
import numpy as np
import pickle
from Retriever.Retrieve import retrieve
import Utils.functions as utils
from ReRanker.rerank import rank_and_choose
from Generator.generator import generate as DialoGPT_Generate
from Classifier.model.dialog_acts import Encoder as Classifier
from Sentence_Encoder.meta_response_encoder_fast import encode as response_encode
from Sentence_Encoder.meta_query_encoder_fast import encode as query_encode
import Sentence_Encoder.encoder_client as encoder_client
import tensorflow as tf
from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Config
import torch.nn.functional as F
import torch.nn as nn
import torch as T
import os
import sys
import argparse
import logging
# Silence TensorFlow's verbose startup logging.
logging.getLogger("tensorflow").setLevel(logging.CRITICAL)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logging.basicConfig(level=logging.CRITICAL)

parser = argparse.ArgumentParser(description="Chatbot")
parser.add_argument('--voice', dest='voice', action='store_true')
parser.add_argument('--no-voice', dest='voice', action='store_false')
parser.set_defaults(voice=True)
flags = parser.parse_args()

device = "cuda"

# Faiss index mapping plus the index itself, used by the retriever.
with open("Retriever/Faiss_index/thread_idx.pkl", 'rb') as fp:
    idx = pickle.load(fp)
index = faiss.read_index('Retriever/Faiss_index/large.index')

# LOAD DATABASE
conn = sqlite3.connect('Retriever/Database/reddit.db')
c = conn.cursor()

# LOAD SCRIPTS
with open('Scripted/Processed_Scripts/Bot_Profile.pkl', 'rb') as fp:
    bot_profile = pickle.load(fp)
bot_queries = [k for k, v in bot_profile.items()]
with open('Scripted/Processed_Scripts/Chatterbot.pkl', 'rb') as fp:
    chatterbot = pickle.load(fp)
chatterbot_queries = [k for k, v in chatterbot.items()]

# LOAD SCRIPT EMBEDDINGS (precomputed query embeddings for both scripts)
with open('Scripted/Processed_Scripts/embedded_bot_queries.pkl', 'rb') as fp:
    bot_queries_embd = pickle.load(fp)
with open('Scripted/Processed_Scripts/embedded_chatterbot_queries.pkl', 'rb') as fp:
    chatterbot_queries_embd = pickle.load(fp)

# Load Dialog Acts Classifer (label vocabulary plus trained weights)
with open("Classifier/data/processed_data.pkl", "rb") as fp:
    data = pickle.load(fp)
labels2idx = data["labels2idx"]
idx2labels = {v: k for k, v in labels2idx.items()}
with T.no_grad():
    dialog_act_classifier = Classifier(
        D=bot_queries_embd.shape[-1], classes_num=len(labels2idx)).cuda()
    checkpoint = T.load("Classifier/Model_Backup/model.pt")
    dialog_act_classifier.load_state_dict(checkpoint['model_state_dict'])
    dialog_act_classifier = dialog_act_classifier.eval()

# Load TTS model
with T.no_grad():
    text2speech = tts_class()

# LOAD DialoGPT Generator (forward model plus a small reverse model
# used by the reranker)
with T.no_grad():
    tokenizer = GPT2Tokenizer.from_pretrained('Generator/DialoGPT/Configs/')
    weights = T.load('Generator/DialoGPT/Parameters/medium_ft.pkl')
    weights_reverse = T.load('Generator/DialoGPT/Parameters/small_reverse.pkl')
    cfg = GPT2Config.from_json_file('Generator/DialoGPT/Configs/config.json')
    model = GPT2LMHeadModel(cfg)
    model_reverse = GPT2LMHeadModel(cfg)
    # fix misused key value
    weights["lm_head.weight"] = weights["lm_head.decoder.weight"]
    weights.pop("lm_head.decoder.weight", None)
    weights_reverse["lm_head.weight"] = weights_reverse["lm_head.decoder.weight"]
    weights_reverse.pop("lm_head.decoder.weight", None)
    model.load_state_dict(weights)
    model.to('cuda')
    model.eval()
    model_reverse.load_state_dict(weights_reverse)
    model_reverse.to('cuda')
    model_reverse.eval()

# Sentence encoders are kept on CPU.
with tf.device("/cpu:0"):
    # Hub Models
    ConvRT_model = encoder_client.EncoderClient(
        "Sentence_Encoder/Embeddings/ConvRT", use_extra_context=True)
    USE_QA_model = hub.load("Sentence_Encoder/Embeddings/USE_QA/")

# %%
# Control codes that may be embedded in scripted responses, and the CSV
# sources backing each content code.
command_codes = ["<PASS>", "<JOKE>", "<GENERATE>",
                 "<INITIATE>", "<TIL>", "<STORY>", "<SHOWER>", "<STOP>"]
code_map = {"<INITIATE>": ["Scripted/Random_Reddit_Data/nostupidq.csv",
                           "Scripted/Random_Reddit_Data/jokesq.csv",
                           "Scripted/Random_Reddit_Data/showerthoughtsq.csv",
                           "Scripted/Random_Reddit_Data/tilq.csv"],
            "<TIL>": ["Scripted/Random_Reddit_Data/tilq.csv"],
            "<SHOWER>": ["Scripted/Random_Reddit_Data/showerthoughtsq.csv"],
            "<STORY>": ["Scripted/Random_Reddit_Data/writingpromptsa.csv"],
            "<JOKE>": ["Scripted/Random_Reddit_Data/jokesq.csv"]}
def random_response(candidates, conversation_history, p=None):
    """Pick a candidate not already used in the conversation.

    Draws uniformly, or with probabilities *p* when given, retrying up
    to five extra times before accepting a repeat.
    """
    max_retries = 5

    def draw():
        if p is None:
            return random.choice(candidates)
        return np.random.choice(candidates, p=p)

    response = draw()
    retries = 0
    while response in conversation_history:
        response = draw()
        retries += 1
        if retries > max_retries:
            break
    return response
# %%
def load_random_reddit(directory, conversation_history):
    """Load candidate posts from the CSV at *directory* and return one
    random entry that has not been used in the conversation yet.

    Writing-prompt files keep only top-level replies (rows whose parent
    is the thread itself); joke files append the punchline (selftext)
    to the title.
    """
    candidates = []
    with open(directory, newline='') as csvfile:
        csv_reader = csv.DictReader(csvfile)
        for i, row in enumerate(csv_reader):
            if 'writing' in directory:
                # Reddit ids are prefixed with a type tag ("t1_"/"t3_");
                # strip the first 3 chars before comparing.
                parent_id = str(row['parent_id'])[3:]
                thread_id = str(row['link_id'])[3:]
                if parent_id == thread_id:
                    candidate = str(row["body"])
            else:
                candidate = str(row["title"])
            if 'joke' in directory:
                candidate += ".... "+str(row['selftext'])
            # NOTE(review): for 'writing' files, a row whose parent is not
            # the thread re-appends the previous candidate (and raises
            # UnboundLocalError if it is the very first row) — confirm
            # this is acceptable for the data used.
            candidates.append(candidate)
    return random_response(candidates, conversation_history)
# extract top candidates (queries or responses)
def top_candidates(candidates, scores, top=1):
    """Return the *top* best-scoring candidates and scores, plus the
    full ranking as indices into *candidates* (best first)."""
    ranking = np.flip(np.argsort(scores), axis=-1).tolist()
    ranked_candidates = [candidates[i] for i in ranking]
    ranked_scores = [scores[i] for i in ranking]
    return ranked_candidates[:top], ranked_scores[:top], ranking
# %%
def generate(texts, past):
    """Generate candidate replies for *texts* with DialoGPT.

    *past* is threaded through unchanged (cached activations are not
    used by the underlying generator here).
    """
    replies, _ = DialoGPT_Generate(texts, model, tokenizer)
    return replies, past
# START DOING STUFF
# Dialogue state: full history of utterances/responses, the generator's
# (unused) past cache, and a flag set when the bot decides to stop.
conversation_history = []
past = None
stop_flag = 0
print("\n")
# Fixed evaluation script: the bot is driven by this scripted sequence
# of user utterances instead of interactive input.
utterances = ["Hello, how are you?",
              "What is your name?",
              "Do you like movies? If so, what kind?",
              "Are you sentient?",
              "Tell me a Joke.",
              "What can you tell me about Quantum Field Theory?",
              "Do you know anything about Sellar's myth of the given?",
              "I am quite tired today; I didn’t know that the project deadline was so soon. I had a sleepless night working on the project.",
              "Did you know I accidentally deleted major portions of the project and had to reconstruct it again? Version control is important huh. Who knew?",
              "Just my luck, I have three project presentations in the same day.",
              "By the way, can you tell me about Language Models?",
              "Please tell me more about them."]
# Main evaluation loop: for every scripted utterance, encode it, classify
# its dialog act, then pick a response from scripted / generated /
# retrieved candidates via the reranker.
for utterance in utterances:
    print("Say Something: {}".format(utterance))
    utils.delay_print("\nThinking......")
    candidates = []
    temp_candidates = []
    temp_scores = []
    # Build the (truncated) context windows for the encoders.
    if not conversation_history:
        query_context = []
        response_context = [""]
    else:
        if len(conversation_history) > 5:
            truncated_history = copy.deepcopy(conversation_history[-5:])
        else:
            truncated_history = copy.deepcopy(conversation_history)
        response_context = [conversation_history[-1]]
        # ConveRT needs reversed Context, not sure about USE QA but assuming it's not reverse
        query_context = [stuff for stuff in truncated_history]
    query_encoding = query_encode([utterance], USE_QA_model, ConvRT_model, [query_context])
    # Always produce DialoGPT candidates, conditioned on recent history.
    if conversation_history:
        if len(conversation_history) > 5:
            truncated_history = conversation_history[-5:]
        else:
            truncated_history = conversation_history
        generated_responses, past = generate(truncated_history+[utterance], past)
    else:
        generated_responses, past = generate([utterance], past)
    # Match the utterance against the scripted bot-profile queries.
    bot_cosine_scores = utils.cosine_similarity_nd(query_encoding, bot_queries_embd)
    bot_queries_, bot_cosine_scores_, _ = top_candidates(bot_queries, bot_cosine_scores, top=1)
    # Split scripted candidates from their embedded control codes.
    active_codes = []
    bot_candidates = bot_profile[bot_queries_[0]]
    filtered_bot_candidates = []
    for candidate in bot_candidates:
        flag = 0
        for code in command_codes:
            if code in candidate:
                active_codes.append(code)
                candidate = candidate.replace(code, "")
                filtered_bot_candidates.append(candidate)
                flag = 1
                break
        if flag == 0:
            candidates.append(candidate)
            filtered_bot_candidates.append(candidate)
            active_codes.append("")
    # Classify the utterance's dialog act; keep the top-2 labels.
    with T.no_grad():
        logits = dialog_act_classifier(T.tensor(query_encoding).to(device))
        _, sorted_idx = T.sort(logits, dim=-1, descending=True)
        sorted_idx = sorted_idx.squeeze(0)
        sorted_idx = sorted_idx[0:2].cpu().tolist()
        labels = [idx2labels[i] for i in sorted_idx]
    # print(labels)
    """
    Possible Dialog Acts:
    ['nonsense', 'dev_command', 'open_question_factual', 'appreciation', 'other_answers', 'statement', \
    'respond_to_apology', 'pos_answer', 'closing', 'comment', 'neg_answer', 'yes_no_question', 'command', \
    'hold', 'NULL', 'back-channeling', 'abandon', 'opening', 'other', 'complaint', 'opinion', 'apology', \
    'thanking', 'open_question_opinion']
    """
    # Strong scripted match: answer from the bot profile, honouring any
    # control code attached to the chosen candidate.
    if bot_cosine_scores_[0] >= 0.75:
        response, id = rank_and_choose(USE_QA_model, ConvRT_model,
                                       tokenizer,
                                       model_reverse,
                                       utterance,
                                       query_encoding,
                                       filtered_bot_candidates,
                                       response_context,
                                       conversation_history)
        code = active_codes[id]
        if code in code_map:
            # Content codes append a random reddit post of that flavour.
            directories = code_map[code]
            directory = random.choice(directories)
            response += " "+load_random_reddit(directory, conversation_history)
        elif code == "<GENERATE>":
            response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
                                          tokenizer,
                                          model_reverse,
                                          utterance,
                                          query_encoding,
                                          generated_responses,
                                          response_context,
                                          conversation_history)
        elif code == "<STOP>":
            stop_flag = 1
    elif stop_flag != 1:
        # No scripted match: route on the predicted dialog act.
        mode = "DEFAULT"
        bias = None
        if 'open_question_factual' in labels \
                or ('yes_no_question' in labels and 'NULL' not in labels) \
                or 'open_question_opinion' in labels or 'command' in labels:
            bias = 0.07  # biases towards retrieval
        elif "apology" in labels:
            mode = "BREAK"
            candidates = ["Apology accepted.", "No need to apologize.",
                          "No worries.", "You are forgiven"]
            response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
                                          tokenizer,
                                          model_reverse,
                                          utterance,
                                          query_encoding,
                                          candidates,
                                          response_context,
                                          conversation_history)
        elif "abandon" in labels or "nonsense" in labels:
            mode = np.random.choice(["BREAK", "INITIATE"], p=[0.6, 0.4])
            if mode == "BREAK":
                candidates = ["what?", "Can you rephrase what you mean?",
                              "What do you mean exactly?"]
                response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
                                              tokenizer,
                                              model_reverse,
                                              utterance,
                                              query_encoding,
                                              generated_responses+candidates,
                                              response_context,
                                              conversation_history)
            else:
                directories = code_map['<INITIATE>']
                directory = random.choice(directories)
                response = load_random_reddit(directory, conversation_history)
        elif 'hold' in labels:
            mode = "BREAK"
            candidates = ["Do you want to add something more?",
                          "I think you want to say something more."]
            response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
                                          tokenizer,
                                          model_reverse,
                                          utterance,
                                          query_encoding,
                                          generated_responses+candidates,
                                          response_context,
                                          conversation_history)
        elif 'closing' in labels:
            mode = "BREAK"
            candidates = ["Nice talking to you.", "Goodbye.", "See you later."]
            response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
                                          tokenizer,
                                          model_reverse,
                                          utterance,
                                          query_encoding,
                                          candidates,
                                          response_context,
                                          conversation_history)
            stop_flag = 1
        elif 'opening' in labels:
            mode = "BREAK"
            response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
                                          tokenizer,
                                          model_reverse,
                                          utterance,
                                          query_encoding,
                                          generated_responses,
                                          response_context,
                                          conversation_history)
            stop_flag = 1
        elif 'thanking' in labels:
            mode = np.random.choice(["BREAK", "INITIATE"], p=[0.6, 0.4])
            if mode == "BREAK":
                candidates = ["No need to mention", "You are welcome."]
                response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
                                              tokenizer,
                                              model_reverse,
                                              utterance,
                                              query_encoding,
                                              generated_responses+candidates,
                                              response_context,
                                              conversation_history)
            else:
                directories = code_map['<INITIATE>']
                directory = random.choice(directories)
                response = load_random_reddit(directory, conversation_history)
        elif 'apology' in labels:
            mode = "BREAK"
            candidates = ["Apology accepted.", "Apology granted",
                          "No Worries!", "No need to apologize."]
            response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
                                          tokenizer,
                                          model_reverse,
                                          utterance,
                                          query_encoding,
                                          generated_responses+candidates,
                                          response_context,
                                          conversation_history)
        elif 'response_to_apology' in labels\
                or 'pos_answer' in labels or 'neg_answer' in labels\
                or 'appreciation' in labels or 'back_channeling' in labels:
            mode = np.random.choice(["BREAK", "INITIATE"], p=[0.6, 0.4])
            if mode == "BREAK":
                response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
                                              tokenizer,
                                              model_reverse,
                                              utterance,
                                              query_encoding,
                                              generated_responses,
                                              response_context,
                                              conversation_history)
            else:
                directories = code_map['<INITIATE>']
                directory = random.choice(directories)
                response = load_random_reddit(directory, conversation_history)
        # DEFAULT path: mix chatterbot script, generated, and retrieved
        # candidates, optionally biasing the ranker towards retrieval.
        if mode != "BREAK":
            chatterbot_cosine_scores = utils.cosine_similarity_nd(
                query_encoding, chatterbot_queries_embd)
            chatterbot_queries_, chatterbot_cosine_scores_, _ = top_candidates(
                chatterbot_queries, chatterbot_cosine_scores, top=1)
            candidates += chatterbot[chatterbot_queries_[0]]
            if chatterbot_cosine_scores_[0] >= 0.75:
                response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
                                              tokenizer,
                                              model_reverse,
                                              utterance,
                                              query_encoding,
                                              candidates,
                                              response_context,
                                              conversation_history)
            else:
                #print("\n\nABOUT TO BE RETRIEVED\n\n")
                retrieved_candidates = retrieve(
                    conn, c, idx, index, query_encoding, query_context)
                #print("\n\nABOUT TO BE RETRIEVED\n\n")
                if bias is not None:
                    # Per-candidate additive bias: zero for scripted and
                    # generated candidates, `bias` for retrieved ones.
                    biases = [0.0 for _ in candidates]
                    for _ in generated_responses:
                        biases.append(0.0)
                    for _ in retrieved_candidates:
                        biases.append(bias)
                    biases = np.asarray(biases, np.float32)
                else:
                    biases = None
                candidates += generated_responses + retrieved_candidates
                response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
                                              tokenizer,
                                              model_reverse,
                                              utterance,
                                              query_encoding,
                                              candidates,
                                              response_context,
                                              conversation_history,
                                              bias=biases)
    print("\n")
    # Short responses are spoken: print and play audio concurrently.
    if len(str(response).split(" ")) <= 100:
        entry = utils.simple_preprocess(str(response).lower(),
                                        for_speech=True,
                                        return_tokenized=True)
        entry = " ".join(entry)
        wavefiles = text2speech.process(entry)

        def f1():
            utils.delay_print("Bot: "+response)

        def f2():
            text2speech.play(wavefiles)

        p1 = Process(target=f1)
        p2 = Process(target=f2)
        p1.start()
        p2.start()
        p1.join()
        p2.join()
    else:
        utils.delay_print("Bot: "+response, t=0.01)
    print("\n")
    conversation_history.append(utterance)
    conversation_history.append(response)
    if stop_flag == 1:
        break
    # break
|
teleoperator.py |
#############################################################################
# DEVELOPED BY KANISHK #############
# THIS SCRIPT CONSTRUCTS A TELEOPERATOR CLASS #############
#############################################################################
# Library Imports
import pygame
import numpy as np
from threading import Thread
import memory
#############################################################################
# KEYBOARD TELEOPERATION CLASS #############
#############################################################################
class NumStick:
    """Keyboard teleoperator.

    Opens a small pygame window, listens for numpad/arrow key presses on
    a background thread, and moves the robot in fixed-size steps through
    the supplied linear planner. The previous implementation repeated the
    same print/render/plan sequence for every key; the key-to-direction
    mapping below removes that duplication.
    """

    def __init__(self, LinearPlanner, step_size=0.01):
        """
        Args:
            LinearPlanner: callable taking a target XYZ position array.
            step_size: translation step (metres) per key press.
        """
        # Init. Pygame
        pygame.init()
        self.display = pygame.display.set_mode((400, 400))
        pygame.display.set_caption('Teleoperation: Keyboard')  # typo fixed
        self.RenderText('Click Here! to Start')
        # Step Size
        self.step_size = step_size
        # Linear Planner
        self.LinearPlanner = LinearPlanner

    def run(self):
        """Start the keyboard listener on a background thread."""
        self.controller = Thread(target=self.keyboard_listener)
        self.controller.start()

    def RenderText(self, msg):
        """
        Description:
            1. Renders a string on the pygame window.

        Args:
            msg -> string: Message to display on the window.
        """
        # Clear the Window
        self.display.fill((0, 0, 0))
        pygame.display.update()
        # Prepare the text
        green = (0, 255, 0)
        font = pygame.font.Font('freesansbold.ttf', 18)
        text = font.render(msg, True, green)
        textRect = text.get_rect()
        # Compute the center for the text
        textRect.center = (400 // 2, 400 // 2)
        # Refresh the Display
        self.display.blit(text, textRect)
        pygame.display.update()

    def keyboard_listener(self):
        """
        Description:
            1. Uses the keyboard for teleoperating the robot.
            2. Moves the robot with the fixed step size of 0.01m.
            3. KeyBindings,
                a. NumPad '6': +Y
                b. NumPad '4': -Y
                c. NumPad '8': -X
                d. NumPad '2': +X
                e. NumPad '7': -Y & -X
                f. NumPad '9': +Y & -X
                g. NumPad '1': +X & -Y
                h. NumPad '3': +X & +Y
                i. ArrowKey UP: +Z
                j. ArrowKey Down : -Z
                k. Home: jump to the fixed HOME pose
        """
        # Label and unit XYZ direction for every relative-motion key; the
        # displacement applied is direction * step_size.
        key_to_direction = {
            pygame.K_KP6: ('+Y', np.array([0, 1, 0])),
            pygame.K_KP4: ('-Y', np.array([0, -1, 0])),
            pygame.K_KP8: ('-X', np.array([-1, 0, 0])),
            pygame.K_KP2: ('+X', np.array([1, 0, 0])),
            pygame.K_UP: ('+Z', np.array([0, 0, 1])),
            pygame.K_DOWN: ('-Z', np.array([0, 0, -1])),
            pygame.K_KP7: ('-X & -Y', np.array([-1, -1, 0])),
            pygame.K_KP9: ('-X & +Y', np.array([-1, 1, 0])),
            pygame.K_KP1: ('+X & -Y', np.array([1, -1, 0])),
            pygame.K_KP3: ('+X & +Y', np.array([1, 1, 0])),
        }
        while True:
            for event in pygame.event.get():
                if event.type == pygame.MOUSEBUTTONDOWN:
                    self.RenderText('Waiting for an Input!')
                if event.type == pygame.KEYDOWN:
                    if event.key in key_to_direction:
                        label, direction = key_to_direction[event.key]
                        print('Moving Robot: Direction ' + label)
                        self.RenderText('Moving Robot: Direction ' + label)
                        self.RenderText('Moving the Robot')
                        # Current pose comes from the shared memory module.
                        target = memory.pose + direction * self.step_size
                        self.LinearPlanner(target)
                        self.RenderText('Done!')
                        self.RenderText('Waiting for a New Input!')
                    elif event.key == pygame.K_HOME:
                        print('Moving Robot: HOME')
                        self.RenderText('Moving Robot: HOME')
                        self.RenderText('Moving the Robot')
                        # HOME is an absolute pose, not a relative step.
                        target = np.array([0.08, -0.05, -0.015])
                        self.LinearPlanner(target)
                        self.RenderText('Done!')
                        self.RenderText('Waiting for a New Input!')
|
MockSocketProgram.py | #Mock socket program with multithreading
import threading
import time
# Mock server endpoint (no real socket is opened anywhere in this file).
PORT = 5050
SERVER = "195.888.756"  # NOTE(review): not a valid IPv4 address — mock value only
ADDR = (SERVER, PORT)
# Canned fixture data: one entry per simulated client, index-aligned.
names = ["steven", "jon", "sarah"]
address = [("195.888.7.56", 4040), ("192.168.1.26", 3030), ("196.888.1.56", 2020)]
messages = ["steven: Sent a concern", "jon: Ordered a package", "sarah: Making a transaction"]
def handle_client(addr, i):
    """Simulate serving connected client *i*: wait, receive, log."""
    print(f"[NEW CONNECTION] {addr} connected.")
    print("I am waiting for a message from " + names[i])
    # Simulate the blocking wait for the client's message.
    time.sleep(3)
    incoming = client_send(i)
    print(f"[{addr}] {incoming}")
    print("Msg received")
def server_accept(i):
    """Mock accept(): return the address of pending client *i*."""
    client_addr = address[i]
    return client_addr
def start():
    """Accept three mock clients, handling each on its own thread."""
    print(f"[LISTENING] Server is listening on {SERVER}")
    for i in range(3):
        # Time consuming as it waits for a client server to accept.
        time.sleep(1)
        addr = server_accept(i)
        worker = threading.Thread(target=handle_client, args=(addr, i))
        worker.start()
        print(f"[ACTIVE CONNECTIONS] {threading.activeCount() - 1}")
def client_send(i):
    """Mock client: return the canned message for client *i*."""
    outgoing = messages[i]
    return outgoing
# Time the whole mock-server run (threads are not joined, so this
# measures the accept loop, not the client handlers).
t1 = time.perf_counter()
print("[STARTING] server is starting...")
start()
t2 = time.perf_counter()
print(f'Finished in {t2-t1} seconds') |
test_events.py | """Tests for events.py."""
import collections.abc
import concurrent.futures
import functools
import gc
import io
import os
import platform
import re
import signal
import socket
try:
import ssl
except ImportError:
ssl = None
import subprocess
import sys
import threading
import time
import errno
import unittest
from unittest import mock
import weakref
if sys.platform != 'win32':
import tty
import asyncio
from asyncio import coroutines
from asyncio import proactor_events
from asyncio import selector_events
from asyncio import sslproto
from asyncio import test_utils
try:
from test import support
except ImportError:
from asyncio import test_support as support
def data_file(filename):
    """Locate *filename* in the test-support home dir or next to this
    module; raise FileNotFoundError when it exists in neither place."""
    search_dirs = []
    if hasattr(support, 'TEST_HOME_DIR'):
        search_dirs.append(support.TEST_HOME_DIR)
    search_dirs.append(os.path.dirname(__file__))
    for directory in search_dirs:
        fullname = os.path.join(directory, filename)
        if os.path.isfile(fullname):
            return fullname
    raise FileNotFoundError(filename)
def osx_tiger():
    """Return True if the platform is Mac OS 10.4 or older."""
    if sys.platform != 'darwin':
        return False
    raw_version = platform.mac_ver()[0]
    mac_version = tuple(int(part) for part in raw_version.split('.'))
    return mac_version < (10, 5)
def _test_get_event_loop_new_process__sub_proc():
    """Run a trivial coroutine on a fresh loop and return its result."""
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    async def doit():
        return 'hello'

    return loop.run_until_complete(doit())
# Certificate fixture files used by the SSL tests.
ONLYCERT = data_file('ssl_cert.pem')
ONLYKEY = data_file('ssl_key.pem')
SIGNED_CERTFILE = data_file('keycert3.pem')
SIGNING_CA = data_file('pycacert.pem')
# Expected decoded peer certificate matching SIGNED_CERTFILE.
PEERCERT = {'serialNumber': 'B09264B1F2DA21D1',
            'version': 1,
            'subject': ((('countryName', 'XY'),),
                        (('localityName', 'Castle Anthrax'),),
                        (('organizationName', 'Python Software Foundation'),),
                        (('commonName', 'localhost'),)),
            'issuer': ((('countryName', 'XY'),),
                       (('organizationName', 'Python Software Foundation CA'),),
                       (('commonName', 'our-ca-server'),)),
            'notAfter': 'Nov 13 19:47:07 2022 GMT',
            'notBefore': 'Jan 4 19:47:07 2013 GMT'}
class MyBaseProto(asyncio.Protocol):
    """Stream protocol fixture that records its lifecycle.

    Tracks the state machine INITIAL -> CONNECTED -> (EOF ->) CLOSED and
    counts received bytes in ``nbytes``.  When a loop is supplied, the
    ``connected`` and ``done`` futures resolve on connection_made /
    connection_lost respectively.
    """
    # Class-level defaults so instances built without a loop still have
    # the attributes (left as None).
    connected = None
    done = None

    def __init__(self, loop=None):
        self.transport = None
        self.state = 'INITIAL'
        self.nbytes = 0
        if loop is None:
            return
        self.connected = asyncio.Future(loop=loop)
        self.done = asyncio.Future(loop=loop)

    def connection_made(self, transport):
        self.transport = transport
        assert self.state == 'INITIAL', self.state
        self.state = 'CONNECTED'
        if self.connected:
            self.connected.set_result(None)

    def data_received(self, data):
        assert self.state == 'CONNECTED', self.state
        self.nbytes = self.nbytes + len(data)

    def eof_received(self):
        assert self.state == 'CONNECTED', self.state
        self.state = 'EOF'

    def connection_lost(self, exc):
        assert self.state in ('CONNECTED', 'EOF'), self.state
        self.state = 'CLOSED'
        if self.done:
            self.done.set_result(None)
class MyProto(MyBaseProto):
    """MyBaseProto variant that fires an HTTP GET as soon as connected."""

    def connection_made(self, transport):
        super().connection_made(transport)
        request = b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n'
        transport.write(request)
class MyDatagramProto(asyncio.DatagramProtocol):
    """Datagram protocol fixture recording state and received byte count."""
    # Default when no loop is supplied.
    done = None

    def __init__(self, loop=None):
        self.state = 'INITIAL'
        self.nbytes = 0
        if loop is None:
            return
        self.done = asyncio.Future(loop=loop)

    def connection_made(self, transport):
        self.transport = transport
        assert self.state == 'INITIAL', self.state
        self.state = 'INITIALIZED'

    def datagram_received(self, data, addr):
        assert self.state == 'INITIALIZED', self.state
        self.nbytes = self.nbytes + len(data)

    def error_received(self, exc):
        # Only the state invariant is checked; errors are otherwise ignored.
        assert self.state == 'INITIALIZED', self.state

    def connection_lost(self, exc):
        assert self.state == 'INITIALIZED', self.state
        self.state = 'CLOSED'
        if self.done:
            self.done.set_result(None)
class MyReadPipeProto(asyncio.Protocol):
    """Read-pipe protocol fixture logging its lifecycle as a state list."""
    done = None

    def __init__(self, loop=None):
        # A growing list so the exact transition order can be asserted.
        self.state = ['INITIAL']
        self.nbytes = 0
        self.transport = None
        if loop is None:
            return
        self.done = asyncio.Future(loop=loop)

    def connection_made(self, transport):
        self.transport = transport
        assert self.state == ['INITIAL'], self.state
        self.state.append('CONNECTED')

    def data_received(self, data):
        assert self.state == ['INITIAL', 'CONNECTED'], self.state
        self.nbytes = self.nbytes + len(data)

    def eof_received(self):
        assert self.state == ['INITIAL', 'CONNECTED'], self.state
        self.state.append('EOF')

    def connection_lost(self, exc):
        if 'EOF' not in self.state:
            self.state.append('EOF')  # It is okay if EOF is missed.
        assert self.state == ['INITIAL', 'CONNECTED', 'EOF'], self.state
        self.state.append('CLOSED')
        if self.done:
            self.done.set_result(None)
class MyWritePipeProto(asyncio.BaseProtocol):
    """Write-pipe protocol fixture: INITIAL -> CONNECTED -> CLOSED."""
    done = None

    def __init__(self, loop=None):
        self.state = 'INITIAL'
        self.transport = None
        if loop is None:
            return
        self.done = asyncio.Future(loop=loop)

    def connection_made(self, transport):
        self.transport = transport
        assert self.state == 'INITIAL', self.state
        self.state = 'CONNECTED'

    def connection_lost(self, exc):
        assert self.state == 'CONNECTED', self.state
        self.state = 'CLOSED'
        if self.done is not None:
            self.done.set_result(None)
class MySubprocessProtocol(asyncio.SubprocessProtocol):
    """Subprocess protocol fixture recording pipe output and lifecycle."""

    def __init__(self, loop):
        self.state = 'INITIAL'
        self.transport = None
        self.returncode = None
        # Resolved on connection_made / connection_lost respectively.
        self.connected = asyncio.Future(loop=loop)
        self.completed = asyncio.Future(loop=loop)
        # One future per child pipe (stdin=0, stdout=1, stderr=2).
        self.disconnects = {fd: asyncio.Future(loop=loop) for fd in range(3)}
        # Bytes received so far on stdout/stderr, plus "data arrived" events.
        self.data = {1: b'', 2: b''}
        self.got_data = {1: asyncio.Event(loop=loop),
                         2: asyncio.Event(loop=loop)}

    def connection_made(self, transport):
        self.transport = transport
        assert self.state == 'INITIAL', self.state
        self.state = 'CONNECTED'
        self.connected.set_result(None)

    def connection_lost(self, exc):
        assert self.state == 'CONNECTED', self.state
        self.state = 'CLOSED'
        self.completed.set_result(None)

    def pipe_data_received(self, fd, data):
        assert self.state == 'CONNECTED', self.state
        self.data[fd] += data
        self.got_data[fd].set()

    def pipe_connection_lost(self, fd, exc):
        assert self.state == 'CONNECTED', self.state
        if exc:
            self.disconnects[fd].set_exception(exc)
        else:
            self.disconnects[fd].set_result(exc)

    def process_exited(self):
        assert self.state == 'CONNECTED', self.state
        self.returncode = self.transport.get_returncode()
class EventLoopTestsMixin:
    def setUp(self):
        """Create a fresh event loop for each test and install it."""
        super().setUp()
        self.loop = self.create_event_loop()
        self.set_event_loop(self.loop)
    def tearDown(self):
        """Flush pending callbacks, run cleanups and collect garbage."""
        # just in case if we have transport close callbacks
        if not self.loop.is_closed():
            test_utils.run_briefly(self.loop)
        self.doCleanups()
        support.gc_collect()
        super().tearDown()
    def test_run_until_complete_nesting(self):
        """run_until_complete() must refuse to nest inside a running loop."""
        @asyncio.coroutine
        def coro1():
            yield
        @asyncio.coroutine
        def coro2():
            self.assertTrue(self.loop.is_running())
            self.loop.run_until_complete(coro1())
        self.assertRaises(
            RuntimeError, self.loop.run_until_complete, coro2())
    # Note: because of the default Windows timing granularity of
    # 15.6 msec, we use fairly long sleep times here (~100 msec).
    def test_run_until_complete(self):
        """run_until_complete(sleep(0.1)) takes roughly 0.1s of loop time."""
        t0 = self.loop.time()
        self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
        t1 = self.loop.time()
        self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
    def test_run_until_complete_stopped(self):
        """Stopping the loop before the task finishes raises RuntimeError."""
        @asyncio.coroutine
        def cb():
            self.loop.stop()
            yield from asyncio.sleep(0.1, loop=self.loop)
        task = cb()
        self.assertRaises(RuntimeError,
                          self.loop.run_until_complete, task)
    def test_call_later(self):
        """call_later() runs the callback after roughly the given delay."""
        results = []
        def callback(arg):
            results.append(arg)
            self.loop.stop()
        self.loop.call_later(0.1, callback, 'hello world')
        t0 = time.monotonic()
        self.loop.run_forever()
        t1 = time.monotonic()
        self.assertEqual(results, ['hello world'])
        self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
    def test_call_soon(self):
        """call_soon() passes positional args through to the callback."""
        results = []
        def callback(arg1, arg2):
            results.append((arg1, arg2))
            self.loop.stop()
        self.loop.call_soon(callback, 'hello', 'world')
        self.loop.run_forever()
        self.assertEqual(results, [('hello', 'world')])
    def test_call_soon_threadsafe(self):
        """call_soon_threadsafe() schedules from another thread, in order."""
        results = []
        lock = threading.Lock()
        def callback(arg):
            results.append(arg)
            if len(results) >= 2:
                self.loop.stop()
        def run_in_thread():
            self.loop.call_soon_threadsafe(callback, 'hello')
            lock.release()
        # The lock makes the loop start only after the thread has scheduled
        # its callback, so 'hello' always precedes 'world'.
        lock.acquire()
        t = threading.Thread(target=run_in_thread)
        t.start()
        with lock:
            self.loop.call_soon(callback, 'world')
            self.loop.run_forever()
        t.join()
        self.assertEqual(results, ['hello', 'world'])
    def test_call_soon_threadsafe_same_thread(self):
        """call_soon_threadsafe() also works from the loop's own thread."""
        results = []
        def callback(arg):
            results.append(arg)
            if len(results) >= 2:
                self.loop.stop()
        self.loop.call_soon_threadsafe(callback, 'hello')
        self.loop.call_soon(callback, 'world')
        self.loop.run_forever()
        self.assertEqual(results, ['hello', 'world'])
    def test_run_in_executor(self):
        """run_in_executor() runs the callable in a different thread."""
        def run(arg):
            return (arg, threading.get_ident())
        f2 = self.loop.run_in_executor(None, run, 'yo')
        res, thread_id = self.loop.run_until_complete(f2)
        self.assertEqual(res, 'yo')
        self.assertNotEqual(thread_id, threading.get_ident())
    def test_reader_callback(self):
        """add_reader() callback drains a socket until remove_reader()."""
        r, w = test_utils.socketpair()
        r.setblocking(False)
        bytes_read = bytearray()
        def reader():
            try:
                data = r.recv(1024)
            except BlockingIOError:
                # Spurious readiness notifications are possible
                # at least on Linux -- see man select.
                return
            if data:
                bytes_read.extend(data)
            else:
                # Empty read means EOF: detach and close the reader side.
                self.assertTrue(self.loop.remove_reader(r.fileno()))
                r.close()
        self.loop.add_reader(r.fileno(), reader)
        self.loop.call_soon(w.send, b'abc')
        test_utils.run_until(self.loop, lambda: len(bytes_read) >= 3)
        self.loop.call_soon(w.send, b'def')
        test_utils.run_until(self.loop, lambda: len(bytes_read) >= 6)
        self.loop.call_soon(w.close)
        self.loop.call_soon(self.loop.stop)
        self.loop.run_forever()
        self.assertEqual(bytes_read, b'abcdef')
    def test_writer_callback(self):
        """add_writer() callback fires when the socket is writable."""
        r, w = test_utils.socketpair()
        w.setblocking(False)
        def writer(data):
            w.send(data)
            self.loop.stop()
        data = b'x' * 1024
        self.loop.add_writer(w.fileno(), writer, data)
        self.loop.run_forever()
        # First removal succeeds, second returns False (already removed).
        self.assertTrue(self.loop.remove_writer(w.fileno()))
        self.assertFalse(self.loop.remove_writer(w.fileno()))
        w.close()
        read = r.recv(len(data) * 2)
        r.close()
        self.assertEqual(read, data)
    def _basetest_sock_client_ops(self, httpd, sock):
        """Exercise sock_connect/sendall/recv against *httpd* via *sock*."""
        if not isinstance(self.loop, proactor_events.BaseProactorEventLoop):
            # in debug mode, socket operations must fail
            # if the socket is not in blocking mode
            self.loop.set_debug(True)
            sock.setblocking(True)
            with self.assertRaises(ValueError):
                self.loop.run_until_complete(
                    self.loop.sock_connect(sock, httpd.address))
            with self.assertRaises(ValueError):
                self.loop.run_until_complete(
                    self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
            with self.assertRaises(ValueError):
                self.loop.run_until_complete(
                    self.loop.sock_recv(sock, 1024))
            with self.assertRaises(ValueError):
                self.loop.run_until_complete(
                    self.loop.sock_recv_into(sock, bytearray()))
            with self.assertRaises(ValueError):
                self.loop.run_until_complete(
                    self.loop.sock_accept(sock))
        # test in non-blocking mode
        sock.setblocking(False)
        self.loop.run_until_complete(
            self.loop.sock_connect(sock, httpd.address))
        self.loop.run_until_complete(
            self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
        data = self.loop.run_until_complete(
            self.loop.sock_recv(sock, 1024))
        # consume data
        self.loop.run_until_complete(
            self.loop.sock_recv(sock, 1024))
        sock.close()
        self.assertTrue(data.startswith(b'HTTP/1.0 200 OK'))
    def _basetest_sock_recv_into(self, httpd, sock):
        # same as _basetest_sock_client_ops, but using sock_recv_into
        sock.setblocking(False)
        self.loop.run_until_complete(
            self.loop.sock_connect(sock, httpd.address))
        self.loop.run_until_complete(
            self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
        data = bytearray(1024)
        with memoryview(data) as buf:
            nbytes = self.loop.run_until_complete(
                self.loop.sock_recv_into(sock, buf[:1024]))
            # consume data
            self.loop.run_until_complete(
                self.loop.sock_recv_into(sock, buf[nbytes:]))
        sock.close()
        self.assertTrue(data.startswith(b'HTTP/1.0 200 OK'))
    def test_sock_client_ops(self):
        """Run both socket-op scenarios over TCP."""
        with test_utils.run_test_server() as httpd:
            sock = socket.socket()
            self._basetest_sock_client_ops(httpd, sock)
            sock = socket.socket()
            self._basetest_sock_recv_into(httpd, sock)
    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_unix_sock_client_ops(self):
        """Run both socket-op scenarios over a UNIX socket."""
        with test_utils.run_test_unix_server() as httpd:
            sock = socket.socket(socket.AF_UNIX)
            self._basetest_sock_client_ops(httpd, sock)
            sock = socket.socket(socket.AF_UNIX)
            self._basetest_sock_recv_into(httpd, sock)
def test_sock_client_fail(self):
# Make sure that we will get an unused port
address = None
try:
s = socket.socket()
s.bind(('127.0.0.1', 0))
address = s.getsockname()
finally:
s.close()
sock = socket.socket()
sock.setblocking(False)
with self.assertRaises(ConnectionRefusedError):
self.loop.run_until_complete(
self.loop.sock_connect(sock, address))
sock.close()
    def test_sock_accept(self):
        """sock_accept() yields a non-blocking conn and the peer address."""
        listener = socket.socket()
        listener.setblocking(False)
        listener.bind(('127.0.0.1', 0))
        listener.listen(1)
        client = socket.socket()
        client.connect(listener.getsockname())
        f = self.loop.sock_accept(listener)
        conn, addr = self.loop.run_until_complete(f)
        # gettimeout() == 0 means the accepted socket is non-blocking.
        self.assertEqual(conn.gettimeout(), 0)
        self.assertEqual(addr, client.getsockname())
        self.assertEqual(client.getpeername(), listener.getsockname())
        client.close()
        conn.close()
        listener.close()
    @unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
    def test_add_signal_handler(self):
        """add/remove_signal_handler(): argument checks and dispatch."""
        caught = 0
        def my_handler():
            nonlocal caught
            caught += 1
        # Check error behavior first.
        self.assertRaises(
            TypeError, self.loop.add_signal_handler, 'boom', my_handler)
        self.assertRaises(
            TypeError, self.loop.remove_signal_handler, 'boom')
        self.assertRaises(
            ValueError, self.loop.add_signal_handler, signal.NSIG+1,
            my_handler)
        self.assertRaises(
            ValueError, self.loop.remove_signal_handler, signal.NSIG+1)
        self.assertRaises(
            ValueError, self.loop.add_signal_handler, 0, my_handler)
        self.assertRaises(
            ValueError, self.loop.remove_signal_handler, 0)
        self.assertRaises(
            ValueError, self.loop.add_signal_handler, -1, my_handler)
        self.assertRaises(
            ValueError, self.loop.remove_signal_handler, -1)
        self.assertRaises(
            RuntimeError, self.loop.add_signal_handler, signal.SIGKILL,
            my_handler)
        # Removing SIGKILL doesn't raise, since we don't call signal().
        self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL))
        # Now set a handler and handle it.
        self.loop.add_signal_handler(signal.SIGINT, my_handler)
        os.kill(os.getpid(), signal.SIGINT)
        test_utils.run_until(self.loop, lambda: caught)
        # Removing it should restore the default handler.
        self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
        self.assertEqual(signal.getsignal(signal.SIGINT),
                         signal.default_int_handler)
        # Removing again returns False.
        self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
    @unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
    def test_signal_handling_while_selecting(self):
        # Test with a signal actually arriving during a select() call.
        caught = 0
        def my_handler():
            nonlocal caught
            caught += 1
            self.loop.stop()
        self.loop.add_signal_handler(signal.SIGALRM, my_handler)
        signal.setitimer(signal.ITIMER_REAL, 0.01, 0) # Send SIGALRM once.
        self.loop.run_forever()
        self.assertEqual(caught, 1)
    @unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
    def test_signal_handling_args(self):
        """Extra args given to add_signal_handler() reach the handler."""
        some_args = (42,)
        caught = 0
        def my_handler(*args):
            nonlocal caught
            caught += 1
            self.assertEqual(args, some_args)
        self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)
        signal.setitimer(signal.ITIMER_REAL, 0.1, 0) # Send SIGALRM once.
        self.loop.call_later(0.5, self.loop.stop)
        self.loop.run_forever()
        self.assertEqual(caught, 1)
    def _basetest_create_connection(self, connection_fut, check_sockname=True):
        """Await *connection_fut* and sanity-check transport/protocol."""
        tr, pr = self.loop.run_until_complete(connection_fut)
        self.assertIsInstance(tr, asyncio.Transport)
        self.assertIsInstance(pr, asyncio.Protocol)
        self.assertIs(pr.transport, tr)
        if check_sockname:
            self.assertIsNotNone(tr.get_extra_info('sockname'))
        self.loop.run_until_complete(pr.done)
        self.assertGreater(pr.nbytes, 0)
        tr.close()
    def test_create_connection(self):
        """create_connection() to a TCP test server succeeds."""
        with test_utils.run_test_server() as httpd:
            conn_fut = self.loop.create_connection(
                lambda: MyProto(loop=self.loop), *httpd.address)
            self._basetest_create_connection(conn_fut)
    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_create_unix_connection(self):
        """create_unix_connection() to a UNIX-socket test server succeeds."""
        # Issue #20682: On Mac OS X Tiger, getsockname() returns a
        # zero-length address for UNIX socket.
        check_sockname = not osx_tiger()
        with test_utils.run_test_unix_server() as httpd:
            conn_fut = self.loop.create_unix_connection(
                lambda: MyProto(loop=self.loop), httpd.address)
            self._basetest_create_connection(conn_fut, check_sockname)
def test_create_connection_sock(self):
with test_utils.run_test_server() as httpd:
sock = None
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*httpd.address, type=socket.SOCK_STREAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
self.loop.run_until_complete(
self.loop.sock_connect(sock, address))
except:
pass
else:
break
else:
assert False, 'Can not create socket.'
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def check_ssl_extra_info(self, client, check_sockname=True,
peername=None, peercert={}):
if check_sockname:
self.assertIsNotNone(client.get_extra_info('sockname'))
if peername:
self.assertEqual(peername,
client.get_extra_info('peername'))
else:
self.assertIsNotNone(client.get_extra_info('peername'))
self.assertEqual(peercert,
client.get_extra_info('peercert'))
# test SSL cipher
cipher = client.get_extra_info('cipher')
self.assertIsInstance(cipher, tuple)
self.assertEqual(len(cipher), 3, cipher)
self.assertIsInstance(cipher[0], str)
self.assertIsInstance(cipher[1], str)
self.assertIsInstance(cipher[2], int)
# test SSL object
sslobj = client.get_extra_info('ssl_object')
self.assertIsNotNone(sslobj)
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
self.assertEqual(sslobj.cipher(),
client.get_extra_info('cipher'))
self.assertEqual(sslobj.getpeercert(),
client.get_extra_info('peercert'))
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
    def _basetest_create_ssl_connection(self, connection_fut,
                                        check_sockname=True,
                                        peername=None):
        """Await an SSL connection and verify its transport extra info."""
        tr, pr = self.loop.run_until_complete(connection_fut)
        self.assertIsInstance(tr, asyncio.Transport)
        self.assertIsInstance(pr, asyncio.Protocol)
        self.assertTrue('ssl' in tr.__class__.__name__.lower())
        self.check_ssl_extra_info(tr, check_sockname, peername)
        self.loop.run_until_complete(pr.done)
        self.assertGreater(pr.nbytes, 0)
        tr.close()
    def _test_create_ssl_connection(self, httpd, create_connection,
                                    check_sockname=True, peername=None):
        """Run the SSL connection scenarios: dummy context and ssl=True."""
        conn_fut = create_connection(ssl=test_utils.dummy_ssl_context())
        self._basetest_create_ssl_connection(conn_fut, check_sockname,
                                             peername)
        # ssl.Purpose was introduced in Python 3.4
        if hasattr(ssl, 'Purpose'):
            def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, *,
                                          cafile=None, capath=None,
                                          cadata=None):
                """
                A ssl.create_default_context() replacement that doesn't enable
                cert validation.
                """
                self.assertEqual(purpose, ssl.Purpose.SERVER_AUTH)
                return test_utils.dummy_ssl_context()
            # With ssl=True, ssl.create_default_context() should be called
            with mock.patch('ssl.create_default_context',
                            side_effect=_dummy_ssl_create_context) as m:
                conn_fut = create_connection(ssl=True)
                self._basetest_create_ssl_connection(conn_fut, check_sockname,
                                                     peername)
                self.assertEqual(m.call_count, 1)
        # With the real ssl.create_default_context(), certificate
        # validation will fail
        with self.assertRaises(ssl.SSLError) as cm:
            conn_fut = create_connection(ssl=True)
            # Ignore the "SSL handshake failed" log in debug mode
            with test_utils.disable_logger():
                self._basetest_create_ssl_connection(conn_fut, check_sockname,
                                                     peername)
        self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_create_ssl_connection(self):
        """create_connection(ssl=...) over TCP."""
        with test_utils.run_test_server(use_ssl=True) as httpd:
            create_connection = functools.partial(
                self.loop.create_connection,
                lambda: MyProto(loop=self.loop),
                *httpd.address)
            self._test_create_ssl_connection(httpd, create_connection,
                                             peername=httpd.address)
    def test_legacy_create_ssl_connection(self):
        """Same as test_create_ssl_connection with legacy SSL transport."""
        with test_utils.force_legacy_ssl_support():
            self.test_create_ssl_connection()
    @unittest.skipIf(ssl is None, 'No ssl module')
    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_create_ssl_unix_connection(self):
        """create_unix_connection(ssl=...) over a UNIX socket."""
        # Issue #20682: On Mac OS X Tiger, getsockname() returns a
        # zero-length address for UNIX socket.
        check_sockname = not osx_tiger()
        with test_utils.run_test_unix_server(use_ssl=True) as httpd:
            create_connection = functools.partial(
                self.loop.create_unix_connection,
                lambda: MyProto(loop=self.loop), httpd.address,
                server_hostname='127.0.0.1')
            self._test_create_ssl_connection(httpd, create_connection,
                                             check_sockname,
                                             peername=httpd.address)
    def test_legacy_create_ssl_unix_connection(self):
        """Same as test_create_ssl_unix_connection with legacy SSL."""
        with test_utils.force_legacy_ssl_support():
            self.test_create_ssl_unix_connection()
    def test_create_connection_local_addr(self):
        """create_connection() honours an explicit local_addr port."""
        with test_utils.run_test_server() as httpd:
            port = support.find_unused_port()
            f = self.loop.create_connection(
                lambda: MyProto(loop=self.loop),
                *httpd.address, local_addr=(httpd.address[0], port))
            tr, pr = self.loop.run_until_complete(f)
            expected = pr.transport.get_extra_info('sockname')[1]
            self.assertEqual(port, expected)
            tr.close()
    def test_create_connection_local_addr_in_use(self):
        """Binding local_addr to an in-use address raises EADDRINUSE."""
        with test_utils.run_test_server() as httpd:
            f = self.loop.create_connection(
                lambda: MyProto(loop=self.loop),
                *httpd.address, local_addr=httpd.address)
            with self.assertRaises(OSError) as cm:
                self.loop.run_until_complete(f)
            self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
            self.assertIn(str(httpd.address), cm.exception.strerror)
    def test_connect_accepted_socket(self, server_ssl=None, client_ssl=None):
        """connect_accepted_socket() wraps an already-accepted connection."""
        loop = self.loop
        class MyProto(MyBaseProto):
            def connection_lost(self, exc):
                super().connection_lost(exc)
                loop.call_soon(loop.stop)
            def data_received(self, data):
                super().data_received(data)
                self.transport.write(expected_response)
        lsock = socket.socket()
        lsock.bind(('127.0.0.1', 0))
        lsock.listen(1)
        addr = lsock.getsockname()
        message = b'test data'
        response = None
        expected_response = b'roger'
        def client():
            # Runs in a background thread: send *message*, read the reply.
            nonlocal response
            try:
                csock = socket.socket()
                if client_ssl is not None:
                    csock = client_ssl.wrap_socket(csock)
                csock.connect(addr)
                csock.sendall(message)
                response = csock.recv(99)
                csock.close()
            except Exception as exc:
                print(
                    "Failure in client thread in test_connect_accepted_socket",
                    exc)
        thread = threading.Thread(target=client, daemon=True)
        thread.start()
        conn, _ = lsock.accept()
        proto = MyProto(loop=loop)
        proto.loop = loop
        loop.run_until_complete(
            loop.connect_accepted_socket(
                (lambda: proto), conn, ssl=server_ssl))
        loop.run_forever()
        proto.transport.close()
        lsock.close()
        support.join_thread(thread, timeout=1)
        self.assertFalse(thread.is_alive())
        self.assertEqual(proto.state, 'CLOSED')
        self.assertEqual(proto.nbytes, len(message))
        self.assertEqual(response, expected_response)
    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_ssl_connect_accepted_socket(self):
        """connect_accepted_socket() with server and client SSL contexts."""
        if (sys.platform == 'win32' and
            sys.version_info < (3, 5) and
            isinstance(self.loop, proactor_events.BaseProactorEventLoop)
        ):
            raise unittest.SkipTest(
                'SSL not supported with proactor event loops before Python 3.5'
            )
        server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        server_context.load_cert_chain(ONLYCERT, ONLYKEY)
        if hasattr(server_context, 'check_hostname'):
            server_context.check_hostname = False
        server_context.verify_mode = ssl.CERT_NONE
        client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        # NOTE(review): this hasattr checks server_context but configures
        # client_context; both are SSLContext so the result is the same.
        if hasattr(server_context, 'check_hostname'):
            client_context.check_hostname = False
        client_context.verify_mode = ssl.CERT_NONE
        self.test_connect_accepted_socket(server_context, client_context)
    @mock.patch('asyncio.base_events.socket')
    def create_server_multiple_hosts(self, family, hosts, mock_sock):
        """Helper: create_server() with several (possibly duplicate) hosts."""
        @asyncio.coroutine
        def getaddrinfo(host, port, *args, **kw):
            # Fabricate one addrinfo entry per requested host.
            if family == socket.AF_INET:
                return [(family, socket.SOCK_STREAM, 6, '', (host, port))]
            else:
                return [(family, socket.SOCK_STREAM, 6, '', (host, port, 0, 0))]
        def getaddrinfo_task(*args, **kwds):
            return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
        unique_hosts = set(hosts)
        if family == socket.AF_INET:
            mock_sock.socket().getsockbyname.side_effect = [
                (host, 80) for host in unique_hosts]
        else:
            mock_sock.socket().getsockbyname.side_effect = [
                (host, 80, 0, 0) for host in unique_hosts]
        self.loop.getaddrinfo = getaddrinfo_task
        self.loop._start_serving = mock.Mock()
        self.loop._stop_serving = mock.Mock()
        f = self.loop.create_server(lambda: MyProto(self.loop), hosts, 80)
        server = self.loop.run_until_complete(f)
        self.addCleanup(server.close)
        server_hosts = {sock.getsockbyname()[0] for sock in server.sockets}
        self.assertEqual(server_hosts, unique_hosts)
    def test_create_server_multiple_hosts_ipv4(self):
        """Duplicate IPv4 hosts are deduplicated by create_server()."""
        self.create_server_multiple_hosts(socket.AF_INET,
                                          ['1.2.3.4', '5.6.7.8', '1.2.3.4'])
    def test_create_server_multiple_hosts_ipv6(self):
        """Duplicate IPv6 hosts are deduplicated by create_server()."""
        self.create_server_multiple_hosts(socket.AF_INET6,
                                          ['::1', '::2', '::1'])
    def test_create_server(self):
        """create_server() accepts a connection and exposes extra info."""
        proto = MyProto(self.loop)
        f = self.loop.create_server(lambda: proto, '0.0.0.0', 0)
        server = self.loop.run_until_complete(f)
        self.assertEqual(len(server.sockets), 1)
        sock = server.sockets[0]
        host, port = sock.getsockname()
        self.assertEqual(host, '0.0.0.0')
        client = socket.socket()
        client.connect(('127.0.0.1', port))
        client.sendall(b'xxx')
        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)
        test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
        self.assertEqual(3, proto.nbytes)
        # extra info is available
        self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
        self.assertEqual('127.0.0.1',
                         proto.transport.get_extra_info('peername')[0])
        # close connection
        proto.transport.close()
        self.loop.run_until_complete(proto.done)
        self.assertEqual('CLOSED', proto.state)
        # the client socket must be closed after to avoid ECONNRESET upon
        # recv()/send() on the serving socket
        client.close()
        # close server
        server.close()
    @unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT')
    def test_create_server_reuse_port(self):
        """reuse_port=True sets SO_REUSEPORT on the listening socket."""
        proto = MyProto(self.loop)
        f = self.loop.create_server(
            lambda: proto, '0.0.0.0', 0)
        server = self.loop.run_until_complete(f)
        self.assertEqual(len(server.sockets), 1)
        sock = server.sockets[0]
        self.assertFalse(
            sock.getsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEPORT))
        server.close()
        test_utils.run_briefly(self.loop)
        proto = MyProto(self.loop)
        f = self.loop.create_server(
            lambda: proto, '0.0.0.0', 0, reuse_port=True)
        server = self.loop.run_until_complete(f)
        self.assertEqual(len(server.sockets), 1)
        sock = server.sockets[0]
        self.assertTrue(
            sock.getsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEPORT))
        server.close()
    def _make_unix_server(self, factory, **kwargs):
        """Create a unix-socket server on a fresh path; clean up the path."""
        path = test_utils.gen_unix_socket_path()
        self.addCleanup(lambda: os.path.exists(path) and os.unlink(path))
        f = self.loop.create_unix_server(factory, path, **kwargs)
        server = self.loop.run_until_complete(f)
        return server, path
    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_create_unix_server(self):
        """create_unix_server() accepts a client and receives its data."""
        proto = MyProto(loop=self.loop)
        server, path = self._make_unix_server(lambda: proto)
        self.assertEqual(len(server.sockets), 1)
        client = socket.socket(socket.AF_UNIX)
        client.connect(path)
        client.sendall(b'xxx')
        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)
        test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
        self.assertEqual(3, proto.nbytes)
        # close connection
        proto.transport.close()
        self.loop.run_until_complete(proto.done)
        self.assertEqual('CLOSED', proto.state)
        # the client socket must be closed after to avoid ECONNRESET upon
        # recv()/send() on the serving socket
        client.close()
        # close server
        server.close()
    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_create_unix_server_path_socket_error(self):
        """Passing both path and sock to create_unix_server() is an error."""
        proto = MyProto(loop=self.loop)
        sock = socket.socket()
        with sock:
            f = self.loop.create_unix_server(lambda: proto, '/test', sock=sock)
            with self.assertRaisesRegex(ValueError,
                                        'path and sock can not be specified '
                                        'at the same time'):
                self.loop.run_until_complete(f)
    def _create_ssl_context(self, certfile, keyfile=None):
        """Build a TLS server context loaded with *certfile*/*keyfile*."""
        sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        sslcontext.options |= ssl.OP_NO_SSLv2
        sslcontext.load_cert_chain(certfile, keyfile)
        return sslcontext
    def _make_ssl_server(self, factory, certfile, keyfile=None):
        """Start a TLS server on 127.0.0.1; return (server, host, port)."""
        sslcontext = self._create_ssl_context(certfile, keyfile)
        f = self.loop.create_server(factory, '127.0.0.1', 0, ssl=sslcontext)
        server = self.loop.run_until_complete(f)
        sock = server.sockets[0]
        host, port = sock.getsockname()
        self.assertEqual(host, '127.0.0.1')
        return server, host, port
    def _make_ssl_unix_server(self, factory, certfile, keyfile=None):
        """Start a TLS unix-socket server; return (server, path)."""
        sslcontext = self._create_ssl_context(certfile, keyfile)
        return self._make_unix_server(factory, ssl=sslcontext)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, ONLYCERT, ONLYKEY)
f_c = self.loop.create_connection(MyBaseProto, host, port,
ssl=test_utils.dummy_ssl_context())
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
def test_legacy_create_server_ssl(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_ssl(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, ONLYCERT, ONLYKEY)
f_c = self.loop.create_unix_connection(
MyBaseProto, path, ssl=test_utils.dummy_ssl_context(),
server_hostname='')
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
def test_legacy_create_unix_server_ssl(self):
with test_utils.force_legacy_ssl_support():
self.test_create_unix_server_ssl()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
def test_legacy_create_server_ssl_verify_failed(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl_verify_failed()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='invalid')
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
def test_legacy_create_unix_server_ssl_verify_failed(self):
with test_utils.force_legacy_ssl_support():
self.test_create_unix_server_ssl_verify_failed()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_match_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(
cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# incorrect server_hostname
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(
ssl.CertificateError,
"hostname '127.0.0.1' doesn't match 'localhost'"):
self.loop.run_until_complete(f_c)
# close connection
proto.transport.close()
server.close()
def test_legacy_create_server_ssl_match_failed(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl_match_failed()
    @unittest.skipIf(ssl is None, 'No ssl module')
    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_create_unix_server_ssl_verified(self):
        """Verified TLS over a UNIX socket: with the correct CA loaded and
        a matching server hostname, the handshake must succeed."""
        proto = MyProto(loop=self.loop)
        server, path = self._make_ssl_unix_server(
            lambda: proto, SIGNED_CERTFILE)
        sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        sslcontext_client.options |= ssl.OP_NO_SSLv2
        sslcontext_client.verify_mode = ssl.CERT_REQUIRED
        sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
        if hasattr(sslcontext_client, 'check_hostname'):
            sslcontext_client.check_hostname = True
        # Connection succeeds with correct CA and server hostname.
        f_c = self.loop.create_unix_connection(MyProto, path,
                                               ssl=sslcontext_client,
                                               server_hostname='localhost')
        client, pr = self.loop.run_until_complete(f_c)
        # close connection
        proto.transport.close()
        client.close()
        server.close()
        self.loop.run_until_complete(proto.done)
def test_legacy_create_unix_server_ssl_verified(self):
with test_utils.force_legacy_ssl_support():
self.test_create_unix_server_ssl_verified()
    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_create_server_ssl_verified(self):
        """Verified TLS over TCP: correct CA plus matching server hostname
        must succeed, and SSL extra info must be exposed on the transport."""
        proto = MyProto(loop=self.loop)
        server, host, port = self._make_ssl_server(
            lambda: proto, SIGNED_CERTFILE)
        sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        sslcontext_client.options |= ssl.OP_NO_SSLv2
        sslcontext_client.verify_mode = ssl.CERT_REQUIRED
        sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
        if hasattr(sslcontext_client, 'check_hostname'):
            sslcontext_client.check_hostname = True
        # Connection succeeds with correct CA and server hostname.
        f_c = self.loop.create_connection(MyProto, host, port,
                                          ssl=sslcontext_client,
                                          server_hostname='localhost')
        client, pr = self.loop.run_until_complete(f_c)
        # extra info is available
        self.check_ssl_extra_info(client,peername=(host, port),
                                  peercert=PEERCERT)
        # close connection
        proto.transport.close()
        client.close()
        server.close()
        self.loop.run_until_complete(proto.done)
def test_legacy_create_server_ssl_verified(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl_verified()
    def test_create_server_sock(self):
        """create_server(sock=...) must serve on the pre-made socket."""
        proto = asyncio.Future(loop=self.loop)
        class TestMyProto(MyProto):
            def connection_made(self, transport):
                super().connection_made(transport)
                proto.set_result(self)
        sock_ob = socket.socket(type=socket.SOCK_STREAM)
        sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock_ob.bind(('0.0.0.0', 0))
        f = self.loop.create_server(TestMyProto, sock=sock_ob)
        server = self.loop.run_until_complete(f)
        # The server must expose the very same socket object it was given.
        sock = server.sockets[0]
        self.assertIs(sock, sock_ob)
        host, port = sock.getsockname()
        self.assertEqual(host, '0.0.0.0')
        client = socket.socket()
        client.connect(('127.0.0.1', port))
        client.send(b'xxx')
        client.close()
        server.close()
    def test_create_server_addr_in_use(self):
        """Binding a second server to an in-use address raises EADDRINUSE."""
        sock_ob = socket.socket(type=socket.SOCK_STREAM)
        sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock_ob.bind(('0.0.0.0', 0))
        f = self.loop.create_server(MyProto, sock=sock_ob)
        server = self.loop.run_until_complete(f)
        sock = server.sockets[0]
        host, port = sock.getsockname()
        # A second server on the same host/port must fail to bind.
        f = self.loop.create_server(MyProto, host=host, port=port)
        with self.assertRaises(OSError) as cm:
            self.loop.run_until_complete(f)
        self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
        server.close()
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
    def test_create_server_dual_stack(self):
        """host=None must create a dual-stack server reachable over both
        IPv4 and IPv6."""
        f_proto = asyncio.Future(loop=self.loop)
        class TestMyProto(MyProto):
            def connection_made(self, transport):
                super().connection_made(transport)
                f_proto.set_result(self)
        try_count = 0
        # find_unused_port() is inherently racy: another process can grab
        # the port before we bind.  Retry (at most 5 times) on EADDRINUSE.
        while True:
            try:
                port = support.find_unused_port()
                f = self.loop.create_server(TestMyProto, host=None, port=port)
                server = self.loop.run_until_complete(f)
            except OSError as ex:
                if ex.errno == errno.EADDRINUSE:
                    try_count += 1
                    self.assertGreaterEqual(5, try_count)
                    continue
                else:
                    raise
            else:
                break
        # Connect over IPv4.
        client = socket.socket()
        client.connect(('127.0.0.1', port))
        client.send(b'xxx')
        proto = self.loop.run_until_complete(f_proto)
        proto.transport.close()
        client.close()
        # Connect over IPv6 against the same server.
        f_proto = asyncio.Future(loop=self.loop)
        client = socket.socket(socket.AF_INET6)
        client.connect(('::1', port))
        client.send(b'xxx')
        proto = self.loop.run_until_complete(f_proto)
        proto.transport.close()
        client.close()
        server.close()
    def test_server_close(self):
        """After server.close(), new connections must be refused."""
        f = self.loop.create_server(MyProto, '0.0.0.0', 0)
        server = self.loop.run_until_complete(f)
        sock = server.sockets[0]
        host, port = sock.getsockname()
        # A connection while the server is open succeeds.
        client = socket.socket()
        client.connect(('127.0.0.1', port))
        client.send(b'xxx')
        client.close()
        server.close()
        # After close() the same address must refuse connections.
        client = socket.socket()
        self.assertRaises(
            ConnectionRefusedError, client.connect, ('127.0.0.1', port))
        client.close()
    def test_create_datagram_endpoint(self):
        """UDP round-trip between two datagram endpoints on localhost."""
        class TestMyDatagramProto(MyDatagramProto):
            def __init__(inner_self):
                super().__init__(loop=self.loop)
            def datagram_received(self, data, addr):
                super().datagram_received(data, addr)
                # Echo back with a prefix so the client gets a reply.
                self.transport.sendto(b'resp:'+data, addr)
        coro = self.loop.create_datagram_endpoint(
            TestMyDatagramProto, local_addr=('127.0.0.1', 0))
        s_transport, server = self.loop.run_until_complete(coro)
        host, port = s_transport.get_extra_info('sockname')
        self.assertIsInstance(s_transport, asyncio.Transport)
        self.assertIsInstance(server, TestMyDatagramProto)
        self.assertEqual('INITIALIZED', server.state)
        self.assertIs(server.transport, s_transport)
        coro = self.loop.create_datagram_endpoint(
            lambda: MyDatagramProto(loop=self.loop),
            remote_addr=(host, port))
        transport, client = self.loop.run_until_complete(coro)
        self.assertIsInstance(transport, asyncio.Transport)
        self.assertIsInstance(client, MyDatagramProto)
        self.assertEqual('INITIALIZED', client.state)
        self.assertIs(client.transport, transport)
        transport.sendto(b'xxx')
        test_utils.run_until(self.loop, lambda: server.nbytes)
        self.assertEqual(3, server.nbytes)
        test_utils.run_until(self.loop, lambda: client.nbytes)
        # received b'resp:xxx' (8 bytes)
        self.assertEqual(8, client.nbytes)
        # extra info is available
        self.assertIsNotNone(transport.get_extra_info('sockname'))
        # close connection
        transport.close()
        self.loop.run_until_complete(client.done)
        self.assertEqual('CLOSED', client.state)
        server.transport.close()
def test_create_datagram_endpoint_sock(self):
if (sys.platform == 'win32' and
isinstance(self.loop, proactor_events.BaseProactorEventLoop)):
raise unittest.SkipTest(
'UDP is not supported with proactor event loops')
sock = None
local_address = ('127.0.0.1', 0)
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*local_address, type=socket.SOCK_DGRAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
sock.bind(address)
except:
pass
else:
break
else:
assert False, 'Can not create socket.'
f = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, MyDatagramProto)
tr.close()
self.loop.run_until_complete(pr.done)
    def test_internal_fds(self):
        """Selector loops hold one internal fd (the self-pipe); close()
        must release it and drop the self-pipe sockets."""
        loop = self.create_event_loop()
        if not isinstance(loop, selector_events.BaseSelectorEventLoop):
            loop.close()
            self.skipTest('loop is not a BaseSelectorEventLoop')
        self.assertEqual(1, loop._internal_fds)
        loop.close()
        self.assertEqual(0, loop._internal_fds)
        self.assertIsNone(loop._csock)
        self.assertIsNone(loop._ssock)
    @unittest.skipUnless(sys.platform != 'win32',
                         "Don't support pipes for Windows")
    def test_read_pipe(self):
        """connect_read_pipe() delivers pipe data and EOF to the protocol."""
        proto = MyReadPipeProto(loop=self.loop)
        rpipe, wpipe = os.pipe()
        pipeobj = io.open(rpipe, 'rb', 1024)
        @asyncio.coroutine
        def connect():
            t, p = yield from self.loop.connect_read_pipe(
                lambda: proto, pipeobj)
            self.assertIs(p, proto)
            self.assertIs(t, proto.transport)
            self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
            self.assertEqual(0, proto.nbytes)
        self.loop.run_until_complete(connect())
        os.write(wpipe, b'1')
        test_utils.run_until(self.loop, lambda: proto.nbytes >= 1)
        self.assertEqual(1, proto.nbytes)
        os.write(wpipe, b'2345')
        test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
        self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
        self.assertEqual(5, proto.nbytes)
        # Closing the write end produces EOF and then CLOSED on the reader.
        os.close(wpipe)
        self.loop.run_until_complete(proto.done)
        self.assertEqual(
            ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
        # extra info is available
        self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
    @unittest.skipUnless(sys.platform != 'win32',
                         "Don't support pipes for Windows")
    def test_unclosed_pipe_transport(self):
        # This test reproduces the issue #314 on GitHub
        loop = self.create_event_loop()
        read_proto = MyReadPipeProto(loop=loop)
        write_proto = MyWritePipeProto(loop=loop)
        rpipe, wpipe = os.pipe()
        rpipeobj = io.open(rpipe, 'rb', 1024)
        wpipeobj = io.open(wpipe, 'w', 1024)
        @asyncio.coroutine
        def connect():
            read_transport, _ = yield from loop.connect_read_pipe(
                lambda: read_proto, rpipeobj)
            write_transport, _ = yield from loop.connect_write_pipe(
                lambda: write_proto, wpipeobj)
            return read_transport, write_transport
        # Run and close the loop without closing the transports
        read_transport, write_transport = loop.run_until_complete(connect())
        loop.close()
        # These 'repr' calls used to raise an AttributeError
        # See Issue #314 on GitHub
        self.assertIn('open', repr(read_transport))
        self.assertIn('open', repr(write_transport))
        # Clean up (avoid ResourceWarning)
        rpipeobj.close()
        wpipeobj.close()
        # Detach the already-closed pipe objects so the transports'
        # destructors do not try to close them a second time.
        read_transport._pipe = None
        write_transport._pipe = None
    @unittest.skipUnless(sys.platform != 'win32',
                         "Don't support pipes for Windows")
    # select, poll and kqueue don't support character devices (PTY) on Mac OS X
    # older than 10.6 (Snow Leopard)
    @support.requires_mac_ver(10, 6)
    # Issue #20495: The test hangs on FreeBSD 7.2 but pass on FreeBSD 9
    @support.requires_freebsd_version(8)
    def test_read_pty_output(self):
        """Like test_read_pipe, but reading from the master side of a PTY."""
        proto = MyReadPipeProto(loop=self.loop)
        master, slave = os.openpty()
        master_read_obj = io.open(master, 'rb', 0)
        @asyncio.coroutine
        def connect():
            t, p = yield from self.loop.connect_read_pipe(lambda: proto,
                                                          master_read_obj)
            self.assertIs(p, proto)
            self.assertIs(t, proto.transport)
            self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
            self.assertEqual(0, proto.nbytes)
        self.loop.run_until_complete(connect())
        os.write(slave, b'1')
        test_utils.run_until(self.loop, lambda: proto.nbytes)
        self.assertEqual(1, proto.nbytes)
        os.write(slave, b'2345')
        test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
        self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
        self.assertEqual(5, proto.nbytes)
        # Closing the slave end produces EOF on the master reader.
        os.close(slave)
        self.loop.run_until_complete(proto.done)
        self.assertEqual(
            ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
        # extra info is available
        self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
    @unittest.skipUnless(sys.platform != 'win32',
                         "Don't support pipes for Windows")
    def test_write_pipe(self):
        """connect_write_pipe(): transport writes reach the pipe's read end."""
        rpipe, wpipe = os.pipe()
        pipeobj = io.open(wpipe, 'wb', 1024)
        proto = MyWritePipeProto(loop=self.loop)
        connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
        transport, p = self.loop.run_until_complete(connect)
        self.assertIs(p, proto)
        self.assertIs(transport, proto.transport)
        self.assertEqual('CONNECTED', proto.state)
        transport.write(b'1')
        data = bytearray()
        def reader(data):
            # Drain what is currently readable; returns total bytes so far.
            chunk = os.read(rpipe, 1024)
            data += chunk
            return len(data)
        test_utils.run_until(self.loop, lambda: reader(data) >= 1)
        self.assertEqual(b'1', data)
        transport.write(b'2345')
        test_utils.run_until(self.loop, lambda: reader(data) >= 5)
        self.assertEqual(b'12345', data)
        self.assertEqual('CONNECTED', proto.state)
        os.close(rpipe)
        # extra info is available
        self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
        # close connection
        proto.transport.close()
        self.loop.run_until_complete(proto.done)
        self.assertEqual('CLOSED', proto.state)
    @unittest.skipUnless(sys.platform != 'win32',
                         "Don't support pipes for Windows")
    def test_write_pipe_disconnect_on_close(self):
        """Closing the read end must disconnect the write transport."""
        rsock, wsock = test_utils.socketpair()
        rsock.setblocking(False)
        # Detach the fd from the socket object and wrap it as a pipe-like
        # file object for connect_write_pipe().
        pipeobj = io.open(wsock.detach(), 'wb', 1024)
        proto = MyWritePipeProto(loop=self.loop)
        connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
        transport, p = self.loop.run_until_complete(connect)
        self.assertIs(p, proto)
        self.assertIs(transport, proto.transport)
        self.assertEqual('CONNECTED', proto.state)
        transport.write(b'1')
        data = self.loop.run_until_complete(self.loop.sock_recv(rsock, 1024))
        self.assertEqual(b'1', data)
        # Closing the peer must be noticed and close the protocol.
        rsock.close()
        self.loop.run_until_complete(proto.done)
        self.assertEqual('CLOSED', proto.state)
    @unittest.skipUnless(sys.platform != 'win32',
                         "Don't support pipes for Windows")
    # select, poll and kqueue don't support character devices (PTY) on Mac OS X
    # older than 10.6 (Snow Leopard)
    @support.requires_mac_ver(10, 6)
    def test_write_pty(self):
        """connect_write_pipe() to a PTY slave: writes appear on the master."""
        master, slave = os.openpty()
        slave_write_obj = io.open(slave, 'wb', 0)
        proto = MyWritePipeProto(loop=self.loop)
        connect = self.loop.connect_write_pipe(lambda: proto, slave_write_obj)
        transport, p = self.loop.run_until_complete(connect)
        self.assertIs(p, proto)
        self.assertIs(transport, proto.transport)
        self.assertEqual('CONNECTED', proto.state)
        transport.write(b'1')
        data = bytearray()
        def reader(data):
            # Accumulate everything readable on the master; returns length.
            chunk = os.read(master, 1024)
            data += chunk
            return len(data)
        test_utils.run_until(self.loop, lambda: reader(data) >= 1,
                             timeout=10)
        self.assertEqual(b'1', data)
        transport.write(b'2345')
        test_utils.run_until(self.loop, lambda: reader(data) >= 5,
                             timeout=10)
        self.assertEqual(b'12345', data)
        self.assertEqual('CONNECTED', proto.state)
        os.close(master)
        # extra info is available
        self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
        # close connection
        proto.transport.close()
        self.loop.run_until_complete(proto.done)
        self.assertEqual('CLOSED', proto.state)
    @unittest.skipUnless(sys.platform != 'win32',
                         "Don't support pipes for Windows")
    # select, poll and kqueue don't support character devices (PTY) on Mac OS X
    # older than 10.6 (Snow Leopard)
    @support.requires_mac_ver(10, 6)
    def test_bidirectional_pty(self):
        """Interleave reads and writes on both sides of one PTY: a read
        transport on one slave fd and a write transport on a dup of it."""
        master, read_slave = os.openpty()
        write_slave = os.dup(read_slave)
        # Raw mode: no echo/translation, bytes pass through unchanged.
        tty.setraw(read_slave)
        slave_read_obj = io.open(read_slave, 'rb', 0)
        read_proto = MyReadPipeProto(loop=self.loop)
        read_connect = self.loop.connect_read_pipe(lambda: read_proto,
                                                   slave_read_obj)
        read_transport, p = self.loop.run_until_complete(read_connect)
        self.assertIs(p, read_proto)
        self.assertIs(read_transport, read_proto.transport)
        self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
        self.assertEqual(0, read_proto.nbytes)
        slave_write_obj = io.open(write_slave, 'wb', 0)
        write_proto = MyWritePipeProto(loop=self.loop)
        write_connect = self.loop.connect_write_pipe(lambda: write_proto,
                                                     slave_write_obj)
        write_transport, p = self.loop.run_until_complete(write_connect)
        self.assertIs(p, write_proto)
        self.assertIs(write_transport, write_proto.transport)
        self.assertEqual('CONNECTED', write_proto.state)
        data = bytearray()
        def reader(data):
            # Accumulate bytes that reached the master side.
            chunk = os.read(master, 1024)
            data += chunk
            return len(data)
        # slave -> master direction.
        write_transport.write(b'1')
        test_utils.run_until(self.loop, lambda: reader(data) >= 1, timeout=10)
        self.assertEqual(b'1', data)
        self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
        self.assertEqual('CONNECTED', write_proto.state)
        # master -> slave direction.
        os.write(master, b'a')
        test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 1,
                             timeout=10)
        self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
        self.assertEqual(1, read_proto.nbytes)
        self.assertEqual('CONNECTED', write_proto.state)
        write_transport.write(b'2345')
        test_utils.run_until(self.loop, lambda: reader(data) >= 5, timeout=10)
        self.assertEqual(b'12345', data)
        self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
        self.assertEqual('CONNECTED', write_proto.state)
        os.write(master, b'bcde')
        test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 5,
                             timeout=10)
        self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
        self.assertEqual(5, read_proto.nbytes)
        self.assertEqual('CONNECTED', write_proto.state)
        # Tear down: close master first, then both transports.
        os.close(master)
        read_transport.close()
        self.loop.run_until_complete(read_proto.done)
        self.assertEqual(
            ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], read_proto.state)
        write_transport.close()
        self.loop.run_until_complete(write_proto.done)
        self.assertEqual('CLOSED', write_proto.state)
    def test_prompt_cancellation(self):
        """Cancelling a pending sock_recv must wake the loop promptly
        rather than waiting for a poll timeout."""
        r, w = test_utils.socketpair()
        r.setblocking(False)
        f = self.loop.sock_recv(r, 1)
        # Proactor futures expose the overlapped op as 'ov'; selector loops
        # don't, hence the getattr.
        ov = getattr(f, 'ov', None)
        if ov is not None:
            self.assertTrue(ov.pending)
        @asyncio.coroutine
        def main():
            try:
                self.loop.call_soon(f.cancel)
                yield from f
            except asyncio.CancelledError:
                res = 'cancelled'
            else:
                res = None
            finally:
                self.loop.stop()
            return res
        start = time.monotonic()
        t = asyncio.Task(main(), loop=self.loop)
        self.loop.run_forever()
        elapsed = time.monotonic() - start
        # The cancellation must complete well under this 0.1s bound.
        self.assertLess(elapsed, 0.1)
        self.assertEqual(t.result(), 'cancelled')
        self.assertRaises(asyncio.CancelledError, f.result)
        if ov is not None:
            self.assertFalse(ov.pending)
        self.loop._stop_serving(r)
        r.close()
        w.close()
    def test_timeout_rounding(self):
        """Tiny sleep timeouts (below the clock resolution) must not make
        the loop busy-spin: the number of _run_once iterations is bounded."""
        def _run_once():
            # Count each loop iteration, then delegate to the real method.
            self.loop._run_once_counter += 1
            orig_run_once()
        orig_run_once = self.loop._run_once
        self.loop._run_once_counter = 0
        self.loop._run_once = _run_once
        @asyncio.coroutine
        def wait():
            loop = self.loop
            yield from asyncio.sleep(1e-2, loop=loop)
            yield from asyncio.sleep(1e-4, loop=loop)
            yield from asyncio.sleep(1e-6, loop=loop)
            yield from asyncio.sleep(1e-8, loop=loop)
            yield from asyncio.sleep(1e-10, loop=loop)
        self.loop.run_until_complete(wait())
        # The ideal number of call is 12, but on some platforms, the selector
        # may sleep at little bit less than timeout depending on the resolution
        # of the clock used by the kernel. Tolerate a few useless calls on
        # these platforms.
        self.assertLessEqual(self.loop._run_once_counter, 20,
            {'clock_resolution': self.loop._clock_resolution,
             'selector': self.loop._selector.__class__.__name__})
def test_remove_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = test_utils.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.add_reader(r, callback)
loop.add_writer(w, callback)
loop.close()
self.assertFalse(loop.remove_reader(r))
self.assertFalse(loop.remove_writer(w))
def test_add_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = test_utils.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.close()
with self.assertRaises(RuntimeError):
loop.add_reader(r, callback)
with self.assertRaises(RuntimeError):
loop.add_writer(w, callback)
def test_close_running_event_loop(self):
@asyncio.coroutine
def close_loop(loop):
self.loop.close()
coro = close_loop(self.loop)
with self.assertRaises(RuntimeError):
self.loop.run_until_complete(coro)
    def test_close(self):
        """Every scheduling API must raise RuntimeError once the loop
        has been closed."""
        self.loop.close()
        @asyncio.coroutine
        def test():
            pass
        func = lambda: False
        coro = test()
        self.addCleanup(coro.close)
        # operation blocked when the loop is closed
        with self.assertRaises(RuntimeError):
            self.loop.run_forever()
        with self.assertRaises(RuntimeError):
            fut = asyncio.Future(loop=self.loop)
            self.loop.run_until_complete(fut)
        with self.assertRaises(RuntimeError):
            self.loop.call_soon(func)
        with self.assertRaises(RuntimeError):
            self.loop.call_soon_threadsafe(func)
        with self.assertRaises(RuntimeError):
            self.loop.call_later(1.0, func)
        with self.assertRaises(RuntimeError):
            self.loop.call_at(self.loop.time() + .0, func)
        with self.assertRaises(RuntimeError):
            self.loop.run_in_executor(None, func)
        with self.assertRaises(RuntimeError):
            self.loop.create_task(coro)
        with self.assertRaises(RuntimeError):
            self.loop.add_signal_handler(signal.SIGTERM, func)
class SubprocessTestsMixin:
    """Subprocess tests (subprocess_exec / subprocess_shell) mixed into
    the concrete event-loop TestCases; relies on ``self.loop`` and the
    module-level ``MySubprocessProtocol`` helper.
    """

    def check_terminated(self, returncode):
        """Assert *returncode* is consistent with SIGTERM termination."""
        if sys.platform == 'win32':
            self.assertIsInstance(returncode, int)
            # expect 1 but sometimes get 0
        else:
            self.assertEqual(-signal.SIGTERM, returncode)

    def check_killed(self, returncode):
        """Assert *returncode* is consistent with SIGKILL termination."""
        if sys.platform == 'win32':
            self.assertIsInstance(returncode, int)
            # expect 1 but sometimes get 0
        else:
            self.assertEqual(-signal.SIGKILL, returncode)

    def test_subprocess_exec(self):
        """subprocess_exec() round-trips data through the echo helper."""
        prog = os.path.join(os.path.dirname(__file__), 'echo.py')
        connect = self.loop.subprocess_exec(
                        functools.partial(MySubprocessProtocol, self.loop),
                        sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)
        stdin = transp.get_pipe_transport(0)
        stdin.write(b'Python The Winner')
        self.loop.run_until_complete(proto.got_data[1].wait())
        with test_utils.disable_logger():
            transp.close()
        self.loop.run_until_complete(proto.completed)
        self.check_killed(proto.returncode)
        self.assertEqual(b'Python The Winner', proto.data[1])

    def test_subprocess_interactive(self):
        """Two writes to stdin are echoed back incrementally."""
        prog = os.path.join(os.path.dirname(__file__), 'echo.py')
        connect = self.loop.subprocess_exec(
                        functools.partial(MySubprocessProtocol, self.loop),
                        sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)
        stdin = transp.get_pipe_transport(0)
        stdin.write(b'Python ')
        self.loop.run_until_complete(proto.got_data[1].wait())
        proto.got_data[1].clear()
        self.assertEqual(b'Python ', proto.data[1])
        stdin.write(b'The Winner')
        self.loop.run_until_complete(proto.got_data[1].wait())
        self.assertEqual(b'Python The Winner', proto.data[1])
        with test_utils.disable_logger():
            transp.close()
        self.loop.run_until_complete(proto.completed)
        self.check_killed(proto.returncode)

    def test_subprocess_shell(self):
        """subprocess_shell() runs a shell command and captures stdout."""
        connect = self.loop.subprocess_shell(
                        functools.partial(MySubprocessProtocol, self.loop),
                        'echo Python')
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)
        transp.get_pipe_transport(0).close()
        self.loop.run_until_complete(proto.completed)
        self.assertEqual(0, proto.returncode)
        self.assertTrue(all(f.done() for f in proto.disconnects.values()))
        self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
        self.assertEqual(proto.data[2], b'')
        transp.close()

    def test_subprocess_exitcode(self):
        """The child's exit status is reported as the protocol returncode."""
        connect = self.loop.subprocess_shell(
                        functools.partial(MySubprocessProtocol, self.loop),
                        'exit 7', stdin=None, stdout=None, stderr=None)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.completed)
        self.assertEqual(7, proto.returncode)
        transp.close()

    def test_subprocess_close_after_finish(self):
        """close() after the child exited returns None and is harmless."""
        connect = self.loop.subprocess_shell(
                        functools.partial(MySubprocessProtocol, self.loop),
                        'exit 7', stdin=None, stdout=None, stderr=None)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        # With all stdio redirected to None there are no pipe transports.
        self.assertIsNone(transp.get_pipe_transport(0))
        self.assertIsNone(transp.get_pipe_transport(1))
        self.assertIsNone(transp.get_pipe_transport(2))
        self.loop.run_until_complete(proto.completed)
        self.assertEqual(7, proto.returncode)
        self.assertIsNone(transp.close())

    def test_subprocess_kill(self):
        """kill() forcibly ends the child (SIGKILL on POSIX)."""
        prog = os.path.join(os.path.dirname(__file__), 'echo.py')
        connect = self.loop.subprocess_exec(
                        functools.partial(MySubprocessProtocol, self.loop),
                        sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)
        transp.kill()
        self.loop.run_until_complete(proto.completed)
        self.check_killed(proto.returncode)
        transp.close()

    def test_subprocess_terminate(self):
        """terminate() ends the child (SIGTERM on POSIX)."""
        prog = os.path.join(os.path.dirname(__file__), 'echo.py')
        connect = self.loop.subprocess_exec(
                        functools.partial(MySubprocessProtocol, self.loop),
                        sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)
        transp.terminate()
        self.loop.run_until_complete(proto.completed)
        self.check_terminated(proto.returncode)
        transp.close()

    @unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
    def test_subprocess_send_signal(self):
        """send_signal() delivers an arbitrary signal to the child."""
        # bpo-31034: Make sure that we get the default signal handler (killing
        # the process). The parent process may have decided to ignore SIGHUP,
        # and signal handlers are inherited.
        old_handler = signal.signal(signal.SIGHUP, signal.SIG_DFL)
        try:
            prog = os.path.join(os.path.dirname(__file__), 'echo.py')
            connect = self.loop.subprocess_exec(
                            functools.partial(MySubprocessProtocol, self.loop),
                            sys.executable, prog)
            transp, proto = self.loop.run_until_complete(connect)
            self.assertIsInstance(proto, MySubprocessProtocol)
            self.loop.run_until_complete(proto.connected)
            transp.send_signal(signal.SIGHUP)
            self.loop.run_until_complete(proto.completed)
            self.assertEqual(-signal.SIGHUP, proto.returncode)
            transp.close()
        finally:
            signal.signal(signal.SIGHUP, old_handler)

    def test_subprocess_stderr(self):
        """stdout and stderr are captured on separate fds."""
        prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
        connect = self.loop.subprocess_exec(
                        functools.partial(MySubprocessProtocol, self.loop),
                        sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)
        stdin = transp.get_pipe_transport(0)
        stdin.write(b'test')
        self.loop.run_until_complete(proto.completed)
        transp.close()
        self.assertEqual(b'OUT:test', proto.data[1])
        self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2])
        self.assertEqual(0, proto.returncode)

    def test_subprocess_stderr_redirect_to_stdout(self):
        """stderr=subprocess.STDOUT merges the streams onto fd 1."""
        prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
        connect = self.loop.subprocess_exec(
                        functools.partial(MySubprocessProtocol, self.loop),
                        sys.executable, prog, stderr=subprocess.STDOUT)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)
        stdin = transp.get_pipe_transport(0)
        self.assertIsNotNone(transp.get_pipe_transport(1))
        self.assertIsNone(transp.get_pipe_transport(2))
        stdin.write(b'test')
        self.loop.run_until_complete(proto.completed)
        self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'),
                        proto.data[1])
        self.assertEqual(b'', proto.data[2])
        transp.close()
        self.assertEqual(0, proto.returncode)

    def test_subprocess_close_client_stream(self):
        """Closing the child's stdout pipe makes its next write fail."""
        prog = os.path.join(os.path.dirname(__file__), 'echo3.py')
        connect = self.loop.subprocess_exec(
                        functools.partial(MySubprocessProtocol, self.loop),
                        sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)
        stdin = transp.get_pipe_transport(0)
        stdout = transp.get_pipe_transport(1)
        stdin.write(b'test')
        self.loop.run_until_complete(proto.got_data[1].wait())
        self.assertEqual(b'OUT:test', proto.data[1])
        stdout.close()
        self.loop.run_until_complete(proto.disconnects[1])
        stdin.write(b'xxx')
        self.loop.run_until_complete(proto.got_data[2].wait())
        if sys.platform != 'win32':
            self.assertEqual(b'ERR:BrokenPipeError', proto.data[2])
        else:
            # After closing the read-end of a pipe, writing to the
            # write-end using os.write() fails with errno==EINVAL and
            # GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using
            # WriteFile() we get ERROR_BROKEN_PIPE as expected.)
            self.assertEqual(b'ERR:OSError', proto.data[2])
        with test_utils.disable_logger():
            transp.close()
        self.loop.run_until_complete(proto.completed)
        self.check_killed(proto.returncode)

    def test_subprocess_wait_no_same_group(self):
        """A child started in a new session can still be awaited."""
        # start the new process in a new session
        connect = self.loop.subprocess_shell(
                        functools.partial(MySubprocessProtocol, self.loop),
                        'exit 7', stdin=None, stdout=None, stderr=None,
                        start_new_session=True)
        # Bug fix: this line previously read
        # "_, proto = yield self.loop.run_until_complete(connect)" -- the
        # stray "yield" turned this test method into a generator that
        # unittest never iterated, so the test body silently never ran.
        _, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.completed)
        self.assertEqual(7, proto.returncode)

    def test_subprocess_exec_invalid_args(self):
        """subprocess_exec rejects Popen kwargs it does not support."""
        @asyncio.coroutine
        def connect(**kwds):
            yield from self.loop.subprocess_exec(
                asyncio.SubprocessProtocol,
                'pwd', **kwds)
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(universal_newlines=True))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(bufsize=4096))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(shell=True))

    def test_subprocess_shell_invalid_args(self):
        """subprocess_shell rejects list commands and unsupported kwargs."""
        @asyncio.coroutine
        def connect(cmd=None, **kwds):
            if not cmd:
                cmd = 'pwd'
            yield from self.loop.subprocess_shell(
                asyncio.SubprocessProtocol,
                cmd, **kwds)
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(['ls', '-l']))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(universal_newlines=True))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(bufsize=4096))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(shell=False))
# Concrete event-loop test classes: one per selector/proactor flavor
# available on this platform.
if sys.platform == 'win32':
    class SelectEventLoopTests(EventLoopTestsMixin, test_utils.TestCase):
        def create_event_loop(self):
            return asyncio.SelectorEventLoop()
    class ProactorEventLoopTests(EventLoopTestsMixin,
                                 SubprocessTestsMixin,
                                 test_utils.TestCase):
        def create_event_loop(self):
            return asyncio.ProactorEventLoop()
        # Without ssl.MemoryBIO the proactor (IOCP) loop cannot do SSL at
        # all; conditionally override the inherited SSL tests with skips.
        if not sslproto._is_sslproto_available():
            def test_create_ssl_connection(self):
                raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
            def test_create_server_ssl(self):
                raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
            def test_create_server_ssl_verify_failed(self):
                raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
            def test_create_server_ssl_match_failed(self):
                raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
            def test_create_server_ssl_verified(self):
                raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
        def test_legacy_create_ssl_connection(self):
            raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
        def test_legacy_create_server_ssl(self):
            raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
        def test_legacy_create_server_ssl_verify_failed(self):
            raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
        def test_legacy_create_server_ssl_match_failed(self):
            raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
        def test_legacy_create_server_ssl_verified(self):
            raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
        # IOCP loops have no add_reader/add_writer or datagram endpoints;
        # skip the fd-callback tests inherited from the mixin.
        def test_reader_callback(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
        def test_reader_callback_cancel(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
        def test_writer_callback(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
        def test_writer_callback_cancel(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
        def test_create_datagram_endpoint(self):
            raise unittest.SkipTest(
                "IocpEventLoop does not have create_datagram_endpoint()")
        def test_remove_fds_after_closing(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
else:
    from asyncio import selectors
    class UnixEventLoopTestsMixin(EventLoopTestsMixin):
        # On POSIX a child watcher is required for subprocess support.
        def setUp(self):
            super().setUp()
            watcher = asyncio.SafeChildWatcher()
            watcher.attach_loop(self.loop)
            asyncio.set_child_watcher(watcher)
        def tearDown(self):
            asyncio.set_child_watcher(None)
            super().tearDown()
        def test_get_event_loop_new_process(self):
            # A worker process must be able to create its own event loop.
            async def main():
                pool = concurrent.futures.ProcessPoolExecutor()
                result = await self.loop.run_in_executor(
                    pool, _test_get_event_loop_new_process__sub_proc)
                pool.shutdown()
                return result
            self.unpatch_get_running_loop()
            self.assertEqual(
                self.loop.run_until_complete(main()),
                'hello')
    if hasattr(selectors, 'KqueueSelector'):
        class KqueueEventLoopTests(UnixEventLoopTestsMixin,
                                   SubprocessTestsMixin,
                                   test_utils.TestCase):
            def create_event_loop(self):
                return asyncio.SelectorEventLoop(
                    selectors.KqueueSelector())
            # kqueue doesn't support character devices (PTY) on Mac OS X older
            # than 10.9 (Maverick)
            @support.requires_mac_ver(10, 9)
            # Issue #20667: KqueueEventLoopTests.test_read_pty_output()
            # hangs on OpenBSD 5.5
            @unittest.skipIf(sys.platform.startswith('openbsd'),
                             'test hangs on OpenBSD')
            def test_read_pty_output(self):
                super().test_read_pty_output()
            # kqueue doesn't support character devices (PTY) on Mac OS X older
            # than 10.9 (Maverick)
            @support.requires_mac_ver(10, 9)
            def test_write_pty(self):
                super().test_write_pty()
    if hasattr(selectors, 'EpollSelector'):
        class EPollEventLoopTests(UnixEventLoopTestsMixin,
                                  SubprocessTestsMixin,
                                  test_utils.TestCase):
            def create_event_loop(self):
                return asyncio.SelectorEventLoop(selectors.EpollSelector())
    if hasattr(selectors, 'PollSelector'):
        class PollEventLoopTests(UnixEventLoopTestsMixin,
                                 SubprocessTestsMixin,
                                 test_utils.TestCase):
            def create_event_loop(self):
                return asyncio.SelectorEventLoop(selectors.PollSelector())
    # Should always exist.
    class SelectEventLoopTests(UnixEventLoopTestsMixin,
                               SubprocessTestsMixin,
                               test_utils.TestCase):
        def create_event_loop(self):
            return asyncio.SelectorEventLoop(selectors.SelectSelector())
def noop(*args, **kwargs):
    """Accept any arguments, do nothing, and return None."""
    return None
class HandleTests(test_utils.TestCase):
    """Tests for asyncio.Handle: callback storage, cancellation,
    exception reporting, repr() formatting and source tracebacks."""

    def setUp(self):
        super().setUp()
        # A mock loop in debug mode so Handle records creation tracebacks.
        self.loop = mock.Mock()
        self.loop.get_debug.return_value = True

    def test_handle(self):
        def callback(*args):
            return args

        args = ()
        h = asyncio.Handle(callback, args, self.loop)
        self.assertIs(h._callback, callback)
        self.assertIs(h._args, args)
        self.assertFalse(h.cancelled())

        h.cancel()
        self.assertTrue(h.cancelled())

    def test_callback_with_exception(self):
        # An exception raised by the callback must be routed to the
        # loop's exception handler, not propagated.
        def callback():
            raise ValueError()

        self.loop = mock.Mock()
        self.loop.call_exception_handler = mock.Mock()

        h = asyncio.Handle(callback, (), self.loop)
        h._run()

        self.loop.call_exception_handler.assert_called_with({
            'message': test_utils.MockPattern('Exception in callback.*'),
            'exception': mock.ANY,
            'handle': h,
            'source_traceback': h._source_traceback,
        })

    def test_handle_weakref(self):
        wd = weakref.WeakValueDictionary()
        h = asyncio.Handle(lambda: None, (), self.loop)
        wd['h'] = h  # Would fail without __weakref__ slot.

    def test_handle_repr(self):
        self.loop.get_debug.return_value = False

        # simple function
        h = asyncio.Handle(noop, (1, 2), self.loop)
        filename, lineno = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<Handle noop(1, 2) at %s:%s>'
                         % (filename, lineno))

        # cancelled handle
        h.cancel()
        self.assertEqual(repr(h),
                         '<Handle cancelled>')

        # decorated function
        cb = asyncio.coroutine(noop)
        h = asyncio.Handle(cb, (), self.loop)
        self.assertEqual(repr(h),
                         '<Handle noop() at %s:%s>'
                         % (filename, lineno))

        # partial function
        cb = functools.partial(noop, 1, 2)
        h = asyncio.Handle(cb, (3,), self.loop)
        regex = (r'^<Handle noop\(1, 2\)\(3\) at %s:%s>$'
                 % (re.escape(filename), lineno))
        self.assertRegex(repr(h), regex)

        # partial function with keyword args
        cb = functools.partial(noop, x=1)
        h = asyncio.Handle(cb, (2, 3), self.loop)
        regex = (r'^<Handle noop\(x=1\)\(2, 3\) at %s:%s>$'
                 % (re.escape(filename), lineno))
        self.assertRegex(repr(h), regex)

        # partial method
        if sys.version_info >= (3, 4):
            method = HandleTests.test_handle_repr
            cb = functools.partialmethod(method)
            filename, lineno = test_utils.get_function_source(method)
            h = asyncio.Handle(cb, (), self.loop)

            cb_regex = r'<function HandleTests.test_handle_repr .*>'
            cb_regex = (r'functools.partialmethod\(%s, , \)\(\)' % cb_regex)
            regex = (r'^<Handle %s at %s:%s>$'
                     % (cb_regex, re.escape(filename), lineno))
            self.assertRegex(repr(h), regex)

    def test_handle_repr_debug(self):
        self.loop.get_debug.return_value = True

        # simple function; create_lineno points at the Handle() call below,
        # so inserting lines between these two statements breaks the test.
        create_filename = __file__
        create_lineno = sys._getframe().f_lineno + 1
        h = asyncio.Handle(noop, (1, 2), self.loop)
        filename, lineno = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<Handle noop(1, 2) at %s:%s created at %s:%s>'
                         % (filename, lineno, create_filename, create_lineno))

        # cancelled handle
        h.cancel()
        self.assertEqual(
            repr(h),
            '<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
            % (filename, lineno, create_filename, create_lineno))

        # double cancellation won't overwrite _repr
        h.cancel()
        self.assertEqual(
            repr(h),
            '<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
            % (filename, lineno, create_filename, create_lineno))

    def test_handle_source_traceback(self):
        loop = asyncio.get_event_loop_policy().new_event_loop()
        loop.set_debug(True)
        self.set_event_loop(loop)

        def check_source_traceback(h):
            # The last traceback frame must point at the caller's line
            # just above the check_source_traceback() call.
            lineno = sys._getframe(1).f_lineno - 1
            self.assertIsInstance(h._source_traceback, list)
            self.assertEqual(h._source_traceback[-1][:3],
                             (__file__,
                              lineno,
                              'test_handle_source_traceback'))

        # call_soon
        h = loop.call_soon(noop)
        check_source_traceback(h)

        # call_soon_threadsafe
        h = loop.call_soon_threadsafe(noop)
        check_source_traceback(h)

        # call_later
        h = loop.call_later(0, noop)
        check_source_traceback(h)

        # call_at
        # NOTE(review): the comment says call_at but the call below is
        # call_later again, so loop.call_at() is never exercised here.
        h = loop.call_later(0, noop)
        check_source_traceback(h)

    @unittest.skipUnless(hasattr(collections.abc, 'Coroutine'),
                         'No collections.abc.Coroutine')
    def test_coroutine_like_object_debug_formatting(self):
        # Test that asyncio can format coroutines that are instances of
        # collections.abc.Coroutine, but lack cr_core or gi_code attributes
        # (such as ones compiled with Cython).

        class Coro:
            def send(self, v):
                pass

            def throw(self, *exc):
                pass

            def close(self):
                pass

            def __await__(self):
                pass

        coro = Coro()
        coro.__name__ = 'AAA'
        self.assertTrue(asyncio.iscoroutine(coro))
        self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')

        coro.__qualname__ = 'BBB'
        self.assertEqual(coroutines._format_coroutine(coro), 'BBB()')

        coro.cr_running = True
        self.assertEqual(coroutines._format_coroutine(coro), 'BBB() running')

        coro = Coro()
        # Some coroutines might not have '__name__', such as
        # built-in async_gen.asend().
        self.assertEqual(coroutines._format_coroutine(coro), 'Coro()')
class TimerTests(unittest.TestCase):
    """Tests for asyncio.TimerHandle: hashing, cancellation, repr()
    and the rich-comparison ordering used by the loop's timer heap."""

    def setUp(self):
        super().setUp()
        self.loop = mock.Mock()

    def test_hash(self):
        # A timer handle hashes like its scheduled time.
        when = time.monotonic()
        h = asyncio.TimerHandle(when, lambda: False, (),
                                mock.Mock())
        self.assertEqual(hash(h), hash(when))

    def test_timer(self):
        def callback(*args):
            return args

        args = (1, 2, 3)
        when = time.monotonic()
        h = asyncio.TimerHandle(when, callback, args, mock.Mock())
        self.assertIs(h._callback, callback)
        self.assertIs(h._args, args)
        self.assertFalse(h.cancelled())

        # cancel: clears the stored callback and arguments.
        h.cancel()
        self.assertTrue(h.cancelled())
        self.assertIsNone(h._callback)
        self.assertIsNone(h._args)

        # when cannot be None
        self.assertRaises(AssertionError,
                          asyncio.TimerHandle, None, callback, args,
                          self.loop)

    def test_timer_repr(self):
        self.loop.get_debug.return_value = False

        # simple function
        h = asyncio.TimerHandle(123, noop, (), self.loop)
        src = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<TimerHandle when=123 noop() at %s:%s>' % src)

        # cancelled handle
        h.cancel()
        self.assertEqual(repr(h),
                         '<TimerHandle cancelled when=123>')

    def test_timer_repr_debug(self):
        self.loop.get_debug.return_value = True

        # simple function; create_lineno must point at the TimerHandle()
        # call on the next line, so do not insert lines between them.
        create_filename = __file__
        create_lineno = sys._getframe().f_lineno + 1
        h = asyncio.TimerHandle(123, noop, (), self.loop)
        filename, lineno = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<TimerHandle when=123 noop() '
                         'at %s:%s created at %s:%s>'
                         % (filename, lineno, create_filename, create_lineno))

        # cancelled handle
        h.cancel()
        self.assertEqual(repr(h),
                         '<TimerHandle cancelled when=123 noop() '
                         'at %s:%s created at %s:%s>'
                         % (filename, lineno, create_filename, create_lineno))

    def test_timer_comparison(self):
        def callback(*args):
            return args

        when = time.monotonic()

        # Equal deadlines: neither orders before the other.
        h1 = asyncio.TimerHandle(when, callback, (), self.loop)
        h2 = asyncio.TimerHandle(when, callback, (), self.loop)
        # TODO: Use assertLess etc.
        self.assertFalse(h1 < h2)
        self.assertFalse(h2 < h1)
        self.assertTrue(h1 <= h2)
        self.assertTrue(h2 <= h1)
        self.assertFalse(h1 > h2)
        self.assertFalse(h2 > h1)
        self.assertTrue(h1 >= h2)
        self.assertTrue(h2 >= h1)
        self.assertTrue(h1 == h2)
        self.assertFalse(h1 != h2)

        # Cancellation breaks equality.
        h2.cancel()
        self.assertFalse(h1 == h2)

        # Distinct deadlines: strict ordering.
        h1 = asyncio.TimerHandle(when, callback, (), self.loop)
        h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop)
        self.assertTrue(h1 < h2)
        self.assertFalse(h2 < h1)
        self.assertTrue(h1 <= h2)
        self.assertFalse(h2 <= h1)
        self.assertFalse(h1 > h2)
        self.assertTrue(h2 > h1)
        self.assertFalse(h1 >= h2)
        self.assertTrue(h2 >= h1)
        self.assertFalse(h1 == h2)
        self.assertTrue(h1 != h2)

        # Comparison against a plain Handle is unsupported.
        h3 = asyncio.Handle(callback, (), self.loop)
        self.assertIs(NotImplemented, h1.__eq__(h3))
        self.assertIs(NotImplemented, h1.__ne__(h3))
class AbstractEventLoopTests(unittest.TestCase):
    """Checks that every AbstractEventLoop method raises
    NotImplementedError when called on the abstract base class."""

    def test_not_implemented(self):
        f = mock.Mock()
        loop = asyncio.AbstractEventLoop()
        self.assertRaises(
            NotImplementedError, loop.run_forever)
        self.assertRaises(
            NotImplementedError, loop.run_until_complete, None)
        self.assertRaises(
            NotImplementedError, loop.stop)
        self.assertRaises(
            NotImplementedError, loop.is_running)
        self.assertRaises(
            NotImplementedError, loop.is_closed)
        self.assertRaises(
            NotImplementedError, loop.close)
        self.assertRaises(
            NotImplementedError, loop.create_task, None)
        self.assertRaises(
            NotImplementedError, loop.call_later, None, None)
        self.assertRaises(
            NotImplementedError, loop.call_at, f, f)
        self.assertRaises(
            NotImplementedError, loop.call_soon, None)
        self.assertRaises(
            NotImplementedError, loop.time)
        self.assertRaises(
            NotImplementedError, loop.call_soon_threadsafe, None)
        self.assertRaises(
            NotImplementedError, loop.run_in_executor, f, f)
        self.assertRaises(
            NotImplementedError, loop.set_default_executor, f)
        self.assertRaises(
            NotImplementedError, loop.getaddrinfo, 'localhost', 8080)
        self.assertRaises(
            NotImplementedError, loop.getnameinfo, ('localhost', 8080))
        self.assertRaises(
            NotImplementedError, loop.create_connection, f)
        self.assertRaises(
            NotImplementedError, loop.create_server, f)
        self.assertRaises(
            NotImplementedError, loop.create_datagram_endpoint, f)
        self.assertRaises(
            NotImplementedError, loop.add_reader, 1, f)
        self.assertRaises(
            NotImplementedError, loop.remove_reader, 1)
        self.assertRaises(
            NotImplementedError, loop.add_writer, 1, f)
        self.assertRaises(
            NotImplementedError, loop.remove_writer, 1)
        self.assertRaises(
            NotImplementedError, loop.sock_recv, f, 10)
        self.assertRaises(
            NotImplementedError, loop.sock_recv_into, f, 10)
        self.assertRaises(
            NotImplementedError, loop.sock_sendall, f, 10)
        self.assertRaises(
            NotImplementedError, loop.sock_connect, f, f)
        self.assertRaises(
            NotImplementedError, loop.sock_accept, f)
        self.assertRaises(
            NotImplementedError, loop.add_signal_handler, 1, f)
        self.assertRaises(
            NotImplementedError, loop.remove_signal_handler, 1)
        # NOTE(review): the remove_signal_handler assertion below is an
        # exact duplicate of the one above.
        self.assertRaises(
            NotImplementedError, loop.remove_signal_handler, 1)
        self.assertRaises(
            NotImplementedError, loop.connect_read_pipe, f,
            mock.sentinel.pipe)
        self.assertRaises(
            NotImplementedError, loop.connect_write_pipe, f,
            mock.sentinel.pipe)
        self.assertRaises(
            NotImplementedError, loop.subprocess_shell, f,
            mock.sentinel)
        self.assertRaises(
            NotImplementedError, loop.subprocess_exec, f)
        self.assertRaises(
            NotImplementedError, loop.set_exception_handler, f)
        self.assertRaises(
            NotImplementedError, loop.default_exception_handler, f)
        self.assertRaises(
            NotImplementedError, loop.call_exception_handler, f)
        self.assertRaises(
            NotImplementedError, loop.get_debug)
        self.assertRaises(
            NotImplementedError, loop.set_debug, f)
class ProtocolsAbsTests(unittest.TestCase):
    """Smoke-checks that the abstract protocol base classes ship no-op
    callback implementations that all return None."""

    def test_empty(self):
        arg = mock.Mock()

        stream = asyncio.Protocol()
        for outcome in (stream.connection_made(arg),
                        stream.connection_lost(arg),
                        stream.data_received(arg),
                        stream.eof_received()):
            self.assertIsNone(outcome)

        datagram = asyncio.DatagramProtocol()
        for outcome in (datagram.connection_made(arg),
                        datagram.connection_lost(arg),
                        datagram.error_received(arg),
                        datagram.datagram_received(arg, arg)):
            self.assertIsNone(outcome)

        subprocess_proto = asyncio.SubprocessProtocol()
        for outcome in (subprocess_proto.connection_made(arg),
                        subprocess_proto.connection_lost(arg),
                        subprocess_proto.pipe_data_received(1, arg),
                        subprocess_proto.pipe_connection_lost(1, arg),
                        subprocess_proto.process_exited()):
            self.assertIsNone(outcome)
class PolicyTests(unittest.TestCase):
    """Tests for the event loop policy machinery: abstract policy
    methods, the default policy's per-thread loop, and the module-level
    policy getter/setter."""

    def test_event_loop_policy(self):
        # Every AbstractEventLoopPolicy method is abstract.
        policy = asyncio.AbstractEventLoopPolicy()
        self.assertRaises(NotImplementedError, policy.get_event_loop)
        self.assertRaises(NotImplementedError, policy.set_event_loop, object())
        self.assertRaises(NotImplementedError, policy.new_event_loop)
        self.assertRaises(NotImplementedError, policy.get_child_watcher)
        self.assertRaises(NotImplementedError, policy.set_child_watcher,
                          object())

    def test_get_event_loop(self):
        policy = asyncio.DefaultEventLoopPolicy()
        self.assertIsNone(policy._local._loop)

        # First call lazily creates and caches a loop.
        loop = policy.get_event_loop()
        self.assertIsInstance(loop, asyncio.AbstractEventLoop)

        self.assertIs(policy._local._loop, loop)
        self.assertIs(loop, policy.get_event_loop())
        loop.close()

    def test_get_event_loop_calls_set_event_loop(self):
        policy = asyncio.DefaultEventLoopPolicy()

        with mock.patch.object(
                policy, "set_event_loop",
                wraps=policy.set_event_loop) as m_set_event_loop:

            loop = policy.get_event_loop()

            # policy._local._loop must be set through .set_event_loop()
            # (the unix DefaultEventLoopPolicy needs this call to attach
            # the child watcher correctly)
            m_set_event_loop.assert_called_with(loop)

        loop.close()

    def test_get_event_loop_after_set_none(self):
        # Explicitly clearing the loop makes get_event_loop() fail.
        policy = asyncio.DefaultEventLoopPolicy()
        policy.set_event_loop(None)
        self.assertRaises(RuntimeError, policy.get_event_loop)

    @mock.patch('asyncio.events.threading.current_thread')
    def test_get_event_loop_thread(self, m_current_thread):
        # In a non-main thread no loop is auto-created.

        def f():
            policy = asyncio.DefaultEventLoopPolicy()
            self.assertRaises(RuntimeError, policy.get_event_loop)

        th = threading.Thread(target=f)
        th.start()
        th.join()

    def test_new_event_loop(self):
        policy = asyncio.DefaultEventLoopPolicy()

        loop = policy.new_event_loop()
        self.assertIsInstance(loop, asyncio.AbstractEventLoop)
        loop.close()

    def test_set_event_loop(self):
        policy = asyncio.DefaultEventLoopPolicy()
        old_loop = policy.get_event_loop()

        # Only loop instances (or None) are accepted.
        self.assertRaises(AssertionError, policy.set_event_loop, object())

        loop = policy.new_event_loop()
        policy.set_event_loop(loop)
        self.assertIs(loop, policy.get_event_loop())
        self.assertIsNot(old_loop, policy.get_event_loop())
        loop.close()
        old_loop.close()

    def test_get_event_loop_policy(self):
        policy = asyncio.get_event_loop_policy()
        self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy)
        self.assertIs(policy, asyncio.get_event_loop_policy())

    def test_set_event_loop_policy(self):
        self.assertRaises(
            AssertionError, asyncio.set_event_loop_policy, object())

        old_policy = asyncio.get_event_loop_policy()

        policy = asyncio.DefaultEventLoopPolicy()
        asyncio.set_event_loop_policy(policy)
        self.assertIs(policy, asyncio.get_event_loop_policy())
        self.assertIsNot(policy, old_policy)

    def test_get_event_loop_returns_running_loop(self):
        # While a loop is running, asyncio.get_event_loop() must return
        # it without consulting the policy at all.
        class Policy(asyncio.DefaultEventLoopPolicy):
            def get_event_loop(self):
                raise NotImplementedError

        loop = None

        old_policy = asyncio.get_event_loop_policy()
        try:
            asyncio.set_event_loop_policy(Policy())
            loop = asyncio.new_event_loop()
            self.assertIs(asyncio._get_running_loop(), None)

            async def func():
                self.assertIs(asyncio.get_event_loop(), loop)
                self.assertIs(asyncio._get_running_loop(), loop)

            loop.run_until_complete(func())
        finally:
            # Always restore the global policy for later tests.
            asyncio.set_event_loop_policy(old_policy)
            if loop is not None:
                loop.close()

        self.assertIs(asyncio._get_running_loop(), None)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
XInput_edit.py | #!/usr/bin/env python2.7
import ctypes, ctypes.util
from ctypes import Structure, POINTER
from math import sqrt
import time
from threading import Thread, Lock
# loading the DLL #
# Known XInput DLL names, newest first; the first one present wins.
XINPUT_DLL_NAMES = (
    "XInput1_4.dll",
    "XInput9_1_0.dll",
    "XInput1_3.dll",
    "XInput1_2.dll",
    "XInput1_1.dll",
)

libXInput = None
for dll_name in XINPUT_DLL_NAMES:
    located = ctypes.util.find_library(dll_name)
    if located:
        libXInput = ctypes.WinDLL(located)
        break

if not libXInput:
    raise IOError("XInput library was not found.")
#/loading the DLL #
# defining static global variables #
# ctypes aliases matching the Win32 primitive types used by XInput.
WORD = ctypes.c_ushort
BYTE = ctypes.c_ubyte
SHORT = ctypes.c_short
DWORD = ctypes.c_ulong

# Win32 status codes returned by the XInput entry points.
ERROR_SUCCESS = 0
ERROR_BAD_ARGUMENTS = 160
ERROR_DEVICE_NOT_CONNECTED = 1167

# Default analog deadzones; set_deadzone() can override them at runtime.
XINPUT_GAMEPAD_LEFT_THUMB_DEADZONE = 7849
XINPUT_GAMEPAD_RIGHT_THUMB_DEADZONE = 8689
XINPUT_GAMEPAD_TRIGGER_THRESHOLD = 30

# Battery query constants (see XInputGetBatteryInformation).
BATTERY_DEVTYPE_GAMEPAD = 0x00
BATTERY_TYPE_DISCONNECTED = 0x00
BATTERY_TYPE_WIRED = 0x01
BATTERY_TYPE_ALKALINE = 0x02
BATTERY_TYPE_NIMH = 0x03
BATTERY_TYPE_UNKNOWN = 0xFF

BATTERY_LEVEL_EMPTY = 0x00
BATTERY_LEVEL_LOW = 0x01
BATTERY_LEVEL_MEDIUM = 0x02
BATTERY_LEVEL_FULL = 0x03

# Bitmask values usable in an EventHandler filter.  The low 16 bits
# mirror the wButtons masks; the higher bits are library-specific flags.
BUTTON_DPAD_UP = 0x000001
BUTTON_DPAD_DOWN = 0x000002
BUTTON_DPAD_LEFT = 0x000004
BUTTON_DPAD_RIGHT = 0x000008
BUTTON_START = 0x000010
BUTTON_BACK = 0x000020
BUTTON_LEFT_THUMB = 0x000040
BUTTON_RIGHT_THUMB = 0x000080
BUTTON_LEFT_SHOULDER = 0x000100
BUTTON_RIGHT_SHOULDER = 0x000200

BUTTON_A = 0x001000
BUTTON_B = 0x002000
BUTTON_X = 0x004000
BUTTON_Y = 0x008000

STICK_LEFT = 0x010000
STICK_RIGHT = 0x020000
TRIGGER_LEFT = 0x040000
TRIGGER_RIGHT = 0x080000

FILTER_PRESSED_ONLY = 0x100000
FILTER_RELEASED_ONLY = 0x200000
# All event bits set, minus the two pressed/released-only modifiers.
FILTER_NONE = 0xffffff-FILTER_PRESSED_ONLY-FILTER_RELEASED_ONLY

# Identifiers accepted by set_deadzone().
DEADZONE_LEFT_THUMB = 0
DEADZONE_RIGHT_THUMB = 1
DEADZONE_TRIGGER = 2
DEADZONE_DEFAULT = -1   # sentinel: restore the built-in default

# Event.type values produced by get_events().
EVENT_CONNECTED = 1
EVENT_DISCONNECTED = 2
EVENT_BUTTON_PRESSED = 3
EVENT_BUTTON_RELEASED = 4
EVENT_TRIGGER_MOVED = 5
EVENT_STICK_MOVED = 6

# Side identifiers used by trigger and stick events.
LEFT = 0
RIGHT = 1
#/defining static global variables #
# defining XInput compatible structures #
class XINPUT_GAMEPAD(Structure):
    """ctypes mirror of the C XINPUT_GAMEPAD struct.

    Field order and ctypes sizes define the binary layout shared with
    the DLL, so they must not be reordered."""
    _fields_ = [("wButtons", WORD),
                ("bLeftTrigger", BYTE),
                ("bRightTrigger", BYTE),
                ("sThumbLX", SHORT),
                ("sThumbLY", SHORT),
                ("sThumbRX", SHORT),
                ("sThumbRY", SHORT),
                ]
class XINPUT_STATE(Structure):
    """ctypes mirror of the C XINPUT_STATE struct (packet counter plus
    an embedded XINPUT_GAMEPAD).  Field order is ABI — do not reorder."""
    _fields_ = [("dwPacketNumber", DWORD),
                ("Gamepad", XINPUT_GAMEPAD),
                ]

# Short alias used by the polling code in get_events().
State = XINPUT_STATE
class XINPUT_VIBRATION(Structure):
    """ctypes mirror of the C XINPUT_VIBRATION struct (motor speeds,
    0-65535 per motor).  Field order is ABI — do not reorder."""
    _fields_ = [("wLeftMotorSpeed", WORD),
                ("wRightMotorSpeed", WORD),
                ]
class XINPUT_BATTERY_INFORMATION(Structure):
    """ctypes mirror of the C XINPUT_BATTERY_INFORMATION struct.
    Values map through _battery_type_dict / _battery_level_dict."""
    _fields_ = [("BatteryType", BYTE),
                ("BatteryLevel", BYTE),
                ]
# Declare the C signature so ctypes validates arguments and return type.
libXInput.XInputGetState.argtypes = [DWORD, POINTER(XINPUT_STATE)]
libXInput.XInputGetState.restype = DWORD

def XInputGetState(dwUserIndex, state):
    """Fill <state> in place for controller <dwUserIndex>.

    Returns the DWORD status code (ERROR_SUCCESS == 0 on success)."""
    return libXInput.XInputGetState(dwUserIndex, ctypes.byref(state))
# Declare the C signature so ctypes validates arguments and return type.
libXInput.XInputSetState.argtypes = [DWORD, POINTER(XINPUT_VIBRATION)]
libXInput.XInputSetState.restype = DWORD

def XInputSetState(dwUserIndex, vibration):
    """Apply <vibration> motor speeds to controller <dwUserIndex>.

    Returns the DWORD status code (ERROR_SUCCESS == 0 on success)."""
    return libXInput.XInputSetState(dwUserIndex, ctypes.byref(vibration))
# Declare the C signature so ctypes validates arguments and return type.
libXInput.XInputGetBatteryInformation.argtypes = [DWORD, BYTE, POINTER(XINPUT_BATTERY_INFORMATION)]
libXInput.XInputGetBatteryInformation.restype = DWORD

def XInputGetBatteryInformation(dwUserIndex, devType, batteryInformation):
    """Fill <batteryInformation> in place for controller <dwUserIndex>.

    Returns the DWORD status code (ERROR_SUCCESS == 0 on success)."""
    return libXInput.XInputGetBatteryInformation(dwUserIndex, devType, ctypes.byref(batteryInformation))
#/defining XInput compatible structures #
# defining file-local variables #
_battery_type_dict = {BATTERY_TYPE_DISCONNECTED : "DISCONNECTED",
BATTERY_TYPE_WIRED : "WIRED",
BATTERY_TYPE_ALKALINE : "ALKALINE",
BATTERY_TYPE_NIMH : "NIMH",
BATTERY_TYPE_UNKNOWN : "UNKNOWN"}
_battery_level_dict = {BATTERY_LEVEL_EMPTY : "EMPTY",
BATTERY_LEVEL_LOW : "LOW",
BATTERY_LEVEL_MEDIUM : "MEDIUM",
BATTERY_LEVEL_FULL : "FULL"}
_last_states = (State(), State(), State(), State())
_last_norm_values = [None, None, None, None]
_connected = [False, False, False, False]
_last_checked = 0
_deadzones = [{DEADZONE_RIGHT_THUMB : XINPUT_GAMEPAD_RIGHT_THUMB_DEADZONE,
DEADZONE_LEFT_THUMB : XINPUT_GAMEPAD_LEFT_THUMB_DEADZONE,
DEADZONE_TRIGGER : XINPUT_GAMEPAD_TRIGGER_THRESHOLD},
{DEADZONE_RIGHT_THUMB : XINPUT_GAMEPAD_RIGHT_THUMB_DEADZONE,
DEADZONE_LEFT_THUMB : XINPUT_GAMEPAD_LEFT_THUMB_DEADZONE,
DEADZONE_TRIGGER : XINPUT_GAMEPAD_TRIGGER_THRESHOLD},
{DEADZONE_RIGHT_THUMB : XINPUT_GAMEPAD_RIGHT_THUMB_DEADZONE,
DEADZONE_LEFT_THUMB : XINPUT_GAMEPAD_LEFT_THUMB_DEADZONE,
DEADZONE_TRIGGER : XINPUT_GAMEPAD_TRIGGER_THRESHOLD},
{DEADZONE_RIGHT_THUMB : XINPUT_GAMEPAD_RIGHT_THUMB_DEADZONE,
DEADZONE_LEFT_THUMB : XINPUT_GAMEPAD_LEFT_THUMB_DEADZONE,
DEADZONE_TRIGGER : XINPUT_GAMEPAD_TRIGGER_THRESHOLD}]
_button_dict = {0x0001 : "DPAD_UP",
0x0002 : "DPAD_DOWN",
0x0004 : "DPAD_LEFT",
0x0008 : "DPAD_RIGHT",
0x0010 : "START",
0x0020 : "BACK",
0x0040 : "LEFT_THUMB",
0x0080 : "RIGHT_THUMB",
0x0100 : "LEFT_SHOULDER",
0x0200 : "RIGHT_SHOULDER",
0x1000 : "A",
0x2000 : "B",
0x4000 : "X",
0x8000 : "Y",
}
#/defining file-local variables #
# defining custom classes and methods #
class XInputNotConnectedError(Exception):
    """Raised when the queried controller is not connected."""
class XInputBadArgumentError(ValueError):
    """Raised when a controller index outside 0-3 is used."""
def set_deadzone(dzone, value):
    """Sets the deadzone <dzone> to <value>.

    Any raw value returned by the respective stick or trigger
    will be clamped to 0 if it's lower than <value>.
    Passing DEADZONE_DEFAULT restores the built-in threshold.

    The supported deadzones are:
    DEADZONE_RIGHT_THUMB (default value is 8689, max is 32767)
    DEADZONE_LEFT_THUMB  (default value is 7849, max is 32767)
    DEADZONE_TRIGGER     (default value is 30,   max is 255 )"""
    global XINPUT_GAMEPAD_LEFT_THUMB_DEADZONE, XINPUT_GAMEPAD_RIGHT_THUMB_DEADZONE, XINPUT_GAMEPAD_TRIGGER_THRESHOLD

    assert dzone >= 0 and dzone <= 2, "invalid deadzone"

    # Resolve the DEADZONE_DEFAULT sentinel to the built-in default for
    # this deadzone.  After this point value is always >= 0, so the
    # original per-branch re-checks of the sentinel were dead code and
    # have been removed.
    if value == DEADZONE_DEFAULT:
        value = 7849 if dzone == DEADZONE_LEFT_THUMB else \
                8689 if dzone == DEADZONE_RIGHT_THUMB else \
                30

    if dzone == DEADZONE_LEFT_THUMB:
        assert value >= 0 and value <= 32767
        XINPUT_GAMEPAD_LEFT_THUMB_DEADZONE = value
    elif dzone == DEADZONE_RIGHT_THUMB:
        assert value >= 0 and value <= 32767
        XINPUT_GAMEPAD_RIGHT_THUMB_DEADZONE = value
    else:
        assert value >= 0 and value <= 255
        XINPUT_GAMEPAD_TRIGGER_THRESHOLD = value
def get_connected():
    """get_connected() -> (bool, bool, bool, bool)

    Returns whether or not the controller at each index is
    connected.
    You shouldn't check this too frequently."""
    state = XINPUT_STATE()
    # A zero status code from XInputGetState means the slot is connected.
    return tuple(XInputGetState(index, state) == 0 for index in range(4))
def get_state(user_index):
    """get_state(int) -> XINPUT_STATE

    Returns the raw state of the controller."""
    state = XINPUT_STATE()
    status = XInputGetState(user_index, state)
    if status == ERROR_DEVICE_NOT_CONNECTED:
        raise XInputNotConnectedError("Controller [{}] appears to be disconnected.".format(user_index))
    if status == ERROR_BAD_ARGUMENTS:
        raise XInputBadArgumentError("Controller [{}] doesn't exist. IDs range from 0 to 3.".format(user_index))
    # Any remaining non-zero code is unexpected; surface it loudly.
    assert status == 0, "Couldn't get the state of controller [{}]. Is it disconnected?".format(user_index)
    return state
def get_battery_information(user_index):
    """get_battery_information(int) -> (str, str)

    Returns the battery information for controller <user_index>.
    The return value is formatted as (<battery_type>, <battery_level>)"""
    info = XINPUT_BATTERY_INFORMATION()
    XInputGetBatteryInformation(user_index, BATTERY_DEVTYPE_GAMEPAD, info)
    battery_type = _battery_type_dict[info.BatteryType]
    battery_level = _battery_level_dict[info.BatteryLevel]
    return (battery_type, battery_level)
def set_vibration(user_index, left_speed, right_speed):
    """Sets the vibration motor speed for controller <user_index>.

    The speed ranges from 0.0 to 1.0 (float values) or
    0 to 65535 (int values)."""
    def _to_word(speed):
        # Floats up to 1.0 are scaled to the full 16-bit motor range;
        # everything else is used as-is after truncation to int.
        if type(speed) == float and speed <= 1.0:
            speed = round(65535 * speed, 0)
        return int(speed)

    vibration = XINPUT_VIBRATION()
    vibration.wLeftMotorSpeed = _to_word(left_speed)
    vibration.wRightMotorSpeed = _to_word(right_speed)
    return XInputSetState(user_index, vibration) == 0
def get_button_values(state):
    """get_button_values(XINPUT_STATE) -> dict

    Returns a dict mapping each button name to a boolean that tells
    whether the button is currently held.
    You can get the required state using get_state()"""
    pressed = state.Gamepad.wButtons
    masks = (("DPAD_UP", 0x0001), ("DPAD_DOWN", 0x0002),
             ("DPAD_LEFT", 0x0004), ("DPAD_RIGHT", 0x0008),
             ("START", 0x0010), ("BACK", 0x0020),
             ("LEFT_THUMB", 0x0040), ("RIGHT_THUMB", 0x0080),
             ("LEFT_SHOULDER", 0x0100), ("RIGHT_SHOULDER", 0x0200),
             ("A", 0x1000), ("B", 0x2000), ("X", 0x4000), ("Y", 0x8000))
    return {name: bool(pressed & mask) for name, mask in masks}
def get_trigger_values(state):
    """get_trigger_values(XINPUT_STATE) -> (float, float)

    Returns the normalized left and right trigger values.
    You can get the required state using get_state()"""
    def _normalize(raw):
        # Values at or below the threshold are dead; above it, rescale
        # the remaining travel to the 0.0-1.0 range.
        if raw > XINPUT_GAMEPAD_TRIGGER_THRESHOLD:
            return (raw - XINPUT_GAMEPAD_TRIGGER_THRESHOLD) / (255. - XINPUT_GAMEPAD_TRIGGER_THRESHOLD)
        return 0

    return (_normalize(state.Gamepad.bLeftTrigger),
            _normalize(state.Gamepad.bRightTrigger))
def get_thumb_values(state):
    """get_thumb_values(XINPUT_STATE) -> ((float, float), (float, float))

    Returns the normalized left and right thumb stick values,
    represented as X and Y values.
    You can get the required state using get_state()"""
    def _stick(raw_x, raw_y, deadzone):
        magnitude = sqrt(raw_x*raw_x + raw_y*raw_y)

        # Unit direction vector; a centered stick has no direction.
        if magnitude != 0:
            dir_x = raw_x / magnitude
            dir_y = raw_y / magnitude
        else:
            dir_x = 0
            dir_y = 0

        # Rescale the travel beyond the deadzone to 0.0-1.0, clamping
        # the (diagonal) magnitude to the per-axis maximum first.
        norm_magnitude = 0
        if magnitude > deadzone:
            clamped = min(32767, magnitude) - deadzone
            norm_magnitude = clamped / (32767. - deadzone)

        return (dir_x * norm_magnitude, dir_y * norm_magnitude)

    pad = state.Gamepad
    return (_stick(pad.sThumbLX, pad.sThumbLY, XINPUT_GAMEPAD_LEFT_THUMB_DEADZONE),
            _stick(pad.sThumbRX, pad.sThumbRY, XINPUT_GAMEPAD_RIGHT_THUMB_DEADZONE))
class Event:
    """One controller event.

    Always carries <user_index> and <type>; the producer attaches
    further attributes (button, trigger, stick, value, ...) as needed."""

    def __init__(self, user_index, type_):
        self.user_index = user_index
        self.type = type_

    def __str__(self):
        return str(vars(self))
def get_events():
    """get_events() -> generator

    Returns a generator that yields events for each change that
    occured since this function was last called.
    Each event has a <type> and <user_index> associated.
    The other variables vary."""
    global _last_states, _connected, _last_checked, _button_dict, _last_norm_values
    this_time = time.time()
    these_states = (State(), State(), State(), State())

    if _last_checked + 1 < this_time:
        # At most once per second: poll all four slots so newly
        # plugged-in controllers are discovered.
        _last_checked = this_time
        for i in range(4):
            is_connected = (XInputGetState(i, these_states[i]) == 0)
            if is_connected != _connected[i]:
                yield Event(i, EVENT_CONNECTED if is_connected else EVENT_DISCONNECTED)
                _connected[i] = is_connected
    else:
        # Fast path: only poll controllers that were connected last time.
        for i in range(4):
            was_connected = _connected[i]
            if not was_connected:
                continue
            is_connected = (XInputGetState(i, these_states[i]) == 0)
            if not is_connected:
                yield Event(i, EVENT_DISCONNECTED)
                _connected[i] = False
                continue

    for i in range(4):
        is_connected = _connected[i]
        if not is_connected: continue

        # Button transitions: XOR old vs new wButtons to find the bits
        # that changed, then classify each as pressed or released.
        if these_states[i].Gamepad.wButtons != _last_states[i].Gamepad.wButtons:
            changed = these_states[i].Gamepad.wButtons ^ _last_states[i].Gamepad.wButtons
            if changed:
                for button in _button_dict:
                    if changed & button:
                        event = Event(i, EVENT_BUTTON_PRESSED if changed & button & these_states[i].Gamepad.wButtons else EVENT_BUTTON_RELEASED)
                        event.button = _button_dict[button]
                        event.button_id = button
                        yield event

        # NOTE(review): _last_norm_values has one slot per axis group
        # (not per controller), so with several pads connected one pad's
        # normalized values overwrite another's — confirm intent.
        if these_states[i].Gamepad.bLeftTrigger != _last_states[i].Gamepad.bLeftTrigger:
            LT = these_states[i].Gamepad.bLeftTrigger
            normLT = 0
            if LT > XINPUT_GAMEPAD_TRIGGER_THRESHOLD:
                LT -= XINPUT_GAMEPAD_TRIGGER_THRESHOLD
                normLT = LT / (255. - XINPUT_GAMEPAD_TRIGGER_THRESHOLD)
            else:
                LT = 0
            # Only emit an event if the *normalized* value changed.
            if normLT != _last_norm_values[0]:
                event = Event(i, EVENT_TRIGGER_MOVED)
                event.trigger = LEFT
                event.value = normLT
                yield event
            _last_norm_values[0] = normLT

        if these_states[i].Gamepad.bRightTrigger != _last_states[i].Gamepad.bRightTrigger:
            RT = these_states[i].Gamepad.bRightTrigger
            normRT = 0
            if RT > XINPUT_GAMEPAD_TRIGGER_THRESHOLD:
                RT -= XINPUT_GAMEPAD_TRIGGER_THRESHOLD
                normRT = RT / (255. - XINPUT_GAMEPAD_TRIGGER_THRESHOLD)
            else:
                RT = 0
            if normRT != _last_norm_values[1]:
                event = Event(i, EVENT_TRIGGER_MOVED)
                event.trigger = RIGHT
                event.value = normRT
                yield event
            _last_norm_values[1] = normRT

        # Left stick: same normalization as get_thumb_values().
        if these_states[i].Gamepad.sThumbLX != _last_states[i].Gamepad.sThumbLX or these_states[i].Gamepad.sThumbLY != _last_states[i].Gamepad.sThumbLY:
            LX = these_states[i].Gamepad.sThumbLX
            LY = these_states[i].Gamepad.sThumbLY
            magL = sqrt(LX*LX + LY*LY)

            if magL != 0:
                normLX = LX / magL
                normLY = LY / magL
            else: # if magL == 0 the stick is centered, there is no direction
                normLX = 0
                normLY = 0

            normMagL = 0
            if (magL > XINPUT_GAMEPAD_LEFT_THUMB_DEADZONE):
                magL = min(32767, magL)
                magL -= XINPUT_GAMEPAD_LEFT_THUMB_DEADZONE
                normMagL = magL / (32767. - XINPUT_GAMEPAD_LEFT_THUMB_DEADZONE)
            else:
                magL = 0

            out = (normLX * normMagL, normLY * normMagL)
            if out != _last_norm_values[2]:
                event = Event(i, EVENT_STICK_MOVED)
                event.stick = LEFT
                event.x = out[0]
                event.y = out[1]
                event.value = normMagL
                event.dir = (normLX, normLY) if event.value else (0.0, 0.0)
                yield event
            _last_norm_values[2] = out

        # Right stick: identical handling with the right-stick deadzone.
        if these_states[i].Gamepad.sThumbRX != _last_states[i].Gamepad.sThumbRX or these_states[i].Gamepad.sThumbRY != _last_states[i].Gamepad.sThumbRY:
            RX = these_states[i].Gamepad.sThumbRX
            RY = these_states[i].Gamepad.sThumbRY
            magR = sqrt(RX*RX + RY*RY)

            if magR != 0:
                normRX = RX / magR
                normRY = RY / magR
            else: # if magR == 0 the stick is centered, there is no direction
                normRX = 0
                normRY = 0

            normMagR = 0
            if (magR > XINPUT_GAMEPAD_RIGHT_THUMB_DEADZONE):
                magR = min(32767, magR)
                magR -= XINPUT_GAMEPAD_RIGHT_THUMB_DEADZONE
                normMagR = magR / (32767. - XINPUT_GAMEPAD_RIGHT_THUMB_DEADZONE)
            else:
                magR = 0

            out = (normRX * normMagR, normRY * normMagR)
            if out != _last_norm_values[3]:
                event = Event(i, EVENT_STICK_MOVED)
                event.stick = RIGHT
                event.x = out[0]
                event.y = out[1]
                event.value = normMagR
                event.dir = (normRX, normRY) if event.value else (0.0, 0.0)
                yield event
            _last_norm_values[3] = out

    # Keep this poll as the baseline for the next call.
    _last_states = these_states
class EventHandler:
    """Base class for consumers of controller events.

    An instance tracks which controller indices (0-3) it listens to and
    a filter bitmask; subclasses implement the process_*_event()
    callbacks that a polling thread dispatches to.
    """

    def __init__(self, *controllers):
        self.set_controllers(*controllers)
        # Default mask lets every event through.  (The original bound
        # this to a throwaway local named ``filter``, which shadowed the
        # builtin for no benefit.)
        self.filter = FILTER_NONE

    def process_button_event(self, event):
        """Called for EVENT_BUTTON_PRESSED / EVENT_BUTTON_RELEASED."""
        raise NotImplementedError("Method not implemented. Must be implemented in the child class")

    def process_stick_event(self, event):
        """Called for EVENT_STICK_MOVED."""
        raise NotImplementedError("Method not implemented. Must be implemented in the child class")

    def process_trigger_event(self, event):
        """Called for EVENT_TRIGGER_MOVED."""
        raise NotImplementedError("Method not implemented. Must be implemented in the child class")

    def process_connection_event(self, event):
        """Called for EVENT_CONNECTED / EVENT_DISCONNECTED."""
        raise NotImplementedError("Method not implemented. Must be implemented in the child class")

    def add_controller(self, user_index):
        """Adds a given controller to the ones that are processed"""
        assert 0 <= user_index <= 3, "controllers must have a user_index between 0 and 3"
        self.controllers.add(user_index)

    def set_controllers(self, *controllers):
        """Sets the controllers that are processed"""
        if not controllers:
            raise ValueError("You need to specify at least one controller")
        for user_index in controllers:
            assert 0 <= user_index <= 3, "controllers must have a user_index between 0 and 3"
        self.controllers = set(controllers)

    def remove_controller(self, user_index):
        """Removes a given controller from the ones that are processed.

        Returns True if the controller was being processed, else False."""
        assert 0 <= user_index <= 3, "controllers must have a user_index between 0 and 3"
        assert len(self.controllers) >= 2, "you have to keep at least one controller"
        try:
            self.controllers.remove(user_index)
            return True
        except KeyError:
            return False

    def has_controller(self, user_index):
        """Checks whether or not this handler handles controller <user_index>"""
        assert 0 <= user_index <= 3, "controllers must have a user_index between 0 and 3"
        return user_index in self.controllers

    def set_filter(self, filter_):
        """Applies a new filter mask to this handler.

        A filter can be any combination of filters, such as
        (BUTTON_A | BUTTON_B) to only get events for buttons A and B or
        (FILTER_RELEASED_ONLY | BUTTON_Y) to get an event when Y is released."""
        self.filter = filter_

    def clear_filter(self):
        """Removes all filters"""
        self.filter = FILTER_NONE
class GamepadThread:
    """Background polling thread that dispatches XInput events to handlers.

    Handlers may be added/removed concurrently; mutations are queued under a
    lock and applied at the top of each polling iteration.
    """

    def __init__(self, *event_handlers, auto_start=True):
        """Create the thread with one or more EventHandler instances.

        Args:
            *event_handlers: handlers that will receive events
            auto_start (bool): start polling immediately (default True,
                matching the previous hard-coded behavior)

        Raises:
            TypeError: if any handler is not an EventHandler subclass instance
        """
        for event_handler in event_handlers:
            if (event_handler is None or not issubclass(type(event_handler), EventHandler)):
                raise TypeError("The event handler must be a subclass of XInput.EventHandler")
        self.handlers = set(event_handlers)
        self.lock = Lock()
        self.queued_new_handlers = []
        self.queued_removed_handlers = []
        # BUGFIX: the old code probed hasattr(self, "__thread"), but the
        # double-underscore attribute is name-mangled to
        # _GamepadThread__thread when assigned, so the probe was always False:
        # every start() call spawned a fresh thread and __del__ never called
        # stop(). Track the thread explicitly instead.
        self.__thread = None
        if auto_start:
            self.start()

    def __tfun(self):  # thread function
        """Polling loop: apply queued handler changes, then route events."""
        while (self.running):
            # Apply queued handler additions/removals under the lock so
            # callers on other threads never mutate self.handlers directly.
            self.lock.acquire()
            for new_handler in self.queued_new_handlers:
                self.handlers.add(new_handler)
            for removed_handler in self.queued_removed_handlers:
                if removed_handler in self.handlers:
                    self.handlers.remove(removed_handler)
            self.queued_new_handlers.clear()
            self.queued_removed_handlers.clear()
            self.lock.release()
            events = get_events()
            for event in events:  # filtering events
                if event.type == EVENT_CONNECTED or event.type == EVENT_DISCONNECTED:
                    for handler in self.handlers:
                        if handler.has_controller(event.user_index):
                            handler.process_connection_event(event)
                elif event.type == EVENT_BUTTON_PRESSED or event.type == EVENT_BUTTON_RELEASED:
                    for handler in self.handlers:
                        if handler.has_controller(event.user_index):
                            # Honor pressed-only/released-only filters, then the button mask.
                            if not((handler.filter & (FILTER_PRESSED_ONLY+FILTER_RELEASED_ONLY)) and not(handler.filter & (FILTER_PRESSED_ONLY << (event.type - EVENT_BUTTON_PRESSED)))):
                                if event.button_id & handler.filter:
                                    handler.process_button_event(event)
                elif event.type == EVENT_TRIGGER_MOVED:
                    for handler in self.handlers:
                        if handler.has_controller(event.user_index):
                            if (TRIGGER_LEFT << event.trigger) & handler.filter:
                                handler.process_trigger_event(event)
                elif event.type == EVENT_STICK_MOVED:
                    for handler in self.handlers:
                        if handler.has_controller(event.user_index):
                            if (STICK_LEFT << event.stick) & handler.filter:
                                handler.process_stick_event(event)
                else:
                    raise ValueError("Event type not recognized")

    def start(self):
        """Start the polling thread (idempotent once a thread exists)."""
        self.running = True
        if self.__thread is None:
            self.__thread = Thread(target=self.__tfun, args=())
            self.__thread.daemon = True
            self.__thread.start()

    def stop(self):
        """Stop the polling loop and wait for the thread to exit."""
        self.running = False
        self.__thread.join()

    def add_event_handler(self, event_handler):
        """Queue a handler to be added on the next polling iteration."""
        if (event_handler is None or not issubclass(type(event_handler), EventHandler)):
            raise TypeError("The event handler must be a subclass of XInput.EventHandler")
        self.lock.acquire()
        self.queued_new_handlers.append(event_handler)
        self.lock.release()

    def remove_event_handler(self, event_handler):
        """Queue a handler to be removed on the next polling iteration."""
        if (event_handler is None or not issubclass(type(event_handler), EventHandler)):
            raise TypeError("The event handler must be a subclass of XInput.EventHandler")
        self.lock.acquire()
        self.queued_removed_handlers.append(event_handler)
        self.lock.release()

    def __del__(self):
        # Only stop if a thread was actually created.
        if self.__thread is not None:
            self.stop()
#/defining custom classes and methods #
|
eval_utils.py | import argparse
import json
import os
import random
from typing import Counter
from tqdm import tqdm
from PIL import Image
import numpy as np
import voc12.data
from torch.utils.data import DataLoader
from tool import pyutils, imutils, torchutils
import importlib
from torchvision import transforms
import torch
import torch.nn.functional as F
import shutil
from multiprocessing import Array, Process
# from utils.util import chunks
import random
from matplotlib import pyplot as plt
import png
from tqdm import tqdm
def chunks(lst, num_workers=None, n=None):
    """
    Split ``lst`` into a list of smaller chunk lists.

    Args:
        lst (list): the target list
        num_workers (int, optional): when given (and ``n`` is not), the list
            is divided into ``num_workers`` chunks of equal ceiling size
        n (int, optional): when given, the list is divided into chunks of
            length ``n`` (takes precedence over ``num_workers``)

    Returns:
        list: a list of chunk lists

    Raises:
        ValueError: if neither ``num_workers`` nor ``n`` is given.
    """
    if num_workers is None and n is None:
        # The old code printed a message and called exit(); raising lets
        # callers handle the mistake instead of killing the process.
        raise ValueError("chunks() requires either num_workers or n")
    if n is None:
        # Derive the chunk length so the list splits into num_workers chunks.
        n = int(np.ceil(len(lst) / num_workers))
    # Both original branches did exactly this slicing loop; deduplicated.
    return [lst[i:i + n] for i in range(0, len(lst), n)]
def online_cut_patches(im, im_size=96, stride=32):
    """
    Crop an image into square sub-patches, including border cases.

    The returned positions are the upper-left corners of each patch.

    Args:
        im (np.ndarray): the image to crop
        im_size (int, optional): patch side length. Defaults to 96.
        stride (int, optional): pixels between two neighbouring patches.
            Defaults to 32.

    Returns:
        (list, list): list of PIL patch images and their (row, col) positions
    """
    h, w = im.shape[:2]

    def _start_offsets(length):
        # Regular stride offsets, plus one flush with the far border when
        # the stride does not divide the dimension evenly.
        if length < im_size:
            return np.array([0])
        offsets = np.arange(0, length - im_size + 1, stride)
        if length % stride != 0:
            offsets = np.append(offsets, length - im_size)
        return offsets

    im_list = []
    position_list = []
    # Slicing only the first two axes works identically for 2-D (grayscale)
    # and 3-D (color) arrays, so one loop covers both cases.
    for row in _start_offsets(h):
        for col in _start_offsets(w):
            patch = im[row:row + im_size, col:col + im_size]
            im_list.append(Image.fromarray(np.uint8(patch.copy())))
            position_list.append((row, col))
    return im_list, position_list
def multiscale_online_crop(im, im_size, stride):
    """
    Crop ``im`` into ``im_size`` patches spaced by ``stride``.

    Thin wrapper around :func:`online_cut_patches`; despite the name, no
    multi-scale resizing is currently performed (the resize code was
    commented out).

    Returns:
        (list, list): the patch images and their positions
    """
    return online_cut_patches(im, im_size, stride)
def crop_validation_images(dataset_path, gt_path, bg_path, side_length, stride, validation_folder_name):
    """
    Crop the validation images (plus their masks) to reduce validation time.

    Output layout under ``validation_folder_name``: ``img/``, ``mask/`` and
    ``background-mask/``, with patch position and image label encoded in the
    file names. If the crop parameters are unchanged this only needs to run
    once.

    Args:
        dataset_path (str): the validation dataset image folder
        gt_path (str): groundtruth mask folder
        bg_path (str): background-mask folder
        side_length (int): the crop size
        stride (int): the distance between two crops
        validation_folder_name (str): destination root folder
    """
    images = os.listdir(dataset_path)
    if not os.path.exists(f'{validation_folder_name}/img'):
        os.mkdir(f'{validation_folder_name}/img')
    if not os.path.exists(f'{validation_folder_name}/mask'):
        os.mkdir(f'{validation_folder_name}/mask')
    if not os.path.exists(f'{validation_folder_name}/background-mask'):
        os.mkdir(f'{validation_folder_name}/background-mask')
    # Image-level labels keyed by file name.
    with open(f'../WSSS4LUAD/val_image_label/groundtruth.json') as f:
        big_labels = json.load(f)
    for image in tqdm(images):
        # NOTE(review): images numbered below 31 are copied whole instead of
        # cropped — presumably they are already small enough; confirm.
        if int(image.split('.')[0]) < 31:
            label = big_labels[image]
            image_path = os.path.join(dataset_path, image)
            gt_mask_path = os.path.join(gt_path, image)
            bg_mask_path = os.path.join(bg_path, image)
            shutil.copyfile(image_path, f'{validation_folder_name}/img/{image.split(".")[0]}_{label}.png')
            shutil.copyfile(gt_mask_path, f'{validation_folder_name}/mask/{image.split(".")[0]}_{label}.png')
            shutil.copyfile(bg_mask_path, f'{validation_folder_name}/background-mask/{image.split(".")[0]}_{label}.png')
        else:
            image_path = os.path.join(dataset_path, image)
            gt_mask_path = os.path.join(gt_path, image)
            bg_mask_path = os.path.join(bg_path, image)
            # Crop image, groundtruth and background mask with identical
            # geometry so patches stay aligned across the three folders.
            im = np.asarray(Image.open(image_path))
            im_list, position_list = multiscale_online_crop(im, side_length, stride)
            gt_im = np.asarray(Image.open(gt_mask_path))
            gt_list, _ = multiscale_online_crop(gt_im, side_length, stride)
            bg_im = np.asarray(Image.open(bg_mask_path))
            bg_list, _ = multiscale_online_crop(bg_im, side_length, stride)
            label = big_labels[image]
            for j in range(len(im_list)):
                im_list[j].save(f'{validation_folder_name}/img/{image.split(".")[0]}_{position_list[j]}_{label}.png')
            for j in range(len(gt_list)):
                gt_list[j].save(f'{validation_folder_name}/mask/{image.split(".")[0]}_{position_list[j]}_{label}.png')
            for j in range(len(bg_list)):
                bg_list[j].save(f'{validation_folder_name}/background-mask/{image.split(".")[0]}_{position_list[j]}_{label}.png')
def prepare_wsss(side_length: int, stride: int) -> None:
    """
    Offline-crop the WSSS validation set into the ``wsss_valid`` folder.

    Args:
        side_length (int): the crop image length
        stride (int): the steps for cutting a new image
    """
    out_dir = 'wsss_valid'
    img_dir = '../WSSS4LUAD/Dataset_wsss/2.validation/img'
    mask_dir = '../WSSS4LUAD/Dataset_wsss/2.validation/mask'
    bg_dir = '../WSSS4LUAD/Dataset_wsss/2.validation/background-mask'
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    print('crop validation set images ...')
    crop_validation_images(img_dir, mask_dir, bg_dir, side_length, stride, out_dir)
    print('cropping finishes!')
def prepare_crag(side_length: int, stride: int) -> None:
    """
    Offline-crop the CRAG validation set into the ``crag_valid`` folder.

    Unlike the WSSS variant, CRAG has no background mask to crop.

    Args:
        side_length (int): the crop image length
        stride (int): the steps for cutting a new image
    """
    out_dir = 'crag_valid'
    img_dir = '../WSSS4LUAD/Dataset_crag/2.validation/img'
    mask_dir = '../WSSS4LUAD/Dataset_crag/2.validation/mask'
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    print('crop validation set images ...')
    crop_crag_validation_images(img_dir, mask_dir, side_length, stride, out_dir)
    print('cropping finishes!')
def crop_crag_validation_images(dataset_path, gt_path, side_length, stride, validation_folder_name):
    """
    Crop CRAG validation images and masks into aligned patches.

    Output goes to ``validation_folder_name/img`` and ``.../mask``; every
    patch file name encodes its (row, col) position plus a fixed ``[1, 1]``
    label suffix so the file-name format matches the WSSS pipeline.

    Args:
        dataset_path (str): validation image folder
        gt_path (str): groundtruth mask folder
        side_length (int): the crop size
        stride (int): the distance between two crops
        validation_folder_name (str): destination root folder
    """
    images = os.listdir(dataset_path)
    if not os.path.exists(f'{validation_folder_name}/img'):
        os.mkdir(f'{validation_folder_name}/img')
    if not os.path.exists(f'{validation_folder_name}/mask'):
        os.mkdir(f'{validation_folder_name}/mask')
    for image in tqdm(images):
        image_path = os.path.join(dataset_path, image)
        gt_mask_path = os.path.join(gt_path, image)
        # Crop image and mask with identical geometry so patches stay aligned.
        im = np.asarray(Image.open(image_path))
        im_list, position_list = multiscale_online_crop(im, side_length, stride)
        gt_im = np.asarray(Image.open(gt_mask_path))
        gt_list, _ = multiscale_online_crop(gt_im, side_length, stride)
        for j in range(len(im_list)):
            im_list[j].save(f'{validation_folder_name}/img/{image.split(".")[0]}_{position_list[j]}_[1, 1].png')
        for j in range(len(gt_list)):
            gt_list[j].save(f'{validation_folder_name}/mask/{image.split(".")[0]}_{position_list[j]}_[1, 1].png')
def get_overall_crag_valid_score(pred_image_path, groundtruth_path, num_workers=5, mask_path=None, num_class=3):
    """
    Compute the mIoU score for CRAG predictions against the groundtruth.

    This body was a byte-for-byte duplicate of :func:`get_overall_valid_score`;
    it now delegates so the metric logic lives in one place.

    Args:
        pred_image_path (str): the prediction to test, npy format
        groundtruth_path (str): groundtruth images, png format
        num_workers (int): number of processes in parallel, default is 5.
        mask_path (str): the white background mask folder, png format
        num_class (int): number of classes, default is 3.

    Returns:
        float: the mIOU score
    """
    return get_overall_valid_score(pred_image_path, groundtruth_path,
                                   num_workers=num_workers,
                                   mask_path=mask_path,
                                   num_class=num_class)
def get_overall_valid_score(pred_image_path, groundtruth_path, num_workers=5, mask_path=None, num_class=3):
    """
    get the scores with validation groundtruth, the background will be masked out
    and return the score for all photos

    Args:
        pred_image_path (str): the prediction require to test, npy format
        groundtruth_path (str): groundtruth images, png format
        num_workers (int): number of process in parallel, default is 5.
        mask_path (str): the white background, png format
        num_class (int): default is 3.

    Returns:
        float: the mIOU score
    """
    image_names = list(map(lambda x: x.split('.')[0], os.listdir(pred_image_path)))
    random.shuffle(image_names)
    image_list = chunks(image_names, num_workers)

    def f(intersection, union, image_list):
        # Worker: accumulate per-class intersection/union over its image chunk.
        gt_list = []
        pred_list = []
        for im_name in image_list:
            cam = np.load(os.path.join(pred_image_path, f"{im_name}.npy"), allow_pickle=True).astype(np.uint8).reshape(-1)
            groundtruth = np.asarray(Image.open(groundtruth_path + f"/{im_name}.png")).reshape(-1)
            if mask_path:
                # Pixels where the mask is non-zero are background; drop them
                # from both prediction and groundtruth.
                mask = np.asarray(Image.open(mask_path + f"/{im_name}.png")).reshape(-1)
                cam = cam[mask == 0]
                groundtruth = groundtruth[mask == 0]
            gt_list.extend(groundtruth)
            pred_list.extend(cam)

        pred = np.array(pred_list)
        real = np.array(gt_list)
        for i in range(num_class):
            # BUGFIX: a class must be counted when it appears in either the
            # prediction OR the groundtruth. The old `if i in pred` guard
            # skipped chunks where the class was only in the groundtruth,
            # dropping false negatives from the union and inflating the mIoU.
            if i in pred or i in real:
                intersection[i] += np.logical_and(pred == i, real == i).sum()
                union[i] += np.logical_or(pred == i, real == i).sum()

    # Shared double arrays so worker processes can accumulate in place.
    intersection = Array("d", [0] * num_class)
    union = Array("d", [0] * num_class)
    p_list = []
    for i in range(len(image_list)):
        p = Process(target=f, args=(intersection, union, image_list[i]))
        p.start()
        p_list.append(p)
    for p in p_list:
        p.join()

    # Epsilon avoids division by zero for classes absent from the whole set.
    eps = 1e-7
    total = 0
    for i in range(num_class):
        class_i = intersection[i] / (union[i] + eps)
        total += class_i
    return total / num_class
def add_background_mask(prediction_path, origin_ims_path, destination):
    """
    Mark near-white background pixels as 255 in every prediction array.

    For each original RGB image, pixels whose channel sum exceeds 600
    (i.e. close to white) are treated as background and set to 255 in the
    matching ``.npy`` prediction, which is saved under ``destination``.

    Args:
        prediction_path (str): folder with ``<name>.npy`` predictions
        origin_ims_path (str): folder with the original RGB images
        destination (str): output folder for the masked predictions
    """
    if not os.path.exists(destination):
        os.mkdir(destination)
    for image in tqdm(os.listdir(origin_ims_path)):
        truth_image = np.asarray(Image.open(os.path.join(origin_ims_path, image)))
        # Per-pixel channel sum; > 600 of a possible 765 means near-white.
        sum_image = np.sum(truth_image, axis=2)
        img_name = image.split('.')[0]
        prediction = np.load(os.path.join(prediction_path, img_name+'.npy'), allow_pickle=True)
        # print(truth_image.shape, prediction.shape, sum_image.shape)
        prediction[sum_image>600] = 255
        np.save(os.path.join(destination, img_name+'.npy'), prediction)
def visualize(path):
    """
    Render the first five ``.npy`` predictions in ``path`` as color PNGs
    side by side with their original training images, saving into ``temp/``.

    Also prints the sorted class histogram of each prediction.
    """
    if not os.path.exists(f'temp'):
        os.mkdir(f'temp')
    img_path = "../WSSS4LUAD/Dataset_wsss/1.training"
    for file in tqdm(os.listdir(path)[:5]):
        cam_path = path
        name = file.split('.')[0]
        cam = np.load(os.path.join(cam_path, f'{name}.npy'), allow_pickle=True).astype(np.uint8)
        # Indexed-PNG palette: one RGB triple per class id (last is white).
        palette = [(0, 64, 128), (64, 128, 0), (243, 152, 0), (255, 255, 255)]
        with open(f'temp/{name}.png', 'wb') as f:
            w = png.Writer(cam.shape[1], cam.shape[0],palette=palette, bitdepth=8)
            w.write(f, cam)
        plt.figure(1, figsize=(40, 40))
        origin_im = plt.imread(f'{img_path}/{name}.png')
        im = plt.imread(f'temp/{name}.png')
        # prediction2 = plt.imread(f'temp2/{i:02d}.png')
        plt.subplot(131)
        plt.imshow(origin_im)
        plt.title('origin image')
        plt.subplot(132)
        plt.imshow(im)
        plt.title(f'prediction of model')
        # plt.subplot(133)
        # plt.imshow(gt)
        # plt.title('groundtruth')
        # NOTE: overwrites the palette PNG written above with the figure.
        plt.savefig(f'temp/{name}.png')
        print(sorted(Counter(cam.reshape(-1))))
if __name__ == "__main__":
    # prepare_crag(224, 224)
    # Evaluate binary (num_class=2) mIoU of the predictions against the CRAG masks.
    result = get_overall_valid_score("validoutcampred", "/home/yyubm/WSSS4LUAD/test_CRAG_labelconsider_mask_112", num_class=2)
    # result2 = get_overall_valid_score("validoutcampred_label", "/home/yyubm/WSSS4LUAD/test_CRAG_labelconsider_mask")
    print(result)
    # add_background_mask("outcampred", "../WSSS4LUAD/Dataset_wsss/1.training", "biglabel_addbg_wsss_train_pseudo")
    # visualize('biglabel_addbg_wsss_train_pseudo')
PortScanner.py | import threading
import socket
from Sources.PortList import *
openPorts = []
def portScanThread(target, port):
    """Probe one TCP port on ``target``; record it in ``openPorts`` if open.

    Intended to run in a worker thread spawned by :func:`portScan`.

    Args:
        target (str): host name or IP address to probe
        port (int): TCP port number
    """
    global openPorts
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(0.5)
    try:
        # BUGFIX: connect() returns None, so the old `con.close()` raised
        # AttributeError on every successful probe (swallowed by a bare
        # except) and the socket itself was never closed — a descriptor
        # leak. Close the socket in all cases via finally.
        s.connect((target, port))
        openPorts.append(port)
    except OSError:
        # Refused/filtered port or timeout: simply not open.
        pass
    finally:
        s.close()
def portScan(target):
    """Concurrently probe every port from ``Sources.PortList`` on ``target``.

    Args:
        target (str): host name or IP address to scan

    Returns:
        list: the shared ``openPorts`` list containing the open ports found.
    """
    global openPorts
    # BUGFIX: the global result list used to accumulate across calls, so a
    # second scan reported stale ports from the previous target. Clear it
    # in place (callers may hold a reference to the same list).
    del openPorts[:]
    portThreads = []
    for portTuple in ports:
        for portNumber, scope in portTuple.items():
            portThreads.append(threading.Thread(target=portScanThread, args=(target, int(portNumber))))
    # Start all probes, then wait for every one to finish.
    for thread in portThreads:
        thread.start()
    for thread in portThreads:
        thread.join()
    return openPorts
|
client.py | import socket
import json
import time
import collections
import threading
import h2.connection
import h2.events
host = socket.gethostname()
port = 6001
class Client:
    """Chat client that speaks HTTP/2 over a raw TCP socket using ``h2``.

    On construction it connects to the module-level ``host``/``port``,
    performs the HTTP/2 handshake, logs in with its client id, and spawns a
    background thread that prints any data the server pushes.
    """
    def __init__(self, client_id):
        # Identifier sent to the server in the login request body.
        self.client_id = client_id
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Client-side HTTP/2 protocol state machine.
        self.conn = h2.connection.H2Connection(client_side = True)
        self.setup()
        # NOTE(review): non-daemon reader thread — it keeps the process
        # alive until the server closes the connection; confirm intended.
        threading.Thread(target = self.listen).start()

    def setup(self):
        """Connect, initiate the HTTP/2 connection, and POST the login."""
        self.socket.connect((host, port))
        self.conn.initiate_connection()
        # The connection preface must be flushed before any request frames.
        self.socket.sendall(self.conn.data_to_send())
        body = json.dumps({"client": self.client_id}).encode('utf-8')
        request_headers = collections.OrderedDict([(':method', 'POST'),
                                                   (':path', 'http://localhost:6001/login/'),
                                                   ('content-length', len(body))])
        stream_id = self.conn.get_next_available_stream_id()
        self.conn.send_headers(stream_id, request_headers)
        self.conn.send_data(stream_id, body)
        self.socket.sendall(self.conn.data_to_send())

    def listen(self):
        """Read frames until the socket closes, printing received data."""
        while True:
            data = self.socket.recv(4096)
            if not data:
                # Server closed the connection.
                break
            events = self.conn.receive_data(data)
            for event in events:
                if isinstance(event, h2.events.DataReceived):
                    content = event.data.decode('utf-8')
                    print(content)

    def send_message(self, receiver, message):
        """POST a chat message addressed to ``receiver``."""
        body = json.dumps({"to": receiver, "message": message}).encode('utf-8')
        request_headers = collections.OrderedDict([(':method', 'POST'),
                                                   (':path', 'http://localhost:6001/send/'),
                                                   ('content-length', len(body))])
        stream_id = self.conn.get_next_available_stream_id()
        self.conn.send_headers(stream_id, request_headers)
        self.conn.send_data(stream_id, body)
        self.socket.sendall(self.conn.data_to_send())

    def end_chat(self):
        """GET the /end/ endpoint to tell the server this client is leaving."""
        request_headers = collections.OrderedDict([(':method', 'GET'),
                                                   (':path', 'http://localhost:6001/end/')])
        stream_id = self.conn.get_next_available_stream_id()
        self.conn.send_headers(stream_id, request_headers)
        self.socket.sendall(self.conn.data_to_send())
|
workers.py |
import multiprocessing
class holderClass():
    """Demo payload object passed through multiprocessing queues."""
    def __init__(self):
        # Counter incremented by workers.
        self.x = 12

    def increment(self,in_q,out_q):
        # Consume objects from in_q, bump .x by one, forward to out_q.
        # NOTE(review): `while in_q` tests the Queue object's truthiness,
        # which is always True, so this loop never ends on its own and
        # blocks in get() once the queue drains — presumably meant to run
        # until the owning process is killed; confirm.
        while in_q:
            object_class = in_q.get()
            object_class.x = object_class.x + 1
            out_q.put(object_class)
class testClass():
    """Scratch harness experimenting with multiprocessing Process and Pool."""
    def __init__(self):
        self.object = holderClass()
        # Snapshot of the holder's counter at construction time.
        self.x = self.object.x

    def process(self):
        # Spawn a Process running the holder's increment method.
        #process1 = multiprocessing.Process(target=self.test1)
        #process1.start()
        #process1.join()
        # NOTE(review): increment(self, in_q, out_q) is started with no
        # args here, so the child process will raise TypeError when it
        # runs — presumably queues were meant to be passed via args=(...);
        # confirm.
        process2 = multiprocessing.Process(target=self.object.increment)
        process2.start()
        process2.join()

    def pool(self):
        # Single-worker pool mapping square() over 0..9.
        pool = multiprocessing.Pool(1)
        #for answer in pool.imap(increment, range(10)):
        #    print(answer)
        #print
        for answer in pool.imap(self.square, range(10)):
            print(answer)

    def test2(self):
        print("Hello, world 2")

    def square(self, x):
        # Pure helper used as the pool's map function.
        return x * x

    def self_square(self):
        # NOTE(review): despite the name this resets x to 12 rather than
        # squaring it — confirm intent.
        self.x = 12
def worker(x):
    """Return *x* multiplied by itself."""
    product = x * x
    return product
def is_even(numbers, q):
    """Push every even number from *numbers* onto *q*, then a None sentinel."""
    for candidate in numbers:
        if candidate % 2 == 0:
            q.put(candidate)
    # Sentinel tells downstream consumers the stream is finished.
    q.put(None)
def even_is(in_q, out_q):
    """Forward even numbers from *in_q* to *out_q* until a None sentinel.

    BUGFIX: the original looped on ``while in_q`` (always true for a Queue
    object), so after forwarding the sentinel it kept blocking on ``get()``
    forever; it also compared with ``== None``. The loop now returns once
    the sentinel has been propagated.
    """
    while True:
        number = in_q.get()
        if number is None:
            # Propagate the end-of-stream sentinel, then stop.
            out_q.put(None)
            return
        if number % 2 == 0:
            out_q.put(number)
def square(in_q, out_q):
    """Forward the square of each number from *in_q* to *out_q* until None.

    BUGFIX: the original looped on ``while in_q`` (always true for a Queue
    object), so after forwarding the sentinel it kept blocking on ``get()``
    forever; it also compared with ``== None``. The loop now returns once
    the sentinel has been propagated.
    """
    while True:
        number = in_q.get()
        if number is None:
            # Propagate the end-of-stream sentinel, then stop.
            out_q.put(None)
            return
        out_q.put(number * number)
|
base.py | # -*- coding: utf-8 -*-
import uuid
import enum
import abc
import threading
import queue
import datetime
import logging
import argparse
_MOD_LOGGER = logging.getLogger(__name__)
from .errors import LeetPluginError, LeetError
class LeetJobStatus(enum.Enum):
    """Lifecycle state of an individual job.

    Intended state flow:
        PENDING -> EXECUTING, CANCELLED, ERROR
        EXECUTING -> COMPLETED, CANCELLED, ERROR, PENDING

    A cancellation may arrive while the job is already under way, so the
    transition CANCELLED -> COMPLETED is also possible.
    """
    #TODO one more status related to pending_cancellation?
    PENDING = 0
    EXECUTING = 1
    COMPLETED = 2
    CANCELLED = 3
    ERROR = 4
class LeetSOType(enum.Enum):
    """Standardized operating-system identifiers for remote machines."""
    WINDOWS = 1
    LINUX = 2
    MAC = 3
    UNKNOWN = 16
class LeetFileAttributes(enum.Enum):
    """Attribute flags for an entry in a remote directory listing."""
    DIRECTORY = 0
    HIDDEN = 1
    SYSTEM = 2
class LeetMachine(metaclass=abc.ABCMeta):
    """Backend-facing abstraction of a remote machine within LEET.

    Every backend implementation must subclass this and fill in the
    abstract members.

    Attributes:
        hostname (str): the machine hostname
        can_connect (bool): whether LEET may currently attempt a connection
        drive_list (list of str): the machine's drives, if known
        so_type (LeetSOType): the machine's operating-system type
        backend_name (str): name of the backend that found the machine
    """

    def __init__(self, hostname, backend_name):
        """Create a LeetMachine.

        Args:
            hostname (str): hostname of the machine
            backend_name (str): name of the backend the machine belongs to
        """
        self.hostname = hostname
        self.backend_name = backend_name
        # Drives are unknown until a backend fills them in.
        self.drive_list = None
        self.so_type = LeetSOType.UNKNOWN

    @property
    @abc.abstractmethod
    def can_connect(self):
        """Whether the machine can receive a connection; kept current either
        automatically or via refresh()."""

    @abc.abstractmethod
    def refresh(self):
        """Refresh the machine status, updating can_connect."""

    @abc.abstractmethod
    def connect(self):
        """Open a session with the machine.

        Returns:
            (LeetSession*): a LeetSession subclass instance
        """

    def __repr__(self):
        """Return a nicely formatted representation string."""
        return (f'{self.__class__.__name__}(hostname={self.hostname}, '
                f'can_connect={self.can_connect}, so_type={self.so_type}, '
                f'drive_list={self.drive_list})'
                )
class LeetSession(metaclass=abc.ABCMeta):
    """An abstraction of a session, allowing the decoupling of the backend and
    the plugin. This class (or its subclasses) is passed to plugins where
    it will be used to interact with the machine.

    Note:
        Any implementation of this class can raise only two errors aside from the
        standard python errors: 'LeetSessionError' and 'LeetCommandError'. This
        allows the plugins to handle only those, simplifying plugin design.

    Warning:
        The 'raw_session' attribute can be used by any plugin, but it becomes
        responsibility of the plugin to check if the class is of the right
        type and correctly handle and/or raise the necessary errors.

    Attributes:
        raw_session (?): The raw session created by the backend; its type depends
            completely on the backend in use.
        path_separator (str): The path separator for the remote machine's OS
    """
    def __init__(self, session, machine_info):
        """Returns an object of LeetSession"""
        self.raw_session = session
        # Pick the separator from the machine's OS; anything non-Windows
        # (including UNKNOWN) falls back to "/".
        if machine_info.so_type == LeetSOType.WINDOWS:
            self.path_separator = "\\"
        #TODO in case of unknow, should we throw an error?
        else:
            self.path_separator = "/"

    @abc.abstractmethod
    def list_processes(self):
        """Returns a list of processes currently executing on the machine.

        Returns:
            (list of dicts): A list of dicts where each entry on the list represents
                a process and each dictionary MUST have the following format:
                {"username" (str): Username the process is executing,
                 "pid" (int): The process ID,
                 "ppid" (int): The parent process ID,
                 "start_time" (datetime): The date and time, in UTC, that the process started,
                 "command_line" (str): The commandline used to start the process,
                 "path" (str): The path of the executable}

                For example:
                [{"username": "NT AUTHORITY\\SYSTEM",
                  "ppid": 644,
                  "pid": 856,
                  "command_line": 'svchost.exe -k dcomlaunch -p -s PlugPlay',
                  "start_time": datetime.datetime(2019-05-01 13:00:00),
                  "path": "c:\\windows\\system32\\svchost.exe",
                }]
        """

    @abc.abstractmethod
    def get_file(self, remote_file_path):
        """Returns the contents of a remote file. The file will be completely
        loaded in memory. There is NO guarantee it will work for locked files.
        This request must block until the whole file has been read.

        Args:
            remote_file_path (str): The absolute path on the remote machine.

        Returns:
            (binary content): The contents of the file, as binary stream
        """
    #TODO should we require the session backend returns any file, including locked ones?

    @abc.abstractmethod
    def put_file(self, fp, remote_file_path, overwrite):
        """Transfer a file to the remote machine.

        If the destination path does not exist, the backend implementation MUST
        create it, unless the disk doesn't exist.

        Args:
            fp (file like object): A file like object with the data opened as
                binary
            remote_file_path (str): Absolute path where the file will be saved
            overwrite (bool): If it is True, it will overwrite the file.

        Returns:
            None

        Raises:
            (LeetCommandError): If the file exists and overwrite is set to False,
                or the path does not exist.
        """

    @abc.abstractmethod
    def delete_file(self, remote_file_path):
        """Delete a file or directory from the remote machine.

        If a folder is passed to `remote_file_path`, it will recursively
        delete all its contents.

        Args:
            remote_file_path (str): File path of the file to be deleted.

        Returns:
            None

        Raises:
            (LeetCommandError): If the file doesn't exist, or the file is locked
                by the OS.
        """

    @abc.abstractmethod
    def make_dir(self, remote_path, recursive=True):
        """Creates a directory on the remote machine. This function WILL NOT
        check for root path and providing a path like "c:" or "/" is a violation.

        Args:
            remote_path (str): The remote path that will be created, WITHOUT the
                filename
            recursive (bool): If this flag is true, it will create all necessary
                subdirectories in the path
        """

    @abc.abstractmethod
    def list_dir(self, remote_path):
        """Lists the contents of a directory on the remote machine.

        Args:
            remote_path (str): The remote path to list, WITHOUT a filename

        Returns:
            (list of dicts): A list of dicts where each entry on the list represents
                a file or directory and each dictionary MUST have the following format:
                {"name" (str): Name of the file or directory,
                 "size" (int): Size (in bytes),
                 "attributes" (list of LeetFileAttributes): A list of the attributes for the entry,
                 "create_time" (datetime): Date of creation, as seen by standard SO tools
                 "modification_time" (datetime): Date of modification, as seen by standard SO tools
                }

                For example:
                [{"name": "abc.txt",
                  "size": 644,
                  "attributes": 856,
                  "create_time": datetime.datetime(2019-05-01 13:00:00),
                  "modification_time": datetime.datetime(2019-05-01 13:00:00),
                }]
        """

    @abc.abstractmethod
    def exists(self, remote_file_path):
        """Checks if a path or file exists.

        Checks if a path or file exists, it also considers the target
        OS and performs the correct case comparison.

        Note:
            This method HAS to correctly handle files and directories!

        Args:
            remote_file_path (str): File path to be checked.

        Returns:
            (bool): True if it exists, False otherwise
        """

    @abc.abstractmethod
    def start_process(self, cmd_string, cwd="", background=False):
        """Starts a process on the remote machine.

        Returns:
            (str): If the command is not executed on the background,
                returns the command output as a string
            (None): If the process is marked to start in the background
        """

    @abc.abstractmethod
    def __enter__(self):
        """Enter context"""

    @abc.abstractmethod
    def __exit__(self, exeception_type, exception_value, traceback):
        """Exit context"""
class LeetSearchRequest():
    """Represents a search request from LEET to the backends with the necessary
    information on what we are looking for and, if something is found, what
    is necessary to create a job from it.

    Attributes:
        id (uuid.UUID): Automatically random generated search ID
        start_time (datetime.datetime): The time, in UTC, when the search was
            created
        end_time (datetime.datetime): The time, in UTC, when the search was
            finished
        hostnames (list of str): A list of hostnames the backends will look for
        plugin (LeetPlugin*): An instance of the LeetPlugin that will be executed
            on the machines
        ready (bool): A boolean that tells if the search is finished or not
        backend_quantity (int): The number of backends that are expected results
    """
    def __init__(self, hostnames, plugin, backend_numbers=0):
        """Returns a new object of LeetSearchRequest.

        Args:
            hostnames (list of str): A list of hostnames the backends will look for
            plugin (LeetPlugin*): An instance of the LeetPlugin that will be executed
                on the machines
            backend_numbers (int): The number of backends that are expected results
        """
        self.id = uuid.uuid4()
        self.start_time = datetime.datetime.utcnow()
        self.end_time = None
        self.hostnames = hostnames
        self.plugin = plugin
        self.ready = False
        self.backend_quantity = backend_numbers
        # We can have any number of threads adding machines or setting
        # themselves as completed; locks control all shared state.
        self._completed_backends = set()
        self._change_lock = threading.RLock()
        self._found_machines = []
        #TODO do we need two locks?
        self._machine_lock = threading.RLock()

    @property
    def found_machines(self):
        """Stores all the machines found on the search, by backend."""
        return self._found_machines

    def add_completed_backend(self, backend_name):
        """Informs that a backend has completed the search.

        This information is tracked and once all expected backends have
        reported, the search is marked ready and the end time is stamped.

        Args:
            backend_name (str): The unique name of the backend that has finished
                the search
        """
        if not self.ready:
            with self._change_lock:
                self._completed_backends.add(backend_name)
                if len(self._completed_backends) >= self.backend_quantity:
                    self.end_time = datetime.datetime.utcnow()
                    self.ready = True

    def add_found_machines(self, machine_list):
        """Adds the machines found by the backend to the list of found machines.

        Note:
            There is no guarantee that a machine won't be found in more than one
            backend.

        Args:
            machine_list (list of LeetMachine*): A list of subclasses of LeetMachine
                with all the machines found by the backend
        """
        if not self.ready:
            with self._machine_lock:
                self._found_machines += machine_list

    def __eq__(self, other):
        # BUGFIX: this compared against LeetJob, so two LeetSearchRequest
        # instances could never be equal; compare against the right class.
        if isinstance(other, LeetSearchRequest):
            return self.id == other.id
        else:
            return False

    def __repr__(self):
        'Return a nicely formatted representation string'
        return (f'{self.__class__.__name__}(id={self.id}, '
                f'start_time={self.start_time}, end_time={self.end_time}, '
                f'hostnames={self.hostnames}, plugin={self.plugin}, '
                f'completed_backends={self._completed_backends})'
                )
class _JobFSM():
"""A very, very, very simplified state machine used to control how a job
status can change. The machine is simple enough that it can be used by
different types of variables, but as this should be used only internally
for LEET, so we can define the expected types.
Attributes:
current_state (LeetJobStatus): Indicates what is the current status of the job
"""
def __init__(self, transitions_table, initial):
"""Creates a new _JobFSM() object. The transition table is a list of dicts
that contains the source state, the destination state and a trigger.
Args:
transitions_table (list of dict): Each entry in the list has to be a dict
with the keys 'source', 'trigger' and 'dest'. The type of values of 'source'
and 'dest' must be the same and, in this case, LeetJobStatus.
The format is mandatory.
initial (LeetJobStatus): The initial state of the FSM
Returns:
_JobFSM: New object using with the correct transition table
"""
self._transitions = {}
self.current_state = initial
#this lock controls the change of status by the machine
self._t_lock = threading.RLock()
self._process_transitions(transitions_table)
def _process_transitions(self, transitions_table):
"""Process the provided transition table so it is better used by
the class. Effectively, it changes the format from:
[{"source": source_state, "trigger": "trigger_name", "dest": dest_state},
{"source": source_state, "trigger": "trigger_name", "dest": dest_state},
...]
to a dictionary:
{(source_state, "trigger_name"): dest_state,
(source_state, "trigger_name"): dest_state,
...}
This information is stored and used to move between states.
"""
for t in transitions_table:
self._transitions[(t["source"], t["trigger"])] = t["dest"]
def next(self, condition):
"""Function used to transition between machine states. The condition HAS
to be the same as the trigger that was passed, i.e., the operation '=='
has to be valid and return True
Args:
condition (str): The condition that happened to change the trigger.
Raises:
LeetError: If there is a condition that has not been registered. Basically,
if there is an attempt to move from a valid state, without the right
trigger.
"""
try:
self._t_lock.acquire()
self.current_state = self._transitions[(self.current_state, condition)]
except KeyError as e:
raise LeetError(f"Invalid transition from {self.current_state} with trigger {condition}") from e
finally:
self._t_lock.release()
class LeetJob():
    """Class that represents a Job in LEET. It creates a unique, random
    identifier for the job and tracks which machine the job will run on,
    which plugin, the result of the plugin and the state of the job.

    Attributes:
        id (UUID): ID of the job. Should not be manually set or changed at any point
        machine (string): The name of the machine where the plugin will be executed
        plugin_result (PluginResult): Where the result of the plugin execution will
            be stored
        plugin_instance (PluginBase*): An instance of any class that implements 'PluginBase'.
    """

    def __init__(self, machine, plugin_instance):
        """Creates a new LeetJob() object.

        Args:
            machine (string): The name of the machine
            plugin_instance (PluginBase*): An instance of any class that
                implements 'PluginBase'.

        Returns:
            LeetJob: New object representing the job.
        """
        self.id = uuid.uuid4()
        self.machine = machine
        self.start_time = datetime.datetime.utcnow()
        self.plugin_result = None
        self.plugin_instance = plugin_instance
        self._status_machine = None
        self._conf_status_machine()

    @property
    def status(self):
        """Status of the job (LeetJobStatus)."""
        return self._status_machine.current_state

    def _conf_status_machine(self):
        """Defines the transition table for all jobs, following what is
        documented in LeetJobStatus.

        Two special cases of note:
        * pending -> pending: a job can go from pending to pending by itself;
          it is already in the state so there is no issue.
        * cancelled receiving 'executing' stays cancelled: if a job has been
          cancelled while LEET is trying to connect, it is a waste to just
          drop the connection, so we stay in cancelled and, if the job is
          successful, move it straight to completed.
        """
        #TODO having a machine per job is wasteful. It is the same machine for all jobs,
        #replace this for a single machine for all jobs.
        #TODO I don't like the cancelled/executing behavior described above
        t = [
            {"trigger" : "pending", "source" : LeetJobStatus.PENDING, "dest" : LeetJobStatus.PENDING},
            {"trigger" : "executing", "source" : LeetJobStatus.PENDING, "dest" : LeetJobStatus.EXECUTING},
            {"trigger" : "cancel", "source" : LeetJobStatus.PENDING, "dest" : LeetJobStatus.CANCELLED},
            {"trigger" : "pending", "source" : LeetJobStatus.EXECUTING, "dest" : LeetJobStatus.PENDING},
            {"trigger" : "cancel", "source" : LeetJobStatus.EXECUTING, "dest" : LeetJobStatus.CANCELLED},
            {"trigger" : "completed", "source" : LeetJobStatus.EXECUTING, "dest" : LeetJobStatus.COMPLETED},
            {"trigger" : "completed", "source" : LeetJobStatus.CANCELLED, "dest" : LeetJobStatus.COMPLETED},
            {"trigger" : "executing", "source" : LeetJobStatus.CANCELLED, "dest" : LeetJobStatus.CANCELLED},
            {"trigger" : "error", "source" : LeetJobStatus.EXECUTING, "dest" : LeetJobStatus.ERROR},
            {"trigger" : "error", "source" : LeetJobStatus.PENDING, "dest" : LeetJobStatus.ERROR}
        ]
        self._status_machine = _JobFSM(t, LeetJobStatus.PENDING)

    def pending(self):
        """Change the job status to pending.

        Raises:
            LeetError: If the job can't be moved into this state.
        """
        self._status_machine.next("pending")

    def executing(self):
        """Change the job status to executing.

        Raises:
            LeetError: If the job can't be moved into this state.
        """
        self._status_machine.next("executing")

    def cancel(self):
        """Change the job status to cancelled.

        Raises:
            LeetError: If the job can't be moved into this state.
        """
        self._status_machine.next("cancel")

    def completed(self):
        """Change the job status to completed.

        Raises:
            LeetError: If the job can't be moved into this state.
        """
        self._status_machine.next("completed")

    def error(self):
        """Change the job status to error.

        Raises:
            LeetError: If the job can't be moved into this state.
        """
        self._status_machine.next("error")

    def __eq__(self, other):
        if isinstance(other, LeetJob):
            return self.id == other.id
        else:
            return False

    def __hash__(self):
        # BUG FIX: defining __eq__ without __hash__ made LeetJob instances
        # unhashable in Python 3 (e.g. unusable as dict keys / set members).
        # Hash on the same unique id that equality is based on.
        return hash(self.id)

    def __repr__(self):
        'Return a nicely formatted representation string'
        return (f'{self.__class__.__name__}(id={self.id}, '
                f'machine={self.machine}, status={self.status}, '
                f'plugin_result={self.plugin_result}, plugin_instance={self.plugin_instance})'
                )
class _BackendControl(enum.Enum):
"""Controls what activity will be performed by the LeetBackend.
Control command | Value
=======================================
STOP | None
SEARCH | LeetSearchRequest
"""
STOP = 0x0
SEARCH = 0x1
class LeetBackend(metaclass=abc.ABCMeta):
    """The main class for a LeetBackend. It abstracts the interaction between
    the Leet main class and the backends. It is one of the classes that has
    to be overloaded by a backend.

    Attributes:
        backend_name (str): Name of the backend. Needs to be unique per
            instance of the backend
        max_sessions (int): The maximum number of sessions that a backend can
            hold simultaneously when connecting to the remote machines
        leet (Leet): A "pointer" to the main Leet class, allowing the backend
            to inform it of things it has done.
    """

    def __init__(self, backend_name, max_sessions):
        """Returns a new object of LeetBackend. Can't be used directly, only
        by subclasses.

        Args:
            backend_name (str): The name of the backend. It has to be unique
                by instance.
            max_sessions (int): The maximum number of sessions that a backend
                can hold simultaneously when connecting to the remote machines
        """
        self.backend_name = backend_name
        self.max_sessions = max_sessions
        #TODO change this to a threadpool?
        self._monitor_thread = threading.Thread(target=self._monitor_queue, name="Thr-" + backend_name)
        self._queue = queue.Queue()
        self.leet = None

    def start(self):
        """Start the backend thread and resources.

        Note:
            If overloaded by the subclass, the subclass MUST call the parent
            and return 'self'"""
        self._monitor_thread.start()
        return self

    def shutdown(self):
        """Deallocates the backend threads and resources.

        Note:
            If overloaded by the subclass, the subclass MUST call the parent
        """
        self._queue.put((_BackendControl.STOP, None))
        self._monitor_thread.join()

    def search_machines(self, search_request):
        """Search for a group of machines on the backend.

        Args:
            search_request (LeetSearchRequest): The request with the
                information to be searched.
        """
        self._queue.put((_BackendControl.SEARCH, search_request))

    def _monitor_queue(self):
        """Main loop for the thread owned by this class. It monitors the
        internal queue for anything coming from the Leet class or the backend
        implementation and interfaces between them.

        Raises:
            LeetError: If an unknown control code is received.
        """
        while True:
            code, value = self._queue.get()
            if code == _BackendControl.STOP:
                # BUG FIX: acknowledge the STOP item as well, so a potential
                # Queue.join() on the control queue cannot hang forever.
                self._queue.task_done()
                break
            elif code == _BackendControl.SEARCH:
                search_request = value
                machines = self._search_machines(search_request)
                _MOD_LOGGER.debug("Search finished. %d/%d found in this instance.", len(machines), len(search_request.hostnames))
                search_request.add_found_machines(machines)
                search_request.add_completed_backend(self.backend_name)
                _MOD_LOGGER.debug("Backend '%s' has finished searching.", self.backend_name)
                if search_request.ready:
                    _MOD_LOGGER.debug("Search is ready, sending notification")
                    self.leet.notify_search_completed(search_request)
            else:
                # BUG FIX: the message used to be passed logging-style
                # ('%s', code), so the code was never interpolated into the
                # exception text.
                raise LeetError(f"'{code}' is not a valid internal code")
            self._queue.task_done()

    def __enter__(self):
        """Enter context: starts the backend."""
        return self.start()

    def __exit__(self, exeception_type, exception_value, traceback):
        """Leave context: shuts the backend down."""
        self.shutdown()

    @abc.abstractmethod
    def _search_machines(self, search_request):
        """Method that searches for the machines in the backend.
        This method needs to be overloaded by the subclass.

        Args:
            search_request (LeetSearchRequest): The search request to be processed
        """
##############################################################################
# Plugin basic data class section
##############################################################################
class LeetPluginParser(argparse.ArgumentParser):
    """ArgumentParser subclass whose error handling raises an exception
    instead of exiting the process, so plugin argument problems can be
    caught by the caller.
    """
    def error(self, message):
        # argparse normally prints usage and calls sys.exit(); plugins need
        # a catchable exception instead.
        raise LeetPluginError(message)
class PluginBase(metaclass=abc.ABCMeta):
    """The base class for all plugins. Defines basic methods on how to handle
    parameters and what the plugin needs to implement.

    Instructions on how to implement a plugin can be found in the
    "PLUGIN_INSTRUCTIONS" document. It is very important to note that plugins
    MUST be stateless, as a plugin might fail in the middle of execution.
    Effectively, this means: before doing something, check if it has been
    done before. A plugin behaves fully as python code and can execute
    anything, for example saving files.

    Also, a plugin must return a PluginResult object. See the documentation
    there and "PLUGIN_INSTRUCTIONS" for more information.

    Attributes:
        LEET_PG_NAME (str): name of the plugin, as it is going to be
            presented to the user
        LEET_PG_DESCRIPTION (str): A short description of the plugin
    """
    #TODO provide a hashing function as part of the backend?

    def __init__(self):
        """Creates a new PluginBase object.

        As an abstract base class, it can't be instantiated by itself.

        Returns:
            PluginBase: New object representing the plugin.
        """
        self.arg_parser = LeetPluginParser(prog=self.LEET_PG_NAME, add_help=False)
        self.args = None

    def parse_parameters(self, args):
        """Parses the raw argument tokens given to the plugin.

        Args:
            args (list of str): raw command-line style tokens.

        Raises:
            LeetPluginError: If the arguments are invalid (raised via
                LeetPluginParser.error).
        """
        self.args = self.arg_parser.parse_args(args)

    def get_help(self):
        """Returns a plugin help text based on description and parameters.

        Returns:
            str: A string containing the help of the plugin
        """
        header = [self.LEET_PG_DESCRIPTION, "=" * 40]
        help_msg = self.arg_parser.format_help()
        # Drop the "usage:" prefix and the "optional arguments:" section
        # header, keeping only the informative lines.
        help_msg = help_msg.split(" ", 1)[1].replace("optional arguments:", "").split("\n")
        help_msg = "\n".join(header + [a for a in help_msg if a])

        return help_msg

    def get_plugin_parameters(self):
        """Returns all the parsed parameters of a plugin.

        Returns:
            dict: parsed argument names mapped to their values.
        """
        return vars(self.args)

    @abc.abstractmethod
    def run(self, session, hostname):
        """This function will be called by the backend to execute the plugin
        and has to be overloaded. It will receive a session object, depending
        on the backend, and the hostname of the machine in question.

        Args:
            session (depends on the backend): A session where the plugin can
                interact with the endpoint machine.
            hostname (str): The hostname where the plugin is executing.

        Returns:
            (list of dict): All the dicts should have the same keys, as the
                results will be passed to the user interface. For anything
                other than that, the plugin MUST implement what is necessary,
                including error handling.

                Example:
                data = [{"file name": "example.txt", size: "123"},
                        {"file name": "super.txt", size: "567"}]

        Raises:
            LeetPluginError: In case of any errors during the execution
                of the plugin
        """

    def __repr__(self):
        'Return a nicely formatted representation string'
        # BUG FIX: previously referenced self._ltpg_param, whose only
        # assignment in __init__ was commented out, so repr() always raised
        # AttributeError. Report the parsed args instead.
        return (f'{self.__class__.__name__}(name={self.LEET_PG_NAME}, '
                f'description={self.LEET_PG_DESCRIPTION}, '
                f'args={self.args})'
                )
|
ternjs.py | # pylint: disable=E0401,C0111,R0903
import os
import re
import json
import platform
import subprocess
import threading
from urllib import request
from urllib.error import HTTPError
from urllib.error import URLError
from deoplete.source.base import Base
# True when running on Windows; used to pick the loopback host and to run
# the tern binary through the shell.
is_window = platform.system() == "Windows"
# Matches a partial module path inside require("...") or `import ... from "..."`
# so completion can trigger while an import specifier is being typed.
import_re = r'=?\s*require\(["\'"][@?\w\./-]*$|\s+from\s+["\'][@?\w\./-]*$'
import_pattern = re.compile(import_re)
# URL opener with an empty proxy map: the tern server is always local, so
# any environment proxy settings must be bypassed.
opener = request.build_opener(request.ProxyHandler({}))
class Source(Base):
    """Deoplete source that completes JavaScript through a tern server.

    A tern process is started lazily, per project, on the first completion
    request; results are delivered asynchronously back to deoplete.
    """

    def __init__(self, vim):
        super(Source, self).__init__(vim)

        self.name = 'tern'
        self.mark = '[TernJS]'
        self.input_pattern = (r'\.\w*$|^\s*@\w*$|' + import_re)
        self.rank = 900
        self.filetypes = ['javascript']
        self.filetypes.extend(vim.vars.get(
            'deoplete#sources#ternjs#filetypes', []))

    def on_init(self, context):
        """Cache user settings and editor state.

        The tern server itself is not started here; it is launched in a
        thread on the first gather_candidates() call.
        """
        # Renamed from `vars` so the builtin of that name is not shadowed.
        settings = context['vars']

        self._localhost = (is_window and '127.0.0.1') or 'localhost'
        self._tern_command = settings.get(
            'deoplete#sources#ternjs#tern_bin', 'tern')
        self._tern_types = bool(settings.get('deoplete#sources#ternjs#types', 0))
        self._tern_depths = bool(settings.get('deoplete#sources#ternjs#depths', 0))
        self._tern_docs = bool(settings.get('deoplete#sources#ternjs#docs', 0))
        self._tern_filter = bool(settings.get('deoplete#sources#ternjs#filter', 1))
        self._tern_case_insensitive = \
            bool(settings.get('deoplete#sources#ternjs#case_insensitive', 0))
        self._tern_guess = bool(settings.get('deoplete#sources#ternjs#guess', 1))
        self._tern_sort = bool(settings.get('deoplete#sources#ternjs#sort', 1))
        self._tern_expand_word_forward = \
            bool(settings.get('deoplete#sources#ternjs#expand_word_forward', 1))
        self._tern_omit_object_prototype = \
            bool(settings.get('deoplete#sources#ternjs#omit_object_prototype', 1))
        self._tern_include_keywords = \
            bool(settings.get('deoplete#sources#ternjs#include_keywords', 0))
        self._tern_in_literal = \
            bool(settings.get('deoplete#sources#ternjs#in_literal', 1))

        # Call to vim/nvim on init so the source can run asynchronously.
        self._vim_current_path = self.vim.eval("expand('%:p:h')")
        self._vim_current_cwd = self.vim.eval('getcwd()')

        # Tern server state; started later in a thread.
        self._is_server_started = False
        self._port = None
        self._proc = None
        self._buffer_length = 0
        self._current_buffer = []

        # If something went wrong (e.g. tern crashed or is not installed),
        # this source will do nothing.
        self._do_nothing = False

    def get_complete_position(self, context):
        """Return the column where completion should start, or -1."""
        m = import_pattern.search(context['input'])
        if m:
            # Completing inside require("...") / import ... from "...":
            # completion must start right at the opening quote. A quote is
            # guaranteed to exist because import_pattern matched.
            return re.search(r'["\']', context['input']).start()

        m = re.search(r'\w*$', context['input'])
        return m.start() if m else -1

    def gather_candidates(self, context):
        """Deoplete entry point: start the server on first use, then issue
        asynchronous completion requests against it."""
        if not self._do_nothing:
            if not self._is_server_started:
                startThread = threading.Thread(
                    target=self.initialize, name='Start Tern Server')
                startThread.start()
                startThread.join()
                self._is_server_started = True
                # First call returns no candidates; the server is now ready
                # for subsequent requests (implicit None, as before).
            elif self._port:
                if context['is_async']:
                    # A previous request is in flight; report its result once
                    # the worker thread has populated self.candidates.
                    if self.candidates is not None:
                        context['is_async'] = False
                        return self.candidates
                else:
                    self.candidates = None
                    context['is_async'] = True
                    line = context['position'][1]
                    col = context['complete_position']
                    pos = {"line": line - 1, "ch": col}

                    # Cache vim state used by the request thread.
                    self._current_buffer = self.vim.current.buffer[:]
                    self._buffer_length = len(self._current_buffer)
                    # NOTE(review): this could possibly use pos['line'] -- confirm.
                    self._current_line = self.vim.eval("line('.')") - 1
                    self._relative_file = self.vim.eval("expand('%:p')")
                    self._relative_file = self._relative_file[len(
                        self._project_directory) + 1:]

                    # When completing an import specifier, send the cursor
                    # position (the match end) rather than the quote start.
                    m = import_pattern.search(context['input'])
                    if m:
                        pos['ch'] = m.end()

                    startThread = threading.Thread(
                        target=self.completation, name='Request Completion', args=(pos,))
                    startThread.start()
                    startThread.join()
                    # Returning an empty list keeps the async request alive.
                    return []
        else:
            # Clean any async call.
            context['is_async'] = False
            return []

    def initialize(self):
        """Locate the project directory and start (or attach to) tern."""
        self._project_directory = self._search_tern_project_dir()
        try:
            self.start_server()
            self._url = 'http://{}:{}/'.format(self._localhost, self._port)
        except FileNotFoundError:
            # Tern binary not found: disable the source permanently.
            self._do_nothing = True

    def __del__(self):
        # BUG FIX: previously tested `self.is_initialized`, an attribute that
        # is never assigned in this class, so __del__ raised AttributeError.
        # Only stop the server if we actually spawned a process.
        if getattr(self, '_proc', None):
            self.stop_server()

    def start_server(self):
        """Start a persistent tern process, or reuse a running one whose
        port is recorded in the project's .tern-port file."""
        if not self._tern_command:
            self.error('No tern bin set.')
            return

        if not self._project_directory:
            self.error('Project directory is not valid.')
            return

        env = None

        portFile = os.path.join(self._project_directory, '.tern-port')
        if os.path.isfile(portFile):
            # BUG FIX: close the port file instead of leaking the handle.
            with open(portFile, 'r') as f:
                self._port = int(f.read())
            return

        if platform.system() == 'Darwin':
            # GUI-launched macOS editors may miss /usr/local/bin in PATH.
            env = os.environ.copy()
            env['PATH'] += ':/usr/local/bin'

        self._proc = subprocess.Popen(
            [self._tern_command, '--persistent'],
            cwd=self._project_directory,
            shell=is_window,
            env=env,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )

        output = ""
        while True:
            line = self._proc.stdout.readline().decode('utf-8')
            if not line:
                self.error('Failed to start server' +
                           (output and ':\n' + output))
                return

            match = re.match('Listening on port (\\d+)', line)
            if match:
                self._port = int(match.group(1))
                return
            else:
                output += line

    def stop_server(self):
        """Terminate the tern process started by this source, if any."""
        if self._proc is None:
            return

        # Closing stdin makes the persistent tern process exit cleanly.
        self._proc.stdin.close()
        self._proc.wait()
        self._proc = None

    def _search_tern_project_dir(self):
        """Walk upwards from the current file's directory looking for a
        .tern-project file; fall back to vim's cwd at the root."""
        directory = self._vim_current_path

        # If not a directory, don't start the server.
        if not os.path.isdir(directory):
            return None

        if directory:
            while True:
                parent = os.path.dirname(directory[:-1])

                if not parent:
                    return self._vim_current_cwd

                if os.path.isfile(os.path.join(directory, '.tern-project')):
                    return directory

                directory = parent

    def make_request(self, doc, silent):
        """POST *doc* as JSON to the tern server and return the decoded
        response, or None on error."""
        payload = json.dumps(doc).encode('utf-8')
        try:
            req = opener.open(self._url, payload)
            result = req.read()
            return json.loads(result.decode('utf8'))
        except HTTPError as error:
            message = error.read()
            self.error(message)
        except URLError as error:
            # Server unreachable: disable the source and tell the user.
            self._do_nothing = True
            self.vim.err_write(
                'Looks like tern was stopped or crashed. Delete .tern-port file and restart [n]vim\n')

    def run_command(self, query, pos, fragments=True, silent=False):
        """Build a tern query around *pos* and send it to the server.

        Large buffers are sent as a fragment around the cursor to keep the
        payload small; pos is shifted accordingly.
        """
        if isinstance(query, str):
            query = {'type': query}

        doc = {'query': query, 'files': []}

        if self._buffer_length > 250 and fragments:
            f = self.buffer_fragment()
            doc['files'].append(f)
            pos = {'line': pos['line'] - f['offsetLines'], 'ch': pos['ch']}
        else:
            doc['files'].append(self.full_buffer())

        # '#0' refers to the single file shipped with the request.
        query['file'] = '#0'
        query['end'] = pos
        query['lineCharPositions'] = True
        query['omitObjectPrototype'] = False
        query['sort'] = False

        data = self.make_request(doc, silent)

        return data

    def full_buffer(self):
        """Return the whole current buffer as a tern 'full' file entry."""
        text = self.buffer_slice(self._current_buffer, 0,
                                 len(self._current_buffer))
        return {'type': 'full',
                'name': self._relative_file,
                'text': text}

    def buffer_slice(self, buf, start, end):
        """Join buffer lines [start:end) into a single newline-separated string."""
        return '\n'.join(buf[start:end])

    def buffer_fragment(self):
        """Return a tern 'part' file entry: a window around the cursor,
        preferably starting at the least-indented `function` line within the
        previous 50 lines."""
        line = self._current_line
        buffer = self._current_buffer
        min_indent = None
        start = None

        for i in range(max(0, line - 50), line):
            if not re.match('.*\\bfunction\\b', buffer[i]):
                continue
            indent = len(re.match('^\\s*', buffer[i]).group(0))
            if min_indent is None or indent <= min_indent:
                min_indent = indent
                start = i

        if start is None:
            start = max(0, line - 50)

        end = min(len(buffer) - 1, line + 20)

        return {'type': 'part',
                'name': self._relative_file,
                'text': self.buffer_slice(buffer, start, end),
                'offsetLines': start}

    def completation(self, pos):
        """Request completions at *pos* and publish them in self.candidates."""
        command = {
            'type': 'completions',
            'types': self._tern_types,
            # BUG FIX: 'depths' previously sent self._tern_types, so the
            # user's `depths` setting was silently ignored.
            'depths': self._tern_depths,
            'docs': self._tern_docs,
            'filter': self._tern_filter,
            'caseInsensitive': self._tern_case_insensitive,
            'guess': self._tern_guess,
            'sort': self._tern_sort,
            'expandWordForward': self._tern_expand_word_forward,
            'omitObjectPrototype': self._tern_omit_object_prototype,
            'includeKeywords': self._tern_include_keywords,
            'inLiteral': self._tern_in_literal,
        }

        data = self.run_command(command, pos)
        completions = []

        if data is not None:
            for rec in data['completions']:
                item = {
                    'dup': 0,
                }

                if isinstance(rec, str):
                    item['word'] = rec
                else:
                    icon = rec.get('type')

                    # A completion whose type equals its name is an object.
                    if icon == rec['name']:
                        icon = 'object'

                    item['kind'] = icon
                    item['word'] = rec['name']
                    item['abbr'] = rec['name']

                    if self._tern_docs:
                        item['info'] = self.type_doc(rec)

                completions.append(item)

        self.candidates = completions

    def type_doc(self, rec):
        """Combine a completion's type and documentation into preview text."""
        tp = rec.get('type')
        result = rec.get('doc', ' ')
        if tp and tp != '?':
            result = tp + '\n' + result
        return result
|
_reloader.py | import os
import sys
import time
import subprocess
import threading
from itertools import chain
from werkzeug._internal import _log
from werkzeug._compat import PY2, iteritems, text_type
def _iter_module_files():
    """This iterates over all relevant Python files. It goes through all
    loaded files from modules, all files in folders of already loaded modules
    as well as all files reachable through a package.
    """
    # The list call is necessary on Python 3 in case the module
    # dictionary modifies during iteration.
    for module in list(sys.modules.values()):
        if module is None:
            continue
        filename = getattr(module, '__file__', None)
        if filename:
            if os.path.isdir(filename) and \
               os.path.exists(os.path.join(filename, "__init__.py")):
                # __file__ points at a package directory: use its
                # __init__.py as the module file.
                filename = os.path.join(filename, "__init__.py")

            old = None
            # Walk up the path until an existing file is found (for example,
            # modules loaded from a zip archive have paths that don't exist
            # on disk). `filename == old` detects the filesystem root.
            while not os.path.isfile(filename):
                old = filename
                filename = os.path.dirname(filename)
                if filename == old:
                    # Hit the root without finding a real file; `break`
                    # skips the while-else below, so nothing is yielded.
                    break
            else:
                # while-else: only reached when the loop exited normally,
                # i.e. an actual file exists. Map compiled files to source.
                if filename[-4:] in ('.pyc', '.pyo'):
                    filename = filename[:-1]
                yield filename
def _find_observable_paths(extra_files=None):
    """Finds all paths that should be observed."""
    # Directories derived from sys.path: a file entry contributes its
    # containing directory, anything else is taken as-is.
    observed = {
        os.path.dirname(os.path.abspath(entry))
        if os.path.isfile(entry) else os.path.abspath(entry)
        for entry in sys.path
    }

    observed.update(os.path.dirname(os.path.abspath(extra))
                    for extra in extra_files or ())

    # list() guards against sys.modules changing during iteration.
    for module in list(sys.modules.values()):
        module_file = getattr(module, '__file__', None)
        if module_file is None:
            continue
        observed.add(os.path.dirname(os.path.abspath(module_file)))

    return _find_common_roots(observed)
def _get_args_for_reloading():
"""Returns the executable. This contains a workaround for windows
if the executable is incorrectly reported to not have the .exe
extension which can cause bugs on reloading.
"""
rv = [sys.executable]
py_script = sys.argv[0]
if os.name == 'nt' and not os.path.exists(py_script) and \
os.path.exists(py_script + '.exe'):
py_script += '.exe'
if os.path.splitext(rv[0])[1] == '.exe' and os.path.splitext(py_script)[1] == '.exe':
rv.pop(0)
rv.append(py_script)
rv.extend(sys.argv[1:])
return rv
def _find_common_roots(paths):
"""Out of some paths it finds the common roots that need monitoring."""
paths = [x.split(os.path.sep) for x in paths]
root = {}
for chunks in sorted(paths, key=len, reverse=True):
node = root
for chunk in chunks:
node = node.setdefault(chunk, {})
node.clear()
rv = set()
def _walk(node, path):
for prefix, child in iteritems(node):
_walk(child, path + (prefix,))
if not node:
rv.add('/'.join(path))
_walk(root, ())
return rv
class ReloaderLoop(object):
    """Base class for the reloader implementations."""
    name = None

    # monkeypatched by testsuite. wrapping with `staticmethod` is required in
    # case time.sleep has been replaced by a non-c function (e.g. by
    # `eventlet.monkey_patch`) before we get here
    _sleep = staticmethod(time.sleep)

    def __init__(self, extra_files=None, interval=1):
        self.extra_files = {os.path.abspath(path)
                            for path in extra_files or ()}
        self.interval = interval

    def run(self):
        """Overridden by subclasses with the actual watch loop."""
        pass

    def restart_with_reloader(self):
        """Spawn a new Python interpreter with the same arguments as this one,
        but running the reloader thread.
        """
        while True:
            _log('info', ' * Restarting with %s' % self.name)
            args = _get_args_for_reloading()
            new_environ = os.environ.copy()
            new_environ['WERKZEUG_RUN_MAIN'] = 'true'

            # a weird bug on windows. sometimes unicode strings end up in the
            # environment and subprocess.call does not like this, encode them
            # to latin1 and continue.
            if os.name == 'nt' and PY2:
                for key, value in iteritems(new_environ):
                    if isinstance(value, text_type):
                        new_environ[key] = value.encode('iso-8859-1')

            exit_code = subprocess.call(args, env=new_environ,
                                        close_fds=False)
            # Exit code 3 means "reload requested"; anything else is final.
            if exit_code != 3:
                return exit_code

    def trigger_reload(self, filename):
        self.log_reload(filename)
        # The parent process interprets exit code 3 as a restart request.
        sys.exit(3)

    def log_reload(self, filename):
        absolute = os.path.abspath(filename)
        _log('info', ' * Detected change in %r, reloading' % absolute)
class StatReloaderLoop(ReloaderLoop):
    """Reloader that polls file modification times via os.stat()."""
    name = 'stat'

    def run(self):
        mtimes = {}
        while True:
            for filename in chain(_iter_module_files(),
                                  self.extra_files):
                try:
                    mtime = os.stat(filename).st_mtime
                except OSError:
                    # File vanished or is unreadable; skip it this round.
                    continue

                previous = mtimes.get(filename)
                if previous is None:
                    # First sighting: record the baseline, don't reload.
                    mtimes[filename] = mtime
                elif mtime > previous:
                    self.trigger_reload(filename)
            self._sleep(self.interval)
class WatchdogReloaderLoop(ReloaderLoop):
    # Reloader backed by the optional `watchdog` package's filesystem events.
    def __init__(self, *args, **kwargs):
        ReloaderLoop.__init__(self, *args, **kwargs)
        # Imported here (not at module level) so this module still loads
        # when watchdog is not installed.
        from watchdog.observers import Observer
        from watchdog.events import FileSystemEventHandler
        self.observable_paths = set()

        def _check_modification(filename):
            # Explicitly-listed extra files always trigger a reload.
            if filename in self.extra_files:
                self.trigger_reload(filename)
            dirname = os.path.dirname(filename)
            if dirname.startswith(tuple(self.observable_paths)):
                if filename.endswith(('.pyc', '.pyo', '.py')):
                    self.trigger_reload(filename)

        class _CustomHandler(FileSystemEventHandler):
            # Every event type funnels into the same modification check.
            def on_created(self, event):
                _check_modification(event.src_path)

            def on_modified(self, event):
                _check_modification(event.src_path)

            def on_moved(self, event):
                _check_modification(event.src_path)
                _check_modification(event.dest_path)

            def on_deleted(self, event):
                _check_modification(event.src_path)

        # Derive a readable name from the observer class, e.g.
        # "inotify reloader" from "InotifyObserver".
        reloader_name = Observer.__name__.lower()
        if reloader_name.endswith('observer'):
            reloader_name = reloader_name[:-8]
        reloader_name += ' reloader'

        self.name = reloader_name

        self.observer_class = Observer
        self.event_handler = _CustomHandler()
        self.should_reload = False

    def trigger_reload(self, filename):
        # This is called inside an event handler, which means throwing
        # SystemExit has no effect.
        # https://github.com/gorakhargosh/watchdog/issues/294
        self.should_reload = True
        self.log_reload(filename)

    def run(self):
        watches = {}
        observer = self.observer_class()
        observer.start()

        try:
            while not self.should_reload:
                to_delete = set(watches)
                paths = _find_observable_paths(self.extra_files)
                for path in paths:
                    if path not in watches:
                        try:
                            watches[path] = observer.schedule(
                                self.event_handler, path, recursive=True)
                        except OSError:
                            # Clear this path from list of watches We don't want
                            # the same error message showing again in the next
                            # iteration.
                            watches[path] = None
                    to_delete.discard(path)
                # Unschedule paths that are no longer observable.
                for path in to_delete:
                    watch = watches.pop(path, None)
                    if watch is not None:
                        observer.unschedule(watch)
                self.observable_paths = paths
                self._sleep(self.interval)
        finally:
            observer.stop()
            observer.join()

        # Exit code 3 tells the parent process to restart us.
        sys.exit(3)
# Registry of reloader implementations, selectable by name.
reloader_loops = {
    'stat': StatReloaderLoop,
    'watchdog': WatchdogReloaderLoop,
}

# 'auto' prefers the event-driven watchdog backend when the optional
# watchdog package is importable, otherwise falls back to stat polling.
try:
    __import__('watchdog.observers')
except ImportError:
    reloader_loops['auto'] = reloader_loops['stat']
else:
    reloader_loops['auto'] = reloader_loops['watchdog']
def ensure_echo_on():
    """Ensure that echo mode is enabled. Some tools such as PDB disable
    it which causes usability issues after reload."""
    # tcgetattr would fail on a non-tty stdin, so bail out early.
    if not sys.stdin.isatty():
        return
    try:
        import termios
    except ImportError:
        # Not a Unix platform: nothing to restore.
        return
    attributes = termios.tcgetattr(sys.stdin)
    if not attributes[3] & termios.ECHO:
        # Set the ECHO bit in the lflags word and apply immediately.
        attributes[3] |= termios.ECHO
        termios.tcsetattr(sys.stdin, termios.TCSANOW, attributes)
def run_with_reloader(main_func, extra_files=None, interval=1,
                      reloader_type='auto'):
    """Run the given function in an independent python interpreter,
    restarting it whenever a watched file changes."""
    import signal
    reloader = reloader_loops[reloader_type](extra_files, interval)
    signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
    try:
        if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
            # Child process: run the app in a daemon thread while the main
            # thread watches files.
            ensure_echo_on()
            worker = threading.Thread(target=main_func, args=())
            worker.setDaemon(True)
            worker.start()
            reloader.run()
        else:
            # Parent process: keep respawning the child until it exits with
            # a code other than 3 ("reload requested").
            sys.exit(reloader.restart_with_reloader())
    except KeyboardInterrupt:
        pass
|
http.py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from collections import defaultdict, namedtuple
import json
from threading import Thread
from select import select
import socket
# Immutable identifying fields of a reported metric sample.
MetricKey = namedtuple('MetricKey', ['host', 'client_id', 'name', 'group', 'tags'])
# A single (timestamp, value) observation for a metric.
MetricValue = namedtuple('MetricValue', ['time', 'value'])

# Python's logging library doesn't define anything more detailed than DEBUG, but we'd like a finer-grained setting
# for highly detailed messages, e.g. logging every single incoming request.
TRACE = 5
class HttpMetricsCollector(object):
"""
HttpMetricsCollector enables collection of metrics from various Kafka clients instrumented with the
PushHttpMetricsReporter. It starts a web server locally and provides the necessary configuration for clients
to automatically report metrics data to this server. It also provides basic functionality for querying the
recorded metrics. This class can be used either as a mixin or standalone object.
"""
# The port to listen on on the worker node, which will be forwarded to the port listening on this driver node
REMOTE_PORT = 6789
def __init__(self, **kwargs):
"""
Create a new HttpMetricsCollector
:param period the period, in seconds, between updates that the metrics reporter configuration should define.
defaults to reporting once per second
:param args:
:param kwargs:
"""
self._http_metrics_period = kwargs.pop('period', 1)
super(HttpMetricsCollector, self).__init__(**kwargs)
# TODO: currently we maintain just a simple map from all key info -> value. However, some key fields are far
# more common to filter on, so we'd want to index by them, e.g. host, client.id, metric name.
self._http_metrics = defaultdict(list)
self._httpd = HTTPServer(('', 0), _MetricsReceiver)
self._httpd.parent = self
self._httpd.metrics = self._http_metrics
self._http_metrics_thread = Thread(target=self._run_http_metrics_httpd,
name='http-metrics-thread[%s]' % str(self))
self._http_metrics_thread.start()
self._forwarders = {}
@property
def http_metrics_url(self):
"""
:return: the URL to use when reporting metrics
"""
return "http://%s:%d" % ("localhost", self.REMOTE_PORT)
@property
def http_metrics_client_configs(self):
"""
Get client configurations that can be used to report data to this collector. Put these in a properties file for
clients (e.g. console producer or consumer) to have them push metrics to this driver. Note that in some cases
(e.g. streams, connect) these settings may need to be prefixed.
:return: a dictionary of client configurations that will direct a client to report metrics to this collector
"""
return {
"metric.reporters": "org.apache.kafka.tools.PushHttpMetricsReporter",
"metrics.url": self.http_metrics_url,
"metrics.period": self._http_metrics_period,
}
def start_node(self, node):
    # Look up the ephemeral port the local httpd actually bound to, then set
    # up the reverse forward from the node's REMOTE_PORT before the node is
    # started, so clients can report metrics as soon as they come up.
    local_port = self._httpd.socket.getsockname()[1]
    self.logger.debug('HttpMetricsCollector listening on %s', local_port)
    self._forwarders[self.idx(node)] = _ReverseForwarder(self.logger, node, self.REMOTE_PORT, local_port)

    super(HttpMetricsCollector, self).start_node(node)
def stop(self):
    """Stop the service, then shut down the local metrics web server."""
    super(HttpMetricsCollector, self).stop()

    thread = self._http_metrics_thread
    if not thread:
        return
    self.logger.debug("Shutting down metrics httpd")
    self._httpd.shutdown()
    thread.join()
    self.logger.debug("Finished shutting down metrics httpd")
def stop_node(self, node):
    """Stop the node, then tear down and forget its reverse port forward."""
    super(HttpMetricsCollector, self).stop_node(node)

    key = self.idx(node)
    self._forwarders[key].stop()
    self._forwarders.pop(key)
def metrics(self, host=None, client_id=None, name=None, group=None, tags=None):
    """
    Yield (key, [MetricValue, ...]) pairs for every recorded metric whose
    key matches all of the given filters; a filter left as None matches
    any value for that field.
    """
    filters = (
        (host, 'host'),
        (client_id, 'client_id'),
        (name, 'name'),
        (group, 'group'),
        (tags, 'tags'),
    )
    for key, values in self._http_metrics.iteritems():
        if all(wanted is None or wanted == getattr(key, field)
               for wanted, field in filters):
            yield (key, values)
def _run_http_metrics_httpd(self):
    """Thread target: serve metrics POSTs until stop() calls shutdown()."""
    self._httpd.serve_forever()
class _MetricsReceiver(BaseHTTPRequestHandler):
    """
    HTTP request handler that accepts requests from the PushHttpMetricsReporter and stores them back into the parent
    HttpMetricsCollector
    """

    def log_message(self, format, *args, **kwargs):
        # Don't do any logging here so we get rid of the mostly useless per-request Apache log-style info that spams
        # the debug log
        pass

    def do_POST(self):
        """Parse a JSON metrics report and record each metric sample under its key."""
        data = self.rfile.read(int(self.headers['Content-Length']))
        data = json.loads(data)
        self.server.parent.logger.log(TRACE, "POST %s\n\n%s\n%s", self.path, self.headers,
                                      json.dumps(data, indent=4, separators=(',', ': ')))
        # Acknowledge before processing; the reporter does not need a body.
        self.send_response(204)
        self.end_headers()

        client = data['client']
        host = client['host']
        client_id = client['client_id']
        ts = client['time']
        metrics = data['metrics']
        for raw_metric in metrics:
            name = raw_metric['name']
            group = raw_metric['group']
            # Convert tags to a tuple of pairs because dicts & lists are unhashable.
            # Bug fix: a stray trailing comma previously wrapped this in an extra
            # 1-tuple, so keys never matched a caller-supplied tags filter.
            # Sorting makes equal tag dicts produce identical keys regardless of
            # dict iteration order.
            tags = tuple(sorted(raw_metric['tags'].iteritems()))
            value = raw_metric['value']
            key = MetricKey(host=host, client_id=client_id, name=name, group=group, tags=tags)
            metric_value = MetricValue(time=ts, value=value)
            self.server.metrics[key].append(metric_value)
class _ReverseForwarder(object):
    """
    Runs reverse forwarding of a port on a node to a local port. This allows you to setup a server
    on the test driver that only assumes we have basic SSH access that ducktape guarantees is
    available for worker nodes.
    """

    def __init__(self, logger, node, remote_port, local_port):
        """Start forwarding remote_port on *node* to local_port on this driver."""
        self.logger = logger
        self._node = node
        self._local_port = local_port

        self.logger.debug('Forwarding %s port %d to driver port %d', node, remote_port, local_port)

        self._stopping = False

        self._transport = node.account.ssh_client.get_transport()
        self._transport.request_port_forward('', remote_port)

        self._accept_thread = Thread(target=self._accept)
        self._accept_thread.start()

    def stop(self):
        """Signal the accept loop to exit and wait up to 30s for it to do so."""
        self._stopping = True
        self._accept_thread.join(30)
        if self._accept_thread.isAlive():
            # Bug fix: the message was previously passed as two positional
            # RuntimeError arguments, so the node was never interpolated.
            raise RuntimeError("Failed to stop reverse forwarder on %s" % (self._node,))

    def _accept(self):
        # Poll with a 1s timeout so we notice self._stopping promptly.
        while not self._stopping:
            chan = self._transport.accept(1)
            if chan is None:
                continue
            thr = Thread(target=self._handler, args=(chan,))
            thr.setDaemon(True)
            thr.start()

    def _handler(self, chan):
        """Shuttle bytes between the forwarded SSH channel and the local server."""
        sock = socket.socket()
        try:
            sock.connect(("localhost", self._local_port))
        except Exception as e:
            self.logger.error('Forwarding request to port %d failed: %r', self._local_port, e)
            return

        self.logger.log(TRACE, 'Connected! Tunnel open %r -> %r -> %d', chan.origin_addr, chan.getpeername(),
                        self._local_port)
        try:
            while True:
                r, w, x = select([sock, chan], [], [])
                if sock in r:
                    data = sock.recv(1024)
                    if len(data) == 0:
                        break
                    chan.send(data)
                if chan in r:
                    data = chan.recv(1024)
                    if len(data) == 0:
                        break
                    sock.send(data)
        finally:
            # Always release both ends, even if the relay loop raises.
            chan.close()
            sock.close()
        self.logger.log(TRACE, 'Tunnel closed from %r', chan.origin_addr)
|
threading.py | import multiprocessing
def async(func):
"""
A decorator to make a function asynchronous,
using the multiprocessing python lib.
:param func: the function to decorate
"""
def wrapped(*args, **kwargs):
process = multiprocessing.Process(target=func, args=args, kwargs=kwargs)
process.start()
return wrapped
|
main_window.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
from .exception_window import Exception_Hook
from PyQt5.QtWidgets import *
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds, PrintError,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate)
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import Multisig_Wallet, CannotBumpFee
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
from .installwizard import WIF_HELP_TEXT
class StatusBarButton(QPushButton):
    """Small flat icon-only button used in the status bar; invokes *func* on
    click or when Return is pressed while focused."""

    def __init__(self, icon, tooltip, func):
        QPushButton.__init__(self, icon, '')
        self.func = func
        self.setToolTip(tooltip)
        self.setFlat(True)
        self.setMaximumWidth(25)
        self.setIconSize(QSize(25, 25))
        self.clicked.connect(self.onPress)

    def onPress(self, checked=False):
        """Slot for clicked(); drops the unwanted PyQt5 "checked" argument."""
        self.func()

    def keyPressEvent(self, e):
        if e.key() == Qt.Key_Return:
            self.func()
from electrum.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
# Qt signals used to marshal work from background/network threads onto the
# GUI thread (cross-thread signal emissions are queued by Qt).
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
notify_transactions_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
# (event name, args tuple) forwarded from the network layer; see on_network.
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet):
    """Build the main wallet window: state, tabs, menus, shortcuts, signal
    wiring and network callback registration, then load *wallet*."""
    QMainWindow.__init__(self)

    self.gui_object = gui_object
    self.config = config = gui_object.config
    self.setup_exception_hook()
    self.network = gui_object.daemon.network
    self.fx = gui_object.daemon.fx
    self.invoices = wallet.invoices
    self.contacts = wallet.contacts
    self.tray = gui_object.tray
    self.app = gui_object.app
    self.cleaned_up = False
    self.is_max = False
    self.payment_request = None
    self.checking_accounts = False
    self.qr_window = None
    self.not_enough_funds = False
    self.pluginsdialog = None
    self.require_fee_update = False
    self.tx_notifications = []
    self.tl_windows = []
    self.tx_external_keypairs = {}

    self.create_status_bar()
    # Set by background events, consumed by timer_actions on the GUI thread.
    self.need_update = threading.Event()

    self.decimal_point = config.get('decimal_point', 8)
    self.num_zeros = int(config.get('num_zeros',0))

    self.completions = QStringListModel()

    # Tab creation order matters: create_history_tab() also creates
    # self.history_list, which is referenced below.
    self.tabs = tabs = QTabWidget(self)
    self.send_tab = self.create_send_tab()
    self.receive_tab = self.create_receive_tab()
    self.addresses_tab = self.create_addresses_tab()
    self.utxo_tab = self.create_utxo_tab()
    self.console_tab = self.create_console_tab()
    self.contacts_tab = self.create_contacts_tab()
    tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
    tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
    tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))

    def add_optional_tab(tabs, tab, icon, description, name):
        # Remember the tab's metadata so toggle_tab can re-insert it at its
        # canonical position later; only show it if the config says so.
        tab.tab_icon = icon
        tab.tab_description = description
        tab.tab_pos = len(tabs)
        tab.tab_name = name
        if self.config.get('show_{}_tab'.format(name), False):
            tabs.addTab(tab, icon, description.replace("&", ""))

    add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
    add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
    add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
    add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")

    tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
    self.setCentralWidget(tabs)

    if self.config.get("is_maximized"):
        self.showMaximized()

    self.setWindowIcon(QIcon(":icons/electrum.png"))
    self.init_menubar()

    # weakref proxy so the shortcut lambdas don't keep the tab widget alive
    wrtabs = weakref.proxy(tabs)
    QShortcut(QKeySequence("Ctrl+W"), self, self.close)
    QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
    QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
    QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
    QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))

    for i in range(wrtabs.count()):
        # i=i default binds the loop variable now (avoids late binding)
        QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))

    self.payment_request_ok_signal.connect(self.payment_request_ok)
    self.payment_request_error_signal.connect(self.payment_request_error)
    self.notify_transactions_signal.connect(self.notify_transactions)
    self.history_list.setFocus(True)

    # network callbacks
    if self.network:
        self.network_signal.connect(self.on_network_qt)
        interests = ['updated', 'new_transaction', 'status',
                     'banner', 'verified', 'fee']
        # To avoid leaking references to "self" that prevent the
        # window from being GC-ed when closed, callbacks should be
        # methods of this class only, and specifically not be
        # partials, lambdas or methods of subobjects. Hence...
        self.network.register_callback(self.on_network, interests)
        # set initial message
        self.console.showMessage(self.network.banner)
        self.network.register_callback(self.on_quotes, ['on_quotes'])
        self.network.register_callback(self.on_history, ['on_history'])
        self.new_fx_quotes_signal.connect(self.on_fx_quotes)
        self.new_fx_history_signal.connect(self.on_fx_history)

    # update fee slider in case we missed the callback
    self.fee_slider.update()
    self.load_wallet(wallet)
    self.connect_slots(gui_object.timer)
    self.fetch_alias()
def on_history(self, b):
    """Network-thread callback for 'on_history'; bounce into the GUI thread."""
    self.new_fx_history_signal.emit()
def setup_exception_hook(self):
    """Install the crash-reporter exception hook for this window."""
    Exception_Hook(self)
def on_fx_history(self):
    """GUI-thread handler: refresh the views that display fiat history values."""
    self.history_list.refresh_headers()
    self.history_list.update()
    self.address_list.update()
def on_quotes(self, b):
    """Network-thread callback for new FX quotes; bounce into the GUI thread."""
    self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
    """GUI-thread handler for fresh exchange-rate quotes."""
    self.update_status()
    # Re-trigger the last-edited field of each fiat/BTC amount pair so the
    # partner field recomputes with the new rate.
    for fiat_edit, btc_edit in ((self.fiat_send_e, self.amount_e),
                                (self.fiat_receive_e, self.receive_amount_e)):
        edit = fiat_edit if fiat_edit.is_last_edited else btc_edit
        edit.textEdited.emit(edit.text())
    # History tab needs updating if it used spot
    if self.fx.history_used_spot:
        self.history_list.update()
def toggle_tab(self, tab):
    """Show or hide an optional tab, persisting the choice in the config and
    updating the corresponding View-menu entry."""
    show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
    self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
    tab.menu_action.setText((_("Hide") if show else _("Show")) + " " + tab.tab_description)
    if not show:
        self.tabs.removeTab(self.tabs.indexOf(tab))
        return
    # Re-insert the tab at its canonical position: just before the first
    # visible tab whose tab_pos is greater than ours.
    index = len(self.tabs)
    for i in range(len(self.tabs)):
        try:
            if tab.tab_pos < self.tabs.widget(i).tab_pos:
                index = i
                break
        except AttributeError:
            # the three permanent tabs carry no tab_pos
            pass
    self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
def push_top_level_window(self, window):
    """Register *window* (e.g. a tx dialog) as the current top-level window
    so new dialogs are parented appropriately. This used to be done by
    passing the parent explicitly, but hardware wallet prompts don't know
    their parent window."""
    self.tl_windows.append(window)
def pop_top_level_window(self, window):
    """Unregister a window previously passed to push_top_level_window."""
    self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
    """Return the window new dialogs should be parented to, honouring any
    pushed tx-dialog windows and an optional suitability predicate."""
    override = None
    if self.tl_windows:
        candidate = self.tl_windows[-1]
        # only override if ok for test_func
        if test_func is None or test_func(candidate):
            override = candidate
    return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
    """Label used in debug output: '<base name>/<wallet basename>'."""
    basename = self.wallet.basename() if self.wallet else "None"
    return "%s/%s" % (PrintError.diagnostic_name(self), basename)
def is_hidden(self):
    """True when the window is minimized or not currently visible."""
    if self.isMinimized():
        return True
    return self.isHidden()
def show_or_hide(self):
    """Toggle window visibility (used by the tray icon)."""
    if not self.is_hidden():
        self.hide()
    else:
        self.bring_to_top()
def bring_to_top(self):
    """Unhide the window, then raise it above other windows."""
    self.show()
    self.raise_()
def on_error(self, exc_info):
    """Generic task-error handler: print the traceback (best effort) and
    show an error popup, except for user-initiated cancellations."""
    if isinstance(exc_info[1], UserCancelled):
        return
    try:
        traceback.print_exception(*exc_info)
    except OSError:
        pass  # see #4418; try to at least show popup:
    self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
    # Network-layer callback (registered in __init__). Cheap state updates
    # happen inline; anything that must touch Qt widgets is re-emitted as a
    # Qt signal so it runs on the GUI thread.
    if event == 'updated':
        self.need_update.set()
        self.gui_object.network_updated_signal_obj.network_updated_signal \
            .emit(event, args)
    elif event == 'new_transaction':
        self.tx_notifications.append(args[0])
        self.notify_transactions_signal.emit()
    elif event in ['status', 'banner', 'verified', 'fee']:
        # Handle in GUI thread
        self.network_signal.emit(event, args)
    else:
        self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
    # Handle a network message in the GUI thread (receives network_signal
    # emissions from on_network).
    if event == 'status':
        self.update_status()
    elif event == 'banner':
        self.console.showMessage(args[0])
    elif event == 'verified':
        self.history_list.update_item(*args)
    elif event == 'fee':
        if self.config.is_dynfee():
            self.fee_slider.update()
            self.do_update_fee()
    elif event == 'fee_histogram':
        if self.config.is_dynfee():
            self.fee_slider.update()
            self.do_update_fee()
        # todo: update only unconfirmed tx
        self.history_list.update()
    else:
        self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
    """Resolve the configured OpenAlias on a daemon thread; emits
    alias_received_signal once resolution finishes."""
    self.alias_info = None
    alias = self.config.get('alias')
    if not alias:
        return
    alias = str(alias)

    def resolve():
        self.alias_info = self.contacts.resolve_openalias(alias)
        self.alias_received_signal.emit()

    t = threading.Thread(target=resolve)
    t.setDaemon(True)
    t.start()
def close_wallet(self):
    """Run wallet-close hooks; a no-op when no wallet is loaded."""
    if not self.wallet:
        return
    self.print_error('close_wallet', self.wallet.storage.path)
    run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
    # Attach *wallet* to this window and bring every view, menu and the
    # window geometry up to date. @profiler logs how long this takes.
    wallet.thread = TaskThread(self, self.on_error)
    self.wallet = wallet
    self.update_recently_visited(wallet.storage.path)
    # address used to create a dummy transaction and estimate transaction fee
    self.history_list.update()
    self.address_list.update()
    self.utxo_list.update()
    self.need_update.set()
    # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
    self.notify_transactions()
    # update menus
    self.seed_menu.setEnabled(self.wallet.has_seed())
    self.update_lock_icon()
    self.update_buttons_on_seed()
    self.update_console()
    self.clear_receive_tab()
    self.request_list.update()
    self.tabs.show()
    self.init_geometry()
    if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
        self.hide()
    else:
        self.show()
    self.watching_only_changed()
    run_hook('load_wallet', wallet, self)
def init_geometry(self):
    """Restore the window geometry saved in wallet storage ("winpos-qt"),
    falling back to a default size when the stored value is missing or no
    longer fits on the current screen."""
    winpos = self.wallet.storage.get("winpos-qt")
    try:
        screen = self.app.desktop().screenGeometry()
        # Validate explicitly instead of via `assert`, which is stripped
        # under -O and would let an off-screen geometry be applied.
        if not winpos or not screen.contains(QRect(*winpos)):
            raise ValueError('stored geometry unusable: %r' % (winpos,))
        self.setGeometry(*winpos)
    except Exception:
        # `except Exception` (not a bare except) so KeyboardInterrupt and
        # SystemExit still propagate.
        self.print_error("using default geometry")
        self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
    # Refresh the window title and menu availability to reflect whether the
    # wallet can sign (has seed / private keys) or is watching-only.
    name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
    title = '%s %s - %s' % (name, self.wallet.electrum_version,
                            self.wallet.basename())
    extra = [self.wallet.storage.get('wallet_type', '?')]
    if self.wallet.is_watching_only():
        self.warn_if_watching_only()
        extra.append(_('watching only'))
    title += ' [%s]'% ', '.join(extra)
    self.setWindowTitle(title)
    self.password_menu.setEnabled(self.wallet.may_have_password())
    self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
    self.import_address_menu.setVisible(self.wallet.can_import_address())
    self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
    """Pop an information dialog if the wallet cannot spend (watching-only)."""
    if not self.wallet.is_watching_only():
        return
    msg = ' '.join([
        _("This wallet is watching-only."),
        _("This means you will not be able to spend Fujicoins with it."),
        _("Make sure you own the seed phrase or the private keys, before you request Fujicoins to be sent to this wallet.")
    ])
    self.show_warning(msg, title=_('Information'))
def open_wallet(self):
    """Prompt for a wallet file and open it in a (possibly new) window."""
    try:
        wallet_folder = self.get_wallet_folder()
    except FileNotFoundError as e:
        self.show_error(str(e))
        return
    filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
    if filename:
        self.gui_object.new_window(filename)
def backup_wallet(self):
    """Ask the user for a destination and copy the current wallet file there."""
    path = self.wallet.storage.path
    wallet_folder = os.path.dirname(path)
    filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
    if not filename:
        return
    new_path = os.path.join(wallet_folder, filename)
    if new_path != path:
        try:
            # copy2 also preserves file metadata (timestamps, permissions)
            shutil.copy2(path, new_path)
            self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
        except BaseException as reason:
            self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
    """Move *filename* to the front of the recently-open list (max 5 kept)
    and rebuild the File > Recently open menu with Ctrl+1..5 shortcuts."""
    recent = self.config.get('recently_open', [])
    try:
        # probe only: raises if the stored value is not a sortable list,
        # in which case we discard it; the result is intentionally unused
        sorted(recent)
    except:
        recent = []
    if filename in recent:
        recent.remove(filename)
    recent.insert(0, filename)
    recent = recent[:5]
    self.config.set_key('recently_open', recent)
    self.recently_visited_menu.clear()
    for i, k in enumerate(sorted(recent)):
        b = os.path.basename(k)
        def loader(k):
            # bind k now; a plain lambda would late-bind the loop variable
            return lambda: self.gui_object.new_window(k)
        self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
    self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
    """Absolute path of the directory containing the configured wallet file."""
    wallet_path = os.path.abspath(self.config.get_wallet_path())
    return os.path.dirname(wallet_path)
def new_wallet(self):
    """Pick the first unused default filename ("wallet_N") in the wallet
    folder and launch the new/restore wizard for it."""
    try:
        wallet_folder = self.get_wallet_folder()
    except FileNotFoundError as e:
        self.show_error(str(e))
        return
    i = 1
    filename = "wallet_%d" % i
    while filename in os.listdir(wallet_folder):
        i += 1
        filename = "wallet_%d" % i
    full_path = os.path.join(wallet_folder, filename)
    self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
    """Build the full menu bar (File / Wallet / View / Tools / Help) and
    keep references to the entries whose enabled/visible state is toggled
    later (see watching_only_changed)."""
    menubar = QMenuBar()

    file_menu = menubar.addMenu(_("&File"))
    self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
    file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
    file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
    file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
    file_menu.addAction(_("Delete"), self.remove_wallet)
    file_menu.addSeparator()
    file_menu.addAction(_("&Quit"), self.close)

    wallet_menu = menubar.addMenu(_("&Wallet"))
    wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
    wallet_menu.addSeparator()
    # kept as attributes: enabled/visible state depends on the wallet type
    self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
    self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
    self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
    self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
    self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
    self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
    self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
    wallet_menu.addSeparator()

    addresses_menu = wallet_menu.addMenu(_("&Addresses"))
    addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
    labels_menu = wallet_menu.addMenu(_("&Labels"))
    labels_menu.addAction(_("&Import"), self.do_import_labels)
    labels_menu.addAction(_("&Export"), self.do_export_labels)
    history_menu = wallet_menu.addMenu(_("&History"))
    history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
    history_menu.addAction(_("&Summary"), self.history_list.show_summary)
    history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
    history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
    contacts_menu = wallet_menu.addMenu(_("Contacts"))
    contacts_menu.addAction(_("&New"), self.new_contact_dialog)
    contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
    contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
    invoices_menu = wallet_menu.addMenu(_("Invoices"))
    invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
    invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
    wallet_menu.addSeparator()
    wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))

    def add_toggle_action(view_menu, tab):
        # menu entry text mirrors the tab's current visibility
        is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
        item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
        tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))

    view_menu = menubar.addMenu(_("&View"))
    add_toggle_action(view_menu, self.addresses_tab)
    add_toggle_action(view_menu, self.utxo_tab)
    add_toggle_action(view_menu, self.contacts_tab)
    add_toggle_action(view_menu, self.console_tab)

    tools_menu = menubar.addMenu(_("&Tools"))

    # Settings / Preferences are all reserved keywords in macOS using this as work around
    tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
    tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
    tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
    tools_menu.addSeparator()
    tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
    tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
    tools_menu.addSeparator()

    paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)

    raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
    raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
    raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
    raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
    raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
    self.raw_transaction_menu = raw_transaction_menu
    run_hook('init_menubar_tools', self, tools_menu)

    help_menu = menubar.addMenu(_("&Help"))
    help_menu.addAction(_("&About"), self.show_about)
    help_menu.addAction(_("&Official website"), lambda: webbrowser.open("http://www.fujicoin.org"))
    help_menu.addSeparator()
    help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
    help_menu.addAction(_("&Report Bug"), self.show_report_bug)
    help_menu.addSeparator()
    help_menu.addAction(_("&Donate to server"), self.donate_to_server)

    self.setMenuBar(menubar)
def donate_to_server(self):
    """Pre-fill the Send tab with the current server's donation address."""
    addr = self.network.get_donation_address()
    if not addr:
        self.show_error(_('No donation address for this server'))
        return
    host = self.network.get_parameters()[0]
    self.pay_to_URI('fujicoin:%s?message=donation for %s' % (addr, host))
def show_about(self):
    """Show the Help > About dialog with version and project blurb."""
    QMessageBox.about(self, "Electrum",
                      (_("Version")+" %s" % self.wallet.electrum_version + "\n\n" +
                       _("Electrum's focus is speed, with low resource usage and simplifying Fujicoin.") + " " +
                       _("You do not need to perform regular backups, because your wallet can be "
                         "recovered from a secret phrase that you can memorize or write on paper.") + " " +
                       _("Startup times are instant because it operates in conjunction with high-performance "
                         "servers that handle the most complicated parts of the Fujicoin system.") + "\n\n" +
                       _("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
    """Show the Help > Report Bug dialog pointing at the github issue tracker."""
    msg = ' '.join([
        _("Please report any bugs as issues on github:<br/>"),
        "<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
        _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
        _("Try to explain not only what the bug is, but how it occurs.")
    ])
    self.show_message(msg, title="Electrum - " + _("Reporting Bugs"))
def notify_transactions(self):
    """Show desktop notifications for queued incoming transactions.

    Three or more pending notifications are combined into one summary
    message; fewer are announced individually.
    """
    if not self.network or not self.network.is_connected():
        return
    self.print_error("Notifying GUI")
    if len(self.tx_notifications) > 0:
        # Combine the transactions if there are at least three
        num_txns = len(self.tx_notifications)
        if num_txns >= 3:
            total_amount = 0
            for tx in self.tx_notifications:
                is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
                if v > 0:
                    total_amount += v
            self.notify(_("{} new transactions received: Total amount received in the new transactions {}")
                        .format(num_txns, self.format_amount_and_units(total_amount)))
            self.tx_notifications = []
        else:
            # Bug fix: iterate over a snapshot. Removing items from the list
            # while iterating it skipped every other notification.
            for tx in list(self.tx_notifications):
                if tx:
                    self.tx_notifications.remove(tx)
                    is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
                    if v > 0:
                        self.notify(_("New transaction received: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
    """Show a desktop notification via the system tray icon."""
    if self.tray:
        try:
            # this requires Qt 5.9
            self.tray.showMessage("Electrum", message, QIcon(":icons/electrum_dark_icon"), 20000)
        except TypeError:
            # older Qt: fall back to the standard information icon
            self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter=""):
    """QFileDialog.getSaveFileName wrapper that seeds the dialog with the
    remembered directory and persists the one the user picks ('io_dir')."""
    directory = self.config.get('io_dir', os.path.expanduser('~'))
    path = os.path.join(directory, filename)
    chosen, __ = QFileDialog.getSaveFileName(self, title, path, filter)
    if chosen:
        new_dir = os.path.dirname(chosen)
        if new_dir != directory:
            self.config.set_key('io_dir', new_dir, True)
    return chosen
def connect_slots(self, sender):
    """Hook the shared GUI timer to this window's periodic actions."""
    sender.timer_signal.connect(self.timer_actions)
def timer_actions(self):
    """Periodic UI maintenance driven by the shared GUI timer.

    Note this runs in the GUI thread.
    """
    if self.need_update.is_set():
        self.need_update.clear()
        self.update_wallet()
    # resolve aliases
    # FIXME this is a blocking network call that has a timeout of 5 sec
    self.payto_e.resolve()
    # update fee
    if self.require_fee_update:
        self.do_update_fee()
        self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
    """Render an integer satoshi amount using this window's unit settings."""
    return format_satoshis(
        x,
        self.num_zeros,
        self.decimal_point,
        is_diff=is_diff,
        whitespaces=whitespaces,
    )
def format_amount_and_units(self, amount):
    """Amount rendered in the base unit, with the fiat equivalent appended
    in parentheses when exchange rates are available."""
    text = self.format_amount(amount) + ' ' + self.base_unit()
    fiat = self.fx.format_amount_and_units(amount) if self.fx else None
    if text and fiat:
        text += ' (%s)' % fiat
    return text
def format_fee_rate(self, fee_rate):
    """Render a fee rate given in sat/kbyte as 'N sat/byte'."""
    per_byte = fee_rate / 1000
    return format_fee_satoshis(per_byte, self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
    """Decimal point of the currently selected base unit (e.g. 8 for BTC)."""
    return self.decimal_point
def base_unit(self):
    """Name of the configured base unit, derived from decimal_point."""
    return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
    """Keep a BTC amount edit and its fiat counterpart in sync: editing one
    recomputes the other from the current exchange rate. The `follows`
    flag breaks the feedback loop when a field is updated programmatically.
    """
    def edit_changed(edit):
        if edit.follows:
            # this change was caused by us updating the partner field
            return
        edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
        fiat_e.is_last_edited = (edit == fiat_e)
        amount = edit.get_amount()
        rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
        if rate.is_nan() or amount is None:
            # no usable rate/amount: blank the derived field(s)
            if edit is fiat_e:
                btc_e.setText("")
                if fee_e:
                    fee_e.setText("")
            else:
                fiat_e.setText("")
        else:
            if edit is fiat_e:
                btc_e.follows = True
                btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                btc_e.follows = False
                if fee_e:
                    window.update_fee()
            else:
                fiat_e.follows = True
                fiat_e.setText(self.fx.ccy_amount_str(
                    amount * Decimal(rate) / COIN, False))
                fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                fiat_e.follows = False

    btc_e.follows = False
    fiat_e.follows = False
    fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
    btc_e.textChanged.connect(partial(edit_changed, btc_e))
    fiat_e.is_last_edited = False
def update_status(self):
    """Refresh the status bar text/icon and the tray tooltip from the
    current network and wallet state."""
    if not self.wallet:
        return

    if self.network is None or not self.network.is_running():
        text = _("Offline")
        icon = QIcon(":icons/status_disconnected.png")

    elif self.network.is_connected():
        server_height = self.network.get_server_height()
        server_lag = self.network.get_local_height() - server_height
        # Server height can be 0 after switching to a new server
        # until we get a headers subscription request response.
        # Display the synchronizing message in that case.
        if not self.wallet.up_to_date or server_height == 0:
            text = _("Synchronizing...")
            icon = QIcon(":icons/status_waiting.png")
        elif server_lag > 1:
            text = _("Server is lagging ({} blocks)").format(server_lag)
            icon = QIcon(":icons/status_lagging.png")
        else:
            # confirmed, unconfirmed, unmatured balances
            c, u, x = self.wallet.get_balance()
            text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
            if u:
                text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
            if x:
                text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())

            # append fiat balance and price
            if self.fx.is_enabled():
                text += self.fx.get_fiat_status_text(c + u + x,
                    self.base_unit(), self.get_decimal_point()) or ''
            if not self.network.proxy:
                icon = QIcon(":icons/status_connected.png")
            else:
                icon = QIcon(":icons/status_connected_proxy.png")
    else:
        if self.network.proxy:
            text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
        else:
            text = _("Not connected")
        icon = QIcon(":icons/status_disconnected.png")

    self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
    self.balance_label.setText(text)
    self.status_button.setIcon( icon )
def update_wallet(self):
    """Refresh the status bar, and all tabs when synced or offline."""
    self.update_status()
    offline = not self.network or not self.network.is_connected()
    if self.wallet.up_to_date or offline:
        self.update_tabs()
def update_tabs(self):
    # Refresh every list view plus the payto autocompleter after a wallet
    # state change (called from update_wallet).
    self.history_list.update()
    self.request_list.update()
    self.address_list.update()
    self.utxo_list.update()
    self.contact_list.update()
    self.invoice_list.update()
    self.update_completions()
def create_history_tab(self):
    """Create the History tab widget (list + optional toolbar)."""
    # deferred import; module-level imports for this file live at the top
    from .history_list import HistoryList
    self.history_list = l = HistoryList(self)
    l.searchable_list = l
    toolbar = l.create_toolbar(self.config)
    # restore the user's last toolbar visibility choice
    toolbar_shown = self.config.get('show_toolbar_history', False)
    l.show_toolbar(toolbar_shown)
    return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
    def show_transaction(self, tx, tx_desc = None):
        '''Open the transaction-details dialog for *tx*.

        tx_desc is set only for txs created in the Send tab; it is passed
        through to the dialog (presumably used as the tx label — confirm
        in the transaction dialog module).'''
        show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Fujicoin address where the payment should be received. Note that each payment request uses a different Fujicoin address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Fujicoin addresses.'),
_('The fujicoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
    def delete_payment_request(self, addr):
        """Remove the stored payment request for *addr* and reset the receive UI."""
        self.wallet.remove_payment_request(addr, self.config)
        self.request_list.update()
        self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
    def save_payment_request(self):
        """Create and store a payment request from the receive-form fields.

        Requires at least a message or an amount. On success the request is
        signed (if an alias is configured) and the Save button disabled; the
        request and address lists are refreshed either way.
        """
        addr = str(self.receive_address_e.text())
        amount = self.receive_amount_e.get_amount()
        message = self.receive_message_e.text()
        if not message and not amount:
            self.show_error(_('No message or amount'))
            # NOTE(review): this path returns False while others return None;
            # callers appear to treat the result as a plain truthy value.
            return False
        i = self.expires_combo.currentIndex()
        # expiration_values is a list of (label, seconds) pairs; take seconds.
        expiration = list(map(lambda x: x[1], expiration_values))[i]
        req = self.wallet.make_payment_request(addr, amount, message, expiration)
        try:
            self.wallet.add_payment_request(req, self.config)
        except Exception as e:
            traceback.print_exc(file=sys.stderr)
            self.show_error(_('Error adding payment request') + ':\n' + str(e))
        else:
            self.sign_payment_request(addr)
            self.save_request_button.setEnabled(False)
        finally:
            self.request_list.update()
            self.address_list.update()
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
    def export_payment_request(self, addr):
        """Save the BIP70-serialized request for *addr* to a user-chosen file."""
        r = self.wallet.receive_requests.get(addr)
        pr = paymentrequest.serialize_request(r).SerializeToString()
        name = r['id'] + '.bip70'
        fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
        if fileName:
            with open(fileName, "wb+") as f:
                f.write(util.to_bytes(pr))
            self.show_message(_("Request saved successfully"))
            self.saved = True
    def new_payment_request(self):
        """Point the receive form at a fresh unused address.

        Non-deterministic wallets with no free address get an error; for
        deterministic wallets past the gap limit the user is warned before
        a new address is derived.
        """
        addr = self.wallet.get_unused_address()
        if addr is None:
            if not self.wallet.is_deterministic():
                msg = [
                    _('No more addresses in your wallet.'),
                    _('You are using a non-deterministic wallet, which cannot create new addresses.'),
                    _('If you want to create new addresses, use a deterministic wallet instead.')
                ]
                self.show_message(' '.join(msg))
                return
            if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
                return
            addr = self.wallet.create_new_address(False)
        self.set_receive_address(addr)
        self.expires_label.hide()
        self.expires_combo.show()
        self.new_request_button.setEnabled(False)
        self.receive_message_e.setFocus(1)
    def set_receive_address(self, addr):
        """Point the receive form at *addr*, clearing message and amount."""
        self.receive_address_e.setText(addr)
        self.receive_message_e.setText('')
        self.receive_amount_e.setAmount(None)
    def clear_receive_tab(self):
        """Reset the receive form to the wallet's current receiving address."""
        addr = self.wallet.get_receiving_address() or ''
        self.receive_address_e.setText(addr)
        self.receive_message_e.setText('')
        self.receive_amount_e.setAmount(None)
        self.expires_label.hide()
        self.expires_combo.show()
    def toggle_qr_window(self):
        """Show or hide the detached QR window, preserving its geometry.

        The window is created lazily on first use; its geometry is saved
        before hiding and restored on re-show.
        """
        from . import qrwindow
        if not self.qr_window:
            self.qr_window = qrwindow.QR_Window(self)
            self.qr_window.setVisible(True)
            self.qr_window_geometry = self.qr_window.geometry()
        else:
            if not self.qr_window.isVisible():
                self.qr_window.setVisible(True)
                self.qr_window.setGeometry(self.qr_window_geometry)
            else:
                self.qr_window_geometry = self.qr_window.geometry()
                self.qr_window.setVisible(False)
        self.update_receive_qr()
    def show_send_tab(self):
        """Switch the main tab bar to the Send tab."""
        self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
    def show_receive_tab(self):
        """Switch the main tab bar to the Receive tab."""
        self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
    def receive_at(self, addr):
        """Open the receive tab prefilled with *addr* (ignored if invalid)."""
        if not bitcoin.is_address(addr):
            return
        self.show_receive_tab()
        self.receive_address_e.setText(addr)
        self.new_request_button.setEnabled(True)
    def update_receive_qr(self):
        """Re-render the receive QR code and URI from the current form fields.

        Also toggles the Save button (enabled only when there is something
        to save) and mirrors the content into the detached QR window.
        """
        addr = str(self.receive_address_e.text())
        amount = self.receive_amount_e.get_amount()
        message = self.receive_message_e.text()
        self.save_request_button.setEnabled((amount is not None) or (message != ""))
        uri = util.create_URI(addr, amount, message)
        self.receive_qr.setData(uri)
        if self.qr_window and self.qr_window.isVisible():
            self.qr_window.set_content(addr, amount, message, uri)
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
    def create_send_tab(self):
        """Build the Send tab: payto/amount/fee form, buttons, invoice list.

        Wires the fee slider, manual fee and feerate fields together so that
        editing any one freezes it and recomputes the others. Returns the
        tab's QWidget.
        """
        # A 4-column grid layout. All the stretch is in the last column.
        # The exchange rate plugin adds a fiat widget in column 2
        self.send_grid = grid = QGridLayout()
        grid.setSpacing(8)
        grid.setColumnStretch(3, 1)
        from .paytoedit import PayToEdit
        self.amount_e = BTCAmountEdit(self.get_decimal_point)
        self.payto_e = PayToEdit(self)
        msg = _('Recipient of the funds.') + '\n\n'\
              + _('You may enter a Fujicoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Fujicoin address)')
        payto_label = HelpLabel(_('Pay to'), msg)
        grid.addWidget(payto_label, 1, 0)
        grid.addWidget(self.payto_e, 1, 1, 1, -1)
        completer = QCompleter()
        # NOTE(review): False is passed where Qt.CaseInsensitive (== 0) is
        # expected; it works because int(False) == 0, but is implicit.
        completer.setCaseSensitivity(False)
        self.payto_e.set_completer(completer)
        completer.setModel(self.completions)
        msg = _('Description of the transaction (not mandatory).') + '\n\n'\
              + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
        description_label = HelpLabel(_('Description'), msg)
        grid.addWidget(description_label, 2, 0)
        self.message_e = MyLineEdit()
        grid.addWidget(self.message_e, 2, 1, 1, -1)
        # 'From' row: optional explicit coin selection, hidden when unused.
        self.from_label = QLabel(_('From'))
        grid.addWidget(self.from_label, 3, 0)
        self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
        self.from_list.setHeaderHidden(True)
        self.from_list.setMaximumHeight(80)
        grid.addWidget(self.from_list, 3, 1, 1, -1)
        self.set_pay_from([])
        msg = _('Amount to be sent.') + '\n\n' \
              + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
              + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
              + _('Keyboard shortcut: type "!" to send all your coins.')
        amount_label = HelpLabel(_('Amount'), msg)
        grid.addWidget(amount_label, 4, 0)
        grid.addWidget(self.amount_e, 4, 1)
        self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
        if not self.fx or not self.fx.is_enabled():
            self.fiat_send_e.setVisible(False)
        grid.addWidget(self.fiat_send_e, 4, 2)
        self.amount_e.frozen.connect(
            lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
        self.max_button = EnterButton(_("Max"), self.spend_max)
        self.max_button.setFixedWidth(140)
        grid.addWidget(self.max_button, 4, 3)
        hbox = QHBoxLayout()
        hbox.addStretch(1)
        grid.addLayout(hbox, 4, 4)
        msg = _('Fujicoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
              + _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
              + _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
        self.fee_e_label = HelpLabel(_('Fee'), msg)
        def fee_cb(dyn, pos, fee_rate):
            """Slider callback: persist the chosen fee level/rate and refresh."""
            if dyn:
                if self.config.use_mempool_fees():
                    self.config.set_key('depth_level', pos, False)
                else:
                    self.config.set_key('fee_level', pos, False)
            else:
                self.config.set_key('fee_per_kb', fee_rate, False)

            if fee_rate:
                fee_rate = Decimal(fee_rate)
                self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
            else:
                self.feerate_e.setAmount(None)
            self.fee_e.setModified(False)

            self.fee_slider.activate()
            self.spend_max() if self.is_max else self.update_fee()
        self.fee_slider = FeeSlider(self, self.config, fee_cb)
        self.fee_slider.setFixedWidth(140)
        def on_fee_or_feerate(edit_changed, editing_finished):
            """Keep the absolute-fee and feerate fields mutually exclusive."""
            edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
            if editing_finished:
                if edit_changed.get_amount() is None:
                    # This is so that when the user blanks the fee and moves on,
                    # we go back to auto-calculate mode and put a fee back.
                    edit_changed.setModified(False)
            else:
                # edit_changed was edited just now, so make sure we will
                # freeze the correct fee setting (this)
                edit_other.setModified(False)
            self.fee_slider.deactivate()
            self.update_fee()
        class TxSizeLabel(QLabel):
            def setAmount(self, byte_size):
                self.setText(('x %s bytes =' % byte_size) if byte_size else '')
        self.size_e = TxSizeLabel()
        self.size_e.setAlignment(Qt.AlignCenter)
        self.size_e.setAmount(0)
        self.size_e.setFixedWidth(140)
        self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
        self.feerate_e = FeerateEdit(lambda: 0)
        self.feerate_e.setAmount(self.config.fee_per_byte())
        self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
        self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
        self.fee_e = BTCAmountEdit(self.get_decimal_point)
        self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
        self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
        def feerounding_onclick():
            """Explain why the displayed fee can differ from the actual fee."""
            text = (self.feerounding_text + '\n\n' +
                    _('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
                    _('At most 100 satoshis might be lost due to this rounding.') + ' ' +
                    _("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
                    _('Also, dust is not kept as change, but added to the fee.'))
            QMessageBox.information(self, 'Fee rounding', text)
        self.feerounding_icon = QPushButton(QIcon(':icons/info.png'), '')
        self.feerounding_icon.setFixedWidth(20)
        self.feerounding_icon.setFlat(True)
        self.feerounding_icon.clicked.connect(feerounding_onclick)
        self.feerounding_icon.setVisible(False)
        self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
        vbox_feelabel = QVBoxLayout()
        vbox_feelabel.addWidget(self.fee_e_label)
        vbox_feelabel.addStretch(1)
        grid.addLayout(vbox_feelabel, 5, 0)
        # Advanced fee controls (rate/size/fee) shown only with 'show_fee'.
        self.fee_adv_controls = QWidget()
        hbox = QHBoxLayout(self.fee_adv_controls)
        hbox.setContentsMargins(0, 0, 0, 0)
        hbox.addWidget(self.feerate_e)
        hbox.addWidget(self.size_e)
        hbox.addWidget(self.fee_e)
        hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
        hbox.addStretch(1)
        vbox_feecontrol = QVBoxLayout()
        vbox_feecontrol.addWidget(self.fee_adv_controls)
        vbox_feecontrol.addWidget(self.fee_slider)
        grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
        if not self.config.get('show_fee', False):
            self.fee_adv_controls.setVisible(False)
        self.preview_button = EnterButton(_("Preview"), self.do_preview)
        self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
        self.send_button = EnterButton(_("Send"), self.do_send)
        self.clear_button = EnterButton(_("Clear"), self.do_clear)
        buttons = QHBoxLayout()
        buttons.addStretch(1)
        buttons.addWidget(self.clear_button)
        buttons.addWidget(self.preview_button)
        buttons.addWidget(self.send_button)
        grid.addLayout(buttons, 6, 1, 1, 3)
        self.amount_e.shortcut.connect(self.spend_max)
        self.payto_e.textChanged.connect(self.update_fee)
        self.amount_e.textEdited.connect(self.update_fee)
        def reset_max(text):
            """Any manual amount edit leaves 'send all' mode."""
            self.is_max = False
            enable = not bool(text) and not self.amount_e.isReadOnly()
            self.max_button.setEnabled(enable)
        self.amount_e.textEdited.connect(reset_max)
        self.fiat_send_e.textEdited.connect(reset_max)
        def entry_changed():
            """Recolor amount/fee fields: red = not enough funds, blue = auto-filled."""
            text = ""
            amt_color = ColorScheme.DEFAULT
            fee_color = ColorScheme.DEFAULT
            feerate_color = ColorScheme.DEFAULT
            if self.not_enough_funds:
                amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
                feerate_color = ColorScheme.RED
                text = _( "Not enough funds" )
                c, u, x = self.wallet.get_frozen_balance()
                if c+u+x:
                    text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
            # blue color denotes auto-filled values
            elif self.fee_e.isModified():
                feerate_color = ColorScheme.BLUE
            elif self.feerate_e.isModified():
                fee_color = ColorScheme.BLUE
            elif self.amount_e.isModified():
                fee_color = ColorScheme.BLUE
                feerate_color = ColorScheme.BLUE
            else:
                amt_color = ColorScheme.BLUE
                fee_color = ColorScheme.BLUE
                feerate_color = ColorScheme.BLUE
            self.statusBar().showMessage(text)
            self.amount_e.setStyleSheet(amt_color.as_stylesheet())
            self.fee_e.setStyleSheet(fee_color.as_stylesheet())
            self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
        self.amount_e.textChanged.connect(entry_changed)
        self.fee_e.textChanged.connect(entry_changed)
        self.feerate_e.textChanged.connect(entry_changed)
        self.invoices_label = QLabel(_('Invoices'))
        from .invoice_list import InvoiceList
        self.invoice_list = InvoiceList(self)
        vbox0 = QVBoxLayout()
        vbox0.addLayout(grid)
        hbox = QHBoxLayout()
        hbox.addLayout(vbox0)
        w = QWidget()
        vbox = QVBoxLayout(w)
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        vbox.addWidget(self.invoices_label)
        vbox.addWidget(self.invoice_list)
        vbox.setStretchFactor(self.invoice_list, 1000)
        w.searchable_list = self.invoice_list
        run_hook('create_send_tab', grid)
        return w
    def spend_max(self):
        """Switch the send form to 'send all' mode and recompute the fee."""
        if run_hook('abort_send', self):
            return
        self.is_max = True
        self.do_update_fee()
    def update_fee(self):
        """Flag the fee as stale; the flag is consumed elsewhere (presumably
        a periodic timer that calls do_update_fee — confirm outside this chunk)."""
        self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
    def do_update_fee(self):
        '''Recalculate the fee. If the fee was manually input, retain it, but
        still build the TX to see if there are enough funds.

        Builds a draft transaction from the current form, then updates the
        size, fee, feerate and (in max mode) amount fields, keeping whichever
        of fee/feerate the user has frozen.
        '''
        freeze_fee = self.is_send_fee_frozen()
        freeze_feerate = self.is_send_feerate_frozen()
        amount = '!' if self.is_max else self.amount_e.get_amount()
        if amount is None:
            # Empty amount: nothing to estimate; clear any auto-filled fee.
            if not freeze_fee:
                self.fee_e.setAmount(None)
            self.not_enough_funds = False
            self.statusBar().showMessage('')
        else:
            fee_estimator = self.get_send_fee_estimator()
            outputs = self.payto_e.get_outputs(self.is_max)
            if not outputs:
                # No parsed outputs yet: estimate against a dummy recipient.
                _type, addr = self.get_payto_or_dummy()
                outputs = [TxOutput(_type, addr, amount)]
            is_sweep = bool(self.tx_external_keypairs)
            make_tx = lambda fee_est: \
                self.wallet.make_unsigned_transaction(
                    self.get_coins(), outputs, self.config,
                    fixed_fee=fee_est, is_sweep=is_sweep)
            try:
                tx = make_tx(fee_estimator)
                self.not_enough_funds = False
            except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
                if not freeze_fee:
                    self.fee_e.setAmount(None)
                if not freeze_feerate:
                    self.feerate_e.setAmount(None)
                self.feerounding_icon.setVisible(False)

                if isinstance(e, NotEnoughFunds):
                    self.not_enough_funds = True
                elif isinstance(e, NoDynamicFeeEstimates):
                    # No fee estimates yet: still show the estimated tx size.
                    try:
                        tx = make_tx(0)
                        size = tx.estimated_size()
                        self.size_e.setAmount(size)
                    except BaseException:
                        pass
                return
            except BaseException:
                traceback.print_exc(file=sys.stderr)
                return

            size = tx.estimated_size()
            self.size_e.setAmount(size)

            fee = tx.get_fee()
            fee = None if self.not_enough_funds else fee

            # Displayed fee/fee_rate values are set according to user input.
            # Due to rounding or dropping dust in CoinChooser,
            # actual fees often differ somewhat.
            if freeze_feerate or self.fee_slider.is_active():
                displayed_feerate = self.feerate_e.get_amount()
                if displayed_feerate is not None:
                    displayed_feerate = quantize_feerate(displayed_feerate)
                else:
                    # fallback to actual fee
                    displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
                    self.feerate_e.setAmount(displayed_feerate)
                displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
                self.fee_e.setAmount(displayed_fee)
            else:
                if freeze_fee:
                    displayed_fee = self.fee_e.get_amount()
                else:
                    # fallback to actual fee if nothing is frozen
                    displayed_fee = fee
                    self.fee_e.setAmount(displayed_fee)
                displayed_fee = displayed_fee if displayed_fee else 0
                # NOTE(review): displayed_fee was just coerced to 0 above, so
                # the 'is not None' check here can never be False.
                displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
                self.feerate_e.setAmount(displayed_feerate)

            # show/hide fee rounding icon
            feerounding = (fee - displayed_fee) if fee else 0
            self.set_feerounding_text(int(feerounding))
            self.feerounding_icon.setToolTip(self.feerounding_text)
            self.feerounding_icon.setVisible(abs(feerounding) >= 1)

            if self.is_max:
                amount = tx.output_value()
                # Subtract any plugin-added extra fee from the shown amount.
                __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
                amount_after_all_fees = amount - x_fee_amount
                self.amount_e.setAmount(amount_after_all_fees)
    def from_list_delete(self, item):
        """Remove *item*'s coin from the explicit 'pay from' selection."""
        i = self.from_list.indexOfTopLevelItem(item)
        self.pay_from.pop(i)
        self.redraw_from_list()
        self.update_fee()
    def from_list_menu(self, position):
        """Context menu for the 'From' coin list (offers removal)."""
        item = self.from_list.itemAt(position)
        menu = QMenu()
        menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
        menu.exec_(self.from_list.viewport().mapToGlobal(position))
    def set_pay_from(self, coins):
        """Replace the explicit 'pay from' coin selection and redraw it."""
        self.pay_from = list(coins)
        self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
    def protected(func):
        '''Password request wrapper. The password is passed to the function
        as the 'password' named argument. "None" indicates either an
        unencrypted wallet, or the user cancelled the password request.
        An empty input is passed as the empty string.

        Note: defined in the class body and used as a decorator on other
        methods (see sign_tx); the wrapped call runs with ``self`` bound
        normally.'''
        def request_password(self, *args, **kwargs):
            parent = self.top_level_window()
            password = None
            # Keep prompting until the password verifies or the user cancels.
            while self.wallet.has_keystore_encryption():
                password = self.password_dialog(parent=parent)
                if password is None:
                    # User cancelled password input
                    return
                try:
                    self.wallet.check_password(password)
                    break
                except Exception as e:
                    self.show_error(str(e), parent=parent)
                    continue

            kwargs['password'] = password
            return func(self, *args, **kwargs)
        return request_password
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
    def read_send_tab(self):
        """Validate the send form and collect what do_send needs.

        Returns (outputs, fee_estimator, label, coins) on success, or None
        after showing the user an error/warning.
        """
        if self.payment_request and self.payment_request.has_expired():
            self.show_error(_('Payment request has expired'))
            return
        label = self.message_e.text()

        if self.payment_request:
            # BIP70 request: outputs come from the request itself.
            outputs = self.payment_request.get_outputs()
        else:
            errors = self.payto_e.get_errors()
            if errors:
                self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
                return
            outputs = self.payto_e.get_outputs(self.is_max)

            # Warn when an alias could not be DNSSEC-validated.
            if self.payto_e.is_alias and self.payto_e.validated is False:
                alias = self.payto_e.toPlainText()
                msg = _('WARNING: the alias "{}" could not be validated via an additional '
                        'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
                msg += _('Do you wish to continue?')
                if not self.question(msg):
                    return

        if not outputs:
            self.show_error(_('No outputs'))
            return

        for o in outputs:
            if o.address is None:
                self.show_error(_('Fujicoin Address is None'))
                return
            if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
                self.show_error(_('Invalid Fujicoin Address'))
                return
            if o.value is None:
                self.show_error(_('Invalid Amount'))
                return

        fee_estimator = self.get_send_fee_estimator()
        coins = self.get_coins()
        return outputs, fee_estimator, label, coins
    def do_preview(self):
        """Show the transaction dialog for the current form without sending."""
        self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee_estimator, tx_desc, coins = r
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', False)
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
    @protected
    def sign_tx(self, tx, callback, password):
        """Sign *tx* then invoke *callback(success)*; the @protected wrapper
        injects 'password' (prompting when the keystore is encrypted)."""
        self.sign_tx_with_password(tx, callback, password)
    def sign_tx_with_password(self, tx, callback, password):
        '''Sign the transaction in a separate thread. When done, calls
        the callback with a success code of True or False.
        '''
        def on_success(result):
            callback(True)
        def on_failure(exc_info):
            self.on_error(exc_info)
            callback(False)
        # Plugins (e.g. trusted coin) may wrap the success handler.
        on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
        if self.tx_external_keypairs:
            # can sign directly
            task = partial(Transaction.sign, tx, self.tx_external_keypairs)
        else:
            task = partial(self.wallet.sign_transaction, tx, password)
        msg = _('Signing transaction...')
        WaitingDialog(self, msg, task, on_success, on_failure)
    def broadcast_transaction(self, tx, tx_desc):
        """Broadcast *tx* on a worker thread, then report the result in the GUI.

        When a BIP70 payment request is active, marks the invoice paid and
        sends the payment ACK with a refund address.
        """

        def broadcast_thread():
            # non-GUI thread
            pr = self.payment_request
            if pr and pr.has_expired():
                self.payment_request = None
                return False, _("Payment request has expired")
            status, msg = self.network.broadcast_transaction(tx)
            if pr and status is True:
                self.invoices.set_paid(pr, tx.txid())
                self.invoices.save()
                self.payment_request = None
                refund_address = self.wallet.get_receiving_addresses()[0]
                ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
                if ack_status:
                    msg = ack_msg
            return status, msg

        # Capture current TL window; override might be removed on return
        parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))

        def broadcast_done(result):
            # GUI thread
            if result:
                status, msg = result
                if status:
                    if tx_desc is not None and tx.is_complete():
                        self.wallet.set_label(tx.txid(), tx_desc)
                    parent.show_message(_('Payment sent.') + '\n' + msg)
                    self.invoice_list.update()
                    self.do_clear()
                else:
                    parent.show_error(msg)

        WaitingDialog(self, _('Broadcasting transaction...'),
                      broadcast_thread, broadcast_done, self.on_error)
    def query_choice(self, msg, choices):
        # Needed by QtHandler for hardware wallets
        """Modal radio-button chooser; returns the selected index or None."""
        dialog = WindowModalDialog(self.top_level_window())
        clayout = ChoicesLayout(msg, choices)
        vbox = QVBoxLayout(dialog)
        vbox.addLayout(clayout.layout())
        vbox.addLayout(Buttons(OkButton(dialog)))
        if not dialog.exec_():
            return None
        return clayout.selected_index()
    def lock_amount(self, b):
        """Freeze/unfreeze the amount field; Max is disabled while frozen."""
        self.amount_e.setFrozen(b)
        self.max_button.setEnabled(not b)
    def prepare_for_payment_request(self):
        """Lock the send form while a payment request is being fetched."""
        self.show_send_tab()
        self.payto_e.is_pr = True
        for e in [self.payto_e, self.message_e]:
            e.setFrozen(True)
        self.lock_amount(True)
        self.payto_e.setText(_("please wait..."))
        return True
    def delete_invoice(self, key):
        """Remove invoice *key* and refresh the invoice list."""
        self.invoices.remove(key)
        self.invoice_list.update()
    def payment_request_ok(self):
        """Populate the send form from a verified payment request.

        Stores the request as an invoice; if it is already paid the form is
        cleared instead. Field styling reflects whether the request expired.
        """
        pr = self.payment_request
        key = self.invoices.add(pr)
        status = self.invoices.get_status(key)
        self.invoice_list.update()
        if status == PR_PAID:
            self.show_message("invoice already paid")
            self.do_clear()
            self.payment_request = None
            return
        self.payto_e.is_pr = True
        if not pr.has_expired():
            self.payto_e.setGreen()
        else:
            self.payto_e.setExpired()
        self.payto_e.setText(pr.get_requestor())
        self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
        self.message_e.setText(pr.get_memo())
        # signal to set fee
        self.amount_e.textEdited.emit("")
    def payment_request_error(self):
        """Report a failed payment request and reset the send form."""
        self.show_message(self.payment_request.error)
        self.payment_request = None
        self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
    def pay_to_URI(self, URI):
        """Fill the send tab from a payment URI.

        URIs carrying a request link ('r') or a signed name switch to
        payment-request mode (fetched asynchronously via on_pr); plain
        address/amount/message fields are filled in directly.
        """
        if not URI:
            return
        try:
            out = util.parse_URI(URI, self.on_pr)
        except BaseException as e:
            self.show_error(_('Invalid fujicoin URI:') + '\n' + str(e))
            return
        self.show_send_tab()
        r = out.get('r')
        sig = out.get('sig')
        name = out.get('name')
        if r or (name and sig):
            self.prepare_for_payment_request()
            return
        address = out.get('address')
        amount = out.get('amount')
        label = out.get('label')
        message = out.get('message')
        # use label as description (not BIP21 compliant)
        if label and not message:
            message = label
        if address:
            self.payto_e.setText(address)
        if message:
            self.message_e.setText(message)
        if amount:
            self.amount_e.setAmount(amount)
            self.amount_e.textEdited.emit("")
    def do_clear(self):
        """Reset the entire send form to its initial state."""
        self.is_max = False
        self.not_enough_funds = False
        self.payment_request = None
        self.payto_e.is_pr = False
        for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
                  self.fee_e, self.feerate_e]:
            e.setText('')
            e.setFrozen(False)
        self.fee_slider.activate()
        self.feerate_e.setAmount(self.config.fee_per_byte())
        self.size_e.setAmount(0)
        self.feerounding_icon.setVisible(False)
        self.set_pay_from([])
        self.tx_external_keypairs = {}
        self.update_status()
        run_hook('do_clear', self)
    def set_frozen_state(self, addrs, freeze):
        """(Un)freeze *addrs* in the wallet and refresh the affected views."""
        self.wallet.set_frozen_state(addrs, freeze)
        self.address_list.update()
        self.utxo_list.update()
        self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_addresses', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
    def spend_coins(self, coins):
        """Restrict spending to *coins*, switch to the send tab and refresh the fee."""
        self.set_pay_from(coins)
        self.show_send_tab()
        self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
self.show_error('Cannot find payment request in wallet.')
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
data = f.write(pr.raw)
self.show_message(_('Invoice saved as' + ' ' + fn))
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
    def do_pay_invoice(self, key):
        """Load invoice *key* into the send tab and (re)verify it."""
        pr = self.invoices.get(key)
        self.payment_request = pr
        self.prepare_for_payment_request()
        pr.error = None  # this forces verify() to re-run
        if pr.verify(self.contacts):
            self.payment_request_ok()
        else:
            self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
    def create_status_bar(self):
        """Build the main-window status bar: balance label, hidden search box,
        and permanent buttons (password, preferences, seed, network)."""
        sb = QStatusBar()
        sb.setFixedHeight(35)
        qtVersion = qVersion()
        self.balance_label = QLabel("")
        self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
        self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
        sb.addWidget(self.balance_label)
        # search box starts hidden; toggled by toggle_search()
        self.search_box = QLineEdit()
        self.search_box.textChanged.connect(self.do_search)
        self.search_box.hide()
        sb.addPermanentWidget(self.search_box)
        # lock icon image is filled in later by update_lock_icon()
        self.lock_icon = QIcon()
        self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
        sb.addPermanentWidget(self.password_button)
        sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
        self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
        sb.addPermanentWidget(self.seed_button)
        self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
        sb.addPermanentWidget(self.status_button)
        run_hook('create_status_bar', sb)
        self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
    def change_password_dialog(self):
        """Let the user change, set or remove the wallet password.

        Hardware wallets whose file encryption key derives from the device
        (STO_EV_XPUB_PW) use a dedicated dialog: the "password" comes from
        the device, not from user input.
        """
        from electrum.storage import STO_EV_XPUB_PW
        if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
            from .password_dialog import ChangePasswordDialogForHW
            d = ChangePasswordDialogForHW(self, self.wallet)
            ok, encrypt_file = d.run()
            if not ok:
                return
            try:
                hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
            except UserCancelled:
                return
            except BaseException as e:
                traceback.print_exc(file=sys.stderr)
                self.show_error(str(e))
                return
            # the device-derived secret serves as both old and new password
            old_password = hw_dev_pw if self.wallet.has_password() else None
            new_password = hw_dev_pw if encrypt_file else None
        else:
            from .password_dialog import ChangePasswordDialogForSW
            d = ChangePasswordDialogForSW(self, self.wallet)
            ok, old_password, new_password, encrypt_file = d.run()
            if not ok:
                return
        try:
            self.wallet.update_password(old_password, new_password, encrypt_file)
        except InvalidPassword as e:
            self.show_error(str(e))
            return
        except BaseException:
            traceback.print_exc(file=sys.stdout)
            self.show_error(_('Failed to update password'))
            return
        msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
        self.show_message(msg, title=_("Success"))
        self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
    def new_contact_dialog(self):
        """Prompt for an address/name pair and add it as a contact."""
        d = WindowModalDialog(self, _("New Contact"))
        vbox = QVBoxLayout(d)
        vbox.addWidget(QLabel(_('New Contact') + ':'))
        grid = QGridLayout()
        line1 = QLineEdit()  # address field
        line1.setFixedWidth(280)
        line2 = QLineEdit()  # name/label field
        line2.setFixedWidth(280)
        grid.addWidget(QLabel(_("Address")), 1, 0)
        grid.addWidget(line1, 1, 1)
        grid.addWidget(QLabel(_("Name")), 2, 0)
        grid.addWidget(line2, 2, 1)
        vbox.addLayout(grid)
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if d.exec_():
            # set_contact(label, address) validates the address itself
            self.set_contact(line2.text(), line1.text())
    def show_master_public_keys(self):
        """Show a dialog with wallet metadata and its master public key(s)."""
        dialog = WindowModalDialog(self, _("Wallet Information"))
        dialog.setMinimumSize(500, 100)
        mpk_list = self.wallet.get_master_public_keys()
        vbox = QVBoxLayout()
        wallet_type = self.wallet.storage.get('wallet_type', '')
        grid = QGridLayout()
        basename = os.path.basename(self.wallet.storage.path)
        grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
        grid.addWidget(QLabel(basename), 0, 1)
        grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
        grid.addWidget(QLabel(wallet_type), 1, 1)
        grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
        grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
        vbox.addLayout(grid)
        if self.wallet.is_deterministic():
            mpk_text = ShowQRTextEdit()
            mpk_text.setMaximumHeight(150)
            mpk_text.addCopyButton(self.app)
            def show_mpk(index):
                # display the key selected via the choices layout below
                mpk_text.setText(mpk_list[index])
            # only show the combobox in case multiple accounts are available
            if len(mpk_list) > 1:
                def label(key):
                    if isinstance(self.wallet, Multisig_Wallet):
                        return _("cosigner") + ' ' + str(key+1)
                    return ''
                labels = [label(i) for i in range(len(mpk_list))]
                on_click = lambda clayout: show_mpk(clayout.selected_index())
                labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
                vbox.addLayout(labels_clayout.layout())
            else:
                vbox.addWidget(QLabel(_("Master Public Key")))
            show_mpk(0)
            vbox.addWidget(mpk_text)
        vbox.addStretch(1)
        vbox.addLayout(Buttons(CloseButton(dialog)))
        dialog.setLayout(vbox)
        dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
    @protected
    def _delete_wallet(self, password):
        """Delete the wallet file from disk (password-gated via @protected).

        Stops the daemon's wallet and closes this window before unlinking
        the file, so nothing keeps writing to it.
        """
        wallet_path = self.wallet.storage.path
        basename = os.path.basename(wallet_path)
        self.gui_object.daemon.stop_wallet(wallet_path)
        self.close()
        os.unlink(wallet_path)
        self.show_error(_("Wallet removed: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
    @protected
    def show_private_key(self, address, password):
        """Export and display the private key (and redeem script) for *address*."""
        if not address:
            return
        try:
            pk, redeem_script = self.wallet.export_private_key(address, password)
        except Exception as e:
            traceback.print_exc(file=sys.stdout)
            self.show_message(str(e))
            return
        # first element of the deserialized key is the script type
        xtype = bitcoin.deserialize_privkey(pk)[0]
        d = WindowModalDialog(self, _("Private key"))
        d.setMinimumSize(600, 150)
        vbox = QVBoxLayout()
        vbox.addWidget(QLabel(_("Address") + ': ' + address))
        vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
        vbox.addWidget(QLabel(_("Private key") + ':'))
        keys_e = ShowQRTextEdit(text=pk)
        keys_e.addCopyButton(self.app)
        vbox.addWidget(keys_e)
        if redeem_script:
            vbox.addWidget(QLabel(_("Redeem Script") + ':'))
            rds_e = ShowQRTextEdit(text=redeem_script)
            rds_e.addCopyButton(self.app)
            vbox.addWidget(rds_e)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.setLayout(vbox)
        d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
    @protected
    def do_sign(self, address, message, signature, password):
        """Sign *message* with the key of *address*; write result to *signature*.

        The three non-password arguments are the Qt input widgets from the
        sign/verify dialog. Signing runs on the wallet thread so the GUI
        stays responsive.
        """
        address  = address.text().strip()
        message = message.toPlainText().strip()
        if not bitcoin.is_address(address):
            self.show_message(_('Invalid Fujicoin address.'))
            return
        if self.wallet.is_watching_only():
            self.show_message(_('This is a watching-only wallet.'))
            return
        if not self.wallet.is_mine(address):
            self.show_message(_('Address not in wallet.'))
            return
        txin_type = self.wallet.get_txin_type(address)
        if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
            self.show_message(_('Cannot sign messages with this type of address:') + \
                              ' ' + txin_type + '\n\n' + self.msg_sign)
            return
        task = partial(self.wallet.sign_message, address, message, password)
        def show_signed_message(sig):
            try:
                signature.setText(base64.b64encode(sig).decode('ascii'))
            except RuntimeError:
                # (signature) wrapped C/C++ object has been deleted
                pass
        self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Fujicoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
    def sign_verify_message(self, address=''):
        """Open the message sign/verify dialog, optionally pre-filled with *address*."""
        d = WindowModalDialog(self, _('Sign/verify Message'))
        d.setMinimumSize(610, 290)
        layout = QGridLayout(d)
        message_e = QTextEdit()
        layout.addWidget(QLabel(_('Message')), 1, 0)
        layout.addWidget(message_e, 1, 1)
        layout.setRowStretch(2,3)
        address_e = QLineEdit()
        address_e.setText(address)
        layout.addWidget(QLabel(_('Address')), 2, 0)
        layout.addWidget(address_e, 2, 1)
        signature_e = QTextEdit()
        layout.addWidget(QLabel(_('Signature')), 3, 0)
        layout.addWidget(signature_e, 3, 1)
        layout.setRowStretch(3,1)
        hbox = QHBoxLayout()
        b = QPushButton(_("Sign"))
        b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
        hbox.addWidget(b)
        b = QPushButton(_("Verify"))
        b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
        hbox.addWidget(b)
        b = QPushButton(_("Close"))
        b.clicked.connect(d.accept)
        hbox.addWidget(b)
        layout.addLayout(hbox, 4, 1)
        d.exec_()
    @protected
    def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
        """Decrypt the ciphertext from *encrypted_e* into *message_e*.

        Runs on the wallet thread; the widget may already be destroyed
        when the result arrives, hence the RuntimeError guard.
        """
        if self.wallet.is_watching_only():
            self.show_message(_('This is a watching-only wallet.'))
            return
        cyphertext = encrypted_e.toPlainText()
        task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
        def setText(text):
            try:
                message_e.setText(text.decode('utf-8'))
            except RuntimeError:
                # (message_e) wrapped C/C++ object has been deleted
                pass
        self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
    def encrypt_message(self, address=''):
        """Open the message encrypt/decrypt dialog, optionally pre-filling the
        public key from *address*."""
        d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
        d.setMinimumSize(610, 490)
        layout = QGridLayout(d)
        message_e = QTextEdit()
        layout.addWidget(QLabel(_('Message')), 1, 0)
        layout.addWidget(message_e, 1, 1)
        layout.setRowStretch(2,3)
        pubkey_e = QLineEdit()
        if address:
            pubkey = self.wallet.get_public_key(address)
            pubkey_e.setText(pubkey)
        layout.addWidget(QLabel(_('Public key')), 2, 0)
        layout.addWidget(pubkey_e, 2, 1)
        encrypted_e = QTextEdit()
        layout.addWidget(QLabel(_('Encrypted')), 3, 0)
        layout.addWidget(encrypted_e, 3, 1)
        layout.setRowStretch(3,1)
        hbox = QHBoxLayout()
        b = QPushButton(_("Encrypt"))
        b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
        hbox.addWidget(b)
        b = QPushButton(_("Decrypt"))
        b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
        hbox.addWidget(b)
        b = QPushButton(_("Close"))
        b.clicked.connect(d.accept)
        hbox.addWidget(b)
        layout.addLayout(hbox, 4, 1)
        d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
    def read_tx_from_qrcode(self):
        """Scan a QR code and treat it as either a payment URI or a raw tx."""
        from electrum import qrscanner
        try:
            data = qrscanner.scan_barcode(self.config.get_video_device())
        except BaseException as e:
            self.show_error(str(e))
            return
        if not data:
            return
        # if the user scanned a fujicoin URI
        if str(data).startswith("fujicoin:"):
            self.pay_to_URI(data)
            return
        # else if the user scanned an offline signed tx
        try:
            data = bh2u(bitcoin.base_decode(data, length=None, base=43))
        except BaseException as e:
            self.show_error((_('Could not decode QR code')+':\n{}').format(e))
            return
        tx = self.tx_from_text(data)
        if not tx:
            return
        self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.get_transaction(txid)
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
    @protected
    def export_privkeys_dialog(self, password):
        """Export all wallet private keys to a CSV/JSON file chosen by the user.

        Key derivation runs on a background thread; progress and completion
        are marshalled back to the GUI thread via Qt signals. Closing the
        dialog early cancels the worker through the `cancelled` flag.
        """
        if self.wallet.is_watching_only():
            self.show_message(_("This is a watching-only wallet"))
            return
        if isinstance(self.wallet, Multisig_Wallet):
            self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                              _('It cannot be "backed up" by simply exporting these private keys.'))
        d = WindowModalDialog(self, _('Private keys'))
        d.setMinimumSize(980, 300)
        vbox = QVBoxLayout(d)
        msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                              _("Exposing a single private key can compromise your entire wallet!"),
                              _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
        vbox.addWidget(QLabel(msg))
        e = QTextEdit()
        e.setReadOnly(True)
        vbox.addWidget(e)
        defaultname = 'electrum-private-keys.csv'
        select_msg = _('Select file to export your private keys to')
        hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
        vbox.addLayout(hbox)
        b = OkButton(d, _('Export'))
        b.setEnabled(False)
        vbox.addLayout(Buttons(CancelButton(d), b))
        private_keys = {}
        addresses = self.wallet.get_addresses()
        done = False
        cancelled = False
        def privkeys_thread():
            # worker: derive one key per address, emitting progress each time
            for addr in addresses:
                time.sleep(0.1)
                if done or cancelled:
                    break
                privkey = self.wallet.export_private_key(addr, password)[0]
                private_keys[addr] = privkey
                self.computing_privkeys_signal.emit()
            if not cancelled:
                self.computing_privkeys_signal.disconnect()
                self.show_privkeys_signal.emit()
        def show_privkeys():
            # GUI thread: render the finished key list and enable Export
            s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
            e.setText(s)
            b.setEnabled(True)
            self.show_privkeys_signal.disconnect()
            nonlocal done
            done = True
        def on_dialog_closed(*args):
            nonlocal done
            nonlocal cancelled
            if not done:
                cancelled = True
                self.computing_privkeys_signal.disconnect()
                self.show_privkeys_signal.disconnect()
        self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
        self.show_privkeys_signal.connect(show_privkeys)
        d.finished.connect(on_dialog_closed)
        threading.Thread(target=privkeys_thread).start()
        if not d.exec_():
            done = True
            return
        filename = filename_e.text()
        if not filename:
            return
        try:
            self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
        except (IOError, os.error) as reason:
            txt = "\n".join([
                _("Electrum was unable to produce a private key-export."),
                str(reason)
            ])
            self.show_critical(txt, title=_("Unable to create csv"))
        except Exception as e:
            self.show_message(str(e))
            return
        self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
import json
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
    def sweep_key_dialog(self):
        """Sweep external private keys: prompt for WIF keys and a destination
        address, then prefill the send tab with a max-spend of those coins."""
        d = WindowModalDialog(self, title=_('Sweep private keys'))
        d.setMinimumSize(600, 300)
        vbox = QVBoxLayout(d)
        hbox_top = QHBoxLayout()
        hbox_top.addWidget(QLabel(_("Enter private keys:")))
        hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
        vbox.addLayout(hbox_top)
        keys_e = ScanQRTextEdit(allow_multi=True)
        keys_e.setTabChangesFocus(True)
        vbox.addWidget(keys_e)
        # prefer unused addresses as sweep destination, falling back to any
        addresses = self.wallet.get_unused_addresses()
        if not addresses:
            try:
                addresses = self.wallet.get_receiving_addresses()
            except AttributeError:
                addresses = self.wallet.get_addresses()
        h, address_e = address_field(addresses)
        vbox.addLayout(h)
        vbox.addStretch(1)
        button = OkButton(d, _('Sweep'))
        vbox.addLayout(Buttons(CancelButton(d), button))
        button.setEnabled(False)
        def get_address():
            # returns the destination address, or None if invalid
            addr = str(address_e.text()).strip()
            if bitcoin.is_address(addr):
                return addr
        def get_pk():
            text = str(keys_e.toPlainText())
            return keystore.get_private_keys(text)
        # enable Sweep only when both the keys and the destination validate
        f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
        on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
        keys_e.textChanged.connect(f)
        address_e.textChanged.connect(f)
        address_e.textChanged.connect(on_address)
        if not d.exec_():
            return
        from electrum.wallet import sweep_preparations
        try:
            self.do_clear()
            coins, keypairs = sweep_preparations(get_pk(), self.network)
            self.tx_external_keypairs = keypairs
            self.spend_coins(coins)
            self.payto_e.setText(get_address())
            self.spend_max()
            self.payto_e.setFrozen(True)
            self.amount_e.setFrozen(True)
        except BaseException as e:
            self.show_message(str(e))
            return
        self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
bad = []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad))
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_address)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_key(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
_('Mempool based: fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(self.config.get('use_rbf', False))
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', x == Qt.Checked)
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = base_units_list
msg = (_('Base unit of your wallet.')
+ '\n1 FJC = 1000 mFJC. 1 mFJC = 1000 bits. 1 bit = 100 sat.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.decimal_point = base_unit_name_to_decimal_point(unit_result)
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
colortheme_combo.setCurrentIndex(index)
colortheme_label = QLabel(_('Color theme') + ':')
def on_colortheme(x):
self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transactions fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_list.refresh_headers()
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
    def clean_up(self):
        """Tear down window state on close: stop background work, persist
        window geometry and console history, and release wallet resources."""
        # Stop the wallet's worker thread before detaching from the network.
        self.wallet.thread.stop()
        if self.network:
            self.network.unregister_callback(self.on_network)
        self.config.set_key("is_maximized", self.isMaximized())
        # Only remember an explicit geometry when not maximized, so restoring
        # later does not fight with the maximized flag saved above.
        if not self.isMaximized():
            g = self.geometry()
            self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
                                                  g.width(),g.height()])
        # Persist only the tail (last 50 entries) of the console history.
        self.config.set_key("console-history", self.console.history[-50:],
                            True)
        if self.qr_window:
            self.qr_window.close()
        self.close_wallet()
        self.gui_object.close_window(self)
    def plugins_dialog(self):
        """Show the modal 'Electrum Plugins' dialog: a scrollable list of
        available plugins with enable checkboxes, lazily created per-plugin
        settings widgets, and help buttons."""
        self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
        plugins = self.gui_object.plugins
        vbox = QVBoxLayout(d)
        # plugins
        scroll = QScrollArea()
        scroll.setEnabled(True)
        scroll.setWidgetResizable(True)
        scroll.setMinimumSize(400,250)
        vbox.addWidget(scroll)
        w = QWidget()
        scroll.setWidget(w)
        w.setMinimumHeight(plugins.count() * 35)
        grid = QGridLayout()
        grid.setColumnStretch(0,1)
        w.setLayout(grid)
        # Settings widgets created on demand, keyed by plugin name.
        settings_widgets = {}
        def enable_settings_widget(p, name, i):
            # Create the settings widget on first use; afterwards just
            # enable/disable it to track the plugin's enabled state.
            widget = settings_widgets.get(name)
            if not widget and p and p.requires_settings():
                widget = settings_widgets[name] = p.settings_widget(d)
                grid.addWidget(widget, i, 1)
            if widget:
                widget.setEnabled(bool(p and p.is_enabled()))
        def do_toggle(cb, name, i):
            # plugins.toggle() returns the plugin instance when enabled,
            # a falsy value otherwise; mirror that into the checkbox.
            p = plugins.toggle(name)
            cb.setChecked(bool(p))
            enable_settings_widget(p, name, i)
            run_hook('init_qt', self.gui_object)
        for i, descr in enumerate(plugins.descriptions.values()):
            full_name = descr['__name__']
            prefix, _separator, name = full_name.rpartition('.')
            p = plugins.get(name)
            # Keystore-registering plugins are managed elsewhere; skip here.
            if descr.get('registers_keystore'):
                continue
            try:
                cb = QCheckBox(descr['fullname'])
                plugin_is_loaded = p is not None
                cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
                              or plugin_is_loaded and p.can_user_disable())
                cb.setEnabled(cb_enabled)
                cb.setChecked(plugin_is_loaded and p.is_enabled())
                grid.addWidget(cb, i, 0)
                enable_settings_widget(p, name, i)
                cb.clicked.connect(partial(do_toggle, cb, name, i))
                msg = descr['description']
                if descr.get('requires'):
                    msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
                grid.addWidget(HelpButton(msg), i, 2)
            except Exception:
                # A broken plugin must not prevent the dialog from showing.
                self.print_msg("error: cannot display plugin", name)
                traceback.print_exc(file=sys.stdout)
        grid.setRowStretch(len(plugins.descriptions.values()), 1)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee' + ':')), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('New fee' + ':')))
fee_e = BTCAmountEdit(self.get_decimal_point)
fee_e.setAmount(fee * 1.5)
vbox.addWidget(fee_e)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * tx_size / 1000
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = fee_e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
try:
new_tx = self.wallet.bump_fee(tx, delta)
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_transactions(write=True)
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(":icons/offline_tx.png"), None, _('Success'), msg)
return True
|
django.py | import json
import logging
import os
import threading
from django.http import HttpResponse, HttpRequest
from . import utils
from .httpbased import HttpContext, HttpHandler, run_event_loop
from .utils import make_applications, cdn_validation
from ..utils import STATIC_PATH, iscoroutinefunction, isgeneratorfunction, get_free_port, parse_file_size
logger = logging.getLogger(__name__)
class DjangoHttpContext(HttpContext):
    # Identifies this backend implementation to the PyWebIO HTTP framework.
    backend_name = 'django'
    def __init__(self, request: HttpRequest):
        self.request = request
        self.response = HttpResponse()
    def request_obj(self):
        """Return the current request object."""
        return self.request
    def request_method(self):
        """Return the HTTP method of the current request, upper-case."""
        return self.request.method
    def request_headers(self):
        """Return the headers mapping of the current request."""
        return self.request.headers
    def request_url_parameter(self, name, default=None):
        """Return a URL query-string parameter of the current request."""
        return self.request.GET.get(name, default=default)
    def request_body(self):
        """Return the raw body bytes of the current request."""
        return self.request.body
    def set_header(self, name, value):
        """Set a header on the current response."""
        self.response[name] = value
    def set_status(self, status: int):
        """Set the HTTP status code of the current response."""
        self.response.status_code = status
    def set_content(self, content, json_type=False):
        """Set the response body.

        :param content: the response payload
        :param bool json_type: when True, serialize *content* as JSON and set
            the content-type header to application/json
        """
        # self.response.content accept str and byte
        if json_type:
            self.set_header('content-type', 'application/json')
            self.response.content = json.dumps(content)
        else:
            self.response.content = content
    def get_response(self):
        """Return the response object, to be returned from the Django view."""
        return self.response
    def get_client_ip(self):
        """Return the client's IP address.

        NOTE(review): REMOTE_ADDR is the direct peer; behind a reverse proxy
        this is the proxy's address -- confirm whether that matters here.
        """
        return self.request.META.get('REMOTE_ADDR')
def webio_view(applications, cdn=True,
               session_expire_seconds=None,
               session_cleanup_interval=None,
               allowed_origins=None, check_origin=None):
    """Get the view function for running PyWebIO applications in Django.

    The view communicates with the browser by HTTP protocol.
    The arguments of ``webio_view()`` have the same meaning as for
    :func:`pywebio.platform.flask.webio_view`.
    """
    from django.views.decorators.csrf import csrf_exempt

    cdn = cdn_validation(cdn, 'error')
    http_handler = HttpHandler(applications=applications, cdn=cdn,
                               session_expire_seconds=session_expire_seconds,
                               session_cleanup_interval=session_cleanup_interval,
                               allowed_origins=allowed_origins, check_origin=check_origin)

    @csrf_exempt
    def view_func(request):
        # Adapt the Django request into PyWebIO's backend-neutral context.
        return http_handler.handle_request(DjangoHttpContext(request))

    view_func.__name__ = 'webio_view'
    return view_func
# Django requires the ROOT_URLCONF module to expose ``urlpatterns``; it is
# populated by start_server() below, which sets this module as the urlconf.
urlpatterns = []
def start_server(applications, port=8080, host='', cdn=True, static_dir=None,
                 allowed_origins=None, check_origin=None,
                 session_expire_seconds=None,
                 session_cleanup_interval=None,
                 debug=False, max_payload_size='200M', **django_options):
    """Start a Django server to provide the PyWebIO application as a web service.

    :param bool debug: Django debug mode.
        See `Django doc <https://docs.djangoproject.com/en/3.0/ref/settings/#debug>`_ for more detail.
    :param django_options: Additional settings to django server.
        For details, please refer: https://docs.djangoproject.com/en/3.0/ref/settings/ .
        Among them, ``DEBUG``, ``ALLOWED_HOSTS``, ``ROOT_URLCONF``, ``SECRET_KEY`` are set by PyWebIO and cannot be specified in ``django_options``.

    The rest arguments of ``start_server()`` have the same meaning as for :func:`pywebio.platform.flask.start_server`
    """
    global urlpatterns
    from django.conf import settings
    from django.core.wsgi import get_wsgi_application
    from django.urls import path
    from django.utils.crypto import get_random_string
    from django.views.static import serve

    if port == 0:
        port = get_free_port()
    if not host:
        host = '0.0.0.0'

    cdn = cdn_validation(cdn, 'warn')

    max_payload_size = parse_file_size(max_payload_size)
    utils.MAX_PAYLOAD_SIZE = max_payload_size

    django_options.update(dict(
        DEBUG=debug,
        ALLOWED_HOSTS=["*"],  # Disable host header validation
        ROOT_URLCONF=__name__,  # Make this module the urlconf
        SECRET_KEY=get_random_string(10),  # We aren't using any security features but Django requires this setting
        DATA_UPLOAD_MAX_MEMORY_SIZE=max_payload_size
    ))
    django_options.setdefault('LOGGING', {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'simple': {
                'format': '[%(asctime)s] %(message)s'
            },
        },
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'formatter': 'simple'
            },
        },
        'loggers': {
            'django.server': {
                'level': 'INFO' if debug else 'WARN',
                'handlers': ['console'],
            },
        },
    })
    settings.configure(**django_options)

    webio_view_func = webio_view(
        applications=applications, cdn=cdn,
        session_expire_seconds=session_expire_seconds,
        session_cleanup_interval=session_cleanup_interval,
        allowed_origins=allowed_origins,
        check_origin=check_origin
    )

    urlpatterns = [
        path(r"", webio_view_func),
        path(r'<path:path>', serve, {'document_root': STATIC_PATH}),
    ]
    if static_dir is not None:
        urlpatterns.insert(0, path(r'static/<path:path>', serve, {'document_root': static_dir}))

    # Fix: os.environ.get() returns a *string* when the variable is set, and any
    # non-empty string (including "0"/"false") is truthy. Parse it explicitly;
    # default remains enabled when the variable is unset.
    use_tornado_wsgi = os.environ.get('PYWEBIO_DJANGO_WITH_TORNADO', 'true').lower() not in ('', '0', 'false', 'no')

    app = get_wsgi_application()  # load app

    if use_tornado_wsgi:
        # Fix: the original imported only tornado.wsgi but then referenced
        # tornado.httpserver and tornado.ioloop, which that import does not
        # load; import them explicitly.
        import tornado.wsgi
        import tornado.httpserver
        import tornado.ioloop
        container = tornado.wsgi.WSGIContainer(app)
        http_server = tornado.httpserver.HTTPServer(container, max_buffer_size=max_payload_size)
        http_server.listen(port, address=host)
        tornado.ioloop.IOLoop.current().start()
    else:
        from django.core.management import call_command
        has_coro_target = any(iscoroutinefunction(target) or isgeneratorfunction(target) for
                              target in make_applications(applications).values())
        if has_coro_target:
            # Coroutine/generator-based apps need a dedicated asyncio loop.
            threading.Thread(target=run_event_loop, daemon=True).start()
        call_command('runserver', '%s:%d' % (host, port))
|
comm.py | """
comm.py:
This is the F prime communications adapter. This allows the F prime ground tool suite to interact with running F prime
deployments that exist on the other end of a "wire" (some communication bus). This is done with the following mechanics:
1. An adapter is instantiated to handle "read" and "write" functions against the wire
2. A framer/deframer is instantiated in order to frame/deframe those packets as transported across the wire.
3. "Uplink" and "Downlink" threads are created to loop on data from flight (F prime) and ground (F prime ground)
interfaces ensuring that ground data is framed and written to the wire, and flight data is deframed and sent to the
ground side.
Note: assuming the module containing the ground adapter has been imported, then this code should provide it as a CLI
argument, removing the need to rewrite most of this class to use something different.
@author lestarch
"""
from __future__ import print_function
import sys
import queue
import argparse
import threading
import logging
import fprime_gds.common.logger
# Required adapters built on standard tools
import fprime_gds.common.adapters.base
import fprime_gds.common.adapters.ground
import fprime_gds.common.adapters.ip
from fprime_gds.common.adapters.framing import FpFramerDeframer
from fprime.common.models.serialize.u32_type import U32Type
from fprime_gds.common.utils.data_desc_type import DataDescType
import fprime_gds.executables.cli
# Uses non-standard PIP package pyserial, so test the waters before getting a hard-import crash
try:
import fprime_gds.common.adapters.uart
except ImportError:
pass
LOGGER = logging.getLogger("comm")
class Uplinker(object):
    """
    Pulls out the code useful for uplink into a single designated place. This will run as a thread, which essentially
    does the following:

    1. Read packets of incoming ground system data
    2. Frame packet
    3. Uplink packet
    4. Repeat 2-3 until queue of packets is drained
    5. Repeat 1-4

    Note: this class also implements an uplink handshake to the other side of the ground system, to ensure that uplinked
    items do not roll this process over via the thundering herd.
    """
    # Number of write attempts per framed packet before giving up.
    RETRY_COUNT = 3
    def __init__(self, uplink_adapter, ground, downlinker):
        """
        Uplinker requires an adapter reference to flight side (outgoing data), a ground side handler (incoming data),
        and a reference to the downlinker to queue return handshake packets.

        :param uplink_adapter: adapter used to talk to the flight side of the system
        :param ground: ground handler for sourcing uplink packets from the other ground components
        :param downlinker: downlinker object to return handshaking requests
        """
        self.uplink_adapter = uplink_adapter
        self.ground = ground
        self.downlink = downlinker
        self.running = True
        self.framer = FpFramerDeframer()
    def uplink(self):
        """
        Runs one pass of data uplink to the FSW: reads all pending packets from the ground handler, frames each with
        the framing tokens, and writes it out via the flight adapter. Each write is retried up to RETRY_COUNT times;
        on success a handshake frame is queued for downlink, and on exhaustion UplinkFailureException is raised.
        """
        data_packets = self.ground.receive_all()
        # Drop None/empty entries before framing.
        for valid_packet in filter(lambda packet: packet is not None and len(packet) > 0, data_packets):
            framed = self.framer.frame(valid_packet)
            for retry in range(0, Uplinker.RETRY_COUNT):
                if self.uplink_adapter.write(framed):
                    self.downlink.queue_downlink(Uplinker.get_handshake(valid_packet))
                    break
            else:
                # for-else: runs only when no write attempt succeeded.
                raise UplinkFailureException("Uplink failed to send {} bytes of data after {} retries"
                                             .format(len(framed), Uplinker.RETRY_COUNT))
    def stop(self):
        """ Stop the uplink loop; closing the ground resource also unblocks any pending (blocking) receive. """
        self.running = False
        self.ground.close()
    @staticmethod
    def get_handshake(packet):
        """
        Gets a handshake raw frame: the handshake descriptor word followed by a repeat of the uplinked packet.

        :param packet: packet to repeat back out
        :return: handshake raw-frame
        """
        return U32Type(DataDescType["FW_PACKET_HAND"].value).serialize() + packet
    def run(self):
        """
        Runs the uplinker loop until stop() clears the running flag. Uplink failures are logged but do not stop the
        loop; OSError is swallowed during shutdown (when running is False) and re-raised otherwise.
        """
        while self.running:
            try:
                self.uplink()
            except UplinkFailureException as ufe:
                LOGGER.warning("Uplink exception occured: {}".format(ufe))
            # Shutdown exception handling, only keep exception when running
            except OSError:
                if self.running:
                    raise
class UplinkFailureException(Exception):
    """Raised when an uplink write still fails after exhausting all retries."""
class Downlinker(object):
    """
    Handles the actions associated with downlinking. This boils down to the following steps:

    1. Reading raw data from the raw data adapter facing the flight-side of the system
    2. Appending any new data to pool of available data
    3. Deframing all packets available from pool of available data
    4. Sending all deframed packets out to ground system
    5. Repeat steps 1-4
    """
    def __init__(self, downlink_adapter, ground):
        """
        Downlinker requires an adapter reference to flight side (incoming data), and ground side handler (outgoing data).

        :param downlink_adapter: adapter used to talk to the flight side of the system
        :param ground: ground handler for sinking downlink packets to the other ground components
        """
        self.downlink_adapter = downlink_adapter
        self.ground = ground
        self.running = True
        self.deframer = FpFramerDeframer()
        # Raw bytes accumulated from the adapter until complete frames deframe.
        self.pool = b''
        # Extra frames (e.g. uplink handshakes) injected via queue_downlink().
        self.enqueued = queue.Queue()
    def downlink(self):
        """
        Runs one pass of the downlink: read available raw data into the pool, deframe every complete frame, append any
        queued handshake frames, and forward everything to the ground system.
        """
        # Read the downlink data for a full (maximum) frame, and process if non-zero. No retries, as retry is implicit.
        self.pool += self.downlink_adapter.read()
        frames, self.pool = self.deframer.deframe_all(self.pool, no_copy=True)
        # Add all enqueued items. Implemented as a try-catch as "empty" makes no guarantees, so a try-catch is required.
        try:
            while not self.enqueued.empty():
                frames.append(self.enqueued.get_nowait())
        except queue.Empty:
            pass
        # Send out all frames found to GDS
        self.ground.send_all(frames)
    def stop(self):
        """ Stop the downlink loop; closing the adapter also unblocks any pending (blocking) read. """
        self.running = False
        self.downlink_adapter.close()
    def queue_downlink(self, frame):
        """
        Enqueues a frame to send as part of downlink. This should not require deframing.

        :param frame: frame to enqueue
        """
        self.enqueued.put(frame)
    def run(self):
        """
        Runs the downlinker loop until stop() clears the running flag.
        """
        while self.running:
            self.downlink()
def parse_args(args):
    """
    Parse the arguments to this application, then return the constructed namespace argument.

    :param args: list of arguments to parse
    :return: namespace argument
    """
    parser = argparse.ArgumentParser(description="Connects data from F prime flight software to the GDS tcp server")
    # Setup this parser to handle MiddleWare arguments
    fprime_gds.executables.cli.MiddleWareParser.add_args(parser)
    # Add a sub-parser for each registered adapter. Iterate items() once
    # instead of re-calling get_adapters() for every key.
    subparsers = parser.add_subparsers(help="Type of adapter used for processing", dest="subcommand")
    for adapter_name, adapter in fprime_gds.common.adapters.base.BaseAdapter.get_adapters().items():
        # Check adapter real quick before moving on; callable(getattr(..., None))
        # already covers the previous redundant hasattr() check.
        if not callable(getattr(adapter, "get_arguments", None)):
            LOGGER.error("'{}' does not have 'get_arguments' method, skipping.".format(adapter_name))
            continue
        subparse = subparsers.add_parser(adapter_name)
        # Add arguments for the parser; call get_arguments() once, not per item.
        for argument, argument_spec in adapter.get_arguments().items():
            subparse.add_argument(*argument, **argument_spec)
    args = parser.parse_args(args)
    try:
        extras = fprime_gds.executables.cli.refine(parser, args)
        fprime_gds.common.logger.configure_py_log(extras["logs"], "comm-adapter.log")
    except ValueError as exc:
        print("[ERROR] {}".format(exc), file=sys.stderr)
        parser.print_help(sys.stderr)
        sys.exit(-1)
    return args
def main():
    """
    Main program, degenerates into the run loop: wires the ground handler and
    flight adapter into uplink/downlink threads and joins on them until
    interrupted.

    :return: return code (0 on normal/interrupted exit)
    """
    args, _ = fprime_gds.executables.cli.ParserBase.parse_args([fprime_gds.executables.cli.LogDeployParser,
                                                                fprime_gds.executables.cli.MiddleWareParser,
                                                                fprime_gds.executables.cli.CommParser],
                                                               description="F prime communications layer.", client=True)
    # First setup the ground handler to talk to the ground system.
    ground = fprime_gds.common.adapters.ground.TCPGround(args.tts_addr, args.tts_port)
    # Create an adapter from input arguments in order to talk to the flight deployment.
    adapter = args.comm_adapter
    # Create uplink and downlink handlers
    downlinker = Downlinker(adapter, ground)
    uplinker = Uplinker(adapter, ground, downlinker)
    # Open resources and fail if ground is unavailable
    ground.open()
    # Try to open the adapter, but ignore failure as it should reconnect
    adapter.open()
    # Start-up threads to handle uplink and downlink
    down_thread = threading.Thread(target=downlinker.run)
    up_thread = threading.Thread(target=uplinker.run)
    down_thread.start()
    up_thread.start()
    # Join on the threads before exiting; on Ctrl-C, stop both loops (which
    # also closes/unblocks their resources) and join again for a clean exit.
    try:
        down_thread.join()
        up_thread.join()
    except KeyboardInterrupt:
        downlinker.stop()
        uplinker.stop()
        down_thread.join()
        up_thread.join()
    return 0
# Script entry point: propagate main()'s return code as the exit status.
if __name__ == "__main__":
    sys.exit(main())
|
eventSplashWindow.py | #-*- coding:utf-8 -*-
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtNetwork import *
from uiEvents.AWindowBase import *
from uiDefines.Ui_SplashWindow import *
from uiUtil.envs import *
from uiUtil.globaltool import *
import os
import sys
import pathlib
import datetime
'''
这是SplashWindow窗体的实现类
'''
#class FSplashWindow(IWindowImpl):
class FSplashWindow(IWindowImplM):
    '''
    Implementation class for the SplashWindow form (startup progress window).
    '''
    # Initialize all UI state and data (abstract hook from the base class).
    def initUIAndData(self):
        # Wire up event handlers.
        self.initEvents()
        # Start the worker used to marshal messages onto the UI thread.
        self.msgWorker = QTInvokeQueueWorkerWithProcess(self)
        self.msgWorker.start()
    '''
    初始化事件
    '''
    # Initialize events (none needed yet).
    def initEvents(self):
        pass
    '''
    返回UI定义类的实例(例如uiDefines/Ui_MainWindow.py的实例,抽象函数)
    '''
    # Return the UI definition instance for this window (abstract hook),
    # e.g. like uiDefines/Ui_MainWindow.py instances.
    def getUIDefineObject(self):
        return Ui_SplashWindow()
    '''
    InvokeUI的实现(用于跨线程操作UI内容)
    '''
    # Cross-thread UI update: apply the posted args to the label/progress bar.
    def runUIImpl(self, uiArgs):
        self.uiObj.lblContent.setText(uiArgs.contentVal)
        self.uiObj.pbProgress.setValue(uiArgs.progressVal)
    '''
    显示窗体
    '''
    # Show the splash window and run doWorkImpl.process on a background thread.
    # NOTE(review): defined without `self`; callers apparently invoke it as
    # FSplashWindow.showWindow(title, impl) -- confirm. `threading` and
    # `QtCore` are assumed to come from the wildcard imports above -- verify.
    def showWindow(title, doWorkImpl):
        if doWorkImpl != None and title != None:
            # Build the window, pin it on top, and fix its size.
            windowObj, ui, event = WindowBuilder.buildWindow(None, FSplashWindow())
            windowObj.setWindowTitle(title)
            doWorkImpl.windowObj = windowObj
            doWorkImpl.eventObj = event
            windowObj.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
            windowObj.setFixedSize(windowObj.width(), windowObj.height())
            windowObj.show()
            # Run the user's work off the UI thread.
            thread = threading.Thread(target=doWorkImpl.process)
            thread.start()
'''
Splash处理类
'''
class ISplashDoWork:
    """Contract for splash-screen background jobs.

    Subclasses implement process() with the actual work; showWindow() runs it
    on a background thread.
    """
    def process(self):
        """Execute the job; must be overridden by subclasses."""
        raise NotImplementedError
'''
Splash的Invoke参数
'''
class SplashInvokeArgs(QTInvokeArgs):
    # Payload posted across threads to update the splash UI: a progress-bar
    # value and the status label text (consumed by FSplashWindow.runUIImpl).
    def __init__(self, progress, content):
        # progress: value for the progress bar; content: text for the label.
        super().__init__()
        self.progressVal = progress
        self.contentVal = content
|
exchange_rate.py | from datetime import datetime
import inspect
import requests
import sys
import os
import json
import pkgutil
from threading import Thread
import time
import csv
import decimal
from decimal import Decimal as PyDecimal # Qt 5.12 also exports Decimal
from .bitcoin import COIN
from .i18n import _
from .util import PrintError, ThreadJob, print_error
# Fiat rate display defaults.
DEFAULT_ENABLED = False
DEFAULT_CURRENCY = "USD"
DEFAULT_EXCHANGE = "CoinGecko" # Note the exchange here should ideally also support history rates
# See https://en.wikipedia.org/wiki/ISO_4217
# Decimal places for fiat currencies whose minor unit is not the usual 2.
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
                  'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
                  'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
                  'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
                  'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
                  'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0}
class ExchangeBase(PrintError):
    def __init__(self, on_quotes, on_history):
        # on_quotes/on_history are callbacks fired after a fetch attempt
        # completes (see update_safe and the history fetchers below).
        self.history = {}  # populated per-currency by the history fetchers
        self.quotes = {}   # latest spot rates, set by update_safe()
        self.on_quotes = on_quotes
        self.on_history = on_history
def get_json(self, site, get_string):
# APIs must have https
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electron Cash'}, timeout=10)
if response.status_code != 200:
raise RuntimeWarning("Response status: " + str(response.status_code))
return response.json()
def get_csv(self, site, get_string):
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electron-Cash'})
if response.status_code != 200:
raise RuntimeWarning("Response status: " + str(response.status_code))
reader = csv.DictReader(response.content.decode().split('\n'))
return list(reader)
def name(self):
return self.__class__.__name__
def update_safe(self, ccy):
try:
self.print_error("getting fx quotes for", ccy)
self.quotes = self.get_rates(ccy)
self.print_error("received fx quotes")
except BaseException as e:
self.print_error("failed fx quotes:", e)
self.on_quotes()
def update(self, ccy):
t = Thread(target=self.update_safe, args=(ccy,))
t.setDaemon(True)
t.start()
def read_historical_rates(self, ccy, cache_dir):
filename = os.path.join(cache_dir, self.name() + '_'+ ccy)
if os.path.exists(filename):
timestamp = os.stat(filename).st_mtime
try:
with open(filename, 'r', encoding='utf-8') as f:
h = json.loads(f.read())
except:
h = None
else:
h = None
timestamp = False
if h:
self.history[ccy] = h
self.on_history()
else:
h = None
return h, timestamp
def get_historical_rates_safe(self, ccy, cache_dir):
h, timestamp = self.read_historical_rates(ccy, cache_dir)
if h is None or time.time() - timestamp < 24*3600:
try:
self.print_error("requesting fx history for", ccy)
h = self.request_history(ccy)
self.print_error("received fx history for", ccy)
self.on_history()
except BaseException as e:
self.print_error("failed fx history:", e)
return
filename = os.path.join(cache_dir, self.name() + '_' + ccy)
with open(filename, 'w', encoding='utf-8') as f:
f.write(json.dumps(h))
self.history[ccy] = h
self.on_history()
def get_historical_rates(self, ccy, cache_dir):
result = self.history.get(ccy)
if not result and ccy in self.history_ccys():
t = Thread(target=self.get_historical_rates_safe, args=(ccy, cache_dir))
t.setDaemon(True)
t.start()
return result
def history_ccys(self):
return []
def historical_rate(self, ccy, d_t):
return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'))
def get_currencies(self):
rates = self.get_rates('')
return sorted([str(a) for (a, b) in rates.items() if b is not None and len(a)==3])
class BitcoinAverage(ExchangeBase):
    """BitcoinAverage global index: spot quotes only (history now paywalled)."""
    def get_rates(self, ccy):
        # Keys look like 'BCHUSD'; strip the 'BCH' prefix to get the ccy code.
        json = self.get_json('apiv2.bitcoinaverage.com', '/indices/global/ticker/short')
        return dict([(r.replace("BCH", ""), PyDecimal(json[r]['last']))
                     for r in json if r != 'timestamp'])

    # note: historical rates used to be freely available
    # but this is no longer the case. see spesmilo#5188
    # (Turned off until the unlikely event that the situation changes.)
    #def history_ccys(self):
    #    return ['AUD', 'BRL', 'CAD', 'CHF', 'CNY', 'EUR', 'GBP', 'IDR', 'ILS',
    #            'MXN', 'NOK', 'NZD', 'PLN', 'RON', 'RUB', 'SEK', 'SGD', 'USD',
    #            'ZAR']
    #
    #def request_history(self, ccy):
    #    history = self.get_csv('apiv2.bitcoinaverage.com',
    #                           "/indices/global/history/BCH%s?period=alltime&format=csv" % ccy)
    #    return dict([(h['DateTime'][:10], h['Average'])
    #                 for h in history])
class Bitmarket(ExchangeBase):
    """bitmarket.pl BCC/PLN ticker (quotes PLN only)."""
    def get_rates(self, ccy):
        json = self.get_json('www.bitmarket.pl', '/json/BCCPLN/ticker.json')
        return {'PLN': PyDecimal(json['last'])}
class BitPay(ExchangeBase):
    """BitPay BCH rate list: every quoted currency in one call."""
    def get_rates(self, ccy):
        json = self.get_json('bitpay.com', '/api/rates/BCH')
        return dict([(r['code'], PyDecimal(r['rate'])) for r in json])
class Bitso(ExchangeBase):
    """Bitso bch_btc order book ticker (quotes BTC only)."""
    def get_rates(self, ccy):
        json = self.get_json('api.bitso.com', '/v2/ticker/?book=bch_btc')
        return {'BTC': PyDecimal(json['last'])}
class BitStamp(ExchangeBase):
    """Bitstamp BCH tickers for USD, EUR and BTC (three separate requests)."""
    def get_rates(self, ccy):
        json_usd = self.get_json('www.bitstamp.net', '/api/v2/ticker/bchusd')
        json_eur = self.get_json('www.bitstamp.net', '/api/v2/ticker/bcheur')
        json_btc = self.get_json('www.bitstamp.net', '/api/v2/ticker/bchbtc')
        return {
            'USD': PyDecimal(json_usd['last']),
            'EUR': PyDecimal(json_eur['last']),
            'BTC': PyDecimal(json_btc['last'])}
class Coinbase(ExchangeBase):
    """Coinbase exchange-rates endpoint: every quoted currency in one call."""
    def get_rates(self, ccy):
        json = self.get_json('api.coinbase.com',
                             '/v2/exchange-rates?currency=BCH')
        return {ccy: PyDecimal(rate) for (ccy, rate) in json["data"]["rates"].items()}
class Kraken(ExchangeBase):
    """Kraken BCH tickers for EUR and USD in a single request."""
    def get_rates(self, ccy):
        ccys = ['EUR', 'USD']
        pairs = ['BCH%s' % c for c in ccys]
        json = self.get_json('api.kraken.com',
                             '/0/public/Ticker?pair=%s' % ','.join(pairs))
        # Result keys end with the ccy code; 'c' holds [last price, lot volume].
        return dict((k[-3:], PyDecimal(float(v['c'][0])))
                    for k, v in json['result'].items())
class CoinFloor(ExchangeBase):
    """CoinFloor BCH/GBP ticker."""
    # CoinFloor API only supports GBP on public API
    def get_rates(self, ccy):
        json = self.get_json('webapi.coinfloor.co.uk:8090/bist/BCH/GBP', '/ticker/')
        return {'GBP': PyDecimal(json['last'])}
class WEX(ExchangeBase):
    """WEX tickers: one request per quoted currency/coin pair."""
    def get_rates(self, ccy):
        json_eur = self.get_json('wex.nz', '/api/3/ticker/bch_eur')
        # note: the RUB quote uses WEX's legacy 'rur' pair name
        json_rub = self.get_json('wex.nz', '/api/3/ticker/bch_rur')
        json_usd = self.get_json('wex.nz', '/api/3/ticker/bch_usd')
        json_btc = self.get_json('wex.nz', '/api/3/ticker/bch_btc')
        json_ltc = self.get_json('wex.nz', '/api/3/ticker/bch_ltc')
        json_eth = self.get_json('wex.nz', '/api/3/ticker/bch_eth')
        json_dsh = self.get_json('wex.nz', '/api/3/ticker/bch_dsh')
        return {'EUR': PyDecimal(json_eur['bch_eur']['last']),
                'RUB': PyDecimal(json_rub['bch_rur']['last']),
                'USD': PyDecimal(json_usd['bch_usd']['last']),
                'BTC': PyDecimal(json_btc['bch_btc']['last']),
                'LTC': PyDecimal(json_ltc['bch_ltc']['last']),
                'ETH': PyDecimal(json_eth['bch_eth']['last']),
                'DSH': PyDecimal(json_dsh['bch_dsh']['last'])}
class CoinCap(ExchangeBase):
    """CoinCap v2: USD spot rate plus up to 2000 days of daily USD history."""
    def get_rates(self, ccy):
        json = self.get_json('api.coincap.io', '/v2/rates/bitcoin-cash/')
        return {'USD': PyDecimal(json['data']['rateUsd'])}
    def history_ccys(self):
        return ['USD']
    def request_history(self, ccy):
        from datetime import datetime as dt
        # Currently 2000 days is the maximum in 1 API call which needs to be fixed
        # sometime before the year 2023...
        history = self.get_json('api.coincap.io',
                                "/v2/assets/bitcoin-cash/history?interval=d1&limit=2000")
        # 'time' is a millisecond epoch timestamp; key the dict by UTC date.
        return dict([(dt.utcfromtimestamp(h['time']/1000).strftime('%Y-%m-%d'),
                      h['priceUsd'])
                     for h in history['data']])
class CoinGecko(ExchangeBase):
    """CoinGecko BCH spot quotes and full daily history (the default exchange)."""
    def get_rates(self, ccy):
        json = self.get_json('api.coingecko.com', '/api/v3/coins/bitcoin-cash?localization=False&sparkline=false')
        prices = json["market_data"]["current_price"]
        # API keys are lower-case currency codes; normalise to upper case.
        return dict([(a[0].upper(),PyDecimal(a[1])) for a in prices.items()])
    def history_ccys(self):
        # Fix: 'BTD' -> 'BDT' (Bangladeshi taka) and 'MXH' -> 'MXN' (Mexican
        # peso).  The old strings are not ISO 4217 codes and could never match
        # a user's configured currency, silently disabling history for them.
        return ['AED', 'ARS', 'AUD', 'BDT', 'BHD', 'BMD', 'BRL', 'BTC',
                'CAD', 'CHF', 'CLP', 'CNY', 'CZK', 'DKK', 'ETH', 'EUR',
                'GBP', 'HKD', 'HUF', 'IDR', 'ILS', 'INR', 'JPY', 'KRW',
                'KWD', 'LKR', 'LTC', 'MMK', 'MXN', 'MYR', 'NOK', 'NZD',
                'PHP', 'PKR', 'PLN', 'RUB', 'SAR', 'SEK', 'SGD', 'THB',
                'TRY', 'TWD', 'USD', 'VEF', 'XAG', 'XAU', 'XDR', 'ZAR']
    def request_history(self, ccy):
        history = self.get_json('api.coingecko.com', '/api/v3/coins/bitcoin-cash/market_chart?vs_currency=%s&days=max' % ccy)
        from datetime import datetime as dt
        # 'prices' rows are [ms_epoch, price]; key the dict by UTC date.
        return dict([(dt.utcfromtimestamp(h[0]/1000).strftime('%Y-%m-%d'), h[1])
                     for h in history['prices']])
def dictinvert(d):
    """Invert a dict of lists: map each value to the list of keys listing it."""
    inv = {}
    for key, values in d.items():
        for value in values:
            inv.setdefault(value, []).append(key)
    return inv
def get_exchanges_and_currencies():
    """Return {exchange_name: [ccy, ...]} from the bundled currencies.json.

    Falls back to querying every exchange live and (re)writing the file —
    normally only exercised when regenerating the bundled data.
    """
    try:
        data = pkgutil.get_data(__name__, 'currencies.json')
        return json.loads(data.decode('utf-8'))
    except Exception:
        # Bundled data missing or corrupt: rebuild it below.  (Was a bare
        # 'except:', which also swallowed SystemExit/KeyboardInterrupt.)
        pass
    path = os.path.join(os.path.dirname(__file__), 'currencies.json')
    d = {}
    is_exchange = lambda obj: (inspect.isclass(obj)
                               and issubclass(obj, ExchangeBase)
                               and obj != ExchangeBase)
    exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
    for name, klass in exchanges.items():
        exchange = klass(None, None)
        try:
            d[name] = exchange.get_currencies()
            print_error(name, "ok")
        except Exception:
            # Network/parse failure for one exchange must not abort the rest.
            print_error(name, "error")
            continue
    with open(path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(d, indent=4, sort_keys=True))
    return d
# Static map {exchange_name: [supported ccys]}, loaded once at import time.
CURRENCIES = get_exchanges_and_currencies()
def get_exchanges_by_ccy(history=True):
    """Map each currency code to the exchanges quoting it.

    history=True restricts the result to history-capable pairs.
    """
    if not history:
        return dictinvert(CURRENCIES)
    ccys_by_exchange = {}
    for name in CURRENCIES:
        exchange = globals()[name](None, None)
        ccys_by_exchange[name] = exchange.history_ccys()
    return dictinvert(ccys_by_exchange)
class FxThread(ThreadJob):
    """Background job that keeps fiat exchange rates (spot and historical)
    up to date and formats fiat amounts for the UI."""
    default_currency = DEFAULT_CURRENCY
    default_exchange = DEFAULT_EXCHANGE
    def __init__(self, config, network):
        self.config = config
        self.network = network
        self.ccy = self.get_currency()
        self.history_used_spot = False
        self.ccy_combo = None
        self.hist_checkbox = None
        self.cache_dir = os.path.join(config.path, 'cache')
        # NOTE(review): set_exchange() reads cache files from cache_dir before
        # the directory is created below; read_historical_rates tolerates a
        # missing file, so this only means a cold cache on first run.
        self.set_exchange(self.config_exchange())
        if not os.path.exists(self.cache_dir):
            os.mkdir(self.cache_dir)
    def get_currencies(self, h):
        # All currencies quoted by any exchange (history-capable only if h).
        d = get_exchanges_by_ccy(h)
        return sorted(d.keys())
    def get_exchanges_by_ccy(self, ccy, h):
        # Exchanges quoting ccy (history-capable only if h).
        d = get_exchanges_by_ccy(h)
        return d.get(ccy, [])
    def ccy_amount_str(self, amount, commas, default_prec=2, is_diff=False):
        """Format a fiat amount using the currency's ISO-4217 precision.

        commas: insert thousands separators.  is_diff: prefix an explicit sign.
        NOTE(review): for negative amounts with is_diff=True the '-' prefix is
        prepended to an already negative number ('--x') — confirm callers pass
        magnitudes here.
        """
        prec = CCY_PRECISIONS.get(self.ccy, default_prec)
        diff_str = ''
        if is_diff:
            diff_str = '+' if amount >= 0 else '-'
        fmt_str = "%s{:%s.%df}" % (diff_str, "," if commas else "", max(0, prec))
        try:
            rounded_amount = round(amount, prec)
        except decimal.InvalidOperation:
            # e.g. rounding a NaN Decimal; fall back to the raw value
            rounded_amount = amount
        return fmt_str.format(rounded_amount)
    def run(self):
        # This runs from the plugins thread which catches exceptions
        if self.is_enabled():
            if self.timeout == 0 and self.show_history():
                self.exchange.get_historical_rates(self.ccy, self.cache_dir)
            if self.timeout <= time.time():
                # Poll spot quotes roughly every 150 seconds.
                self.timeout = time.time() + 150
                self.exchange.update(self.ccy)
    def is_enabled(self):
        return self.config.get('use_exchange_rate', DEFAULT_ENABLED)
    def set_enabled(self, b):
        return self.config.set_key('use_exchange_rate', bool(b))
    def get_history_config(self):
        return bool(self.config.get('history_rates'))
    def set_history_config(self, b):
        self.config.set_key('history_rates', bool(b))
    def get_fiat_address_config(self):
        return bool(self.config.get('fiat_address'))
    def set_fiat_address_config(self, b):
        self.config.set_key('fiat_address', bool(b))
    def get_currency(self):
        '''Use when dynamic fetching is needed'''
        return self.config.get("currency", self.default_currency)
    def config_exchange(self):
        # Name of the configured exchange class (looked up via globals()).
        return self.config.get('use_exchange', self.default_exchange)
    def show_history(self):
        # History charts only make sense when enabled AND supported for ccy.
        return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()
    def set_currency(self, ccy):
        self.ccy = ccy
        if self.get_currency() != ccy:
            self.config.set_key('currency', ccy, True)
        self.timeout = 0 # Because self.ccy changes
        self.on_quotes()
    def set_exchange(self, name):
        """Switch provider; may recurse once to the default exchange when the
        chosen one has no history for the current currency."""
        default_class = globals().get(self.default_exchange)
        class_ = globals().get(name, default_class)
        if self.config_exchange() != name:
            self.config.set_key('use_exchange', name, True)
        self.exchange = class_(self.on_quotes, self.on_history)
        if self.get_history_config() and self.ccy not in self.exchange.history_ccys() and class_ != default_class:
            # this exchange has no history for this ccy. Try the default exchange.
            # If that also fails the user will be stuck in a strange UI
            # situation where the checkbox is checked but they see no history
            # Note this code is here to migrate users from previous history
            # API exchanges in config that are no longer serving histories.
            self.set_exchange(self.default_exchange)
            return
        self.print_error("using exchange", name)
        # A new exchange means new fx quotes, initially empty.  Force
        # a quote refresh
        self.timeout = 0
        self.exchange.read_historical_rates(self.ccy, self.cache_dir)
    def on_quotes(self):
        if self.network:
            self.network.trigger_callback('on_quotes')
    def on_history(self):
        if self.network:
            self.network.trigger_callback('on_history')
    def exchange_rate(self):
        '''Returns None, or the exchange rate as a PyDecimal'''
        rate = self.exchange.quotes.get(self.ccy)
        if rate:
            return PyDecimal(rate)
    def format_amount_and_units(self, btc_balance, is_diff=False):
        # '' when no rate is known; otherwise e.g. '12.34 USD'.
        amount_str = self.format_amount(btc_balance, is_diff=is_diff)
        return '' if not amount_str else "%s %s" % (amount_str, self.ccy)
    def format_amount(self, btc_balance, is_diff=False):
        rate = self.exchange_rate()
        return '' if rate is None else self.value_str(btc_balance, rate, is_diff=is_diff)
    def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
        # Status-bar text, e.g. ' 1 BCH~123.45 USD'; extra precision for 'cash'.
        rate = self.exchange_rate()
        default_prec = 2
        if (base_unit == "cash"):
            default_prec = 4
        return _(" (No FX rate available)") if rate is None else " 1 %s~%s %s" % (base_unit,
            self.value_str(COIN / (10**(8 - decimal_point)), rate, default_prec ), self.ccy )
    def value_str(self, satoshis, rate, default_prec=2, is_diff=False):
        """Convert a satoshi amount at the given rate into a formatted string."""
        if satoshis is None: # Can happen with incomplete history
            return _("Unknown")
        if rate:
            value = PyDecimal(satoshis) / COIN * PyDecimal(rate)
            return "%s" % (self.ccy_amount_str(value, True, default_prec, is_diff=is_diff))
        return _("No data")
    def history_rate(self, d_t):
        """Historical rate for date d_t, falling back to spot for recent days."""
        rate = self.exchange.historical_rate(self.ccy, d_t)
        # Frequently there is no rate for today, until tomorrow :)
        # Use spot quotes in that case
        if rate is None and (datetime.today().date() - d_t.date()).days <= 2:
            rate = self.exchange.quotes.get(self.ccy)
            self.history_used_spot = True
        return PyDecimal(rate) if rate is not None else None
    def historical_value_str(self, satoshis, d_t):
        rate = self.history_rate(d_t)
        return self.value_str(satoshis, rate)
    def historical_value(self, satoshis, d_t):
        # Returns a PyDecimal, or None when no rate is known for d_t.
        rate = self.history_rate(d_t)
        if rate:
            return PyDecimal(satoshis) / COIN * PyDecimal(rate)
    def timestamp_rate(self, timestamp):
        from .util import timestamp_to_datetime
        date = timestamp_to_datetime(timestamp)
        return self.history_rate(date)
|
test_error.py | # Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from threading import Thread
import zmq
from zmq import Again, ContextTerminated, ZMQError, strerror
from zmq.tests import BaseZMQTestCase
class TestZMQError(BaseZMQTestCase):
    """Unit tests for pyzmq's exception classes and the strerror helper."""
    def test_strerror(self):
        """test that strerror gets the right type."""
        for i in range(10):
            e = strerror(i)
            assert isinstance(e, str)
    def test_zmqerror(self):
        # ZMQError must carry errno and render via strerror for any errno.
        for errno in range(10):
            e = ZMQError(errno)
            assert e.errno == errno
            assert str(e) == strerror(errno)
    def test_again(self):
        # Non-blocking recv with no pending message raises Again (EAGAIN).
        s = self.context.socket(zmq.REP)
        self.assertRaises(Again, s.recv, zmq.NOBLOCK)
        self.assertRaisesErrno(zmq.EAGAIN, s.recv, zmq.NOBLOCK)
        s.close()
    def atest_ctxterm(self):
        # NOTE: the 'atest_' prefix deliberately hides this case from the
        # test runner; recv after context.term should raise ContextTerminated.
        s = self.context.socket(zmq.REP)
        t = Thread(target=self.context.term)
        t.start()
        self.assertRaises(ContextTerminated, s.recv, zmq.NOBLOCK)
        self.assertRaisesErrno(zmq.TERM, s.recv, zmq.NOBLOCK)
        s.close()
        t.join()
|
compare_num_layer_w_multiprocessing_sgd.py | import qiskit
import numpy as np
import sys
import multiprocessing
sys.path.insert(1, '../')
import qtm.base, qtm.constant, qtm.nqubit, qtm.fubini_study, qtm.encoding
def run_w(num_layers, num_qubits):
    """Train a linear-ansatz circuit to prepare the W state with plain SGD.

    Runs 400 SGD iterations, then recomputes trace distance and fidelity for
    every saved parameter vector and writes four CSV result files.
    """
    thetas = np.ones(num_qubits*num_layers*5)   # 5 parameters per qubit per layer
    qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
    loss_values_w = []
    thetass_w = []
    for i in range(0, 400):
        if i % 20 == 0:
            print('W (' + str(num_layers) + ' layer): ', i)
        # G = qtm.fubini_study.calculate_linear_state(qc.copy(), thetas, num_layers)
        grad_loss = qtm.base.grad_loss(
            qc,
            qtm.nqubit.create_Wchecker_linear,
            thetas, num_layers = num_layers)
        # grad1 = np.real(np.linalg.inv(G) @ grad_loss)
        # Plain gradient step (natural-gradient variant left commented out above).
        thetas -= qtm.constant.learning_rate*grad_loss
        qc_copy = qtm.nqubit.create_Wchecker_linear(qc.copy(), thetas, num_layers)
        loss = qtm.base.loss_basis(qtm.base.measure(qc_copy, list(range(qc_copy.num_qubits))))
        loss_values_w.append(loss)
        thetass_w.append(thetas.copy())
    traces_w, fidelities_w = [], []
    for thetas in thetass_w:
        # Get |psi> = U_gen|000...>
        qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
        qc = qtm.nqubit.create_linear_state(qc, thetas, num_layers = num_layers)
        psi , rho_psi = qtm.base.extract_state(qc)
        # Get |psi~> = U_target|000...>
        qc1 = qiskit.QuantumCircuit(num_qubits, num_qubits)
        qc1 = qtm.nqubit.create_w_state(qc1)
        psi_hat , rho_psi_hat = qtm.base.extract_state(qc1)
        # Calculate the metrics
        trace, fidelity = qtm.base.get_metrics(psi, psi_hat)
        traces_w.append(trace)
        fidelities_w.append(fidelity)
    print('Writting ...')
    np.savetxt("../../experiments/linear_ansatz_15layer_sgd/" + str(num_layers) + "/loss_values_w.csv", loss_values_w, delimiter=",")
    np.savetxt("../../experiments/linear_ansatz_15layer_sgd/" + str(num_layers) + "/thetass_w.csv", thetass_w, delimiter=",")
    np.savetxt("../../experiments/linear_ansatz_15layer_sgd/" + str(num_layers) + "/traces_w.csv", traces_w, delimiter=",")
    np.savetxt("../../experiments/linear_ansatz_15layer_sgd/" + str(num_layers) + "/fidelities_w.csv", fidelities_w, delimiter=",")
if __name__ == "__main__":
# creating thread
num_qubits = 5
num_layers = [1, 2, 3, 4, 5]
t_w = []
for i in num_layers:
t_w.append(multiprocessing.Process(target = run_w, args=(i, num_qubits)))
for i in range(0, len(num_layers)):
t_w[i].start()
for i in range(0, len(num_layers)):
t_w[i].join()
print("Done!") |
graphicsCrawlerDisplay.py | # graphicsCrawlerDisplay.py
# -------------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to
# http://inst.eecs.berkeley.edu/~cs188/pacman/pacman.html
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
# graphicsCrawlerDisplay.py
# -------------------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and Pieter
# Abbeel in Spring 2013.
# For more info, see http://inst.eecs.berkeley.edu/~cs188/pacman/pacman.html
import Tkinter
import qlearningAgents
import time
import threading
import sys
import crawler
#import pendulum
import math
from math import pi as PI
# Which robot the GUI drives; 'pendulum' is also handled in Application.__init__
# but its import is commented out above.
robotType = 'crawler'
class Application:
def sigmoid(self, x):
return 1.0 / (1.0 + 2.0 ** (-x))
def incrementSpeed(self, inc):
self.tickTime *= inc
# self.epsilon = min(1.0, self.epsilon)
# self.epsilon = max(0.0,self.epsilon)
# self.learner.setSpeed(self.epsilon)
self.speed_label['text'] = 'Step Delay: %.5f' % (self.tickTime)
def incrementEpsilon(self, inc):
self.ep += inc
self.epsilon = self.sigmoid(self.ep)
self.learner.setEpsilon(self.epsilon)
self.epsilon_label['text'] = 'Epsilon: %.3f' % (self.epsilon)
def incrementGamma(self, inc):
self.ga += inc
self.gamma = self.sigmoid(self.ga)
self.learner.setDiscount(self.gamma)
self.gamma_label['text'] = 'Discount: %.3f' % (self.gamma)
def incrementAlpha(self, inc):
self.al += inc
self.alpha = self.sigmoid(self.al)
self.learner.setLearningRate(self.alpha)
self.alpha_label['text'] = 'Learning Rate: %.3f' % (self.alpha)
def __initGUI(self, win):
## Window ##
self.win = win
## Initialize Frame ##
win.grid()
self.dec = -.5
self.inc = .5
self.tickTime = 0.1
## Epsilon Button + Label ##
self.setupSpeedButtonAndLabel(win)
self.setupEpsilonButtonAndLabel(win)
## Gamma Button + Label ##
self.setUpGammaButtonAndLabel(win)
## Alpha Button + Label ##
self.setupAlphaButtonAndLabel(win)
## Exit Button ##
#self.exit_button = Tkinter.Button(win,text='Quit', command=self.exit)
#self.exit_button.grid(row=0, column=9)
## Simulation Buttons ##
# self.setupSimulationButtons(win)
## Canvas ##
self.canvas = Tkinter.Canvas(root, height=200, width=1000)
self.canvas.grid(row=2,columnspan=10)
def setupAlphaButtonAndLabel(self, win):
self.alpha_minus = Tkinter.Button(win,
text="-",command=(lambda: self.incrementAlpha(self.dec)))
self.alpha_minus.grid(row=1, column=3, padx=10)
self.alpha = self.sigmoid(self.al)
self.alpha_label = Tkinter.Label(win, text='Learning Rate: %.3f' % (self.alpha))
self.alpha_label.grid(row=1, column=4)
self.alpha_plus = Tkinter.Button(win,
text="+",command=(lambda: self.incrementAlpha(self.inc)))
self.alpha_plus.grid(row=1, column=5, padx=10)
def setUpGammaButtonAndLabel(self, win):
self.gamma_minus = Tkinter.Button(win,
text="-",command=(lambda: self.incrementGamma(self.dec)))
self.gamma_minus.grid(row=1, column=0, padx=10)
self.gamma = self.sigmoid(self.ga)
self.gamma_label = Tkinter.Label(win, text='Discount: %.3f' % (self.gamma))
self.gamma_label.grid(row=1, column=1)
self.gamma_plus = Tkinter.Button(win,
text="+",command=(lambda: self.incrementGamma(self.inc)))
self.gamma_plus.grid(row=1, column=2, padx=10)
def setupEpsilonButtonAndLabel(self, win):
self.epsilon_minus = Tkinter.Button(win,
text="-",command=(lambda: self.incrementEpsilon(self.dec)))
self.epsilon_minus.grid(row=0, column=3)
self.epsilon = self.sigmoid(self.ep)
self.epsilon_label = Tkinter.Label(win, text='Epsilon: %.3f' % (self.epsilon))
self.epsilon_label.grid(row=0, column=4)
self.epsilon_plus = Tkinter.Button(win,
text="+",command=(lambda: self.incrementEpsilon(self.inc)))
self.epsilon_plus.grid(row=0, column=5)
def setupSpeedButtonAndLabel(self, win):
self.speed_minus = Tkinter.Button(win,
text="-",command=(lambda: self.incrementSpeed(.5)))
self.speed_minus.grid(row=0, column=0)
self.speed_label = Tkinter.Label(win, text='Step Delay: %.5f' % (self.tickTime))
self.speed_label.grid(row=0, column=1)
self.speed_plus = Tkinter.Button(win,
text="+",command=(lambda: self.incrementSpeed(2)))
self.speed_plus.grid(row=0, column=2)
def skip5kSteps(self):
self.stepsToSkip = 5000
def __init__(self, win):
self.ep = 0
self.ga = 2
self.al = 2
self.stepCount = 0
## Init Gui
self.__initGUI(win)
# Init environment
if robotType == 'crawler':
self.robot = crawler.CrawlingRobot(self.canvas)
self.robotEnvironment = crawler.CrawlingRobotEnvironment(self.robot)
elif robotType == 'pendulum':
self.robot = pendulum.PendulumRobot(self.canvas)
self.robotEnvironment = \
pendulum.PendulumRobotEnvironment(self.robot)
else:
raise "Unknown RobotType"
# Init Agent
simulationFn = lambda agent: \
simulation.SimulationEnvironment(self.robotEnvironment,agent)
actionFn = lambda state: \
self.robotEnvironment.getPossibleActions(state)
self.learner = qlearningAgents.QLearningAgent(actionFn=actionFn)
self.learner.setEpsilon(self.epsilon)
self.learner.setLearningRate(self.alpha)
self.learner.setDiscount(self.gamma)
# Start GUI
self.running = True
self.stopped = False
self.stepsToSkip = 0
self.thread = threading.Thread(target=self.run)
self.thread.start()
def exit(self):
self.running = False
for i in range(5):
if not self.stopped:
time.sleep(0.1)
try:
self.win.destroy()
except:
pass
sys.exit(0)
def step(self):
self.stepCount += 1
state = self.robotEnvironment.getCurrentState()
actions = self.robotEnvironment.getPossibleActions(state)
if len(actions) == 0.0:
self.robotEnvironment.reset()
state = self.robotEnvironment.getCurrentState()
actions = self.robotEnvironment.getPossibleActions(state)
print 'Reset!'
action = self.learner.getAction(state)
if action == None:
raise 'None action returned: Code Not Complete'
nextState, reward = self.robotEnvironment.doAction(action)
self.learner.observeTransition(state, action, nextState, reward)
def animatePolicy(self):
if robotType != 'pendulum':
raise 'Only pendulum can animatePolicy'
totWidth = self.canvas.winfo_reqwidth()
totHeight = self.canvas.winfo_reqheight()
length = 0.48 * min(totWidth, totHeight)
x,y = totWidth-length-30, length+10
angleMin, angleMax = self.robot.getMinAndMaxAngle()
velMin, velMax = self.robot.getMinAndMaxAngleVelocity()
if not 'animatePolicyBox' in dir(self):
self.canvas.create_line(x,y,x+length,y)
self.canvas.create_line(x+length,y,x+length,y-length)
self.canvas.create_line(x+length,y-length,x,y-length)
self.canvas.create_line(x,y-length,x,y)
self.animatePolicyBox = 1
self.canvas.create_text(x+length/2,y+10,text='angle')
self.canvas.create_text(x-30,y-length/2,text='velocity')
self.canvas.create_text(x-60,y-length/4,text='Blue = kickLeft')
self.canvas.create_text(x-60,y-length/4+20,text='Red = kickRight')
self.canvas.create_text(x-60,y-length/4+40,text='White = doNothing')
angleDelta = (angleMax-angleMin) / 100
velDelta = (velMax-velMin) / 100
for i in range(100):
angle = angleMin + i * angleDelta
for j in range(100):
vel = velMin + j * velDelta
state = self.robotEnvironment.getState(angle,vel)
max, argMax = None, None
if not self.learner.seenState(state):
argMax = 'unseen'
else:
for action in ('kickLeft','kickRight','doNothing'):
qVal = self.learner.getQValue(state, action)
if max == None or qVal > max:
max, argMax = qVal, action
if argMax != 'unseen':
if argMax == 'kickLeft':
color = 'blue'
elif argMax == 'kickRight':
color = 'red'
elif argMax == 'doNothing':
color = 'white'
dx = length / 100.0
dy = length / 100.0
x0, y0 = x+i*dx, y-j*dy
self.canvas.create_rectangle(x0,y0,x0+dx,y0+dy,fill=color)
def run(self):
self.stepCount = 0
self.learner.startEpisode()
while True:
minSleep = .01
tm = max(minSleep, self.tickTime)
time.sleep(tm)
self.stepsToSkip = int(tm / self.tickTime) - 1
if not self.running:
self.stopped = True
return
for i in range(self.stepsToSkip):
self.step()
self.stepsToSkip = 0
self.step()
# self.robot.draw()
self.learner.stopEpisode()
def start(self):
self.win.mainloop()
def run():
    """Create the Tk root window, start the Application, and enter the main loop."""
    global root
    root = Tkinter.Tk()
    root.title( 'Crawler GUI' )
    root.resizable( 0, 0 )
    # root.mainloop()
    app = Application(root)
    def update_gui():
        # Redraw on the Tk event loop (~every 10 ms); the learner itself
        # steps on its own thread inside Application.
        app.robot.draw(app.stepCount, app.tickTime)
        root.after(10, update_gui)
    update_gui()
    root.protocol( 'WM_DELETE_WINDOW', app.exit)
    try:
        app.start()
    # NOTE(review): bare except — also catches KeyboardInterrupt so closing
    # via Ctrl-C still runs app.exit().
    except:
        app.exit()
|
tello_base.py | import socket
import threading
import time
import numpy as np
import libh264decoder
from stats import Stats
class Tello:
"""Wrapper class to interact with the Tello drone."""
def __init__(self, local_ip, local_port, imperial=False, command_timeout=.3, tello_ip='192.168.10.1',
             tello_port=8889):
    """
    Binds to the local IP/port and puts the Tello into command mode.

    :param local_ip (str): Local IP address to bind.
    :param local_port (int): Local port to bind.
    :param imperial (bool): If True, speed is MPH and distance is feet.
                            If False, speed is KPH and distance is meters.
    :param command_timeout (int|float): Number of seconds to wait for a response to a command.
    :param tello_ip (str): Tello IP.
    :param tello_port (int): Tello port.
    """
    self.last = False       # response seen for the previous command (see send_command)
    self.command = "" #for debug
    self.abort_flag = False
    self.decoder = libh264decoder.H264Decoder()
    self.command_timeout = command_timeout
    self.imperial = imperial
    self.response = None
    self.frame = None  # numpy array BGR -- current camera output frame
    self.is_freeze = False  # freeze current camera output
    self.last_frame = None
    self.log = []           # one Stats entry per command sent
    self.MAX_TIME_OUT = 10.0   # hard cap (seconds) on retrying in send_command
    self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # socket for sending cmd
    self.socket_video = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # socket for receiving video stream
    self.socket_state=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)#state socket
    self.tello_ip=tello_ip
    self.tello_address = (tello_ip, tello_port)
    self.local_video_port = 11111  # port for receiving video stream
    self.last_height = 0
    self.socket.bind((local_ip, local_port))
    # thread for receiving cmd ack
    self.receive_thread = threading.Thread(target=self._receive_thread)
    self.receive_thread.daemon = True
    self.receive_thread.start()
    # to receive video -- send cmd: command, streamon
    self.socket.sendto(b'command', self.tello_address)
    print ('sent: command')
    self.socket.sendto(b'streamon', self.tello_address)
    print ('sent: streamon')
    self.socket_video.bind((local_ip, self.local_video_port))
    # thread for receiving video
    self.receive_video_thread = threading.Thread(target=self._receive_video_thread)
    self.receive_video_thread.daemon = True
    self.receive_video_thread.start()
    # state receive (the Tello broadcasts its state on UDP port 8890)
    self.results=None
    self.socket_state.bind((local_ip,8890))
    self.receive_state_thread=threading.Thread(target=self._recevie_state_thread)
    self.receive_state_thread.daemon=True
    self.receive_state_thread.start()
def __del__(self):
"""Closes the local socket."""
self.socket.close()
self.socket_video.close()
self.socket_state.close()
def read_frame(self):
"""Return the last frame from camera."""
if self.is_freeze:
return self.last_frame
else:
return self.frame
def read_state(self):
if self.results=='ok' or self.results==None:
return self.results
else:
return self.results[0:8]
def video_freeze(self, is_freeze=True):
"""Pause video output -- set is_freeze to True"""
self.is_freeze = is_freeze
if is_freeze:
self.last_frame = self.frame
def _receive_thread(self):
    """Listen to responses from the Tello.

    Runs as a thread, sets self.response to whatever the Tello last returned.
    """
    while True:
        try:
            self.response, ip = self.socket.recvfrom(3000)
            if len(self.log)!=0:
                # attach the ack to the most recent command's Stats entry
                self.log[-1].add_response(self.response)
            #print(self.response)
        except socket.error as exc:
            print ("Caught exception socket.error : %s" % exc)
def _receive_video_thread(self):
    """
    Listens for video streaming (raw h264) from the Tello.

    Runs as a thread, sets self.frame to the most recent frame Tello captured.
    """
    # NOTE(review): initialised as str and concatenated with recvfrom() data,
    # so this assumes Python 2 (where bytes is str); it would raise on Python 3.
    packet_data = ""
    while True:
        try:
            res_string, ip = self.socket_video.recvfrom(2048)
            packet_data += res_string
            # end of frame
            if len(res_string) != 1460:
                # a datagram shorter than the full 1460-byte payload marks frame end
                for frame in self._h264_decode(packet_data):
                    self.frame = frame
                packet_data = ""
        except socket.error as exc:
            print ("Caught exception socket.error : %s" % exc)
def _recevie_state_thread(self):
    """Listen for state datagrams on UDP 8890; tokens stored in self.results."""
    # NOTE: method name typo ('recevie') kept -- __init__ references it by
    # this exact name when creating the thread.
    while True:
        try:
            state, ip = self.socket_state.recvfrom(1024)
            out = state.replace(';', ';\n')
            self.results = out.split()
            #print("received result: " + str(self.results) )
        except socket.error as exc:
            print ("Caught exception socket.error : %s" % exc)
def _h264_decode(self, packet_data):
    """
    decode raw h264 format data from Tello

    :param packet_data: raw h264 data array
    :return: a list of decoded frame
    """
    res_frame_list = []
    frames = self.decoder.decode(packet_data)
    for framedata in frames:
        (frame, w, h, ls) = framedata
        if frame is not None:
            # print 'frame size %i bytes, w %i, h %i, linesize %i' % (len(frame), w, h, ls)
            # NOTE(review): np.fromstring is deprecated, and 'ls / 3' relies on
            # Python 2 integer division (linesize = 3 bytes per BGR pixel).
            frame = np.fromstring(frame, dtype=np.ubyte, count=len(frame), sep='')
            frame = (frame.reshape((h, ls / 3, 3)))
            # crop decoder padding: keep only the first w pixels of each row
            frame = frame[:, :w, :]
            res_frame_list.append(frame)
    return res_frame_list
def send_command(self, command):
    """
    Send a command to the Tello and wait for a response.

    Resends the command roughly once per second until the logged response
    contains 'ok', or raises Exception('command timeout') after
    MAX_TIME_OUT seconds.  The response itself is filled in asynchronously
    by _receive_thread via Stats.add_response.

    :param command: Command to send.
    :return (str): Response from Tello.
    """
    self.log.append(Stats(command, len(self.log)))
    print(">> send cmd: {}".format(command))
    print(len(self.log),self.log[-1].got_response())
    self.socket.sendto(command.encode('utf-8'), self.tello_address)
    print(len(self.log),self.log[-1].got_response())
    self.last = self.log[-1].got_response()
    start = time.time()
    #print(self.log[-1].got_response())
    timelen = 0.
    # NOTE(review): busy-wait with no sleep -- this spins a CPU core until
    # the receive thread records a response.
    while True:
        if not self.log[-1].got_response():
            continue
        elif (not self.last) and('ok' in str(self.log[-1].got_response())):
            # no prior response and an 'ok' arrived: done
            break
        elif ('ok' in str(self.last)) and('ok' in str(self.log[-1].got_response())):
            self.last = self.log[-1].got_response()
            continue
        elif 'ok' not in str(self.log[-1].got_response()):
            now = time.time()
            diff = now - start
            if diff > timelen:
                # once per elapsed second: log the stale response and resend
                print(self.log[-1].got_response())
                timelen += 1.
                self.socket.sendto(command.encode('utf-8'), self.tello_address)
                #print(len(self.log))
            if diff > self.MAX_TIME_OUT:
                print ('Max timeout exceeded... command %s' % command)
                raise Exception('command timeout')
    print ('Done!!! sent command: %s to %s' % (command, self.tello_ip))
    print (self.log[-1].got_response())
    return self.log[-1].got_response()
def set_abort_flag(self):
"""
Sets self.abort_flag to True.
Used by the timer in Tello.send_command() to indicate to that a response
timeout has occurred.
"""
self.abort_flag = True
def takeoff(self):
    """
    Command the drone to take off.

    Returns:
        str: Response from Tello, 'OK' or 'FALSE'.
    """
    response = self.send_command('takeoff')
    return response
def set_speed(self, speed):
    """
    Set the flight speed.

    Accepts KPH or MPH; the Tello API expects speeds from
    1 to 100 centimeters/second.

    Metric: .1 to 3.6 KPH
    Imperial: .1 to 2.2 MPH

    Args:
        speed (int|float): Speed.

    Returns:
        str: Response from Tello, 'OK' or 'FALSE'.
    """
    # Convert KPH (27.7778 cm/s per unit) or MPH (44.704 cm/s per unit)
    # to the integer cm/s value the firmware expects.
    factor = 44.704 if self.imperial is True else 27.7778
    cm_per_sec = int(round(float(speed) * factor))
    return self.send_command('speed %s' % cm_per_sec)
def rotate_cw(self, degrees):
    """
    Rotate clockwise.

    Args:
        degrees (int): Degrees to rotate, 1 to 360.

    Returns:
        str: Response from Tello, 'OK' or 'FALSE'.
    """
    command = 'cw %s' % degrees
    return self.send_command(command)
def rotate_ccw(self, degrees):
    """
    Rotate counter-clockwise.

    Args:
        degrees (int): Degrees to rotate, 1 to 360.

    Returns:
        str: Response from Tello, 'OK' or 'FALSE'.
    """
    command = 'ccw %s' % degrees
    return self.send_command(command)
def flip(self, direction):
    """
    Perform a flip maneuver.

    Args:
        direction (str): Direction to flip, 'l', 'r', 'f', 'b'.

    Returns:
        str: Response from Tello, 'OK' or 'FALSE'.
    """
    command = 'flip %s' % direction
    return self.send_command(command)
def get_response(self):
    """
    Return the most recently stored Tello response.

    Returns:
        int: response of tello.
    """
    return self.response
def get_height(self):
    """Return the height (dm) reported by the Tello.

    Returns:
        int: Height (dm) of tello, or the last successfully parsed
        height if the current response cannot be parsed.
    """
    height = self.send_command('height?')
    height = str(height)
    # ''.join is required on Python 3, where filter() returns an
    # iterator, not a string; without it int() always raised and the
    # method could only ever return last_height.
    height = ''.join(filter(str.isdigit, height))
    try:
        height = int(height)
        self.last_height = height
    except ValueError:
        # No digits in the response (e.g. an error string): keep the
        # last known good value.
        height = self.last_height
    return height
def get_battery(self):
    """Return the percent battery life remaining.

    Returns:
        int: Percent battery life remaining, or the raw response
        string if it cannot be parsed as an integer.
    """
    response = self.send_command('battery?')
    try:
        # Bare except kept: any parse failure falls back to the raw reply.
        return int(response)
    except:
        return response
def get_flight_time(self):
    """Return the number of seconds elapsed during flight.

    Returns:
        int: Seconds elapsed during flight, or the raw response
        string if it cannot be parsed as an integer.
    """
    response = self.send_command('time?')
    try:
        # Bare except kept: any parse failure falls back to the raw reply.
        return int(response)
    except:
        return response
def get_speed(self):
    """Return the current speed.

    Returns:
        int: Current speed in KPH or MPH (rounded to one decimal), or
        the raw response string if it cannot be parsed.
    """
    response = self.send_command('speed?')
    try:
        cm_per_sec = float(response)
        # Convert cm/s back to MPH (44.704) or KPH (27.7778).
        divisor = 44.704 if self.imperial is True else 27.7778
        response = round(cm_per_sec / divisor, 1)
    except:
        # Bare except kept: unparseable replies pass through unchanged.
        pass
    return response
def land(self):
    """Command the drone to land.

    Returns:
        str: Response from Tello, 'OK' or 'FALSE'.
    """
    response = self.send_command('land')
    return response
def move(self, direction, distance):
    """Fly in a direction for a distance.

    Accepts meters or feet; the Tello API expects distances from
    20 to 500 centimeters.

    Metric: .02 to 5 meters
    Imperial: .7 to 16.4 feet

    Args:
        direction (str): Direction to move, 'forward', 'back', 'right' or 'left'.
        distance (int|float): Distance to move.

    Returns:
        str: Response from Tello, 'OK' or 'FALSE'.
    """
    # Convert feet (30.48 cm each) or meters (100 cm each) to integer cm.
    scale = 30.48 if self.imperial is True else 100
    centimeters = int(round(float(distance) * scale))
    return self.send_command('%s %s' % (direction, centimeters))
def move_backward(self, distance):
    """Fly backward for a distance.

    See Tello.move() for unit handling.

    Args:
        distance (int): Distance to move.

    Returns:
        str: Response from Tello, 'OK' or 'FALSE'.
    """
    return self.move('back', distance)
def move_down(self, distance):
    """Fly downward for a distance.

    See Tello.move() for unit handling.

    Args:
        distance (int): Distance to move.

    Returns:
        str: Response from Tello, 'OK' or 'FALSE'.
    """
    return self.move('down', distance)
def move_forward(self, distance):
    """Fly forward for a distance.

    See Tello.move() for unit handling.

    Args:
        distance (int): Distance to move.

    Returns:
        str: Response from Tello, 'OK' or 'FALSE'.
    """
    return self.move('forward', distance)
def move_left(self, distance):
    """Fly left for a distance.

    See Tello.move() for unit handling.

    Args:
        distance (int): Distance to move.

    Returns:
        str: Response from Tello, 'OK' or 'FALSE'.
    """
    return self.move('left', distance)
def move_right(self, distance):
    """Fly right for a distance.

    See Tello.move() for unit handling.

    Args:
        distance (int): Distance to move.

    Returns:
        str: Response from Tello, 'OK' or 'FALSE'.
    """
    return self.move('right', distance)
def move_up(self, distance):
    """Fly upward for a distance.

    See Tello.move() for unit handling.

    Args:
        distance (int): Distance to move.

    Returns:
        str: Response from Tello, 'OK' or 'FALSE'.
    """
    return self.move('up', distance)
|
listeners.py | import os.path
import sys
from threading import Thread
from django.conf import settings
from django.db.models import signals as model_signals
try:
from filebrowser.views import filebrowser_post_upload
from filebrowser.views import filebrowser_post_rename
from filebrowser.views import filebrowser_post_delete
from filebrowser.settings import DIRECTORY
FILEBROWSER_PRESENT = True
except ImportError:
FILEBROWSER_PRESENT = False
from linkcheck.models import all_linklists, Url, Link
# Registered signal handlers are kept referenced here so Django's
# weakly-referenced signal connections are not garbage-collected.
listeners = []
# a global variable, showing whether linkcheck is still working
still_updating = False
#1, register listeners for the objects that contain Links
for linklist_name, linklist_cls in all_linklists.items():
    def check_instance_links(sender, instance, linklist_cls=linklist_cls, **kwargs):
        '''
        When an object is saved:
            new Link/Urls are created, checked

        When an object is modified:
            new Link/Urls are created, checked
            existing Link/Urls are checked
            disappearing Links are deleted
        '''
        def do_check_instance_links(sender, instance, linklist_cls=linklist_cls):
            try:
                # NOTE(review): this assigns a function-local
                # still_updating rather than the module-level flag -- a
                # "global still_updating" statement appears to be
                # missing here and in the finally clause; confirm intent.
                still_updating = True
                content_type = linklist_cls.content_type()
                new_links = []
                old_links = Link.objects.filter(content_type=content_type, object_id=instance.pk)
                linklists = linklist_cls().get_linklist(extra_filter={'pk':instance.pk,})
                if not linklists:
                    # This object is no longer watched by linkcheck according to object_filter
                    links = []
                else:
                    linklist = linklists[0]
                    links = linklist['urls']+linklist['images']
                for link in links:
                    # url structure = (field, link text, url)
                    url = link[2]
                    internal_hash = False
                    if url.startswith('#'):
                        # Same-page anchor: resolve it against the
                        # instance's own absolute URL before checking.
                        internal_hash = url
                        url = instance.get_absolute_url() + url
                    u, created = Url.objects.get_or_create(url=url)
                    l, created = Link.objects.get_or_create(url=u, field=link[0], text=link[1], content_type=content_type, object_id=instance.pk)
                    new_links.append(l.id)
                    u.still_exists = True
                    if internal_hash:
                        # Stash anchor + instance on the Url so u.check()
                        # can verify the fragment -- presumably consumed
                        # by Url.check(); verify in linkcheck.models.
                        setattr(u, '_internal_hash', internal_hash)
                        setattr(u, '_instance', instance)
                    u.check()
                # Links no longer present on the instance are removed.
                gone_links = old_links.exclude(id__in=new_links)
                gone_links.delete()
            finally:
                still_updating = False
        # Don't run in a separate thread if we are running tests
        if len(sys.argv)>1 and sys.argv[1] == 'test' or sys.argv[0] == 'runtests.py':
            do_check_instance_links(sender, instance, linklist_cls,)
        else:
            t = Thread(target=do_check_instance_links, args=(sender, instance, linklist_cls,))
            t.start()
    listeners.append(check_instance_links)
    model_signals.post_save.connect(listeners[-1], sender=linklist_cls.model)
    def delete_instance_links(sender, instance, linklist_cls=linklist_cls, **kwargs):
        '''delete all its links when an object is deleted'''
        content_type = linklist_cls.content_type()
        old_links = Link.objects.filter(content_type=content_type, object_id=instance.pk)
        old_links.delete()
    listeners.append(delete_instance_links)
    model_signals.post_delete.connect(listeners[-1], sender=linklist_cls.model)
    #2, register listeners for the objects that are targets of Links
    def instance_pre_save(sender, instance, ModelCls=linklist_cls.model, **kwargs):
        # Detect URL changes before saving so incoming links can be
        # flagged for re-checking.
        try:
            current_url = instance.get_absolute_url()
            previous = ModelCls.objects.get(pk=instance.pk)
            #log.debug('instance exists modifying')
            previous_url = previous.get_absolute_url()
            if previous_url == current_url:
                #log.debug('url did not change, return')
                return
            else:
                #log.debug('url changed')
                old_urls = Url.objects.filter(url__startswith=previous_url)
                if old_urls:
                    old_urls.update(status=False, message='Broken internal link')
                new_urls = Url.objects.filter(url__startswith=current_url)
                if new_urls:
                    # mark these urls' status as False, so that post_save will check them
                    new_urls.update(status=False, message='Should be checked now!')
        except:
            #log.debug('new instance, post_save is in charge of this')
            pass
    listeners.append(instance_pre_save)
    model_signals.pre_save.connect(listeners[-1], sender=linklist_cls.model)
    def instance_post_save(sender, instance, ModelCls=linklist_cls.model, linklist=linklist_cls, **kwargs):
        # Re-check URLs pointing at this instance after it is saved.
        try:
            current_url = instance.get_absolute_url()
            active = linklist.objects().filter(pk=instance.pk).count()
            if kwargs['created'] or (not active):
                new_urls = Url.objects.filter(url__startswith=current_url)
            else:
                new_urls = Url.objects.filter(status=False).filter(url__startswith=current_url)
            if new_urls:
                for url in new_urls:
                    url.check()
        except:
            pass
    listeners.append(instance_post_save)
    model_signals.post_save.connect(listeners[-1], sender=linklist_cls.model)
    def instance_pre_delete(sender, instance, ModelCls=linklist_cls.model, **kwargs):
        # Flag so other code can tell the instance is being deleted.
        instance.linkcheck_deleting = True
        try:
            deleted_url = instance.get_absolute_url()
            old_urls = Url.objects.filter(url__startswith=deleted_url).exclude(status=False)
            if old_urls:
                old_urls.update(status=False, message='Broken internal link')
        except:
            pass
    listeners.append(instance_pre_delete)
    model_signals.pre_delete.connect(listeners[-1], sender=linklist_cls.model)
################################################
# Integrate with django-filebrowser if present #
################################################
def handle_upload(sender, path=None, **kwargs):
    """Mark previously broken links to a newly uploaded file as working."""
    uploaded_url = os.path.join(
        settings.RELATIVE_MEDIA_URL, kwargs['file'].url_relative)
    broken_qs = Url.objects.filter(url=uploaded_url).filter(status=False)
    fixed = broken_qs.count()
    if fixed:
        broken_qs.update(status=True, message='Working document link')
        plural = 's' if fixed > 1 else ''
        msg = "Please note. Uploading %s has corrected %s broken link%s. See the Link Manager for more details" % (uploaded_url, fixed, plural)
        sender.user.message_set.create(message=msg)
def handle_rename(sender, path=None, **kwargs):
    """Update link statuses after a filebrowser file/directory rename."""
    old_url = os.path.join(settings.RELATIVE_MEDIA_URL, DIRECTORY, path, kwargs['filename'])
    new_url = os.path.join(settings.RELATIVE_MEDIA_URL, DIRECTORY, path, kwargs['new_filename'])
    # rename a file will cause the urls to it invalid
    # rename a directory will cause the urls to its files invalid
    old_url_qs = Url.objects.filter(url=old_url).filter(status=True)
    if isdir(kwargs['filename']):
        # Directory rename: every working URL under the old prefix breaks.
        old_url_qs = Url.objects.filter(url__startswith=old_url).filter(status=True)
    old_count = old_url_qs.count()
    if old_count:
        old_url_qs.update(status=False, message='Missing Document')
        msg = "Warning. Renaming %s has caused %s link%s to break. Please use the Link Manager to fix them" % (old_url, old_count, old_count>1 and 's' or '')
        sender.user.message_set.create(message=msg)
    # the new directory may fix some invalid links, so we make a check here.
    if isdir(kwargs['new_filename']):
        new_count = 0
        new_url_qs = Url.objects.filter(url__startswith=new_url).filter(status=False)
        # Re-check each broken URL under the new prefix individually;
        # Url.check() presumably updates the stored status itself --
        # TODO confirm against linkcheck.models.Url.
        for url in new_url_qs:
            if url.check():
                new_count += 1
    else:
        new_url_qs = Url.objects.filter(url=new_url).filter(status=False)
        new_count = new_url_qs.count()
        if new_count:
            new_url_qs.update(status=True, message='Working document link')
    if new_count:
        msg = "Please note. Renaming %s has corrected %s broken link%s. See the Link Manager for more details" % (new_url, new_count, new_count>1 and 's' or '')
        sender.user.message_set.create(message=msg)
def handle_delete(sender, path=None, **kwargs):
    """Mark working links to a deleted file as broken."""
    deleted_url = os.path.join(
        settings.RELATIVE_MEDIA_URL, DIRECTORY, path, kwargs['filename'])
    working_qs = Url.objects.filter(url=deleted_url).filter(status=True)
    broken = working_qs.count()
    if broken:
        working_qs.update(status=False, message='Missing Document')
        plural = 's' if broken > 1 else ''
        msg = "Warning. Deleting %s has caused %s link%s to break. Please use the Link Manager to fix them" % (deleted_url, broken, plural)
        sender.user.message_set.create(message=msg)
# Hook the handlers up only when django-filebrowser imported successfully
# at the top of this module.
if FILEBROWSER_PRESENT:
    filebrowser_post_upload.connect(handle_upload)
    filebrowser_post_rename.connect(handle_rename)
    filebrowser_post_delete.connect(handle_delete)
def isdir(filename):
    '''!!!only used for filebrowser

    Crude heuristic: the filebrowser signals only pass base names, so
    we cannot stat the path; treat any name without an extension
    (i.e. containing no dot) as a directory.

    :param filename: file or directory base name
    :return: True if the name contains no dot, False otherwise
    '''
    # Equivalent to the original "filename.count('.') == 0" branch.
    return '.' not in filename
|
job_server.py |
import atexit
import cProfile
import ctypes
import errno
import json
import os
import shutil
import signal
import socket
import sqlite3
import struct
import sys
import threading
import traceback
import tracemalloc
from datetime import datetime
from glob import glob
from importlib import reload
from multiprocessing import Event, Process, Queue
from socketserver import BaseRequestHandler, ThreadingTCPServer
from marshmallow import Schema, fields, missing
from sqlalchemy import (
Boolean, Column, Float, ForeignKey, Integer, String, Text, create_engine,
event, text)
from sqlalchemy.engine import Engine
from sqlalchemy.orm import relationship, scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from werkzeug.http import HTTP_STATUS_CODES
from . import app, plugins
from .errors import AfterglowError, MissingFieldError
from .errors.job import *
from .models import Job, JobResult, JobState, job_file_dir, job_file_path
from .resources.base import Date, DateTime, JSONType, Time
from .schemas import (
AfterglowSchema, Boolean as BooleanField, Date as DateField,
DateTime as DateTimeField, Float as FloatField, Time as TimeField)
__all__ = ['init_jobs', 'msg_hdr', 'msg_hdr_size']
# Wire format: each job-server message is preceded by a network-order
# (big-endian) 4-byte signed length header.
msg_hdr = '!i'
msg_hdr_size = struct.calcsize(msg_hdr)
# Load job plugins
job_types = plugins.load_plugins('job', 'resources.job_plugins', Job)
# Separate declarative base for the job database models defined below.
JobBase = declarative_base()
class DbJobState(JobBase):
    """Per-job state record (status/progress/timestamps), 1:1 with DbJob."""
    __tablename__ = 'job_states'
    # Shares its primary key with the owning job row.
    id = Column(
        ForeignKey('jobs.id', ondelete='CASCADE'), index=True,
        primary_key=True)
    # Job lifecycle status string; new jobs start out "pending".
    status = Column(String(16), nullable=False, index=True, default='pending')
    created_on = Column(
        DateTime, nullable=False, server_default=text('CURRENT_TIMESTAMP'))
    completed_on = Column(DateTime)
    # Completion percentage, 0..100.
    progress = Column(Float, nullable=False, default=0)
class DbJobResult(JobBase):
    """Base polymorphic job result record; plugin-specific subclasses are
    generated at runtime by db_from_schema()."""
    __tablename__ = 'job_results'
    # Shares its primary key with the owning job row.
    id = Column(
        ForeignKey('jobs.id', ondelete='CASCADE'), index=True,
        primary_key=True)
    # Polymorphic discriminator (job plugin type ID).
    type = Column(String(40), index=True)
    # NOTE(review): default=[] passes a single shared list instance as the
    # column default; SQLAlchemy may serialize it per-row via JSONType, but
    # confirm no mutation of the shared default can leak between rows.
    errors = Column(JSONType, nullable=False, default=[])
    warnings = Column(JSONType, nullable=False, default=[])
    __mapper_args__ = {'polymorphic_on': type, 'polymorphic_identity': ''}
class DbJobFile(JobBase):
    """Extra file attached to a job result (the actual bytes live on disk,
    located via job_file_path())."""
    __tablename__ = 'job_files'
    id = Column(Integer, primary_key=True, nullable=False)
    job_id = Column(
        ForeignKey('jobs.id', ondelete='CASCADE'), index=True)
    # Caller-visible file identifier, unique within the job.
    file_id = Column(String(40), nullable=False, index=True)
    mimetype = Column(String(40))
    # Optional extra HTTP headers to send with the file, as JSON.
    headers = Column(JSONType, default=None)
class DbJob(JobBase):
    """Main job record; polymorphic on the job plugin type."""
    __tablename__ = 'jobs'
    id = Column(Integer, primary_key=True, nullable=False)
    # Job plugin type ID; the polymorphic discriminator.
    type = Column(String(40), nullable=False, index=True)
    user_id = Column(Integer, index=True)
    # Client session the job belongs to, if any.
    session_id = Column(Integer, nullable=True, index=True)
    state = relationship(DbJobState, backref='job', uselist=False)
    result = relationship(
        DbJobResult, backref='job', uselist=False,
        foreign_keys=DbJobResult.id)
    files = relationship(DbJobFile, backref='job')
    __mapper_args__ = {'polymorphic_on': type}
# Mapping of marshmallow field types to SQLAlchemy column types, used by
# db_from_schema() to generate columns for plugin-specific schema fields.
# Field types absent from this mapping get no database column.
db_field_type_mapping = {
    fields.Boolean: Boolean,
    fields.Date: Date,
    fields.DateTime: DateTime,
    fields.Decimal: Float,
    fields.Dict: JSONType,
    fields.Email: Text,
    fields.Float: Float,
    fields.Integer: Integer,
    fields.List: JSONType,
    fields.Nested: JSONType,
    fields.String: Text,
    fields.Time: Time,
    fields.TimeDelta: Integer,
    fields.UUID: Text,
    fields.Url: Text,
    BooleanField: Boolean,
    DateField: Date,
    DateTimeField: DateTime,
    FloatField: Float,
    TimeField: Time,
}
try:
    # noinspection PyUnresolvedReferences
    db_field_type_mapping[fields.LocalDateTime] = Text
except AttributeError:
    # Newer marshmallow does not have LocalDateTime
    pass
def db_from_schema(base_class, schema: AfterglowSchema,
                   plugin_name: str = None):
    """
    Create a subclass of DbJob or DbJobResult for the given job plugin

    :param base_class: base db model class
    :param schema: job plugin class instance
    :param str plugin_name: job plugin name; required for job result classes

    :return: new db model class (or `base_class` itself when the plugin
        adds no mappable fields of its own)
    """
    if schema.__class__ is JobResult:
        # Plugin does not define its own result schema; use job_results table
        return base_class
    if isinstance(schema, Job):
        kind = 'jobs'
    else:
        kind = 'job_results'
    if plugin_name is None:
        plugin_name = schema.type
    # Get job-specific fields that are missing from the base schema and map
    # them to SQLAlchemy column types; skip fields that have no db counterpart
    base_fields = sum(
        [list(c().fields.keys()) for c in schema.__class__.__bases__
         if issubclass(c, Schema)], [])
    # NOTE(review): field.default is the marshmallow<3 spelling; marshmallow 3
    # renamed it to load_default/dump_default -- confirm against the pinned
    # marshmallow version.
    new_fields = [(name, Column(db_field_type_mapping[type(field)],
                                default=field.default
                                if field.default != missing else None))
                  for name, field in schema.fields.items()
                  if name not in base_fields and
                  type(field) in db_field_type_mapping]
    if not new_fields:
        # No extra fields with respect to parent schema; use parent table
        return base_class
    # Create a subclass with __tablename__ and polymorphic_identity derived
    # from the job type ID
    return type(
        'Db' + schema.__class__.__name__,
        (base_class,),
        dict(
            [
                ('__tablename__', plugin_name + '_' + kind),
                ('id', Column(ForeignKey(base_class.__tablename__ + '.id',
                                         ondelete='CASCADE'),
                              primary_key=True, nullable=False)),
                ('__mapper_args__', {'polymorphic_identity': plugin_name}),
            ] + new_fields),
    )
# Read/write lock by Fazal Majid
# (http://www.majid.info/mylos/weblog/2004/11/04-1.html)
# updated to support context manager protocol
class RWLock(object):
    """
    A simple reader-writer lock. Several readers can hold the lock
    simultaneously, XOR one writer. Write locks have priority over reads to
    prevent write starvation.

    State encoding in ``rwlock``: 0 = free, N > 0 = N readers, -1 = writer.
    ``acquire_read``/``acquire_write`` return ``self`` so they can be used
    as context managers: ``with lock.acquire_read(): ...``.
    """
    def __init__(self):
        self.rwlock = 0
        self.writers_waiting = 0
        self.monitor = threading.Lock()
        self.readers_ok = threading.Condition(self.monitor)
        self.writers_ok = threading.Condition(self.monitor)

    def acquire_read(self):
        """
        Acquire a read lock. Several threads can hold this type of lock.
        It is exclusive with write locks.

        :return: self (context manager support)
        """
        self.monitor.acquire()
        # Writer priority: block while a writer holds the lock OR is waiting.
        while self.rwlock < 0 or self.writers_waiting:
            self.readers_ok.wait()
        self.rwlock += 1
        self.monitor.release()
        return self

    def acquire_write(self):
        """
        Acquire a write lock. Only one thread can hold this lock, and
        only when no read locks are also held.

        :return: self (context manager support)
        """
        self.monitor.acquire()
        while self.rwlock != 0:
            self.writers_waiting += 1
            self.writers_ok.wait()
            self.writers_waiting -= 1
        self.rwlock = -1
        self.monitor.release()
        return self

    def promote(self):
        """
        Promote an already-acquired read lock to a write lock

        WARNING: it is very easy to deadlock with this method
        """
        self.monitor.acquire()
        # Drop our own read hold, then wait for all other readers to leave.
        self.rwlock -= 1
        while self.rwlock != 0:
            self.writers_waiting += 1
            self.writers_ok.wait()
            self.writers_waiting -= 1
        self.rwlock = -1
        self.monitor.release()

    def demote(self):
        """
        Demote an already-acquired write lock to a read lock
        """
        self.monitor.acquire()
        self.rwlock = 1
        # notify_all replaces the deprecated notifyAll alias (Python 3).
        self.readers_ok.notify_all()
        self.monitor.release()

    def release(self):
        """
        Release a lock, whether read or write.
        """
        self.monitor.acquire()
        if self.rwlock < 0:
            # Writer leaving.
            self.rwlock = 0
        else:
            # One reader leaving.
            self.rwlock -= 1
        wake_writers = self.writers_waiting and self.rwlock == 0
        wake_readers = self.writers_waiting == 0
        self.monitor.release()
        if wake_writers:
            self.writers_ok.acquire()
            self.writers_ok.notify()
            self.writers_ok.release()
        elif wake_readers:
            self.readers_ok.acquire()
            self.readers_ok.notify_all()
            self.readers_ok.release()

    def __enter__(self) -> None:
        """
        Context manager protocol support, called after acquiring the lock on
        either read or write

        :return: None
        """
        pass

    def __exit__(self, exc_type, exc_val, exc_tb) -> bool:
        """
        Context manager protocol support; releases the lock on exit

        :return: False if exception is raised within the "with" block
        """
        self.release()
        return exc_type is None
WINDOWS = sys.platform.startswith('win')
class JobWorkerProcess(Process):
    """
    Job worker process class
    """
    # Cancellation event; only created on Windows (POSIX uses SIGINT).
    abort_event = None

    def __init__(self, job_queue, result_queue):
        if WINDOWS:
            self.abort_event = Event()
        super(JobWorkerProcess, self).__init__(
            target=self.body, args=(job_queue, result_queue, self.abort_event))
        self.daemon = True
        # The worker starts running immediately upon construction.
        self.start()

    def body(self, job_queue, result_queue, abort_event):
        """
        Job worker process

        :param multiprocessing.Queue job_queue: single-producer
            multiple-consumer task queue; holds incoming jobs -- serialized Job
            objects
        :param multiprocessing.Queue result_queue: multiple-producer
            single-consumer result queue; holds job state/result updates
        :param multiprocessing.Event abort_event: event object used to cancel
            a job; only used on Windows, other systems use OS signals

        :return: None
        """
        prefix = '[Job worker {}]'.format(os.getpid())
        if WINDOWS:
            # Start an extra thread waiting for abort event and raising
            # KeyboardInterrupt in the main thread context
            stop_event = Event()
            main_tid = threading.current_thread().ident

            def abort_event_listener_body():
                while True:
                    abort_event.wait()
                    if stop_event.is_set():
                        break
                    # Inject KeyboardInterrupt into the worker's main thread
                    # via the CPython C API.
                    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
                        ctypes.c_long(main_tid),
                        ctypes.py_object(KeyboardInterrupt))
                    if res != 1:
                        # Injection failed; undo any partial state.
                        ctypes.pythonapi.PyThreadState_SetAsyncExc(
                            ctypes.c_long(main_tid), None)
            abort_event_listener = threading.Thread(
                target=abort_event_listener_body)
            abort_event_listener.start()
        else:
            stop_event = abort_event_listener = None
        # Close all possible data file db engine connections inherited from the
        # parent process
        from .resources import data_files
        for engine, session in data_files.data_file_engine.values():
            session.close_all()
            engine.dispose()
        # noinspection PyTypeChecker
        reload(data_files)
        from . import auth
        from .resources import users
        users.db.engine.dispose()
        # noinspection PyTypeChecker
        reload(users)
        if app.config.get('AUTH_ENABLED'):
            # noinspection PyProtectedMember
            users._init_users()
        # Memory leak detection support
        trace_malloc = app.config.get('JOB_TRACE_MALLOC')
        if trace_malloc:
            tracemalloc.start()
            prev_snapshot = None
        # Wait for an incoming job request
        app.logger.info('%s Waiting for jobs', prefix)
        while True:
            job_descr = job = None
            # noinspection PyBroadException
            try:
                job_descr = job_queue.get()
                if not job_descr:
                    # Empty job request = terminate worker
                    app.logger.info('%s Terminating', prefix)
                    break
                app.logger.debug('%s Got job request: %s', prefix, job_descr)
                # Create job object from description; job_descr is guaranteed
                # to contain at least type, ID, and user ID, and
                # the corresponding job plugin is guaranteed to exist
                try:
                    job = Job(
                        _queue=result_queue, _set_defaults=True, **job_descr)
                except Exception as e:
                    # Report job creation error to job server
                    app.logger.warning(
                        '%s Could not create job', prefix, exc_info=True)
                    result_queue.put(dict(
                        id=job_descr['id'],
                        state=dict(progress=100, status='completed'),
                        result=dict(errors=[str(e)]),
                    ))
                    continue
                # Set auth.current_user to the actual db user
                if job.user_id is not None:
                    user_session = users.db.create_scoped_session()
                    try:
                        auth.current_user = user_session.query(users.DbUser) \
                            .get(job.user_id)
                    except Exception:
                        print(
                            '!!! User db query error for user ID', job.user_id)
                        user_session.remove()
                        raise
                    if auth.current_user is None:
                        print('!!! No user for user ID', job.user_id)
                        auth.current_user = auth.AnonymousUser()
                else:
                    auth.current_user = auth.AnonymousUser()
                    user_session = None
                # Clear the possible cancel request
                if WINDOWS:
                    abort_event.clear()
                # Notify the job server that the job is running and run it
                result_queue.put(dict(id=job_descr['id'], pid=self.ident))
                job.state.status = 'in_progress'
                job.update()
                try:
                    if app.config.get('PROFILE'):
                        # Profile the job if enabled
                        print('{}\nProfiling job "{}" (ID {})'.format(
                            '-'*80, job.type, job.id))
                        cProfile.runctx(
                            'job.run()', {}, {'job': job}, sort='time')
                        print('-'*80)
                    else:
                        job.run()
                except KeyboardInterrupt:
                    # Job canceled
                    job.state.status = 'canceled'
                except Exception as e:
                    # Unexpected job exception
                    job.add_error(e)
                finally:
                    if user_session is not None:
                        user_session.remove()
                # Notify the job server about job completion
                if job.state.status != 'canceled':
                    job.state.status = 'completed'
                    job.state.progress = 100
                    job.state.completed_on = datetime.utcnow()
                job.update()
                # id=None signals "worker is idle again" to the server.
                result_queue.put(dict(id=None, pid=self.ident))
                # Close the possible data file db session
                # noinspection PyBroadException
                try:
                    with data_files.data_file_thread_lock:
                        data_files.data_file_engine[
                            data_files.get_root(job.user_id), os.getpid()
                        ][1].remove()
                except Exception:
                    pass
            except KeyboardInterrupt:
                # Ignore interrupt signals occasionally sent before the job has
                # started
                pass
            except Exception:
                app.logger.warning(
                    '%s Internal job queue error', prefix, exc_info=True)
            if trace_malloc:
                # Log the top allocation deltas after each job.
                snapshot = tracemalloc.take_snapshot()
                if prev_snapshot is not None:
                    stats = snapshot.compare_to(prev_snapshot, 'lineno')
                    app.logger.info(
                        '\n%s: %s -> %s\n%s\n',
                        prefix, job_descr,
                        (job.result.errors or 'OK') if job is not None
                        else 'not started',
                        '\n'.join(str(l) for l in stats[:5]))
                prev_snapshot = snapshot
        if WINDOWS:
            # Terminate abort event listener
            stop_event.set()
            abort_event.set()
            abort_event_listener.join()
class JobWorkerProcessWrapper(object):
    """
    Wrapper class that holds a :class:`JobWorkerProcess` instance and a job ID
    currently run by this process
    """
    # The wrapped worker process.
    process = None
    # RWLock guarding _job_id, which is read/written from multiple threads.
    _job_id_lock = None
    _job_id = None

    @property
    def job_id(self):
        """Currently running job ID"""
        with self._job_id_lock.acquire_read():
            return self._job_id

    @job_id.setter
    def job_id(self, value):
        with self._job_id_lock.acquire_write():
            self._job_id = value

    @property
    def ident(self):
        """Worker process ID"""
        return self.process.ident

    def __init__(self, job_queue, result_queue):
        self._job_id_lock = RWLock()
        # JobWorkerProcess starts itself in its constructor.
        self.process = JobWorkerProcess(job_queue, result_queue)

    def cancel_current_job(self):
        """
        Send abort signal to the current job that is being executed by the
        worker process

        :return: None
        """
        if WINDOWS:
            # No SIGINT on Windows; the worker listens for this event instead.
            self.process.abort_event.set()
        else:
            s = signal.SIGINT
            try:
                # In Python 3, SIGINT is a Signals enum instance
                s = s.value
            except AttributeError:
                pass
            # noinspection PyTypeChecker
            os.kill(self.ident, s)

    def join(self) -> None:
        """
        Wait for the worker process completion
        """
        self.process.join()
class JobRequestHandler(BaseRequestHandler):
    """
    Job TCP server request handler class

    Reads one length-prefixed JSON request from the socket, dispatches it
    by (resource, method), and writes back a length-prefixed JSON response
    of the form {"json": ..., "status": <HTTP status code>}.
    """
    # noinspection PyUnresolvedReferences
    def handle(self):
        server = self.server
        # noinspection PyUnresolvedReferences
        session = self.server.session_factory()
        http_status = 200
        try:
            # Read the 4-byte message length header, then the JSON payload.
            msg_len = bytearray()
            while len(msg_len) < msg_hdr_size:
                msg_len += self.request.recv(msg_hdr_size - len(msg_len))
            msg_len = struct.unpack(msg_hdr, msg_len)[0]
            msg = bytearray()
            while len(msg) < msg_len:
                msg += self.request.recv(msg_len - len(msg))
            try:
                msg = json.loads(msg)
                if not isinstance(msg, dict):
                    raise Exception()
            except Exception:
                raise JobServerError(reason='JSON dict expected')
            try:
                method = msg.pop('method').lower()
            except Exception:
                raise JobServerError(reason='Missing request method')
            if method == 'terminate':
                # Server shutdown request
                self.server.shutdown()
                self.server.server_close()
                return
            try:
                resource = msg.pop('resource').lower()
            except Exception:
                raise JobServerError(reason='Missing resource ID')
            try:
                user_id = msg['user_id']
            except Exception:
                raise JobServerError(reason='Missing user ID')
            if resource == 'jobs':
                if method == 'get':
                    job_id = msg.get('id')
                    if job_id is None:
                        # Return all user's jobs for the given client session;
                        # hide user id/name and result
                        result = [
                            Job(db_job, exclude=['result']).to_dict()
                            for db_job in session.query(DbJob).filter(
                                DbJob.user_id == user_id,
                                DbJob.session_id == msg.get('session_id'))
                        ]
                    else:
                        # Return the given job
                        db_job = session.query(DbJob).get(job_id)
                        if db_job is None or db_job.user_id != user_id:
                            raise UnknownJobError(id=job_id)
                        result = Job(db_job, exclude=['result']).to_dict()
                elif method == 'post':
                    # Submit a job
                    try:
                        job_type = msg['type']
                    except KeyError:
                        raise MissingFieldError(field='type')
                    if job_type not in server.db_job_types:
                        raise UnknownJobTypeError(type=job_type)
                    # Check that the specified session exists
                    session_id = msg.get('session_id')
                    if session_id is not None:
                        get_session(user_id, session_id)
                    # Need an extra worker?
                    with server.pool_lock.acquire_read():
                        pool_size = len(server.pool)
                        busy_workers = len(
                            [p for p in server.pool if p.job_id is not None])
                    if busy_workers == pool_size:
                        # All workers are currently busy
                        if server.max_pool_size and \
                                pool_size >= server.max_pool_size:
                            app.logger.warning(
                                'All job worker processes are busy; '
                                'consider increasing JOB_POOL_MAX')
                        else:
                            app.logger.info(
                                'Adding one more worker to job pool')
                            with server.pool_lock.acquire_write():
                                server.pool.append(JobWorkerProcessWrapper(
                                    server.job_queue, server.result_queue))
                    try:
                        # Convert message arguments to polymorphic job model
                        # and create an appropriate db job class instance
                        job_args = Job(_set_defaults=True, **msg).to_dict()
                        del job_args['state'], job_args['result']
                        db_job = server.db_job_types[job_type](
                            state=DbJobState(),
                            result=server.db_job_result_types[job_type](),
                            **job_args
                        )
                        session.add(db_job)
                        session.flush()
                        result = Job(db_job).to_dict()
                        server.job_queue.put(result)
                        session.commit()
                    except Exception:
                        session.rollback()
                        raise
                    http_status = 201
                elif method == 'delete':
                    # Delete existing job
                    try:
                        job_id = msg['id']
                    except KeyError:
                        raise MissingFieldError(field='id')
                    db_job = session.query(DbJob).get(job_id)
                    if db_job is None or db_job.user_id != user_id:
                        raise UnknownJobError(id=job_id)
                    if db_job.state.status not in ('completed', 'canceled'):
                        raise CannotDeleteJobError(status=db_job.state.status)
                    # Delete job files
                    for jf in db_job.files:
                        try:
                            os.unlink(
                                job_file_path(user_id, job_id, jf.file_id))
                        except OSError:
                            pass
                    try:
                        session.query(DbJob).filter(DbJob.id == job_id) \
                            .delete()
                        session.commit()
                    except Exception:
                        session.rollback()
                        raise
                    result = ''
                    http_status = 204
                else:
                    raise InvalidMethodError(
                        resource=resource, method=method.upper())
            elif resource == 'jobs/state':
                # Get/update job state
                try:
                    job_id = msg['id']
                except KeyError:
                    raise MissingFieldError(field='id')
                db_job = session.query(DbJob).get(job_id)
                if db_job is None or db_job.user_id != user_id:
                    raise UnknownJobError(id=job_id)
                if method == 'get':
                    # Return job state
                    result = JobState(db_job.state).to_dict()
                elif method == 'put':
                    # Cancel job
                    status = getattr(
                        JobState(_set_defaults=True, **msg), 'status', None)
                    if status is None:
                        raise MissingFieldError(field='status')
                    if status != 'canceled':
                        # Bug fix: was "status=msgstatus", an undefined name
                        # that raised NameError instead of reporting the
                        # invalid requested status.
                        raise CannotSetJobStatusError(status=status)
                    # Find worker process that is currently running the job
                    if db_job.state.status != 'in_progress':
                        raise CannotCancelJobError(status=db_job.state.status)
                    # Send abort signal to worker process running the given
                    # job. If no such process found, this means that the job
                    # either has not been dispatched yet or has been already
                    # completed; do nothing in both cases.
                    with server.pool_lock.acquire_read():
                        for p in server.pool:
                            if p.job_id == job_id:
                                p.cancel_current_job()
                                break
                    # Return the current job state
                    result = JobState(db_job.state).to_dict()
                else:
                    raise InvalidMethodError(
                        resource=resource, method=method.upper())
            elif resource == 'jobs/result':
                if method == 'get':
                    # Return job result
                    try:
                        job_id = msg['id']
                    except KeyError:
                        raise MissingFieldError(field='id')
                    db_job = session.query(DbJob).get(job_id)
                    if db_job is None or db_job.user_id != user_id:
                        raise UnknownJobError(id=job_id)
                    # Deduce the polymorphic job result type from the parent
                    # job model; add job type info for the /jobs/[id]/result
                    # view to be able to find the appropriate schema as well
                    result = job_types[db_job.type].fields['result'].nested(
                        db_job.result).to_dict()
                    result['type'] = db_job.type
                else:
                    raise InvalidMethodError(
                        resource=resource, method=method.upper())
            elif resource == 'jobs/result/files':
                if method == 'get':
                    # Return extra job result file data
                    try:
                        job_id = msg['id']
                    except KeyError:
                        raise MissingFieldError(field='id')
                    try:
                        file_id = msg['file_id']
                    except KeyError:
                        raise MissingFieldError(field='file_id')
                    job_file = session.query(DbJobFile).filter_by(
                        job_id=job_id, file_id=file_id).one_or_none()
                    if job_file is None or job_file.job.user_id != user_id:
                        raise UnknownJobFileError(id=file_id)
                    result = {
                        'filename': job_file_path(user_id, job_id, file_id),
                        'mimetype': job_file.mimetype,
                        'headers': job_file.headers or [],
                    }
                else:
                    raise InvalidMethodError(
                        resource=resource, method=method.upper())
            else:
                raise JobServerError(
                    reason='Invalid resource "{}"'.format(resource))
        except AfterglowError as e:
            # Construct JSON error response in the same way as
            # errors.afterglow_error_handler()
            http_status = int(getattr(e, 'code', 0)) or 400
            result = {
                'status': HTTP_STATUS_CODES.get(
                    http_status, '{} Unknown Error'.format(http_status)),
                'id': str(getattr(e, 'id', e.__class__.__name__)),
                'detail': str(e),
            }
            meta = getattr(e, 'meta', None)
            if meta:
                result['meta'] = dict(meta)
            if http_status == 500:
                # Bug fix: a stray trailing comma previously wrapped the
                # traceback list in a 1-tuple, unlike the generic handler
                # below.
                result.setdefault('meta', {})['traceback'] = \
                    traceback.format_tb(sys.exc_info()[-1])
        except Exception as e:
            # Wrap other exceptions in JobServerError
            # noinspection PyUnresolvedReferences
            http_status = JobServerError.code
            result = {
                'status': HTTP_STATUS_CODES[http_status],
                'id': e.__class__.__name__,
                'detail': str(e),
                'meta': {'traceback': traceback.format_tb(sys.exc_info()[-1])},
            }
        # noinspection PyBroadException
        try:
            session.remove()
        except Exception:
            pass
        # Format response message and send back to Flask
        msg = {'json': result, 'status': http_status}
        # noinspection PyBroadException
        try:
            msg = json.dumps(msg).encode('utf8')
            self.request.sendall(struct.pack(msg_hdr, len(msg)) + msg)
        except Exception:
            # noinspection PyBroadException
            try:
                app.logger.warning(
                    'Error sending job server response', exc_info=True)
            except Exception:
                pass
def job_server(notify_queue):
    """
    Main job server process

    Accepts job resource requests over a local TCP socket, dispatches jobs
    to a pool of worker processes, and persists job state/results in a
    per-run SQLite database.

    :param multiprocessing.Queue notify_queue: queue used to send messages to
        the main process on the job server process initialization and errors

    :return: None
    """
    # Create sync structures
    job_queue = Queue()     # jobs awaiting execution by worker processes
    result_queue = Queue()  # state/result updates coming back from workers
    terminate_listener_event = threading.Event()
    state_update_listener = None
    # Start TCP server, listen on the configured port
    try:
        tcp_server = ThreadingTCPServer(
            ('localhost', app.config['JOB_SERVER_PORT']), JobRequestHandler)
    except Exception as e:
        # Report the failure (e.g. port in use) to the parent process
        notify_queue.put(('exception', e))
        return
    tcp_server.job_queue = job_queue
    tcp_server.result_queue = result_queue
    app.logger.info(
        'Started Afterglow job server on port %d, pid %d',
        app.config['JOB_SERVER_PORT'], os.getpid())
    # Initialize worker process pool
    min_pool_size = app.config.get('JOB_POOL_MIN', 1)
    max_pool_size = app.config.get('JOB_POOL_MAX', 16)
    if min_pool_size > 0:
        pool = [JobWorkerProcessWrapper(job_queue, result_queue)
                for _ in range(min_pool_size)]
    else:
        pool = []
    pool_lock = RWLock()
    tcp_server.min_pool_size = min_pool_size
    tcp_server.max_pool_size = max_pool_size
    tcp_server.pool = pool
    tcp_server.pool_lock = pool_lock
    app.logger.info(
        'Started %d job worker process%s', min_pool_size,
        '' if min_pool_size == 1 else 'es')
    try:
        # Initialize job database
        # Create DbJob and DbJobResult subclasses for each job type based on
        # schema fields
        db_job_types, db_job_result_types = {}, {}
        for job_type, job_schema in job_types.items():
            db_job_types[job_type] = db_from_schema(
                DbJob, job_schema)
            db_job_result_types[job_type] = db_from_schema(
                DbJobResult, job_schema.fields['result'].nested(),
                job_schema.type)

        # Enable foreign keys in sqlite; required for ON DELETE CASCADE to work
        # when deleting jobs; set journal mode to WAL to allow concurrent
        # access from multiple Apache processes
        @event.listens_for(Engine, 'connect')
        def set_sqlite_pragma(dbapi_connection, _):
            if isinstance(dbapi_connection, sqlite3.Connection):
                cursor = dbapi_connection.cursor()
                cursor.execute('PRAGMA foreign_keys=ON')
                cursor.execute('PRAGMA journal_mode=WAL')
                cursor.close()

        # Recreate the job db on startup; also erase shared memory and journal
        # files
        db_path = os.path.join(
            os.path.abspath(app.config['DATA_ROOT']), 'jobs.db')
        for fp in glob(db_path + '*'):
            try:
                os.remove(fp)
            except OSError:
                pass
        engine = create_engine(
            'sqlite:///{}'.format(db_path),
            connect_args={'check_same_thread': False, 'isolation_level': None,
                          'timeout': 10},
        )
        JobBase.metadata.create_all(bind=engine)
        session_factory = scoped_session(sessionmaker(bind=engine))

        # Erase old job files
        try:
            shutil.rmtree(job_file_dir)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        # Listen for job state updates in a separate thread
        def state_update_listener_body():
            """
            Thread that listens for job state/result updates from worker
            processes and updates the corresponding database tables

            :return: None
            """
            while not terminate_listener_event.is_set():
                msg = result_queue.get()
                if not msg:
                    # None is the shutdown sentinel; empty msg is ignored
                    continue
                if not isinstance(msg, dict) or 'id' not in msg or \
                        'state' in msg and \
                        not isinstance(msg['state'], dict) or \
                        'result' in msg and \
                        not isinstance(msg['result'], dict):
                    app.logger.warning(
                        'Job state listener got unexpected message "%s"', msg)
                    continue
                job_id = msg['id']
                job_state = msg.get('state', {})
                job_result = msg.get('result', {})
                job_pid = msg.get('pid')
                job_file = msg.get('file')
                if job_pid is not None:
                    # Worker process assignment message
                    found = False
                    with pool_lock.acquire_read():
                        for _p in pool:
                            if _p.ident == job_pid:
                                _p.job_id = job_id
                                found = True
                                break
                    if not found:
                        app.logger.warning(
                            'Job state listener got a job assignment message '
                            'for non-existent worker process %s', job_pid)
                    continue
                sess = session_factory()
                try:
                    if job_file is not None:
                        # Job file creation message
                        # noinspection PyBroadException
                        try:
                            sess.add(DbJobFile(
                                job_id=job_id,
                                file_id=job_file['id'],
                                mimetype=job_file.get('mimetype'),
                                headers=job_file.get('headers')))
                            sess.commit()
                        except Exception:
                            sess.rollback()
                            # Bug fix: the "%s" placeholder had no argument,
                            # which raises a logging formatting error; pass
                            # the file id being reported
                            app.logger.warning(
                                'Could not add job file "%s" to database',
                                job_file.get('id'), exc_info=True)
                        continue
                    if not job_state and not job_result:
                        # Empty message, nothing to do
                        continue
                    job = sess.query(DbJob).get(job_id)
                    if job is None:
                        # State update for a job that was already deleted;
                        # silently ignore
                        continue
                    # noinspection PyBroadException
                    try:
                        # Update job state
                        for name, val in job_state.items():
                            setattr(job.state, name, val)
                        # Update job result
                        for name, val in job_result.items():
                            setattr(job.result, name, val)
                        sess.commit()
                    except Exception:
                        sess.rollback()
                        app.logger.warning(
                            'Could not update job state/result "%s"',
                            msg, exc_info=True)
                finally:
                    sess.close()
        state_update_listener = threading.Thread(
            target=state_update_listener_body)
        state_update_listener.start()

        # Set TCP server attrs related to job database
        tcp_server.db_job_types = db_job_types
        tcp_server.db_job_result_types = db_job_result_types
        tcp_server.session_factory = session_factory

        # Send the actual port number to the main process
        notify_queue.put(('success', tcp_server.server_address[1]))
        app.logger.info('Afterglow job server initialized')

        # Serve job resource requests until requested to terminate
        tcp_server.serve_forever()
    except (KeyboardInterrupt, SystemExit):
        pass
    except Exception as e:
        # Make sure the main process receives at least an error message if job
        # server process initialization failed
        notify_queue.put(('exception', e))
        app.logger.warning('Error in job server process', exc_info=True)
    finally:
        # Stop all worker processes
        with pool_lock.acquire_write():
            for _ in range(len(pool)):
                job_queue.put(None)
            for p in pool:
                p.join()
        # Shut down state update listener
        result_queue.put(None)
        terminate_listener_event.set()
        if state_update_listener is not None:
            state_update_listener.join()
        app.logger.info('Job server terminated')
def init_jobs():
    """
    Initialize the job subsystem

    Spawns the job server in a child process, waits for it to report success
    or failure, and registers an atexit hook that asks it to terminate.

    :return: None
    """
    # Start job server process and wait for its initialization report
    notify_queue = Queue()
    server_process = Process(target=job_server, args=(notify_queue,))
    server_process.start()
    status, payload = notify_queue.get()
    if status == 'exception':
        server_process.join()
        if isinstance(payload, OSError) and payload.errno == errno.EADDRINUSE:
            # Address already in use -- means that the job server has been
            # started by another WSGI process
            return
        # Other job server initialization error
        raise payload

    # Request job server to terminate on Flask shutdown
    def terminate_server():
        # noinspection PyBroadException
        try:
            request = json.dumps({'method': 'terminate'}).encode('ascii')
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect(('localhost', app.config['JOB_SERVER_PORT']))
            try:
                sock.sendall(struct.pack(msg_hdr, len(request)) + request)
            finally:
                sock.close()
            server_process.join()
        except Exception:
            pass
    atexit.register(terminate_server)
|
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind-compatible node."""
    # Request id counter.
    # NOTE(review): 'self.OBJID += 1' below rebinds the counter as an
    # instance attribute, so ids are per-instance rather than shared across
    # instances -- confirm intent.
    OBJID = 1

    def __init__(self, host, port, username, password):
        # HTTP Basic auth header sent with every request
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # Persistent connection, 30-second timeout, no strict mode
        self.conn = httplib.HTTPConnection(host, port, False, 30)
    def rpc(self, method, params=None):
        """Issue one JSON-RPC call.

        Returns the RPC result on success, the server's error object on an
        RPC-level error, or None on a transport/decoding failure.
        """
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']

    def getblockcount(self):
        """Return the node's current best-chain block height."""
        return self.rpc('getblockcount')
    def getwork(self, data=None):
        """Fetch new work (data=None) or submit a solved block (data given)."""
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate *x* to its low 32 bits (unsigned)."""
    return x & 0xffffffffL
def bytereverse(x):
    """Return *x* with its four bytes in reverse order (32-bit byte swap)."""
    b0 = (x << 24)
    b1 = (x << 8) & 0x00ff0000
    b2 = (x >> 8) & 0x0000ff00
    b3 = (x >> 24)
    return uint32(b0 | b1 | b2 | b3)
def bufreverse(in_buf):
    """Byte-swap every 32-bit word of a binary buffer, preserving word order."""
    swapped = []
    for off in range(0, len(in_buf), 4):
        (word,) = struct.unpack('@I', in_buf[off:off+4])
        swapped.append(struct.pack('@I', bytereverse(word)))
    return ''.join(swapped)
def wordreverse(in_buf):
    """Reverse the order of the 4-byte words in a buffer (dword swap)."""
    words = [in_buf[off:off+4] for off in range(0, len(in_buf), 4)]
    return ''.join(reversed(words))
class Miner:
    """One mining worker: pulls work over RPC, scans a nonce range for a
    proof-of-work solution, and submits any solution found."""
    def __init__(self, id):
        self.id = id
        # Number of nonces to scan per getwork; re-tuned after every pass to
        # target roughly settings['scantime'] seconds of hashing
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces over the given block header against the target.

        Returns (hashes_done, packed_nonce) on success or
        (hashes_done, None) when the range is exhausted.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]	# byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)

        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        for nonce in xrange(self.max_nonce):

            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue

            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)

            hash_str = hash.encode('hex')
            l = long(hash_str, 16)

            # proof-of-work test: hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)

        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the original work and submit it."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # hex chars 152:160 hold the nonce field of the 80-byte header
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One getwork/scan/submit cycle; sleeps and retries on RPC errors."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()

        (hashes_done, nonce_bin) = self.work(work['data'],
                             work['target'])

        time_end = time.time()
        time_diff = time_end - time_start

        # Re-tune the scan range so the next pass takes ~scantime seconds,
        # capped just below the full 32-bit nonce space
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL

        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Run getwork iterations forever against the configured node."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                 settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Worker entry point: run one Miner's endless getwork loop."""
    Miner(id).loop()
# Entry point: parse the key=value config file, apply defaults, then start
# one mining process per configured "thread".
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Fill in defaults for optional settings
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 9665
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # Config values parsed from the file are strings; normalize to numbers
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # Despite the name, each mining "thread" is a separate process
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)			# stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
heartmates.py | # PennApps
from flask import Flask, jsonify, render_template, request
import csv
import json
import requests
from requests.auth import HTTPBasicAuth
from threading import Thread
app = Flask(__name__)
@app.route('/')
def home():
    """Serve the landing page (http://127.0.0.1:5000/)."""
    return render_template('main.html')
@app.route('/nutrition_facts/', methods=['POST'])
def nutrition_facts():
    """POST endpoint: look up FoodEssentials product scores for the posted
    item names and return a flat {item: score} JSON object.

    Expects a JSON body {"items": [<name>, ...]} where each name is a key
    of the UPC_list dict below.
    """
    # Get data from post request
    items = json.loads(request.data)['items']
    # Pre-sized slots so each worker thread can store its response by index
    resps = ['' for x in xrange(len(items))]
    # Set up session
    FE_APIKey = 'n54k3jg9cpkwzvaam4vavc7c'
    # NOTE(review): 'appide' looks like a typo for 'appid' -- confirm against
    # the FoodEssentials createsession API
    params = {'uid': 'user1', 'devid':'device1',
              'appide': 'heartmates', 'f':'json',
              'api_key': FE_APIKey}
    url_create = 'http://api.foodessentials.com/createsession'
    resp = requests.get(url_create, params=params)
    sid = resp.json()['session_id']
    # Set profile
    # API requires entire JSON to be sent to set up profile
    # NOTE(review): if session_id is a string this concatenation yields
    # unquoted JSON ({"session_id":abc,...}) -- confirm the API accepts it
    prof_json = '{"session_id":' + str(sid) + ',"nutrients":[{"name":"Calcium","value":"true"},{"name":"Calories","value":"true"},{"name":"Calories from Fat","value":"false"},{"name":"Cholesterol","value":"false"},{"name":"Dietary Fiber","value":"true"},{"name":"Insoluble Fiber","value":"true"},{"name":"Iron","value":"true"},{"name":"Monounsaturated Fat","value":"true"},{"name":"Other Carbohydrate","value":"true"},{"name":"Polyunsaturated Fat","value":"true"},{"name":"Potassium","value":"true"},{"name":"Protein","value":"true"},{"name":"Saturated Fat","value":"false"},{"name":"Saturated Fat Calories","value":"false"},{"name":"Sodium","value":"false"},{"name":"Soluble Fiber","value":"true"},{"name":"Sugar Alcohol","value":"false"},{"name":"Sugars","value":"false"},{"name":"Total Carbohydrate","value":"true"},{"name":"Total Fat","value":"true"},{"name":"Vitamin A","value":"true"},{"name":"Vitamin C","value":"true"}],"allergens":[{"name":"Cereals","value":"false"},{"name":"Coconut","value":"false"},{"name":"Corn","value":"false"},{"name":"Egg","value":"true"},{"name":"Fish","value":"false"},{"name":"Gluten","value":"false"},{"name":"Lactose","value":"true"},{"name":"Milk","value":"true"},{"name":"Peanuts","value":"false"},{"name":"Sesame Seeds","value":"false"},{"name":"Shellfish","value":"false"},{"name":"Soybean","value":"false"},{"name":"Sulfites","value":"false"},{"name":"Tree Nuts","value":"false"},{"name":"Wheat","value":"false"}],"additives":[{"name":"Acidity Regulator","value":"true"},{"name":"Added Sugar","value":"false"},{"name":"Anti-Caking Agents","value":"true"},{"name":"Anti-Foaming Agent","value":"true"},{"name":"Antioxidants","value":"true"},{"name":"Artificial Color","value":"true"},{"name":"Artificial Flavoring Agent","value":"true"},{"name":"Artificial Preservative","value":"true"},{"name":"Bulking Agents","value":"true"},{"name":"Colors","value":"true"},{"name":"Emulsifiers","value":"true"},{"name":"Enzyme","value":"true"},{"name":"Firming Agent","value":"true"},{"name":"Flavor Enhancer","value":"true"},{"name":"Flour Treating Agent","value":"true"},{"name":"Food Acids","value":"true"},{"name":"Gelling Agents","value":"true"},{"name":"Glazing Agent","value":"true"},{"name":"Humectants","value":"true"},{"name":"Leavening Agent","value":"true"},{"name":"Mineral Salt","value":"true"},{"name":"Natural Color","value":"true"},{"name":"Natural Flavoring Agent","value":"true"},{"name":"Natural Preservative","value":"true"},{"name":"Preservatives","value":"true"},{"name":"Propellant","value":"true"},{"name":"Raising Agents","value":"true"},{"name":"Saturated Fat","value":"false"},{"name":"Sequestrant","value":"true"},{"name":"Stabilizers","value":"true"},{"name":"Sweeteners","value":"true"},{"name":"Thickeners","value":"true"},{"name":"Trans Fat","value":"true"},{"name":"Unsaturated Fat","value":"true"},{"name":"Vegetable Gum","value":"true"}],"myingredients":[],"mysort":[{"sort_variable":"Calories","sort_order":1,"variable_type":1}]}'
    params = {'json': prof_json, 'api_key': FE_APIKey}
    url_setprof = 'http://api.foodessentials.com/setprofile'
    resp = requests.post(url_setprof, params=params)
    # Get product info
    # NOTE(review): url_product is unused in this function (get_request
    # builds its own copy) -- left for reference
    url_product = 'http://api.foodessentials.com/productscore'
    # Practice UPC values
    UPC_list = {'Bacon' : '093966004656',
                'Broccoli': '032601025090',
                'Extra Firm Tofu' : '076371011075',
                'King Arthur Flour' : '030000012031',
                'Prairie Farms Milk' : '041234639642',
                'Quaker Steel Cut Oats' : '021908453361',
                'Sliced Peaches' :'024000167136',
                'Extra Virgin Oil' : '634039000016',
                'Kendall Brooke Salmon' : '15078'}
    # Multithreaded API calls to get item info, fully scalable
    threads = []
    for num in xrange(len(items)):
        params = {'u': UPC_list[items[num]], 'sid':sid, 'f':'json','api_key':FE_APIKey}
        threads.append(Thread(target=get_request, args=(params, num, resps)))
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    # Scrape product score from response JSON and return flattened JSON of all product scores
    scores = [str(resp.json()['product']['productscore']) for resp in resps]
    return json.dumps(dict(zip(items,scores)))
def get_request(params, num, resps):
    """Fetch one product score and store the raw response at resps[num]."""
    resps[num] = requests.get(
        'http://api.foodessentials.com/productscore', params=params)
@app.route('/postmates_delivery/<dropoff_address>/')
def postmates_delivery(dropoff_address):
    """GET endpoint: quote a Postmates delivery from a fixed pickup address
    to *dropoff_address*; returns fee, request time, and ETA as JSON."""
    # Preparing API call
    # SECURITY(review): test API key and customer id are hard-coded; move to
    # configuration before any production use
    PM_Test_APIKey = 'd184ecab-5f46-42fd-bbfc-28b73b88cf4e'
    PM_cust_id = 'cus_KAay_YCGWhyi_k'
    url = 'https://api.postmates.com'
    url_delivery = url + '/v1/customers/' + PM_cust_id + '/delivery_quotes'
    # NOTE(review): headers is built but never passed to requests.post below
    headers = {'user': 'd184ecab-5f46-42fd-bbfc-28b73b88cf4e'}
    data = {'pickup_address': '2001 Pennsylvania Avenue Philadelphia, PA 19130',
            'dropoff_address': dropoff_address}
    # Sending API request
    resp = requests.post(url_delivery, data=data, auth=HTTPBasicAuth('d184ecab-5f46-42fd-bbfc-28b73b88cf4e', ''))
    # Parsing API response
    rj = resp.json()
    # Strip the date digits/dashes, leading 'T' and trailing 'Z' from the ISO
    # timestamps, leaving ['HH', 'MM', 'SS']
    c_vals = (rj['created'].lstrip('0123456789-').lstrip('T').rstrip('Z')).split(':')
    eta_vals = (rj['dropoff_eta'].lstrip('0123456789-').lstrip('T').rstrip('Z')).split(':')
    # Fee arrives in cents; format as a dollar string like '$12.50'
    fee = ('$'+str(int(rj['fee'])/100.0)+'0')[:6]
    # Return fee, request time, and expected delivery time (eta)
    # NOTE(review): the '- 5' shifts hours from UTC toward US Eastern with no
    # wrap-around or DST handling -- confirm intended behavior
    return str(json.dumps({'fee': fee, 'created':str(int(c_vals[0]) - 5) +':'+ c_vals[1] +':'+ c_vals[2], 'eta': str(int(eta_vals[0]) - 5) +':'+ eta_vals[1] +':'+ eta_vals[2]}))
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run(debug=True)
|
test_socket.py | import io
import time
import unittest
from unittest import mock
import pytest
from engineio import exceptions
from engineio import packet
from engineio import payload
from engineio import socket
class TestSocket(unittest.TestCase):
    """Tests for engineio's Socket class, driven by a mocked server whose
    async plumbing is wired to real threads and queues."""
    def setUp(self):
        # Background threads started via the mock server are collected here
        # so tests can join them deterministically.
        self.bg_tasks = []

    def _get_mock_server(self):
        """Build a mock engineio server backed by real threading primitives."""
        mock_server = mock.Mock()
        mock_server.ping_timeout = 0.2
        mock_server.ping_interval = 0.2
        mock_server.ping_interval_grace_period = 0.001
        mock_server.async_handlers = True

        try:
            import queue
        except ImportError:
            import Queue as queue
        import threading
        mock_server._async = {
            'threading': threading.Thread,
            'queue': queue.Queue,
            'websocket': None,
        }

        def bg_task(target, *args, **kwargs):
            # Run in a real thread and remember it for _join_bg_tasks()
            th = threading.Thread(target=target, args=args, kwargs=kwargs)
            self.bg_tasks.append(th)
            th.start()
            return th

        def create_queue(*args, **kwargs):
            return queue.Queue(*args, **kwargs)

        mock_server.start_background_task = bg_task
        mock_server.create_queue = create_queue
        mock_server.get_queue_empty_exception.return_value = queue.Empty
        return mock_server

    def _join_bg_tasks(self):
        # Wait for every background thread spawned during the test
        for task in self.bg_tasks:
            task.join()

    def test_create(self):
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'sid')
        assert s.server == mock_server
        assert s.sid == 'sid'
        assert not s.upgraded
        assert not s.closed
        assert hasattr(s.queue, 'get')
        assert hasattr(s.queue, 'put')
        assert hasattr(s.queue, 'task_done')
        assert hasattr(s.queue, 'join')

    def test_empty_poll(self):
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'sid')
        with pytest.raises(exceptions.QueueEmpty):
            s.poll()

    def test_poll(self):
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'sid')
        pkt1 = packet.Packet(packet.MESSAGE, data='hello')
        pkt2 = packet.Packet(packet.MESSAGE, data='bye')
        s.send(pkt1)
        s.send(pkt2)
        assert s.poll() == [pkt1, pkt2]

    def test_poll_none(self):
        # A None in the queue signals end-of-stream and yields no packets
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'sid')
        s.queue.put(None)
        assert s.poll() == []

    def test_poll_none_after_packet(self):
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'sid')
        pkt = packet.Packet(packet.MESSAGE, data='hello')
        s.send(pkt)
        s.queue.put(None)
        assert s.poll() == [pkt]
        assert s.poll() == []

    def test_schedule_ping(self):
        mock_server = self._get_mock_server()
        mock_server.ping_interval = 0.01
        s = socket.Socket(mock_server, 'sid')
        s.send = mock.MagicMock()
        s.schedule_ping()
        time.sleep(0.05)
        assert s.last_ping is not None
        # '2' is the encoded PING packet
        assert s.send.call_args_list[0][0][0].encode() == '2'

    def test_schedule_ping_closed_socket(self):
        mock_server = self._get_mock_server()
        mock_server.ping_interval = 0.01
        s = socket.Socket(mock_server, 'sid')
        s.send = mock.MagicMock()
        s.closed = True
        s.schedule_ping()
        time.sleep(0.05)
        assert s.last_ping is None
        s.send.assert_not_called()

    def test_pong(self):
        # A PONG from the client triggers scheduling of the next ping
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'sid')
        s.schedule_ping = mock.MagicMock()
        s.receive(packet.Packet(packet.PONG))
        s.schedule_ping.assert_called_once_with()

    def test_message_async_handler(self):
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'sid')
        s.receive(packet.Packet(packet.MESSAGE, data='foo'))
        mock_server._trigger_event.assert_called_once_with(
            'message', 'sid', 'foo', run_async=True
        )

    def test_message_sync_handler(self):
        mock_server = self._get_mock_server()
        mock_server.async_handlers = False
        s = socket.Socket(mock_server, 'sid')
        s.receive(packet.Packet(packet.MESSAGE, data='foo'))
        mock_server._trigger_event.assert_called_once_with(
            'message', 'sid', 'foo', run_async=False
        )

    def test_invalid_packet(self):
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'sid')
        with pytest.raises(exceptions.UnknownPacketError):
            s.receive(packet.Packet(packet.OPEN))

    def test_timeout(self):
        # A stale last_ping beyond interval + grace closes the socket
        mock_server = self._get_mock_server()
        mock_server.ping_interval = 6
        mock_server.ping_interval_grace_period = 2
        s = socket.Socket(mock_server, 'sid')
        s.last_ping = time.time() - 9
        s.close = mock.MagicMock()
        s.send('packet')
        s.close.assert_called_once_with(wait=False, abort=False)

    def test_polling_read(self):
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'foo')
        pkt1 = packet.Packet(packet.MESSAGE, data='hello')
        pkt2 = packet.Packet(packet.MESSAGE, data='bye')
        s.send(pkt1)
        s.send(pkt2)
        environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo'}
        start_response = mock.MagicMock()
        packets = s.handle_get_request(environ, start_response)
        assert packets == [pkt1, pkt2]

    def test_polling_read_error(self):
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'foo')
        environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo'}
        start_response = mock.MagicMock()
        with pytest.raises(exceptions.QueueEmpty):
            s.handle_get_request(environ, start_response)

    def test_polling_write(self):
        mock_server = self._get_mock_server()
        mock_server.max_http_buffer_size = 1000
        pkt1 = packet.Packet(packet.MESSAGE, data='hello')
        pkt2 = packet.Packet(packet.MESSAGE, data='bye')
        p = payload.Payload(packets=[pkt1, pkt2]).encode().encode('utf-8')
        s = socket.Socket(mock_server, 'foo')
        s.receive = mock.MagicMock()
        environ = {
            'REQUEST_METHOD': 'POST',
            'QUERY_STRING': 'sid=foo',
            'CONTENT_LENGTH': len(p),
            'wsgi.input': io.BytesIO(p),
        }
        s.handle_post_request(environ)
        assert s.receive.call_count == 2

    def test_polling_write_too_large(self):
        mock_server = self._get_mock_server()
        pkt1 = packet.Packet(packet.MESSAGE, data='hello')
        pkt2 = packet.Packet(packet.MESSAGE, data='bye')
        p = payload.Payload(packets=[pkt1, pkt2]).encode().encode('utf-8')
        # Buffer limit set just below the payload size to force the error
        mock_server.max_http_buffer_size = len(p) - 1
        s = socket.Socket(mock_server, 'foo')
        s.receive = mock.MagicMock()
        environ = {
            'REQUEST_METHOD': 'POST',
            'QUERY_STRING': 'sid=foo',
            'CONTENT_LENGTH': len(p),
            'wsgi.input': io.BytesIO(p),
        }
        with pytest.raises(exceptions.ContentTooLongError):
            s.handle_post_request(environ)

    def test_upgrade_handshake(self):
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'foo')
        s._upgrade_websocket = mock.MagicMock()
        environ = {
            'REQUEST_METHOD': 'GET',
            'QUERY_STRING': 'sid=foo',
            'HTTP_CONNECTION': 'Foo,Upgrade,Bar',
            'HTTP_UPGRADE': 'websocket',
        }
        start_response = mock.MagicMock()
        s.handle_get_request(environ, start_response)
        s._upgrade_websocket.assert_called_once_with(environ, start_response)

    def test_upgrade(self):
        mock_server = self._get_mock_server()
        mock_server._async['websocket'] = mock.MagicMock()
        mock_ws = mock.MagicMock()
        mock_server._async['websocket'].return_value = mock_ws
        s = socket.Socket(mock_server, 'sid')
        s.connected = True
        environ = "foo"
        start_response = "bar"
        s._upgrade_websocket(environ, start_response)
        mock_server._async['websocket'].assert_called_once_with(
            s._websocket_handler
        )
        mock_ws.assert_called_once_with(environ, start_response)

    def test_upgrade_twice(self):
        mock_server = self._get_mock_server()
        mock_server._async['websocket'] = mock.MagicMock()
        s = socket.Socket(mock_server, 'sid')
        s.connected = True
        s.upgraded = True
        environ = "foo"
        start_response = "bar"
        with pytest.raises(IOError):
            s._upgrade_websocket(environ, start_response)

    def test_upgrade_packet(self):
        # An UPGRADE packet is answered with a NOOP on the polling queue
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'sid')
        s.connected = True
        s.receive(packet.Packet(packet.UPGRADE))
        r = s.poll()
        assert len(r) == 1
        assert r[0].encode() == packet.Packet(packet.NOOP).encode()

    def test_upgrade_no_probe(self):
        # Upgrade handshake must start with a PING probe; anything else fails
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'sid')
        s.connected = True
        ws = mock.MagicMock()
        ws.wait.return_value = packet.Packet(packet.NOOP).encode()
        s._websocket_handler(ws)
        assert not s.upgraded

    def test_upgrade_no_upgrade_packet(self):
        # Probe answered, but no UPGRADE follows -> upgrade aborted
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'sid')
        s.connected = True
        s.queue.join = mock.MagicMock(return_value=None)
        ws = mock.MagicMock()
        probe = 'probe'
        ws.wait.side_effect = [
            packet.Packet(packet.PING, data=probe).encode(),
            packet.Packet(packet.NOOP).encode(),
        ]
        s._websocket_handler(ws)
        ws.send.assert_called_once_with(
            packet.Packet(packet.PONG, data=probe).encode()
        )
        assert s.queue.get().packet_type == packet.NOOP
        assert not s.upgraded

    def test_close_packet(self):
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'sid')
        s.connected = True
        s.close = mock.MagicMock()
        s.receive(packet.Packet(packet.CLOSE))
        s.close.assert_called_once_with(wait=False, abort=True)

    def test_invalid_packet_type(self):
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'sid')
        pkt = packet.Packet(packet_type=99)
        with pytest.raises(exceptions.UnknownPacketError):
            s.receive(pkt)

    def test_upgrade_not_supported(self):
        # No websocket implementation available -> request rejected
        mock_server = self._get_mock_server()
        mock_server._async['websocket'] = None
        s = socket.Socket(mock_server, 'sid')
        s.connected = True
        environ = "foo"
        start_response = "bar"
        s._upgrade_websocket(environ, start_response)
        mock_server._bad_request.assert_called_once_with()

    def test_websocket_read_write(self):
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'sid')
        s.connected = False
        s.queue.join = mock.MagicMock(return_value=None)
        foo = 'foo'
        bar = 'bar'
        s.poll = mock.MagicMock(
            side_effect=[
                [packet.Packet(packet.MESSAGE, data=bar)],
                exceptions.QueueEmpty,
            ]
        )
        ws = mock.MagicMock()
        ws.wait.side_effect = [
            packet.Packet(packet.MESSAGE, data=foo).encode(),
            None,
        ]
        s._websocket_handler(ws)
        self._join_bg_tasks()
        assert s.connected
        assert s.upgraded
        assert mock_server._trigger_event.call_count == 2
        mock_server._trigger_event.assert_has_calls(
            [
                mock.call('message', 'sid', 'foo', run_async=True),
                mock.call('disconnect', 'sid', run_async=False),
            ]
        )
        ws.send.assert_called_with('4bar')

    def test_websocket_upgrade_read_write(self):
        # Full probe/upgrade handshake followed by message traffic
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'sid')
        s.connected = True
        s.queue.join = mock.MagicMock(return_value=None)
        foo = 'foo'
        bar = 'bar'
        probe = 'probe'
        s.poll = mock.MagicMock(
            side_effect=[
                [packet.Packet(packet.MESSAGE, data=bar)],
                exceptions.QueueEmpty,
            ]
        )
        ws = mock.MagicMock()
        ws.wait.side_effect = [
            packet.Packet(packet.PING, data=probe).encode(),
            packet.Packet(packet.UPGRADE).encode(),
            packet.Packet(packet.MESSAGE, data=foo).encode(),
            None,
        ]
        s._websocket_handler(ws)
        self._join_bg_tasks()
        assert s.upgraded
        assert mock_server._trigger_event.call_count == 2
        mock_server._trigger_event.assert_has_calls(
            [
                mock.call('message', 'sid', 'foo', run_async=True),
                mock.call('disconnect', 'sid', run_async=False),
            ]
        )
        ws.send.assert_called_with('4bar')

    def test_websocket_upgrade_with_payload(self):
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'sid')
        s.connected = True
        s.queue.join = mock.MagicMock(return_value=None)
        probe = 'probe'
        ws = mock.MagicMock()
        ws.wait.side_effect = [
            packet.Packet(packet.PING, data=probe).encode(),
            packet.Packet(packet.UPGRADE, data='2').encode(),
        ]
        s._websocket_handler(ws)
        self._join_bg_tasks()
        assert s.upgraded

    def test_websocket_upgrade_with_backlog(self):
        # While upgrading, polling GETs get NOOP ('6'); queued messages are
        # held back until the upgrade completes
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'sid')
        s.connected = True
        s.queue.join = mock.MagicMock(return_value=None)
        probe = 'probe'
        foo = 'foo'
        ws = mock.MagicMock()
        ws.wait.side_effect = [
            packet.Packet(packet.PING, data=probe).encode(),
            packet.Packet(packet.UPGRADE, data='2').encode(),
        ]
        s.upgrading = True
        s.send(packet.Packet(packet.MESSAGE, data=foo))
        environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=sid'}
        start_response = mock.MagicMock()
        packets = s.handle_get_request(environ, start_response)
        assert len(packets) == 1
        assert packets[0].encode() == '6'
        packets = s.poll()
        assert len(packets) == 1
        assert packets[0].encode() == '4foo'
        s._websocket_handler(ws)
        self._join_bg_tasks()
        assert s.upgraded
        assert not s.upgrading
        packets = s.handle_get_request(environ, start_response)
        assert len(packets) == 1
        assert packets[0].encode() == '6'

    def test_websocket_read_write_wait_fail(self):
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'sid')
        s.connected = False
        s.queue.join = mock.MagicMock(return_value=None)
        foo = 'foo'
        bar = 'bar'
        s.poll = mock.MagicMock(
            side_effect=[
                [packet.Packet(packet.MESSAGE, data=bar)],
                [packet.Packet(packet.MESSAGE, data=bar)],
                exceptions.QueueEmpty,
            ]
        )
        ws = mock.MagicMock()
        ws.wait.side_effect = [
            packet.Packet(packet.MESSAGE, data=foo).encode(),
            RuntimeError,
        ]
        ws.send.side_effect = [None, RuntimeError]
        s._websocket_handler(ws)
        self._join_bg_tasks()
        assert s.closed

    def test_websocket_ignore_invalid_packet(self):
        # An OPEN packet over websocket is skipped; traffic continues
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'sid')
        s.connected = False
        s.queue.join = mock.MagicMock(return_value=None)
        foo = 'foo'
        bar = 'bar'
        s.poll = mock.MagicMock(
            side_effect=[
                [packet.Packet(packet.MESSAGE, data=bar)],
                exceptions.QueueEmpty,
            ]
        )
        ws = mock.MagicMock()
        ws.wait.side_effect = [
            packet.Packet(packet.OPEN).encode(),
            packet.Packet(packet.MESSAGE, data=foo).encode(),
            None,
        ]
        s._websocket_handler(ws)
        self._join_bg_tasks()
        assert s.connected
        assert mock_server._trigger_event.call_count == 2
        mock_server._trigger_event.assert_has_calls(
            [
                mock.call('message', 'sid', foo, run_async=True),
                mock.call('disconnect', 'sid', run_async=False),
            ]
        )
        ws.send.assert_called_with('4bar')

    def test_send_after_close(self):
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'sid')
        s.close(wait=False)
        with pytest.raises(exceptions.SocketIsClosedError):
            s.send(packet.Packet(packet.NOOP))

    def test_close_after_close(self):
        # Second close() is a no-op; disconnect fires only once
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'sid')
        s.close(wait=False)
        assert s.closed
        assert mock_server._trigger_event.call_count == 1
        mock_server._trigger_event.assert_called_once_with(
            'disconnect', 'sid', run_async=False
        )
        s.close()
        assert mock_server._trigger_event.call_count == 1

    def test_close_and_wait(self):
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'sid')
        s.queue = mock.MagicMock()
        s.close(wait=True)
        s.queue.join.assert_called_once_with()

    def test_close_without_wait(self):
        mock_server = self._get_mock_server()
        s = socket.Socket(mock_server, 'sid')
        s.queue = mock.MagicMock()
        s.close(wait=False)
        assert s.queue.join.call_count == 0
|
utils.py | from PIL import Image
import os
import shutil
import pandas as pd
import threading
import numpy as np
from keras.preprocessing.image import img_to_array, load_img
def load_image(label, path, q=None):
    """Load one image as a (1, H, W, C) array and optionally publish it on a queue.

    label : a real number (the target value for this image).
    path : path of the image file.
    q : optional Queue; when given, {'y': label, 'data': arr} is put on it.

    Returns the batched image array.
    """
    img = load_img(path)
    arr = img_to_array(img)
    # Prepend a batch axis so a single sample can be fed to Keras directly.
    arr = np.reshape(arr, (1,) + arr.shape)
    data = {'y': label, 'data': arr}
    if q:
        q.put(data)
        # NOTE(review): task_done() is normally called by the *consumer* after
        # q.get(); calling it here, right after put(), makes q.join() return
        # before any consumer has processed the item -- confirm this is intended.
        q.task_done()
    return arr
def ImgProcess(file, outdir, size):
    """Pad one image to a white square, shrink it to `size`, and save it.

    file : mapping with a 'Files' key holding the source image path.
    outdir : destination directory (file keeps its basename).
    size : (width, height) bound passed to PIL's thumbnail().
    """
    img = Image.open(file['Files'])
    oldsize = img.size
    # Pad to a square whose side is the LONGER dimension. The original used
    # the height for both sides, which cropped images wider than tall.
    side = max(oldsize)
    new_img = Image.new('RGB', (side, side), (255, 255, 255))
    new_img.paste(img)
    # thumbnail() resizes in place, preserving aspect ratio.
    # (Image.ANTIALIAS is removed in Pillow >= 10; keep for this file's era.)
    new_img.thumbnail(size, Image.ANTIALIAS)
    new_img.save(os.path.join(outdir, file['Files'].split('/')[-1]))
def CropAndResize(indir, output_size, label):
    """Pad every labelled image to a square, resize it, and write a new CSV.

    indir : directory prefix used to name the output directory.
    output_size : (width, height) passed to ImgProcess.
    label : path of a CSV with a 'Files' column of image paths.

    Output goes to a sibling directory named '<indir>_<size>'.
    """
    # if label:
    label = pd.read_csv(label)
    # NOTE(review): DataFrame(label) may share data with `label`, so the path
    # rewrites below can show up in the saved CSV too -- verify intent.
    outlable = pd.DataFrame(label)
    outdir = indir+'_'+str.replace(str(output_size), ' ', '')
    # Recreate the output directory from scratch on every run.
    if os.path.exists(outdir):
        shutil.rmtree(outdir)
    os.mkdir(outdir)
    for index, row in label.iterrows():
        # Point the label entry at the processed copy of the file.
        label['Files'][index] = str(os.path.join(outdir, row['Files'].split('/')[-1]))
        # One worker thread per image. NOTE(review): the threads are never
        # joined, so the CSV below may be written before processing finishes.
        t = threading.Thread(target=ImgProcess, args=(row,outdir,output_size,))
        t.start()
    size = str(output_size)
    size = str.replace(size, ' ', '')
    outlable.to_csv(os.path.join(outdir, indir.split('/')[-1]+
                                 size)+'.csv')
    print "DONE!"
# files_name = []
# for f in os.listdir(in_dir):
# if f.endswith('.jpg'):
# CropAndResize('./SCUT_FBP', (227, 227), './SCUT_FBP.csv')
|
main.py | import sys
sys.path.append('./personalized-instagram-scraper/')
from igramscraper.instagram import Instagram
import argparse
import pandas as pd
import threading
from time import sleep
from igramscraper.exception import *
def instagram_login(username: str, password: str):
    """Authenticate against Instagram and return the logged-in client.

    :param username: the instagram username
    :param password: the instagram password
    :return: the Instagram object carrying the logged-in session
    """
    client = Instagram()
    client.with_credentials(username, password, './cache/')
    client.login()
    return client
def download_image(hashtag, medias):
    """Download every available resolution of each media post for a hashtag.

    :param hashtag: hashtag name, also used as the destination folder name
    :param medias: iterable of media objects exposing image_*_url,
                   identifier and short_code attributes
    :return: None (files are written under ./<hashtag>/)
    """
    # BUG FIX: `import urllib` alone does not load the `urllib.request`
    # submodule, so urllib.request.urlopen raised AttributeError unless some
    # other module had imported it first.
    import urllib.request
    import os

    def _fetch(url, prefix, identifier, short_code):
        # Save one resolution variant; silently skipped when the URL is absent.
        if url is None:
            return
        open_url = urllib.request.urlopen(url)  # opening the url of the image
        name = prefix + '-' + identifier + '-' + short_code + '.jpg'
        with open(os.path.join('.', hashtag, name), 'wb') as output:
            # opening a binary object and save the image from the url content
            output.write(open_url.read())

    os.makedirs(os.path.join('.', hashtag), exist_ok=True)  # create the folder for the hashtag if not exist
    print('Start download')
    counter = 0
    for media in medias:
        identifier = media.identifier  # save the instagram identifier
        short_code = media.short_code  # save the instagram code
        # Same post in the three resolutions the API exposes.
        _fetch(media.image_high_resolution_url, 'hq', identifier, short_code)
        _fetch(media.image_low_resolution_url, 'lw', identifier, short_code)
        _fetch(media.image_standard_resolution_url, 'st', identifier, short_code)
        counter += 1
    print(f'Downloaded {counter} images')
def get_posts_by_hashtag(instagram, hashtag, number_of_post=200):
    """Fetch posts for one or more hashtags and download them in background threads.

    :param instagram: list of logged-in Instagram clients, or a single client
    :param hashtag: list of hashtag dicts ({'hashtag', 'min_time', 'max_time',
                    'count'}) or a single hashtag string
    :param number_of_post: posts requested per hashtag per session
    :return: tuple (hashtag, started download threads, number of posts fetched)
    """
    total_number_of_post = 0
    thread: list = []
    if isinstance(instagram, list) and isinstance(hashtag, list):
        # Several accounts: rotate the hashtags over the available clients.
        hashtag_count = 0
        while (hashtag_count < len(hashtag)):
            for index, ig in enumerate(instagram):
                if index + hashtag_count >= len(hashtag):
                    break
                if hashtag_count != 0 and hashtag_count >= len(instagram):
                    print('Index: {}'.format(index + hashtag_count))
                    print(f'Hashtag count: {hashtag_count}')
                    keyword = hashtag[index + hashtag_count]
                else:
                    print(f'Index: {index}')
                    keyword = hashtag[index]
                print('Actual Hashtag: {}'.format(keyword['hashtag']))
                print('Account: {}'.format(ig.session_username))
                try:
                    # Resume from the previously saved time window when available.
                    if keyword['min_time'] == 0 and keyword['max_time'] == 0:
                        medias = ig.get_medias_by_tag(keyword['hashtag'], count=number_of_post)
                    elif keyword['max_time'] == 0:
                        medias = ig.get_medias_by_tag(keyword['hashtag'], count=number_of_post,
                                                      min_timestamp=keyword['min_time'])
                    elif keyword['min_time'] == 0:
                        medias = ig.get_medias_by_tag(keyword['hashtag'], count=number_of_post,
                                                      max_timestamp=keyword['max_time'])
                    else:
                        medias = ig.get_medias_by_tag(keyword['hashtag'], count=number_of_post,
                                                      max_timestamp=keyword['max_time'],
                                                      min_timestamp=keyword['min_time'])
                    if len(medias) == 0:
                        print('Hashtag not found')
                        hashtag_count += 1
                        continue
                    print('Get max_time and min_time for the hashtag')
                    # get the max_timestamp in the first position of medias and
                    # the min_timestamp in the last position of medias
                    last_media = medias[0]
                    old_media = medias[-1]
                    if keyword['max_time'] < last_media.created_time:
                        keyword['max_time'] = last_media.created_time
                    if keyword['min_time'] > old_media.created_time or keyword['min_time'] == 0:
                        keyword['min_time'] = old_media.created_time
                    thread.append(threading.Thread(target=download_image, args=(keyword['hashtag'], medias)))
                    thread[-1].start()
                    total_number_of_post += len(medias)
                    keyword['count'] += len(medias)
                    print('Incremento hashtag_count')
                    hashtag_count += 1
                    sleep(500)  # After a request of number_of_post, sleep 500s to simulate human interaction
                except InstagramException:
                    # The hashtag has no images or the connection failed.
                    print('Instagram Problem')
                    hashtag_count += 1
                    continue
                except:
                    # Best-effort: on any other failure, return what we have so far.
                    return hashtag, thread, total_number_of_post
                if index + hashtag_count > len(hashtag):
                    print('il prossimo index è oltre il numero di hashtag')
                    break
    elif isinstance(instagram, Instagram) and isinstance(hashtag, list):
        # Single account, several hashtags: process them sequentially.
        for tag in hashtag:
            if tag['min_time'] == 0 and tag['max_time'] == 0:
                medias = instagram.get_medias_by_tag(tag['hashtag'], count=number_of_post)
            elif tag['max_time'] == 0:
                medias = instagram.get_medias_by_tag(tag['hashtag'], count=number_of_post,
                                                     min_timestamp=tag['min_time'])
            elif tag['min_time'] == 0:
                medias = instagram.get_medias_by_tag(tag['hashtag'], count=number_of_post,
                                                     max_timestamp=tag['max_time'])
            else:
                medias = instagram.get_medias_by_tag(tag['hashtag'], count=number_of_post,
                                                     max_timestamp=tag['max_time'],
                                                     min_timestamp=tag['min_time'])
            # get the max_timestamp in the first position of medias and the
            # min_timestamp in the last position of medias
            last_media = medias[0]
            old_media = medias[-1]
            if tag['max_time'] < last_media.created_time:
                tag['max_time'] = last_media.created_time
            if tag['min_time'] > old_media.created_time:
                # BUG FIX: the original assigned old_media.created_time to
                # tag['max_time'] here, clobbering the upper bound instead of
                # lowering the lower bound (cf. the parallel branch above).
                tag['min_time'] = old_media.created_time
            thread.append(threading.Thread(target=download_image, args=(tag['hashtag'], medias)))
            thread[-1].start()
            total_number_of_post += len(medias)
            sleep(500)
    elif isinstance(instagram, Instagram) and isinstance(hashtag, str):
        # Single account, single hashtag string: one request, one thread.
        medias = instagram.get_medias_by_tag(hashtag, count=number_of_post)
        thread.append(threading.Thread(target=download_image, args=(hashtag, medias)))
        thread[-1].start()
        total_number_of_post += len(medias)
    return hashtag, thread, total_number_of_post
# Command-line interface: either a CSV of accounts or one username/password
# pair, and either a CSV of hashtags or one hashtag, must be supplied.
parser = argparse.ArgumentParser()
parser.add_argument('--account_list', help='the instagram accounts list')
parser.add_argument('--login_username', help='The username to be used for login')
parser.add_argument('--login_password', help='The username password')
parser.add_argument('--hashtag_list', help='the file that contains the list of hashtags')
parser.add_argument('--hashtag', help='The hashtag used to download data')
parser.add_argument('--total_number_of_post', help='The total number of posts to be download')
parser.add_argument('--session_number_of_post', help='The number of posts to get for each request')
args = parser.parse_args()
# Validate the mutually-required argument pairs up front.
if (args.account_list is None) and ((args.login_username is None) or (args.login_password is None)):
    raise Exception('To work need accounts list or a single account')
if (args.hashtag_list is None) and (args.hashtag is None):
    raise Exception('To work need hashtags list or a single hashtag')
if __name__ == '__main__':
    # --- Build the client(s): a list of accounts from CSV, or a single login.
    if args.account_list is not None:
        try:
            ig_accounts = pd.read_csv(args.account_list, sep=',', names=['ACCOUNT', 'PW'], header=0)
            instagram: list = []
            for index, row in ig_accounts.iterrows():
                try:
                    instagram.append(instagram_login(row['ACCOUNT'], row['PW']))
                except InstagramAuthException:
                    # Skip accounts that cannot authenticate.
                    print('Account {} has login problem proceed to next account'.format(row['ACCOUNT']))
                    print(InstagramAuthException)
                    continue
        except FileNotFoundError:
            raise FileNotFoundError('File not found')
        except:
            # NOTE(review): bare except hides real errors and can leave
            # `instagram` undefined or partially filled -- confirm intent.
            print('Error')
    else:
        instagram = instagram_login(str(args.login_username), str(args.login_password))
    # --- Build the hashtag worklist: CSV rows as dicts, or a single string.
    if args.hashtag_list is not None:
        try:
            ig_hashtag = pd.read_csv(args.hashtag_list, sep=',', names=['hashtag', 'min_time', 'max_time', 'count'],
                                     header=0)
            hashtag: list = []
            for index, row in ig_hashtag.iterrows():
                hashtag.append(
                    {'hashtag': str(row['hashtag']), 'min_time': int(row['min_time']), 'max_time': int(row['max_time']),
                     'count': int(row['count'])})
        except FileNotFoundError:
            raise FileNotFoundError('File not found')
        except:
            print('Error')
    else:
        hashtag = str(args.hashtag)
    total_number_of_post = 0
    """
    Here a session is the single interation of the while that means is the number of downloaded posts for each hashtag
    after one request
    The end of the download is caused by the reached number of the total_number_of_post.
    """
    while (total_number_of_post < int(args.total_number_of_post)):
        update_hashtag, thread, session_number_of_post = get_posts_by_hashtag(instagram, hashtag,
                                                                              number_of_post=
                                                                              int(args.session_number_of_post))
        # update hashtag_file with the new time windows and counts
        if args.hashtag_list is not None and isinstance(update_hashtag, list):
            ig_hashtag = pd.DataFrame.from_dict(update_hashtag, orient='columns')
            ig_hashtag.to_csv(str(args.hashtag_list), sep=',')
        elif isinstance(update_hashtag, list):
            # NOTE(review): `ig_hashtag` is undefined on this path when no
            # --hashtag_list was given -- this line would raise NameError.
            ig_hashtag.to_csv('./hashtag_list.csv', sep=',')
        if len(thread) == 0:
            print('No Download Created')
            continue
        else:
            # wait for download completion
            print('Attendo completamento')
            for th in thread:
                th.join()
        total_number_of_post += session_number_of_post * int(len(hashtag))
        hashtag = update_hashtag  # update hashtag content to continue
        sleep(100)
tcpserver.py | import socket
import threading
class TCPserver():
    """Minimal threaded TCP server that answers every connection with index.html."""

    def __init__(self, ip, port):
        # Bind address is fixed at construction; listening starts in Server().
        self.server_ip = ip
        self.server_port = port

    def Server(self):
        """Accept connections forever, handling each client in its own thread."""
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server.bind((self.server_ip, self.server_port))
        # Start listening with a small accept backlog.
        server.listen(5)
        print(f'Server is listening on {self.server_ip} : {self.server_port}')
        while True:
            client, address = server.accept()
            print(f'Accepted connection from {address}:')
            # One thread per client so slow clients do not block accept().
            client_handler = threading.Thread(target=self.handle_client, args=(client,))
            client_handler.start()

    def handle_client(self, client_socket):
        """Read one request chunk and reply with index.html as an HTTP/1.0 response."""
        with client_socket as sock:
            receive_data = sock.recv(1024)
            print(f'Server Received:{receive_data.decode("utf-8")}')
            # BUG FIX: close the file handle deterministically (it leaked before).
            with open('index.html') as f:
                html = f.read()
            # BUG FIX: HTTP requires CRLF line endings and a CRLFCRLF separator
            # between the status line and the body (was '\n\n ' with a stray space).
            tosend = ('HTTP/1.0 200 OK\r\n\r\n' + html).encode()
            # sendall() retries partial writes, unlike send().
            sock.sendall(tosend)
if __name__ == "__main__":
    # Launch the demo server on localhost:9999 (blocks forever).
    tcpServer = TCPserver('localhost', 9999)
    tcpServer.Server()
|
netdjango_connect_processes.py | #!/usr/bin/env python
from netmiko import ConnectHandler
from datetime import datetime
from net_system.models import NetworkDevice, Credentials
import django
from multiprocessing import Process, current_process
import time
def main():
    """Run 'show version' on every NetworkDevice, one OS process per device."""
    # django.setup() must run before touching ORM objects outside manage.py.
    django.setup()
    start_time = datetime.now()
    devices = NetworkDevice.objects.all()
    procs = []
    for device in devices:
        my_proc = Process(target=show_version, args=(device,)) # trailing comma: args must be a tuple
        my_proc.start()
        procs.append(my_proc)
    # Wait for every worker before reporting the elapsed time.
    for a_proc in procs:
        print a_proc
        a_proc.join()
    execute_time = datetime.now() - start_time
    print "Time to Execute: ", execute_time
def show_version(device):
    """SSH to one device with its stored credentials and print 'show version'."""
    creds = device.credentials
    remote_conn = ConnectHandler(device_type=device.device_type,
                                 ip=device.ip_address,
                                 username=creds.username,
                                 password=creds.password,
                                 port=device.port,
                                 secret='')
    # Banner the output so interleaved process output stays readable.
    print
    print '#' * 80
    print remote_conn.send_command("show version")
    print '#' * 80
# Script entry point.
if __name__ == '__main__':
    main()
|
soc_net_bench.py | #! /usr/bin/env python
#
# ===============================================================
# Description: Benchmark to emulate TAO workload.
#
# Created: 2014-04-17 10:47:00
#
# Author: Ayush Dubey, dubey@cs.cornell.edu
#
# Copyright (C) 2013-2014, Cornell University, see the LICENSE
# file for licensing agreement
# ===============================================================
#
import random
import sys
import time
import threading
import weaver.client as client
# Shared benchmark state; all access is guarded by the condition variable `cv`.
num_started = 0    # set to num_clients by main() to release all workers at once
num_finished = 0   # incremented by each worker when it completes
num_clients = 15
cv = threading.Condition()
all_latencies = []  # per-request latencies (seconds) collected from every worker
class request_gen:
    """Draws random TAO-workload request descriptors with the benchmark's op mix."""

    def __init__(self):
        # Node handles are the integers 0 .. num_nodes-1
        # (snap livejournal 10k sample; full datasets: 81306 / 4840000 nodes).
        self.num_nodes = 10000
        # Read/write split.
        self.p_read = 0.75  # p_write = 1 - p_read
        # Individual read-op probabilities ...
        self.p_assoc_get = 0.157
        self.p_assoc_range = 0.437
        self.p_assoc_count = 0.117
        self.p_obj_get = 0.289
        # ... and the cumulative thresholds get() compares against.
        self.c_assoc_get = self.p_assoc_get
        self.c_assoc_range = self.c_assoc_get + self.p_assoc_range
        self.c_assoc_count = self.c_assoc_range + self.p_assoc_count
        self.c_obj_get = self.c_assoc_count + self.p_obj_get
        # Write-op probabilities (only assoc add/del are active here).
        self.p_assoc_add = 0.8
        self.p_assoc_del = 0.2
        self.p_assoc_update = 0.207
        self.c_assoc_add = self.p_assoc_add
        self.c_assoc_del = self.c_assoc_add + self.p_assoc_del
        self.req_types = ['assoc_get', 'assoc_range', 'assoc_count', 'obj_get',
                          'assoc_add', 'assoc_del', 'obj_add', 'obj_update', 'obj_del']

    def get(self):
        """Return one request: [op_code, node] or [op_code, node, node]."""
        toss = random.random()
        src = str(random.randint(0, self.num_nodes - 1))
        dst = str(random.randint(0, self.num_nodes - 1))
        if toss < self.p_read:
            # Pick among the four read operations.
            toss = random.random()
            if toss < self.c_assoc_get:
                return [0, src, dst]
            if toss < self.c_assoc_range:
                return [1, src]
            if toss < self.c_assoc_count:
                return [2, src]
            return [3, src]
        # Pick among the two write operations.
        toss = random.random()
        return [4, src, dst] if toss < self.c_assoc_add else [5, src, dst]
def exec_work(idx, cl, num_requests):
    """Worker: issue num_requests random ops on client `cl`, recording latencies.

    idx : worker index (used only for progress printing).
    cl : a weaver client connection dedicated to this worker.
    num_requests : number of requests to issue (must be a multiple of 1000).
    """
    global num_started
    global num_finished
    global num_clients
    global cv
    global all_latencies
    assert(num_requests % 1000 == 0)
    rgen = request_gen()
    # Parameter objects reused across requests.
    egp = client.EdgeGetParams()
    ecp = client.EdgeCountParams()
    rnep = client.ReadNEdgesParams()
    rnpp = client.ReadNodePropsParams()
    # Barrier: wait until main() releases all workers at once.
    with cv:
        while num_started < num_clients:
            cv.wait()
    edge_list = []         # edges created by this worker (popped again by deletes)
    edge_parent_list = []  # matching source node of each created edge
    latencies = []
    for rcnt in range(num_requests):
        req = rgen.get()
        start = time.time();
        if req[0] == 0: # assoc_get
            response = cl.get_edges(node=req[1], nbrs=[req[2]])
        elif req[0] == 1: # assoc range
            rnep.num_edges = 50
            prog_args = [(req[1], rnep)]
            response = cl.read_n_edges(prog_args)
        elif req[0] == 2: # assoc count
            prog_args = [(req[1], ecp)]
            response = cl.edge_count(prog_args)
        elif req[0] == 3: # obj get
            response = cl.get_node(req[1], get_props=True)
        elif req[0] == 4: # assoc_add
            cl.begin_tx()
            new_edge = cl.create_edge(req[1], req[2])
            cl.end_tx()
            # Remember the edge so a later assoc_del can remove it.
            edge_list.append(new_edge)
            edge_parent_list.append(req[1])
        elif req[0] == 5: # assoc del
            # Only delete if this worker has previously created an edge.
            if len(edge_list) > 0:
                cl.begin_tx()
                del_edge = edge_list.pop()
                del_edge_node = edge_parent_list.pop()
                cl.delete_edge(del_edge, del_edge_node)
                cl.end_tx()
        else:
            print 'BAD VALUE!'
        end = time.time()
        latencies.append(end-start)
        if rcnt > 0 and rcnt % 100 == 0:
            print 'done ' + str(rcnt) + ' by client ' + str(idx)
    # Publish results and signal completion to main().
    with cv:
        num_finished += 1
        cv.notify_all()
        all_latencies.extend(latencies)
def main():
    """Drive num_clients worker threads against the store and report throughput."""
    num_requests = 5000
    global num_started
    global num_finished
    global num_clients
    global cv
    global all_latencies
    # One dedicated connection per worker thread.
    clients = []
    for i in range(num_clients):
        clients.append(client.Client('128.84.167.101', 2002))
    threads = []
    print "starting requests"
    for i in range(num_clients):
        thr = threading.Thread(target=exec_work, args=(i, clients[i], num_requests))
        thr.start()
        threads.append(thr)
    start_time = time.time()
    # Release all workers simultaneously, then wait until they all finish.
    with cv:
        num_started = num_clients
        cv.notify_all()
        while num_finished < num_clients:
            cv.wait()
    end_time = time.time()
    total_time = end_time - start_time
    print 'Total time = ' + str(total_time)
    throughput = (num_requests * num_clients) / total_time
    print 'Throughput = ' + str(throughput)
    for thr in threads:
        thr.join()
    # Dump raw per-request latencies for offline analysis.
    lat_file = open('latencies', 'w')
    for t in all_latencies:
        lat_file.write(str(t) + '\n')
# Script entry point.
if __name__ == '__main__':
    main()
|
arduscope.py | # -*- coding: utf-8 -*-
from __future__ import annotations
import calendar
import json
import os
import pathlib
import threading
import time
from collections import deque
from dataclasses import dataclass, field, asdict
from tqdm import tqdm
from typing import List, Dict
import matplotlib.pyplot as plt
import numpy as np
from serial import Serial
# Protocol/hardware constants shared with the Arduino sketch.
BUFFER = 480                 # 16-bit samples per transmitted screen (all channels interleaved)
MAX_FREQ = 32000             # hard upper bound accepted for the sampling frequency (Hz)
REAL_MAX_FREQ = 20000        # recommended maximum frequency for a single channel (Hz)
MAX_PULSE_WIDTH = 32767      # maximum pulse width, in the millisecond units sent to the board
BAUDRATE = 115200
OFFSET_TIME = 0.000054554    # inter-channel ADC sampling offset (seconds)
# Order in which the capture parameters are serialized to the Arduino.
ARDUINO_PARAMS = [
    "limit",
    "frequency",
    "reference",
    "trigger",
    "trigger_channel",
    "trigger_offset",
    "trigger_tol",
    "channels",
    "adc_prescaler",
    "pulse_width",
]
@dataclass
class ArduscopeMeasure:
    """Acquisition settings plus the captured channel data of an Arduscope run."""
    acquire_time: float      # epoch seconds when the acquisition started
    frequency: int           # sampling frequency (Hz)
    pulse_width: float       # output pulse width (s)
    trigger_value: float     # trigger level (V)
    amplitude: float         # full-scale amplitude (V)
    n_channels: int          # number of acquired channels
    trigger_channel: str     # trigger source, e.g. "A0" or "D7OUT_HIGH"
    trigger_offset: float    # trigger position as a fraction of the screen
    # Per-channel time axes; rebuilt in __post_init__, never passed by callers.
    x: List[np.ndarray] = field(init=False)
    # NOTE(review): this default is the builtin `list` *type*, not an empty
    # list -- field(default_factory=list) was probably intended. Confirm no
    # caller relies on the current default before changing it.
    channels: List[np.ndarray] = list
    version: str = "0.3.1"

    def __post_init__(self):
        # Coerce every field: values may arrive as strings from a CSV load.
        self.acquire_time = float(self.acquire_time)
        self.frequency = int(self.frequency)
        self.pulse_width = float(self.pulse_width)
        self.trigger_value = float(self.trigger_value)
        self.amplitude = float(self.amplitude)
        self.n_channels = int(self.n_channels)
        self.trigger_channel = str(self.trigger_channel)
        self.trigger_offset = float(self.trigger_offset)
        # One time axis per channel, shifted by the ADC inter-channel offset.
        base_x = np.arange(BUFFER // self.n_channels) / self.frequency
        self.x = [
            base_x + OFFSET_TIME * i
            for i in range(self.n_channels)
        ]

    def save(self, file: str | os.PathLike, overwrite: bool = False):
        """ Saves a screen into a file (csv, npz or json)
        Parameters
        ----------
        file : str or os.PathLike
            A filename with desired format in extension (.csv, .npz or .json)
        overwrite : bool
            Indicates if the file is overwrite on exists case

        Raises TypeError (bad `file`), FileExistsError, or ValueError
        (unsupported extension).
        """
        if isinstance(file, (str, os.PathLike)):
            filename = pathlib.Path(file).absolute()
        else:
            raise TypeError
        if overwrite is False and filename.exists():
            raise FileExistsError
        as_dict: dict = asdict(self)
        if filename.suffix == ".json":
            # JSON: drop the derived x axes and store channels as nested lists.
            with open(filename, mode="w") as f:
                as_dict.pop("x")
                as_dict["channels"] = [
                    channel.tolist()
                    for channel in self.channels
                ]
                json.dump(as_dict, f)
        elif filename.suffix == ".npz":
            as_dict.pop("x")
            np.savez(filename, **as_dict)
        elif filename.suffix == ".csv":
            # CSV: settings as "# key = value" header lines, then one text
            # matrix (time axes + channel columns) per captured screen.
            as_dict["acquire_time"] = time.strftime(
                "%d/%m/%Y %H:%M:%S",
                time.gmtime(self.acquire_time)
            )
            header = "\n".join([
                f"# {key} = {value}"
                for key, value in as_dict.items()
                if key not in ["x", "channels"]
            ])
            with open(filename, mode="w") as f:
                f.write(header)
                f.write("\n")
                for i in range(self.channels[0].shape[0]):
                    f.write(f"\n### Screen {i} of {self.channels[0].shape[0]}\n")
                    screen = [
                        channel[i, :]
                        for channel in self.channels
                    ]
                    data = np.append(self.x, screen, axis=0).T
                    np.savetxt(f, data, fmt="%.9e")
                    f.write("### End of screen\n")
        else:
            raise ValueError

    @classmethod
    def load(cls, file: str | os.PathLike) -> ArduscopeMeasure:
        """ Loads a screen from a file (csv, npz or json)
        Parameters
        ----------
        file : str or os.PathLike
            A filename with valid extension (.csv, .npz or .json)
        Returns
        -------
        ArduscopeScreen instance with loaded data
        """
        if isinstance(file, (str, os.PathLike)):
            filename = pathlib.Path(file).absolute()
        else:
            raise TypeError
        if not filename.exists():
            raise FileNotFoundError
        if filename.suffix == ".json":
            with open(filename, mode="r") as f:
                data = json.load(f)
                data["channels"] = [
                    np.array(ch)
                    for ch in data["channels"]
                ]
                return cls(**data)
        elif filename.suffix == ".npz":
            data = np.load(str(filename))
            return cls(**data)
        elif filename.suffix == ".csv":
            # Parse the "# key = value" header back into constructor kwargs.
            as_dict = {}
            with open(filename, mode="r") as f:
                line = f.readline().strip()
                while line.startswith("#"):
                    split = line.replace("#", "").split("=")
                    if len(split) == 2:
                        key, value = split[0].strip(), split[1].strip()
                        as_dict[key] = value
                    line = f.readline().strip()
            as_dict["acquire_time"] = calendar.timegm(
                time.strptime(as_dict["acquire_time"], "%d/%m/%Y %H:%M:%S")
            )
            # Split the flat sample matrix back into per-channel screen stacks.
            data = np.loadtxt(str(filename))
            n = data.shape[0] // (BUFFER // int(as_dict["n_channels"]))
            as_dict["channels"] = [
                np.asarray(np.array_split(data[:, i + 1], n))
                for i in range(int(as_dict["n_channels"]))
            ]
            return cls(**as_dict)
        else:
            raise ValueError
class Arduscope:
_open_ports: Dict[str, Arduscope] = {}
    def __init__(self, port: str, deque_max_size: int = 100):
        """
        Parameters
        ----------
        port : str
            Connection port of Arduino, like "COM1" or "/dev/ttyUS0"
        deque_max_size : int
            Max size of screen buffer (a double-ended queue)
        """
        if not isinstance(port, str):
            raise TypeError
        self._port = port
        self._baudrate = BAUDRATE
        # Opening the port also waits for the board's "BOOTED" handshake.
        self._serial = self._open_serial()
        self._capture_parameters = None
        # Backing fields for the public properties defined below.
        self._freq = None
        self._pulse_width = None
        self._amplitude = None
        self._ref = None
        self._n_channels = None
        self._trigger_value = None
        self._trigger_channel = None
        self._trigger_channel_code = None
        self._trigger_tol = None
        self._trigger_offset = None
        self._adc_prescaler = 4
        self._ref_values = {"5.0": 0, "1.1": 1}  # ADC reference -> protocol code
        self._measure_params = None
        self._data_buffer = deque(maxlen=deque_max_size)
        self._daemon = None
        self._running = threading.Event()
        self._screen_ready = threading.Event()
        self._uptime = time.time()
        # Defaults, assigned through the properties so they are validated.
        self.frequency = 200
        self.pulse_width = 0.1
        self.trigger_value = 2.5
        self.amplitude = 5.0
        self.n_channels = 2
        self.trigger_channel = "A0"
        self.trigger_offset = 0.05
        self._trigger_tol = 5
        self._live_mode_on = False
    def __enter__(self):
        # Context-manager support: `with Arduscope(port) as scope: ...`
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Always release the serial port on context exit.
        self.close()
    def _open_serial(self) -> Serial:
        """
        Opens a serial port between Arduino and Python
        Returns
        -------
        Serial (from PySerial library)

        Raises TimeoutError if the board does not send "BOOTED" within 5 s.
        """
        # Only one Arduscope may own a port: shut down any previous owner first.
        if self._port in Arduscope._open_ports.keys():
            print(f"Closing other Arduscope instances in port {self._port}...")
            other_arduscope = Arduscope._open_ports[self._port]
            try:
                other_arduscope.stop_acquire()
                other_arduscope._serial.close()
            except AttributeError:
                pass
        Arduscope._open_ports.update({
            self._port: self
        })
        serial = Serial(port=self._port, baudrate=self._baudrate, timeout=1)
        # Wait for the sketch's boot banner before handing the port back.
        msg = ""
        start_time = time.time()
        while msg != "BOOTED\r\n":
            try:
                msg = serial.readline().decode('utf-8')
            except UnicodeDecodeError:
                # Garbage bytes during the board reset are expected; keep polling.
                pass
            if time.time() - start_time > 5:
                raise TimeoutError("Arduino is not responding")
        return serial
    @property
    def uptime(self) -> float:
        """ Uptime of Arduscope object creation"""
        # Seconds elapsed since __init__ recorded time.time().
        return time.time() - self._uptime
    @property
    def x(self) -> List[np.ndarray]:
        """ Time-array for x axes representation """
        # One axis per channel, shifted by the inter-channel ADC offset.
        base_x = np.arange(BUFFER // self.n_channels) / self.frequency
        return [
            base_x + OFFSET_TIME * i
            for i in range(self._n_channels)
        ]
    @property
    def channels(self) -> List[np.ndarray]:
        """One (screens, samples) array per channel, stacked from the buffer."""
        return [
            np.asarray([channels[i] for channels in self._data_buffer])
            for i in range(self._n_channels)
        ]
    @property
    def frequency(self) -> int:
        """ Frequency of sampling (in Hz) """
        # Validated range: 1 .. MAX_FREQ (see the setter).
        return self._freq
@frequency.setter
def frequency(self, value: int):
if 1 <= value <= MAX_FREQ:
self._freq = int(value)
else:
raise ValueError(f"MIN: 1, MAX: {MAX_FREQ}")
self._on_property_change()
    @property
    def pulse_width(self) -> float:
        """ Output pulse width in PIN7 (in seconds) """
        # Stored internally as integer milliseconds; exposed in seconds.
        return self._pulse_width * 0.001
    @pulse_width.setter
    def pulse_width(self, value: float):
        # Accepts seconds, stores integer milliseconds for the sketch.
        if 0.002 <= value <= MAX_PULSE_WIDTH / 1000.0:
            self._pulse_width = int(value * 1000)
        else:
            raise ValueError(f"MIN: 0.002, MAX: {MAX_PULSE_WIDTH / 1000.0}")
        self._on_property_change()
    @property
    def amplitude(self) -> float:
        """ Max amplitude measured (in Volts) """
        # The corresponding ADC reference is chosen by the setter.
        return self._amplitude
    @amplitude.setter
    def amplitude(self, value: float):
        # Choose the ADC reference from the requested full-scale amplitude.
        # A value of exactly 1.1 falls into the first branch (1.1 V reference).
        if 0 < value <= 1.1:
            self._ref = "1.1"
            self._amplitude = value
        elif 1.1 <= value <= 5.0:
            self._ref = "5.0"
            self._amplitude = value
        else:
            raise ValueError("0.0 < value <= 5.0")
        self._on_property_change()
    @property
    def trigger_value(self) -> float:
        """ Trigger value (in Volts) """
        # Validated range: 0 < value < 5.0 (see the setter).
        return self._trigger_value
    @trigger_value.setter
    def trigger_value(self, value: float):
        # The 1.1 V-reference compatibility check happens in start_acquire().
        if 0 < value < 5.0:
            self._trigger_value = value
        else:
            raise ValueError("MIN: 0, MAX: 5.0")
        self._on_property_change()
    @property
    def trigger_channel(self) -> str:
        """ Trigger channel
        Posible values:
          - "A0" to "A6" -> Analog inputs
          - "D7OUT_HIGH" -> When PIN7 changes to HIGH state
          - "D7OUT_LOW" -> When PIN7 changes to LOW state
          - "REPEAT" -> Immediately after transmitting the last measurement
        """
        return self._trigger_channel
    @trigger_channel.setter
    def trigger_channel(self, value: str):
        # Map the symbolic channel name to the integer code the sketch expects:
        # 0..6 = analog inputs, -1/-2 = pin-7 edges, -3 = free-running repeat.
        if isinstance(value, str):
            if value.upper().startswith("A") and len(value) == 2:
                self._trigger_channel_code = int(value[1])
            elif value.upper() == "D7OUT_HIGH":
                self._trigger_channel_code = -1
            elif value.upper() == "D7OUT_LOW":
                self._trigger_channel_code = -2
            elif value.upper() == "REPEAT":
                self._trigger_channel_code = -3
            else:
                raise ValueError("Posible values: "
                                 '"A0", "A1", "A2", "A3", "A4", "A5", "A6", '
                                 '"D7OUT_HIGH", "D7OUT_LOW", "REPEAT"')
        else:
            raise TypeError("Posible values: "
                            '"A0", "A1", "A2", "A3", "A4", "A5", "A6", '
                            '"D7OUT_HIGH", "D7OUT_LOW", "REPEAT"')
        self._trigger_channel = value
        self._on_property_change()
    @property
    def trigger_offset(self) -> float:
        """ Trigger offset in screen fraction (-1.0 to 1.0) """
        # Converted to a sample count when sent to the board in start_acquire().
        return self._trigger_offset
@trigger_offset.setter
def trigger_offset(self, value: float):
if isinstance(value, (int, float)):
if -1.0 <= value <= 1.0:
self._trigger_offset = value
else:
raise ValueError("MIN: -1.0, MAX: 1.0")
else:
raise TypeError("Expected <float>, MIN: -1.0, MAX: 1.0")
self._on_property_change()
    @property
    def n_channels(self) -> int:
        """ Number of channels (1 to 6 available)"""
        # Samples per screen per channel is BUFFER // n_channels.
        return self._n_channels
@n_channels.setter
def n_channels(self, value: int):
if 1 <= value <= 6:
self._n_channels = int(value)
else:
raise ValueError("MIN: 1, MAX: 6")
self._on_property_change()
    @property
    def factor(self) -> float:
        """ Conversion factor given by Arduino Reference and bit depth (10 bits)"""
        # NOTE(review): implicitly returns None if _ref were ever neither
        # "5.0" nor "1.1"; the amplitude setter currently guarantees one of them.
        if self._ref == "5.0":
            return 1024 / 5.0
        elif self._ref == "1.1":
            return 1024 / 1.1
    @property
    def measure(self) -> ArduscopeMeasure:
        """ An ArduscopeMeasure object with measurement params and channel data"""
        # Params were frozen by start_acquire(); channels reflect the current buffer.
        return ArduscopeMeasure(channels=self.channels, **self._measure_params)
def start_acquire(self):
""" Starts acquire in background (clearing previous state) """
if self._serial.isOpen() is False:
self._serial = self._open_serial()
parameters = {
"limit": 0,
"frequency": self._freq,
"reference": self._ref_values[self._ref],
"trigger": self._trigger_value * self.factor,
"trigger_channel": self._trigger_channel_code,
"trigger_offset": int((BUFFER // self._n_channels) * self._trigger_offset),
"trigger_tol": self._trigger_tol,
"channels": self._n_channels,
"adc_prescaler": self._adc_prescaler,
"pulse_width": self._pulse_width // 2
}
if self._ref == "1.1":
if self._trigger_value > 1.1 and self._trigger_channel_code >= 0:
raise ValueError(f"Trigger value {self._trigger_value}V "
f"greater than maximum amplitude {self._ref}V.")
if self._freq > REAL_MAX_FREQ / self._n_channels:
print(f"\n*** WARNING ***"
f"\nMAXIMUM RECOMMENDED FREQUENCY FOR {self._n_channels} CHANNELS "
f"IS {int(REAL_MAX_FREQ / self._n_channels):d}Hz.\n"
f"***\n")
self._measure_params = {
"acquire_time": time.time(),
"frequency": self.frequency,
"pulse_width": self.pulse_width,
"trigger_value": self.trigger_value,
"amplitude": self.amplitude,
"n_channels": self.n_channels,
"trigger_channel": self.trigger_channel,
"trigger_offset": self.trigger_offset
}
typed_array = np.asarray(
[parameters[x] for x in ARDUINO_PARAMS],
dtype=np.int16
)
for param in typed_array:
self._serial.write(int(param).to_bytes(2, byteorder="little", signed=True))
if self._daemon is not None:
if self._daemon.is_alive():
self._running.clear()
self._daemon.join()
self._running.set()
self._screen_ready.clear()
self._data_buffer.clear()
self._daemon = threading.Thread(target=self._acquire_daemon, daemon=True)
self._daemon.start()
self._screen_ready.wait()
    def clear_buffer(self):
        """Discard all buffered screens without touching the acquisition."""
        self._data_buffer.clear()
    def wait_signal(self):
        """ Stops execution until screen buffer has at least one measurement"""
        # _screen_ready is set by the acquisition daemon on the first screen.
        self._screen_ready.wait()
    def wait_until(self, n_screens: int, timeout: float = None):
        """ Stops execution until screen buffer has at least <n_screen>
        Parameters
        ----------
        n_screens : int
            Number of screens required
        timeout : float
            Timeout in seconds (raises a TimeoutError exception)
        """
        # The buffer can never hold more than its deque maxlen screens.
        if isinstance(n_screens, int):
            if n_screens > self._data_buffer.maxlen:
                raise ValueError(f"0 < n_screens < {self._data_buffer.maxlen}")
        else:
            raise TypeError(f"0 < n_screens < {self._data_buffer.maxlen}")
        if timeout is not None:
            if not isinstance(timeout, (int, float)):
                raise TypeError("Timeout type: float")
            start = time.time()
        current_screens = len(self._data_buffer)
        if current_screens < n_screens:
            # Busy-wait with a progress bar while the daemon fills the buffer.
            with tqdm(
                    total=n_screens,
                    miniters=1,
                    initial=current_screens,
                    ncols=80,
                    bar_format="{desc}: {percentage:3.0f}%|{bar}| {n_fmt}/{total_fmt}"
            ) as pb:
                pb.set_description("Waiting for requested screens")
                while current_screens < n_screens:
                    if timeout is not None:
                        if time.time() - start > timeout:
                            raise TimeoutError()
                    pb.update(current_screens - pb.n)
                    current_screens = len(self._data_buffer)
                pb.update(n_screens - pb.n)
            print()
def stop_acquire(self):
""" Stops acquire without clearing the buffer """
if self._running.isSet():
self._running.clear()
if self._daemon is not None:
if self._daemon.is_alive():
self._running.clear()
self._daemon.join()
self._serial.close()
Arduscope._open_ports.pop(self._port, None)
def close(self):
self.stop_acquire()
def _on_property_change(self):
""" Handles the properties changes resetting acquisition"""
self._data_buffer.clear()
if self._running.isSet():
self.stop_acquire()
self.start_acquire()
def _acquire_daemon(self):
""" Background daemon that performs the buffer read """
while self._running.isSet():
if self._serial.inWaiting() >= BUFFER * 2 + 2:
channels = self._read_buffer()
self._data_buffer.append(channels)
self._screen_ready.set()
    def _read_buffer(self) -> List[np.ndarray]:
        """ Private function for buffer reading and conversion

        Reads one complete screen from the serial port: a 2-byte
        little-endian signed start index followed by BUFFER uint16 samples
        interleaved across the active channels.

        Returns
        -------
        List[np.ndarray]
            One array per channel, rotated into chronological order and
            divided by self.factor (presumably counts-to-volts scaling —
            TODO confirm units).

        Raises
        ------
        BufferError
            If a full screen is not yet available on the serial port.
        """
        if self._serial.inWaiting() < BUFFER * 2 + 2:
            raise BufferError("Empty buffer")
        # First two bytes: index where the device's circular buffer wrapped.
        raw_start = self._serial.read(2)
        start = int.from_bytes(raw_start, byteorder="little", signed=True)
        raw_data = self._serial.read(BUFFER * 2)
        data = np.frombuffer(raw_data, dtype=np.uint16)
        # Samples are interleaved: column i holds channel i.
        data = data.reshape((BUFFER // self._n_channels, self._n_channels))
        # Rotate each channel so the oldest sample comes first.
        channels = [
            np.roll(data[:, i], shift=-(start + 1) // self._n_channels) / self.factor
            for i in range(self._n_channels)
        ]
        return channels
    def simple_plot(self):
        """Draw the most recent screen in a static Matplotlib figure.

        NOTE(review): assumes at least one screen has been acquired —
        self._data_buffer[-1] raises IndexError on an empty buffer; call
        wait_signal() first.
        """
        fig, ax = plt.subplots(1, 1, figsize=(10, 6))
        curves = [
            ax.plot([], [], lw=2.0, label=f'Channel A{i}')[0]
            for i in range(self.n_channels)
        ]
        # Plot the latest captured screen, one curve per channel.
        for i, channel in enumerate(self._data_buffer[-1]):
            curves[i].set_data(self.x[i], channel)
        ax.grid()
        ax.set_xlim(0, max(self.x[-1]))
        ax.set_ylim(0, self.amplitude)
        ax.set_xlabel("Time (s)", fontsize=14)
        ax.set_ylabel("Voltage (V)", fontsize=14)
        ax.legend(loc=1, fontsize=14)
    def live_plot(self, close_after: int = None):
        """ Deploy a Matplotlib window with the live state of Arduscope

        Parameters
        ----------
        close_after : int
            Optional number of buffered screens after which the window is
            closed automatically (None keeps it open until closed manually).

        Raises
        ------
        RuntimeError
            If acquisition has not been started.
        """
        if not self._running.isSet():
            raise RuntimeError('First call "start_acquire()"')
        backend = plt.get_backend()
        # Inline backends (e.g. notebooks) cannot animate; fall back to a
        # static plot of the latest screen.
        if 'inline' in backend:
            print(
                f"\nCurrent backend of Matplotlib is {plt.get_backend()}"
                f"\nLive mode not available for this backend"
            )
            self.simple_plot()
            return

        def on_close(event):
            # Stop the refresh loop when the user closes the window.
            self._live_mode_on = False

        # Remember interactive state so it can be restored afterwards.
        interactive_state = plt.isinteractive()
        plt.ion()
        self._live_mode_on = True
        fig: plt.Figure
        ax: plt.Axes
        fig, ax = plt.subplots(1, 1, figsize=(10, 6))
        fig.canvas.mpl_connect('close_event', on_close)
        curves = [
            ax.plot([], [], lw=2.0, label=f'Channel A{i}')[0]
            for i in range(self.n_channels)
        ]
        ax.grid()
        ax.set_xlim(0, max(self.x[-1]))
        ax.set_ylim(0, self.amplitude)
        ax.set_xlabel("Time (s)", fontsize=14)
        ax.set_ylabel("Voltage (V)", fontsize=14)
        ax.legend(loc=1, fontsize=14)
        current_screens = len(self._data_buffer)
        with tqdm(
            total=self._data_buffer.maxlen,
            initial=current_screens,
            ncols=80,
            bar_format="{desc}: {percentage:3.0f}%|{bar}| {n_fmt}/{total_fmt}"
        ) as pb:
            pb.set_description("Live mode on. Screen buffer status")
            while self._live_mode_on is True:
                if close_after is not None:
                    if len(self._data_buffer) >= close_after:
                        plt.close(fig)
                        self._live_mode_on = False
                # Keep the GUI responsive even when no new screen arrived.
                fig.canvas.draw_idle()
                fig.canvas.flush_events()
                # Redraw only when the daemon signals a fresh screen.
                if self._screen_ready.isSet():
                    for i, channel in enumerate(self._data_buffer[-1]):
                        curves[i].set_data(self.x[i], channel)
                    self._screen_ready.clear()
                pb.update(current_screens - pb.n)
                current_screens = len(self._data_buffer)
        if interactive_state is False:
            plt.ioff()
        print("\n")
|
lisp-rtr.py | # -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-rtr.py
#
# This file performs LISP Reencapsualting Tunnel Router (RTR) functionality.
#
# -----------------------------------------------------------------------------
if 64 - 64: i11iIiiIii
import lisp
import lispconfig
import socket
import time
import select
import threading
import pcappy
import os
import copy
import commands
import binascii
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
# Obfuscated module-level globals (machine-generated names).
II1iII1i = [ None , None , None ]  # socket list handed to lisp.lisp_parse_packet — presumably [IPC, data, ...]; TODO confirm
oO0oIIII = None
Oo0oO0oo0oO00 = None
i111I = None
II1Ii1iI1i = None
iiI1iIiI = lisp . lisp_get_ephemeral_port ( )  # ephemeral UDP port allocated for this RTR
OOo = None  # send socket used by the fast data path (see its sendto call) — TODO confirm
Ii1IIii11 = None
Oooo0000 = None
if 22 - 22: Ii1I . IiII
I11 = [ ]  # parameter list forwarded to the lispconfig show-command handlers
if 98 - 98: i11iIiiIii * I1IiiI % iII111i * iII111i * II111iiii
if 79 - 79: IiII
if 86 - 86: OoOoOO00 % I1IiiI
if 80 - 80: OoooooooOO . I1IiiI
if 87 - 87: oO0o / ooOoO0o + I1Ii111 - ooOoO0o . ooOoO0o / II111iiii
if 11 - 11: I1IiiI % o0oOOo0O0Ooo - Oo0Ooo
if 58 - 58: i11iIiiIii % I1Ii111
if 54 - 54: OOooOOo % O0 + I1IiiI - iII111i / I11i
iIiiI1 = None
if 68 - 68: I1IiiI - i11iIiiIii - OoO0O00 / OOooOOo - OoO0O00 + i1IIi
if 48 - 48: OoooooooOO % o0oOOo0O0Ooo . I1IiiI - Ii1I % i1IIi % OoooooooOO
if 3 - 3: iII111i + O0
if 42 - 42: OOooOOo / i1IIi + i11iIiiIii - Ii1I
# Feature flags read once from the environment at startup:
oo0Ooo0 = ( os . getenv ( "LISP_RTR_FAST_DATA_PLANE" ) != None )  # enable the Python fast data path
I1I11I1I1I = ( os . getenv ( "LISP_RTR_LATENCY_DEBUG" ) != None )  # enable latency logging
if 90 - 90: II111iiii + oO0o / o0oOOo0O0Ooo % II111iiii - O0
if 29 - 29: o0oOOo0O0Ooo / iIii1I11I1II1
if 24 - 24: O0 % o0oOOo0O0Ooo + i1IIi + I1Ii111 + I1ii11iIi11i
if 70 - 70: Oo0Ooo % Oo0Ooo . IiII % OoO0O00 * o0oOOo0O0Ooo % oO0o
if 23 - 23: i11iIiiIii + I1IiiI
if 68 - 68: OoOoOO00 . oO0o . i11iIiiIii
if 40 - 40: oO0o . OoOoOO00 . Oo0Ooo . i1IIi
if 33 - 33: Ii1I + II111iiii % i11iIiiIii . ooOoO0o - I1IiiI
def O00oooo0O(parameter):
    """Handle an RTR "show" command by delegating to lispconfig."""
    global I11
    return lispconfig.lisp_itr_rtr_show_command(parameter, "RTR", I11)
if 32 - 32: Oo0Ooo * O0 % oO0o % Ii1I . IiII
if 61 - 61: ooOoO0o
if 79 - 79: Oo0Ooo + I1IiiI - iII111i
if 83 - 83: ooOoO0o
if 64 - 64: OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
if 74 - 74: iII111i * O0
if 89 - 89: oO0o + Oo0Ooo
def Ii1IOo0o0(parameter):
    """Handle an RTR "show" command variant (extra flag set True)."""
    global I11
    return lispconfig.lisp_itr_rtr_show_command(parameter, "RTR", I11, True)
if 48 - 48: I11i + I11i / II111iiii / iIii1I11I1II1
if 20 - 20: o0oOOo0O0Ooo
if 77 - 77: OoOoOO00 / I11i
if 98 - 98: iIii1I11I1II1 / i1IIi / i11iIiiIii / o0oOOo0O0Ooo
if 28 - 28: OOooOOo - IiII . IiII + OoOoOO00 - OoooooooOO + O0
if 95 - 95: OoO0O00 % oO0o . O0
if 15 - 15: ooOoO0o / Ii1I . Ii1I - i1IIi
def o00oOO0(parameter):
    """Show the RTR crypto list via lispconfig."""
    return lispconfig.lisp_show_crypto_list("RTR")
if 95 - 95: OOooOOo / OoooooooOO
if 18 - 18: i11iIiiIii
if 46 - 46: i1IIi / I11i % OOooOOo + I1Ii111
if 79 - 79: I1Ii111 - o0oOOo0O0Ooo + I1Ii111 - iII111i
if 8 - 8: I1IiiI
if 75 - 75: iIii1I11I1II1 / OOooOOo % o0oOOo0O0Ooo * OoOoOO00
if 9 - 9: OoO0O00
def i11(kv_pair):
    """Process a "lisp database-mapping" configuration clause."""
    lispconfig.lisp_database_mapping_command(kv_pair)
if 58 - 58: OOooOOo * i11iIiiIii / OoOoOO00 % I1Ii111 - I1ii11iIi11i / oO0o
if 50 - 50: I1IiiI
if 34 - 34: I1IiiI * II111iiii % iII111i * OoOoOO00 - I1IiiI
if 33 - 33: o0oOOo0O0Ooo + OOooOOo * OoO0O00 - Oo0Ooo / oO0o % Ii1I
if 21 - 21: OoO0O00 * iIii1I11I1II1 % oO0o * i1IIi
if 16 - 16: O0 - I1Ii111 * iIii1I11I1II1 + iII111i
if 50 - 50: II111iiii - ooOoO0o * I1ii11iIi11i / I1Ii111 + o0oOOo0O0Ooo
def O0O0O(kv_pair):
    """Process a glean-mapping configuration clause.

    Builds a glean-mapping dictionary from the supplied key/value pairs and
    appends it to lisp.lisp_glean_mappings unless an entry with the same
    eid/group/rloc prefixes and instance-id range already exists.
    """
    entry = {"rloc-probe": False, "igmp-query": False}

    for keyword in kv_pair.keys():
        value = kv_pair[keyword]

        if keyword == "instance-id":
            bounds = value.split("-")
            entry["instance-id"] = [0, 0]
            if len(bounds) == 1:
                # A single value is a one-element range.
                entry["instance-id"][0] = int(bounds[0])
                entry["instance-id"][1] = int(bounds[0])
            else:
                entry["instance-id"][0] = int(bounds[0])
                entry["instance-id"][1] = int(bounds[1])

        if keyword == "eid-prefix":
            eid = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
            eid.store_prefix(value)
            entry["eid-prefix"] = eid

        if keyword == "group-prefix":
            group = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
            group.store_prefix(value)
            entry["group-prefix"] = group

        if keyword == "rloc-prefix":
            rloc = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
            rloc.store_prefix(value)
            entry["rloc-prefix"] = rloc

        if keyword == "rloc-probe":
            entry["rloc-probe"] = (value == "yes")

        if keyword == "igmp-query":
            entry["igmp-query"] = (value == "yes")

    # Duplicate suppression: if an existing mapping matches on every field
    # present (XOR catches one-sided keys), keep it and return early.
    for existing in lisp.lisp_glean_mappings:
        if ("eid-prefix" in existing) ^ ("eid-prefix" in entry): continue
        if ("eid-prefix" in existing) and ("eid-prefix" in entry):
            old = existing["eid-prefix"]
            new = entry["eid-prefix"]
            if old.is_exact_match(new) == False: continue

        if ("group-prefix" in existing) ^ ("group-prefix" in entry):
            continue
        if ("group-prefix" in existing) and ("group-prefix" in entry):
            old = existing["group-prefix"]
            new = entry["group-prefix"]
            if old.is_exact_match(new) == False: continue

        if ("rloc-prefix" in existing) ^ ("rloc-prefix" in entry): continue
        if ("rloc-prefix" in existing) and ("rloc-prefix" in entry):
            old = existing["rloc-prefix"]
            new = entry["rloc-prefix"]
            if old.is_exact_match(new) == False: continue

        if ("instance-id" in existing) ^ ("instance-id" in entry): continue
        if ("instance-id" in existing) and ("instance-id" in entry):
            if existing["instance-id"] != entry["instance-id"]: continue

        # Full match found — nothing to add.
        return

    lisp.lisp_glean_mappings.append(entry)
if 34 - 34: IiII
if 57 - 57: oO0o . I11i . i1IIi
if 42 - 42: I11i + I1ii11iIi11i % O0
if 6 - 6: oO0o
if 68 - 68: OoOoOO00 - OoO0O00
if 28 - 28: OoO0O00 . OOooOOo / OOooOOo + Oo0Ooo . I1ii11iIi11i
if 1 - 1: iIii1I11I1II1 / II111iiii
def iiI1I11i1i(parameter):
    """Show the RTR RLOC-probe list via lispconfig."""
    return lispconfig.lisp_itr_rtr_show_rloc_probe_command("RTR")
if 2 - 2: I1ii11iIi11i * I11i - iIii1I11I1II1 + I1IiiI . oO0o % iII111i
if 92 - 92: iII111i
if 25 - 25: Oo0Ooo - I1IiiI / OoooooooOO / o0oOOo0O0Ooo
if 12 - 12: I1IiiI * iII111i % i1IIi % iIii1I11I1II1
if 20 - 20: OOooOOo % Ii1I / Ii1I + Ii1I
if 45 - 45: oO0o - IiII - OoooooooOO - OoO0O00 . II111iiii / O0
if 51 - 51: O0 + iII111i
def IIIII11I1IiI(mc, parms):
    """Apply a translated address/port change to one map-cache entry.

    parms is [sockets, rloc, port, hostname].  Every RLOC (and RLE node)
    whose rloc_name matches hostname gets the new translated address/port;
    cached crypto keys are re-keyed, the RLOC-probe list is refreshed and
    the updated entry is written to the IPC map-cache.
    """
    sockets, rloc, port, hostname = parms

    new_addr_str = "{}:{}".format(rloc.print_address_no_iid(), port)
    eid_str = lisp.green(mc.print_eid_tuple(), False)
    msg = "Changed '{}' translated address:port to {} for EID {}, {} {}".format(
        hostname, lisp.red(new_addr_str, False), eid_str, "{}", "{}")

    for rloc_entry in mc.rloc_set:
        if rloc_entry.rle:
            # Update matching replication-list entries first.
            for rle_node in rloc_entry.rle.rle_nodes:
                if rle_node.rloc_name != hostname: continue
                rle_node.store_translated_rloc(rloc, port)
                old_addr_str = rle_node.address.print_address_no_iid() + \
                    ":" + str(rle_node.translated_port)
                lisp.lprint(msg.format("RLE", old_addr_str))

        if rloc_entry.rloc_name != hostname: continue

        old_addr_str = rloc_entry.rloc.print_address_no_iid() + ":" + \
            str(rloc_entry.translated_port)

        # Carry any negotiated crypto keys over to the new address:port.
        if old_addr_str in lisp.lisp_crypto_keys_by_rloc_encap:
            keys = lisp.lisp_crypto_keys_by_rloc_encap[old_addr_str]
            lisp.lisp_crypto_keys_by_rloc_encap[new_addr_str] = keys

        # Re-register on the probe list under the new translated address.
        rloc_entry.delete_from_rloc_probe_list(mc.eid, mc.group)
        rloc_entry.store_translated_rloc(rloc, port)
        rloc_entry.add_to_rloc_probe_list(mc.eid, mc.group)
        lisp.lprint(msg.format("RLOC", old_addr_str))

        # Trigger an immediate RLOC-probe when probing is enabled.
        if lisp.lisp_rloc_probing:
            seid = None if mc.group.is_null() else mc.eid
            deid = mc.eid if mc.group.is_null() else mc.group
            lisp.lisp_send_map_request(sockets, 0, seid, deid, rloc_entry)

    lisp.lisp_write_ipc_map_cache(True, mc)
    return (True, parms)
if 88 - 88: OoooooooOO - OoO0O00 * O0 * OoooooooOO . OoooooooOO
if 33 - 33: I1Ii111 + iII111i * oO0o / iIii1I11I1II1 - I1IiiI
if 54 - 54: I1Ii111 / OOooOOo . oO0o % iII111i
if 57 - 57: i11iIiiIii . I1ii11iIi11i - Ii1I - oO0o + OoOoOO00
if 63 - 63: OoOoOO00 * iII111i
if 69 - 69: O0 . OoO0O00
if 49 - 49: I1IiiI - I11i
def OoOOoOooooOOo(mc, parms):
    """Walk-callback: propagate an address change into a map-cache entry.

    Unicast entries are handled directly; (S,G) entries walk their
    per-source cache so every source-specific sub-entry is updated too.
    """
    if mc.group.is_null(): return IIIII11I1IiI(mc, parms)

    if mc.source_cache == None: return (True, parms)

    mc.source_cache.walk_cache(IIIII11I1IiI, parms)
    return (True, parms)
if 71 - 71: Oo0Ooo % OOooOOo
if 98 - 98: I11i % i11iIiiIii % ooOoO0o + Ii1I
if 78 - 78: I1ii11iIi11i % oO0o / iII111i - iIii1I11I1II1
if 69 - 69: I1Ii111
if 11 - 11: I1IiiI
if 16 - 16: Ii1I + IiII * O0 % i1IIi . I1IiiI
if 67 - 67: OoooooooOO / I1IiiI * Ii1I + I11i
if 65 - 65: OoooooooOO - I1ii11iIi11i / ooOoO0o / II111iiii / i1IIi
def o00oo0(sockets, hostname, rloc, port):
    """Update every map-cache entry after <hostname>'s NAT address changed."""
    lisp.lisp_map_cache.walk_cache(OoOOoOooooOOo,
        [sockets, rloc, port, hostname])
    return
if 38 - 38: ooOoO0o % II111iiii % I11i / OoO0O00 + OoOoOO00 / i1IIi
if 54 - 54: iIii1I11I1II1 % I1ii11iIi11i - OOooOOo / oO0o - OoO0O00 . I11i
if 11 - 11: I1ii11iIi11i . OoO0O00 * IiII * OoooooooOO + ooOoO0o
if 33 - 33: O0 * o0oOOo0O0Ooo - I1Ii111 % I1Ii111
if 18 - 18: I1Ii111 / Oo0Ooo * I1Ii111 + I1Ii111 * i11iIiiIii * I1ii11iIi11i
if 11 - 11: ooOoO0o / OoOoOO00 - IiII * OoooooooOO + OoooooooOO . OoOoOO00
if 26 - 26: Ii1I % I1ii11iIi11i
def o00Oo0oooooo(sred, packet):
    """Hex-dump fast-path packet headers when data-plane logging is on."""
    if lisp.lisp_data_plane_logging == False: return

    if sred in ["Send", "Receive"]:
        hexed = binascii.hexlify(packet[0:20])
        lisp.lprint("Fast-{}: ip {} {} {} {} {}".format(sred, hexed[0:8],
            hexed[8:16], hexed[16:24], hexed[24:32], hexed[32:40]))
    elif sred in ["Encap", "Decap"]:
        hexed = binascii.hexlify(packet[0:36])
        lisp.lprint("Fast-{}: ip {} {} {} {} {}, udp {} {}, lisp {} {}".format(
            sred, hexed[0:8], hexed[8:16], hexed[16:24], hexed[24:32],
            hexed[32:40], hexed[40:48], hexed[48:56], hexed[56:64],
            hexed[64:72]))
if 48 - 48: i1IIi - Ii1I / O0 * OoO0O00
if 71 - 71: I1ii11iIi11i
if 7 - 7: I1ii11iIi11i - I1IiiI . iIii1I11I1II1 - i1IIi
if 59 - 59: o0oOOo0O0Ooo
if 81 - 81: OoOoOO00 - OoOoOO00 . iII111i
if 73 - 73: I11i % i11iIiiIii - I1IiiI
if 7 - 7: O0 * i11iIiiIii * Ii1I + ooOoO0o % OoO0O00 - ooOoO0o
if 39 - 39: Oo0Ooo * OOooOOo % OOooOOo - OoooooooOO + o0oOOo0O0Ooo - I11i
def ii(dest, mc):
    """Log the outcome of a fast-path map-cache lookup."""
    if lisp.lisp_data_plane_logging == False: return

    if mc == None:
        outcome = "miss"
    else:
        outcome = "hit!"
    lisp.lprint("Fast-Lookup {} {}".format(dest.print_address(), outcome))
if 60 - 60: I11i . i1IIi + IiII / o0oOOo0O0Ooo . II111iiii
if 82 - 82: I1ii11iIi11i / I1IiiI % iIii1I11I1II1 / i1IIi - I1IiiI
if 7 - 7: I1Ii111 * OoO0O00 - ooOoO0o + OOooOOo * I1IiiI % OoO0O00
if 15 - 15: OoOoOO00 % I1IiiI * I11i
if 81 - 81: ooOoO0o - iIii1I11I1II1 - i1IIi / I1Ii111 - O0 * I11i
if 20 - 20: oO0o % IiII
if 19 - 19: I1ii11iIi11i % IiII + ooOoO0o / I1Ii111 . ooOoO0o
if 12 - 12: i1IIi + i1IIi - I1ii11iIi11i * Oo0Ooo % Oo0Ooo - II111iiii
def o0O(ts, msg):
    """Latency instrumentation helper (active only with latency debug on).

    With ts == None, returns a start timestamp; otherwise logs the elapsed
    time since ts in microseconds and returns None.
    """
    global I1I11I1I1I

    if I1I11I1I1I == False: return None

    if ts == None: return time.time()

    elapsed = (time.time() - ts) * 1000000
    lisp.lprint("{}-Latency: {} usecs".format(msg, round(elapsed, 1)), "force")
    return None
if 48 - 48: O0
if 93 - 93: i11iIiiIii - I1IiiI * I1ii11iIi11i * I11i % O0 + OoooooooOO
if 25 - 25: IiII + Ii1I / ooOoO0o . o0oOOo0O0Ooo % O0 * OoO0O00
if 84 - 84: ooOoO0o % Ii1I + i11iIiiIii
if 28 - 28: Oo0Ooo + OoO0O00 * OOooOOo % oO0o . I11i % O0
if 16 - 16: I11i - iIii1I11I1II1 / I1IiiI . II111iiii + iIii1I11I1II1
if 19 - 19: OoO0O00 - Oo0Ooo . O0
if 60 - 60: II111iiii + Oo0Ooo
def I1IiIiiIiIII(a):
    """Pack the first four bytes of <a> into a big-endian 32-bit integer."""
    value = 0
    for i in range(4):
        value = (value << 8) | ord(a[i])
    return value
if 11 - 11: I1IiiI * oO0o
if 81 - 81: iII111i + IiII
if 98 - 98: I1IiiI
if 95 - 95: ooOoO0o / ooOoO0o
if 30 - 30: I1ii11iIi11i + Oo0Ooo / Oo0Ooo % I1ii11iIi11i . I1ii11iIi11i
if 55 - 55: ooOoO0o - I11i + II111iiii + iII111i % Ii1I
if 41 - 41: i1IIi - I11i - Ii1I
if 8 - 8: OoO0O00 + I1Ii111 - o0oOOo0O0Ooo % Oo0Ooo % o0oOOo0O0Ooo * oO0o
if 9 - 9: Oo0Ooo - i11iIiiIii - OOooOOo * Ii1I + ooOoO0o
if 44 - 44: II111iiii
if 52 - 52: I1ii11iIi11i - Oo0Ooo + I1ii11iIi11i % o0oOOo0O0Ooo
if 35 - 35: iIii1I11I1II1
if 42 - 42: I1Ii111 . I1IiiI . i1IIi + OoOoOO00 + OOooOOo + I1IiiI
if 31 - 31: iII111i . OOooOOo - ooOoO0o . OoooooooOO / OoooooooOO
if 56 - 56: OoO0O00 / oO0o / i11iIiiIii + OoooooooOO - Oo0Ooo - I11i
if 21 - 21: O0 % IiII . I1IiiI / II111iiii + IiII
if 53 - 53: oO0o - I1IiiI - oO0o * iII111i
if 71 - 71: O0 - iIii1I11I1II1
if 12 - 12: OOooOOo / o0oOOo0O0Ooo
# Preallocated IPv4 scratch addresses reused by the fast data path for its
# per-packet source and destination lookups (avoids per-packet allocation).
iiI1I1 = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
ooO = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
if 6 - 6: iIii1I11I1II1 . ooOoO0o % o0oOOo0O0Ooo
def I1Iii1(packet):
    """Fast-path forwarding of an IPv4 data packet (Python 2 byte strings).

    Returns True when the packet was fully handled here (decapped and/or
    re-encapped and sent) and False when the caller must fall back to the
    regular (slow) data path.
    """
    global lisp_map_cache, OOo

    start_ts = o0O(None, "Fast")

    iid = 0
    outer_source = None

    # UDP?  Never fast-path LISP control (port 4342 = 0x10f6).  Decap LISP
    # data (port 4345-style encap port 0x10f5) by stripping the 36-byte
    # outer IP + UDP + LISP header after extracting the instance-id.
    if packet[9] == '\x11':
        if packet[20:22] == '\x10\xf6': return False
        if packet[22:24] == '\x10\xf6': return False

        if packet[20:22] == '\x10\xf5' or packet[22:24] == '\x10\xf5':
            outer_source = packet[12:16]
            iid_bytes = packet[32:35]
            iid = ord(iid_bytes[0]) << 16 | ord(iid_bytes[1]) << 8 | \
                ord(iid_bytes[2])
            if iid == 0xffffff: return False
            o00Oo0oooooo("Decap", packet)
            packet = packet[36::]

    o00Oo0oooooo("Receive", packet)

    # Destination EID lookup in the map-cache (scratch address ooO).
    dest_int = I1IiIiiIiIII(packet[16:20])
    ooO.instance_id = iid
    ooO.address = dest_int

    # Leave multicast destinations to the slow path.
    if (dest_int & 0xe0000000) == 0xe0000000: return False

    dest = ooO
    mc = lisp.lisp_map_cache.lookup_cache(dest, False)
    ii(dest, mc)
    if mc == None: return False

    # For decapsulated packets, validate/glean the inner source.
    if outer_source != None:
        source_int = I1IiIiiIiIII(packet[12:16])
        iiI1I1.instance_id = iid
        iiI1I1.address = source_int
        source_mc = lisp.lisp_map_cache.lookup_cache(iiI1I1, False)
        if source_mc == None:
            glean, _, _ = lisp.lisp_allow_gleaning(iiI1I1, None, None)
            if glean: return False
        elif source_mc.gleaned:
            # Gleaned entry must still match the outer source RLOC.
            outer_source = I1IiIiiIiIII(outer_source)
            if source_mc.rloc_set[0].rloc.address != outer_source:
                return False

        mc.add_recent_source(iiI1I1)

    # Native-forward in IID 0: retry lookup in the secondary instance-id.
    if (mc.action == lisp.LISP_NATIVE_FORWARD_ACTION and
        mc.eid.instance_id == 0):
        dest.instance_id = lisp.lisp_default_secondary_iid
        mc = lisp.lisp_map_cache.lookup_cache(dest, False)
        ii(dest, mc)
        if mc == None: return False

    if mc.action != lisp.LISP_NATIVE_FORWARD_ACTION:
        if mc.best_rloc_set == []: return False

        best = mc.best_rloc_set[0]
        if best.state != lisp.LISP_RLOC_UP_STATE: return False

        iid = mc.eid.instance_id
        port = best.translated_port
        stats = best.stats
        dest = best.rloc
        dest_int = dest.address
        my_rloc = lisp.lisp_myrlocs[0].address

        # Build the outer 20-byte IPv4 header and checksum it.
        header = '\x45\x00'
        length = len(packet) + 20 + 8 + 8
        header += chr((length >> 8) & 0xff) + chr(length & 0xff)
        header += '\xff\xff\x40\x00\x10\x11\x00\x00'
        header += chr((my_rloc >> 24) & 0xff)
        header += chr((my_rloc >> 16) & 0xff)
        header += chr((my_rloc >> 8) & 0xff)
        header += chr(my_rloc & 0xff)
        header += chr((dest_int >> 24) & 0xff)
        header += chr((dest_int >> 16) & 0xff)
        header += chr((dest_int >> 8) & 0xff)
        header += chr(dest_int & 0xff)
        header = lisp.lisp_ip_checksum(header)

        # UDP header followed by the 8-byte LISP data header.
        udp_length = length - 20
        encap = '\xff\x00' if port == 4341 else '\x10\xf5'
        encap += chr((port >> 8) & 0xff) + chr(port & 0xff)
        encap += chr((udp_length >> 8) & 0xff) + chr(udp_length & 0xff) + \
            '\x00\x00'

        encap += '\x08\xdf\xdf\xdf'
        encap += chr((iid >> 16) & 0xff)
        encap += chr((iid >> 8) & 0xff)
        encap += chr(iid & 0xff)
        encap += '\x00'

        packet = header + encap + packet
        o00Oo0oooooo("Encap", packet)
    else:
        length = len(packet)
        stats = mc.stats
        o00Oo0oooooo("Send", packet)

    # Account and transmit.
    mc.last_refresh_time = time.time()
    stats.increment(length)

    dest = dest.print_address_no_iid()
    OOo.sendto(packet, (dest, 0))

    o0O(start_ts, "Fast")
    return True
if 40 - 40: OoOoOO00 / IiII
if 79 - 79: OoO0O00 - iIii1I11I1II1 + Ii1I - I1Ii111
if 93 - 93: II111iiii . I1IiiI - Oo0Ooo + OoOoOO00
if 61 - 61: II111iiii
if 15 - 15: i11iIiiIii % I1IiiI * I11i / I1Ii111
if 90 - 90: iII111i
if 31 - 31: OOooOOo + O0
if 87 - 87: ooOoO0o
def IIIii ( lisp_packet , thread_name ) :
    """
    RTR data-plane input routine. Takes one received packet (a lisp_packet
    wrapper) and either: processes it as an encapsulated control message,
    hands an RLOC-probe to the control plane, or decapsulates it and
    re-encapsulates it toward the destination RLOC (unicast) or an RLE
    replication list (multicast). Returns nothing; all output is side
    effects (socket sends, map-cache/glean state, stats, logging).

    thread_name is used only for logging ("Receive-({})").
    NOTE(review): the single-letter-soup names are obfuscated; the globals
    below are sockets/flags defined elsewhere in this file.
    """
    global II1iII1i , O00OooOo00o , IiI11i1IIiiI
    global OOo , Ii1IIii11
    global oO0oIIII
    global iIiiI1
    global oo0Ooo0
    if 60 - 60: I1ii11iIi11i * I1IiiI
    # Start a timing/trace sample for this packet, labeled "RTR";
    # closed by the o0O(Ooo00O0o, ...) calls on the exit paths.
    Ooo00O0o = o0O ( None , "RTR" )
    if 17 - 17: OOooOOo % Oo0Ooo / I1ii11iIi11i . IiII * OOooOOo - II111iiii
    if 41 - 41: Ii1I
    if 77 - 77: I1Ii111
    if 65 - 65: II111iiii . I1IiiI % oO0o * OoO0O00
    # Optional pre-filter (flag oo0Ooo0): drop the packet early when
    # I1Iii1() says so.
    if ( oo0Ooo0 ) :
        if ( I1Iii1 ( lisp_packet . packet ) ) : return
    if 38 - 38: OoOoOO00 / iII111i % Oo0Ooo
    if 11 - 11: iII111i - oO0o + II111iiii - iIii1I11I1II1
    if 7 - 7: IiII - I11i / II111iiii * Ii1I . iII111i * iII111i
    if 61 - 61: I11i % ooOoO0o - OoO0O00 / Oo0Ooo
    if 4 - 4: OoooooooOO - i1IIi % Ii1I - OOooOOo * o0oOOo0O0Ooo
    Ooooo00o0OoO = lisp_packet
    # True when the buffer is a LISP-encapsulated packet.
    oooo0O0O0o0 = Ooooo00o0OoO . is_lisp_packet ( Ooooo00o0OoO . packet )
    if 51 - 51: II111iiii + IiII . i1IIi . I1ii11iIi11i + OoOoOO00 * I1IiiI
    if 72 - 72: oO0o + oO0o / II111iiii . OoooooooOO % Ii1I
    if 49 - 49: oO0o . OoO0O00 - Oo0Ooo * OoooooooOO . Oo0Ooo
    if 2 - 2: OoooooooOO % OOooOOo
    # Non-LISP packet: check whether it is an RLOC-probe; if so, pass it
    # to the control plane and stop.
    if ( oooo0O0O0o0 == False ) :
        oOoOOo0oo0 = Ooooo00o0OoO . packet
        o0O0Oo00Oo0o , OOOo , OoOO , oo0OOo0O = lisp . lisp_is_rloc_probe ( oOoOOo0oo0 , - 1 )
        if ( oOoOOo0oo0 != o0O0Oo00Oo0o ) :
            if ( OOOo == None ) : return
            lisp . lisp_parse_packet ( II1iII1i , o0O0Oo00Oo0o , OOOo , OoOO , oo0OOo0O )
            return
    if 39 - 39: OoooooooOO + oO0o % OOooOOo / OOooOOo
    if 27 - 27: iII111i . I11i . iIii1I11I1II1 . iIii1I11I1II1
    if 20 - 20: o0oOOo0O0Ooo / i1IIi
    if 71 - 71: OoOoOO00 . i1IIi
    if 94 - 94: OOooOOo . I1Ii111
    if 84 - 84: O0 . I11i - II111iiii . ooOoO0o / II111iiii
    # IP reassembly; lisp_reassemble() returns None until all fragments
    # have arrived.
    Ooooo00o0OoO . packet = lisp . lisp_reassemble ( Ooooo00o0OoO . packet )
    if ( Ooooo00o0OoO . packet == None ) : return
    if 47 - 47: OoooooooOO
    if 4 - 4: I1IiiI % I11i
    if 10 - 10: IiII . OoooooooOO - OoO0O00 + IiII - O0
    if 82 - 82: ooOoO0o + II111iiii
    if 39 - 39: oO0o % iIii1I11I1II1 % O0 % OoooooooOO * I1ii11iIi11i + iII111i
    # When flow logging is on, work on a private copy of the packet.
    if ( lisp . lisp_flow_logging ) : Ooooo00o0OoO = copy . deepcopy ( Ooooo00o0OoO )
    if 68 - 68: Oo0Ooo + i11iIiiIii
    if 69 - 69: iIii1I11I1II1 * iIii1I11I1II1 * i11iIiiIii + I1IiiI / OOooOOo % Ii1I
    if 58 - 58: OOooOOo * o0oOOo0O0Ooo + O0 % OOooOOo
    if 25 - 25: Oo0Ooo % I1ii11iIi11i * ooOoO0o
    if 6 - 6: iII111i . IiII * OoOoOO00 . i1IIi
    if 98 - 98: i1IIi
    if 65 - 65: OoOoOO00 / OoO0O00 % IiII
    # Decode headers. For LISP packets also strip the outer IP/UDP/LISP
    # headers after logging.
    if ( oooo0O0O0o0 ) :
        if ( Ooooo00o0OoO . decode ( True , None , lisp . lisp_decap_stats ) == None ) : return
        Ooooo00o0OoO . print_packet ( "Receive-({})" . format ( thread_name ) , True )
        Ooooo00o0OoO . strip_outer_headers ( )
    else :
        if ( Ooooo00o0OoO . decode ( False , None , None ) == None ) : return
        Ooooo00o0OoO . print_packet ( "Receive-({})" . format ( thread_name ) , False )
    if 45 - 45: OoOoOO00
    if 66 - 66: OoO0O00
    if 56 - 56: O0
    if 61 - 61: o0oOOo0O0Ooo / OOooOOo / Oo0Ooo * O0
    if 23 - 23: oO0o - OOooOOo + I11i
    if 12 - 12: I1IiiI / ooOoO0o % o0oOOo0O0Ooo / i11iIiiIii % OoooooooOO
    if 15 - 15: iIii1I11I1II1 % OoooooooOO - Oo0Ooo * Ii1I + I11i
    if 11 - 11: iII111i * Ii1I - OoOoOO00
    if 66 - 66: OoOoOO00 . i11iIiiIii - iII111i * o0oOOo0O0Ooo + OoooooooOO * I1ii11iIi11i
    if 74 - 74: Oo0Ooo
    if 61 - 61: Oo0Ooo - I1Ii111 * II111iiii % ooOoO0o * iIii1I11I1II1 + OoO0O00
    if 71 - 71: I11i / I11i * oO0o * oO0o / II111iiii
    # Instance-id 0xffffff marks an encapsulated control message. An
    # Info-Request feeds the NAT-traversal state; anything else is handed
    # to lisp_parse_packet() (offset 28 skips the outer IP/UDP headers).
    if ( oooo0O0O0o0 and Ooooo00o0OoO . lisp_header . get_instance_id ( ) == 0xffffff ) :
        II1I1iiIII1I1 = lisp . lisp_control_header ( )
        II1I1iiIII1I1 . decode ( Ooooo00o0OoO . packet )
        if ( II1I1iiIII1I1 . is_info_request ( ) ) :
            o0Ooo0o0ooo0 = lisp . lisp_info ( )
            o0Ooo0o0ooo0 . decode ( Ooooo00o0OoO . packet )
            o0Ooo0o0ooo0 . print_info ( )
            if 70 - 70: i11iIiiIii % iII111i
            if 11 - 11: IiII % I1ii11iIi11i % Ii1I / II111iiii % I1Ii111 - Oo0Ooo
            if 96 - 96: I1ii11iIi11i / II111iiii . Ii1I - iII111i * I11i * oO0o
            if 76 - 76: Ii1I - II111iiii * OOooOOo / OoooooooOO
            if 18 - 18: OoO0O00 + iIii1I11I1II1 - II111iiii - I1IiiI
            # Record (hostname, translated RLOC, translated port); when
            # this is new NAT state, notify via o00oo0().
            ooo = o0Ooo0o0ooo0 . hostname if ( o0Ooo0o0ooo0 . hostname != None ) else ""
            OOOO0oooo = Ooooo00o0OoO . outer_source
            o0o = Ooooo00o0OoO . udp_sport
            if ( lisp . lisp_store_nat_info ( ooo , OOOO0oooo , o0o ) ) :
                o00oo0 ( II1iII1i , ooo , OOOO0oooo , o0o )
            if 51 - 51: O0 - i1IIi / I1IiiI
        else :
            OOOo = Ooooo00o0OoO . outer_source . print_address_no_iid ( )
            oo0OOo0O = Ooooo00o0OoO . outer_ttl
            Ooooo00o0OoO = Ooooo00o0OoO . packet
            # Only RLOC-probe requests/replies keep the real TTL; other
            # control messages pass -1 (TTL check suppressed).
            if ( lisp . lisp_is_rloc_probe_request ( Ooooo00o0OoO [ 28 ] ) == False and
                lisp . lisp_is_rloc_probe_reply ( Ooooo00o0OoO [ 28 ] ) == False ) : oo0OOo0O = - 1
            Ooooo00o0OoO = Ooooo00o0OoO [ 28 : : ]
            lisp . lisp_parse_packet ( II1iII1i , Ooooo00o0OoO , OOOo , 0 , oo0OOo0O )
        if 37 - 37: o0oOOo0O0Ooo % ooOoO0o
        return
    if 83 - 83: OOooOOo . I1Ii111 + oO0o - OOooOOo * I1Ii111 / I1Ii111
    if 39 - 39: I1Ii111 / Oo0Ooo % OoO0O00 % i11iIiiIii
    if 90 - 90: I1Ii111 - OoooooooOO
    if 96 - 96: O0 . Ii1I % OoO0O00 * iIii1I11I1II1
    if 54 - 54: Ii1I * I1Ii111 - OoooooooOO % I1IiiI + O0
    if 6 - 6: I1ii11iIi11i - II111iiii / oO0o + i11iIiiIii + OOooOOo
    # An external data plane owns forwarding; this process only logs.
    if ( lisp . lisp_ipc_data_plane ) :
        lisp . dprint ( "Drop packet, external data-plane active" )
        return
    if 54 - 54: Ii1I - I11i - I1Ii111 . iIii1I11I1II1
    if 79 - 79: Ii1I . OoO0O00
    if 40 - 40: o0oOOo0O0Ooo + Oo0Ooo . o0oOOo0O0Ooo % ooOoO0o
    if 15 - 15: Ii1I * Oo0Ooo % I1ii11iIi11i * iIii1I11I1II1 - i11iIiiIii
    if 60 - 60: I1IiiI * I1Ii111 % OoO0O00 + oO0o
    # Count a successfully decapsulated packet.
    if ( oooo0O0O0o0 ) :
        lisp . lisp_decap_stats [ "good-packets" ] . increment ( len ( Ooooo00o0OoO . packet ) )
    if 52 - 52: i1IIi
    if 84 - 84: Ii1I / IiII
    if 86 - 86: OoOoOO00 * II111iiii - O0 . OoOoOO00 % iIii1I11I1II1 / OOooOOo
    if 11 - 11: I1IiiI * oO0o + I1ii11iIi11i / I1ii11iIi11i
    if 37 - 37: i11iIiiIii + i1IIi
    # Parse the inner header: MAC (VXLAN), IPv4 (may return an IGMP
    # report in I1i11II), or IPv6. Inner TTL was already decremented in
    # the outer header, so copy it back in.
    I1i11II = None
    if ( Ooooo00o0OoO . inner_dest . is_mac ( ) ) :
        Ooooo00o0OoO . packet = lisp . lisp_mac_input ( Ooooo00o0OoO . packet )
        if ( Ooooo00o0OoO . packet == None ) : return
        Ooooo00o0OoO . encap_port = lisp . LISP_VXLAN_DATA_PORT
    elif ( Ooooo00o0OoO . inner_version == 4 ) :
        I1i11II , Ooooo00o0OoO . packet = lisp . lisp_ipv4_input ( Ooooo00o0OoO . packet )
        if ( Ooooo00o0OoO . packet == None ) : return
        Ooooo00o0OoO . inner_ttl = Ooooo00o0OoO . outer_ttl
    elif ( Ooooo00o0OoO . inner_version == 6 ) :
        Ooooo00o0OoO . packet = lisp . lisp_ipv6_input ( Ooooo00o0OoO )
        if ( Ooooo00o0OoO . packet == None ) : return
        Ooooo00o0OoO . inner_ttl = Ooooo00o0OoO . outer_ttl
    else :
        lisp . dprint ( "Cannot parse inner packet header" )
        return
    if 31 - 31: oO0o / IiII * o0oOOo0O0Ooo . II111iiii
    if 89 - 89: O0
    if 2 - 2: I1ii11iIi11i . I1ii11iIi11i + I1ii11iIi11i * o0oOOo0O0Ooo
    if 100 - 100: Oo0Ooo % Ii1I / I11i
    if 30 - 30: Oo0Ooo - OOooOOo - iII111i
    # LISP-Trace packet: append decap data; clear the outer AFIs so the
    # packet is treated as needing fresh outer headers below.
    if ( Ooooo00o0OoO . is_trace ( ) ) :
        if ( lisp . lisp_trace_append ( Ooooo00o0OoO , ed = "decap" ) == False ) : return
        Ooooo00o0OoO . outer_source . afi = lisp . LISP_AFI_NONE
        Ooooo00o0OoO . outer_dest . afi = lisp . LISP_AFI_NONE
    if 81 - 81: o0oOOo0O0Ooo . OoooooooOO + OOooOOo * ooOoO0o
    if 74 - 74: i1IIi + O0 + Oo0Ooo
    if 5 - 5: Oo0Ooo * OoOoOO00
    if 46 - 46: ooOoO0o
    if 33 - 33: iII111i - II111iiii * OoooooooOO - Oo0Ooo - OOooOOo
    if 84 - 84: I1Ii111 + Oo0Ooo - OoOoOO00 * OoOoOO00
    # Source gleaning: learn (inner source EID -> outer RLOC/port). If
    # the packet carried an IGMP report (I1i11II), pass it along and stop.
    i1iIi1iI , i1I11IiI1iiII , o00oOo0oOoo = lisp . lisp_allow_gleaning ( Ooooo00o0OoO . inner_source , None ,
        Ooooo00o0OoO . outer_source )
    if ( i1iIi1iI ) :
        Ooo = Ooooo00o0OoO . packet if ( I1i11II ) else None
        lisp . lisp_glean_map_cache ( Ooooo00o0OoO . inner_source , Ooooo00o0OoO . outer_source ,
            Ooooo00o0OoO . udp_sport , Ooo )
        if ( I1i11II ) : return
    if 65 - 65: Oo0Ooo / I11i
    if 12 - 12: I11i % OoOoOO00
    if 48 - 48: iII111i . i11iIiiIii
    if 5 - 5: oO0o . I1ii11iIi11i . II111iiii . OoooooooOO
    if 96 - 96: i11iIiiIii - OOooOOo % O0 / OoO0O00
    if 100 - 100: iII111i / Ii1I - OoooooooOO % II111iiii - I1IiiI % OoOoOO00
    # Is the destination subject to gleaning? (multicast groups are never
    # a "gleaned dest" for the unicast path).
    oOO00O = Ooooo00o0OoO . inner_dest
    if ( oOO00O . is_multicast_address ( ) ) :
        ooo0OO = False
        i1I11IiI1iiII , o00oOo0oOoo , iIi1IiI = lisp . lisp_allow_gleaning ( Ooooo00o0OoO . inner_source , oOO00O , None )
    else :
        ooo0OO , i1I11IiI1iiII , o00oOo0oOoo = lisp . lisp_allow_gleaning ( oOO00O , None , None )
    if 14 - 14: IiII % oO0o % Oo0Ooo - i11iIiiIii
    Ooooo00o0OoO . gleaned_dest = ooo0OO
    if 53 - 53: Ii1I % Oo0Ooo
    if 59 - 59: OOooOOo % iIii1I11I1II1 . i1IIi + II111iiii * IiII
    if 41 - 41: Ii1I % I1ii11iIi11i
    if 12 - 12: OOooOOo
    # Map-cache lookup for the inner flow.
    O00o0OO0000oo = lisp . lisp_map_cache_lookup ( Ooooo00o0OoO . inner_source , Ooooo00o0OoO . inner_dest )
    if ( O00o0OO0000oo ) : O00o0OO0000oo . add_recent_source ( Ooooo00o0OoO . inner_source )
    if 69 - 69: OoooooooOO + OOooOOo
    if 26 - 26: Oo0Ooo + OOooOOo / OoO0O00 % OoOoOO00 % I1ii11iIi11i + II111iiii
    if 31 - 31: I11i % OOooOOo * I11i
    if 45 - 45: i1IIi . I1IiiI + OOooOOo - OoooooooOO % ooOoO0o
    if 1 - 1: iIii1I11I1II1
    # Entry is natively-forwarded or default (EID address 0): retry the
    # lookup in the source database's secondary instance-id, if any.
    if ( O00o0OO0000oo and ( O00o0OO0000oo . action == lisp . LISP_NATIVE_FORWARD_ACTION or
        O00o0OO0000oo . eid . address == 0 ) ) :
        oo = lisp . lisp_db_for_lookups . lookup_cache ( Ooooo00o0OoO . inner_source , False )
        if ( oo and oo . secondary_iid ) :
            i1II1I = Ooooo00o0OoO . inner_dest
            i1II1I . instance_id = oo . secondary_iid
            if 95 - 95: OoO0O00 - OOooOOo / II111iiii % I1ii11iIi11i . o0oOOo0O0Ooo
            O00o0OO0000oo = lisp . lisp_map_cache_lookup ( Ooooo00o0OoO . inner_source , i1II1I )
            if ( O00o0OO0000oo ) :
                Ooooo00o0OoO . gleaned_dest = O00o0OO0000oo . gleaned
                O00o0OO0000oo . add_recent_source ( Ooooo00o0OoO . inner_source )
            else :
                ooo0OO , i1I11IiI1iiII , o00oOo0oOoo = lisp . lisp_allow_gleaning ( i1II1I , None ,
                    None )
                Ooooo00o0OoO . gleaned_dest = ooo0OO
            if 24 - 24: i1IIi . i11iIiiIii
        if 16 - 16: Oo0Ooo % I1ii11iIi11i + I11i - O0 . iII111i / I1Ii111
    if 35 - 35: oO0o / I1Ii111 / II111iiii - iIii1I11I1II1 + II111iiii . I1Ii111
    if 81 - 81: iII111i * OOooOOo - I1ii11iIi11i * Ii1I % OoOoOO00 * OoOoOO00
    if 59 - 59: iIii1I11I1II1
    if 7 - 7: OOooOOo * I1IiiI / o0oOOo0O0Ooo * i11iIiiIii
    if 84 - 84: OOooOOo . iII111i
    if 8 - 8: Oo0Ooo + II111iiii * OOooOOo * OoOoOO00 * I11i / IiII
    if 21 - 21: oO0o / OoooooooOO
    # Cache miss for a gleanable EID: don't Map-Request, just wait for
    # gleaning to populate the entry.
    if ( O00o0OO0000oo == None and ooo0OO ) :
        lisp . lprint ( "Suppress Map-Request for gleaned EID {}" . format ( lisp . green ( Ooooo00o0OoO . inner_dest . print_address ( ) , False ) ) )
        if 11 - 11: OOooOOo % Ii1I - i11iIiiIii - oO0o + ooOoO0o + IiII
        return
    if 87 - 87: I1Ii111 * i1IIi / I1ii11iIi11i
    if 6 - 6: o0oOOo0O0Ooo + Oo0Ooo - OoooooooOO % OOooOOo * OoOoOO00
    # Cache miss (or send-map-request action): issue a rate-limited
    # Map-Request and drop this packet.
    if ( O00o0OO0000oo == None or O00o0OO0000oo . action == lisp . LISP_SEND_MAP_REQUEST_ACTION ) :
        if ( lisp . lisp_rate_limit_map_request ( Ooooo00o0OoO . inner_source ,
            Ooooo00o0OoO . inner_dest ) ) : return
        lisp . lisp_send_map_request ( II1iII1i , iiI1iIiI ,
            Ooooo00o0OoO . inner_source , Ooooo00o0OoO . inner_dest , None )
        if 69 - 69: i1IIi
        if ( Ooooo00o0OoO . is_trace ( ) ) :
            OOOO0oooo = oO0oIIII
            ooOoOOOOo = "map-cache miss"
            lisp . lisp_trace_append ( Ooooo00o0OoO , reason = ooOoOOOOo , lisp_socket = OOOO0oooo )
        if 71 - 71: II111iiii * iIii1I11I1II1 / I1ii11iIi11i
        return
    if 23 - 23: II111iiii
    if 24 - 24: iIii1I11I1II1 + iIii1I11I1II1 * iII111i
    if 18 - 18: iII111i * I11i - Ii1I
    if 31 - 31: Oo0Ooo - O0 % OoOoOO00 % oO0o
    if 45 - 45: I1ii11iIi11i + II111iiii * i11iIiiIii
    if 13 - 13: OoooooooOO * oO0o - Ii1I / OOooOOo + I11i + IiII
    # Entry is nearing expiry: refresh it in the background but keep
    # forwarding with the current RLOC set.
    if ( O00o0OO0000oo and O00o0OO0000oo . refresh ( ) ) :
        lisp . lprint ( "Refresh map-cache entry {}" . format ( lisp . green ( O00o0OO0000oo . print_eid_tuple ( ) , False ) ) )
        if 39 - 39: iIii1I11I1II1 - OoooooooOO
        lisp . lisp_send_map_request ( II1iII1i , iiI1iIiI ,
            Ooooo00o0OoO . inner_source , Ooooo00o0OoO . inner_dest , None )
    if 81 - 81: I1ii11iIi11i - O0 * OoooooooOO
    if 23 - 23: II111iiii / oO0o
    if 28 - 28: Oo0Ooo * ooOoO0o - OoO0O00
    if 19 - 19: I11i
    if 67 - 67: O0 % iIii1I11I1II1 / IiII . i11iIiiIii - Ii1I + O0
    if 27 - 27: OOooOOo
    # Per-entry usage accounting.
    O00o0OO0000oo . last_refresh_time = time . time ( )
    O00o0OO0000oo . stats . increment ( len ( Ooooo00o0OoO . packet ) )
    if 89 - 89: II111iiii / oO0o
    if 14 - 14: OOooOOo . I1IiiI * ooOoO0o + II111iiii - ooOoO0o + OOooOOo
    if 18 - 18: oO0o - o0oOOo0O0Ooo - I1IiiI - I1IiiI
    if 54 - 54: Oo0Ooo + I1IiiI / iII111i . I1IiiI * OoOoOO00
    # Choose an RLOC: (rloc, port, nonce, action, rle, rloc-entry).
    IIiIiiiIIIIi1 , iIi11 , O00O0 , iIiIiiiIi , i1iiIIi11I , o00oO0oOo00 = O00o0OO0000oo . select_rloc ( Ooooo00o0OoO , None )
    if 80 - 80: ooOoO0o * O0
    if 78 - 78: OoOoOO00
    # No unicast RLOC and no RLE: either natively forward or drop.
    if ( IIiIiiiIIIIi1 == None and i1iiIIi11I == None ) :
        if ( iIiIiiiIi == lisp . LISP_NATIVE_FORWARD_ACTION ) :
            lisp . dprint ( "Natively forwarding" )
            Ooooo00o0OoO . send_packet ( OOo , Ooooo00o0OoO . inner_dest )
            if 20 - 20: iII111i % Ii1I . Ii1I / I11i + OoOoOO00 . Ii1I
            if ( Ooooo00o0OoO . is_trace ( ) ) :
                OOOO0oooo = oO0oIIII
                ooOoOOOOo = "not an EID"
                lisp . lisp_trace_append ( Ooooo00o0OoO , reason = ooOoOOOOo , lisp_socket = OOOO0oooo )
            if 53 - 53: OOooOOo + I1IiiI / i11iIiiIii - o0oOOo0O0Ooo * oO0o / OoooooooOO
            o0O ( Ooo00O0o , "RTR" )
            return
        if 89 - 89: iIii1I11I1II1 / I1IiiI - II111iiii / Ii1I . i11iIiiIii . Ii1I
        ooOoOOOOo = "No reachable RLOCs found"
        lisp . dprint ( ooOoOOOOo )
        if 48 - 48: O0 + O0 . I1Ii111 - ooOoO0o
        if ( Ooooo00o0OoO . is_trace ( ) ) :
            OOOO0oooo = oO0oIIII
            lisp . lisp_trace_append ( Ooooo00o0OoO , reason = ooOoOOOOo , lisp_socket = OOOO0oooo )
        if 63 - 63: oO0o
        return
    if 71 - 71: i1IIi . Ii1I * iII111i % OoooooooOO + OOooOOo
    # Null RLOC encodes an explicit drop action.
    if ( IIiIiiiIIIIi1 and IIiIiiiIIIIi1 . is_null ( ) ) :
        lisp . dprint ( "Drop action RLOC found" )
        if 36 - 36: IiII
        if ( Ooooo00o0OoO . is_trace ( ) ) :
            OOOO0oooo = oO0oIIII
            ooOoOOOOo = "drop action"
            lisp . lisp_trace_append ( Ooooo00o0OoO , reason = ooOoOOOOo , lisp_socket = OOOO0oooo )
        if 49 - 49: OOooOOo / OoooooooOO / I1IiiI
        return
    if 74 - 74: I1Ii111 % I1ii11iIi11i
    if 7 - 7: II111iiii
    if 27 - 27: oO0o . OoooooooOO + i11iIiiIii
    if 86 - 86: I11i / o0oOOo0O0Ooo - o0oOOo0O0Ooo + I1ii11iIi11i + oO0o
    if 33 - 33: o0oOOo0O0Ooo . iII111i . IiII . i1IIi
    # Propagate inner TOS/TTL into the new outer header.
    Ooooo00o0OoO . outer_tos = Ooooo00o0OoO . inner_tos
    Ooooo00o0OoO . outer_ttl = Ooooo00o0OoO . inner_ttl
    if 49 - 49: I1ii11iIi11i
    if 84 - 84: I11i - Oo0Ooo / O0 - I1Ii111
    if 21 - 21: O0 * O0 % I1ii11iIi11i
    if 94 - 94: I11i + II111iiii % i11iIiiIii
    # Unicast re-encapsulation path.
    if ( IIiIiiiIIIIi1 ) :
        Ooooo00o0OoO . encap_port = iIi11
        if ( iIi11 == 0 ) : Ooooo00o0OoO . encap_port = lisp . LISP_DATA_PORT
        Ooooo00o0OoO . outer_dest . copy_address ( IIiIiiiIIIIi1 )
        i1i1IiIiIi1Ii = Ooooo00o0OoO . outer_dest . afi_to_version ( )
        Ooooo00o0OoO . outer_version = i1i1IiIiIi1Ii
        if 64 - 64: OOooOOo + OoooooooOO * OoooooooOO
        # Local RLOC: iIiiI1 for IPv4 outer, lisp_myrlocs[1] for IPv6.
        i1IiiI1I1IIi11i1 = iIiiI1 if ( i1i1IiIiIi1Ii == 4 ) else lisp . lisp_myrlocs [ 1 ]
        if 45 - 45: ooOoO0o % o0oOOo0O0Ooo - ooOoO0o
        Ooooo00o0OoO . outer_source . copy_address ( i1IiiI1I1IIi11i1 )
        if 31 - 31: IiII / i11iIiiIii
        if ( Ooooo00o0OoO . is_trace ( ) ) :
            OOOO0oooo = oO0oIIII
            if ( lisp . lisp_trace_append ( Ooooo00o0OoO , rloc_entry = o00oO0oOo00 ,
                lisp_socket = OOOO0oooo ) == False ) : return
        if 83 - 83: I1ii11iIi11i / I1Ii111 - i11iIiiIii . iIii1I11I1II1 + Oo0Ooo
        if 59 - 59: O0 % Oo0Ooo
        if 92 - 92: Ii1I % iII111i / I1ii11iIi11i % I1ii11iIi11i * I1IiiI
        if 74 - 74: O0 . I1IiiI % OoO0O00 % IiII
        if 87 - 87: oO0o - i11iIiiIii
        if ( Ooooo00o0OoO . encode ( O00O0 ) == None ) : return
        # Only log packets that fit a standard MTU.
        if ( len ( Ooooo00o0OoO . packet ) <= 1500 ) : Ooooo00o0OoO . print_packet ( "Send" , True )
        if 78 - 78: i11iIiiIii / iIii1I11I1II1 - o0oOOo0O0Ooo
        if 23 - 23: I11i
        if 40 - 40: o0oOOo0O0Ooo - II111iiii / Oo0Ooo
        if 14 - 14: I1ii11iIi11i
        # Pick the raw socket matching the outer IP version.
        iI1 = Ii1IIii11 if i1i1IiIiIi1Ii == 6 else OOo
        Ooooo00o0OoO . send_packet ( iI1 , Ooooo00o0OoO . outer_dest )
        if 14 - 14: I1ii11iIi11i
    elif ( i1iiIIi11I ) :
        if 49 - 49: oO0o / i1IIi % Ii1I . I1IiiI
        if 93 - 93: OOooOOo
        if 43 - 43: I1ii11iIi11i / I1IiiI . ooOoO0o
        if 62 - 62: iIii1I11I1II1 + iII111i . Oo0Ooo / IiII % O0 . I1Ii111
        # Multicast replication: encapsulate a copy to every node on the
        # RLE forwarding list.
        Oo0oOooOoOo = len ( Ooooo00o0OoO . packet )
        for I1i in i1iiIIi11I . rle_forwarding_list :
            Ooooo00o0OoO . outer_dest . copy_address ( I1i . address )
            Ooooo00o0OoO . encap_port = lisp . LISP_DATA_PORT if I1i . translated_port == 0 else I1i . translated_port
            if 59 - 59: OoooooooOO . Ii1I / O0 - OOooOOo
            if 1 - 1: IiII / IiII - i11iIiiIii
            i1i1IiIiIi1Ii = Ooooo00o0OoO . outer_dest . afi_to_version ( )
            Ooooo00o0OoO . outer_version = i1i1IiIiIi1Ii
            if 87 - 87: Oo0Ooo / O0 * IiII / o0oOOo0O0Ooo
            i1IiiI1I1IIi11i1 = iIiiI1 if ( i1i1IiIiIi1Ii == 4 ) else lisp . lisp_myrlocs [ 1 ]
            if 19 - 19: I1Ii111 + i1IIi . I1IiiI - Oo0Ooo
            Ooooo00o0OoO . outer_source . copy_address ( i1IiiI1I1IIi11i1 )
            if 16 - 16: oO0o + ooOoO0o / o0oOOo0O0Ooo
            if ( Ooooo00o0OoO . is_trace ( ) ) :
                OOOO0oooo = oO0oIIII
                ooOoOOOOo = "replicate"
                if ( lisp . lisp_trace_append ( Ooooo00o0OoO , reason = ooOoOOOOo , lisp_socket = OOOO0oooo ) == False ) : return
            if 82 - 82: IiII * i11iIiiIii % II111iiii - OoooooooOO
            if 90 - 90: Oo0Ooo . oO0o * i1IIi - i1IIi
            if 16 - 16: I1IiiI * i1IIi - o0oOOo0O0Ooo . IiII % I11i / o0oOOo0O0Ooo
            if ( Ooooo00o0OoO . encode ( None ) == None ) : return
            if 14 - 14: iIii1I11I1II1 * I1Ii111 * I1ii11iIi11i / iIii1I11I1II1 * IiII / I11i
            Ooooo00o0OoO . print_packet ( "Replicate-to-L{}" . format ( I1i . level ) , True )
            Ooooo00o0OoO . send_packet ( OOo , Ooooo00o0OoO . outer_dest )
            if 77 - 77: OoO0O00 + I1Ii111 + I1Ii111 * Ii1I / OoooooooOO . Ii1I
            if 62 - 62: i1IIi - i1IIi
            if 69 - 69: OoOoOO00 % oO0o - I11i
            if 38 - 38: iIii1I11I1II1 + i11iIiiIii / i11iIiiIii % OoO0O00 / ooOoO0o % Ii1I
            if 7 - 7: IiII * I1IiiI + i1IIi + i11iIiiIii + Oo0Ooo % I1IiiI
            # Strip the outer headers encode() just added so the next
            # iteration starts from the original inner packet.
            OO00OO0o0 = len ( Ooooo00o0OoO . packet ) - Oo0oOooOoOo
            Ooooo00o0OoO . packet = Ooooo00o0OoO . packet [ OO00OO0o0 : : ]
            if 52 - 52: I1ii11iIi11i % oO0o - i11iIiiIii
            if ( lisp . lisp_flow_logging ) : Ooooo00o0OoO = copy . deepcopy ( Ooooo00o0OoO )
    if 30 - 30: iII111i / OoO0O00 + oO0o
    if 6 - 6: iII111i . I11i + Ii1I . I1Ii111
    if 70 - 70: OoO0O00
    if 46 - 46: I11i - i1IIi
    if 46 - 46: I1Ii111 % Ii1I
    if 72 - 72: iIii1I11I1II1
    del ( Ooooo00o0OoO )
    if 45 - 45: Oo0Ooo - o0oOOo0O0Ooo % I1Ii111
    # Close the "RTR" timing sample started on entry.
    o0O ( Ooo00O0o , "RTR" )
    return
if 38 - 38: I1Ii111 % OOooOOo - OoooooooOO
if 87 - 87: OoO0O00 % I1IiiI
if 77 - 77: iIii1I11I1II1 - i1IIi . oO0o
if 26 - 26: o0oOOo0O0Ooo * IiII . i1IIi
if 59 - 59: O0 + i1IIi - o0oOOo0O0Ooo
if 62 - 62: i11iIiiIii % OOooOOo . IiII . OOooOOo
if 84 - 84: i11iIiiIii * OoO0O00
def I1I1iII1i(lisp_thread):
    """
    Worker-thread main loop. Blocks on this thread's input queue, counts
    every dequeued packet buffer in input_stats, installs the buffer in the
    thread's reusable lisp_packet, and runs it through the RTR data-plane
    entry point IIIii(). Loops forever; the trailing return is unreachable.
    """
    lisp.lisp_set_exception()

    while (True):
        #
        # Block until the pcap thread hands us a raw packet buffer.
        #
        raw_buffer = lisp_thread.input_queue.get()

        #
        # Account for it, then process it on this thread.
        #
        lisp_thread.input_stats.increment(len(raw_buffer))
        lisp_thread.lisp_packet.packet = raw_buffer
        IIIii(lisp_thread.lisp_packet, lisp_thread.thread_name)
    #endwhile
    return
if 33 - 33: I1IiiI / OoO0O00
if 12 - 12: II111iiii
if 2 - 2: i1IIi - I1IiiI + I11i . II111iiii
if 25 - 25: oO0o
if 34 - 34: OoOoOO00 . iIii1I11I1II1 % O0
if 43 - 43: I1ii11iIi11i - iII111i
if 70 - 70: iII111i / OOooOOo % ooOoO0o - Ii1I
if 47 - 47: iII111i
def o00Ooo0(thread):
    """
    Decide whether this pcap thread owns the current packet. The wall-clock
    time modulo the number of pcap threads selects exactly one thread slot
    at any instant; return True when that slot is this thread's number.
    """
    slot = int(time.time() % thread.number_of_pcap_threads)
    return (slot == thread.thread_number)
if 4 - 4: OoOoOO00 + Ii1I / oO0o
if 13 - 13: iII111i
if 80 - 80: Ii1I - o0oOOo0O0Ooo
if 41 - 41: o0oOOo0O0Ooo - Oo0Ooo * I1IiiI
if 82 - 82: OoO0O00 % o0oOOo0O0Ooo % OOooOOo / O0
if 94 - 94: I1ii11iIi11i + I1ii11iIi11i + OoooooooOO % ooOoO0o
if 7 - 7: iII111i
if 78 - 78: OOooOOo + iII111i . IiII
def Oo(parms, not_used, packet):
    """
    libpcap receive callback. parms[0] is the capture device name and
    parms[1] the capturing lisp_thread. Strips the datalink header, then
    either queues the packet to a worker thread (round-robin over the tail
    of the global thread array I11) or, when no workers are configured,
    processes it inline via IIIii().
    """
    if (o00Ooo0(parms[1]) == False): return

    device = parms[0]
    capture_thread = parms[1]
    worker_count = capture_thread.number_of_worker_threads

    capture_thread.input_stats.increment(len(packet))

    #
    # Datalink header length to strip: 4 on "lo0", otherwise 14 on macOS
    # and 16 elsewhere (presumably Linux cooked capture — TODO confirm).
    #
    if (device == "lo0"):
        dl_header_len = 4
    elif (lisp.lisp_is_macos()):
        dl_header_len = 14
    else:
        dl_header_len = 16
    #endif
    packet = packet[dl_header_len::]

    if (worker_count):
        #
        # Round-robin on the running packet count; worker threads occupy
        # the last worker_count slots of I11.
        #
        slot = capture_thread.input_stats.packet_count % worker_count
        slot += len(I11) - worker_count
        I11[slot].input_queue.put(packet)
    else:
        capture_thread.lisp_packet.packet = packet
        IIIii(capture_thread.lisp_packet, capture_thread.thread_name)
    #endif
    return
if 45 - 45: OOooOOo * I1Ii111 . ooOoO0o - I1Ii111 + IiII
if 34 - 34: OOooOOo . Oo0Ooo
if 78 - 78: I1ii11iIi11i % I1IiiI / OoooooooOO % OOooOOo - iII111i
if 2 - 2: iIii1I11I1II1
if 45 - 45: OoooooooOO / i11iIiiIii
if 10 - 10: iII111i - oO0o * iIii1I11I1II1 % iIii1I11I1II1 * IiII - I1ii11iIi11i
if 97 - 97: II111iiii % I1Ii111 + I1Ii111 - OoO0O00 / Ii1I * I1IiiI
if 17 - 17: Ii1I
def i1i1IiIi1 ( lisp_thread ) :
    """
    pcap-thread entry point. Opens a live capture on "lo0" (macOS) or
    "any" (other systems), builds a BPF filter that matches LISP data
    (ports 4341/8472/4789), decap-eligible IP-in-UDP traffic, RLOC-probe
    control messages on 4342 and, when 'lisp-nat = yes' appears in
    ./lisp.config, all forwardable unicast. Then loops forever handing
    packets to the Oo() callback. Returns early if no local RLOC exists.
    """
    lisp . lisp_set_exception ( )
    if ( lisp . lisp_myrlocs [ 0 ] == None ) : return
    if 22 - 22: I11i * O0 . II111iiii - OoO0O00
    # snaplen 9000, non-promiscuous, 100 ms read timeout.
    I1Iii1I = "lo0" if lisp . lisp_is_macos ( ) else "any"
    o0Oo00OO0 = pcappy . open_live ( I1Iii1I , 9000 , 0 , 100 )
    if 50 - 50: Ii1I * o0oOOo0O0Ooo % i11iIiiIii
    if 96 - 96: I1ii11iIi11i + Oo0Ooo * OoO0O00 % ooOoO0o - O0
    if 54 - 54: OoOoOO00 . oO0o % i11iIiiIii / OoooooooOO + IiII % oO0o
    if 36 - 36: oO0o
    if 74 - 74: OoooooooOO
    # lisp-nat enabled? (python2 'commands' module). The second line
    # requires the matched config line to start with a space — i.e. an
    # indented setting, not a commented-out one.
    OoOoO0O = commands . getoutput ( "egrep 'lisp-nat = yes' ./lisp.config" )
    OoOoO0O = ( OoOoO0O != "" and OoOoO0O [ 0 ] == " " )
    if 100 - 100: O0
    # Build "dst host A or B or ..." over every local address; i1IiI
    # keeps the bare "A or B or ..." list for reuse below.
    o00IiI1iiII1i1i = "(dst host "
    i1IiI = ""
    for oO00OOoO00 in lisp . lisp_get_all_addresses ( ) :
        o00IiI1iiII1i1i += "{} or " . format ( oO00OOoO00 )
        i1IiI += "{} or " . format ( oO00OOoO00 )
    if 82 - 82: OoOoOO00
    # Drop the trailing " or " and add the LISP/VXLAN data ports plus
    # IP-in-UDP patterns (TTL-decrement-eligible traffic).
    o00IiI1iiII1i1i = o00IiI1iiII1i1i [ 0 : - 4 ]
    o00IiI1iiII1i1i += ") and ((udp dst port 4341 or 8472 or 4789) or "
    o00IiI1iiII1i1i += "(proto 17 and (ip[6]&0xe0 == 0x20 or " + "(ip[6]&0xe0 == 0 and ip[7] != 0))))"
    if 80 - 80: OoO0O00 % iII111i
    if 99 - 99: ooOoO0o / iIii1I11I1II1 - Ii1I * I1ii11iIi11i % I1IiiI
    if 13 - 13: OoO0O00
    if 70 - 70: I1Ii111 + O0 . oO0o * Ii1I
    if 2 - 2: OoooooooOO . OOooOOo . IiII
    if 42 - 42: OOooOOo % oO0o / OoO0O00 - oO0o * i11iIiiIii
    # Also capture Map-Request (0x12) / RLOC-probe (0x28) control traffic
    # on port 4342 that we did not source ourselves.
    i1IiI = i1IiI [ 0 : - 4 ]
    o00IiI1iiII1i1i += ( " or (not (src host {}) and " + "((udp src port 4342 and ip[28] == 0x28) or " + "(udp dst port 4342 and ip[28] == 0x12)))" ) . format ( i1IiI )
    if 19 - 19: oO0o * I1IiiI % i11iIiiIii
    if 24 - 24: o0oOOo0O0Ooo
    if 10 - 10: o0oOOo0O0Ooo % Ii1I / OOooOOo
    # With lisp-nat, additionally capture all unicast destined elsewhere.
    if ( OoOoO0O ) :
        o00IiI1iiII1i1i += ( " or (dst net 0.0.0.0/0 and " + "not (host {} or src net 127.0.0.0/8))" ) . format ( i1IiI )
    if 28 - 28: OOooOOo % ooOoO0o
    if 48 - 48: i11iIiiIii % oO0o
    if 29 - 29: iII111i + i11iIiiIii % I11i
    lisp . lprint ( "Capturing packets for: '{}'" . format ( o00IiI1iiII1i1i ) )
    o0Oo00OO0 . filter = o00IiI1iiII1i1i
    if 93 - 93: OoOoOO00 % iIii1I11I1II1
    if 90 - 90: I1IiiI - OOooOOo / Ii1I / O0 / I11i
    if 87 - 87: OoOoOO00 / IiII + iIii1I11I1II1
    if 93 - 93: iIii1I11I1II1 + oO0o % ooOoO0o
    # Run forever; Oo() receives (parms, header, packet).
    o0Oo00OO0 . loop ( - 1 , Oo , [ I1Iii1I , lisp_thread ] )
    return
if 21 - 21: OOooOOo
if 6 - 6: IiII
if 46 - 46: IiII + oO0o
if 79 - 79: OoooooooOO - IiII * IiII . OoOoOO00
if 100 - 100: II111iiii * I11i % I1IiiI / I1ii11iIi11i
if 90 - 90: I1ii11iIi11i . ooOoO0o . OoOoOO00 . Ii1I
if 4 - 4: Ii1I + OoOoOO00 % I1ii11iIi11i / i11iIiiIii
if 74 - 74: II111iiii . O0 - I1IiiI + IiII % i11iIiiIii % OoOoOO00
def O0OOO0 ( lisp_raw_socket , eid , geid , igmp ) :
    """
    Data-encapsulate a pre-built IGMP Query (raw bytes in 'igmp') to the
    gleaned EID 'eid' for multicast group 'geid'. The destination RLOC and
    translated port come from the RLE node of the (eid, geid) map-cache
    entry whose rloc_name matches the EID. Silently returns when no
    matching map-cache/RLE state exists.
    """
    if 8 - 8: i11iIiiIii / II111iiii + o0oOOo0O0Ooo * Ii1I % IiII . I11i
    if 6 - 6: IiII % Oo0Ooo . Oo0Ooo - I1ii11iIi11i / I11i . i1IIi
    if 99 - 99: OoOoOO00 . I1Ii111
    if 59 - 59: I11i / Oo0Ooo / OOooOOo / O0 / OoOoOO00 + o0oOOo0O0Ooo
    Ooooo00o0OoO = lisp . lisp_packet ( igmp )
    if 13 - 13: o0oOOo0O0Ooo % oO0o / I1Ii111 % I1Ii111 % O0
    if 90 - 90: IiII . ooOoO0o / iIii1I11I1II1
    if 28 - 28: IiII + oO0o - ooOoO0o / iIii1I11I1II1 - I1IiiI
    if 45 - 45: O0 / i1IIi * oO0o * OoO0O00
    # Need a map-cache entry with a non-empty RLOC set carrying an RLE.
    O00o0OO0000oo = lisp . lisp_map_cache_lookup ( eid , geid )
    if ( O00o0OO0000oo == None ) : return
    if ( O00o0OO0000oo . rloc_set == [ ] ) : return
    if ( O00o0OO0000oo . rloc_set [ 0 ] . rle == None ) : return
    if 35 - 35: I1ii11iIi11i / iII111i % I1IiiI + iIii1I11I1II1
    # Find the RLE node registered under this EID's address string.
    oO00o = eid . print_address_no_iid ( )
    for oO0oOo0 in O00o0OO0000oo . rloc_set [ 0 ] . rle . rle_nodes :
        if ( oO0oOo0 . rloc_name == oO00o ) :
            Ooooo00o0OoO . outer_dest . copy_address ( oO0oOo0 . address )
            Ooooo00o0OoO . encap_port = oO0oOo0 . translated_port
            break
    if 36 - 36: I1Ii111 . II111iiii % ooOoO0o
    if 84 - 84: OoooooooOO - i11iIiiIii / iIii1I11I1II1 / OoooooooOO / I1ii11iIi11i
    if ( Ooooo00o0OoO . outer_dest . is_null ( ) ) : return
    if 4 - 4: Oo0Ooo + o0oOOo0O0Ooo
    # Outer header: our RLOC -> learned RLOC. Inner: our RLOC to the
    # all-hosts group 224.0.0.1 in the group's instance-id, TTL 1.
    Ooooo00o0OoO . outer_source . copy_address ( lisp . lisp_myrlocs [ 0 ] )
    Ooooo00o0OoO . outer_version = Ooooo00o0OoO . outer_dest . afi_to_version ( )
    Ooooo00o0OoO . outer_ttl = 32
    Ooooo00o0OoO . inner_source . copy_address ( lisp . lisp_myrlocs [ 0 ] )
    Ooooo00o0OoO . inner_dest . store_address ( "[{}]224.0.0.1" . format ( geid . instance_id ) )
    Ooooo00o0OoO . inner_ttl = 1
    if 17 - 17: OoO0O00 * OoOoOO00
    iiI111I1iIiI = lisp . green ( eid . print_address ( ) , False )
    ooOoOOOOo = lisp . red ( "{}:{}" . format ( Ooooo00o0OoO . outer_dest . print_address_no_iid ( ) ,
        Ooooo00o0OoO . encap_port ) , False )
    ii11i = lisp . bold ( "IGMP Query" , False )
    if 71 - 71: I1Ii111 / I1ii11iIi11i * iIii1I11I1II1
    lisp . lprint ( "Data encapsulate {} to gleaned EID {}, RLOC {}" . format ( ii11i , iiI111I1iIiI , ooOoOOOOo ) )
    if 57 - 57: OOooOOo + I1Ii111 % I1ii11iIi11i . OoO0O00 / OoO0O00 * O0
    if 6 - 6: i1IIi - II111iiii * o0oOOo0O0Ooo . OoO0O00
    if 68 - 68: o0oOOo0O0Ooo
    if 20 - 20: I1Ii111 - I1Ii111
    if 37 - 37: IiII
    # Add outer headers and transmit on the caller's raw socket.
    if ( Ooooo00o0OoO . encode ( None ) == None ) : return
    Ooooo00o0OoO . print_packet ( "Send" , True )
    if 37 - 37: Oo0Ooo / IiII * O0
    Ooooo00o0OoO . send_packet ( lisp_raw_socket , Ooooo00o0OoO . outer_dest )
if 73 - 73: iII111i * iII111i / ooOoO0o
if 43 - 43: I1ii11iIi11i . i1IIi . IiII + O0 * Ii1I * O0
if 41 - 41: I1ii11iIi11i + Ii1I % OoooooooOO . I1ii11iIi11i + iII111i . iII111i
if 31 - 31: i11iIiiIii + II111iiii . iII111i * OoOoOO00
if 66 - 66: OoOoOO00 + i1IIi % II111iiii . O0 * I1ii11iIi11i % I1ii11iIi11i
if 87 - 87: OOooOOo + o0oOOo0O0Ooo . iII111i - OoooooooOO
if 6 - 6: iIii1I11I1II1 * OoooooooOO
if 28 - 28: Oo0Ooo * o0oOOo0O0Ooo / I1Ii111
if 52 - 52: O0 / o0oOOo0O0Ooo % iII111i * I1IiiI % OOooOOo
if 69 - 69: I1ii11iIi11i
if 83 - 83: o0oOOo0O0Ooo
if 38 - 38: I1Ii111 + OoooooooOO . i1IIi
if 19 - 19: iII111i - o0oOOo0O0Ooo - Ii1I - OoOoOO00 . iII111i . I1Ii111
if 48 - 48: iII111i + IiII
if 60 - 60: I11i + iII111i . IiII / i1IIi . iIii1I11I1II1
if 14 - 14: OOooOOo
if 79 - 79: Ii1I
if 76 - 76: iIii1I11I1II1
if 80 - 80: iIii1I11I1II1 . O0 / Ii1I % Ii1I
if 93 - 93: OoooooooOO * Oo0Ooo
if 10 - 10: I1Ii111 * OoooooooOO + I11i - I1ii11iIi11i / I1ii11iIi11i . i11iIiiIii
if 22 - 22: I1Ii111 / o0oOOo0O0Ooo
if 98 - 98: i1IIi
if 51 - 51: I1ii11iIi11i + ooOoO0o + Oo0Ooo / i1IIi + i1IIi
if 12 - 12: iIii1I11I1II1 . Ii1I . I1ii11iIi11i % I1IiiI . II111iiii . oO0o
def IIi1ii1 ( lisp_raw_socket ) :
    """
    Build one IGMP General Query (IPv4 header + IGMP message as python2
    byte strings) and data-encapsulate it, via O0OOO0(), to every
    (EID, group) pair recorded in lisp.lisp_gleaned_groups whose gleaning
    policy requests IGMP querying. No-op when nothing has been gleaned.
    """
    if ( lisp . lisp_gleaned_groups == { } ) : return
    if 48 - 48: ooOoO0o / iIii1I11I1II1 + OOooOOo + iIii1I11I1II1 . OoO0O00
    if 60 - 60: I1Ii111
    if 98 - 98: ooOoO0o
    if 34 - 34: iIii1I11I1II1 * I11i * I11i / I1ii11iIi11i
    if 28 - 28: OoO0O00 - oO0o + OoOoOO00 + Ii1I / iIii1I11I1II1
    # IPv4 header: 0x46 = version 4, IHL 6 (24 bytes — matches the
    # checksum length below), TOS 0xc0, total length 0x24 (36), TTL 1,
    # protocol 2 (IGMP). Source is our RLOC, filled in byte-by-byte.
    iiiii11I1 = "\x46\xc0\x00\x24\x00\x00\x40\x00\x01\x02\x00\x00"
    Ii1 = lisp . lisp_myrlocs [ 0 ]
    iII1 = Ii1 . address
    iiiii11I1 += chr ( ( iII1 >> 24 ) & 0xff )
    iiiii11I1 += chr ( ( iII1 >> 16 ) & 0xff )
    iiiii11I1 += chr ( ( iII1 >> 8 ) & 0xff )
    iiiii11I1 += chr ( iII1 & 0xff )
    # Destination 224.0.0.1 (all-hosts), then option 0x94040000
    # (Router Alert), then checksum over the 24-byte header.
    iiiii11I1 += "\xe0\x00\x00\x01"
    iiiii11I1 += "\x94\x04\x00\x00"
    iiiii11I1 = lisp . lisp_ip_checksum ( iiiii11I1 , 24 )
    if 77 - 77: OOooOOo / II111iiii + IiII + ooOoO0o - i11iIiiIii
    if 44 - 44: I1IiiI + OoOoOO00 + I1ii11iIi11i . I1IiiI * OoOoOO00 % iIii1I11I1II1
    if 72 - 72: OOooOOo . OOooOOo - I1ii11iIi11i
    if 48 - 48: Oo0Ooo - ooOoO0o + Oo0Ooo - I1IiiI * i11iIiiIii . iII111i
    if 35 - 35: IiII . O0 + Oo0Ooo + OOooOOo + i1IIi
    # IGMP message: type 0x11 (Membership Query), max-response 0x64,
    # group 0.0.0.0 (general query), then the IGMP checksum.
    I1i11II = "\x11\x64\x00\x00" + "\x00\x00\x00\x00" + "\x02\x3c\x00\x00"
    I1i11II = lisp . lisp_igmp_checksum ( I1i11II )
    if 65 - 65: O0 * I1IiiI / I1IiiI . OoOoOO00
    if 87 - 87: II111iiii * I1ii11iIi11i % Oo0Ooo * Oo0Ooo
    if 58 - 58: OOooOOo . o0oOOo0O0Ooo + I1IiiI % Oo0Ooo - OoO0O00
    if 50 - 50: iII111i % II111iiii - ooOoO0o . i1IIi + O0 % iII111i
    if 10 - 10: iII111i . i1IIi + Ii1I
    # Reusable address objects for the (EID, group) walk below.
    o0oOO000oO0oo = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
    i1 = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
    if 66 - 66: OoO0O00 % o0oOOo0O0Ooo
    for Ii1iIiII1ii1 in lisp . lisp_gleaned_groups :
        o0oOO000oO0oo . store_address ( Ii1iIiII1ii1 )
        for iI1ii11Ii in lisp . lisp_gleaned_groups [ Ii1iIiII1ii1 ] :
            i1 . store_address ( iI1ii11Ii )
            # Third return value gates IGMP querying for this pair.
            i1I11IiI1iiII , o00oOo0oOoo , O0OO0OO = lisp . lisp_allow_gleaning ( o0oOO000oO0oo , i1 , None )
            if ( O0OO0OO == False ) : continue
            O0OOO0 ( lisp_raw_socket , o0oOO000oO0oo , i1 , iiiii11I1 + I1i11II )
if 63 - 63: iIii1I11I1II1 + OOooOOo . OoO0O00 / I1IiiI
if 84 - 84: i1IIi
if 42 - 42: II111iiii - OoO0O00 - OoooooooOO . iII111i / OoOoOO00
if 56 - 56: i11iIiiIii - iIii1I11I1II1 . II111iiii
if 81 - 81: IiII / OoOoOO00 * IiII . O0
if 61 - 61: OoO0O00 * OOooOOo + I1Ii111 . iIii1I11I1II1 % I11i . I1Ii111
if 53 - 53: I1Ii111 * IiII / iIii1I11I1II1 / I1IiiI % I1ii11iIi11i
if 39 - 39: OoO0O00 / OoooooooOO . OoO0O00 * I1ii11iIi11i / OoOoOO00
if 38 - 38: OoO0O00 / ooOoO0o % I1Ii111 * I11i + i11iIiiIii % ooOoO0o
if 61 - 61: I1Ii111 - Ii1I % I1ii11iIi11i / ooOoO0o / iII111i + iIii1I11I1II1
def O0O0oo():
    """
    Age out gleaned multicast state. Walks lisp.lisp_gleaned_groups and
    removes every (EID, group) membership whose last recorded IGMP report
    is at least LISP_IGMP_TIMEOUT_INTERVAL seconds old, logging each
    removal.
    """
    eid_addr = lisp.lisp_address(lisp.LISP_AFI_IPV4, "", 32, 0)
    group_addr = lisp.lisp_address(lisp.LISP_AFI_IPV4, "", 32, 0)

    #
    # First pass: collect expired pairs so the table is never mutated
    # while being iterated.
    #
    expired_pairs = []
    for eid_key in lisp.lisp_gleaned_groups:
        for group_key in lisp.lisp_gleaned_groups[eid_key]:
            last_report = lisp.lisp_gleaned_groups[eid_key][group_key]
            if (time.time() - last_report < lisp.LISP_IGMP_TIMEOUT_INTERVAL):
                continue
            #endif
            expired_pairs.append([eid_key, group_key])
        #endfor
    #endfor

    #
    # Second pass: log and remove each timed-out membership.
    #
    timeout_str = lisp.bold("timed out", False)
    for eid_key, group_key in expired_pairs:
        eid_addr.store_address(eid_key)
        group_addr.store_address(group_key)
        eid_str = lisp.green(eid_key, False)
        group_str = lisp.green(group_key, False)
        lisp.lprint("{} RLE {} for gleaned group {}".format(eid_str,
            timeout_str, group_str))
        lisp.lisp_remove_gleaned_multicast(eid_addr, group_addr)
    #endfor
if 71 - 71: O0 / I1IiiI . I1Ii111 / I1Ii111 * ooOoO0o
if 60 - 60: II111iiii . I1IiiI - Oo0Ooo + I1ii11iIi11i * I1ii11iIi11i
if 27 - 27: IiII * I1IiiI . iIii1I11I1II1 - iIii1I11I1II1
if 5 - 5: IiII
if 84 - 84: II111iiii * oO0o * II111iiii % IiII / I1IiiI
if 100 - 100: IiII . Ii1I - iIii1I11I1II1 . i11iIiiIii / II111iiii
if 71 - 71: I1Ii111 * Oo0Ooo . I11i
if 49 - 49: IiII * O0 . IiII
def ii1II1II(lisp_raw_socket):
    """Once-a-minute RTR housekeeping: flush per-nonce crypto keys,
    age the map-cache, reset the NAT-trace cache, expire gleaned
    multicast state, then re-arm itself."""
    lisp.lisp_set_exception()

    # Release crypto keys stored per nonce, then reset the table.
    for key_list in lisp.lisp_crypto_keys_by_nonce.values():
        for key in key_list:
            del key
    lisp.lisp_crypto_keys_by_nonce.clear()
    lisp.lisp_crypto_keys_by_nonce = {}

    # Time out stale map-cache entries.
    lisp.lisp_timeout_map_cache(lisp.lisp_map_cache)

    # Reset the NAT-trace cache.
    lisp.lisp_rtr_nat_trace_cache.clear()
    lisp.lisp_rtr_nat_trace_cache = {}

    # Expire gleaned multicast state.
    O0O0oo()

    IIi1ii1(lisp_raw_socket)

    # Schedule the next housekeeping pass in 60 seconds.
    restart_timer = threading.Timer(60, ii1II1II, [lisp_raw_socket])
    restart_timer.start()
    return
def iIIi1Ii1III():
    """RTR startup: open control and data sockets, spawn pcap/worker
    threads, restore checkpoint state and arm the periodic housekeeping
    timer. Returns False when local addresses cannot be determined."""
    global Oo0oO0oo0oO00, II1iII1i, II1Ii1iI1i
    global OOo, Ii1IIii11, I11
    global i111I, oO0oIIII
    global iIiiI1

    lisp.lisp_i_am("rtr")
    lisp.lisp_set_exception()
    lisp.lisp_print_banner("RTR starting up")

    if lisp.lisp_get_local_addresses() == False:
        return False

    # Default RLOC is the first local one; on AWS prefer eth0's address.
    iIiiI1 = lisp.lisp_myrlocs[0]
    if lisp.lisp_on_aws():
        iIiiI1 = lisp.lisp_get_interface_address("eth0")

    # Raspbian gets a v4 wildcard bind, everything else the v6 wildcard.
    bind_address = "0.0.0.0" if lisp.lisp_is_raspbian() else "0::0"
    II1Ii1iI1i = lisp.lisp_open_listen_socket(bind_address, str(iiI1iIiI))
    Oo0oO0oo0oO00 = lisp.lisp_open_listen_socket("", "lisp-rtr")
    i111I = lisp.lisp_open_listen_socket("", "lispers.net-itr")

    II1iII1i[0] = II1Ii1iI1i
    II1iII1i[1] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV6)
    II1iII1i[2] = Oo0oO0oo0oO00

    # Raw IPv4 socket with IP_HDRINCL so full packets can be emitted.
    OOo = socket.socket(socket.AF_INET, socket.SOCK_RAW,
        socket.IPPROTO_RAW)
    OOo.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
    II1iII1i.append(OOo)

    # LISP-Trace listener.
    oO0oIIII = lisp.lisp_open_listen_socket("0.0.0.0",
        str(lisp.LISP_TRACE_PORT))

    if lisp.lisp_is_raspbian() == False:
        Ii1IIii11 = socket.socket(socket.AF_INET6, socket.SOCK_RAW,
            socket.IPPROTO_UDP)

    # Thread-pool sizes are tunable through the environment.
    pcap_count = os.getenv("LISP_PCAP_THREADS")
    pcap_count = 1 if pcap_count == None else int(pcap_count)
    worker_count = os.getenv("LISP_WORKER_THREADS")
    worker_count = 0 if worker_count == None else int(worker_count)

    # Packet-capture threads.
    for index in range(pcap_count):
        pcap_thread = lisp.lisp_thread("pcap-{}".format(index))
        pcap_thread.thread_number = index
        pcap_thread.number_of_pcap_threads = pcap_count
        pcap_thread.number_of_worker_threads = worker_count
        I11.append(pcap_thread)
        threading.Thread(target=i1i1IiIi1, args=[pcap_thread]).start()

    # Optional worker threads.
    for index in range(worker_count):
        worker_thread = lisp.lisp_thread("worker-{}".format(index))
        I11.append(worker_thread)
        threading.Thread(target=I1I1iII1i, args=[worker_thread]).start()

    # Restore the checkpointed map-cache.
    lisp.lisp_load_checkpoint()

    lisp.lisp_load_split_pings = (os.getenv("LISP_LOAD_SPLIT_PINGS") != None)

    # Arm the once-a-minute housekeeping timer.
    periodic = threading.Timer(60, ii1II1II, [OOo])
    periodic.start()
    return True
def OoIi1I1I():
    """Close every socket the RTR opened at startup (shutdown path)."""
    for sock, name in ((II1iII1i[0], ""),
                       (II1iII1i[1], ""),
                       (Oo0oO0oo0oO00, "lisp-rtr"),
                       (II1Ii1iI1i, ""),
                       (oO0oIIII, ""),
                       (i111I, "lispers.net-itr")):
        lisp.lisp_close_socket(sock, name)
    OOo.close()
    return
def ooOOO00oOOooO(kv_pair):
    """Handle a "lisp map-resolver" command clause and, unless one is
    already running, schedule a map-resolver reachability test."""
    global II1iII1i
    global iiI1iIiI

    lispconfig.lisp_map_resolver_command(kv_pair)

    # Only arm a new test timer if none is pending or alive.
    timer = lisp.lisp_test_mr_timer
    if timer == None or timer.is_alive() == False:
        lisp.lisp_test_mr_timer = threading.Timer(2, lisp.lisp_test_mr,
            [II1iII1i, iiI1iIiI])
        lisp.lisp_test_mr_timer.start()
    return
def O00Ooo(kv_pair):
    """Handle a "lisp xtr-parameters" command clause, start RLOC-probing
    if the clause just enabled it, and push logging settings to the
    external data-plane."""
    global II1Ii1iI1i, OOo, iiI1iIiI

    probing_before = lisp.lisp_rloc_probing

    lispconfig.lisp_xtr_command(kv_pair)

    # If RLOC-probing transitioned off -> on, kick off the probe timer
    # and tell the data-plane which crypto port the ITR uses.
    if probing_before == False and lisp.lisp_rloc_probing:
        probe_sockets = [II1Ii1iI1i, II1Ii1iI1i,
            None, OOo]
        lisp.lisp_start_rloc_probe_timer(1, probe_sockets)
        ipc_entry = {"type": "itr-crypto-port", "port": iiI1iIiI}
        lisp.lisp_write_to_dp_socket(ipc_entry)

    # Propagate logging flags to the external data-plane.
    lisp.lisp_ipc_write_xtr_parameters(lisp.lisp_debug_logging,
        lisp.lisp_data_plane_logging)
    return
# CLI dispatch table for the RTR: maps each configuration/show command
# clause to [handler-function, {parameter-name: value-spec}].
iI1111i = {
    "lisp xtr-parameters": [O00Ooo, {
        "rloc-probing": [True, "yes", "no"],
        "nonce-echoing": [True, "yes", "no"],
        "data-plane-security": [True, "yes", "no"],
        "data-plane-logging": [True, "yes", "no"],
        "frame-logging": [True, "yes", "no"],
        "flow-logging": [True, "yes", "no"],
        "nat-traversal": [True, "yes", "no"],
        "checkpoint-map-cache": [True, "yes", "no"],
        "ipc-data-plane": [True, "yes", "no"],
        "decentralized-push-xtr": [True, "yes", "no"],
        "decentralized-pull-xtr-modulus": [True, 1, 0xff],
        "decentralized-pull-xtr-dns-suffix": [True],
        "register-reachable-rtrs": [True, "yes", "no"],
        "program-hardware": [True, "yes", "no"]}],
    "lisp interface": [lispconfig.lisp_interface_command, {
        "interface-name": [True],
        "device": [True],
        "instance-id": [True, 0, 0xffffffff],
        "dynamic-eid": [True],
        "dynamic-eid-device": [True],
        "lisp-nat": [True, "yes", "no"],
        "dynamic-eid-timeout": [True, 0, 0xff]}],
    "lisp map-resolver": [ooOOO00oOOooO, {
        "mr-name": [True],
        "ms-name": [True],
        "dns-name": [True],
        "address": [True]}],
    "lisp map-cache": [lispconfig.lisp_map_cache_command, {
        "prefix": [],
        "mr-name": [True],
        "ms-name": [True],
        "instance-id": [True, 0, 0xffffffff],
        "eid-prefix": [True],
        "group-prefix": [True],
        "send-map-request": [True, "yes", "no"],
        "rloc": [],
        "rloc-record-name": [True],
        "rle-name": [True],
        "elp-name": [True],
        "address": [True],
        "priority": [True, 0, 255],
        "weight": [True, 0, 100]}],
    "lisp rtr-map-cache": [lispconfig.lisp_map_cache_command, {
        "prefix": [],
        "instance-id": [True, 0, 0xffffffff],
        "eid-prefix": [True],
        "group-prefix": [True],
        "rloc": [],
        "rloc-record-name": [True],
        "rle-name": [True],
        "elp-name": [True],
        "address": [True],
        "priority": [True, 0, 255],
        "weight": [True, 0, 100]}],
    "lisp explicit-locator-path": [lispconfig.lisp_elp_command, {
        "elp-name": [False],
        "elp-node": [],
        "address": [True],
        "probe": [True, "yes", "no"],
        "strict": [True, "yes", "no"],
        "eid": [True, "yes", "no"]}],
    "lisp replication-list-entry": [lispconfig.lisp_rle_command, {
        "rle-name": [False],
        "rle-node": [],
        "address": [True],
        "level": [True, 0, 255]}],
    "lisp json": [lispconfig.lisp_json_command, {
        "json-name": [False],
        "json-string": [False]}],
    "lisp database-mapping": [i11, {
        "prefix": [],
        "mr-name": [True],
        "ms-name": [True],
        "instance-id": [True, 0, 0xffffffff],
        "secondary-instance-id": [True, 0, 0xffffffff],
        "eid-prefix": [True],
        "group-prefix": [True],
        "dynamic-eid": [True, "yes", "no"],
        "signature-eid": [True, "yes", "no"],
        "rloc": [],
        "rloc-record-name": [True],
        "elp-name": [True],
        "geo-name": [True],
        "rle-name": [True],
        "json-name": [True],
        "address": [True],
        "interface": [True],
        "priority": [True, 0, 255],
        "weight": [True, 0, 100]}],
    "lisp glean-mapping": [O0O0O, {
        "instance-id": [False],
        "eid-prefix": [True],
        "group-prefix": [True],
        "rloc-prefix": [True],
        "rloc-probe": [True, "yes", "no"],
        "igmp-query": [True, "yes", "no"]}],
    "show rtr-rloc-probing": [iiI1I11i1i, {}],
    "show rtr-keys": [o00oOO0, {}],
    "show rtr-map-cache": [O00oooo0O, {}],
    "show rtr-map-cache-dns": [Ii1IOo0o0, {}]
}
def Oo0O0OO0OoO0(lisp_socket):
    """Receive one LISP-Trace message on `lisp_socket`, decode it, and
    cache the sender's translated address/port for NAT traversal."""
    opcode, source, port, packet = lisp.lisp_receive(lisp_socket, False)

    trace = lisp.lisp_trace()
    if trace.decode(packet) == False:
        return

    trace.rtr_cache_nat_trace(source, port)
# Start the RTR; bail out if startup could not complete.
if iIIi1Ii1III() == False:
    lisp.lprint("lisp_rtr_startup() failed")
    lisp.lisp_print_banner("RTR abnormal exit")
    exit(1)

# Sockets the main loop multiplexes over, plus the 3-entry socket list
# handed to lisp_parse_packet for ephemeral-port traffic.
select_sockets = [II1Ii1iI1i, Oo0oO0oo0oO00,
    i111I, oO0oIIII]
ephem_sockets = [II1Ii1iI1i] * 3

while True:
    # A failing select (e.g. interrupted by a signal) ends the loop.
    try:
        ready, _, _ = select.select(select_sockets, [], [])
    except:
        break

    # Punt messages from the external data-plane.
    if lisp.lisp_ipc_data_plane and i111I in ready:
        lisp.lisp_process_punt(i111I, II1iII1i,
            iiI1iIiI)

    # LISP-Trace messages.
    if oO0oIIII in ready:
        Oo0O0OO0OoO0(oO0oIIII)

    # Control messages arriving on the ephemeral port.
    if II1Ii1iI1i in ready:
        opcode, source, port, packet = lisp.lisp_receive(ephem_sockets[0],
            False)
        if source == "": break
        if lisp.lisp_is_rloc_probe_request(packet[0]):
            lisp.lprint("RTR ignoring RLOC-probe request, using pcap")
            continue
        if lisp.lisp_is_rloc_probe_reply(packet[0]):
            lisp.lprint("RTR ignoring RLOC-probe reply, using pcap")
            continue
        lisp.lisp_parse_packet(ephem_sockets, packet, source, port)

    # IPC from the lisp-rtr named socket: commands, API calls, data.
    if Oo0oO0oo0oO00 in ready:
        opcode, source, port, packet = lisp.lisp_receive(Oo0oO0oo0oO00, True)
        if source == "": break

        if opcode == "command":
            if packet == "clear":
                lisp.lisp_clear_map_cache()
                continue
            if packet.find("clear%") != -1:
                lispconfig.lisp_clear_decap_stats(packet)
                continue
            lispconfig.lisp_process_command(Oo0oO0oo0oO00, opcode,
                packet, "lisp-rtr", [iI1111i])
        elif opcode == "api":
            lisp.lisp_process_api("lisp-rtr", Oo0oO0oo0oO00, packet)
        elif opcode == "data-packet":
            IIIii(packet, "")
        else:
            if lisp.lisp_is_rloc_probe_request(packet[0]):
                lisp.lprint("RTR ignoring RLOC-probe request, using pcap")
                continue
            if lisp.lisp_is_rloc_probe_reply(packet[0]):
                lisp.lprint("RTR ignoring RLOC-probe reply, using pcap")
                continue
            lisp.lisp_parse_packet(II1iII1i, packet, source, port)

# Orderly shutdown.
OoIi1I1I()
lisp.lisp_print_banner("RTR normal exit")
exit(0)
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
stream.py | """ Read audio stream, and play it, using installed vlc player (libvlc) """
# python standard lib
import sys
import os
import time
from typing import List
import ctypes
from subprocess import Popen, PIPE
from multiprocessing import Process, Queue
import logging
# externals
main_logger = logging.getLogger("main")
def print_info(msg):
    """Echo *msg* to stdout and record it on the "main" logger."""
    print(msg)
    main_logger.info(msg)
#
# Using ffmpeg to play url stream
#
def execute_ffmpeg(command: str, queue: Queue):
    """Execute ffmpeg command, until queue gets message. Retry when command exits.

    Runs *command* under a shell, restarting it whenever it dies on its
    own, until any message arrives on *queue* (see FfmpegProcess.stop).

    NOTE(review): *command* is interpolated into a shell line, so callers
    must pass trusted input only.
    """
    # 'exec' is needed to be able to easily stop/terminate the process
    # (the shell is replaced, so terminate() reaches ffmpeg itself).
    cmd = f'exec {command} >/dev/null 2>&1'
    proc = None

    def create_process():
        # (Re)spawn the shell/ffmpeg process, rebinding `proc`.
        nonlocal proc
        print_info(f"execute_ffmpeg create_process {command}")
        proc = Popen(args=cmd, stdin=None, stdout=None, stderr=None, cwd=None, bufsize=0, shell=True)

    create_process()

    def stop():
        # Terminate the current process and reap it.
        print_info(f"execute_ffmpeg stop {command}")
        proc.terminate()
        proc.wait()

    while True:
        # Block up to 1s waiting for a stop message.
        must_stop = None
        try:
            must_stop = queue.get(block=True, timeout=1)
        except Exception:
            # queue.Empty: no message yet -- keep monitoring.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            pass
        if must_stop is not None:
            stop()
            break
        # Restart the process if it exited unexpectedly.
        if proc.poll() is not None:
            print_info(f"execute_ffmpeg stopped unexpectedly {command}")
            create_process()
            time.sleep(3)  # do not try to create process too many times
class FfmpegProcess():
    """Own a background worker running `execute_ffmpeg` for one command.

    Usable as a context manager: leaving the `with` block stops the
    worker. `stop()` is idempotent.
    """

    def __init__(self, cmd):
        self.queue = Queue()
        self.process = Process(target=execute_ffmpeg, args=(cmd, self.queue,), daemon=True)
        self.process.start()
        self.stopped = False

    def stop(self):
        # Only the first call signals the worker and joins it.
        if self.stopped:
            return
        self.stopped = True
        self.queue.put("stop")
        self.process.join()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()
def play_process(url):
    """ Create and return process to read audio from url and send to analog output"""
    command = 'ffmpeg -i {} -f alsa default'.format(url)
    return FfmpegProcess(command)
def send_process(urls: List[str]):
""" Create and return process to read audio from analog input and send to (icecast?) urls """
# input = "-f alsa -i hw:0" # default input
input = "-f alsa -i hw:CARD=CODEC,DEV=0" # usb-sound-card input: run command 'arecord -L' to see a full list of possibilities
outputs = []
for url in urls:
url_splitted = url.split(";")
url = url_splitted[0]
if len(url_splitted) > 1 and len(url_splitted[1]) > 0:
bitrate = url_splitted[1]
else:
bitrate = '64K'
content_type = "-content_type audio/mpeg -f mp3"
bitrate_ = f"-b:a {bitrate} -minrate {bitrate} -maxrate {bitrate} -bufsize {bitrate}"
outputs.append(f'{content_type} {bitrate_} "{url}"')
outputs = " ".join(outputs)
cmd = f'ffmpeg {input} {outputs}'
return FfmpegProcess(cmd)
class TestUrl():
    """Sample stream URLs used by the manual tests below."""
    ro1 = "http://ro1.reformatorischeomroep.nl:8003/live"
    ro1_s = "https://radio1.reformatorischeomroep.nl/live.m3u"  # does not work
    ro2 = "http://ro2.reformatorischeomroep.nl:8020/live"
    ro3 = "http://ro3.reformatorischeomroep.nl:8072/live"
    noord = "http://meeluisteren.gergemrijssen.nl:8000/noord"
    zuid = "http://meeluisteren.gergemrijssen.nl:8000/zuid"
    west = "http://meeluisteren.gergemrijssen.nl:8000/west"
def test_ffmpeg():
    """ stream to icecast """
    # Manual test: prompts for a password, then runs ffmpeg until killed.
    input_url = TestUrl.ro1
    password = input("Icecast password: ")
    icecast_url = f"icecast://source:{password}@173.249.6.236:8000/babyfoon"
    content_type = "-content_type audio/mpeg -f mp3"
    bitrate = "-b:a 64K -minrate 64K -maxrate 64K -bufsize 64K"
    # NOTE(review): the next three assignments are alternative commands kept
    # for experimentation; only the LAST `cmd` value is actually used.
    # play on standard out:
    cmd = f'ffmpeg -i {input_url} -f alsa default'
    # send input url to icecast:
    cmd = f'ffmpeg -i {input_url} {content_type} {bitrate} "{icecast_url}"'
    # send recording to icecast:
    cmd = f'ffmpeg -f alsa -i hw:0 {content_type} {bitrate} "{icecast_url}"'
    with FfmpegProcess(cmd):
        # Keep the main process alive while the worker streams.
        while True:
            time.sleep(30)
def test():
    """Manual test entry point -- currently disabled.

    The early return keeps the ffmpeg test from running; the previously
    unreachable calls are kept below as comments for reference.
    """
    return
    # test_ffmpeg()
    # # test_sounddevice()
    # sys.exit(0)
# Manual entry point: stream until interrupted (requires ffmpeg + ALSA).
if __name__ == '__main__':
    test_ffmpeg()
#
# Deprecated: Using VLC to play url stream
#
# import urllib3
# import vlc
# from vlc import CallbackDecorators
# MediaReadCb = CallbackDecorators.MediaReadCb
# def from_url(url):
# while True:
# try:
# http = urllib3.PoolManager()
# r = http.request('GET', url, preload_content=False)
# for chunk in r.stream(32 * 100):
# yield chunk
# r.release_conn()
# except:
# print(f"Exception while reading from url {url}:")
# print(traceback.format_exc())
# time.sleep(5)
# def play_from_url(url: str, queue: Queue):
# print(f"playing {url}")
# generator = from_url(url)
# @MediaReadCb
# def read_cb(opaque, buffer, length):
# new_data = next(generator)
# c = len(new_data)
# buffer_array = ctypes.cast(buffer, ctypes.POINTER(ctypes.c_char * length))
# ctypes.memmove(buffer_array, new_data, c)
# return c
# instance = vlc.Instance()
# player = instance.media_player_new()
# media = instance.media_new_callbacks(None, read_cb, seek_cb=None, close_cb=None, opaque=None)
# player.set_media(media)
# player.play()
# # wait until other process puts something in queue
# queue.get(block=True)
# def test_sounddevice():
# import sounddevice as sd
# def callback(indata, outdata, frames, time, status):
# if status:
# print(status)
# outdata[:] = indata
# with sd.RawStream(channels=2, dtype='int24', callback=callback):
# while True:
# sd.sleep(1000)
# print('done')
|
futu_gateway.py | """
Please install futu-api before use.
"""
from copy import copy
from collections import OrderedDict
from datetime import datetime
from threading import Thread
from time import sleep
from futu import (
KLType,
ModifyOrderOp,
TrdSide,
TrdEnv,
OpenHKTradeContext,
OpenQuoteContext,
OpenUSTradeContext,
OrderBookHandlerBase,
OrderStatus,
OrderType,
RET_ERROR,
RET_OK,
StockQuoteHandlerBase,
TradeDealHandlerBase,
TradeOrderHandlerBase
)
from vnpy.trader.constant import Direction, Exchange, Product, Status
from vnpy.trader.event import EVENT_TIMER
from vnpy.trader.gateway import BaseGateway, LocalOrderManager
from vnpy.trader.object import (
BarData,
TickData,
OrderData,
TradeData,
AccountData,
ContractData,
PositionData,
SubscribeRequest,
OrderRequest,
CancelRequest,
HistoryRequest,
Interval
)
# vn.py Exchange -> futu market prefix (used when building futu codes).
EXCHANGE_VT2FUTU = {
    Exchange.SMART: "US",
    Exchange.SEHK: "HK",
    Exchange.HKFE: "HK_FUTURE",
}
EXCHANGE_FUTU2VT = {v: k for k, v in EXCHANGE_VT2FUTU.items()}

# vn.py Product -> futu security type string.
PRODUCT_VT2FUTU = {
    Product.EQUITY: "STOCK",
    Product.INDEX: "IDX",
    Product.ETF: "ETF",
    Product.WARRANT: "WARRANT",
    Product.BOND: "BOND",
}

# Order direction: vn.py <-> futu trade side.
DIRECTION_VT2FUTU = {
    Direction.LONG: TrdSide.BUY,
    Direction.SHORT: TrdSide.SELL,
}
DIRECTION_FUTU2VT = {v: k for k, v in DIRECTION_VT2FUTU.items()}

# futu order status -> vn.py Status.
STATUS_FUTU2VT = {
    OrderStatus.NONE: Status.SUBMITTING,
    OrderStatus.SUBMITTING: Status.SUBMITTING,
    OrderStatus.SUBMITTED: Status.NOTTRADED,
    OrderStatus.FILLED_PART: Status.PARTTRADED,
    OrderStatus.FILLED_ALL: Status.ALLTRADED,
    OrderStatus.CANCELLED_ALL: Status.CANCELLED,
    OrderStatus.CANCELLED_PART: Status.CANCELLED,
    OrderStatus.SUBMIT_FAILED: Status.REJECTED,
    OrderStatus.FAILED: Status.REJECTED,
    OrderStatus.DISABLED: Status.CANCELLED,
}

# Minute-bar intervals supported by futu kline queries.
KLTYPE_MINUTES = [1, 3, 5, 15, 30, 60]
class FutuGateway(BaseGateway):
    """
    Futu Securities API gateway.
    Network path: vnpy => FutuGateway => local FutuOpenD client (port 11111) => Futu Securities
    FutuOpenD download: https://www.futunn.com/download/openAPI?lang=zh-CN
    Windows: after installing, log in with the client => SMS verification
    => FutuOpenD listens on local port 11111.
    """
    # Connection settings shown in the UI (keys are user-facing Chinese labels).
    default_setting = {
        "密码": "",  # trading password
        "地址": "127.0.0.1",
        "端口": 11111,
        "市场": ["HK", "US"],
        "环境": [TrdEnv.REAL, TrdEnv.SIMULATE],
    }

    # Exchanges supported by this gateway.
    exchanges = list(EXCHANGE_FUTU2VT.values())
    def __init__(self, event_engine, gateway_name="FUTU"):
        """Constructor: initialize state only; nothing connects until connect()."""
        super(FutuGateway, self).__init__(event_engine, gateway_name)

        self.quote_ctx = None  # OpenQuoteContext, set in connect_quote()
        self.trade_ctx = None  # HK/US trade context, set in connect_trade()

        # Connection parameters, filled in from connect(setting).
        self.host = ""
        self.port = 0
        self.market = ""       # "HK" or "US"
        self.password = ""
        self.env = TrdEnv.SIMULATE

        self.ticks = {}        # symbol -> TickData cache
        self.trades = set()    # presumably seen trade ids for dedup -- confirm in process_deal
        self.contracts = {}    # symbol -> ContractData cache

        # Maps local order ids <=> broker order ids.
        self.order_manager = LocalOrderManager(gateway=self, order_prefix='', order_rjust=4)

        # Background thread running query_data() after connect().
        self.thread = Thread(target=self.query_data)

        # For query function.
        self.count = 0
        self.interval = 1
        self.query_funcs = [self.query_account, self.query_position]
    def connect(self, setting: dict):
        """Store connection settings, connect quote and trade contexts,
        then start the background query thread."""
        self.host = setting["地址"]
        self.port = setting["端口"]
        self.market = setting["市场"]
        self.password = setting["密码"]
        self.env = setting["环境"]

        self.connect_quote()
        self.connect_trade()

        self.thread.start()
    def query_data(self):
        """
        Runs once on a separate thread: query all data necessary,
        then hand off to fixed-interval timer-driven queries.
        """
        sleep(2.0)  # Wait 2 seconds till connection completed.

        self.query_contract()
        self.query_trade()
        self.query_order()
        self.query_position()
        self.query_account()

        # Start fixed interval query.
        self.event_engine.register(EVENT_TIMER, self.process_timer_event)
def process_timer_event(self, event):
"""定时器"""
self.count += 1
if self.count < self.interval:
return
self.count = 0
func = self.query_funcs.pop(0)
func()
self.query_funcs.append(func)
    def connect_quote(self):
        """
        Connect to the market data server and install push handlers.
        """
        self.quote_ctx = OpenQuoteContext(self.host, self.port)

        # Stock quote push handler.
        class QuoteHandler(StockQuoteHandlerBase):
            gateway = self

            # Forward each quote message to gateway.process_quote.
            def on_recv_rsp(self, rsp_str):
                ret_code, content = super(QuoteHandler, self).on_recv_rsp(
                    rsp_str
                )
                if ret_code != RET_OK:
                    return RET_ERROR, content
                self.gateway.process_quote(content)
                return RET_OK, content

        # Order book push handler.
        class OrderBookHandler(OrderBookHandlerBase):
            gateway = self

            # Forward each order-book message to gateway.process_orderbook.
            def on_recv_rsp(self, rsp_str):
                ret_code, content = super(OrderBookHandler, self).on_recv_rsp(
                    rsp_str
                )
                if ret_code != RET_OK:
                    return RET_ERROR, content
                self.gateway.process_orderbook(content)
                return RET_OK, content

        # Register both handlers and start the quote context.
        self.quote_ctx.set_handler(QuoteHandler())
        self.quote_ctx.set_handler(OrderBookHandler())
        self.quote_ctx.start()

        self.write_log("行情接口连接成功")
    def connect_trade(self):
        """
        Connect to the trade server, unlock trading, and install
        order/deal push handlers.
        """
        # Initialize context according to market.
        if self.market == "US":
            self.trade_ctx = OpenUSTradeContext(self.host, self.port)
        else:
            self.trade_ctx = OpenHKTradeContext(self.host, self.port)

        # Implement handlers.
        # Order update push handler.
        class OrderHandler(TradeOrderHandlerBase):
            gateway = self

            # Forward each order update to gateway.process_order.
            def on_recv_rsp(self, rsp_str):
                ret_code, content = super(OrderHandler, self).on_recv_rsp(
                    rsp_str
                )
                if ret_code != RET_OK:
                    return RET_ERROR, content
                self.gateway.process_order(content)
                return RET_OK, content

        # Deal (fill) push handler.
        class DealHandler(TradeDealHandlerBase):
            gateway = self

            # Forward each fill to gateway.process_deal.
            def on_recv_rsp(self, rsp_str):
                ret_code, content = super(DealHandler, self).on_recv_rsp(
                    rsp_str
                )
                if ret_code != RET_OK:
                    return RET_ERROR, content
                self.gateway.process_deal(content)
                return RET_OK, content

        # Unlock to allow trading.
        code, data = self.trade_ctx.unlock_trade(self.password)
        if code == RET_OK:
            self.write_log("交易接口解锁成功")
        else:
            self.write_log(f"交易接口解锁失败,原因:{data}")

        # Start context.
        # Register order and deal handlers, then start the trade context.
        self.trade_ctx.set_handler(OrderHandler())
        self.trade_ctx.set_handler(DealHandler())
        self.trade_ctx.start()

        self.write_log("交易接口连接成功")
def subscribe(self, req: SubscribeRequest):
"""订阅行情"""
for data_type in ["QUOTE", "ORDER_BOOK"]:
futu_symbol = convert_symbol_vt2futu(req.symbol, req.exchange)
code, data = self.quote_ctx.subscribe(futu_symbol, data_type, True)
if code:
self.write_log(f"订阅行情失败:{data}")
def query_history(self, req: HistoryRequest):
"""查询某只股票的历史K线数据"""
history = []
limit = 60
if req.interval not in [Interval.MINUTE, Interval.DAILY]:
self.write_error(f'查询股票历史范围,本接口只支持分钟/日线')
return history
futu_code = '{}.{}'.format(EXCHANGE_VT2FUTU.get(req.exchange), req.symbol)
if req.interval == Interval.MINUTE:
if req.interval_num not in KLTYPE_MINUTES:
self.write_error(f'查询股票历史范围,请求分钟数{req.interval_num}不在范围:{KLTYPE_MINUTES}')
return history
k_type = f'K_{req.interval_num}M'
else:
if req.interval_num != 1:
self.write_error(f'查询股票历史范围,请求日线{req.interval_num}只能是1')
return history
k_type = KLType.K_DAY
start_date = req.start.strftime('%Y-%m-%d')
end_date = req.end.strftime('%Y-%m-%d') if req.end else None
ret, df, page_req_key = self.quote_ctx.request_history_kline(
code=futu_code,
ktype=k_type,
start=start_date,
end=end_date,
max_count=limit) # 每页5个,请求第一页
if ret == RET_OK:
for index, row in df.iterrows():
symbol = row['code']
str_time = row['time_key']
dt = datetime.strptime(str_time, '%Y-%m-%d %H:%M:%S')
bar = BarData(
gateway_name=self.gateway_name,
symbol=row['code'],
exchange=req.exchange,
datetime=dt,
trading_day=dt.strftime('%Y-%m-%d'),
interval=req.interval,
interval_num=req.interval_num,
volume=row['volume'],
open_price=float(row['open']),
high_price=float(row['high']),
low_price=float(row['low']),
close_price=float(row['close'])
)
history.append(bar)
else:
return history
while page_req_key != None: # 请求后面的所有结果
ret, df, page_req_key = self.quote_ctx.request_history_kline(
code=futu_code,
ktype=k_type,
start=start_date,
end=end_date,
page_req_key=page_req_key) # 请求翻页后的数据
if ret == RET_OK:
for index, row in df.iterrows():
symbol = row['code']
str_time = row['time_key']
dt = datetime.strptime(str_time, '%Y-%m-%d %H:%M:%S')
bar = BarData(
gateway_name=self.gateway_name,
symbol=row['code'],
exchange=req.exchange,
datetime=dt,
trading_day=dt.strftime('%Y-%m-%d'),
interval=req.interval,
interval_num=req.interval_num,
volume=row['volume'],
open_price=float(row['open']),
high_price=float(row['high']),
low_price=float(row['low']),
close_price=float(row['close'])
)
history.append(bar)
return history
def download_bars(self, req: HistoryRequest):
"""获取某只股票的历史K线数据"""
history = []
limit = 60
if req.interval not in [Interval.MINUTE, Interval.DAILY]:
self.write_error(f'查询股票历史范围,本接口只支持分钟/日线')
return history
futu_code = '{}.{}'.format(EXCHANGE_VT2FUTU.get(req.exchange), req.symbol)
if req.interval == Interval.MINUTE:
if req.interval_num not in KLTYPE_MINUTES:
self.write_error(f'查询股票历史范围,请求分钟数{req.interval_num}不在范围:{KLTYPE_MINUTES}')
return history
k_type = f'K_{req.interval_num}M'
else:
if req.interval_num != 1:
self.write_error(f'查询股票历史范围,请求日线{req.interval_num}只能是1')
return history
k_type = KLType.K_DAY
start_date = req.start.strftime('%Y-%m-%d')
end_date = req.end.strftime('%Y-%m-%d') if req.end else None
ret, df, page_req_key = self.quote_ctx.request_history_kline(
code=futu_code,
ktype=k_type,
start=start_date,
end=end_date,
max_count=limit) # 每页5个,请求第一页
if ret == RET_OK:
for index, row in df.iterrows():
symbol = row['code']
str_time = row['time_key']
dt = datetime.strptime(str_time, '%Y-%m-%d %H:%M:%S')
bar = OrderedDict({
"datetime": str_time,
"open": float(row['open']),
"close": float(row['close']),
"high": float(row['high']),
"low": float(row['low']),
"volume": row['volume'],
"amount": row['turnover'],
"symbol": row['code'],
"trading_date": dt.strftime('%Y-%m-%d'),
"date": dt.strftime('%Y-%m-%d'),
"time": dt.strftime('%H:%M:%S'),
"pre_close": float(row['last_close']),
"turnover_rate": float(row.get('turnover_rate', 0)),
"change_rate": float(row.get('change_rate', 0))
})
history.append(bar)
else:
return history
while page_req_key != None: # 请求后面的所有结果
ret, df, page_req_key = self.quote_ctx.request_history_kline(
code=futu_code,
ktype=k_type,
start=start_date,
end=end_date,
page_req_key=page_req_key) # 请求翻页后的数据
if ret == RET_OK:
for index, row in df.iterrows():
symbol = row['code']
str_time = row['time_key']
dt = datetime.strptime(str_time, '%Y-%m-%d %H:%M:%S')
bar = OrderedDict({
"datetime": str_time,
"open": float(row['open']),
"close": float(row['close']),
"high": float(row['high']),
"low": float(row['low']),
"volume": row['volume'],
"amount": row['turnover'],
"symbol": row['code'],
"trading_date": dt.strftime('%Y-%m-%d'),
"date": dt.strftime('%Y-%m-%d'),
"time": dt.strftime('%H:%M:%S'),
"pre_close": float(row['last_close']),
"turnover_rate": float(row.get('turnover_rate', 0)),
"change_rate": float(row.get('change_rate', 0))
})
history.append(bar)
return history
def send_order(self, req: OrderRequest):
"""发送委托"""
side = DIRECTION_VT2FUTU[req.direction]
futu_order_type = OrderType.NORMAL # Only limit order is supported.
# Set price adjustment mode to inside adjustment.
if req.direction is Direction.LONG:
adjust_limit = 0.05
else:
adjust_limit = -0.05
futu_symbol = convert_symbol_vt2futu(req.symbol, req.exchange)
# 港股交易手数为整数
if req.exchange == Exchange.SEHK:
self.write_log(f'交易手数:{req.volume}=>{int(req.volume)}')
req.volume = int(req.volume)
local_orderid = self.order_manager.new_local_orderid()
order = req.create_order_data(local_orderid, self.gateway_name)
# 发出委托确认
order.status = Status.SUBMITTING
self.order_manager.on_order(order)
code, data = self.trade_ctx.place_order(
req.price,
req.volume,
futu_symbol,
side,
futu_order_type,
trd_env=self.env,
adjust_limit=adjust_limit,
)
if code:
self.write_log(f"委托失败:{data}")
order.status = Status.REJECTED
self.order_manager.on_order(order)
return ""
sys_orderid = ""
for ix, row in data.iterrows():
sys_orderid = str(row.get("order_id", ""))
if len(sys_orderid) > 0:
self.write_log(f'系统委托号:{sys_orderid}')
break
if len(sys_orderid) == 0:
order.status = Status.REJECTED
self.order_manager.on_order(order)
return ""
# 绑定 系统委托号
order.sys_orderid = sys_orderid
order.status = Status.NOTTRADED
self.order_manager.update_orderid_map(local_orderid, sys_orderid)
# 更新订单为已委托
self.order_manager.on_order(copy(order))
return order.vt_orderid
def cancel_order(self, req: CancelRequest):
""""""
order = self.order_manager.get_order_with_local_orderid(req.orderid)
# 更新订单委托状态为正在撤销
if order:
if order.status in [Status.REJECTED, Status.ALLTRADED, Status.CANCELLED]:
self.write_error(f'委托单:{req.orderid},状态已经是:{order.status},不能撤单')
return False
order.status = Status.CANCELLING
self.order_manager.on_order(order)
sys_orderid = order.sys_orderid
else:
sys_orderid = req.orderid
# 向接口发出撤单请求
code, data = self.trade_ctx.modify_order(
ModifyOrderOp.CANCEL, sys_orderid, 0, 0, trd_env=self.env
)
if code:
self.write_log(f"撤单失败:{data}")
return False
else:
self.write_log(f'成功发出撤单请求:orderid={req.orderid},sys_orderid:{sys_orderid}')
return True
def query_contract(self):
""""""
for product, futu_product in PRODUCT_VT2FUTU.items():
code, data = self.quote_ctx.get_stock_basicinfo(
self.market, futu_product
)
self.write_log(f'开始查询{futu_product}市场的合约清单')
if code:
self.write_log(f"查询合约信息失败:{data}")
return
for ix, row in data.iterrows():
symbol, exchange = convert_symbol_futu2vt(row["code"])
contract = ContractData(
symbol=symbol,
exchange=exchange,
name=row["name"],
product=product,
size=1,
pricetick=0.001,
net_position=True,
history_data=True,
gateway_name=self.gateway_name,
)
self.on_contract(contract)
self.contracts[contract.vt_symbol] = contract
self.write_log("合约信息查询成功")
def query_account(self):
""""""
code, data = self.trade_ctx.accinfo_query(trd_env=self.env, acc_id=0)
if code:
self.write_log(f"查询账户资金失败:{data}")
return
for ix, row in data.iterrows():
account = AccountData(
accountid=f"{self.gateway_name}_{self.market}",
balance=float(row["total_assets"]),
frozen=(float(row["total_assets"]) - float(row["avl_withdrawal_cash"])),
gateway_name=self.gateway_name,
)
self.on_account(account)
def query_position(self):
""""""
code, data = self.trade_ctx.position_list_query(
trd_env=self.env, acc_id=0
)
if code:
self.write_log(f"查询持仓失败:{data}")
return
for ix, row in data.iterrows():
symbol, exchange = convert_symbol_futu2vt(row["code"])
pos = PositionData(
symbol=symbol,
exchange=exchange,
direction=Direction.LONG,
volume=row["qty"],
frozen=(float(row["qty"]) - float(row["can_sell_qty"])),
price=float(row["cost_price"]),
pnl=float(row["pl_val"]),
gateway_name=self.gateway_name,
)
self.on_position(pos)
def query_order(self):
""""""
code, data = self.trade_ctx.order_list_query("", trd_env=self.env)
if code:
self.write_log(f"查询委托失败:{data}")
return
self.process_order(data)
self.write_log("委托查询成功")
def query_trade(self):
""""""
code, data = self.trade_ctx.deal_list_query("", trd_env=self.env)
if code:
self.write_log(f"查询成交失败:{data}")
return
self.process_deal(data)
self.write_log("成交查询成功")
def close(self):
""""""
if self.quote_ctx:
self.quote_ctx.close()
if self.trade_ctx:
self.trade_ctx.close()
def get_tick(self, code):
"""
Get tick buffer.
"""
tick = self.ticks.get(code, None)
symbol, exchange = convert_symbol_futu2vt(code)
if not tick:
tick = TickData(
symbol=symbol,
exchange=exchange,
datetime=datetime.now(),
gateway_name=self.gateway_name,
)
self.ticks[code] = tick
contract = self.contracts.get(tick.vt_symbol, None)
if contract:
tick.name = contract.name
return tick
def process_quote(self, data):
"""报价推送"""
for ix, row in data.iterrows():
symbol = row["code"]
tick = self.get_tick(symbol)
date = row["data_date"].replace("-", "")
time = row["data_time"]
tick.datetime = datetime.strptime(
f"{date} {time}", "%Y%m%d %H:%M:%S")
tick.open_price = row["open_price"]
tick.high_price = row["high_price"]
tick.low_price = row["low_price"]
tick.pre_close = row["prev_close_price"]
tick.last_price = row["last_price"]
tick.volume = row["volume"]
if "price_spread" in row:
spread = row["price_spread"]
tick.limit_up = tick.last_price + spread * 10
tick.limit_down = tick.last_price - spread * 10
self.on_tick(copy(tick))
def process_orderbook(self, data):
""""""
symbol = data["code"]
tick = self.get_tick(symbol)
d = tick.__dict__
for i in range(5):
bid_data = data["Bid"][i]
ask_data = data["Ask"][i]
n = i + 1
d["bid_price_%s" % n] = bid_data[0]
d["bid_volume_%s" % n] = bid_data[1]
d["ask_price_%s" % n] = ask_data[0]
d["ask_volume_%s" % n] = ask_data[1]
if tick.datetime:
self.on_tick(copy(tick))
    def process_order(self, data):
        """
        Process order data for both query and update.

        Skips DELETED rows. For an unseen system order id a new OrderData is
        created and cached; for a cached order an update event is pushed only
        when the status or traded quantity actually changed.
        """
        for ix, row in data.iterrows():
            # Ignore order with status DELETED
            if row["order_status"] == OrderStatus.DELETED:
                continue
            symbol, exchange = convert_symbol_futu2vt(row["code"])
            # Broker-assigned (system) order id
            sys_orderid = str(row["order_id"])
            # Look up the cached order for this system id
            order = self.order_manager.get_order_with_sys_orderid(sys_orderid)
            if order is None:
                # Map local order id <=> system order id
                local_orderid = self.order_manager.get_local_orderid(sys_orderid)
                # Build and cache a new local OrderData from the row
                order = OrderData(
                    symbol=symbol,
                    exchange=exchange,
                    orderid=local_orderid,
                    sys_orderid=sys_orderid,
                    direction=DIRECTION_FUTU2VT[row["trd_side"]],
                    price=float(row["price"]),
                    volume=row["qty"],
                    traded=row["dealt_qty"],
                    status=STATUS_FUTU2VT[row["order_status"]],
                    time=row["create_time"].split(" ")[-1],
                    gateway_name=self.gateway_name,
                )
                self.write_log(f'新建委托单缓存=>{order.__dict__}')
                self.order_manager.on_order(copy(order))
            else:
                # Cached order exists: check whether status or traded
                # quantity changed before pushing an update event.
                changed = False
                order_status = STATUS_FUTU2VT[row["order_status"]]
                if order.status != order_status:
                    order.status = order_status
                    changed = True
                if order.traded != row["dealt_qty"]:
                    order.traded = row["dealt_qty"]
                    changed = True
                if changed:
                    self.write_log(f'委托单更新=>{order.__dict__}')
                    self.order_manager.on_order(copy(order))
def process_deal(self, data):
"""
Process trade data for both query and update.
"""
for ix, row in data.iterrows():
# 系统委托编号
tradeid = str(row["deal_id"])
if tradeid in self.trades:
continue
self.trades.add(tradeid)
symbol, exchange = convert_symbol_futu2vt(row["code"])
# 系统委托号
sys_orderid = row["order_id"]
# 本地委托号
local_orderid = self.order_manager.get_local_orderid(sys_orderid)
trade = TradeData(
symbol=symbol,
exchange=exchange,
direction=DIRECTION_FUTU2VT[row["trd_side"]],
tradeid=tradeid,
orderid=local_orderid,
sys_orderid=sys_orderid,
price=float(row["price"]),
volume=row["qty"],
time=row["create_time"].split(" ")[-1],
gateway_name=self.gateway_name,
)
self.on_trade(trade)
def convert_symbol_futu2vt(code):
    """
    Convert symbol from futu to vt.

    Splits "<market>.<symbol>" at the first dot; the remainder (which may
    itself contain dots) becomes the vt symbol.
    """
    prefix, _, remainder = code.partition(".")
    return remainder, EXCHANGE_FUTU2VT[prefix]
def convert_symbol_vt2futu(symbol, exchange):
    """
    Convert symbol from vt to futu ("<market>.<symbol>").
    """
    return ".".join((EXCHANGE_VT2FUTU[exchange], symbol))
|
visualizer.py | import math
import numpy as np
import threading
import open3d as o3d
from open3d.visualization import gui
from open3d.visualization import rendering
from collections import deque
from .boundingbox import *
from .colormap import *
from .labellut import *
import time
class Model:
"""The class that helps build visualization models based on attributes, data, and methods."""
bounding_box_prefix = "Bounding Boxes/"
class BoundingBoxData:
"""The class to define a bounding box that is used to describe the target location.
**Args:**
name: The name of the pointcloud array.
boxes: The array of pointcloud that define the bounding box.
"""
def __init__(self, name, boxes):
self.name = name
self.boxes = boxes
def __init__(self):
# Note: the tpointcloud cannot store the actual data arrays, because
# the tpointcloud requires specific names for some arrays (e.g. "points",
# "colors"). So the tpointcloud exists for rendering and initially only
# contains the "points" array.
self.tclouds = {} # name -> tpointcloud
self.data_names = [] # the order data will be displayed / animated
self.bounding_box_data = [] # [BoundingBoxData]
self._data = {} # name -> {attr_name -> numpyarray}
self._known_attrs = {} # name -> set(attrs)
self._attr2minmax = {} # only access in _get_attr_minmax()
self._attr_rename = {"label": "labels", "feat": "feature"}
def _init_data(self, name):
tcloud = o3d.t.geometry.PointCloud(o3d.core.Device("CPU:0"))
self.tclouds[name] = tcloud
self._data[name] = {}
self.data_names.append(name)
"""Check if the data is loaded."""
def is_loaded(self, name):
if name in self._data:
return len(self._data[name]) > 0
else:
# if the name isn't in the data, presumably it is loaded
# (for instance, if this is a bounding box).
return True
"""If data is not loaded, then load the data."""
def load(self, name, fail_if_no_space=False):
assert (False) # pure virtual
def unload(self, name):
assert (False) # pure virtual
"""Create a point cloud based on the data provided. The data should include name and points."""
def create_point_cloud(self, data):
assert ("name" in data) # name is a required field
assert ("points" in data) # 'points' is a required field
name = data["name"]
pts = self._convert_to_numpy(data["points"])
tcloud = o3d.t.geometry.PointCloud(o3d.core.Device("CPU:0"))
known_attrs = set()
if pts.shape[1] >= 4:
# We can't use inplace Tensor creation (e.g. from_numpy())
# because the resulting arrays won't be contiguous. However,
# TensorList can be inplace.
xyz = pts[:, [0, 1, 2]]
tcloud.point["points"] = Visualizer._make_tcloud_array(xyz,
copy=True)
else:
tcloud.point["points"] = Visualizer._make_tcloud_array(pts)
self.tclouds[name] = tcloud
# Add scalar attributes and vector3 attributes
attrs = {}
for k, v in data.items():
attr = self._convert_to_numpy(v)
if attr is None:
continue
attr_name = k
if attr_name == "point":
continue
new_name = self._attr_rename.get(attr_name)
if new_name is not None:
attr_name = new_name
if len(attr.shape) == 1 or len(attr.shape) == 2:
attrs[attr_name] = attr
known_attrs.add(attr_name)
self._data[name] = attrs
self._known_attrs[name] = known_attrs
def _convert_to_numpy(self, ary):
if isinstance(ary, list):
try:
return np.array(ary, dtype='float32')
except TypeError:
return None
elif isinstance(ary, np.ndarray):
if len(ary.shape) == 2 and ary.shape[0] == 1:
ary = ary[0] # "1D" array as 2D: [[1, 2, 3,...]]
if ary.dtype.name.startswith('int'):
return np.array(ary, dtype='float32')
else:
return ary
try:
import tensorflow as tf
if isinstance(ary, tf.Tensor):
return self._convert_to_numpy(ary.numpy())
except:
pass
try:
import torch
if isinstance(ary, torch.Tensor):
return self._convert_to_numpy(ary.detach().cpu().numpy())
except:
pass
return None
"""Get an attribute from data based on the name passed."""
def get_attr(self, name, attr_name):
if name in self._data:
attrs = self._data[name]
if attr_name in attrs:
return attrs[attr_name]
return None
"""Get a shape from data based on the name passed."""
def get_attr_shape(self, name, attr_name):
attr = self.get_attr(name, attr_name)
if attr is not None:
return attr.shape
return []
"""Get the minimum and maximum for an attribute."""
def get_attr_minmax(self, attr_name, channel):
attr_key_base = attr_name + ":" + str(channel)
attr_min = 1e30
attr_max = -1e30
for name in self._data.keys():
key = name + ":" + attr_key_base
if key not in self._attr2minmax:
attr = self.get_attr(name, attr_name)
if attr is None: # clouds may not have all the same attributes
continue
if len(attr.shape) > 1:
attr = attr[:, channel]
self._attr2minmax[key] = (attr.min(), attr.max())
amin, amax = self._attr2minmax[key]
attr_min = min(attr_min, amin)
attr_max = max(attr_max, amax)
if attr_min > attr_max:
return (0.0, 0.0)
return (attr_min, attr_max)
"""Get a list of attributes based on the name."""
def get_available_attrs(self, names):
attr_names = None
for n in names:
known = self._known_attrs.get(n)
if known is not None:
if attr_names is None:
attr_names = known
else:
attr_names = attr_names.intersection(known)
if attr_names is None:
return []
return sorted(attr_names)
"""Calculate the bounds for a pointcloud."""
def calc_bounds_for(self, name):
if name in self.tclouds and not self.tclouds[name].is_empty():
tcloud = self.tclouds[name]
# Ideally would simply return tcloud.compute_aabb() here, but it can
# be very slow on macOS with clang 11.0
pts = tcloud.point["points"].numpy()
min_val = (pts[:, 0].min(), pts[:, 1].min(), pts[:, 2].min())
max_val = (pts[:, 0].max(), pts[:, 1].max(), pts[:, 2].max())
return [min_val, max_val]
else:
return [(0.0, 0.0, 0.0), (0.0, 0.0, 0.0)]
class DataModel(Model):
    """Model backed by user-supplied, in-memory data.

    Args:
        userdata: Iterable of dicts, each with at least "name" and "points".
    """

    def __init__(self, userdata):
        super().__init__()
        # We could just create the TPointCloud here, but that would cause the UI
        # to block. If we do it on load then the loading dialog will display.
        self._name2srcdata = {}
        for d in userdata:
            name = d["name"]
            while name in self._data:  # ensure each name is unique
                name = name + "_"
            self._init_data(name)
            self._name2srcdata[name] = d

    def load(self, name, fail_if_no_space=False):
        """Load a pointcloud based on the name provided."""
        if self.is_loaded(name):
            return
        self.create_point_cloud(self._name2srcdata[name])

    def unload(self, name):
        """Unload a pointcloud (no-op: user data stays in memory)."""
        pass
class DatasetModel(Model):
    """Model backed by a 3D ML dataset split, with an LRU point-cloud cache.

    Args:
        dataset: The 3D ML dataset to use. You can use the base dataset,
            sample datasets, or a custom dataset.
        split: A string identifying the dataset split that is usually one of
            'training', 'test', 'validation', or 'all'.
        indices: The indices to be used for the datamodel. This may vary
            based on the split used.
    """

    def __init__(self, dataset, split, indices):
        super().__init__()
        self._dataset = None
        self._name2datasetidx = {}
        # NOTE(review): the comment said "8192MB" but 8192 * 1024 * 1024 * 8
        # is 64 GiB — confirm the intended limit.
        self._memory_limit = 8192 * 1024 * 1024 * 8  # memory limit in bytes
        self._current_memory_usage = 0
        self._cached_data = deque()  # LRU order: oldest entry at the left

        self._dataset = dataset.get_split(split)
        if len(self._dataset) > 0:
            if indices is None:
                indices = range(0, len(self._dataset))
            # Some results from get_split() (like "training") are randomized.
            # Sort, so that the same index always returns the same piece of data.
            path2idx = {}
            for i in range(0, len(self._dataset.path_list)):
                path2idx[self._dataset.path_list[i]] = i
            real_indices = [path2idx[p] for p in sorted(path2idx.keys())]
            print("len real_indices = ", len(real_indices))
            indices = [real_indices[idx] for idx in indices]

            # SemanticKITTI names its items <sequence#>_<timeslice#>,
            # "mm_nnnnnn". We'd like to use the hierarchical feature of the tree
            # to separate the sequences. We cannot change the name in the dataset
            # because this format is used to report algorithm results, so do it
            # here.
            # Bug fix: the original condition was
            # `__name__ == "SemanticKITTI" or "Rellis3D"`, which is always
            # truthy, so *every* dataset got the underscore replacement.
            underscore_to_slash = False
            if dataset.__class__.__name__ in ("SemanticKITTI", "Rellis3D"):
                underscore_to_slash = True
                print("underscore_to_slash set to True")

            for i in indices:
                info = self._dataset.get_attr(i)
                name = info["name"]
                if underscore_to_slash:
                    name = name.replace("_", "/")
                while name in self._data:  # ensure each name is unique
                    name = name + "_"

                self._init_data(name)
                self._name2datasetidx[name] = i

            if dataset.__class__.__name__ in [
                    "Toronto3D", "Semantic3D", "S3DIS"
            ]:
                self._attr_rename["feat"] = "colors"
                self._attr_rename["feature"] = "colors"
        else:
            print("[ERROR] Dataset split has no data")

    def is_loaded(self, name):
        """Check if the data is loaded; refresh its LRU position if so."""
        loaded = super().is_loaded(name)
        if loaded and name in self._cached_data:
            # make this point cloud the most recently used
            self._cached_data.remove(name)
            self._cached_data.append(name)
        return loaded

    def load(self, name, fail_if_no_space=False):
        """Load `name` from the dataset, evicting the LRU entry if needed.

        Returns:
            True on success; False only when fail_if_no_space is True and
            loading would exceed the memory limit.
        """
        assert (name in self._name2datasetidx)

        if self.is_loaded(name):
            return True

        idx = self._name2datasetidx[name]
        data = self._dataset.get_data(idx)
        data["name"] = name
        data["points"] = data["point"]

        if 'bounding_boxes' in data:
            self.bounding_box_data.append(
                Model.BoundingBoxData(name, data['bounding_boxes']))

        self.create_point_cloud(data)
        size = self._calc_pointcloud_size(self._data[name], self.tclouds[name])
        if size + self._current_memory_usage > self._memory_limit:
            if fail_if_no_space:
                self.unload(name)
                return False
            else:
                # Remove oldest from cache
                remove_name = self._cached_data.popleft()
                remove_size = self._calc_pointcloud_size(
                    self._data[remove_name], self.tclouds[remove_name])
                self._current_memory_usage -= remove_size
                self.unload(remove_name)
                # Add new point cloud to cache
                self._cached_data.append(name)
                self._current_memory_usage += size
                return True
        else:
            self._current_memory_usage += size
            self._cached_data.append(name)
            return True

    def _calc_pointcloud_size(self, raw_data, pcloud):
        """Estimate the memory footprint in bytes of one loaded point cloud."""
        pcloud_size = 0
        for (attr, arr) in raw_data.items():
            pcloud_size += arr.size * 4
        # Point cloud consumes 64 bytes of per point of GPU memory
        pcloud_size += pcloud.point["points"].num_elements() * 64
        return pcloud_size

    def unload(self, name):
        """Unload the data (only if you have loaded it earlier)."""
        # Only unload if this was loadable; we might have an in-memory,
        # user-specified data created directly through create_point_cloud().
        if name in self._name2datasetidx:
            tcloud = o3d.t.geometry.PointCloud(o3d.core.Device("CPU:0"))
            self.tclouds[name] = tcloud
            self._data[name] = {}

            bbox_name = Model.bounding_box_prefix + name
            for i in range(0, len(self.bounding_box_data)):
                if self.bounding_box_data[i].name == bbox_name:
                    self.bounding_box_data.pop(i)
                    break
class Visualizer:
"""The visualizer class for dataset objects and custom point clouds."""
class LabelLUTEdit:
"""This class includes functionality for managing a labellut (label look-up-table)."""
def __init__(self):
self.widget = gui.TreeView()
self._on_changed = None # takes no args, returns no value
self.clear()
"""Clears the look-up table."""
def clear(self):
self.widget.clear()
self._label2color = {}
"""Checks if the look-up table is empty."""
def is_empty(self):
return len(self._label2color) == 0
"""Returns a list of label keys."""
def get_colors(self):
return [
self._label2color[label]
for label in sorted(self._label2color.keys())
]
def set_on_changed(self, callback): # takes no args, no return value
self._on_changed = callback
"""Updates the labels based on look-up table passsed."""
def set_labels(self, labellut):
self.widget.clear()
root = self.widget.get_root_item()
for key in sorted(labellut.labels.keys()):
lbl = labellut.labels[key]
color = lbl.color
if len(color) == 3:
color += [1.0]
self._label2color[key] = color
color = gui.Color(lbl.color[0], lbl.color[1], lbl.color[2])
cell = gui.LUTTreeCell(
str(key) + ": " + lbl.name, True, color, None, None)
cell.checkbox.set_on_checked(
self._make_on_checked(key, self._on_label_checked))
cell.color_edit.set_on_value_changed(
self._make_on_color_changed(key,
self._on_label_color_changed))
self.widget.add_item(root, cell)
def _make_on_color_changed(self, label, member_func):
def on_changed(color):
member_func(label, color)
return on_changed
def _on_label_color_changed(self, label, gui_color):
self._label2color[label] = [
gui_color.red, gui_color.green, gui_color.blue,
self._label2color[label][3]
]
if self._on_changed is not None:
self._on_changed()
def _make_on_checked(self, label, member_func):
def on_checked(checked):
member_func(label, checked)
return on_checked
def _on_label_checked(self, label, checked):
if checked:
alpha = 1.0
else:
alpha = 0.0
color = self._label2color[label]
self._label2color[label] = [color[0], color[1], color[2], alpha]
if self._on_changed is not None:
self._on_changed()
class ColormapEdit:
"""This class is used to create a color map for visualization of points."""
def __init__(self, window, em):
self.colormap = None
self.widget = gui.Vert()
self._window = window
self._min_value = 0.0
self._max_value = 1.0
self._on_changed = None # takes no args, no return value
self._itemid2idx = {}
self._min_label = gui.Label("")
self._max_label = gui.Label("")
grid = gui.VGrid(2)
grid.add_child(gui.Label("Range (min):"))
grid.add_child(self._min_label)
grid.add_child(gui.Label("Range (max):"))
grid.add_child(self._max_label)
self.widget.add_child(grid)
self.widget.add_fixed(0.5 * em)
self.widget.add_child(gui.Label("Colormap"))
self._edit = gui.TreeView()
self._edit.set_on_selection_changed(self._on_selection_changed)
self.widget.add_child(self._edit)
self._delete = gui.Button("Delete")
self._delete.horizontal_padding_em = 0.5
self._delete.vertical_padding_em = 0
self._delete.set_on_clicked(self._on_delete)
self._add = gui.Button("Add")
self._add.horizontal_padding_em = 0.5
self._add.vertical_padding_em = 0
self._add.set_on_clicked(self._on_add)
h = gui.Horiz()
h.add_stretch()
h.add_child(self._delete)
h.add_fixed(0.25 * em)
h.add_child(self._add)
h.add_stretch()
self.widget.add_fixed(0.5 * em)
self.widget.add_child(h)
self.widget.add_fixed(0.5 * em)
def set_on_changed(self, callback): # takes no args, no return value
self._on_changed = callback
"""Updates the colormap based on the minimum and maximum values passed."""
def update(self, colormap, min_val, max_val):
self.colormap = colormap
self._min_value = min_val
self._max_value = max_val
self._min_label.text = str(min_val)
self._max_label.text = str(max_val)
if self._min_value >= self._max_value:
self._max_value = self._min_value + 1.0
self._edit.clear()
self._itemid2idx = {}
root_id = self._edit.get_root_item()
for i in range(0, len(self.colormap.points)):
p = self.colormap.points[i]
color = gui.Color(p.color[0], p.color[1], p.color[2])
val = min_val + p.value * (max_val - min_val)
cell = gui.ColormapTreeCell(val, color, None, None)
cell.color_edit.set_on_value_changed(
self._make_on_color_changed(i, self._on_color_changed))
cell.number_edit.set_on_value_changed(
self._make_on_value_changed(i, self._on_value_changed))
item_id = self._edit.add_item(root_id, cell)
self._itemid2idx[item_id] = i
self._update_buttons_enabled()
def _make_on_color_changed(self, idx, member_func):
def on_changed(color):
member_func(idx, color)
return on_changed
def _on_color_changed(self, idx, gui_color):
self.colormap.points[idx].color = [
gui_color.red, gui_color.green, gui_color.blue
]
if self._on_changed is not None:
self._on_changed()
def _make_on_value_changed(self, idx, member_func):
def on_changed(value):
member_func(idx, value)
return on_changed
def _on_value_changed(self, idx, value):
value = (value - self._min_value) / (self._max_value -
self._min_value)
needs_update = False
value = min(1.0, max(0.0, value))
if ((idx > 0 and value < self.colormap.points[idx - 1].value) or
(idx < len(self.colormap.points) - 1 and
value > self.colormap.points[idx + 1].value)):
self.colormap.points[idx].value = value
o = self.colormap.points[idx]
self.colormap.points.sort(key=lambda cmap_pt: cmap_pt.value)
for i in range(0, len(self.colormap.points)):
if self.colormap.points[i] is o:
idx = i
break
needs_update = True
if idx > 0 and value == self.colormap.points[idx - 1].value:
if idx < len(self.colormap.points):
upper = self.colormap.points[idx + 1].value
else:
upper = 1.0
value = value + 0.5 * (upper - value)
needs_update = True
if idx < len(self.colormap.points
) - 1 and value == self.colormap.points[idx + 1].value:
if idx > 0:
lower = self.colormap.points[idx - 1].value
else:
lower = 0.0
value = lower + 0.5 * (value - lower)
needs_update = True
self.colormap.points[idx].value = value
if needs_update:
self._update_later()
if self._on_changed is not None:
self._on_changed()
def _on_selection_changed(self, item_id):
self._update_buttons_enabled()
def _on_delete(self):
if len(self.colormap.points) > 2:
idx = self._itemid2idx[self._edit.selected_item]
self.colormap.points = self.colormap.points[:
idx] + self.colormap.points[
idx + 1:]
del self._itemid2idx[self._edit.selected_item]
self._update_later()
if self._on_changed is not None:
self._on_changed()
def _on_add(self):
if self._edit.selected_item in self._itemid2idx: # maybe no selection
idx = self._itemid2idx[self._edit.selected_item]
if idx < len(self.colormap.points) - 1:
lower = self.colormap.points[idx]
upper = self.colormap.points[idx + 1]
else:
lower = self.colormap.points[len(self.colormap.points) - 2]
upper = self.colormap.points[len(self.colormap.points) - 1]
add_idx = min(idx + 1, len(self.colormap.points) - 1)
new_value = lower.value + 0.5 * (upper.value - lower.value)
new_color = [
0.5 * lower.color[0] + 0.5 * upper.color[0],
0.5 * lower.color[1] + 0.5 * upper.color[1],
0.5 * lower.color[2] + 0.5 * upper.color[2]
]
new_point = Colormap.Point(new_value, new_color)
self.colormap.points = self.colormap.points[:add_idx] + [
new_point
] + self.colormap.points[add_idx:]
self._update_later()
if self._on_changed is not None:
self._on_changed()
def _update_buttons_enabled(self):
if self._edit.selected_item in self._itemid2idx:
self._delete.enabled = len(self.colormap.points) > 2
self._add.enabled = True
else:
self._delete.enabled = False
self._add.enabled = False
def _update_later(self):
def update():
self.update(self.colormap, self._min_value, self._max_value)
self._window.post_redraw() # need to manually request redraw
gui.Application.instance.post_to_main_thread(self._window, update)
class ProgressDialog:
"""This class is used to manage the progress dialog displayed during visualization.
Initialize the class.
**Args:**
title: The title of the dialog box.
window: The window where the progress dialog box should be displayed.
n_items: The maximum number of items.
"""
def __init__(self, title, window, n_items):
self._window = window
self._n_items = n_items
em = window.theme.font_size
self.dialog = gui.Dialog(title)
self._label = gui.Label(title + " ")
self._layout = gui.Vert(0, gui.Margins(em, em, em, em))
self.dialog.add_child(self._layout)
self._layout.add_child(self._label)
self._layout.add_fixed(0.5 * em)
self._progress = gui.ProgressBar()
self._progress.value = 0.0
self._layout.add_child(self._progress)
"""Set the label text on the dialog box."""
def set_text(self, text):
self._label.text = text + " "
"""Post updates to the main thread."""
def post_update(self, text=None):
if text is None:
gui.Application.instance.post_to_main_thread(
self._window, self.update)
else:
def update_with_text():
self.update()
self._label.text = text
gui.Application.instance.post_to_main_thread(
self._window, update_with_text)
"""Enumerate the progress in the dialog box."""
def update(self):
value = min(1.0, self._progress.value + 1.0 / self._n_items)
self._progress.value = value
SOLID_NAME = "Solid Color"
LABELS_NAME = "Label Colormap"
RAINBOW_NAME = "Colormap (Rainbow)"
GREYSCALE_NAME = "Colormap (Greyscale)"
COLOR_NAME = "RGB"
X_ATTR_NAME = "x position"
Y_ATTR_NAME = "y position"
Z_ATTR_NAME = "z position"
    def __init__(self):
        """Initialize bookkeeping state; UI widgets are built later in _init_user_interface()."""
        self._objects = None  # DataModel or DatasetModel backing the view
        self._name2treenode = {}  # item name -> tree node widget
        self._name2treeid = {}  # item name -> tree item id
        self._treeid2name = {}  # inverse of _name2treeid
        self._attrname2lut = {}  # attribute name -> label look-up table
        self._colormaps = {}  # colormap name -> Colormap
        self._shadername2panelidx = {}  # shader name -> options panel index
        self._gradient = rendering.Gradient()
        self._scalar_min = 0.0  # current scalar coloring range
        self._scalar_max = 1.0
        self._animation_frames = []  # item names shown by the animation slider
        self._last_animation_time = time.time()
        self._animation_delay_secs = 0.100  # delay between animation frames
        self._consolidate_bounding_boxes = False
        # Guard flag: suppress geometry rebuilds during bulk UI updates.
        self._dont_update_geometry = False
    def _init_dataset(self, dataset, split, indices):
        """Back this visualizer with a DatasetModel built from an ML dataset split."""
        self._objects = DatasetModel(dataset, split, indices)
    def _init_data(self, data):
        """Back this visualizer with a DataModel built from user-supplied data."""
        self._objects = DataModel(data)
    def _init_user_interface(self, title, width, height):
        """Build the application window: a SceneWidget on the left and the
        control panel (mouse controls, dataset tree / animation tabs, and
        shader properties) on the right. Widget creation order matters for
        layout; see _on_layout() for final geometry."""
        self.window = gui.Application.instance.create_window(
            title, width, height)
        self.window.set_on_layout(self._on_layout)
        em = self.window.theme.font_size
        self._3d = gui.SceneWidget()
        self._3d.enable_scene_caching(True) # makes UI _much_ more responsive
        self._3d.scene = rendering.Open3DScene(self.window.renderer)
        self.window.add_child(self._3d)
        self._panel = gui.Vert()
        self.window.add_child(self._panel)
        indented_margins = gui.Margins(em, 0, em, 0)
        # View controls
        ctrl = gui.CollapsableVert("Mouse Controls", 0, indented_margins)
        arcball = gui.Button("Arcball")
        arcball.set_on_clicked(self._on_arcball_mode)
        arcball.horizontal_padding_em = 0.5
        arcball.vertical_padding_em = 0
        fly = gui.Button("Fly")
        fly.set_on_clicked(self._on_fly_mode)
        fly.horizontal_padding_em = 0.5
        fly.vertical_padding_em = 0
        reset = gui.Button("Re-center")
        reset.set_on_clicked(self._on_reset_camera)
        reset.horizontal_padding_em = 0.5
        reset.vertical_padding_em = 0
        h = gui.Horiz(0.25 * em)
        h.add_stretch()
        h.add_child(arcball)
        h.add_child(fly)
        h.add_fixed(em)
        h.add_child(reset)
        h.add_stretch()
        ctrl.add_child(h)
        ctrl.add_fixed(em)
        self._panel.add_child(ctrl)
        # Dataset
        model = gui.CollapsableVert("Dataset", 0, indented_margins)
        vgrid = gui.VGrid(2, 0.25 * em)
        model.add_child(vgrid)
        model.add_fixed(0.5 * em)
        bgcolor = gui.ColorEdit()
        bgcolor.color_value = gui.Color(1, 1, 1)
        # Apply the default background immediately, then track user edits.
        self._on_bgcolor_changed(bgcolor.color_value)
        bgcolor.set_on_value_changed(self._on_bgcolor_changed)
        vgrid.add_child(gui.Label("BG Color"))
        vgrid.add_child(bgcolor)
        view_tab = gui.TabControl()
        view_tab.set_on_selected_tab_changed(self._on_display_tab_changed)
        model.add_child(view_tab)
        # ... model list
        self._dataset = gui.TreeView()
        self._dataset.set_on_selection_changed(
            self._on_dataset_selection_changed)
        view_tab.add_tab("List", self._dataset)
        # ... animation slider
        v = gui.Vert()
        view_tab.add_tab("Animation", v)
        v.add_fixed(0.25 * em)
        grid = gui.VGrid(2)
        v.add_child(grid)
        self._slider = gui.Slider(gui.Slider.INT)
        # NOTE(review): upper limit is len(data_names) here but
        # len(data_names) - 1 in _add_tree_name — confirm which is intended.
        self._slider.set_limits(0, len(self._objects.data_names))
        self._slider.set_on_value_changed(self._on_animation_slider_changed)
        grid.add_child(gui.Label("Index"))
        grid.add_child(self._slider)
        self._slider_current = gui.Label("")
        grid.add_child(gui.Label("Showing"))
        grid.add_child(self._slider_current)
        v.add_fixed(em)
        self._play = gui.Button("Play")
        self._play.horizontal_padding_em = 0.5
        self._play.vertical_padding_em = 0
        self._play.set_on_clicked(self._on_start_animation)
        h = gui.Horiz()
        h.add_stretch()
        h.add_child(self._play)
        h.add_stretch()
        v.add_child(h)
        self._panel.add_child(model)
        # Coloring
        properties = gui.CollapsableVert("Properties", 0, indented_margins)
        grid = gui.VGrid(2, 0.25 * em)
        # ... data source
        self._datasource_combobox = gui.Combobox()
        self._datasource_combobox.set_on_selection_changed(
            self._on_datasource_changed)
        self._colormap_channel = gui.Combobox()
        self._colormap_channel.add_item("0")
        self._colormap_channel.set_on_selection_changed(
            self._on_channel_changed)
        h = gui.Horiz()
        h.add_child(self._datasource_combobox)
        h.add_fixed(em)
        h.add_child(gui.Label("Index"))
        h.add_child(self._colormap_channel)
        grid.add_child(gui.Label("Data"))
        grid.add_child(h)
        # ... shader
        self._shader = gui.Combobox()
        self._shader.add_item(self.SOLID_NAME)
        self._shader.add_item(self.LABELS_NAME)
        self._shader.add_item(self.RAINBOW_NAME)
        self._shader.add_item(self.GREYSCALE_NAME)
        self._shader.add_item(self.COLOR_NAME)
        self._colormaps[self.RAINBOW_NAME] = Colormap.make_rainbow()
        self._colormaps[self.GREYSCALE_NAME] = Colormap.make_greyscale()
        self._shader.selected_index = 0
        self._shader.set_on_selection_changed(self._on_shader_changed)
        grid.add_child(gui.Label("Shader"))
        grid.add_child(self._shader)
        properties.add_child(grid)
        # ... shader panels: one stacked sub-panel per shader type; the
        # mapping is recorded in _shadername2panelidx for _set_shader().
        self._shader_panels = gui.StackedWidget()
        panel_idx = 0
        # ... sub-panel: single color
        self._color_panel = gui.Vert()
        self._shader_panels.add_child(self._color_panel)
        self._shadername2panelidx[self.SOLID_NAME] = panel_idx
        panel_idx += 1
        self._color = gui.ColorEdit()
        self._color.color_value = gui.Color(0.5, 0.5, 0.5)
        self._color.set_on_value_changed(self._on_shader_color_changed)
        h = gui.Horiz()
        h.add_child(gui.Label("Color"))
        h.add_child(self._color)
        self._color_panel.add_child(h)
        # ... sub-panel: labels
        self._labels_panel = gui.Vert()
        self._shader_panels.add_child(self._labels_panel)
        self._shadername2panelidx[self.LABELS_NAME] = panel_idx
        panel_idx += 1
        self._label_edit = self.LabelLUTEdit()
        self._label_edit.set_on_changed(self._on_labels_changed)
        self._labels_panel.add_child(gui.Label("Labels"))
        self._labels_panel.add_child(self._label_edit.widget)
        # ... sub-panel: colormap (shared by rainbow and greyscale)
        self._colormap_panel = gui.Vert()
        self._shader_panels.add_child(self._colormap_panel)
        self._shadername2panelidx[self.RAINBOW_NAME] = panel_idx
        self._shadername2panelidx[self.GREYSCALE_NAME] = panel_idx
        panel_idx += 1
        self._colormap_edit = self.ColormapEdit(self.window, em)
        self._colormap_edit.set_on_changed(self._on_colormap_changed)
        self._colormap_panel.add_child(self._colormap_edit.widget)
        # ... sub-panel: RGB
        self._rgb_panel = gui.Vert()
        self._shader_panels.add_child(self._rgb_panel)
        self._shadername2panelidx[self.COLOR_NAME] = panel_idx
        panel_idx += 1
        self._rgb_combo = gui.Combobox()
        self._rgb_combo.add_item("255")
        self._rgb_combo.add_item("1.0")
        self._rgb_combo.set_on_selection_changed(self._on_rgb_multiplier)
        h = gui.Horiz(0.5 * em)
        h.add_child(gui.Label("Max value"))
        h.add_child(self._rgb_combo)
        self._rgb_panel.add_child(h)
        properties.add_fixed(em)
        properties.add_child(self._shader_panels)
        self._panel.add_child(properties)
        # Populate tree, etc.
        for name in self._objects.data_names:
            self._add_tree_name(name)
        self._update_datasource_combobox()
"""Set the LUT for a specific attribute.
**Args:**
attr_name: The attribute name as string.
lut: The LabelLUT object that should be updated.
"""
def set_lut(self, attr_name, lut):
self._attrname2lut[attr_name] = lut
"""Set up camera for visualization"""
def setup_camera(self):
selected_names = self._get_selected_names()
selected_bounds = [
self._objects.calc_bounds_for(n) for n in selected_names
]
min_val = [1e30, 1e30, 1e30]
max_val = [-1e30, -1e30, -1e30]
for b in selected_bounds:
for i in range(0, 3):
min_val[i] = min(min_val[i], b[0][i])
max_val[i] = max(max_val[i], b[1][i])
bounds = o3d.geometry.AxisAlignedBoundingBox(min_val, max_val)
self._3d.setup_camera(60, bounds, bounds.get_center())
"""Show geometry for a given node."""
def show_geometries_under(self, name, show):
prefix = name
for (n, node) in self._name2treenode.items():
if n.startswith(prefix):
self._3d.scene.show_geometry(n, show)
node.checkbox.checked = show
self._3d.force_redraw()
def _add_tree_name(self, name, is_geometry=True):
names = name.split("/")
parent = self._dataset.get_root_item()
for i in range(0, len(names) - 1):
n = "/".join(names[:i + 1]) + "/"
if n in self._name2treeid:
parent = self._name2treeid[n]
else:
def on_parent_checked(checked):
self.show_geometries_under(n, checked)
cell = gui.CheckableTextTreeCell(n, True, on_parent_checked)
parent = self._dataset.add_item(parent, cell)
self._name2treenode[n] = cell
self._name2treeid[n] = parent
self._treeid2name[parent] = n
def on_checked(checked):
self._3d.scene.show_geometry(name, checked)
if self._is_tree_name_geometry(name):
# available attrs could change
self._update_datasource_combobox()
self._update_bounding_boxes()
self._3d.force_redraw()
cell = gui.CheckableTextTreeCell(names[-1], True, on_checked)
if is_geometry:
cell.label.text_color = gui.Color(1.0, 0.0, 0.0, 1.0)
node = self._dataset.add_item(parent, cell)
self._name2treenode[name] = cell
self._treeid2name[node] = name
self._slider.set_limits(0, len(self._objects.data_names) - 1)
if len(self._objects.data_names) == 1:
self._slider_current.text = name
    def _load_geometry(self, name, ui_done_callback):
        """Load one geometry on a worker thread behind a 2-step progress
        dialog; *ui_done_callback* then runs on the main thread and the
        dialog is closed."""
        progress_dlg = Visualizer.ProgressDialog("Loading...", self.window, 2)
        progress_dlg.set_text("Loading " + name + "...")
        def load_thread():
            # NOTE(review): load() result is ignored here, and the tree node
            # color is not updated — unlike _load_geometries(); confirm.
            result = self._objects.load(name)
            progress_dlg.post_update("Loading " + name + "...")
            gui.Application.instance.post_to_main_thread(
                self.window, ui_done_callback)
            gui.Application.instance.post_to_main_thread(
                self.window, self.window.close_dialog)
        self.window.show_dialog(progress_dlg.dialog)
        threading.Thread(target=load_thread).start()
    def _load_geometries(self, names, ui_done_callback):
        """Load several geometries on a worker thread with a progress dialog.

        Successfully loaded entries turn green in the tree; loading stops at
        the first failure. *ui_done_callback* is posted to the main thread
        when done, then the dialog is closed.
        """
        # Progress has: len(names) items + ui_done_callback
        progress_dlg = Visualizer.ProgressDialog("Loading...", self.window,
                                                 len(names) + 1)
        progress_dlg.set_text("Loading " + names[0] + "...")
        def load_thread():
            for i in range(0, len(names)):
                result = self._objects.load(names[i], True)
                if i + 1 < len(names):
                    text = "Loading " + names[i + 1] + "..."
                else:
                    text = "Creating GPU objects..."
                progress_dlg.post_update(text)
                if result:
                    self._name2treenode[names[i]].label.text_color = gui.Color(
                        0.0, 1.0, 0.0, 1.0)
                else:
                    break
            gui.Application.instance.post_to_main_thread(
                self.window, ui_done_callback)
            gui.Application.instance.post_to_main_thread(
                self.window, self.window.close_dialog)
        self.window.show_dialog(progress_dlg.dialog)
        threading.Thread(target=load_thread).start()
    def _update_geometry(self, check_unloaded=False):
        """Re-upload all point clouds with the current material.

        Non-empty clouds turn their tree label green; empty ones turn red and
        are unchecked. With check_unloaded=True, geometry that the model has
        dropped is first removed from the scene.
        """
        if check_unloaded:
            for name in self._objects.data_names:
                if not self._objects.is_loaded(name):
                    self._3d.scene.remove_geometry(name)
        material = self._get_material()
        for n, tcloud in self._objects.tclouds.items():
            self._update_point_cloud(n, tcloud, material)
            if not tcloud.is_empty():
                self._name2treenode[n].label.text_color = gui.Color(
                    0.0, 1.0, 0.0, 1.0)
                if self._3d.scene.has_geometry(n):
                    self._3d.scene.modify_geometry_material(n, material)
            else:
                self._name2treenode[n].label.text_color = gui.Color(
                    1.0, 0.0, 0.0, 1.0)
                self._name2treenode[n].checkbox.checked = False
        self._3d.force_redraw()
    def _update_point_cloud(self, name, tcloud, material):
        """Recompute the visualization scalar (and RGB colors when the
        selected attribute has >= 3 channels) for one cloud and push the
        result plus *material* into the scene. No-op while
        _dont_update_geometry is set or the cloud is empty."""
        if self._dont_update_geometry:
            return
        if tcloud.is_empty():
            return
        attr_name = self._datasource_combobox.selected_text
        attr = None
        flag = 0
        attr = self._objects.get_attr(name, attr_name)
        # Update scalar values
        if attr is not None:
            if len(attr.shape) == 1:
                scalar = attr
            else:
                # Multi-channel attribute: visualize the selected channel.
                channel = max(0, self._colormap_channel.selected_index)
                scalar = attr[:, channel]
        else:
            # Attribute missing for this cloud: fall back to all zeros.
            shape = [len(tcloud.point["points"].numpy())]
            scalar = np.zeros(shape, dtype='float32')
        tcloud.point["__visualization_scalar"] = Visualizer._make_tcloud_array(
            scalar)
        flag |= rendering.Scene.UPDATE_UV0_FLAG
        # Update RGB values
        if attr is not None and (len(attr.shape) == 2 and attr.shape[1] >= 3):
            # Normalize by the user-selected max value (255 or 1.0).
            max_val = float(self._rgb_combo.selected_text)
            if max_val <= 0:
                max_val = 255.0
            colors = attr[:, [0, 1, 2]] * (1.0 / max_val)
            tcloud.point["colors"] = Visualizer._make_tcloud_array(colors)
            flag |= rendering.Scene.UPDATE_COLORS_FLAG
        # Update geometry
        if self._3d.scene.scene.has_geometry(name):
            self._3d.scene.scene.update_geometry(name, tcloud, flag)
        else:
            self._3d.scene.add_geometry(name, tcloud, material)
        node = self._name2treenode[name]
        if node is not None:
            self._3d.scene.show_geometry(name, node.checkbox.checked)
def _get_material(self):
self._update_gradient()
material = rendering.Material()
if self._shader.selected_text == self.SOLID_NAME:
material.shader = "unlitSolidColor"
c = self._color.color_value
material.base_color = [c.red, c.green, c.blue, 1.0]
elif self._shader.selected_text == self.COLOR_NAME:
material.shader = "defaultUnlit"
material.base_color = [1.0, 1.0, 1.0, 1.0]
else:
material.shader = "unlitGradient"
material.gradient = self._gradient
material.scalar_min = self._scalar_min
material.scalar_max = self._scalar_max
return material
    def _update_bounding_boxes(self, animation_frame=None):
        """Rebuild the bounding-box line geometry in the scene.

        In consolidated mode all boxes of the checked geometries (or of the
        current animation frame) are merged into one line set; otherwise each
        bbox_data entry gets its own geometry, created only once.
        """
        # Pick a label LUT for coloring the boxes, if one is available.
        if len(self._attrname2lut) == 1:
            # Can't do dict.values()[0], so have to iterate over the 1 element
            for v in self._attrname2lut.values():
                lut = v
        elif "labels" in self._attrname2lut:
            lut = self._attrname2lut["labels"]
        elif "label" in self._attrname2lut:
            lut = self._attrname2lut["label"]
        else:
            lut = None
        mat = rendering.Material()
        mat.shader = "unlitLine"
        mat.line_width = 2 * self.window.scaling
        if self._consolidate_bounding_boxes:
            name = Model.bounding_box_prefix.split("/")[0]
            boxes = []
            # When consolidated we assume bbox_data.name is the geometry name.
            if animation_frame is None:
                for bbox_data in self._objects.bounding_box_data:
                    if bbox_data.name in self._name2treenode and self._name2treenode[
                            bbox_data.name].checkbox.checked:
                        boxes += bbox_data.boxes
            else:
                geom_name = self._animation_frames[animation_frame]
                for bbox_data in self._objects.bounding_box_data:
                    if bbox_data.name == geom_name:
                        boxes = bbox_data.boxes
                        break
            self._3d.scene.remove_geometry(name)
            if len(boxes) > 0:
                lines = BoundingBox3D.create_lines(boxes, lut)
                self._3d.scene.add_geometry(name, lines, mat)
                if name not in self._name2treenode:
                    self._add_tree_name(name, is_geometry=False)
            self._3d.force_redraw()
        else:
            # Don't run this more than once if we aren't consolidating,
            # because nothing will change.
            if len(self._objects.bounding_box_data) > 0:
                if self._objects.bounding_box_data[
                        0].name in self._name2treenode:
                    return
            for bbox_data in self._objects.bounding_box_data:
                lines = BoundingBox3D.create_lines(bbox_data.boxes, lut)
                self._3d.scene.add_geometry(bbox_data.name, lines, mat)
            for bbox_data in self._objects.bounding_box_data:
                self._add_tree_name(bbox_data.name, is_geometry=False)
            self._3d.force_redraw()
    def _update_gradient(self):
        """Refresh self._gradient from the current shader selection: discrete
        LUT points for the labels shader, continuous gradient for colormaps."""
        if self._shader.selected_text == self.LABELS_NAME:
            colors = self._label_edit.get_colors()
            n = float(len(colors) - 1)
            if n >= 1:
                # Spread the label colors evenly over [0, 1].
                self._gradient.points = [
                    rendering.Gradient.Point(
                        float(i) / n, [
                            colors[i][0], colors[i][1], colors[i][2],
                            colors[i][3]
                        ]) for i in range(0, len(colors))
                ]
            else:
                # Fewer than two labels: fall back to a single magenta point.
                self._gradient.points = [
                    rendering.Gradient.Point(0.0, [1.0, 0.0, 1.0, 1.0])
                ]
            self._gradient.mode = rendering.Gradient.LUT
        else:
            cmap = self._colormaps.get(self._shader.selected_text)
            if cmap is not None:
                self._gradient.points = [
                    rendering.Gradient.Point(
                        p.value, [p.color[0], p.color[1], p.color[2], 1.0])
                    for p in cmap.points
                ]
                self._gradient.mode = rendering.Gradient.GRADIENT
def _update_geometry_colors(self):
material = self._get_material()
for name, tcloud in self._objects.tclouds.items():
if not tcloud.is_empty() and self._3d.scene.has_geometry(name):
self._3d.scene.modify_geometry_material(name, material)
self._3d.force_redraw()
    def _update_datasource_combobox(self):
        """Repopulate the data-source combobox with the attributes available
        on the selected geometries, keeping the current choice if possible."""
        current = self._datasource_combobox.selected_text
        self._datasource_combobox.clear_items()
        available_attrs = self._get_available_attrs()
        for attr_name in available_attrs:
            self._datasource_combobox.add_item(attr_name)
        if current in available_attrs:
            self._datasource_combobox.selected_text = current
        elif len(available_attrs) > 0:
            self._datasource_combobox.selected_text = available_attrs[0]
        else:
            # If no attributes, two possibilities:
            # 1) no geometries are selected: don't change anything
            # 2) geometries are selected: color solid
            has_checked = False
            for n, node in self._name2treenode.items():
                if node.checkbox.checked and self._is_tree_name_geometry(n):
                    has_checked = True
                    break
            if has_checked:
                self._set_shader(self.SOLID_NAME)
    def _update_shaders_combobox(self):
        """Rebuild the shader combobox to match the selected attribute: RGB
        only for vector attributes, Labels only when a LUT exists, and pick a
        sensible default shader."""
        current_attr = self._datasource_combobox.selected_text
        current_shader = self._shader.selected_text
        has_lut = (current_attr in self._attrname2lut)
        is_scalar = True
        selected_names = self._get_selected_names()
        # Decide scalar vs. vector from the first selected geometry's shape.
        if len(selected_names) > 0 and len(
                self._objects.get_attr_shape(selected_names[0],
                                             current_attr)) > 1:
            is_scalar = False
        self._shader.clear_items()
        if not is_scalar:
            self._shader.add_item(self.COLOR_NAME)
        if has_lut:
            self._shader.add_item(self.LABELS_NAME)
            self._label_edit.set_labels(self._attrname2lut[current_attr])
        self._shader.add_item(self.RAINBOW_NAME)
        self._shader.add_item(self.GREYSCALE_NAME)
        self._shader.add_item(self.SOLID_NAME)
        if current_shader == self.LABELS_NAME and has_lut:
            self._set_shader(self.LABELS_NAME)
        elif is_scalar:
            self._set_shader(self.RAINBOW_NAME)
    def _update_attr_range(self):
        """Recompute scalar min/max for the selected attribute/channel and
        sync the colormap editor if a colormap shader is active."""
        attr_name = self._datasource_combobox.selected_text
        current_channel = self._colormap_channel.selected_index
        self._scalar_min, self._scalar_max = self._objects.get_attr_minmax(
            attr_name, current_channel)
        if self._shader.selected_text in self._colormaps:
            cmap = self._colormaps[self._shader.selected_text]
            self._colormap_edit.update(cmap, self._scalar_min, self._scalar_max)
    def _set_shader(self, shader_name, force_update=False):
        """Select *shader_name*, switch the matching options sub-panel, and
        re-color the geometry. No-op if already selected unless force_update."""
        # Disable channel if we are using a vector shader. Always do this to
        # ensure that the UI is consistent.
        if shader_name == Visualizer.COLOR_NAME:
            self._colormap_channel.enabled = False
        else:
            self._colormap_channel.enabled = True
        if shader_name == self._shader.selected_text and not force_update:
            return
        self._shader.selected_text = shader_name
        idx = self._shadername2panelidx[self._shader.selected_text]
        self._shader_panels.selected_index = idx
        if shader_name in self._colormaps:
            cmap = self._colormaps[shader_name]
            self._colormap_edit.update(cmap, self._scalar_min, self._scalar_max)
        self._update_geometry_colors()
def _on_layout(self, theme):
frame = self.window.content_rect
em = theme.font_size
panel_width = 20 * em
panel_rect = gui.Rect(frame.get_right() - panel_width, frame.y,
panel_width, frame.height - frame.y)
self._panel.frame = panel_rect
self._3d.frame = gui.Rect(frame.x, frame.y, panel_rect.x - frame.x,
frame.height - frame.y)
    def _on_arcball_mode(self):
        """Button handler: switch the mouse to arcball camera rotation."""
        self._3d.set_view_controls(gui.SceneWidget.ROTATE_CAMERA)
    def _on_fly_mode(self):
        """Button handler: switch the mouse to fly navigation."""
        self._3d.set_view_controls(gui.SceneWidget.FLY)
    def _on_reset_camera(self):
        """Button handler: re-fit the camera to the selected geometries."""
        self.setup_camera()
    def _on_dataset_selection_changed(self, item):
        """Tree-selection handler: lazily load the clicked geometry (ignoring
        non-geometry nodes such as bounding boxes)."""
        name = self._treeid2name[item]
        if not self._is_tree_name_geometry(name):
            return
        def ui_callback():
            self._update_attr_range()
            self._update_geometry(check_unloaded=True)
            self._update_bounding_boxes()
        if not self._objects.is_loaded(name):
            self._load_geometry(name, ui_callback)
    def _on_display_tab_changed(self, index):
        """Tab handler: entering the Animation tab (index 1) snapshots the
        selected geometries as frames; leaving it restores checkbox state."""
        if index == 1:
            self._animation_frames = self._get_selected_names()
            self._slider.set_limits(0, len(self._animation_frames) - 1)
            self._on_animation_slider_changed(self._slider.int_value)
            # _on_animation_slider_changed() calls _update_bounding_boxes()
        else:
            for name, node in self._name2treenode.items():
                self._3d.scene.show_geometry(name, node.checkbox.checked)
            self._update_bounding_boxes()
    def _on_animation_slider_changed(self, new_value):
        """Slider handler: show only the frame at *new_value*, update its
        bounding boxes, and display its name next to the slider."""
        idx = int(new_value)
        for i in range(0, len(self._animation_frames)):
            self._3d.scene.show_geometry(self._animation_frames[i], (i == idx))
        self._update_bounding_boxes(animation_frame=idx)
        self._3d.force_redraw()
        self._slider_current.text = self._animation_frames[idx]
        # Widen the label's frame so long names are not clipped.
        r = self._slider_current.frame
        self._slider_current.frame = gui.Rect(r.x, r.y,
                                              self._slider.frame.get_right(),
                                              r.height)
    def _on_start_animation(self):
        """Play-button handler: turn the button into Stop and start ticking."""
        def on_tick():
            return self._on_animate()
        self._play.text = "Stop"
        self._play.set_on_clicked(self._on_stop_animation)
        # Zero forces the first _on_animate() tick to advance immediately.
        self._last_animation_time = 0.0
        self.window.set_on_tick_event(on_tick)
def _on_animate(self):
now = time.time()
if now >= self._last_animation_time + self._animation_delay_secs:
idx = (self._slider.int_value + 1) % len(self._animation_frames)
self._slider.int_value = idx
self._on_animation_slider_changed(idx)
self._last_animation_time = now
return True
return False
    def _on_stop_animation(self):
        """Stop-button handler: stop ticking and restore the Play button."""
        self.window.set_on_tick_event(None)
        self._play.text = "Play"
        self._play.set_on_clicked(self._on_start_animation)
def _on_bgcolor_changed(self, new_color):
bg_color = [
new_color.red, new_color.green, new_color.blue, new_color.alpha
]
self._3d.scene.set_background(bg_color)
self._3d.force_redraw()
    def _on_datasource_changed(self, attr_name, idx):
        """Data-source combobox handler: rebuild the channel list for the new
        attribute, refresh ranges/shaders, pick a sensible shader, and
        re-upload geometry."""
        selected_names = self._get_selected_names()
        n_channels = 1
        if len(selected_names) > 0:
            shape = self._objects.get_attr_shape(selected_names[0], attr_name)
            if len(shape) <= 1:
                n_channels = 1
            else:
                n_channels = max(1, shape[1])
        # Keep the old channel if it still exists, else clamp.
        current_channel = max(0, self._colormap_channel.selected_index)
        current_channel = min(n_channels - 1, current_channel)
        self._colormap_channel.clear_items()
        for i in range(0, n_channels):
            self._colormap_channel.add_item(str(i))
        self._colormap_channel.selected_index = current_channel
        self._update_attr_range()
        self._update_shaders_combobox()
        # Try to intelligently pick a shader.
        current_shader = self._shader.selected_text
        if current_shader == Visualizer.SOLID_NAME:
            pass
        elif attr_name in self._attrname2lut:
            self._set_shader(Visualizer.LABELS_NAME)
        elif attr_name == "colors":
            self._set_shader(Visualizer.COLOR_NAME)
        elif n_channels >= 3:
            self._set_shader(Visualizer.RAINBOW_NAME)
        elif current_shader == Visualizer.COLOR_NAME:  # vector -> scalar
            self._set_shader(Visualizer.RAINBOW_NAME)
        else:  # changing from one scalar to another, don't change
            pass
        self._update_geometry()
    def _on_channel_changed(self, name, idx):
        """Channel combobox handler: rescale and recolor for the new channel."""
        self._update_attr_range()
        self._update_geometry()  # need to recompute scalars array
    def _on_shader_changed(self, name, idx):
        """Shader combobox handler."""
        # _shader.current_text is already name, so we need to force an update
        self._set_shader(name, force_update=True)
    def _on_shader_color_changed(self, color):
        """Solid-color edit handler: recolor geometry with the new color."""
        self._update_geometry_colors()
    def _on_labels_changed(self):
        """Label-LUT edit handler: recolor geometry with the edited labels."""
        self._update_geometry_colors()
    def _on_colormap_changed(self):
        """Colormap edit handler: store the edited colormap and recolor."""
        self._colormaps[
            self._shader.selected_text] = self._colormap_edit.colormap
        self._update_geometry_colors()
    def _on_rgb_multiplier(self, text, idx):
        """Max-value combobox handler: renormalize RGB data (255 vs 1.0)."""
        self._update_geometry()
def _get_selected_names(self):
# Note that things like bounding boxes could be in the tree, and we
# do not want to include them in the list of things selected, even if
# they are checked.
selected_names = []
for n in self._objects.data_names:
if self._name2treenode[n].checkbox.checked:
selected_names.append(n)
return selected_names
    def _get_available_attrs(self):
        """Return the attribute names available on the selected geometries."""
        selected_names = self._get_selected_names()
        return self._objects.get_available_attrs(selected_names)
def _is_tree_name_geometry(self, name):
return (name in self._objects.data_names)
@staticmethod
def _make_tcloud_array(np_array, copy=False):
if copy or not np_array.data.c_contiguous:
return o3d.core.Tensor(np_array)
else:
return o3d.core.Tensor.from_numpy(np_array)
    def visualize_dataset(self,
                          dataset,
                          split,
                          indices=None,
                          width=1024,
                          height=768):
        """Visualize a dataset.
        **Example:**
            Minimal example for visualizing a dataset::
                import open3d.ml.torch as ml3d  # or open3d.ml.tf as ml3d
                dataset = ml3d.datasets.SemanticKITTI(dataset_path='/path/to/SemanticKITTI/')
                vis = ml3d.vis.Visualizer()
                vis.visualize_dataset(dataset, 'all', indices=range(100))
        **Args:**
            dataset: The dataset to use for visualization.
            split: The dataset split to be used, such as 'training'
            indices: An iterable with a subset of the data points to visualize, such as [0,2,3,4].
            width: The width of the visualization window.
            height: The height of the visualization window.
        """
        # Setup the labels
        lut = LabelLUT()
        if dataset.__class__.__name__ == "SemanticKITTI":
            # for val in sorted(dataset.label_to_names.values()):
            #     lut.add_label(val, val)
            for i, val in enumerate(dataset.label_to_names.values()):
                label_idx = dataset.label_indices[i]
                lut.add_label(val, label_idx)
        elif dataset.__class__.__name__ == "Rellis3D":
            for i in range(len(dataset.label_to_names)):
                label_idx = dataset.label_indices[i]
                label_name = dataset.label_to_names.get(label_idx)
                #label_colour = dataset.colour_map.get(label_idx)
                # Colors are stored 0-255; the LUT expects 0-1 floats.
                label_colour = [colour/255. for colour in dataset.colour_map.get(label_idx)]
                label_colour = label_colour[:: -1] #BGR to RGB
                #print("label_idx = ", label_idx)
                #print("label_name = ", label_name)
                #print("label_colour = ", label_colour)
                lut.add_label(label_name, label_idx, label_colour)
                #print("\n\n")
            # for i, val in enumerate(dataset.label_to_names.values()):
            #     label_idx = dataset.label_indices[i]
            #     lut.add_label(val, label_idx)
        self.set_lut("labels", lut)
        self._consolidate_bounding_boxes = True
        self._init_dataset(dataset, split, indices)
        self._visualize("Open3D - " + dataset.name, width, height)
    def visualize(self,
                  data,
                  lut=None,
                  bounding_boxes=None,
                  width=1024,
                  height=768):
        """Visualize a custom point cloud data.
        **Example:**
            Minimal example for visualizing a single point cloud with an
            attribute::
                import numpy as np
                import open3d.ml.torch as ml3d
                # or import open3d.ml.tf as ml3d
                data = [ {
                    'name': 'my_point_cloud',
                    'points': np.random.rand(100,3).astype(np.float32),
                    'point_attr1': np.random.rand(100).astype(np.float32),
                    } ]
                vis = ml3d.vis.Visualizer()
                vis.visualize(data)
        **Args:**
            data: A list of dictionaries. Each dictionary is a point cloud with
                attributes. Each dictionary must have the entries 'name' and
                'points'. Points and point attributes can be passed as numpy
                arrays, PyTorch tensors or TensorFlow tensors.
            lut: Optional LabelLUT used to color the "labels" attribute.
            bounding_boxes: Optional list of bounding boxes to display.
            width: window width.
            height: window height.
        """
        self._init_data(data)
        if lut is not None:
            self.set_lut("labels", lut)
        if bounding_boxes is not None:
            prefix = Model.bounding_box_prefix
            # Filament crashes if you have too many items, and anyway,
            # hundreds of items is unwieldy in a list. So combine items if
            # we have too many.
            group_size = int(math.floor(float(len(bounding_boxes)) / 100.0))
            if group_size < 2:
                box_data = [
                    Model.BoundingBoxData(prefix + str(bbox), [bbox])
                    for bbox in bounding_boxes
                ]
            else:
                # Group boxes into chunks of group_size, naming each chunk by
                # the index range it covers.
                box_data = []
                current_group = []
                n = len(bounding_boxes)
                for i in range(0, n):
                    current_group.append(bounding_boxes[i])
                    if len(current_group) >= group_size or i == n - 1:
                        if i < n - 1:
                            name = prefix + "Boxes " + str(
                                i + 1 - group_size) + " - " + str(i)
                        else:
                            if len(current_group) > 1:
                                name = prefix + "Boxes " + str(
                                    i + 1 - len(current_group)) + " - " + str(i)
                            else:
                                name = prefix + "Box " + str(i)
                        data = Model.BoundingBoxData(name, current_group)
                        box_data.append(data)
                        current_group = []
            self._objects.bounding_box_data = box_data
        self._visualize("Open3D", width, height)
    def _visualize(self, title, width, height):
        """Create the window, show only the first object, load all geometries
        on a worker thread, then finish UI setup and run the event loop."""
        gui.Application.instance.initialize()
        self._init_user_interface(title, width, height)
        self._3d.scene.downsample_threshold = 400000
        # Turn all the objects off except the first one
        for name, node in self._name2treenode.items():
            node.checkbox.checked = False
            self._3d.scene.show_geometry(name, False)
        for name in [self._objects.data_names[0]]:
            self._name2treenode[name].checkbox.checked = True
            self._3d.scene.show_geometry(name, True)
        def on_done_ui():
            # Add bounding boxes here: bounding boxes belonging to the dataset
            # will not be loaded until now.
            self._update_bounding_boxes()
            self._update_datasource_combobox()
            self._update_shaders_combobox()
            # Display "colors" by default if available, "points" if not
            available_attrs = self._get_available_attrs()
            self._set_shader(self.SOLID_NAME, force_update=True)
            if "colors" in available_attrs:
                self._datasource_combobox.selected_text = "colors"
            elif "points" in available_attrs:
                self._datasource_combobox.selected_text = "points"
            # Suppress redundant geometry uploads while the combobox change
            # cascades; one explicit _update_geometry() follows.
            self._dont_update_geometry = True
            self._on_datasource_changed(
                self._datasource_combobox.selected_text,
                self._datasource_combobox.selected_index)
            self._update_geometry_colors()
            self._dont_update_geometry = False
            # _datasource_combobox was empty, now isn't, re-layout.
            self.window.set_needs_layout()
            self._update_geometry()
            self.setup_camera()
        self._load_geometries(self._objects.data_names, on_done_ui)
        gui.Application.instance.run()
|
visualizer.py | from subprocess import call
from xml.dom import minidom
import xml.etree.ElementTree as ET
import ast
import LCD_1in44
import LCD_1in3
import LCD_Config
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from PIL import ImageColor
import RPi.GPIO as GPIO
import time
import random
import webcolors as wc
import sys
import os
import datetime
import psutil
import fcntl
os.chdir(sys.path[0])
import mido
from mido import MidiFile, Message, tempo2bpm, MidiTrack,MetaMessage
from mido.sockets import PortServer
from neopixel import *
import argparse
import threading
# Ensure there is only one instance of the script running.
fh = 0
def singleton():
    """Acquire an exclusive, non-blocking lock on this script file.

    Exits the process if another instance already holds the lock. The open
    file handle is kept in the module-global ``fh`` so the lock survives for
    the lifetime of the process.
    """
    global fh
    fh = open(os.path.realpath(__file__), 'r')
    try:
        fcntl.flock(fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except OSError:
        # flock() failing with EWOULDBLOCK means another instance runs.
        # (The original bare `except:` also swallowed KeyboardInterrupt
        # and unrelated errors.)
        sys.exit(0)
def restart_script():
    """Replace the current process with a fresh run of this script."""
    interpreter = sys.executable
    os.execl(interpreter, interpreter, *sys.argv)
# Acquire the single-instance lock before touching any hardware.
singleton()
# Command-line options for the LED/display code.
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--clear', action='store_true', help='clear the display on exit')
parser.add_argument('-d', '--display', type=str, help="choose type of display: '1in44' (default) | '1in3'")
args = parser.parse_args()
class Server:
    """Minimal MIDI-over-TCP hub.

    Accepts client connections on port 8080, buffers incoming messages in
    ``pending_messages``, and broadcasts outgoing messages to all clients.
    """

    def __init__(self):
        self.port_server = PortServer('0.0.0.0', 8080)
        self.port_clients = []
        self.pending_messages = []

    def server_loop(self):
        """One non-blocking pass: prune closed clients, accept a new
        connection if one is waiting, and drain incoming messages."""
        # Remove closed clients
        self.port_clients = [c for c in self.port_clients if not c.closed]
        # Handle connections.
        client = self.port_server.accept(block=False)
        if client:
            print('Connection from {}'.format(client.name))
            self.port_clients.append(client)
        # Receive messages.
        for client in self.port_clients:
            if client.closed:
                continue
            try:
                for message in client.iter_pending():
                    self.pending_messages.append(message)
                    print('Received {} from {}'.format(message, client))
            except Exception:
                # Best-effort: a misbehaving client must not kill the loop.
                # (Was a bare `except:` which also swallowed SystemExit and
                # KeyboardInterrupt.)
                pass

    def iter_pending(self):
        """Return all buffered messages and clear the buffer."""
        msgs = self.pending_messages
        self.pending_messages = []
        return msgs

    def poll(self):
        """Return the oldest buffered message without consuming it, or None."""
        if len(self.pending_messages) > 0:
            return self.pending_messages[0]

    def send(self, msg):
        """Broadcast *msg* to every open client, ignoring per-client errors."""
        for client in self.port_clients:
            if client.closed:
                continue
            try:
                client.send(msg)
            except Exception:
                pass
# Module-level server instance shared by the MIDI processing code.
server = Server()
class UserSettings:
    """XML-backed user settings with lazy write-out.

    Settings live in ``settings.xml``; ``default_settings.xml`` provides the
    factory defaults used when the settings file is missing or corrupt.
    """

    def __init__(self):
        self.pending_changes = False
        try:
            self.tree = ET.parse("settings.xml")
            self.root = self.tree.getroot()
        except (OSError, ET.ParseError):
            # Missing or unparseable settings file: restore defaults.
            # (Was a bare `except:`, which also hid unrelated errors.)
            print("Can't load settings file, restoring defaults")
            self.reset_to_default()
        self.pending_reset = False

    def get_setting_value(self, name):
        """Return the text content of the setting element called *name*."""
        return self.root.find(name).text

    def change_setting_value(self, name, value):
        """Update setting *name* in memory and mark the settings dirty."""
        self.root.find(str(name)).text = str(value)
        self.pending_changes = True

    def save_changes(self):
        """If dirty, write settings.xml and re-read it from disk."""
        if self.pending_changes:
            self.pending_changes = False
            self.tree.write("settings.xml")
            self.tree = ET.parse("settings.xml")
            self.root = self.tree.getroot()

    def reset_to_default(self):
        """Overwrite settings.xml with the contents of default_settings.xml."""
        self.tree = ET.parse("default_settings.xml")
        self.tree.write("settings.xml")
        self.root = self.tree.getroot()
        self.pending_reset = True
class LedStrip:
    """Owns the WS281x LED strip object plus the user-tunable strip parameters.

    Parameters are loaded from ``usersettings`` and persisted back through it.
    The three ``change_*`` methods mutate one parameter, persist it, and rebuild
    the NeoPixel driver so the change takes effect immediately.
    """

    def __init__(self):
        self.brightness_percent = int(usersettings.get_setting_value("brightness_percent"))
        self.led_number = int(usersettings.get_setting_value("led_count"))
        self.invert = int(usersettings.get_setting_value("led_invert"))
        self.shift = int(usersettings.get_setting_value("shift"))
        self.brightness = 255 * self.brightness_percent / 100
        # Per-key state arrays, one slot per LED.
        self.keylist = [0] * self.led_number
        self.keylist_status = [0] * self.led_number
        self.keylist_color = [0] * self.led_number
        # LED strip configuration:
        self.LED_COUNT = int(self.led_number)  # Number of LED pixels.
        self.LED_PIN = 18                      # GPIO pin connected to the pixels (18 uses PWM!).
        # LED_PIN = 10 would use SPI (/dev/spidev0.0) instead.
        self.LED_FREQ_HZ = 800000              # LED signal frequency in hertz (usually 800khz)
        self.LED_DMA = 10                      # DMA channel to use for generating signal (try 10)
        self.LED_BRIGHTNESS = int(self.brightness)  # Set to 0 for darkest and 255 for brightest
        self.LED_INVERT = False                # True to invert the signal (when using NPN transistor level shift)
        self.LED_CHANNEL = 0                   # set to '1' for GPIOs 13, 19, 41, 45 or 53
        self._reinit_strip()

    def _reinit_strip(self):
        """(Re)create and start the NeoPixel driver from the current parameters."""
        self.strip = Adafruit_NeoPixel(int(self.led_number), self.LED_PIN, self.LED_FREQ_HZ,
                                       self.LED_DMA, self.LED_INVERT, int(self.brightness),
                                       self.LED_CHANNEL)
        # Intialize the library (must be called once before other functions).
        self.strip.begin()

    def change_brightness(self, value):
        """Adjust brightness by *value* percent (clamped to 1..100) and rebuild the strip."""
        self.brightness_percent += value
        if self.brightness_percent <= 0:
            self.brightness_percent = 1
        elif self.brightness_percent > 100:
            self.brightness_percent = 100
        self.brightness = 255 * self.brightness_percent / 100
        usersettings.change_setting_value("brightness_percent", self.brightness_percent)
        # Stop any running screensaver animation so it does not fight the rebuilt strip.
        if menu.screensaver_is_running:
            menu.screensaver_is_running = False
        self._reinit_strip()
        fastColorWipe(self.strip, True)

    def change_led_count(self, value):
        """Adjust the number of LEDs by *value* (minimum 1) and rebuild the strip."""
        self.led_number += value
        if self.led_number <= 0:
            self.led_number = 1
        # Keep LED_COUNT in sync so screens that read it (e.g. the amps
        # estimate on the Brightness page) show the new count.
        self.LED_COUNT = int(self.led_number)
        usersettings.change_setting_value("led_count", self.led_number)
        self.keylist = [0] * self.led_number
        self.keylist_status = [0] * self.led_number
        self.keylist_color = [0] * self.led_number
        self._reinit_strip()
        fastColorWipe(self.strip, True)

    def change_shift(self, value):
        """Shift the key-to-LED mapping by *value* LEDs and persist it."""
        self.shift += value
        usersettings.change_setting_value("shift", self.shift)
        fastColorWipe(self.strip, True)

    def set_adjacent_colors(self, note, color, led_turn_off):
        """Light the LEDs on both sides of *note*, honoring the adjacent-LED mode."""
        if ledsettings.adjacent_mode == "RGB" and color != 0 and led_turn_off != True:
            color = Color(int(ledsettings.adjacent_green), int(ledsettings.adjacent_red), int(ledsettings.adjacent_blue))
        if ledsettings.adjacent_mode != "Off":
            self.strip.setPixelColor(int(note)+1, color)
            self.strip.setPixelColor(int(note)-1, color)
# Joystick / button GPIO assignments (BCM numbering).
KEYRIGHT = 26
KEYLEFT = 5
KEYUP = 6
KEYDOWN = 19
KEY1 = 21
KEY2 = 20
KEY3 = 16
JPRESS = 13
BACKLIGHT = 24
# pin numbers are interpreted as BCM pin numbers.
GPIO.setmode(GPIO.BCM)
# Every key pin becomes an input with the internal pull-up enabled.
for _key_pin in (KEYRIGHT, KEYLEFT, KEYUP, KEYDOWN, KEY1, KEY2, KEY3, JPRESS):
    GPIO.setup(_key_pin, GPIO.IN, GPIO.PUD_UP)
#LED animations
def fastColorWipe(strip, update):
    """Fill the whole strip with the backlight color in one pass.

    strip: the NeoPixel strip object.
    update: when truthy, push the frame to the hardware with strip.show().

    The backlight RGB is scaled by the backlight brightness percent. All three
    channels now use the same float scaling (the original cast only blue via
    float(), which was inconsistent). Color is called as (green, red, blue)
    here, matching every other Color() call in this file — the strip appears
    to be GRB-ordered; confirm against the hardware if changing.
    """
    scale_factor = float(ledsettings.backlight_brightness_percent) / 100
    red = int(ledsettings.get_backlight_color("Red")) * scale_factor
    green = int(ledsettings.get_backlight_color("Green")) * scale_factor
    blue = int(ledsettings.get_backlight_color("Blue")) * scale_factor
    color = Color(int(green), int(red), int(blue))
    for i in range(strip.numPixels()):
        strip.setPixelColor(i, color)
    if update:
        strip.show()
def theaterChase(strip, color, wait_ms=50):
    """Movie theater light style chaser animation.

    Runs until menu.screensaver_is_running is flipped off by another thread.
    NOTE(review): the *color* parameter is unused — colors are re-read from
    ledsettings on every loop iteration.
    """
    # If another screensaver thread is active, signal it to stop and give it
    # a second to exit its loop before clearing the strip and taking over.
    if (menu.screensaver_is_running == True):
        menu.screensaver_is_running = False
        time.sleep(1)
    fastColorWipe(ledstrip.strip, True)
    menu.t = threading.currentThread()
    j = 0
    menu.screensaver_is_running = True
    while (menu.screensaver_is_running == True):
        red = int(ledsettings.get_color("Red"))
        green = int(ledsettings.get_color("Green"))
        blue = int(ledsettings.get_color("Blue"))
        # Light every 5th pixel, offset by q, then blank it for the next phase.
        for q in range(5):
            for i in range(0, strip.numPixels(), 5):
                strip.setPixelColor(i+q, Color(green, red, blue))
            strip.show()
            time.sleep(wait_ms/1000.0)
            for i in range(0, strip.numPixels(), 5):
                strip.setPixelColor(i+q, 0)
        # j counts cycles but does not influence the colors drawn.
        j += 1
        if(j > 256):
            j = 0
    menu.screensaver_is_running = False
    fastColorWipe(ledstrip.strip, True)
def wheel(pos):
    """Generate rainbow colors across 0-255 positions."""
    if pos < 85:
        r, g, b = pos * 3, 255 - pos * 3, 0
    elif pos < 170:
        pos -= 85
        r, g, b = 255 - pos * 3, 0, pos * 3
    else:
        pos -= 170
        r, g, b = 0, pos * 3, 255 - pos * 3
    return Color(r, g, b)
def rainbow(strip, wait_ms=20):
    """Draw rainbow that fades across all pixels at once.

    All pixels share the same wheel color each frame; j sweeps 0..255 and
    wraps. Runs until menu.screensaver_is_running is cleared by another thread.
    """
    # Hand-off protocol shared by all screensavers: stop any running one first.
    if (menu.screensaver_is_running == True):
        menu.screensaver_is_running = False
        time.sleep(1)
    fastColorWipe(ledstrip.strip, True)
    menu.t = threading.currentThread()
    j = 0
    menu.screensaver_is_running = True
    while (menu.screensaver_is_running == True):
        for i in range(strip.numPixels()):
            # & 255 is redundant given the wrap below, but kept harmless.
            strip.setPixelColor(i, wheel((j) & 255))
        j += 1
        if(j >= 256):
            j = 0
        strip.show()
        time.sleep(wait_ms/1000.0)
    menu.screensaver_is_running = False
    fastColorWipe(ledstrip.strip, True)
def rainbowCycle(strip, wait_ms=20):
    """Draw rainbow that uniformly distributes itself across all pixels.

    Each pixel gets a wheel color offset by its position, so one full rainbow
    spans the strip and rotates as j advances. Runs until the shared
    menu.screensaver_is_running flag is cleared.
    """
    if (menu.screensaver_is_running == True):
        menu.screensaver_is_running = False
        time.sleep(1)
    fastColorWipe(ledstrip.strip, True)
    menu.t = threading.currentThread()
    j = 0
    menu.screensaver_is_running = True
    while (menu.screensaver_is_running == True):
        for i in range(strip.numPixels()):
            strip.setPixelColor(i, wheel((int(i * 256 / strip.numPixels()) + j) & 255))
        j += 1
        if(j >= 256):
            j = 0
        strip.show()
        time.sleep(wait_ms/1000.0)
    menu.screensaver_is_running = False
    fastColorWipe(ledstrip.strip, True)
def theaterChaseRainbow(strip, wait_ms=10):
    """Rainbow movie theater light style chaser animation.

    Like theaterChase but every lit pixel takes a wheel color that shifts
    with j. Runs until menu.screensaver_is_running is cleared.
    """
    if (menu.screensaver_is_running == True):
        menu.screensaver_is_running = False
        time.sleep(1)
    fastColorWipe(ledstrip.strip, True)
    menu.t = threading.currentThread()
    j = 0
    menu.screensaver_is_running = True
    while (menu.screensaver_is_running == True):
        for q in range(5):
            for i in range(0, strip.numPixels(), 5):
                # NOTE(review): % 255 (not & 255) — position 255 maps to 0;
                # differs from the other animations' & 255. Confirm intended.
                strip.setPixelColor(i+q, wheel((i+j) % 255))
            strip.show()
            time.sleep(wait_ms/1000.0)
            for i in range(0, strip.numPixels(), 5):
                strip.setPixelColor(i+q, 0)
        j += 1
        if(j > 256):
            j = 0
    menu.screensaver_is_running = False
    fastColorWipe(ledstrip.strip, True)
def breathing(strip, wait_ms=2):
    """Breathe the whole strip: brightness oscillates between ~24% and ~98%.

    multiplier sweeps 24..98 in steps of 2 and back (direction flips at the
    bounds); each frame scales the current ledsettings color by multiplier/100.
    Runs until menu.screensaver_is_running is cleared by another thread.
    """
    if (menu.screensaver_is_running == True):
        menu.screensaver_is_running = False
        time.sleep(1)
    fastColorWipe(ledstrip.strip, True)
    menu.t = threading.currentThread()
    menu.screensaver_is_running = True
    multiplier = 24
    direction = 2
    while (menu.screensaver_is_running == True):
        if(multiplier >= 98 or multiplier < 24):
            direction *= -1
        multiplier += direction
        divide = multiplier / float(100)
        red = int(round(float(ledsettings.get_color("Red")) * float(divide)))
        green = int(round(float(ledsettings.get_color("Green")) * float(divide)))
        blue = int(round(float(ledsettings.get_color("Blue")) * float(divide)))
        for i in range(strip.numPixels()):
            strip.setPixelColor(i, Color(green, red, blue))
        strip.show()
        if(wait_ms > 0):
            time.sleep(wait_ms/1000.0)
    menu.screensaver_is_running = False
    fastColorWipe(ledstrip.strip, True)
def sound_of_da_police(strip, wait_ms=5):
    """Police-light effect: a band sweeps right on one half, another sweeps
    left on the other half, 40 pixels wide each, advancing 14 pixels per frame.

    NOTE(review): the 196/175/150 start and reset constants look tuned for a
    strip of roughly that length — confirm against the configured LED count.
    Runs until menu.screensaver_is_running is cleared by another thread.
    """
    if (menu.screensaver_is_running == True):
        menu.screensaver_is_running = False
        time.sleep(1)
    fastColorWipe(ledstrip.strip, True)
    menu.t = threading.currentThread()
    menu.screensaver_is_running = True
    middle = strip.numPixels() / 2
    r_start = 0
    l_start = 196
    while (menu.screensaver_is_running == True):
        r_start += 14
        l_start -= 14
        for i in range(strip.numPixels()):
            # Right half: band in [r_start, r_start+40); left half: mirrored band.
            if((i > middle) and i > (r_start) and i < (r_start + 40)):
                strip.setPixelColor(i, Color(0, 255, 0))
            elif((i < middle) and i < (l_start) and i > (l_start - 40)):
                strip.setPixelColor(i, Color(0, 0, 255))
            else:
                strip.setPixelColor(i, Color(0, 0, 0))
        if(r_start > 150):
            r_start = 0
            l_start = 175
        strip.show()
        time.sleep(wait_ms/1000.0)
    menu.screensaver_is_running = False
    fastColorWipe(ledstrip.strip, True)
def scanner(strip, wait_ms=1):
    """Larson-scanner effect: a bright spot with fading edges bounces along
    the strip, 3 pixels per frame, reversing at both ends.

    The color is sampled from ledsettings once, before the loop. Runs until
    menu.screensaver_is_running is cleared by another thread.
    """
    if (menu.screensaver_is_running == True):
        menu.screensaver_is_running = False
        time.sleep(1)
    fastColorWipe(ledstrip.strip, True)
    menu.t = threading.currentThread()
    menu.screensaver_is_running = True
    position = 0
    direction = 3
    scanner_length = 20
    red_fixed = ledsettings.get_color("Red")
    green_fixed = ledsettings.get_color("Green")
    blue_fixed = ledsettings.get_color("Blue")
    while (menu.screensaver_is_running == True):
        position += direction
        for i in range(strip.numPixels()):
            # Only pixels within scanner_length of the spot get any light.
            if(i > (position - (scanner_length)) and i < (position + (scanner_length))):
                distance_from_position = position - i
                if(distance_from_position < 0):
                    distance_from_position *= -1
                # Linear falloff over half the scanner length; goes negative
                # beyond that, which the divide > 0 check below turns into off.
                divide = ((scanner_length / 2) - distance_from_position) / float(scanner_length / 2)
                red = int(float(red_fixed) * float(divide))
                green = int(float(green_fixed) * float(divide))
                blue = int(float(blue_fixed) * float(divide))
                if(divide > 0):
                    strip.setPixelColor(i, Color(green, red, blue))
                else:
                    strip.setPixelColor(i, Color(0, 0, 0))
        if(position >= strip.numPixels() or position <= 1):
            direction *= -1
        strip.show()
        time.sleep(wait_ms/1000.0)
    menu.screensaver_is_running = False
    fastColorWipe(ledstrip.strip, True)
def get_rainbow_colors(pos, color):
    """Return the named channel ("red"/"green"/"blue") of the rainbow wheel
    at position *pos* (0-255); returns None for an unrecognized channel name.
    """
    pos = int(pos)
    if pos < 85:
        channels = {"green": pos * 3, "red": 255 - pos * 3, "blue": 0}
    elif pos < 170:
        offset = pos - 85
        channels = {"green": 255 - offset * 3, "red": 0, "blue": offset * 3}
    else:
        offset = pos - 170
        channels = {"green": 0, "red": offset * 3, "blue": 255 - offset * 3}
    return channels.get(color)
class MenuLCD:
    def __init__(self, xml_file_name):
        """Initialize the LCD driver, load the menu XML, and restore saved UI settings.

        xml_file_name: path to the menu-definition XML parsed with minidom.
        Relies on the module-level ``args`` (parsed CLI options) for the
        display variant and on the global ``usersettings`` store.
        """
        # Pick the display driver; the 1.3" panel needs a scaled TrueType font.
        if args.display == '1in3':
            self.LCD = LCD_1in3.LCD()
            self.font = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeMonoBold.ttf', self.scale(10))
        else:
            self.LCD = LCD_1in44.LCD()
            self.font = ImageFont.load_default()
        self.LCD.LCD_Init()
        # Show a solid green frame while the rest of the app starts up.
        self.image = Image.new("RGB", (self.LCD.width, self.LCD.height), "GREEN")
        self.draw = ImageDraw.Draw(self.image)
        self.LCD.LCD_ShowImage(self.image,0,0)
        self.xml_file_name = xml_file_name
        self.DOMTree = minidom.parse(xml_file_name)
        self.currentlocation = "menu";
        # Scroll-animation state for over-long entries (see show()).
        self.scroll_hold = 0
        self.cut_count = 0
        self.pointer_position = 0
        self.background_color = usersettings.get_setting_value("background_color")
        self.text_color = usersettings.get_setting_value("text_color")
        # Populate dynamic submenus (songs, MIDI ports) into the DOM tree.
        self.update_songs()
        self.update_ports()
        self.speed_multiplier = 1
        # Which info lines the screensaver shows; values are "0"/"1" strings.
        self.screensaver_settings = dict()
        self.screensaver_settings['time'] = usersettings.get_setting_value("time")
        self.screensaver_settings['date'] = usersettings.get_setting_value("date")
        self.screensaver_settings['cpu_chart'] = usersettings.get_setting_value("cpu_chart")
        self.screensaver_settings['cpu'] = usersettings.get_setting_value("cpu")
        self.screensaver_settings['ram'] = usersettings.get_setting_value("ram")
        self.screensaver_settings['temp'] = usersettings.get_setting_value("temp")
        self.screensaver_settings['network_usage'] = usersettings.get_setting_value("network_usage")
        self.screensaver_settings['sd_card_space'] = usersettings.get_setting_value("sd_card_space")
        self.screensaver_delay = usersettings.get_setting_value("screensaver_delay")
        self.screen_off_delay = usersettings.get_setting_value("screen_off_delay")
        self.led_animation_delay = usersettings.get_setting_value("led_animation_delay")
        self.led_animation = usersettings.get_setting_value("led_animation")
        self.screen_status = 1
        # Shared flag the LED animation threads poll and toggle.
        self.screensaver_is_running = False
def toggle_screensaver_settings(self, setting):
setting = setting.lower()
setting = setting.replace(" ", "_")
if(str(self.screensaver_settings[setting]) == "1"):
usersettings.change_setting_value(setting, "0")
self.screensaver_settings[setting] = "0"
else:
usersettings.change_setting_value(setting, "1")
self.screensaver_settings[setting] = "1"
def update_songs(self):
songs_list = os.listdir("Songs")
self.DOMTree = minidom.parse(self.xml_file_name)
for song in songs_list:
element = self.DOMTree.createElement("Choose_song")
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text" , song)
mc = self.DOMTree.getElementsByTagName("Play_MIDI")[0]
mc.appendChild(element)
    def update_sequence_list(self):
        """Load sequences.xml and append each named sequence to the
        LED_Strip_Settings submenu.

        Sequences are stored as <sequence_1>, <sequence_2>, ... elements; the
        inner loop probes increasing indices until the lookup raises (bare
        except -> break is the deliberate end-of-list signal). The outer bare
        except turns any parse/IO failure into an on-screen message.
        """
        try:
            sequences_tree = minidom.parse("sequences.xml")
            # Re-parsing the menu XML (via update_songs) wipes previously
            # appended dynamic entries before we add the sequences.
            self.update_songs()
            i = 0
            while(True):
                try:
                    i += 1
                    sequence_name = sequences_tree.getElementsByTagName("sequence_"+str(i))[0].getElementsByTagName("sequence_name")[0].firstChild.nodeValue
                    element = self.DOMTree.createElement("Sequences")
                    element.appendChild(self.DOMTree.createTextNode(""))
                    element.setAttribute("text" , str(sequence_name))
                    mc = self.DOMTree.getElementsByTagName("LED_Strip_Settings")[0]
                    mc.appendChild(element)
                except:
                    # No sequence with this index: reached the end of the list.
                    break
        except:
            self.render_message("Something went wrong", "Check your sequences file", 1500)
def update_ports(self):
ports = mido.get_input_names()
self.update_sequence_list()
for port in ports:
element = self.DOMTree.createElement("Input")
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text" , port)
mc = self.DOMTree.getElementsByTagName("Ports_Settings")[0]
mc.appendChild(element)
element = self.DOMTree.createElement("Playback")
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text" , port)
mc = self.DOMTree.getElementsByTagName("Ports_Settings")[2]
mc.appendChild(element)
def update_multicolor(self, colors_list):
i = 0
self.update_ports()
rgb_names = []
rgb_names = ["Red", "Green", "Blue"]
for color in colors_list:
i = i + 1
element = self.DOMTree.createElement("Multicolor")
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text" , "Color"+str(i))
mc = self.DOMTree.getElementsByTagName("LED_Color")[0]
mc.appendChild(element)
element = self.DOMTree.createElement("Color"+str(i))
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text" , "RGB Color"+str(i))
mc = self.DOMTree.getElementsByTagName("Multicolor")[0]
mc.appendChild(element)
#adding key range to menu
element = self.DOMTree.createElement("Color"+str(i))
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text" , "Key range"+str(i))
mc = self.DOMTree.getElementsByTagName("Multicolor")[0]
mc.appendChild(element)
element = self.DOMTree.createElement("Key_range"+str(i))
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text" , "Start")
mc = self.DOMTree.getElementsByTagName("Color"+str(i))[0]
mc.appendChild(element)
element = self.DOMTree.createElement("Key_range"+str(i))
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text" , "End")
mc = self.DOMTree.getElementsByTagName("Color"+str(i))[0]
mc.appendChild(element)
#adding delete
element = self.DOMTree.createElement("Color"+str(i))
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text" , "Delete")
mc = self.DOMTree.getElementsByTagName("Multicolor")[0]
mc.appendChild(element)
for rgb_name in rgb_names:
element = self.DOMTree.createElement("RGB_Color"+str(i))
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text" , rgb_name)
mc = self.DOMTree.getElementsByTagName("Color"+str(i))[0]
mc.appendChild(element)
def scale(self, size):
return int(round(size * self.LCD.font_scale))
    def show(self, position = "default", back_pointer_location=False):
        """Render the menu screen for *position* onto the LCD.

        position: menu node tag to draw; "default" redraws the current
            location (a refresh, which also advances the scrolling animation).
        back_pointer_location: entry text to highlight instead of the numeric
            pointer — used when returning from a submenu.

        Side effects: updates currentlocation, pointer_position, parent_menu,
        current_choice, cut_count/scroll_hold (scroll state) and pushes the
        finished frame to the LCD.
        """
        # refresh == 1 means redrawing the same screen; 0 means a new screen.
        if(position == "default" and self.currentlocation):
            position = self.currentlocation
            refresh = 1
        elif(position == "default" and not self.currentlocation):
            position = "menu"
            refresh = 1
        else:
            position = position.replace(" ", "_")
            self.currentlocation = position
            refresh = 0
        # Fresh frame with the menu title at the top.
        self.image = Image.new("RGB", (self.LCD.width, self.LCD.height), self.background_color)
        self.draw = ImageDraw.Draw(self.image)
        self.draw.text((self.scale(2), self.scale(5)), position.replace("_", " "), fill=self.text_color, font=self.font)
        #getting list of items in current menu
        staffs = self.DOMTree.getElementsByTagName(position)
        text_margin_top = self.scale(15)
        i = 0
        list_count = len(staffs)
        list_count -= 1
        # Scroll the visible window once the pointer passes the 10th row.
        if(self.pointer_position > 9):
            self.menu_offset = self.pointer_position - 9
        else:
            self.menu_offset = -1
        #looping through menu list
        for staff in staffs:
            # Clamp the pointer into the valid entry range.
            if(self.pointer_position > list_count):
                self.pointer_position = list_count
            elif(self.pointer_position < 0):
                self.pointer_position = 0
            #drawing little arrow to show there are more items above
            if(self.pointer_position > 9 and i < self.menu_offset):
                self.draw.line(
                    [
                        (self.scale(119), self.scale(20)),
                        (self.scale(125), self.scale(20))
                    ],
                    fill=self.text_color,
                    width=(self.scale(2))
                )
                self.draw.line(
                    [
                        (self.scale(119), self.scale(20)),
                        (self.scale(122), self.scale(17))
                    ],
                    fill=self.text_color,
                    width=(self.scale(2))
                )
                # NOTE(review): this third segment duplicates the one above —
                # compare with the "below" arrow (which mirrors its strokes);
                # the right-hand stroke of the up-arrow looks unintended.
                self.draw.line(
                    [
                        (self.scale(119), self.scale(20)),
                        (self.scale(122), self.scale(17))
                    ],
                    fill=self.text_color,
                    width=(self.scale(2))
                )
                # Entry is above the visible window: skip drawing its text.
                i += 1
                continue
            sid = staff.getAttribute("text")
            # Highlight either the pointed-at row or the named row (going back).
            if(not back_pointer_location):
                if(i == self.pointer_position):
                    try:
                        self.parent_menu = staff.parentNode.tagName
                    except:
                        self.parent_menu = "end"
                    self.draw.rectangle(
                        [
                            (0, text_margin_top),
                            (self.LCD.width, text_margin_top + self.scale(11))
                        ],
                        fill="Crimson"
                    )
                    self.draw.text((self.scale(3), text_margin_top), ">", fill=self.text_color, font=self.font)
                    self.current_choice = sid
            else:
                if(sid == back_pointer_location):
                    try:
                        self.parent_menu = staff.parentNode.tagName
                    except:
                        self.parent_menu = "data"
                    self.draw.rectangle([(0, text_margin_top), (self.LCD.width, text_margin_top + self.scale(11))], fill="Crimson")
                    self.draw.text((self.scale(3), text_margin_top), ">", fill=self.text_color, font=self.font)
                    self.current_choice = sid
                    self.pointer_position = i
            #drawing little arrow to show there are more items below
            if(i == 10 and self.pointer_position < list_count and list_count > 10):
                self.draw.line(
                    [
                        (self.scale(119), self.scale(120)),
                        (self.scale(125), self.scale(120))
                    ],
                    fill=self.text_color,
                    width=(self.scale(2))
                )
                self.draw.line(
                    [
                        (self.scale(119), self.scale(120)),
                        (self.scale(122), self.scale(123))
                    ],
                    fill=self.text_color,
                    width=(self.scale(2))
                )
                self.draw.line(
                    [
                        (self.scale(122), self.scale(123)),
                        (self.scale(125), self.scale(120))
                    ],
                    fill=self.text_color,
                    width=(self.scale(2))
                )
            #scrolling text if too long
            # cut is the number of leading characters to hide; cut_count
            # starts negative (-6) to pause before scrolling begins, and
            # scroll_hold pauses again once the end of the text is reached.
            if(self.pointer_position == i and len(sid) > 18):
                tobecontinued = ".."
                if(refresh == 1):
                    try:
                        self.cut_count += 1
                    except:
                        self.cut_count = -6
                else:
                    cut = 0
                    self.cut_count = -6
                if(self.cut_count > (len(sid) - 16)):
                    #hold scrolling on end
                    if(self.scroll_hold < 8):
                        self.cut_count -= 1
                        self.scroll_hold += 1
                        tobecontinued = ""
                    else:
                        self.cut_count = -6
                        self.scroll_hold = 0
                    cut = self.cut_count
                if(self.cut_count >= 0):
                    cut = self.cut_count
                else:
                    cut = 0
            else:
                cut = 0
                tobecontinued = ""
            i += 1
            #displaying screensaver status
            # On the Content screen, suffix each entry with +/- to show
            # whether that screensaver info line is enabled.
            if(self.currentlocation == "Content"):
                sid_temp = sid.lower()
                sid_temp = sid_temp.replace(" ", "_")
                if(str(menu.screensaver_settings[sid_temp]) == "1"):
                    sid_temp = " +"
                else:
                    sid_temp = " -"
                sid = sid+sid_temp
            self.draw.text((self.scale(10), text_margin_top), sid[cut:(18 + cut)]+tobecontinued, fill=self.text_color, font=self.font)
            text_margin_top += self.scale(10)
        # Context-dependent footer widgets for specific screens follow.
        #displaying color example
        if(self.currentlocation == "RGB"):
            self.draw.text((self.scale(10), self.scale(70)), str(ledsettings.get_colors()), fill=self.text_color, font=self.font)
            self.draw.rectangle([(self.scale(0), self.scale(80)), (self.LCD.width,self.LCD.height)],fill="rgb("+str(ledsettings.get_colors())+")")
        if("RGB_Color" in self.currentlocation):
            self.draw.text((self.scale(10), self.scale(70)), str(ledsettings.get_multicolors(self.currentlocation.replace('RGB_Color',''))), fill = self.text_color, font=self.font)
            self.draw.rectangle([(self.scale(0), self.scale(80)), (self.LCD.width,self.LCD.height)], fill="rgb("+str(ledsettings.get_multicolors(self.currentlocation.replace('RGB_Color','')))+")")
        if("Backlight_Color" in self.currentlocation):
            self.draw.text((self.scale(10), self.scale(70)), str(ledsettings.get_backlight_colors()), fill=self.text_color, font=self.font)
            self.draw.rectangle([(self.scale(0),self.scale(80)),(self.LCD.width,self.LCD.height)], fill="rgb("+str(ledsettings.get_backlight_colors())+")")
        if("Custom_RGB" in self.currentlocation):
            self.draw.text((self.scale(10), self.scale(70)), str(ledsettings.get_adjacent_colors()), fill=self.text_color, font=self.font)
            self.draw.rectangle([(self.scale(0), self.scale(80)),(self.LCD.width,self.LCD.height)], fill="rgb("+str(ledsettings.get_adjacent_colors())+")")
        if("Multicolor" in self.currentlocation):
            # Swatch for the currently highlighted ColorN entry; the bare
            # except covers entries without a color (e.g. "Add Color").
            try:
                self.draw.rectangle([(self.scale(115), self.scale(50)), (self.LCD.width, self.scale(80))], fill="rgb("+str(ledsettings.get_multicolors(self.current_choice.replace('Color','')))+")")
            except:
                pass
        if("Color_for_slow_speed" in self.currentlocation):
            red = ledsettings.speed_slowest["red"]
            green = ledsettings.speed_slowest["green"]
            blue = ledsettings.speed_slowest["blue"]
            self.draw.text((self.scale(10), self.scale(70)), str(red)+", "+str(green)+", "+str(blue), fill=self.text_color, font=self.font)
            self.draw.rectangle([(self.scale(0), self.scale(80)),(self.LCD.width,self.LCD.height)], fill="rgb("+str(red)+", "+str(green)+", "+str(blue)+")")
        if("Color_for_fast_speed" in self.currentlocation):
            red = ledsettings.speed_fastest["red"]
            green = ledsettings.speed_fastest["green"]
            blue = ledsettings.speed_fastest["blue"]
            self.draw.text((self.scale(10), self.scale(70)), str(red)+", "+str(green)+", "+str(blue), fill=self.text_color, font=self.font)
            self.draw.rectangle([(self.scale(0), self.scale(80)),(self.LCD.width,self.LCD.height)], fill="rgb("+str(red)+", "+str(green)+", "+str(blue)+")")
        #displaying rainbow offset value
        if(self.current_choice == "Offset"):
            self.draw.text((self.scale(10), self.scale(70)), str(ledsettings.rainbow_offset), fill=self.text_color, font=self.font)
        if(self.current_choice == "Scale"):
            self.draw.text((self.scale(10), self.scale(70)), str(ledsettings.rainbow_scale)+"%", fill=self.text_color, font=self.font)
        if(self.current_choice == "Timeshift"):
            self.draw.text((self.scale(10), self.scale(70)), str(ledsettings.rainbow_timeshift), fill=self.text_color, font=self.font)
        #displaying brightness value
        if(self.currentlocation == "Brightness"):
            self.draw.text((self.scale(10), self.scale(35)), str(ledstrip.brightness_percent)+"%", fill=self.text_color, font=self.font)
            # 60 mA per LED at full white, scaled by brightness percent.
            miliamps = int(ledstrip.LED_COUNT) * (60 / (100 / float(ledstrip.brightness_percent)))
            amps = round(float(miliamps) / float(1000),2)
            self.draw.text((self.scale(10), self.scale(50)), "Amps needed to "+"\n"+"power "+str(ledstrip.LED_COUNT)+" LEDS with "+"\n"+"white color: "+str(amps), fill=self.text_color, font=self.font)
        if(self.currentlocation == "Backlight_Brightness"):
            self.draw.text((self.scale(10), self.scale(35)), str(ledsettings.backlight_brightness_percent)+"%", fill=self.text_color, font=self.font)
        #displaying led count
        if(self.currentlocation == "Led_count"):
            self.draw.text((self.scale(10), self.scale(35)), str(ledstrip.led_number), fill=self.text_color, font=self.font)
        #displaying shift
        if(self.currentlocation == "Shift"):
            self.draw.text((self.scale(10), self.scale(35)), str(ledstrip.shift), fill=self.text_color, font=self.font)
        if("Key_range" in self.currentlocation):
            # Show the saved start/end key for the KeyRangeN screen.
            if(self.current_choice == "Start"):
                try:
                    self.draw.text((self.scale(10), self.scale(50)), str(ledsettings.multicolor_range[int(self.currentlocation.replace('Key_range',''))-1][0]), fill = self.text_color, font=self.font)
                except:
                    pass
            else:
                self.draw.text((self.scale(10), self.scale(50)), str(ledsettings.multicolor_range[int(self.currentlocation.replace('Key_range',''))-1][1]), fill = self.text_color, font=self.font)
        #displaying screensaver settings
        if(self.currentlocation == "Start_delay"):
            self.draw.text((self.scale(10), self.scale(70)), str(self.screensaver_delay), fill=self.text_color, font=self.font)
        if(self.currentlocation == "Turn_off_screen_delay"):
            self.draw.text((self.scale(10), self.scale(70)), str(self.screen_off_delay), fill = self.text_color, font=self.font)
        if(self.currentlocation == "Led_animation_delay"):
            self.draw.text((self.scale(10), self.scale(70)), str(self.led_animation_delay), fill = self.text_color, font=self.font)
        #displaying speed values
        if(self.currentlocation == "Period"):
            self.draw.text((self.scale(10), self.scale(70)), str(ledsettings.speed_period_in_seconds), fill = self.text_color, font=self.font)
        if(self.currentlocation == "Max_notes_in_period"):
            self.draw.text((self.scale(10), self.scale(70)), str(ledsettings.speed_max_notes), fill = self.text_color, font=self.font)
        self.LCD.LCD_ShowImage(self.image,0,0)
def change_pointer(self, direction):
if(direction == 0):
self.pointer_position -= 1
elif(direction == 1):
self.pointer_position += 1
menu.cut_count = -6
menu.show()
def enter_menu(self):
position = self.current_choice.replace(" ", "_")
if(not self.DOMTree.getElementsByTagName(position)):
menu.change_settings(self.current_choice, self.currentlocation)
else:
self.currentlocation = self.current_choice
self.pointer_position = 0
menu.cut_count = -6
menu.show(self.current_choice)
def go_back(self):
if(self.parent_menu != "data"):
location_readable = self.currentlocation.replace("_", " ")
menu.cut_count = -6
menu.show(self.parent_menu, location_readable)
def render_message(self, title, message, delay = 500):
self.image = Image.new("RGB", (self.LCD.width, self.LCD.height), self.background_color)
self.draw = ImageDraw.Draw(self.image)
self.draw.text((self.scale(3), self.scale(55)), title, fill=self.text_color, font=self.font)
self.draw.text((self.scale(3), self.scale(65)), message, fill=self.text_color, font=self.font)
self.LCD.LCD_ShowImage(self.image,0,0)
LCD_Config.Driver_Delay_ms(delay)
    def render_screensaver(self, hour, date, cpu, cpu_average, ram, temp, cpu_history = [], upload = 0, download = 0, card_space = 0):
        """Draw the screensaver/system-monitor frame and push it to the LCD.

        Only the info lines enabled in menu.screensaver_settings (value "1")
        are drawn. Fixed-height rows (time 31px, date 13px, cpu chart 35px)
        are laid out first; the remaining vertical space is split evenly
        among the other enabled lines.

        NOTE(review): cpu_history=[] is a shared mutable default — harmless
        here since it is only iterated, never mutated, but worth confirming
        callers always pass their own list.
        card_space: presumably a psutil disk-usage result (used/total/percent
        attributes) — confirm against the caller.
        """
        self.image = Image.new("RGB", (self.LCD.width, self.LCD.height), self.background_color)
        self.draw = ImageDraw.Draw(self.image)
        # First pass: measure how much height the enabled fixed rows need
        # and count the variable-height info lines.
        total_height = self.scale(1)
        info_count = 0
        for key, value in menu.screensaver_settings.items():
            if(str(key) == "time" and str(value) == "1"):
                total_height += self.scale(31)
            elif(str(key) == "date" and str(value) == "1"):
                total_height += self.scale(13)
            elif(str(key) == "cpu_chart" and str(value) == "1"):
                total_height += self.scale(35)
            else:
                if(str(value) == "1"):
                    info_count += 1
        height_left = self.LCD.height - total_height
        if(info_count > 0):
            info_height_font = height_left / info_count
        else:
            info_height_font = 0
        # Second pass: draw each enabled row, advancing top_offset as we go.
        top_offset = self.scale(2)
        if(menu.screensaver_settings["time"] == "1"):
            fonthour = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeSansBold.ttf', self.scale(31))
            self.draw.text((self.scale(4), top_offset), hour, fill=self.text_color, font=fonthour)
            top_offset += self.scale(31)
        if(menu.screensaver_settings["date"] == "1"):
            font_date = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeSansBold.ttf', self.scale(13))
            self.draw.text((self.scale(34), top_offset), date, fill=self.text_color, font=font_date)
            top_offset += self.scale(13)
        if(menu.screensaver_settings["cpu_chart"] == "1"):
            # Line chart: each sample is a 5px-wide segment, higher CPU = higher line.
            previous_height = 0
            c = self.scale(-5)
            for cpu_chart in cpu_history:
                height = self.scale(((100 - cpu_chart) * 35) / float(100))
                self.draw.line([(c, top_offset+previous_height), (c+self.scale(5), top_offset+height)], fill="Red", width=self.scale(1))
                previous_height = height
                c += self.scale(5)
            top_offset += self.scale(35)
        # Cap the info-line font so few lines don't become huge.
        if(info_height_font > self.scale(12)):
            info_height_font = self.scale(12)
        font = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeSansBold.ttf', info_height_font)
        if(menu.screensaver_settings["cpu"] == "1"):
            self.draw.text((self.scale(1), top_offset), "CPU: "+str(cpu)+"% ("+str(cpu_average)+"%)", fill = self.text_color, font=font)
            top_offset += info_height_font
        if(menu.screensaver_settings["ram"] == "1"):
            self.draw.text((self.scale(1), top_offset), "RAM usage: "+str(ram)+"%", fill = self.text_color, font=font)
            top_offset += info_height_font
        if(menu.screensaver_settings["temp"] == "1"):
            self.draw.text((self.scale(1), top_offset), "Temp: "+str(temp)+" C", fill = self.text_color, font=font)
            top_offset += info_height_font
        if(menu.screensaver_settings["network_usage"] == "1"):
            # Network line uses a slightly smaller cap so the long D/U text fits.
            if(info_height_font > self.scale(11)):
                info_height_font_network = self.scale(11)
            else:
                info_height_font_network = info_height_font
            font_network = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeSansBold.ttf', info_height_font_network)
            self.draw.text((self.scale(1), top_offset), "D:"+str("{:.2f}".format(download))+"Mb/s U:"+str("{:.2f}".format(upload))+"Mb/s", fill = self.text_color, font=font_network)
            top_offset += info_height_font_network
        if(menu.screensaver_settings["sd_card_space"] == "1"):
            self.draw.text((self.scale(1), top_offset), "SD: "+str(round(card_space.used/(1024.0 ** 3), 1))+"/"+str(round(card_space.total/(1024.0 ** 3), 1))+"("+str(card_space.percent)+"%)", fill = self.text_color, font=font)
            top_offset += info_height_font
        self.LCD.LCD_ShowImage(self.image,0,0)
    def change_settings(self, choice, location):
        """Dispatch a confirmed menu selection.

        Args:
            choice: the entry the user confirmed (e.g. "Confirm", "Fast",
                a color name, or a song filename).
            location: name of the menu page the selection was made on.

        Side effects range from persisting values through ``usersettings``
        to starting playback/animation threads and shutting the system down.
        """
        # --- LCD text / background colors ---
        if(location == "Text_Color"):
            self.text_color = choice
            usersettings.change_setting_value("text_color", self.text_color)
        if(location == "Background_Color"):
            self.background_color = choice
            usersettings.change_setting_value("background_color", self.background_color)
            # Never allow invisible text: fall back to red on a collision.
            if(self.text_color == self.background_color):
                self.text_color = "Red"
                usersettings.change_setting_value("text_color", self.text_color)
        # --- MIDI playback / recording ---
        if(location == "Choose_song"):
            saving.t = threading.Thread(target=play_midi, args=(choice,))
            saving.t.start()
        if(location == "Play_MIDI"):
            if(choice == "Save MIDI"):
                now = datetime.datetime.now()
                current_date = now.strftime("%Y-%m-%d %H:%M")
                menu.render_message("Recording stopped", "Saved as "+current_date, 2000)
                saving.save(current_date)
                menu.update_songs()
            if(choice =="Start recording"):
                menu.render_message("Recording started", "", 2000)
                saving.start_recording()
            if(choice == "Cancel recording"):
                menu.render_message("Recording canceled", "", 2000)
                saving.cancel_recording()
            if(choice == "Stop playing"):
                # Emptying is_playing_midi makes play_midi's loop break out.
                saving.is_playing_midi.clear()
                menu.render_message("Playing stopped", "", 2000)
                fastColorWipe(ledstrip.strip, True)
        # --- LED color / light mode selection ---
        if(location == "Solid"):
            ledsettings.change_color_name(wc.name_to_rgb(choice))
            ledsettings.color_mode = "Single"
            usersettings.change_setting_value("color_mode", ledsettings.color_mode)
        if(location == "Fading"):
            ledsettings.mode = "Fading"
            usersettings.change_setting_value("mode", ledsettings.mode)
            if (choice == "Very fast"):
                ledsettings.fadingspeed = 50
            elif(choice == "Fast"):
                ledsettings.fadingspeed = 40
            elif(choice == "Medium"):
                ledsettings.fadingspeed = 20
            elif(choice == "Slow"):
                ledsettings.fadingspeed = 10
            elif(choice == "Very slow"):
                ledsettings.fadingspeed = 2
            usersettings.change_setting_value("fadingspeed", ledsettings.fadingspeed)
        if(location == "Velocity"):
            ledsettings.mode = "Velocity"
            usersettings.change_setting_value("mode", ledsettings.mode)
            if(choice == "Fast"):
                ledsettings.fadingspeed = 10
            elif(choice == "Medium"):
                ledsettings.fadingspeed = 8
            elif(choice == "Slow"):
                ledsettings.fadingspeed = 6
            elif(choice == "Very slow"):
                ledsettings.fadingspeed = 3
            usersettings.change_setting_value("fadingspeed", ledsettings.fadingspeed)
        if(location == "Light_mode"):
            ledsettings.mode = "Normal"
            usersettings.change_setting_value("mode", ledsettings.mode)
            fastColorWipe(ledstrip.strip, True)
        # --- MIDI port configuration ---
        if(location == "Input"):
            midiports.change_port("inport", choice)
        if(location == "Playback"):
            midiports.change_port("playport", choice)
        if(location == "Ports_Settings"):
            if(choice == "Refresh ports" or choice == "Input" or choice == "Playback"):
                menu.update_ports()
            if(choice == "Reset Bluetooth service"):
                menu.render_message("Reseting BL service", "", 1000)
                os.system("sudo systemctl restart btmidi.service")
        # --- one-shot LED animations, started on background threads ---
        if(location == "LED_animations"):
            if(choice == "Theater Chase"):
                self.t = threading.Thread(target=theaterChase, args=(ledstrip.strip, Color(127, 127, 127)))
                self.t.start()
            if(choice == "Theater Chase Rainbow"):
                self.t = threading.Thread(target=theaterChaseRainbow, args=(ledstrip.strip, 5))
                self.t.start()
            if(choice == "Sound of da police"):
                self.t = threading.Thread(target=sound_of_da_police, args=(ledstrip.strip, 1))
                self.t.start()
            if(choice == "Scanner"):
                self.t = threading.Thread(target=scanner, args=(ledstrip.strip, 1))
                self.t.start()
            if(choice == "Clear"):
                fastColorWipe(ledstrip.strip, True)
        if(location == "Breathing"):
            if(choice == "Fast"):
                self.t = threading.Thread(target=breathing, args=(ledstrip.strip,5))
                self.t.start()
            if(choice == "Medium"):
                self.t = threading.Thread(target=breathing, args=(ledstrip.strip,10))
                self.t.start()
            if(choice == "Slow"):
                self.t = threading.Thread(target=breathing, args=(ledstrip.strip,25))
                self.t.start()
        if(location == "Rainbow"):
            if(choice == "Fast"):
                self.t = threading.Thread(target=rainbow, args=(ledstrip.strip,2))
                self.t.start()
            if(choice == "Medium"):
                self.t = threading.Thread(target=rainbow, args=(ledstrip.strip,20))
                self.t.start()
            if(choice == "Slow"):
                self.t = threading.Thread(target=rainbow, args=(ledstrip.strip,50))
                self.t.start()
        if(location == "Rainbow_Cycle"):
            if(choice == "Fast"):
                self.t = threading.Thread(target=rainbowCycle, args=(ledstrip.strip,1))
                self.t.start()
            if(choice == "Medium"):
                self.t = threading.Thread(target=rainbowCycle, args=(ledstrip.strip,20))
                self.t.start()
            if(choice == "Slow"):
                self.t = threading.Thread(target=rainbowCycle, args=(ledstrip.strip,50))
                self.t.start()
        # NOTE(review): duplicate "LED_animations" branch — could be merged
        # with the block above.
        if(location == "LED_animations"):
            if(choice == "Stop animation"):
                self.screensaver_is_running = False
        if(location == "Other_Settings"):
            if(choice == "System Info"):
                screensaver()
        # --- color modes: rainbow / multicolor / speed ---
        if(location == "Rainbow_Colors"):
            ledsettings.color_mode = "Rainbow"
            usersettings.change_setting_value("color_mode", ledsettings.color_mode)
        # "Add Color"/"Delete" can be chosen from several pages, so they key
        # off choice alone.
        if(choice == "Add Color"):
            ledsettings.addcolor()
        if(choice == "Delete"):
            ledsettings.deletecolor(location.replace('Color',''))
        if(location == "Multicolor" and choice == "Confirm"):
            ledsettings.color_mode = "Multicolor"
            usersettings.change_setting_value("color_mode", ledsettings.color_mode)
        if(location == "Speed" and choice == "Confirm"):
            ledsettings.color_mode = "Speed"
            usersettings.change_setting_value("color_mode", ledsettings.color_mode)
        if(location == "Sequences"):
            if(choice == "Update"):
                refresh_result = menu.update_sequence_list()
                if(refresh_result == False):
                    menu.render_message("Something went wrong", "Make sure your sequence file is correct", 1500)
            else:
                # Any other choice selects the sequence under the pointer.
                ledsettings.set_sequence(self.pointer_position, 0)
        if(location == "Sides_Color"):
            if(choice == "Custom RGB"):
                ledsettings.adjacent_mode = "RGB"
            if(choice == "Same as main"):
                ledsettings.adjacent_mode = "Main"
            if(choice == "Off"):
                ledsettings.adjacent_mode = "Off"
            usersettings.change_setting_value("adjacent_mode", ledsettings.adjacent_mode)
        # --- system actions ---
        if(location == "Reset_to_default_settings"):
            if(choice == "Confirm"):
                usersettings.reset_to_default()
            else:
                self.go_back()
        if (location == "Shutdown"):
            if (choice == "Confirm"):
                menu.render_message("", "Shutting down...", 5000)
                call("sudo shutdown -h now", shell=True)
            else:
                self.go_back()
        if (location == "Reboot"):
            if (choice == "Confirm"):
                menu.render_message("", "Rebooting...", 5000)
                call("sudo reboot now", shell=True)
            else:
                self.go_back()
        if (location == "Skipped_notes"):
            ledsettings.skipped_notes = choice
            usersettings.change_setting_value("skipped_notes", ledsettings.skipped_notes)
        if (location == "Content"):
            menu.toggle_screensaver_settings(choice)
        if (location == "Led_animation"):
            menu.led_animation = choice
            usersettings.change_setting_value("led_animation", choice)
    def change_value(self, value):
        """Apply a left/right adjustment on the currently open value page.

        Args:
            value: "LEFT"/"RIGHT" from the input device, mapped to -1/+1
                and (for most settings) scaled by self.speed_multiplier.
        """
        if(value == "LEFT"):
            value = -1
        elif(value == "RIGHT"):
            value = 1
        if(self.currentlocation == "Brightness"):
            ledstrip.change_brightness(value*self.speed_multiplier)
        if(self.currentlocation == "Led_count"):
            ledstrip.change_led_count(value)
        if(self.currentlocation == "Shift"):
            ledstrip.change_shift(value)
        if(self.currentlocation == "Backlight_Brightness"):
            if(self.current_choice == "Power"):
                ledsettings.change_backlight_brightness(value*self.speed_multiplier)
        if(self.currentlocation == "Backlight_Color"):
            ledsettings.change_backlight_color(self.current_choice, value*self.speed_multiplier)
        if(self.currentlocation == "Custom_RGB"):
            ledsettings.change_adjacent_color(self.current_choice, value*self.speed_multiplier)
        if(self.currentlocation == "RGB"):
            ledsettings.change_color(self.current_choice, value*self.speed_multiplier)
            ledsettings.color_mode = "Single"
            usersettings.change_setting_value("color_mode", ledsettings.color_mode)
        # Multicolor pages are named "RGB_Color<N>" / "Key_range<N>".
        if("RGB_Color" in self.currentlocation):
            ledsettings.change_multicolor(self.current_choice, self.currentlocation, value*self.speed_multiplier)
        if("Key_range" in self.currentlocation):
            ledsettings.change_multicolor_range(self.current_choice, self.currentlocation, value*self.speed_multiplier)
            ledsettings.light_keys_in_range(self.currentlocation)
        # NOTE(review): the three rainbow adjustments below key off
        # current_choice only (no currentlocation check) and are not
        # persisted via usersettings — confirm this is intentional.
        if(self.current_choice == "Offset"):
            ledsettings.rainbow_offset = ledsettings.rainbow_offset + value * 5 *self.speed_multiplier
        if(self.current_choice == "Scale"):
            ledsettings.rainbow_scale = ledsettings.rainbow_scale + value * 5 *self.speed_multiplier
        if(self.current_choice == "Timeshift"):
            ledsettings.rainbow_timeshift = ledsettings.rainbow_timeshift + value *self.speed_multiplier
        # --- screensaver / power-saving delays (minutes, floored at 0) ---
        if(self.currentlocation == "Start_delay"):
            self.screensaver_delay = int(self.screensaver_delay) + (value*self.speed_multiplier)
            if(self.screensaver_delay < 0):
                self.screensaver_delay = 0
            usersettings.change_setting_value("screensaver_delay", self.screensaver_delay)
        if(self.currentlocation == "Turn_off_screen_delay"):
            self.screen_off_delay = int(self.screen_off_delay) + (value*self.speed_multiplier)
            if(self.screen_off_delay < 0):
                self.screen_off_delay = 0
            usersettings.change_setting_value("screen_off_delay", self.screen_off_delay)
        if(self.currentlocation == "Led_animation_delay"):
            self.led_animation_delay = int(self.led_animation_delay) + (value*self.speed_multiplier)
            if(self.led_animation_delay < 0):
                self.led_animation_delay = 0
            usersettings.change_setting_value("led_animation_delay", self.led_animation_delay)
        # --- "Speed" color mode endpoints, clamped to 0..255 ---
        if(self.currentlocation == "Color_for_slow_speed"):
            ledsettings.speed_slowest[self.current_choice.lower()] += value*self.speed_multiplier
            if(ledsettings.speed_slowest[self.current_choice.lower()] > 255):
                ledsettings.speed_slowest[self.current_choice.lower()] = 255
            if(ledsettings.speed_slowest[self.current_choice.lower()] < 0):
                ledsettings.speed_slowest[self.current_choice.lower()] = 0
            usersettings.change_setting_value("speed_slowest_"+self.current_choice.lower(), ledsettings.speed_slowest[self.current_choice.lower()])
        if(self.currentlocation == "Color_for_fast_speed"):
            ledsettings.speed_fastest[self.current_choice.lower()] += value*self.speed_multiplier
            if(ledsettings.speed_fastest[self.current_choice.lower()] > 255):
                ledsettings.speed_fastest[self.current_choice.lower()] = 255
            if(ledsettings.speed_fastest[self.current_choice.lower()] < 0):
                ledsettings.speed_fastest[self.current_choice.lower()] = 0
            usersettings.change_setting_value("speed_fastest_"+self.current_choice.lower(), ledsettings.speed_fastest[self.current_choice.lower()])
        if(self.currentlocation == "Period"):
            # Adjusted in 0.1 s increments, floored at 0.1 s.
            ledsettings.speed_period_in_seconds += (value/float(10))*self.speed_multiplier
            if(ledsettings.speed_period_in_seconds < 0.1):
                ledsettings.speed_period_in_seconds = 0.1
            usersettings.change_setting_value("speed_period_in_seconds", ledsettings.speed_period_in_seconds)
        if(self.currentlocation == "Max_notes_in_period"):
            ledsettings.speed_max_notes += value*self.speed_multiplier
            if(ledsettings.speed_max_notes < 2):
                ledsettings.speed_max_notes = 2
            usersettings.change_setting_value("speed_max_notes", ledsettings.speed_max_notes)
        # Redraw the menu so the updated value is shown.
        menu.show()
def speed_change(self):
if(self.speed_multiplier == 10):
self.speed_multiplier = 1
elif(self.speed_multiplier == 1):
self.speed_multiplier = 10
def play_midi(song_path):
    """Play a MIDI file from the Songs/ folder, reproducing its timing.

    Each non-meta message is sent to the websocket server and a zero-time
    copy is queued for the LED pipeline.  Playback stops early when the
    song is removed from saving.is_playing_midi (the "Stop playing" menu
    action clears that dict).
    """
    # Wake the input-processing loop so it notices playback has started.
    midiports.pending_queue.append(mido.Message('note_on'))
    if song_path in saving.is_playing_midi.keys():
        menu.render_message(song_path, "Already playing", 2000)
        return
    # Only one song plays at a time; clearing stops any other playback.
    saving.is_playing_midi.clear()
    saving.is_playing_midi[song_path] = True
    menu.render_message("Playing: ", song_path, 2000)
    saving.t = threading.currentThread()
    output_time_last = 0
    delay_debt = 0  # accumulated negative delay, repaid when we run ahead
    try:
        mid = mido.MidiFile("Songs/"+song_path)
        fastColorWipe(ledstrip.strip, True)
        t0 = False
        for message in mid:
            if song_path in saving.is_playing_midi.keys():
                if(t0 == False):
                    t0 = time.time()
                output_time_start = time.time()
                output_time_last = time.time() - output_time_start
                delay_temp = message.time - output_time_last
                # The 0.003 s offset compensates per-message processing cost.
                delay = message.time - output_time_last - float(0.003) + delay_debt
                if(delay > 0):
                    time.sleep(delay)
                    delay_debt = 0
                else:
                    delay_debt += delay_temp
                output_time_start = time.time()
                if not message.is_meta:
                    server.send(message)
                    midiports.pending_queue.append(message.copy(time=0))
            else:
                break  # stopped from the menu
    except Exception:
        # BUGFIX: was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed so those still propagate.
        menu.render_message(song_path, "Can't play this file", 2000)
def find_between(s, start, end):
    """Return the substring of *s* between the first *start* and the next *end*.

    Returns False when either delimiter is missing or the input cannot be
    split, preserving the original error-as-False contract.
    """
    try:
        return (s.split(start))[1].split(end)[0]
    except Exception:
        # BUGFIX: narrowed from a bare "except:" so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        return False
def shift(l, n):
    """Rotate sequence *l* left by *n* positions (negative *n* rotates right)."""
    head, tail = l[:n], l[n:]
    return tail + head
def screensaver():
    """Show the system-info screensaver until a key press or MIDI activity.

    Renders clock/CPU/RAM/temperature/network/SD statistics in a loop,
    turns the screen off and starts the configured idle LED animation
    after their respective delays, and restores the normal menu when
    server traffic arrives or KEY2 is pressed.
    """
    delay = 0.1
    interval = 3 / float(delay)  # samples per CPU-averaging window
    i = 0
    cpu_history = [None] * int(interval)
    cpu_chart = [0] * 28  # rolling chart of the last 28 CPU samples
    cpu_average = 0
    # BUGFIX: previously unbound; a failed average computation on the first
    # window raised NameError inside the except handler below.
    last_cpu_average = 0
    upload = 0
    download = 0
    upload_start = 0
    download_start = 0
    try:
        server.poll()
    except Exception:
        pass
    while True:
        # After an hour of idling, drop to a slower refresh to save CPU.
        if((time.time() - saving.start_time) > 3600 and delay < 0.5 and menu.screensaver_is_running == False):
            delay = 0.9
            interval = 5 / float(delay)
            cpu_history = [None] * int(interval)
            cpu_average = 0
            i = 0
        if(int(menu.screen_off_delay) > 0 and ((time.time() - saving.start_time) > (int(menu.screen_off_delay) * 60))):
            menu.screen_status = 0
            GPIO.output(24, 0)
        if(int(menu.led_animation_delay) > 0 and ((time.time() - saving.start_time) > (int(menu.led_animation_delay) * 60)) and menu.screensaver_is_running == False):
            # BUGFIX: was "==" (a no-op comparison), so the flag never flipped
            # and a fresh animation thread was spawned on every loop pass.
            menu.screensaver_is_running = True
            if(menu.led_animation == "Theater Chase"):
                menu.t = threading.Thread(target=theaterChase, args=(ledstrip.strip, 1))
                menu.t.start()
            if(menu.led_animation == "Breathing Slow"):
                menu.t = threading.Thread(target=breathing, args=(ledstrip.strip, 25))
                menu.t.start()
            if(menu.led_animation == "Rainbow Slow"):
                menu.t = threading.Thread(target=rainbow, args=(ledstrip.strip, 10))
                menu.t.start()
            if(menu.led_animation == "Rainbow Cycle Slow"):
                menu.t = threading.Thread(target=rainbowCycle, args=(ledstrip.strip, 10))
                menu.t.start()
            if(menu.led_animation == "Theater Chase Rainbow"):
                menu.t = threading.Thread(target=theaterChaseRainbow, args=(ledstrip.strip, 5))
                menu.t.start()
            if(menu.led_animation == "Sound of da police"):
                menu.t = threading.Thread(target=sound_of_da_police, args=(ledstrip.strip, 1))
                menu.t.start()
            if(menu.led_animation == "Scanner"):
                menu.t = threading.Thread(target=scanner, args=(ledstrip.strip, 1))
                menu.t.start()
        hour = datetime.datetime.now().strftime("%H:%M:%S")
        date = datetime.datetime.now().strftime("%d-%m-%Y")
        cpu_usage = psutil.cpu_percent()
        cpu_history[i] = cpu_usage
        cpu_chart.append(cpu_chart.pop(0))
        cpu_chart[27] = cpu_usage
        # Recompute the average once per full sampling window.
        if(i>=(int(interval) - 1)):
            i = 0
            try:
                # BUGFIX: the denominator was len(cpu_history) + 1, biasing
                # the average low by one sample's worth.
                cpu_average = sum(cpu_history) / float(len(cpu_history))
                last_cpu_average = cpu_average
            except Exception:
                # History still contains None (e.g. right after a resize).
                cpu_average = last_cpu_average
        if(menu.screensaver_settings["ram"] == "1"):
            ram_usage = psutil.virtual_memory()[2]
        else:
            ram_usage = 0
        if(menu.screensaver_settings["temp"] == "1"):
            try:
                # BUGFIX: a dict's .values() view is not indexable on
                # Python 3, so the original always fell into the except
                # branch and displayed 0; materialize it as a list first.
                temp = list(psutil.sensors_temperatures().values())[0][0].current
                temp = round(float(temp), 1)
            except Exception:
                temp = 0
        else:
            temp = 0
        if(menu.screensaver_settings["network_usage"] == "1"):
            upload_end = psutil.net_io_counters().bytes_sent
            download_end = psutil.net_io_counters().bytes_recv
            # First pass only records the counters; rates need two samples.
            if upload_start:
                upload = upload_end - upload_start
                upload = upload*(1 / delay)   # bytes per second
                upload = upload/1000000       # scale to millions of bytes
                upload = round(upload, 2)
            if download_start:
                download = download_end - download_start
                download = download*(1 / delay)
                download = download/1000000
                download = round(download, 2)
            upload_start = upload_end
            download_start = download_end
        else:
            upload = 0
            download = 0
        if(menu.screensaver_settings["sd_card_space"] == "1"):
            card_space = psutil.disk_usage('/')
        else:
            card_space = 0
        menu.render_screensaver(hour, date, cpu_usage, round(cpu_average,1), ram_usage, temp, cpu_chart, upload, download, card_space)
        time.sleep(delay)
        i += 1
        # Any server/MIDI activity ends the screensaver and restores the menu.
        try:
            server.server_loop()
            if (server.poll() != None):
                menu.screensaver_is_running = False
                saving.start_time = time.time()
                menu.screen_status = 1
                GPIO.output(24, 1)
                menu.show()
                break
        except Exception:
            pass
        if GPIO.input(KEY2) == 0:
            menu.screensaver_is_running = False
            saving.start_time = time.time()
            menu.screen_status = 1
            GPIO.output(24, 1)
            menu.show()
            break
class SaveMIDI:
    """Records incoming MIDI messages and saves them as .mid files.

    Messages are kept per track in self.messages_to_save: the "main" track
    receives every message, while color-named tracks ("#rrggbb") receive
    only the notes played in that color, so a multicolor recording can be
    saved as one file per color.
    """
    def __init__(self):
        self.isrecording = False
        # Maps song_path -> True while that song is being played back.
        self.is_playing_midi = {}
        self.start_time = time.time()
    def start_recording(self):
        """Begin a new recording with an empty "main" track."""
        self.isrecording = True
        menu.render_message("Recording started", "", 500)
        self.messages_to_save = dict()
        self.messages_to_save["main"] = []
        self.restart_time()
    def cancel_recording(self):
        """Discard recording state (buffered messages stay until next start)."""
        self.isrecording = False
        menu.render_message("Recording canceled", "", 1500)
    def add_track(self, status, note, velocity, time_value, hex_color="main"):
        """Queue a note message.

        note_off is fanned out to every track (a release ends the note on
        whichever track recorded it); other messages go to their color track
        and are mirrored into "main".
        """
        if(hex_color not in self.messages_to_save):
            self.messages_to_save[str(hex_color)] = []
        if(status == "note_off"):
            for key, note_off_message in self.messages_to_save.items():
                self.messages_to_save[key].append(["note", time_value, status, note, velocity])
        else:
            self.messages_to_save[str(hex_color)].append(["note", time_value, status, note, velocity])
            if(str(hex_color) != "main"):
                self.messages_to_save["main"].append(["note", time_value, status, note, velocity])
    def add_control_change(self, status, channel, control, value, time_value):
        """Queue a control-change message on the "main" track only."""
        self.messages_to_save["main"].append(["control_change", time_value, status, channel, control, value])
    def save(self, filename):
        """Write one Songs/<filename>_<track>.mid file per recorded track."""
        for key, multicolor_track in self.messages_to_save.items():
            self.mid = MidiFile(None, None, 0, 20000)  # 20000 is a ticks_per_beat value
            self.track = MidiTrack()
            self.mid.tracks.append(self.track)
            # BUGFIX: reset per track.  Previously the value leaked from the
            # prior track (first delta computed against another track's last
            # timestamp, relying on a bare except to seed the very first one).
            previous_message_time = None
            for message in multicolor_track:
                if previous_message_time is None:
                    time_delay = 0
                else:
                    time_delay = message[1] - previous_message_time
                previous_message_time = message[1]
                if(time_delay < 0):
                    time_delay = 0
                if(message[0] == "note"):
                    self.track.append(Message(message[2], note=int(message[3]), velocity=int(message[4]), time=int(time_delay*40000)))
                else:
                    self.track.append(Message(message[2], channel=int(message[3]), control=int(message[4]), value=int(message[5]), time=int(time_delay*40000)))
                self.last_note_time = message[1]
            self.mid.save('Songs/'+filename+'_'+str(key)+'.mid')
        # BUGFIX: was reset to a list; every other code path treats
        # messages_to_save as a dict, so add_track would have crashed.
        self.messages_to_save = {}
        self.isrecording = False
        menu.render_message("File saved", filename+".mid", 1500)
    def restart_time(self):
        """Mark "now" as the recording's time origin."""
        self.start_time = time.time()
class LedSettings:
    def __init__(self):
        """Load every LED-related setting from the persisted usersettings."""
        # Single-color mode RGB components (0..255).
        self.red = int(usersettings.get_setting_value("red"))
        self.green = int(usersettings.get_setting_value("green"))
        self.blue = int(usersettings.get_setting_value("blue"))
        self.mode = usersettings.get_setting_value("mode")
        self.fadingspeed = int(usersettings.get_setting_value("fadingspeed"))
        self.color_mode = usersettings.get_setting_value("color_mode")
        # Rainbow-mode shaping parameters.
        self.rainbow_offset = int(usersettings.get_setting_value("rainbow_offset"))
        self.rainbow_scale = int(usersettings.get_setting_value("rainbow_scale"))
        self.rainbow_timeshift = int(usersettings.get_setting_value("rainbow_timeshift"))
        # Multicolor entries and key ranges are stored as stringified Python
        # literals, hence the ast.literal_eval round-trip.
        self.multicolor = ast.literal_eval(usersettings.get_setting_value("multicolor"))
        self.multicolor_range = ast.literal_eval(usersettings.get_setting_value("multicolor_range"))
        menu.update_multicolor(self.multicolor)
        self.sequence_active = usersettings.get_setting_value("sequence_active")
        # LCD backlight brightness and color.
        self.backlight_brightness = int(usersettings.get_setting_value("backlight_brightness"))
        self.backlight_brightness_percent = int(usersettings.get_setting_value("backlight_brightness_percent"))
        self.backlight_red = int(usersettings.get_setting_value("backlight_red"))
        self.backlight_green = int(usersettings.get_setting_value("backlight_green"))
        self.backlight_blue = int(usersettings.get_setting_value("backlight_blue"))
        # Color of the LEDs adjacent to a pressed key ("Sides color").
        self.adjacent_mode = usersettings.get_setting_value("adjacent_mode")
        self.adjacent_red = int(usersettings.get_setting_value("adjacent_red"))
        self.adjacent_green = int(usersettings.get_setting_value("adjacent_green"))
        self.adjacent_blue = int(usersettings.get_setting_value("adjacent_blue"))
        self.skipped_notes = usersettings.get_setting_value("skipped_notes")
        # "Speed" color mode: color interpolated by notes played per period.
        self.notes_in_last_period = []
        # NOTE(review): this default is overwritten by the persisted value a
        # few lines below; it only matters if that lookup were removed.
        self.speed_period_in_seconds = 0.8
        self.speed_slowest = {}
        self.speed_slowest["red"] = int(usersettings.get_setting_value("speed_slowest_red"))
        self.speed_slowest["green"] = int(usersettings.get_setting_value("speed_slowest_green"))
        self.speed_slowest["blue"] = int(usersettings.get_setting_value("speed_slowest_blue"))
        self.speed_fastest = {}
        self.speed_fastest["red"] = int(usersettings.get_setting_value("speed_fastest_red"))
        self.speed_fastest["green"] = int(usersettings.get_setting_value("speed_fastest_green"))
        self.speed_fastest["blue"] = int(usersettings.get_setting_value("speed_fastest_blue"))
        self.speed_period_in_seconds = float(usersettings.get_setting_value("speed_period_in_seconds"))
        self.speed_max_notes = int(usersettings.get_setting_value("speed_max_notes"))
def addcolor(self):
self.multicolor.append([0, 255, 0])
self.multicolor_range.append([20, 108])
usersettings.change_setting_value("multicolor", self.multicolor)
usersettings.change_setting_value("multicolor_range", self.multicolor_range)
menu.update_multicolor(self.multicolor)
def deletecolor(self, key):
del self.multicolor[int(key) - 1]
del self.multicolor_range[int(key) - 1]
usersettings.change_setting_value("multicolor", self.multicolor)
usersettings.change_setting_value("multicolor_range", self.multicolor_range)
menu.update_multicolor(self.multicolor)
menu.go_back()
def change_multicolor(self, choice, location, value):
self.sequence_active = False
location = location.replace('RGB_Color','')
location = int(location) - 1
if(choice == "Red"):
choice = 0
elif(choice == "Green"):
choice = 1
else:
choice = 2
self.multicolor[int(location)][choice] += int(value)
if(self.multicolor[int(location)][choice] < 0):
self.multicolor[int(location)][choice] = 0
elif(self.multicolor[int(location)][choice] > 255):
self.multicolor[int(location)][choice] = 255
usersettings.change_setting_value("multicolor", self.multicolor)
def change_multicolor_range(self, choice, location, value):
location = location.replace('Key_range','')
location = int(location) - 1
if(choice == "Start"):
choice = 0
else:
choice = 1
self.multicolor_range[int(location)][choice] += int(value)
usersettings.change_setting_value("multicolor_range", self.multicolor_range)
def get_multicolors(self, number):
number = int(number) - 1
return str(self.multicolor[int(number)][0])+", "+str(self.multicolor[int(number)][1])+", "+str(self.multicolor[int(number)][2])
def get_random_multicolor_in_range(self, note):
temporary_multicolor = []
i = 0
for range in self.multicolor_range:
if(note >= range[0] and note <= range[1]):
temporary_multicolor.append(self.multicolor[i])
i += 1
try:
choosen_color = random.choice(temporary_multicolor)
except:
choosen_color = [0, 0, 0]
return choosen_color
def light_keys_in_range(self, location):
fastColorWipe(ledstrip.strip, True)
color_counter = 0
for i in self.multicolor:
start = self.multicolor_range[int(color_counter)][0]
end = self.multicolor_range[int(color_counter)][1]
if(start > 92):
note_offset_start = 2
elif(start > 55):
note_offset_start = 1
else:
note_offset_start = 0
if(end > 92):
note_offset_end = 2
elif(end > 55):
note_offset_end = 1
else:
note_offset_end = 0
red = self.multicolor[int(color_counter)][0]
green = self.multicolor[int(color_counter)][1]
blue = self.multicolor[int(color_counter)][2]
ledstrip.strip.setPixelColor(int(((start - 20)*2 - note_offset_start)), Color(int(green), int(red), int(blue)))
ledstrip.strip.setPixelColor(int(((end - 20)*2 - note_offset_end)), Color(int(green), int(red), int(blue)))
color_counter += 1
def change_color(self, color, value):
self.sequence_active = False
usersettings.change_setting_value("sequence_active", self.sequence_active)
self.color_mode = "Single"
usersettings.change_setting_value("color_mode", self.color_mode)
if(color == "Red"):
if(self.red <= 255 and self.red >= 0):
self.red += int(value)
if(self.red < 0):
self.red = 0
if(self.red > 255):
self.red = 255
usersettings.change_setting_value("red", self.red)
elif(color == "Green"):
if(self.green <= 255 and self.green >= 0):
self.green += int(value)
if(self.green < 0):
self.green = 0
if(self.green > 255):
self.green = 255
usersettings.change_setting_value("green", self.green)
elif(color == "Blue"):
if(self.blue <= 255 and self.blue >= 0):
self.blue += int(value)
if(self.blue < 0):
self.blue = 0
if(self.blue > 255):
self.blue = 255
usersettings.change_setting_value("blue", self.blue)
def change_color_name(self, color):
self.sequence_active = False
usersettings.change_setting_value("sequence_active", self.sequence_active)
self.color_mode = "Single"
usersettings.change_setting_value("color_mode", self.color_mode)
self.red = int(find_between(str(color), "red=", ","))
self.green = int(find_between(str(color), "green=", ","))
self.blue = int(find_between(str(color), "blue=", ")"))
usersettings.change_setting_value("red", self.red)
usersettings.change_setting_value("green", self.green)
usersettings.change_setting_value("blue", self.blue)
def get_color(self, color):
if(color == "Red"):
return self.red
elif(color == "Green"):
return self.green
elif(color == "Blue"):
return self.blue
def get_colors(self):
return str(self.red)+", "+str(self.green)+", "+str(self.blue)
def get_backlight_color(self, color):
if(color == "Red"):
return self.backlight_red
elif(color == "Green"):
return self.backlight_green
elif(color == "Blue"):
return self.backlight_blue
def get_backlight_colors(self):
return str(self.backlight_red)+", "+str(self.backlight_green)+", "+str(self.backlight_blue)
def get_adjacent_color(self, color):
if(color == "Red"):
return self.adjacent_red
elif(color == "Green"):
return self.adjacent_green
elif(color == "Blue"):
return self.adjacent_blue
def get_adjacent_colors(self):
return str(self.adjacent_red)+", "+str(self.adjacent_green)+", "+str(self.adjacent_blue)
    def set_sequence(self, sequence, step):
        """Load or advance a color sequence defined in sequences.xml.

        Args:
            sequence: 0-based sequence index (used when step != 1).
            step: 1 advances the currently loaded sequence to its next step;
                any other value (re)loads *sequence* from the XML file.

        Returns:
            False when the XML file is missing or malformed (the outer
            broad except swallows any parse/shape error); otherwise None.
        """
        try:
            if(step != 1):
                # Fresh load: parse the file and read the sequence header.
                self.step_number = 1
                self.sequences_tree = minidom.parse("sequences.xml")
                self.sequence_number = str(sequence + 1)
                self.next_step = self.sequences_tree.getElementsByTagName("sequence_"+str(self.sequence_number))[0].getElementsByTagName("next_step")[0].firstChild.nodeValue
                self.control_number = self.sequences_tree.getElementsByTagName("sequence_"+str(self.sequence_number))[0].getElementsByTagName("control_number")[0].firstChild.nodeValue
                self.count_steps = 1
                self.sequence_active = True
                # Probe step_1, step_2, ... until one is missing to learn
                # how many steps the sequence has.
                while(True):
                    try:
                        temp_step = self.sequences_tree.getElementsByTagName("sequence_"+str(self.sequence_number))[0].getElementsByTagName("step_"+str(self.count_steps))[0].getElementsByTagName("color")[0].firstChild.nodeValue
                        self.count_steps += 1
                    except:
                        self.count_steps -= 1
                        break
            else:
                #print("step_number: "+str(self.step_number)+" count steps: "+str(self.count_steps))
                # Advance, wrapping back to the first step at the end.
                self.step_number += 1
                if(self.step_number > self.count_steps):
                    self.step_number = 1
            # Apply the current step's color mode and light mode.
            self.color_mode = self.sequences_tree.getElementsByTagName("sequence_"+str(self.sequence_number))[0].getElementsByTagName("step_"+str(self.step_number))[0].getElementsByTagName("color")[0].firstChild.nodeValue
            self.mode = self.sequences_tree.getElementsByTagName("sequence_"+str(self.sequence_number))[0].getElementsByTagName("step_"+str(self.step_number))[0].getElementsByTagName("light_mode")[0].firstChild.nodeValue
            if(self.mode == "Velocity" or self.mode == "Fading"):
                # The XML stores speed as a label; map it to the numeric
                # fadingspeed the rest of the code expects.
                self.fadingspeed = self.sequences_tree.getElementsByTagName("sequence_"+str(self.sequence_number))[0].getElementsByTagName("step_"+str(self.step_number))[0].getElementsByTagName("speed")[0].firstChild.nodeValue
                if(self.mode == "Fading"):
                    if(self.fadingspeed == "Very fast"):
                        self.fadingspeed = 50
                    elif(self.fadingspeed == "Fast"):
                        self.fadingspeed = 40
                    elif(self.fadingspeed == "Medium"):
                        self.fadingspeed = 20
                    elif(self.fadingspeed == "Slow"):
                        self.fadingspeed = 10
                    elif(self.fadingspeed == "Very slow"):
                        self.fadingspeed = 2
                if(self.mode == "Velocity"):
                    if(self.fadingspeed == "Fast"):
                        self.fadingspeed = 10
                    elif(self.fadingspeed == "Medium"):
                        self.fadingspeed = 8
                    elif(self.fadingspeed == "Slow"):
                        self.fadingspeed = 6
                    elif(self.fadingspeed == "Very slow"):
                        self.fadingspeed = 3
            # Per-color-mode parameters read from the step definition.
            if(self.color_mode == "RGB"):
                self.color_mode = "Single"
                self.red = int(self.sequences_tree.getElementsByTagName("sequence_"+str(self.sequence_number))[0].getElementsByTagName("step_"+str(self.step_number))[0].getElementsByTagName("Red")[0].firstChild.nodeValue)
                self.green = int(self.sequences_tree.getElementsByTagName("sequence_"+str(self.sequence_number))[0].getElementsByTagName("step_"+str(self.step_number))[0].getElementsByTagName("Green")[0].firstChild.nodeValue)
                self.blue = int(self.sequences_tree.getElementsByTagName("sequence_"+str(self.sequence_number))[0].getElementsByTagName("step_"+str(self.step_number))[0].getElementsByTagName("Blue")[0].firstChild.nodeValue)
            if(self.color_mode == "Rainbow"):
                self.rainbow_offset = int(self.sequences_tree.getElementsByTagName("sequence_"+str(self.sequence_number))[0].getElementsByTagName("step_"+str(self.step_number))[0].getElementsByTagName("Offset")[0].firstChild.nodeValue)
                self.rainbow_scale = int(self.sequences_tree.getElementsByTagName("sequence_"+str(self.sequence_number))[0].getElementsByTagName("step_"+str(self.step_number))[0].getElementsByTagName("Scale")[0].firstChild.nodeValue)
                self.rainbow_timeshift = int(self.sequences_tree.getElementsByTagName("sequence_"+str(self.sequence_number))[0].getElementsByTagName("step_"+str(self.step_number))[0].getElementsByTagName("Timeshift")[0].firstChild.nodeValue)
            if(self.color_mode == "Speed"):
                self.speed_slowest["red"] = int(self.sequences_tree.getElementsByTagName("sequence_"+str(self.sequence_number))[0].getElementsByTagName("step_"+str(self.step_number))[0].getElementsByTagName("speed_slowest_red")[0].firstChild.nodeValue)
                self.speed_slowest["green"] = int(self.sequences_tree.getElementsByTagName("sequence_"+str(self.sequence_number))[0].getElementsByTagName("step_"+str(self.step_number))[0].getElementsByTagName("speed_slowest_green")[0].firstChild.nodeValue)
                self.speed_slowest["blue"] = int(self.sequences_tree.getElementsByTagName("sequence_"+str(self.sequence_number))[0].getElementsByTagName("step_"+str(self.step_number))[0].getElementsByTagName("speed_slowest_blue")[0].firstChild.nodeValue)
                self.speed_fastest["red"] = int(self.sequences_tree.getElementsByTagName("sequence_"+str(self.sequence_number))[0].getElementsByTagName("step_"+str(self.step_number))[0].getElementsByTagName("speed_fastest_red")[0].firstChild.nodeValue)
                self.speed_fastest["green"] = int(self.sequences_tree.getElementsByTagName("sequence_"+str(self.sequence_number))[0].getElementsByTagName("step_"+str(self.step_number))[0].getElementsByTagName("speed_fastest_green")[0].firstChild.nodeValue)
                self.speed_fastest["blue"] = int(self.sequences_tree.getElementsByTagName("sequence_"+str(self.sequence_number))[0].getElementsByTagName("step_"+str(self.step_number))[0].getElementsByTagName("speed_fastest_blue")[0].firstChild.nodeValue)
                self.speed_period_in_seconds = float(self.sequences_tree.getElementsByTagName("sequence_"+str(self.sequence_number))[0].getElementsByTagName("step_"+str(self.step_number))[0].getElementsByTagName("speed_period_in_seconds")[0].firstChild.nodeValue)
                self.speed_max_notes = int(self.sequences_tree.getElementsByTagName("sequence_"+str(self.sequence_number))[0].getElementsByTagName("step_"+str(self.step_number))[0].getElementsByTagName("speed_max_notes")[0].firstChild.nodeValue)
            if(self.color_mode == "Multicolor"):
                # Rebuild the multicolor tables by probing color_1, color_2, ...
                # and color_range_1, ... until the tags run out.
                self.multicolor = []
                self.multicolor_range = []
                multicolor_number = 1
                multicolor_range_number = 1
                while(True):
                    try:
                        colors = self.sequences_tree.getElementsByTagName("sequence_"+str(self.sequence_number))[0].getElementsByTagName("step_"+str(self.step_number))[0].getElementsByTagName("color_"+str(multicolor_number))[0].firstChild.nodeValue
                        colors = colors.split(',')
                        red = colors[0].replace(" ", "")
                        green = colors[1].replace(" ", "")
                        blue = colors[2].replace(" ", "")
                        self.multicolor.append([int(red), int(green), int(blue)])
                        multicolor_number += 1
                    except:
                        break
                while(True):
                    try:
                        colors_range = self.sequences_tree.getElementsByTagName("sequence_"+str(self.sequence_number))[0].getElementsByTagName("step_"+str(self.step_number))[0].getElementsByTagName("color_range_"+str(multicolor_range_number))[0].firstChild.nodeValue
                        colors_range = colors_range.split(',')
                        start = colors_range[0].replace(" ", "")
                        end = colors_range[1].replace(" ", "")
                        self.multicolor_range.append([int(start), int(end)])
                        multicolor_range_number += 1
                    except:
                        break
        except:
            # NOTE(review): broad except — any XML problem silently yields False.
            return False
def change_backlight_brightness(self, value):
    """Shift the backlight brightness percentage by *value* (clamped to 0..100),
    recompute the 0-255 brightness, persist both settings and repaint the strip."""
    pct = self.backlight_brightness_percent + value
    # clamp to the valid percentage range
    pct = max(0, min(100, pct))
    self.backlight_brightness_percent = pct
    self.backlight_brightness = 255 * pct / 100
    usersettings.change_setting_value("backlight_brightness", self.backlight_brightness)
    usersettings.change_setting_value("backlight_brightness_percent", self.backlight_brightness_percent)
    fastColorWipe(ledstrip.strip, True)
def change_backlight_color(self, color, value):
    """Adjust one backlight RGB channel ("Red"/"Green"/"Blue") by *value*,
    clamped to 0-255, then persist all three channels and repaint the strip."""
    channel_attrs = {"Red": "backlight_red", "Green": "backlight_green", "Blue": "backlight_blue"}
    attr = channel_attrs.get(color)
    if attr is not None:
        level = getattr(self, attr)
        # only channels currently inside the valid range are adjusted
        if 0 <= level <= 255:
            level = max(0, min(255, level + int(value)))
            setattr(self, attr, level)
    usersettings.change_setting_value("backlight_red", self.backlight_red)
    usersettings.change_setting_value("backlight_green", self.backlight_green)
    usersettings.change_setting_value("backlight_blue", self.backlight_blue)
    fastColorWipe(ledstrip.strip, True)
def change_adjacent_color(self, color, value):
    """Adjust one RGB channel of the adjacent-LED color by *value* (clamped 0-255),
    force the adjacent mode to "RGB", persist the settings and repaint the strip."""
    self.adjacent_mode = "RGB"
    usersettings.change_setting_value("adjacent_mode", self.adjacent_mode)
    channel_attrs = {"Red": "adjacent_red", "Green": "adjacent_green", "Blue": "adjacent_blue"}
    attr = channel_attrs.get(color)
    if attr is not None:
        level = getattr(self, attr)
        # only channels currently inside the valid range are adjusted
        if 0 <= level <= 255:
            level = max(0, min(255, level + int(value)))
            setattr(self, attr, level)
    usersettings.change_setting_value("adjacent_red", self.adjacent_red)
    usersettings.change_setting_value("adjacent_green", self.adjacent_green)
    usersettings.change_setting_value("adjacent_blue", self.adjacent_blue)
    fastColorWipe(ledstrip.strip, True)
def speed_add_note(self):
    """Record the current timestamp as a played note (feeds the Speed color mode)."""
    self.notes_in_last_period.append(time.time())
def speed_get_colors(self):
    """Return ``[red, green, blue]`` (rounded) interpolated between the
    configured "slowest" and "fastest" colors according to how many notes
    were played within the last ``speed_period_in_seconds``.

    Fixes: the original computed ``notes_count / float(max_notes)`` before
    checking the count, so ``speed_max_notes == 0`` always raised
    ZeroDivisionError; it also removed items from the list while iterating
    a copy — the window is now rebuilt in one pass.
    """
    # keep only notes still inside the rolling time window
    cutoff = time.time() - self.speed_period_in_seconds
    self.notes_in_last_period = [t for t in self.notes_in_last_period if t >= cutoff]
    notes_count = len(self.notes_in_last_period)
    max_notes = self.speed_max_notes
    if max_notes <= 0 or notes_count > max_notes:
        # at (or beyond) the maximum rate -> use the "fastest" color directly
        red = self.speed_fastest["red"]
        green = self.speed_fastest["green"]
        blue = self.speed_fastest["blue"]
    else:
        # linear interpolation between slowest (0%) and fastest (100%)
        speed_percent = notes_count / float(max_notes)
        red = ((self.speed_fastest["red"] - self.speed_slowest["red"]) * float(speed_percent)) + self.speed_slowest["red"]
        green = ((self.speed_fastest["green"] - self.speed_slowest["green"]) * float(speed_percent)) + self.speed_slowest["green"]
        blue = ((self.speed_fastest["blue"] - self.speed_slowest["blue"]) * float(speed_percent)) + self.speed_slowest["blue"]
    return [round(red), round(green), round(blue)]
class MidiPorts():
    """Discovers the MIDI input/playback ports used by the visualizer and
    allows switching them at runtime from the menu."""

    # substrings identifying virtual/system ports that must be skipped
    _SKIP_TAGS = ("Through", "RPi", "RtMidOut", "USB-USB")

    @classmethod
    def _is_usable(cls, port):
        """True for a physical port (none of the skip tags appear in its name)."""
        return not any(tag in port for tag in cls._SKIP_TAGS)

    def __init__(self):
        self.pending_queue = []
        ports = mido.get_input_names()
        # last usable port wins, matching the original scan order
        try:
            for port in ports:
                if self._is_usable(port):
                    self.inport = mido.open_input(port)
                    print("Inport set to "+port)
        except:
            print("no input port")
        try:
            for port in ports:
                if self._is_usable(port):
                    self.playport = mido.open_output(port)
                    print("playport set to "+port)
        except:
            print("no playback port")
        self.portname = "inport"

    def change_port(self, port, portname):
        """Reopen either the "inport" or "playport" as *portname*, reporting
        success or failure on the LCD menu."""
        try:
            if port == "inport":
                self.inport = mido.open_input(portname)
            elif port == "playport":
                self.playport = mido.open_output(portname)
            menu.render_message("Changing "+port+" to:", portname, 1500)
        except:
            menu.render_message("Can't change "+port+" to:", portname, 1500)
# --- Module-level wiring: the singletons shared by the main loop below ---
usersettings = UserSettings()
midiports = MidiPorts()
ledstrip = LedStrip()
menu = MenuLCD("menu.xml")
menu.show()
saving = SaveMIDI()
ledsettings = LedSettings()

z = 0
display_cycle = 0            # LCD is refreshed only every ~60 loop iterations
idle_time = 0                # 0 means "MIDI traffic seen recently"; else idle-start timestamp
last_activity = time.time()  # last button press / MIDI message (drives screensaver)
last_control_change = 0      # last MIDI CC value seen — presumably sustain pedal; see pedal_deadzone use
pedal_deadzone = 10          # CC values below this are treated as "pedal up"
timeshift_start = time.time()  # origin for the rainbow time animation
fastColorWipe(ledstrip.strip, True)
# disable wifi power management to get better performance
call("sudo iwconfig wlan0 power off", shell=True)
while True:
    # NOTE(review): the original indentation was lost in extraction; the nesting
    # below is a best-effort reconstruction — verify against the upstream file.
    # Poll the remote/web interface; its failures must never kill the LED loop.
    try:
        server.server_loop()
    except:
        pass
    red = ledsettings.get_color("Red")
    green = ledsettings.get_color("Green")
    blue = ledsettings.get_color("Blue")
    timeshift = (time.time() - timeshift_start) * ledsettings.rainbow_timeshift
    # Animate currently-lit keys: fade-out (Fading) / pedal-held decay (Velocity).
    if(ledsettings.mode == "Fading" or ledsettings.mode == "Velocity"):
        n = 0
        for note in ledstrip.keylist:
            if(ledsettings.color_mode == "Multicolor"):
                try:
                    red = ledstrip.keylist_color[n][0]
                    green = ledstrip.keylist_color[n][1]
                    blue = ledstrip.keylist_color[n][2]
                except:
                    pass
            if(ledsettings.color_mode == "Rainbow"):
                red = get_rainbow_colors(int((int(n) + ledsettings.rainbow_offset + int(timeshift)) * (float(ledsettings.rainbow_scale)/ 100)) & 255, "red")
                green = get_rainbow_colors(int((int(n) + ledsettings.rainbow_offset + int(timeshift)) * (float(ledsettings.rainbow_scale) / 100)) & 255, "green")
                blue = get_rainbow_colors(int((int(n) + ledsettings.rainbow_offset + int(timeshift)) * (float(ledsettings.rainbow_scale)/ 100)) & 255, "blue")
            if(ledsettings.color_mode == "Speed"):
                speed_colors = ledsettings.speed_get_colors()
                red = speed_colors[0]
                green = speed_colors[1]
                blue = speed_colors[2]
            # 1001 marks a key currently held in Fading mode (set on note_on below)
            if(int(note) != 1001):
                if(int(note) > 0):
                    fading = (note / float(100)) / 10
                    ledstrip.strip.setPixelColor((n), Color(int(int(green) * fading), int(int(red) * fading), int(int(blue) * fading)))
                    ledstrip.set_adjacent_colors(n, Color(int(int(green) * fading), int(int(red) * fading), int(int(blue) * fading)), False)
                    ledstrip.keylist[n] = ledstrip.keylist[n] - ledsettings.fadingspeed
                    if(ledstrip.keylist[n] <= 0):
                        # faded out completely: restore the (dimmed) backlight color
                        red_fading = int(ledsettings.get_backlight_color("Red"))* float(ledsettings.backlight_brightness_percent) / 100
                        green_fading = int(ledsettings.get_backlight_color("Green")) * float(ledsettings.backlight_brightness_percent) / 100
                        blue_fading = int(ledsettings.get_backlight_color("Blue")) * float(ledsettings.backlight_brightness_percent) / 100
                        color = Color(int(green_fading),int(red_fading),int(blue_fading))
                        ledstrip.strip.setPixelColor((n), color)
                        ledstrip.set_adjacent_colors(n, color, False)
                else:
                    ledstrip.keylist[n] = 0
            if(ledsettings.mode == "Velocity"):
                # pedal lifted (CC below deadzone) and key released -> backlight
                if(int(last_control_change) < pedal_deadzone):
                    if(int(ledstrip.keylist_status[n]) == 0):
                        red_fading = int(ledsettings.get_backlight_color("Red"))* float(ledsettings.backlight_brightness_percent) / 100
                        green_fading = int(ledsettings.get_backlight_color("Green")) * float(ledsettings.backlight_brightness_percent) / 100
                        blue_fading = int(ledsettings.get_backlight_color("Blue")) * float(ledsettings.backlight_brightness_percent) / 100
                        color = Color(int(green_fading),int(red_fading),int(blue_fading))
                        ledstrip.strip.setPixelColor((n), color)
                        ledstrip.set_adjacent_colors(n, color, False)
                        ledstrip.keylist[n] = 0
            n += 1
    # Fetch pending MIDI messages: live input vs. the MIDI-file playback queue.
    try:
        if(len(saving.is_playing_midi) == 0):
            midiports.midipending = server.iter_pending()
        else:
            midiports.midipending = midiports.pending_queue
    except:
        continue
    if len(midiports.midipending) > 0:
        idle_time = 0
    elif idle_time == 0:
        idle_time = time.time()
    key_pressed = False
    # Poll the HAT buttons only when there is no MIDI backlog (or during playback).
    if (len(midiports.midipending) == 0) or len(saving.is_playing_midi) > 0:
        if GPIO.input(KEYUP) == 0:
            last_activity = time.time()
            menu.change_pointer(0)
            while GPIO.input(KEYUP) == 0:
                time.sleep(0.001)
            key_pressed = True
        if GPIO.input(KEYDOWN) == 0:
            last_activity = time.time()
            menu.change_pointer(1)
            while GPIO.input(KEYDOWN) == 0:
                time.sleep(0.001)
            key_pressed = True
        if GPIO.input(KEY1) == 0:
            last_activity = time.time()
            menu.enter_menu()
            while GPIO.input(KEY1) == 0:
                time.sleep(0.001)
            key_pressed = True
        if GPIO.input(KEY2) == 0:
            last_activity = time.time()
            menu.go_back()
            if(menu.screensaver_is_running == False):
                fastColorWipe(ledstrip.strip, True)
            while GPIO.input(KEY2) == 0:
                time.sleep(0.01)
            key_pressed = True
        if GPIO.input(KEY3) == 0:
            last_activity = time.time()
            # advance the active LED sequence to its next step
            if(ledsettings.sequence_active == True):
                ledsettings.set_sequence(0, 1)
            while GPIO.input(KEY3) == 0:
                time.sleep(0.01)
            key_pressed = True
        if GPIO.input(KEYLEFT) == 0:
            last_activity = time.time()
            menu.change_value("LEFT")
            time.sleep(0.02)
            key_pressed = True
        if GPIO.input(KEYRIGHT) == 0:
            last_activity = time.time()
            menu.change_value("RIGHT")
            time.sleep(0.02)
            key_pressed = True
        if GPIO.input(JPRESS) == 0:
            last_activity = time.time()
            menu.speed_change()
            while GPIO.input(JPRESS) == 0:
                time.sleep(0.01)
            key_pressed = True
    # handle menu only after keyboard is idle for 2 seconds, to be more responsive
    if key_pressed or len(saving.is_playing_midi) > 0 or (idle_time > 0 and time.time() - idle_time > 2):
        #screensaver
        if(int(menu.screensaver_delay) > 0):
            if((time.time() - last_activity) > (int(menu.screensaver_delay) * 60)):
                screensaver()
        try:
            elapsed_time = time.time() - saving.start_time
        except:
            elapsed_time = 0
        if key_pressed or (display_cycle >= 60):
            display_cycle = 0
            # while recording, hold the status screen longer before redrawing
            if(saving.isrecording == True):
                screen_hold_time = 12
            else:
                screen_hold_time = 3
            if key_pressed or (elapsed_time > screen_hold_time):
                menu.show()
                timeshift_start = time.time()
                saving.start_time = time.time()
        display_cycle += 1
        # persist settings after a second of inactivity; honor a pending reset
        if((time.time() - last_activity) > 1):
            usersettings.save_changes()
            if(usersettings.pending_reset == True):
                usersettings.pending_reset = False
                ledstrip = LedStrip()
                menu = MenuLCD("menu.xml")
                menu.show()
                ledsettings = LedSettings()
    # cpu saving, after inactivity
    if idle_time > 0 and time.time() - idle_time > 15:
        time.sleep(0.001)
    #loop through incoming midi messages
    for msg in midiports.midipending:
        last_activity = time.time()
        # message fields are scraped from the mido message's string form
        note = find_between(str(msg), "note=", " ")
        original_note = note
        note = int(note)
        if "note_off" in str(msg):
            velocity = 0
        else:
            velocity = find_between(str(msg), "velocity=", " ")
        control_change = find_between(str(msg), "value=", " ")
        if(control_change != False):
            last_control_change = control_change
        # a control change can also advance the active LED sequence
        if(ledsettings.sequence_active == True):
            control = find_between(str(msg), "control=", " ")
            value = find_between(str(msg), "value=", " ")
            try:
                if("+" in ledsettings.next_step):
                    if(int(value) > int(ledsettings.next_step) and control == ledsettings.control_number):
                        ledsettings.set_sequence(0, 1)
                else:
                    if(int(value) < int(ledsettings.next_step) and control == ledsettings.control_number):
                        ledsettings.set_sequence(0, 1)
            except:
                pass
        #changing offset to adjust the distance between the LEDs to the key spacing
        if(note > 108):
            note_offset = 2
        elif(note > 68):
            note_offset = 1
        else:
            note_offset = 0
        note_offset -= ledstrip.shift
        note_position = (note - 20)*2 - note_offset
        if ledstrip.invert:
            note_position = ledstrip.led_number - note_position - 1
        # ignore notes that map outside the strip (unless it's a control change)
        if((note_position > ledstrip.led_number or note_position < 0) and control_change == False):
            continue
        elapsed_time = time.time() - saving.start_time
        if(ledsettings.color_mode == "Rainbow"):
            red = get_rainbow_colors(int((int((note_position)) + ledsettings.rainbow_offset + int(timeshift)) * (float(ledsettings.rainbow_scale)/ 100)) & 255, "red")
            green = get_rainbow_colors(int((int((note_position)) + ledsettings.rainbow_offset + int(timeshift)) * (float(ledsettings.rainbow_scale) / 100)) & 255, "green")
            blue = get_rainbow_colors(int((int((note_position)) + ledsettings.rainbow_offset + int(timeshift)) * (float(ledsettings.rainbow_scale)/ 100)) & 255, "blue")
        if(ledsettings.color_mode == "Speed"):
            speed_colors = ledsettings.speed_get_colors()
            red = speed_colors[0]
            green = speed_colors[1]
            blue = speed_colors[2]
        # --- note off ---
        if(int(velocity) == 0 and int(note) > 0):
            ledstrip.keylist_status[note_position] = 0
            if(ledsettings.mode == "Fading"):
                ledstrip.keylist[note_position] = 1000
            elif(ledsettings.mode == "Velocity"):
                if(int(last_control_change) < pedal_deadzone):
                    ledstrip.keylist[note_position] = 0
            else:
                # Normal mode: restore the backlight (or off) immediately
                if(ledsettings.backlight_brightness > 0):
                    red_backlight = int(ledsettings.get_backlight_color("Red"))* (ledsettings.backlight_brightness_percent) / 100
                    green_backlight = int(ledsettings.get_backlight_color("Green")) * (ledsettings.backlight_brightness_percent) / 100
                    blue_backlight = int(ledsettings.get_backlight_color("Blue")) * float(ledsettings.backlight_brightness_percent) / 100
                    color_backlight = Color(int(green_backlight),int(red_backlight),int(blue_backlight))
                    ledstrip.strip.setPixelColor((note_position), color_backlight)
                    ledstrip.set_adjacent_colors((note_position), color_backlight, True)
                else:
                    ledstrip.strip.setPixelColor((note_position), Color(0, 0, 0))
                    ledstrip.set_adjacent_colors((note_position), Color(0, 0, 0), False)
            if(saving.isrecording == True):
                saving.add_track("note_off", original_note, velocity, last_activity)
        # --- note on ---
        elif(int(velocity) > 0 and int(note) > 0):
            ledsettings.speed_add_note()
            if(ledsettings.color_mode == "Multicolor"):
                choosen_color = ledsettings.get_random_multicolor_in_range(note)
                red = choosen_color[0]
                green = choosen_color[1]
                blue = choosen_color[2]
                ledstrip.keylist_color[note_position] = [red, green, blue]
            ledstrip.keylist_status[note_position] = 1
            if(ledsettings.mode == "Velocity"):
                brightness = (100 / (float(velocity) / 127 ) )/ 100
            else:
                brightness = 1
            if(ledsettings.mode == "Fading"):
                ledstrip.keylist[note_position] = 1001
            if(ledsettings.mode == "Velocity"):
                ledstrip.keylist[note_position] = 1000/float(brightness)
            # channels 12/11 presumably mark left/right-hand "learn" notes — confirm
            if(find_between(str(msg), "channel=", " ") == "12"):
                if(ledsettings.skipped_notes != "Finger-based"):
                    ledstrip.strip.setPixelColor((note_position), Color(255, 0, 0))
            elif(find_between(str(msg), "channel=", " ") == "11"):
                if(ledsettings.skipped_notes != "Finger-based"):
                    ledstrip.strip.setPixelColor((note_position), Color(0, 0, 255))
            else:
                if(ledsettings.skipped_notes != "Normal"):
                    ledstrip.strip.setPixelColor((note_position), Color(int(int(green)/float(brightness)), int(int(red)/float(brightness)), int(int(blue)/float(brightness))))
                    ledstrip.set_adjacent_colors((note_position), Color(int(int(green)/float(brightness)), int(int(red)/float(brightness)), int(int(blue)/float(brightness))), False)
            if(saving.isrecording == True):
                if (ledsettings.color_mode == "Multicolor"):
                    saving.add_track("note_on", original_note, velocity, last_activity, wc.rgb_to_hex((red,green,blue)))
                else:
                    saving.add_track("note_on", original_note, velocity, last_activity)
        # --- control change ---
        else:
            control = find_between(str(msg), "control=", " ")
            value = find_between(str(msg), "value=", " ")
            if(saving.isrecording == True):
                saving.add_control_change("control_change", 0, control, value, last_activity)
        saving.restart_time()
        # during playback, consume the message from the shared queue
        if(len(saving.is_playing_midi) > 0):
            midiports.pending_queue.remove(msg)
    ledstrip.strip.show()
|
mail_server.py | #!/usr/bin/python3
import re
import sqlite3
import configparser
import asyncore
import mailparser
import base64
import threading
import time
import sys
import json
from urllib.parse import urlparse
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from datetime import datetime
from smtpd import SMTPServer
# Load configuration (note: despite the .py name, this is an INI file parsed by configparser).
config = configparser.ConfigParser()
config.read('config.py')
# Main-thread sqlite connection — used by the SMTP handler (asyncore runs in this thread).
conn = sqlite3.connect(config.get('DATABASE', 'FILE'))
c = conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS mails (timestamp real, sender text, _from text, _to text, body text)')
conn.commit()
class EmlServer(SMTPServer):
    """SMTP sink: parses every incoming message and stores it in the mails table."""
    no = 0

    def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
        """Parse *data* and insert (timestamp, sender, from, to, b64 body) into sqlite.

        ``**kwargs`` absorbs the ``mail_options``/``rcpt_options`` keyword
        arguments that smtpd passes on Python >= 3.5; without it every
        delivery raised TypeError and the mail was dropped.
        """
        parsed = mailparser.parse_from_string(data)
        sender = parsed.from_[0][0]   # display name
        _from = parsed.from_[0][1]    # address
        _to = parsed.to[0][1]
        print('New email to %s.' % _to)
        _date = int(time.time())
        # body is stored base64-encoded so arbitrary content round-trips safely
        body = str(base64.b64encode(parsed.body.encode()), 'utf-8')
        params = (_date, sender, _from, _to, body)
        c.execute('INSERT INTO mails VALUES (?, ?, ?, ?, ?)', params)
        conn.commit()
def cleaner():
    """Background thread: periodically delete mails older than KEEP_TIME seconds.

    Fixes: the original reused the module-level ``c``/``conn`` that were
    created in the main thread — sqlite3 objects must only be used in the
    thread that created them, so every cleanup raised ProgrammingError.
    A dedicated connection is opened here instead (mirroring what
    ``APIServer.do_GET`` already does per request). The DELETE is also
    parameterized instead of %-formatted.
    """
    local_conn = sqlite3.connect(config.get('DATABASE', 'FILE'))
    local_c = local_conn.cursor()
    while True:
        time.sleep(int(config.get('CLEANER', 'CLEAN_INTERVAL')))
        cutoff = int(time.time() - int(config.get('CLEANER', 'KEEP_TIME')))
        local_c.execute('DELETE FROM mails WHERE timestamp<?', (cutoff,))
        local_conn.commit()
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """Handle requests in a separate thread."""
class APIServer(BaseHTTPRequestHandler):
    """Minimal JSON API: ``GET /?email=<addr>`` (or ``email=all``) returns stored mails."""

    def do_HEAD(self):
        """Send the common 200/JSON response header."""
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()

    def do_GET(self):
        """Return the stored mails for the requested address as JSON."""
        # per-request connection: handlers run in worker threads (ThreadingMixIn),
        # and sqlite3 objects must not cross threads
        self.conn = sqlite3.connect(config.get('DATABASE', 'FILE'))
        self.c = self.conn.cursor()
        if len(self.path) <= 1:
            self.do_HEAD()
            # NOTE(review): 'false' here is a string while the other branches use
            # booleans — kept as-is since API consumers may depend on it
            self.wfile.write(json.dumps({'result': 'false', 'msg': 'Nothing was requested.'}).encode())
            return
        query = urlparse(self.path).query
        query_components = dict(qc.split("=") for qc in query.split("&"))
        if 'email' not in query_components.keys():
            self.do_HEAD()
            self.wfile.write(json.dumps({'result': False, 'msg': 'Unknown parameter(s).'}).encode())
            return
        # Parameterized query: the previous string interpolation of the email
        # parameter allowed SQL injection from any HTTP client.
        if query_components['email'] == 'all':
            rows = self.c.execute('SELECT * FROM mails')
        else:
            rows = self.c.execute('SELECT * FROM mails WHERE _to=?', (query_components['email'],))
        mails = []
        for row in rows:
            mails.append({
                'timestamp': row[0],
                'from' : row[1],
                'from_address' : row[2],
                'to' : row[3],
                'body' : row[4]
            })
        counts = len(mails)
        ret = {
            'result': True,
            'counts': counts,
            'mails' : mails,
            'msg' : 'Emails for %s' % query_components['email']
        }
        self.do_HEAD()
        self.wfile.write(json.dumps(ret).encode())
class hs:
    """Owns the threaded HTTP API server, running it on a daemon thread."""

    def __init__(self):
        # Bug fix: the original stored ``Thread(...).start()`` which is always
        # None; keep the Thread object so callers can join/inspect it.
        self.thread = threading.Thread(target=self.run, daemon=True)
        self.thread.start()

    def kill(self):
        """Stop the HTTP server.

        NOTE(review): races __init__ if called before run() has assigned
        self.httpd — unchanged from the original contract.
        """
        self.httpd.shutdown()

    def run(self):
        """Thread body: build and serve the API until shutdown() is called."""
        self.httpd = ThreadedHTTPServer(('', int(config.get('API', 'PORT'))), APIServer)
        self.httpd.timeout = 2
        self.httpd.serve_forever()
        sys.exit()  # raises SystemExit, ending only this thread
def run():
    """Start the cleaner and HTTP API threads, then serve SMTP until interrupted."""
    cleaner_thread = threading.Thread(target=cleaner, daemon=True)
    cleaner_thread.start()
    api_server = hs()
    smtp_sink = EmlServer(('0.0.0.0', 25), None)
    try:
        asyncore.loop()
    except KeyboardInterrupt:
        pass
if __name__ == '__main__':
    run()  # serve SMTP + API until interrupted
|
analysis.py | from typing import List, Tuple
import multiprocessing
import os
import posixpath
import queue
import threading
import cv2
import dask as da
import h5py as hf
import numpy as np
import pint
from structure import ureg, Q_
from structure.analysis.statistics import Statistics, compute_statistics
from structure.grouped.fpw import make_fpw_measurements
from structure.interactive.dataset import (DATA_PATH, LABEL_PATH, OUT_PATH,
Dataset, create_input_dataset,
create_input_output_dataset)
ANALYSIS_PATH = 'analysis'   # h5 group that holds per-image analysis results
COMPRESSION = 'lzf'          # dataset compression filter passed to h5py
PIXEL_UNIT = Q_('pixel')


def QP_(V):
    """Create a pixel quantity (was a lambda assignment; PEP 8 E731)."""
    return Q_(V, 'pixel')


COMPRESSION_OPTS = None
class GroupedAnalysis(object):
    """Runs FPW slit/membrane measurements over every image in a Dataset and
    writes the per-image results (with physical units) under /analysis.

    NOTE(review): original indentation was lost in extraction; nesting below is
    reconstructed — verify against the upstream file.
    """

    def __init__(self, dataset: (str, Dataset), settings: dict = {}, scale_data: bool = True):
        # NOTE(review): mutable default ``settings={}`` is shared across calls —
        # safe only while callers never mutate it.
        self.ds = dataset if isinstance(dataset, Dataset) else Dataset(
            dataset, create_new=False)  # load an existing dataset from a file
        self.scaled = self.ds.is_scaled() and scale_data
        self.scaling = self.ds.get_dataset_scaling() if scale_data else None
        # human-readable unit string for stats output (pixel when unscaled)
        self.units = str((PIXEL_UNIT * self.scaling).units) if self.scaling is not None else str(PIXEL_UNIT.units)
        self.settings = settings
        self.threads = multiprocessing.cpu_count()

    def set_threads(self, num: int):
        """Set the number of worker threads used by run()."""
        self.threads = num

    def _make_dataset(self, group: hf.Group, name: str, data: (list, set, pint.Quantity, np.ndarray), dtype=np.double, cur_units: (str, pint.Quantity) = None, scale: bool = False) -> hf.Dataset:
        """Create (replacing any existing) a compressed dataset under *group*,
        optionally rescaling the data, and tag it with a ``units`` attribute."""
        if name in group:
            del group[name]  # remove the current dataset
        # convert basic types
        if isinstance(data, (list, set)):
            data = np.array(data, dtype=dtype)
        # auto scale dataset
        if scale:
            # determine current unit type
            if cur_units is None:
                if isinstance(data, pint.Quantity):
                    cur_units = data.units
            cur_units, data = self._scale_data(
                cur_units if cur_units is not None else PIXEL_UNIT, data)  # rescale the data
        # auto-assume pixel units if no conversion units were specified
        if cur_units is None:
            cur_units = PIXEL_UNIT
        dset = group.create_dataset(name, data=(data if isinstance(
            data, np.ndarray) else data.magnitude), dtype=dtype, compression=COMPRESSION, compression_opts=COMPRESSION_OPTS)
        dset.attrs['units'] = str(cur_units) if isinstance(cur_units, pint.Unit) else str(Q_(cur_units).units)
        return dset

    def _scale_data(self, cur_units: (str, pint.Quantity), data: (np.ndarray, pint.Quantity)) -> tuple:
        """Return ``(units, magnitudes)`` of *data* converted by the dataset
        scaling; pass-through when the dataset is unscaled."""
        if self.scaled and self.scaling is not None:
            # make sure there is data
            no_data = False
            if isinstance(data, np.ndarray):
                no_data = len(data) == 0
            else:
                no_data = len(data.magnitude) == 0
            # no point in scaling... (still compute the target units for empty data)
            if no_data:
                if cur_units is None and isinstance(data, pint.Quantity):
                    units = data.units
                elif cur_units is None:
                    units = PIXEL_UNIT
                else:
                    units = cur_units
                c_data = Q_(units) * self.scaling
            else:
                # attempt to rescale all the data
                c_data = (Q_(data, cur_units) if isinstance(
                    data, np.ndarray) else data) * self.scaling
            # make sure end scaling doesn't result in some weird pixel division
            if '/pixel' in str(c_data.units).replace(' ', ''):
                raise RuntimeError(
                    'Failed to scale data from ' + str(cur_units) + ' with ' + str(self.scaling) + ' resulting units ' + str(c_data.units) + 'proc' + str(Q_(data, cur_units)))
            # return new scaled data
            if no_data:
                return c_data.units, (data if isinstance(data, np.ndarray) else data.magnitude)
            return c_data.units, c_data.magnitude
        # nothing to do
        return cur_units, data

    def process_fpw(self, group_slits: hf.Group, group_membrane: hf.Group, data: np.ndarray):
        """Run FPW measurements on the last two layers of *data* and write the
        resulting point/distance arrays into the two h5 groups."""
        membrane = data[-2]  # second last = membrane edge layer
        slits = data[-1]  # last = slit layer
        # make the measurements
        settings = self.settings.get('fpw', {})
        results = make_fpw_measurements(
            membrane_layer=membrane,
            slit_layer=slits,
            draw=False,
            export=False,  # this is DISABLED for now as we're not using it
            settings=settings
        )
        # get the respective data
        data = results.get_data()
        # copy the data over to the output group
        # @TODO implement region references for each membrane group https://docs.h5py.org/en/stable/refs.html
        has_results = results.is_valid()
        all_points = []
        all_point_pairs = []
        all_arc_distances = []
        all_direct_distances = []
        all_membrane_ranges = []
        all_membrane_points = []
        all_membrane_distances = []
        # if not results.is_valid():
        #     cv2.imshow('mem', membrane* 255)
        #     cv2.imshow('slits', slits* 255)
        #     cv2.waitKey(0)
        if has_results:
            for mdata in data:
                all_points.append(mdata['points'])
                all_point_pairs.append(mdata['point_pairs'])
                all_arc_distances.append(mdata['arc_distances'])
                all_direct_distances.append(mdata['direct_distances'])
                all_membrane_ranges.append(mdata['membrane_ranges'])
                all_membrane_points.append(mdata['membrane_points'])
                all_membrane_distances.append(mdata['membrane_distance'])
        # combine the results and convert them to numpy arrays
        all_points = np.concatenate(all_points).astype(
            np.int32) if all_points else np.array([], np.int32)
        all_point_pairs = np.concatenate(all_point_pairs).astype(
            np.int32) if all_point_pairs else np.array([], np.int32)
        all_arc_distances = np.concatenate(all_arc_distances).astype(
            np.double) if all_arc_distances else np.array([], np.double)
        all_direct_distances = np.concatenate(all_direct_distances).astype(
            np.double) if all_direct_distances else np.array([], np.double)
        all_membrane_ranges = np.concatenate(all_membrane_ranges).astype(
            np.int32) if all_membrane_ranges else np.array([], np.int32)
        all_membrane_points = np.concatenate(all_membrane_points).astype(
            np.int32) if all_membrane_points else np.array([], np.int32)
        # create the h5 data
        group_slits.attrs['valid'] = has_results
        self._make_dataset(group_slits, 'slit_locs', all_points, dtype=np.int32)
        self._make_dataset(group_slits, 'slit_pairs', all_point_pairs, dtype=np.int32)
        self._make_dataset(group_slits, 'slit_arc_distances', all_arc_distances)
        self._make_dataset(group_slits, 'slit_direct_distances', all_direct_distances)
        self._make_dataset(group_slits, 'slit_membrane_ranges', all_membrane_ranges, dtype=np.int32)
        self._make_dataset(group_membrane, 'membrane_points', all_membrane_points, dtype=np.int32)
        self._make_dataset(group_membrane, 'membrane_distances', np.array(all_membrane_distances).astype(np.double))

    def process_single(self, output: hf.Group, name: str):
        """Analyze one segmented image dataset *name*, writing results under *output*."""
        # get the data
        data = self.ds.get(name, None)
        if data is None:
            raise RuntimeError(
                'Data located at the name of ' + name + ' has no data in it')
        # load the data into memory as a numpy array
        np_data = data[:]
        # create the results group
        rep_name = self.__fix_out_to_analysis(name)
        # set the output segmented image to have this analysis name
        data.attrs['analysis'] = rep_name
        # set the input image to have this analysis name
        self.ds.get(data.attrs['input']).attrs['analysis'] = rep_name
        # create the new group for the current file and add relevant attributes
        base_group = output.require_group(rep_name)
        base_group.attrs['output'] = data.name  # output dataset/group path
        # input dataset/group path
        base_group.attrs['input'] = data.attrs['input']
        slit_group = base_group.require_group('slits')
        edge_group = base_group.require_group('membrane_edges')
        # add the processed image data
        self.process_fpw(slit_group, edge_group, np_data)

    def __analysis_process_consumer(self, _input: queue.Queue):
        """Worker-thread body: process queued items until a None sentinel arrives."""
        while True:
            item = _input.get()
            # if None let's exit the thread
            if item is None:
                break
            # process the single data
            self.process_single(item['output'], item['out_data'])
            _input.task_done()

    def run(self, blocking: bool = True, force: bool = False):
        """Analyze every input image on a thread pool; returns the names that
        had no segmentation output and were therefore skipped."""
        # let's determine if the dataset is dirty or not (as in file changes have been made) to continue
        if not self.ds.is_dirty() and not force and ANALYSIS_PATH in self.ds.data:
            return []  # everything has already been processed
        # get all input files
        input_names = self.ds.get_np('labels/input')
        # create the output groups
        output = self.ds.require_group(ANALYSIS_PATH)
        # start the threads
        threads = []
        in_queue = queue.Queue()
        for _ in range(self.threads):
            thread = threading.Thread(
                target=self.__analysis_process_consumer, args=(in_queue,))
            thread.start()
            threads.append(thread)
        # process each input file
        unprocessed = []
        for name in input_names:
            name = name.decode('utf-8')  # convert to string
            # get the input object
            in_data = self.ds.get(name)
            # get the path to the output data (usually the segmented data)
            out_data = in_data.attrs.get('output', None)
            # make sure there is output data
            if out_data is None:
                unprocessed.append(name)
                continue
            elif len(out_data) == 0:
                unprocessed.append(name)
                continue
            # push the queue to process this file
            in_queue.put({
                'output': output,
                'out_data': out_data
            })
        # kill the remaining threads (end of Queue)
        for i in range(self.threads * 2):
            in_queue.put(None)
        # if blocking, then wait for all of them to finish
        if blocking:
            for thread in threads:
                thread.join()
        # let's mark the file as no longer being dirty
        self.ds.set_dirty(False)
        return unprocessed

    def get_data_from_group(self, group_or_data: (str, bytes, hf.Group, hf.Dataset) = None, name: str = 'slit_arc_distances', with_dask: bool = False, with_numpy: bool = True):
        """Collect every dataset called *name* under the group, optionally
        concatenated into a dask or numpy array."""
        res = self.ds.get_all_ds_recursive(group_or_data, name=name)
        if with_dask:
            return self.data_to_dask(res)
        elif with_numpy:
            return self.data_to_np(res)
        return res

    def __fix_out_to_analysis(self, path: (str, bytes, hf.Group)):
        """Map an output/data h5 path to its equivalent /analysis path."""
        if isinstance(path, bytes):
            path = path.decode('utf-8')
        elif isinstance(path, hf.Group):
            path = path.name
        if path.endswith('/' + OUT_PATH) or path.endswith('/' + DATA_PATH):
            path += '/'  # add ending to be replaced
        return path.replace('/' + OUT_PATH + '/', '/' + ANALYSIS_PATH + '/').replace('/' + DATA_PATH + '/', '/' + ANALYSIS_PATH + '/')

    def data_to_dask(self, data: list, axis: int = 0):
        """Concatenate a list of h5 datasets into one lazy dask array."""
        return da.concatenate([da.from_array(d) for d in data], axis=axis)

    def data_to_np(self, data: list, axis: int = 0):
        """Concatenate a list of arrays into one numpy array (empty-safe)."""
        if len(data) == 0:
            return np.array(data, dtype=np.double)
        return np.concatenate(data, axis=axis)

    def get_stats_on_data(self, group_or_data: (str, bytes, hf.Group, hf.Dataset) = None, name: str = 'slit_arc_distances') -> Statistics:
        """Compute summary statistics over every *name* dataset under the group."""
        data = self.get_data_from_group(self.__fix_out_to_analysis(
            group_or_data), name, with_dask=False, with_numpy=True).astype(np.double)
        stats = compute_statistics(data)
        return stats

    def get_all_stats(self, file_name: str, group: (str, bytes, hf.Group, ) = None) -> dict:
        """Return a stats dict (arc + direct slit distances) for one group."""
        return {
            'file': file_name,
            'units': self.units,
            'group': group if isinstance(group, (str, bytes)) else group.name,
            'slit_arc_distances': self.get_stats_on_data(group, 'slit_arc_distances'),
            'slit_direct_distances': self.get_stats_on_data(group, 'slit_direct_distances')
        }

    def get_stats_at_depth(self, depth: int = 0, stats_type: str='groups'):
        """ Scans all subgroups to get their collective stats """
        path = posixpath.join(LABEL_PATH, 'depth_out_' + stats_type)
        if str(depth) not in self.ds.get(path):
            raise RuntimeError('The depth ' + str(depth) + ' does not exist')
        # convert groups to their respective analysis equivalents
        groups = [self.__fix_out_to_analysis(d) for d in self.ds.get_str(
            posixpath.join(path, str(depth)))]
        # get the stats for each group
        return [self.get_all_stats(os.path.basename(group), group) for group in groups]

    def get_stats_at_depths(self, depths: list, stats_type: str='groups'):
        """ Scans multiple sub-groups and gets all of their stats """
        return [{'depth': int(d), 'stats': self.get_stats_at_depth(int(d), stats_type=stats_type)} for d in depths]
# def analyze_fpw(data: np.ndarray) -> dict:
# in_image = cv2.imread(input, cv2.IMREAD_GRAYSCALE)
# layers = tifffile.imread(output)
# layers = np.ascontiguousarray(layers)
# layers_uint = (layers > 50).astype(np.uint8)
# membrane = layers_uint[-2]
# slits = layers_uint[-1]
# # make the measurements
# fpw = make_fpw_measurements(membrane, slits, draw=False, export=True)
# back_layer = ResultLayer('background', 'Background')
# back_layer.draw_image(in_image, in_image.shape[::-1])
# exports = [back_layer]
# if fpw.is_valid():
# exports += fpw.get_export()
# basic = os.path.splitext(os.path.basename(input))[0]
# exp.write_export(os.path.join(SAVE_OUTPUT, basic + '.html'), os.path.basename(input), exports)
# return fpw
def test():
    """Smoke test: build a dataset from two local folders, run the grouped
    analysis and print the depth-0 statistics."""
    scaling = {
        'ground truth project\\fabry - 09-0598\\09-0598 blk 1-1\\surs\\09--_4339.tif': '10 nm/pixel',
    }
    dset = create_input_output_dataset(
        'C:\\Users\\smerk\\Documents\\Ground Truth Project\\Fabry - 09-0598\\09-0598 blk 1-1\\surs',
        'C:\\Users\\smerk\\Documents\\Ground Truth Project\\Fabry - 09-0598\\09-0598 blk 1-1\\out_class',
        new_file=False, keep_existing=True,
        scaling_data=scaling)
    sset = dset.save_to_scaled_dataset()
    analysis = GroupedAnalysis(sset)
    unprocessed = analysis.run()
    stats = analysis.get_stats_at_depth(0)
    dset.close()
    print(stats)
if __name__ == '__main__':
    # ad-hoc smoke-test entry point (profiling hooks left commented out)
    print('starting')
    # import cProfile
    # cProfile.run('test()', 'profile.prof')
    test()
    print('done')
|
test8.py | import multiprocessing
import time
def worker(s, i):
    """Acquire semaphore *s*, hold it for *i* seconds, then release it,
    logging the acquire/release transitions."""
    with s:  # acquire on entry, release on exit — same order as before
        name = multiprocessing.current_process().name
        print(name + "acquire")
        time.sleep(i)
        print(name + "release\n")
if __name__ == "__main__":
s = multiprocessing.Semaphore(2)
for i in range(5):
p = multiprocessing.Process(target=worker, args=(s, i * 2))
p.start() |
dbus_digitalinputs.py | #!/usr/bin/python -u
import sys, os
import signal
from threading import Thread
from select import select, epoll, EPOLLPRI
from functools import partial
from collections import namedtuple
from argparse import ArgumentParser
import traceback
sys.path.insert(1, os.path.join(os.path.dirname(__file__), 'ext', 'velib_python'))
from dbus.mainloop.glib import DBusGMainLoop
import dbus
import gobject
from vedbus import VeDbusService
from settingsdevice import SettingsDevice
VERSION = '0.5'
# Pulse counters wrap around at the largest 32-bit signed value.
MAXCOUNT = 2**31-1
# Milliseconds between periodic saves of the pulse counters to settings.
SAVEINTERVAL = 60000
INPUT_FUNCTION_COUNTER = 1
INPUT_FUNCTION_INPUT = 2
# Pair of display texts for the two logical states of an input.
Translation = namedtuple('Translation', ['no', 'yes'])
# Only append at the end
INPUTTYPES = [
    'Disabled',
    'Pulse meter',
    'Door',
    'Bilge pump',
    'Bilge alarm',
    'Burglar alarm',
    'Smoke alarm',
    'Fire alarm',
    'CO2 alarm',
    'Generator'
]
# Translations. The text will be used only for GetText, it will be translated
# in the gui.  Indexed by PinAlarm.translation; see PinAlarm.get_state for
# how the /State value encodes (translation row, state column).
TRANSLATIONS = [
    Translation('low', 'high'),
    Translation('off', 'on'),
    Translation('no', 'yes'),
    Translation('open', 'closed'),
    Translation('ok', 'alarm'),
    Translation('running', 'stopped')
]
class SystemBus(dbus.bus.BusConnection):
    """ Connection to the D-Bus system bus. """
    def __new__(cls):
        return dbus.bus.BusConnection.__new__(cls, dbus.bus.BusConnection.TYPE_SYSTEM)
class SessionBus(dbus.bus.BusConnection):
    """ Connection to the D-Bus session bus. """
    def __new__(cls):
        return dbus.bus.BusConnection.__new__(cls, dbus.bus.BusConnection.TYPE_SESSION)
class BasePulseCounter(object):
    """ Common base for pulse counters; see DebugPulseCounter (simulation)
    and EpollPulseCounter (real gpios) for the implementations. """
    pass
class DebugPulseCounter(BasePulseCounter):
    """ Simulated pulse counter used in --debug mode: instead of watching
    real gpios it yields an endless alternating 0/1 level for every
    registered gpio. """
    def __init__(self):
        self.gpiomap = {}
    def register(self, path, gpio):
        self.gpiomap[gpio] = None
        return 0  # simulated initial level is always low
    def unregister(self, gpio):
        del self.gpiomap[gpio]
    def registered(self, gpio):
        return gpio in self.gpiomap
    def __call__(self):
        # Generator yielding (gpio, level) pairs forever, toggling the level
        # each round and spreading the 0.25s cycle over all gpios.
        from itertools import cycle
        from time import sleep
        for level in cycle([0, 1]):
            gpios = self.gpiomap.keys()
            for gpio in gpios:
                yield gpio, level
                sleep(0.25/len(self.gpiomap))
class EpollPulseCounter(BasePulseCounter):
    """ Watches sysfs gpio 'value' files via epoll and yields
    (gpio, level) whenever an edge interrupt fires. """
    def __init__(self):
        self.fdmap = {}    # file descriptor -> gpio number
        self.gpiomap = {}  # gpio number -> open 'value' file object
        self.ob = epoll()
    def register(self, path, gpio):
        path = os.path.realpath(path)
        # Set up gpio for rising edge interrupts
        with open(os.path.join(path, 'edge'), 'ab') as fp:
            fp.write('both')
        fp = open(os.path.join(path, 'value'), 'rb')
        level = int(fp.read()) # flush it in case it's high at startup
        self.fdmap[fp.fileno()] = gpio
        self.gpiomap[gpio] = fp
        # EPOLLPRI: sysfs gpio edge events arrive as priority data.
        self.ob.register(fp, EPOLLPRI)
        return level
    def unregister(self, gpio):
        fp = self.gpiomap[gpio]
        self.ob.unregister(fp)
        del self.gpiomap[gpio]
        del self.fdmap[fp.fileno()]
        fp.close()
    def registered(self, gpio):
        return gpio in self.gpiomap
    def __call__(self):
        # Generator yielding (gpio, level) for every edge event.
        while True:
            # We have a timeout of 1 second on the poll, because poll() only
            # looks at files in the epoll object at the time poll() was called.
            # The timeout means we let other files (added via calls to
            # register/unregister) into the loop at least that often.
            for fd, evt in self.ob.poll(1):
                os.lseek(fd, 0, os.SEEK_SET)
                v = os.read(fd, 1)
                yield self.fdmap[fd], int(v)
class HandlerMaker(type):
    """ Meta-class for keeping track of all extended classes. """
    def __init__(cls, name, bases, attrs):
        # The first class created (the base PinHandler) receives the shared
        # registry; every later subclass is recorded under its type_id so
        # PinHandler.createHandler can look it up.
        if not hasattr(cls, 'handlers'):
            cls.handlers = {}
        else:
            cls.handlers[cls.type_id] = cls
class PinHandler(object):
    """ Base handler for a single digital input.  Publishes a D-Bus service
    for the pin and tracks its level and pulse count.  Subclasses register
    themselves by type_id through the HandlerMaker metaclass. """
    product_id = 0xFFFF
    _product_name = 'Generic GPIO'
    dbus_name = "digital"
    __metaclass__ = HandlerMaker
    def __init__(self, bus, base, path, gpio, settings):
        self.gpio = gpio
        self.path = path
        self.bus = bus
        self.settings = settings
        self._level = 0 # Remember last state
        self.service = VeDbusService(
            "{}.{}.input{:02d}".format(base, self.dbus_name, gpio), bus=bus)
        # Add objects required by ve-api
        self.service.add_path('/Management/ProcessName', __file__)
        self.service.add_path('/Management/ProcessVersion', VERSION)
        self.service.add_path('/Management/Connection', path)
        self.service.add_path('/DeviceInstance', gpio)
        self.service.add_path('/ProductId', self.product_id)
        self.service.add_path('/ProductName', self.product_name)
        self.service.add_path('/Connected', 1)
        # Custom name setting
        def _change_name(p, v):
            # This should fire a change event that will update product_name
            # below.
            settings['name'] = v
            return True
        self.service.add_path('/CustomName', settings['name'], writeable=True,
            onchangecallback=_change_name)
        # We'll count the pulses for all types of services
        self.service.add_path('/Count', value=settings['count'])
    @property
    def product_name(self):
        # A custom name, when set, overrides the class product name.
        return self.settings['name'] or self._product_name
    @product_name.setter
    def product_name(self, v):
        # Some pin types don't have an associated service (Disabled pins for
        # example)
        if self.service is not None:
            self.service['/ProductName'] = v or self._product_name
    def deactivate(self):
        # Persist the count, then tear down the D-Bus service.
        self.save_count()
        self.service.__del__()
        del self.service
        self.service = None
    @property
    def level(self):
        return self._level
    @level.setter
    def level(self, l):
        # Normalise any truthy value to 0/1.
        self._level = int(bool(l))
    def toggle(self, level):
        # Only increment Count on rising edge.
        if level and level != self._level:
            self.service['/Count'] = (self.service['/Count']+1) % MAXCOUNT
        self._level = level
    def refresh(self):
        """ Toggle state to last remembered state. This is called if settings
        are changed so the Service can recalculate paths. """
        self.toggle(self._level)
    def save_count(self):
        # Copy the live count back into the persistent settings.
        if self.service is not None:
            self.settings['count'] = self.count
    @property
    def active(self):
        return self.service is not None
    @property
    def count(self):
        return self.service['/Count']
    @count.setter
    def count(self, v):
        self.service['/Count'] = v
    @classmethod
    def createHandler(cls, _type, *args, **kwargs):
        # Factory: instantiate the subclass registered for this type_id,
        # or return None for unknown types.
        if _type in cls.handlers:
            return cls.handlers[_type](*args, **kwargs)
        return None
class DisabledPin(PinHandler):
    """ Place holder for a disabled pin. """
    _product_name = 'Disabled'
    type_id = 0
    def __init__(self, bus, base, path, gpio, settings):
        # Deliberately does not call the base __init__: a disabled pin
        # publishes no D-Bus service.
        self.service = None
        self.bus = bus
        self.settings = settings
        self._level = 0 # Remember last state
    def deactivate(self):
        pass
    def toggle(self, level):
        # Track the level but never count pulses.
        self._level = level
    def save_count(self):
        # Do nothing
        pass
    @property
    def count(self):
        # No live service; report the persisted count.
        return self.settings['count']
    @count.setter
    def count(self, v):
        pass
    def refresh(self):
        pass
class VolumeCounter(PinHandler):
    """ Pulse meter input.  /Aggregate exposes count * rate, i.e. the total
    measured volume in cubic meters. """
    product_id = 0xA163
    _product_name = "Pulse meter"
    dbus_name = "pulsemeter"
    type_id = 1
    def __init__(self, bus, base, path, gpio, settings):
        super(VolumeCounter, self).__init__(bus, base, path, gpio, settings)
        self.service.add_path('/Aggregate', value=self.count*self.rate,
            gettextcallback=lambda p, v: (str(v) + ' cubic meter'))
    @property
    def rate(self):
        # Volume per pulse, from the 'Multiplier' setting.
        return self.settings['rate']
    def toggle(self, level):
        # Base class counts the rising edge; recompute the aggregate volume.
        super(VolumeCounter, self).toggle(level)
        self.service['/Aggregate'] = self.count * self.rate
class PinAlarm(PinHandler):
    """ Generic digital input with translated state and an optional alarm
    flag.  Subclasses pick a TRANSLATIONS row via 'translation'. """
    product_id = 0xA164
    _product_name = "Digital input"
    dbus_name = "digitalinput"
    type_id = 0xFF
    translation = 0 # low, high
    def __init__(self, bus, base, path, gpio, settings):
        super(PinAlarm, self).__init__(bus, base, path, gpio, settings)
        self.service.add_path('/InputState', value=0)
        # /State encodes 2 * translation + state; GetText recovers the
        # row/column with v/2 and v%2 (Python 2 integer division).
        self.service.add_path('/State', value=self.get_state(0),
            gettextcallback=lambda p, v: TRANSLATIONS[v/2][v%2])
        self.service.add_path('/Alarm', value=self.get_alarm_state(0))
        # Also expose the type
        self.service.add_path('/Type', value=self.type_id,
            gettextcallback=lambda p, v: INPUTTYPES[v])
    def toggle(self, level):
        super(PinAlarm, self).toggle(level)
        self.service['/InputState'] = bool(level)*1
        self.service['/State'] = self.get_state(level)
        # Ensure that the alarm flag resets if the /AlarmSetting config option
        # disappears.
        self.service['/Alarm'] = self.get_alarm_state(level)
    def get_state(self, level):
        # Apply the user's invert setting before encoding.
        state = level ^ self.settings['invert']
        return 2 * self.translation + state
    def get_alarm_state(self, level):
        # 0 = ok, 2 = alarm; only raised when the alarm setting is enabled.
        return 2 * bool(
            (level ^ self.settings['invertalarm']) and self.settings['alarm'])
# Various types of things we might want to monitor
class DoorSensor(PinAlarm):
    """ Door contact input; states shown as open/closed. """
    _product_name = "Door alarm"
    type_id = 2
    translation = 3 # open, closed
class BilgePump(PinAlarm):
    """ Bilge pump input; states shown as off/on. """
    _product_name = "Bilge pump"
    type_id = 3
    translation = 1 # off, on
class BilgeAlarm(PinAlarm):
    """ Bilge alarm input; states shown as ok/alarm. """
    _product_name = "Bilge alarm"
    type_id = 4
    translation = 4 # ok, alarm
class BurglarAlarm(PinAlarm):
    """ Burglar alarm input; states shown as ok/alarm. """
    _product_name = "Burglar alarm"
    type_id = 5
    translation = 4 # ok, alarm
class SmokeAlarm(PinAlarm):
    """ Smoke alarm input; states shown as ok/alarm. """
    _product_name = "Smoke alarm"
    type_id = 6
    translation = 4 # ok, alarm
class FireAlarm(PinAlarm):
    """ Fire alarm input; states shown as ok/alarm. """
    _product_name = "Fire alarm"
    type_id = 7
    translation = 4 # ok, alarm
class CO2Alarm(PinAlarm):
    """ CO2 alarm input; states shown as ok/alarm. """
    _product_name = "CO2 alarm"
    type_id = 8
    translation = 4 # ok, alarm
class Generator(PinAlarm):
    """ Generator input; states shown as running/stopped. """
    _product_name = "Generator"
    type_id = 9
    translation = 5 # running, stopped
def dbusconnection():
    # Use the session bus when one is advertised in the environment
    # (typically development); otherwise the system bus.
    return SessionBus() if 'DBUS_SESSION_BUS_ADDRESS' in os.environ else SystemBus()
def main():
    # Parse arguments, pick a pulse counter (real epoll-based, or simulated
    # with --debug), create a SettingsDevice plus PinHandler per input, then
    # run the glib mainloop while a worker thread feeds gpio transitions to
    # the handlers.
    parser = ArgumentParser(description=sys.argv[0])
    parser.add_argument('--servicebase',
            help='Base service name on dbus, default is com.victronenergy',
            default='com.victronenergy')
    parser.add_argument('--debug',
            help='Enable debug counter, this ignores the real gpios and simulates input',
            default=False, action="store_true")
    parser.add_argument('inputs', nargs='+', help='Path to digital input')
    args = parser.parse_args()
    if args.debug:
        PulseCounter = DebugPulseCounter
    else:
        PulseCounter = EpollPulseCounter
    DBusGMainLoop(set_as_default=True)
    # Keep track of enabled services
    services = {}
    # Inputs are numbered from 1; the number doubles as the gpio id.
    inputs = dict(enumerate(args.inputs, 1))
    pulses = PulseCounter() # callable that iterates over pulses
    def register_gpio(path, gpio, bus, settings):
        _type = settings['inputtype']
        print "Registering GPIO {} for type {}".format(gpio, _type)
        handler = PinHandler.createHandler(_type,
            bus, args.servicebase, path, gpio, settings)
        services[gpio] = handler
        # Only monitor if enabled
        if _type > 0:
            handler.level = pulses.register(path, gpio)
            handler.refresh()
    def unregister_gpio(gpio):
        print "unRegistering GPIO {}".format(gpio)
        pulses.unregister(gpio)
        services[gpio].deactivate()
    def handle_setting_change(inp, setting, old, new):
        # Called by SettingsDevice whenever one of this input's settings
        # changes on the D-Bus.
        if setting == 'inputtype':
            if new:
                # Get current bus and settings objects, to be reused
                service = services[inp]
                bus, settings = service.bus, service.settings
                # Input enabled. If already enabled, unregister the old one first.
                if pulses.registered(inp):
                    unregister_gpio(inp)
                register_gpio(inputs[inp], inp, bus, settings)
            elif old:
                # Input disabled
                unregister_gpio(inp)
        elif setting in ('rate', 'invert', 'alarm', 'invertalarm'):
            services[inp].refresh()
        elif setting == 'name':
            services[inp].product_name = new
        elif setting == 'count':
            # Don't want this triggered on a period save, so only execute
            # if it has changed.
            v = int(new)
            s = services[inp]
            if s.count != v:
                s.count = v
                s.refresh()
    for inp, pth in inputs.items():
        supported_settings = {
            'inputtype': ['/Settings/DigitalInput/{}/Type'.format(inp), 0, 0, len(INPUTTYPES)],
            'rate': ['/Settings/DigitalInput/{}/Multiplier'.format(inp), 0.001, 0, 1.0],
            'count': ['/Settings/DigitalInput/{}/Count'.format(inp), 0, 0, MAXCOUNT, 1],
            'invert': ['/Settings/DigitalInput/{}/InvertTranslation'.format(inp), 0, 0, 1],
            'invertalarm': ['/Settings/DigitalInput/{}/InvertAlarm'.format(inp), 0, 0, 1],
            'alarm': ['/Settings/DigitalInput/{}/AlarmSetting'.format(inp), 0, 0, 1],
            'name': ['/Settings/DigitalInput/{}/CustomName'.format(inp), '', '', ''],
        }
        bus = dbusconnection()
        sd = SettingsDevice(bus, supported_settings, partial(handle_setting_change, inp), timeout=10)
        register_gpio(pth, inp, bus, sd)
    def poll(mainloop):
        from time import time
        idx = 0
        try:
            for inp, level in pulses():
                # epoll object only resyncs once a second. We may receive
                # a pulse for something that's been deregistered.
                try:
                    services[inp].toggle(level)
                except KeyError:
                    continue
        except:
            # Any unexpected error in the poller must bring the whole
            # service down rather than leave it half-alive.
            traceback.print_exc()
            mainloop.quit()
    # Need to run the gpio polling in separate thread. Pass in the mainloop so
    # the thread can kill us if there is an exception.
    gobject.threads_init()
    mainloop = gobject.MainLoop()
    poller = Thread(target=lambda: poll(mainloop))
    poller.daemon = True
    poller.start()
    # Periodically save the counter
    def save_counters():
        for inp in inputs:
            services[inp].save_count()
        return True  # keep the glib timeout scheduled
    gobject.timeout_add(SAVEINTERVAL, save_counters)
    # Save counter on shutdown
    signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
    try:
        mainloop.run()
    except KeyboardInterrupt:
        pass
    finally:
        save_counters()
|
player.py | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import asyncio
import audioop
import io
import json
import logging
import re
import shlex
import subprocess
import sys
import threading
import time
import traceback
from typing import (IO, TYPE_CHECKING, Any, Callable, Generic, Optional, Tuple,
Type, TypeVar, Union)
from .errors import ClientException
from .oggparse import OggStream
from .opus import Encoder as OpusEncoder
from .utils import MISSING
if TYPE_CHECKING:
    from .voice_client import VoiceClient
# Type variables used by the factory methods below.
AT = TypeVar('AT', bound='AudioSource')
FT = TypeVar('FT', bound='FFmpegOpusAudio')
_log = logging.getLogger(__name__)
__all__ = (
    'AudioSource',
    'PCMAudio',
    'FFmpegAudio',
    'FFmpegPCMAudio',
    'FFmpegOpusAudio',
    'PCMVolumeTransformer',
)
# Popen creation flag that suppresses the console window for spawned
# ffmpeg processes on Windows; zero (no-op) elsewhere.
CREATE_NO_WINDOW: int
if sys.platform != 'win32':
    CREATE_NO_WINDOW = 0
else:
    CREATE_NO_WINDOW = 0x08000000
class AudioSource:
    """Base class for an audio stream fed to a voice client.

    A source yields either pre-encoded Opus frames or raw 16-bit 48KHz
    stereo PCM; :meth:`is_opus` tells the consumer which one it is.

    .. warning::

        Reads happen on a separate audio thread, not the event loop.
    """

    def read(self) -> bytes:
        """Return the next 20ms of audio.

        Subclasses must implement this.  An empty :term:`py:bytes-like
        object` signals that the stream is finished.

        When :meth:`~AudioSource.is_opus` returns ``True`` the frame must be
        Opus encoded; otherwise it must be 16-bit 48KHz stereo PCM, roughly
        3,840 bytes per 20ms frame.

        Returns
        --------
        :class:`bytes`
            A bytes like object that represents the PCM or Opus data.
        """
        raise NotImplementedError

    def is_opus(self) -> bool:
        """Return ``True`` when :meth:`read` yields Opus-encoded frames."""
        return False

    def cleanup(self) -> None:
        """Release resources held by the source once playback ends.

        Useful for clearing buffer data or processes after
        it is done playing audio.
        """
        pass

    def __del__(self) -> None:
        # Best-effort cleanup when the source is garbage collected.
        self.cleanup()
class PCMAudio(AudioSource):
    """Raw 16-bit 48KHz stereo PCM pulled from a file-like object.

    Attributes
    -----------
    stream: :term:`py:file object`
        A file-like object that reads byte data representing raw PCM.
    """

    def __init__(self, stream: io.BufferedIOBase) -> None:
        self.stream: io.BufferedIOBase = stream

    def read(self) -> bytes:
        frame = self.stream.read(OpusEncoder.FRAME_SIZE)
        # A short read means the stream is exhausted; signal end-of-source.
        return frame if len(frame) == OpusEncoder.FRAME_SIZE else b''
class FFmpegAudio(AudioSource):
    """Represents an FFmpeg (or AVConv) based AudioSource.

    User created AudioSources using FFmpeg differently from how :class:`FFmpegPCMAudio` and
    :class:`FFmpegOpusAudio` work should subclass this.

    .. versionadded:: 1.3
    """

    def __init__(self, source: Union[str, io.BufferedIOBase], *, executable: str = 'ffmpeg', args: Any, **subprocess_kwargs: Any):
        # A string source is passed to ffmpeg on the command line, so it is
        # mutually exclusive with piping bytes into ffmpeg's stdin.
        piping = subprocess_kwargs.get('stdin') == subprocess.PIPE
        if piping and isinstance(source, str):
            raise TypeError("parameter conflict: 'source' parameter cannot be a string when piping to stdin")

        args = [executable, *args]
        kwargs = {'stdout': subprocess.PIPE}
        kwargs.update(subprocess_kwargs)

        self._process: subprocess.Popen = self._spawn_process(args, **kwargs)
        self._stdout: IO[bytes] = self._process.stdout  # type: ignore
        # Fix: this annotation previously referenced the undefined name
        # ``Bytes`` (masked at runtime only by lazy annotations).
        self._stdin: Optional[IO[bytes]] = None
        self._pipe_thread: Optional[threading.Thread] = None

        if piping:
            n = f'popen-stdin-writer:{id(self):#x}'
            self._stdin = self._process.stdin
            # Feed the file-like source into ffmpeg's stdin off-thread.
            self._pipe_thread = threading.Thread(target=self._pipe_writer, args=(source,), daemon=True, name=n)
            self._pipe_thread.start()

    def _spawn_process(self, args: Any, **subprocess_kwargs: Any) -> subprocess.Popen:
        # Launch ffmpeg; translate spawn failures into ClientException so
        # callers have a single error type to handle.
        process = None
        try:
            process = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, **subprocess_kwargs)
        except FileNotFoundError:
            executable = args.partition(' ')[0] if isinstance(args, str) else args[0]
            raise ClientException(executable + ' was not found.') from None
        except subprocess.SubprocessError as exc:
            raise ClientException(f'Popen failed: {exc.__class__.__name__}: {exc}') from exc
        else:
            return process

    def _kill_process(self) -> None:
        proc = self._process
        if proc is MISSING:
            return

        _log.info('Preparing to terminate ffmpeg process %s.', proc.pid)

        try:
            proc.kill()
        except Exception:
            _log.exception('Ignoring error attempting to kill ffmpeg process %s', proc.pid)

        if proc.poll() is None:
            _log.info('ffmpeg process %s has not terminated. Waiting to terminate...', proc.pid)
            proc.communicate()
            _log.info('ffmpeg process %s should have terminated with a return code of %s.', proc.pid, proc.returncode)
        else:
            _log.info('ffmpeg process %s successfully terminated with return code of %s.', proc.pid, proc.returncode)

    def _pipe_writer(self, source: io.BufferedIOBase) -> None:
        # Runs on the pipe thread: copy the source into ffmpeg's stdin until
        # the source is exhausted or the process goes away.
        while self._process:
            # arbitrarily large read size
            data = source.read(8192)
            if not data:
                self._process.terminate()
                return
            try:
                self._stdin.write(data)
            except Exception:
                _log.debug('Write error for %s, this is probably not a problem', self, exc_info=True)
                # at this point the source data is either exhausted or the process is fubar
                self._process.terminate()
                return

    def cleanup(self) -> None:
        self._kill_process()
        self._process = self._stdout = self._stdin = MISSING
class FFmpegPCMAudio(FFmpegAudio):
    """An audio source from FFmpeg (or AVConv) that produces raw PCM.

    This launches a sub-process for the given input and reads 16-bit 48KHz
    stereo PCM from its stdout.

    .. warning::

        You must have the ffmpeg or avconv executable in your path environment
        variable in order for this to work.

    Parameters
    ------------
    source: Union[:class:`str`, :class:`io.BufferedIOBase`]
        The input that ffmpeg will take and convert to PCM bytes.
        If ``pipe`` is ``True`` then this is a file-like object that is
        passed to the stdin of ffmpeg.
    executable: :class:`str`
        The executable name (and path) to use. Defaults to ``ffmpeg``.
    pipe: :class:`bool`
        If ``True``, denotes that ``source`` parameter will be passed
        to the stdin of ffmpeg. Defaults to ``False``.
    stderr: Optional[:term:`py:file object`]
        A file-like object (or ``subprocess.PIPE``) handed to the Popen
        constructor.
    before_options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
    options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg after the ``-i`` flag.

    Raises
    --------
    ClientException
        The subprocess failed to be created.
    """

    def __init__(
        self,
        source: Union[str, io.BufferedIOBase],
        *,
        executable: str = 'ffmpeg',
        pipe: bool = False,
        stderr: Optional[IO[str]] = None,
        before_options: Optional[str] = None,
        options: Optional[str] = None
    ) -> None:
        subprocess_kwargs = {'stdin': subprocess.PIPE if pipe else subprocess.DEVNULL, 'stderr': stderr}
        cmd = shlex.split(before_options) if isinstance(before_options, str) else []
        cmd += ['-i', '-' if pipe else source]
        # Decode to signed 16-bit little-endian PCM, 48KHz stereo.
        cmd += ['-f', 's16le', '-ar', '48000', '-ac', '2', '-loglevel', 'warning']
        if isinstance(options, str):
            cmd += shlex.split(options)
        cmd.append('pipe:1')
        super().__init__(source, executable=executable, args=cmd, **subprocess_kwargs)

    def read(self) -> bytes:
        frame = self._stdout.read(OpusEncoder.FRAME_SIZE)
        # A short read marks the end of the stream.
        return frame if len(frame) == OpusEncoder.FRAME_SIZE else b''

    def is_opus(self) -> bool:
        # Raw PCM output; the consumer must Opus-encode it.
        return False
class FFmpegOpusAudio(FFmpegAudio):
    """An audio source from FFmpeg (or AVConv).

    This launches a sub-process to a specific input file given. However, rather than
    producing PCM packets like :class:`FFmpegPCMAudio` does that need to be encoded to
    Opus, this class produces Opus packets, skipping the encoding step done by the library.

    Alternatively, instead of instantiating this class directly, you can use
    :meth:`FFmpegOpusAudio.from_probe` to probe for bitrate and codec information. This
    can be used to opportunistically skip pointless re-encoding of existing Opus audio data
    for a boost in performance at the cost of a short initial delay to gather the information.
    The same can be achieved by passing ``copy`` to the ``codec`` parameter, but only if you
    know that the input source is Opus encoded beforehand.

    .. versionadded:: 1.3

    .. warning::

        You must have the ffmpeg or avconv executable in your path environment
        variable in order for this to work.

    Parameters
    ------------
    source: Union[:class:`str`, :class:`io.BufferedIOBase`]
        The input that ffmpeg will take and convert to Opus bytes.
        If ``pipe`` is ``True`` then this is a file-like object that is
        passed to the stdin of ffmpeg.
    bitrate: :class:`int`
        The bitrate in kbps to encode the output to. Defaults to ``128``.
    codec: Optional[:class:`str`]
        The codec to use to encode the audio data. Normally this would be
        just ``libopus``, but is used by :meth:`FFmpegOpusAudio.from_probe` to
        opportunistically skip pointlessly re-encoding Opus audio data by passing
        ``copy`` as the codec value. Any values other than ``copy``, ``opus``, or
        ``libopus`` will be considered ``libopus``. Defaults to ``libopus``.

        .. warning::

            Do not provide this parameter unless you are certain that the audio input is
            already Opus encoded. For typical use :meth:`FFmpegOpusAudio.from_probe`
            should be used to determine the proper value for this parameter.

    executable: :class:`str`
        The executable name (and path) to use. Defaults to ``ffmpeg``.
    pipe: :class:`bool`
        If ``True``, denotes that ``source`` parameter will be passed
        to the stdin of ffmpeg. Defaults to ``False``.
    stderr: Optional[:term:`py:file object`]
        A file-like object to pass to the Popen constructor.
        Could also be an instance of ``subprocess.PIPE``.
    before_options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
    options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg after the ``-i`` flag.

    Raises
    --------
    ClientException
        The subprocess failed to be created.
    """

    def __init__(
        self,
        source: Union[str, io.BufferedIOBase],
        *,
        bitrate: int = 128,
        codec: Optional[str] = None,
        executable: str = 'ffmpeg',
        pipe=False,
        stderr=None,
        before_options=None,
        options=None,
    ) -> None:
        args = []
        subprocess_kwargs = {'stdin': subprocess.PIPE if pipe else subprocess.DEVNULL, 'stderr': stderr}

        if isinstance(before_options, str):
            args.extend(shlex.split(before_options))

        args.append('-i')
        args.append('-' if pipe else source)

        # 'copy' and the Opus codec names mean the input is already Opus and
        # can be stream-copied; anything else is re-encoded with libopus.
        # Fix: an explicit codec='copy' was previously coerced to 'libopus',
        # contradicting the documented contract above.
        codec = 'copy' if codec in ('opus', 'libopus', 'copy') else 'libopus'

        args.extend(('-map_metadata', '-1',
                     '-f', 'opus',
                     '-c:a', codec,
                     '-ar', '48000',
                     '-ac', '2',
                     '-b:a', f'{bitrate}k',
                     '-loglevel', 'warning'))

        if isinstance(options, str):
            args.extend(shlex.split(options))

        args.append('pipe:1')

        super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
        # ffmpeg emits an Ogg container; iterate its packets lazily.
        self._packet_iter = OggStream(self._stdout).iter_packets()

    @classmethod
    async def from_probe(
        cls: Type[FT],
        source: str,
        *,
        method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
        **kwargs: Any,
    ) -> FT:
        """|coro|

        A factory method that creates a :class:`FFmpegOpusAudio` after probing
        the input source for audio codec and bitrate information.

        Examples
        ----------

        Use this function to create an :class:`FFmpegOpusAudio` instance instead of the constructor: ::

            source = await discord.FFmpegOpusAudio.from_probe("song.webm")
            voice_client.play(source)

        If you are on Windows and don't have ffprobe installed, use the ``fallback`` method
        to probe using ffmpeg instead: ::

            source = await discord.FFmpegOpusAudio.from_probe("song.webm", method='fallback')
            voice_client.play(source)

        Using a custom method of determining codec and bitrate: ::

            def custom_probe(source, executable):
                # some analysis code here
                return codec, bitrate

            source = await discord.FFmpegOpusAudio.from_probe("song.webm", method=custom_probe)
            voice_client.play(source)

        Parameters
        ------------
        source
            Identical to the ``source`` parameter for the constructor.
        method: Optional[Union[:class:`str`, Callable[:class:`str`, :class:`str`]]]
            The probing method used to determine bitrate and codec information. As a string, valid
            values are ``native`` to use ffprobe (or avprobe) and ``fallback`` to use ffmpeg
            (or avconv). As a callable, it must take two string arguments, ``source`` and
            ``executable``. Both parameters are the same values passed to this factory function.
            ``executable`` will default to ``ffmpeg`` if not provided as a keyword argument.
        kwargs
            The remaining parameters to be passed to the :class:`FFmpegOpusAudio` constructor,
            excluding ``bitrate`` and ``codec``.

        Raises
        --------
        AttributeError
            Invalid probe method, must be ``'native'`` or ``'fallback'``.
        TypeError
            Invalid value for ``probe`` parameter, must be :class:`str` or a callable.

        Returns
        --------
        :class:`FFmpegOpusAudio`
            An instance of this class.
        """
        executable = kwargs.get('executable')
        codec, bitrate = await cls.probe(source, method=method, executable=executable)
        return cls(source, bitrate=bitrate, codec=codec, **kwargs)  # type: ignore

    @classmethod
    async def probe(
        cls,
        source: str,
        *,
        method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
        executable: Optional[str] = None,
    ) -> Tuple[Optional[str], Optional[int]]:
        """|coro|

        Probes the input source for bitrate and codec information.

        Parameters
        ------------
        source
            Identical to the ``source`` parameter for :class:`FFmpegOpusAudio`.
        method
            Identical to the ``method`` parameter for :meth:`FFmpegOpusAudio.from_probe`.
        executable: :class:`str`
            Identical to the ``executable`` parameter for :class:`FFmpegOpusAudio`.

        Raises
        --------
        AttributeError
            Invalid probe method, must be ``'native'`` or ``'fallback'``.
        TypeError
            Invalid value for ``probe`` parameter, must be :class:`str` or a callable.

        Returns
        ---------
        Optional[Tuple[Optional[:class:`str`], Optional[:class:`int`]]]
            A 2-tuple with the codec and bitrate of the input source.
        """
        method = method or 'native'
        executable = executable or 'ffmpeg'
        probefunc = fallback = None

        if isinstance(method, str):
            probefunc = getattr(cls, '_probe_codec_' + method, None)
            if probefunc is None:
                raise AttributeError(f"ERROR: Invalid probe method {method!r}")
            if probefunc is cls._probe_codec_native:
                fallback = cls._probe_codec_fallback
        elif callable(method):
            probefunc = method
            fallback = cls._probe_codec_fallback
        else:
            raise TypeError("Expected str or callable for parameter 'probe', " \
                            f"not '{method.__class__.__name__}'")

        codec = bitrate = None
        loop = asyncio.get_event_loop()
        try:
            # Probing is blocking subprocess work; push it off the event loop.
            codec, bitrate = await loop.run_in_executor(None, lambda: probefunc(source, executable))  # type: ignore
        except Exception:
            if not fallback:
                _log.exception("Probe '%s' using '%s' failed", method, executable)
                return  # type: ignore

            _log.exception("Probe '%s' using '%s' failed, trying fallback", method, executable)
            try:
                codec, bitrate = await loop.run_in_executor(None, lambda: fallback(source, executable))  # type: ignore
            except Exception:
                _log.exception("Fallback probe using '%s' failed", executable)
            else:
                _log.info("Fallback probe found codec=%s, bitrate=%s", codec, bitrate)
        else:
            _log.info("Probe found codec=%s, bitrate=%s", codec, bitrate)
        finally:
            # NOTE: the return in finally deliberately swallows any probe
            # exception and yields (None, None) when everything failed.
            return codec, bitrate

    @staticmethod
    def _probe_codec_native(source, executable: str = 'ffmpeg') -> Tuple[Optional[str], Optional[int]]:
        # Use ffprobe/avprobe (derived from the ffmpeg/avconv name) to read
        # codec and bitrate from the first audio stream as JSON.
        exe = executable[:2] + 'probe' if executable in ('ffmpeg', 'avconv') else executable
        args = [exe, '-v', 'quiet', '-print_format', 'json', '-show_streams', '-select_streams', 'a:0', source]
        output = subprocess.check_output(args, timeout=20)
        codec = bitrate = None

        if output:
            data = json.loads(output)
            streamdata = data['streams'][0]

            codec = streamdata.get('codec_name')
            bitrate = int(streamdata.get('bit_rate', 0))
            bitrate = max(round(bitrate/1000), 512)

        return codec, bitrate

    @staticmethod
    def _probe_codec_fallback(source, executable: str = 'ffmpeg') -> Tuple[Optional[str], Optional[int]]:
        # Parse ffmpeg's own stderr banner for stream info when ffprobe is
        # unavailable.
        args = [executable, '-hide_banner', '-i', source]
        proc = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        out, _ = proc.communicate(timeout=20)
        output = out.decode('utf8')
        codec = bitrate = None

        codec_match = re.search(r"Stream #0.*?Audio: (\w+)", output)
        if codec_match:
            codec = codec_match.group(1)

        br_match = re.search(r"(\d+) [kK]b/s", output)
        if br_match:
            bitrate = max(int(br_match.group(1)), 512)

        return codec, bitrate

    def read(self) -> bytes:
        # Empty bytes once the Ogg stream runs out of packets.
        return next(self._packet_iter, b'')

    def is_opus(self) -> bool:
        return True
class PCMVolumeTransformer(AudioSource, Generic[AT]):
    """Transforms a previous :class:`AudioSource` to have volume controls.

    This does not work on audio sources that have :meth:`AudioSource.is_opus`
    set to ``True``.

    Parameters
    ------------
    original: :class:`AudioSource`
        The original AudioSource to transform.
    volume: :class:`float`
        The initial volume to set it to.
        See :attr:`volume` for more info.

    Raises
    -------
    TypeError
        Not an audio source.
    ClientException
        The audio source is opus encoded.
    """
    def __init__(self, original: AT, volume: float = 1.0):
        if not isinstance(original, AudioSource):
            raise TypeError(f'ERROR: expected AudioSource not {original.__class__.__name__}.')
        if original.is_opus():
            raise ClientException('AudioSource must not be Opus encoded.')
        self.original: AT = original
        self.volume = volume
    @property
    def volume(self) -> float:
        """Retrieves or sets the volume as a floating point percentage (e.g. ``1.0`` for 100%)."""
        return self._volume
    @volume.setter
    def volume(self, value: float) -> None:
        # Volume is clamped to be non-negative on assignment.
        self._volume = max(value, 0.0)
    def cleanup(self) -> None:
        # Delegate cleanup to the wrapped source.
        self.original.cleanup()
    def read(self) -> bytes:
        ret = self.original.read()
        # Scale 16-bit samples; the effective gain is capped at 2.0 (200%).
        return audioop.mul(ret, 2, min(self._volume, 2.0))
class AudioPlayer(threading.Thread):
    """Daemon thread that pulls 20ms frames from an :class:`AudioSource`
    and sends them to a voice client, honouring pause/resume/stop and
    reconnects."""

    # Seconds per frame (FRAME_LENGTH is in milliseconds).
    DELAY: float = OpusEncoder.FRAME_LENGTH / 1000.0

    def __init__(self, source: AudioSource, client: VoiceClient, *, after=None):
        threading.Thread.__init__(self)
        # Validate the callback up-front, before any state is assigned.
        # Fix: the message previously carried a duplicated "ERROR: ERROR:"
        # prefix inside a pointless placeholder-free f-string.
        if after is not None and not callable(after):
            raise TypeError('Expected a callable for the "after" parameter.')

        self.daemon: bool = True
        self.source: AudioSource = source
        self.client: VoiceClient = client
        self.after: Optional[Callable[[Optional[Exception]], Any]] = after

        self._end: threading.Event = threading.Event()
        self._resumed: threading.Event = threading.Event()
        self._resumed.set()  # we are not paused
        self._current_error: Optional[Exception] = None
        self._connected: threading.Event = client._connected
        self._lock: threading.Lock = threading.Lock()

    def _do_run(self) -> None:
        self.loops = 0
        self._start = time.perf_counter()

        # getattr lookup speed ups
        play_audio = self.client.send_audio_packet
        self._speak(True)

        while not self._end.is_set():
            # are we paused?
            if not self._resumed.is_set():
                # wait until we aren't
                self._resumed.wait()
                continue

            # are we disconnected from voice?
            if not self._connected.is_set():
                # wait until we are connected
                self._connected.wait()
                # reset our internal data
                self.loops = 0
                self._start = time.perf_counter()

            self.loops += 1
            data = self.source.read()

            if not data:
                self.stop()
                break

            play_audio(data, encode=not self.source.is_opus())
            # Pace playback against wall-clock time so drift doesn't
            # accumulate across frames.
            next_time = self._start + self.DELAY * self.loops
            delay = max(0, self.DELAY + (next_time - time.perf_counter()))
            time.sleep(delay)

    def run(self) -> None:
        try:
            self._do_run()
        except Exception as exc:
            self._current_error = exc
            self.stop()
        finally:
            self.source.cleanup()
            self._call_after()

    def _call_after(self) -> None:
        # Invoke the user's 'after' callback with the terminal error (if
        # any); otherwise surface the error on stderr and in the log.
        error = self._current_error

        if self.after is not None:
            try:
                self.after(error)
            except Exception as exc:
                _log.exception('Calling the after function failed.')
                exc.__context__ = error
                traceback.print_exception(type(exc), exc, exc.__traceback__)
        elif error:
            msg = f'Exception in voice thread {self.name}'
            _log.exception(msg, exc_info=error)
            print(msg, file=sys.stderr)
            traceback.print_exception(type(error), error, error.__traceback__)

    def stop(self) -> None:
        self._end.set()
        self._resumed.set()
        self._speak(False)

    def pause(self, *, update_speaking: bool = True) -> None:
        self._resumed.clear()
        if update_speaking:
            self._speak(False)

    def resume(self, *, update_speaking: bool = True) -> None:
        # Restart the pacing clock so paused time is not "caught up".
        self.loops = 0
        self._start = time.perf_counter()
        self._resumed.set()
        if update_speaking:
            self._speak(True)

    def is_playing(self) -> bool:
        return self._resumed.is_set() and not self._end.is_set()

    def is_paused(self) -> bool:
        return not self._end.is_set() and not self._resumed.is_set()

    def _set_source(self, source: AudioSource) -> None:
        # Swap sources atomically with respect to the playback loop.
        with self._lock:
            self.pause(update_speaking=False)
            self.source = source
            self.resume(update_speaking=False)

    def _speak(self, speaking: bool) -> None:
        try:
            asyncio.run_coroutine_threadsafe(self.client.ws.speak(speaking), self.client.loop)
        except Exception as e:
            # Best effort; losing a speaking-state update is not fatal.
            _log.info("Speaking call in player failed: %s", e)
|
server.py | from concurrent import futures
import time
import logging
from activeNodes import activeNodes
from threading import Thread
import sys
import grpc
from NodePing import Heartbeat
from FileOperations import FileService
sys.path.append('./Gen')
import heartbeat_pb2
import heartbeat_pb2_grpc
import fileservice_pb2_grpc as fileService_pb2_grpc
import fileservice_pb2 as fileService_pb2
from leaderbackground import TestObj
from state import State
from IPUtil import IPUtil
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
activeNodeObj= activeNodes()
"""
Thread function to check Leader
"""
state=State(False)
mainLeader = None
ipList = IPUtil()
superNodeIp = ipList.getSuperNodeIp()
def threaded_function(a):
    """Leader-election loop run on a background thread.

    Repeatedly polls the TestObj election object, publishes the current
    leader into the module-global ``mainLeader``, updates ``state``, and
    periodically re-registers this node with the super node.

    :param a: the server port passed by serve(); it is printed once and then
              rebound as a loop counter — the original value is not used.
    """
    global superNodeIp
    global mainLeader
    print("Argument",a)
    port = ""  # NOTE(review): never reassigned, so the `port == 2000` branch below can never fire — confirm intent
    partners = ipList.getPartnersForElection()
    self_node = ipList.getSelfForElection()
    o = TestObj(self_node, partners)
    n = 0
    old_value = -1
    informSuperNode(self_node)
    a = 0  # rebound: now counts iterations to throttle super-node pings
    while True:
        #print('Current Counter value:', old_value)
        print(" is Leader : ",mainLeader)
        time.sleep(0.5)
        if o.getCounter() != old_value:
            old_value = o.getCounter()
            print('Current Counter value:', old_value)
        if o._getLeader() is None:
            # No leader elected yet; provisionally claim leadership locally.
            mainLeader = self_node
            continue
        # if n < 2000:
        if n < 20:
            if (port == 2000):
                o.addValue(10, n)
            n += 1
        #if n % 20 == 0:
        print("thread function-----"+o._getLeader())
        mainLeader = o._getLeader()
        state.changeState(mainLeader == self_node)
        # Re-inform the super node roughly every 3s (6 iterations * 0.5s)
        # while we are the leader.
        if state.isLeader() and a % 6 == 0:
            informSuperNode(self_node)
        a+=1
        # continue
def informSuperNode(self_node):
    """Report this node's address to the super node as the cluster leader.

    Best-effort: any failure (dead channel, RPC error) is printed and
    swallowed so the election loop keeps running.

    :param self_node: "ip:port" string of this node; only the ip is sent.
    """
    temp = self_node.split(":")[0]
    try:
        channel = grpc.insecure_channel(superNodeIp)
        if isChannelAlive(channel):
            stub = fileService_pb2_grpc.FileserviceStub(channel)
            response = stub.getLeaderInfo(fileService_pb2.ClusterInfo(ip=str(temp), port="3000", clusterName="Saket"))
            print(response.message)
        else:
            # The original code used a bare `raise` here with no active
            # exception, which raises RuntimeError('No active exception to
            # re-raise') — raise a meaningful error instead.
            raise ConnectionError("super node channel is not ready: " + superNodeIp)
    except Exception as e:
        print(e)
def isChannelAlive(channel):
    """Return True when the gRPC channel becomes ready within one second."""
    try:
        grpc.channel_ready_future(channel).result(timeout=1)
        return True
    except grpc.FutureTimeoutError:
        return False
def serve():
    """Start the leader-election thread, wait until a leader is known, then
    run the gRPC heartbeat/file server until interrupted.

    Blocks forever; Ctrl-C stops the server cleanly.
    """
    global mainLeader
    cmd_host = ipList.getSelf()
    serverAddress = ipList.getSelfForServer()
    print("Server started on" + ipList.getServerPort())
    thread = Thread(target=threaded_function, args=(ipList.getServerPort(),))
    print("System args", ipList.getServerPort())
    thread.start()
    # The original `while not mainLeader: pass` busy-waited and pinned a CPU
    # core until the election thread published a leader; poll with a short
    # sleep instead.  (The `leader` flag it computed afterwards was unused
    # and has been dropped.)
    while not mainLeader:
        time.sleep(0.05)
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=1000))
    heartbeat_pb2_grpc.add_HearBeatServicer_to_server(Heartbeat(), server)
    fileService_pb2_grpc.add_FileserviceServicer_to_server(FileService(state, serverAddress, activeNodeObj), server)
    server.add_insecure_port(ipList.getSelfForServer())
    print("Current leader is ", mainLeader)
    server.start()
    try:
        # Keep the main thread alive; gRPC serves on its own worker threads.
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)
if __name__ == '__main__':
    # Entry point: start the election thread and the gRPC server.
    print("Main start here")  # fixed typo ("Mian start here")
    serve()
# print(superNodeIp)
# channel = grpc.insecure_channel(superNodeIp)
# stub = fileService_pb2_grpc.FileserviceStub(channel)
# response = stub.getLeaderInfo(fileService_pb2.ClusterInfo(ip=str("192.168.43.81"), port="3000", clusterName="Saket"))
# print(response.message)
# try:
# while True:
# time.sleep(_ONE_DAY_IN_SECONDS)
# #if mainLeader:
# # print("Current Main leader is ",mainLeader)
# #else:
# # print("not up")
# except KeyboardInterrupt:
# pass
# from concurrent import futures
# import time
# import logging
# from activeNodes import activeNodes
# import threading
# import sys
# import grpc
# from NodePing import Heartbeat
# from FileOperations import FileService
# sys.path.append('./Gen')
# import heartbeat_pb2
# import heartbeat_pb2_grpc
# import fileService_pb2
# import fileService_pb2_grpc
# _ONE_DAY_IN_SECONDS = 60 * 60 * 24
# activeNodeObj= activeNodes()
# def serve():
# leader=True
# server = grpc.server(futures.ThreadPoolExecutor(max_workers=1000))
# if sys.argv[1] != str(3000):
# leader=False
# serverAddress= '127.0.0.1:'+sys.argv[1]
# print("Server started on" + sys.argv[1])
# heartbeat_pb2_grpc.add_HearBeatServicer_to_server(Heartbeat(), server)
# fileService_pb2_grpc.add_FileserviceServicer_to_server(FileService(leader, serverAddress, activeNodeObj), server)
# server.add_insecure_port('127.0.0.1:'+sys.argv[1])
# server.start()
# try:
# while True:
# time.sleep(_ONE_DAY_IN_SECONDS)
# except KeyboardInterrupt:
# server.stop(0)
# if __name__ == '__main__':
# serve()
|
kinect2grasp_python2.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author : Hongzhuo Liang
# E-mail : liang@informatik.uni-hamburg.de
# Description:
# Date : 05/08/2018 6:04 PM
# File Name : kinect2grasp_python2.py
# Note: this file is written in Python2
import torch
import rospy
from sensor_msgs.msg import PointCloud2
from visualization_msgs.msg import MarkerArray
from visualization_msgs.msg import Marker
from gpd.msg import GraspConfig
from gpd.msg import GraspConfigList
import tf
import moveit_commander
import numpy as np
import pointclouds
import voxelgrid
import pcl
from autolab_core import YamlConfig
from dexnet.grasping import RobotGripper
from dexnet.grasping import GpgGraspSamplerPcl
import os
from pyquaternion import Quaternion
import sys
from os import path
import time
from scipy.stats import mode
import multiprocessing as mp
try:
from mayavi import mlab
except ImportError:
print("Can not import mayavi")
mlab = None
sys.path.append(path.dirname(path.dirname(path.dirname(path.abspath("__file__")))))
sys.path.append(os.environ['HOME'] + "/code/grasp-pointnet/PointNetGPD")
from main_test import test_network, model, args
# global config:
yaml_config = YamlConfig(os.environ['HOME'] + "/code/grasp-pointnet/dex-net/test/config.yaml")
gripper_name = 'robotiq_85'
gripper = RobotGripper.load(gripper_name, os.environ['HOME'] + "/code/grasp-pointnet/dex-net/data/grippers")
ags = GpgGraspSamplerPcl(gripper, yaml_config)
value_fc = 0.4 # no use, set a random number
num_grasps = 40
num_workers = 20
max_num_samples = 150
n_voxel = 500
minimal_points_send_to_point_net = 20
marker_life_time = 8
show_bad_grasp = False
save_grasp_related_file = False
show_final_grasp = args.show_final_grasp
tray_grasp = args.tray_grasp
using_mp = args.using_mp
single_obj_testing = False # if True, it will wait for input before get pointcloud
# number of points put into neural network
if args.model_type == "100": # minimal points send for training
input_points_num = 500
elif args.model_type == "50":
input_points_num = 750
elif args.model_type == "3class":
input_points_num = 500
else:
input_points_num = 0
def remove_table_points(points_voxel_, vis=False):
    """Remove table-plane points from a voxel-downsampled cloud.

    A point is kept only when at least 3 points share its exact (x, y)
    column; sparser columns are treated as table-plane noise.

    :param points_voxel_: (N, 3) array of voxel-downsampled points
    :param vis: when True, plot the original (red) and filtered (blue) clouds
    :return: filtered (M, 3) array of points
    """
    # Count how many points fall in every unique (x, y) column in one
    # vectorized pass.  The original implementation looped over every unique
    # column and compared it against every point (accidental O(columns *
    # points)); np.unique with return_inverse/return_counts gives the same
    # per-point column counts directly.  (The noisy per-column print of the
    # original loop is dropped.)
    _, inverse, counts = np.unique(points_voxel_[:, 0:2], axis=0,
                                   return_inverse=True, return_counts=True)
    keep = counts[inverse] >= 3
    if keep.all():
        # Nothing to delete: return the input unchanged, matching the
        # original behavior for this case.
        new_points_voxel_ = points_voxel_
    else:
        new_points_voxel_ = points_voxel_[keep]
    print("Success delete [[ {} ]] points from the table!".format(len(points_voxel_) - len(new_points_voxel_)))
    if vis:
        p = points_voxel_
        mlab.points3d(p[:, 0], p[:, 1], p[:, 2], scale_factor=0.002, color=(1, 0, 0))
        p = new_points_voxel_
        mlab.points3d(p[:, 0], p[:, 1], p[:, 2], scale_factor=0.002, color=(0, 0, 1))
        mlab.points3d(0, 0, 0, scale_factor=0.01, color=(0, 1, 0))  # plot 0 point
        mlab.show()
    return new_points_voxel_
def remove_white_pixel(msg, points_, vis=False):
    """Drop near-white points: a point survives only when all three of its
    RGB channels are below 210."""
    cloud_rgb = pointclouds.split_rgb_field(pointclouds.pointcloud2_to_array(msg))
    channels = [np.asarray(cloud_rgb[name], dtype=np.uint32) for name in ('r', 'g', 'b')]
    rgb_colors = np.vstack(channels).T
    # rgb = rgb_colors.astype(np.float) / 255
    non_white = np.sum(rgb_colors[:] < 210, axis=-1) == 3
    keep_idx = np.where(non_white == 1)[0]
    new_points_ = points_[keep_idx]
    if vis:
        p = points_
        mlab.points3d(p[:, 0], p[:, 1], p[:, 2], scale_factor=0.002, color=(1, 0, 0))
        p = new_points_
        mlab.points3d(p[:, 0], p[:, 1], p[:, 2], scale_factor=0.002, color=(0, 0, 1))
        mlab.points3d(0, 0, 0, scale_factor=0.01, color=(0, 1, 0))  # plot 0 point
        mlab.show()
    return new_points_
def get_voxel_fun(points_, n):
    """Voxel-grid downsample a point cloud.

    :param points_: input point array
    :param n: number of voxels along each axis
    :return: the unique occupied voxel centers
    """
    grid = voxelgrid.VoxelGrid(points_, n_x=n, n_y=n, n_z=n)
    grid.compute()
    centers = grid.voxel_centers[grid.voxel_n]
    return np.unique(centers, axis=0)
def cal_grasp(msg, cam_pos_):
    """Generate grasp candidates from a point-cloud message.

    :param msg: ROS PointCloud2 message
    :param cam_pos_: camera position (translation) in the table frame
    :return: (grasps, downsampled points, surface normals); grasps is []
             when no point lies high enough above the table to sample from
    """
    points_ = pointclouds.pointcloud2_to_xyz_array(msg)
    points_ = points_.astype(np.float32)
    remove_white = False
    if remove_white:
        points_ = remove_white_pixel(msg, points_, vis=True)
    # begin voxel points
    n = n_voxel  # parameter related to voxel method
    # gpg improvements, highlights: flexible n parameter for voxelizing.
    points_[:, 0] = points_[:, 0] + 0.025  # liang: as the kinect2 is not well calibrated, here is a work around
    points_[:, 2] = points_[:, 2]  # + 0.018  # liang: as the kinect2 is not well calibrated, here is a work around
    points_voxel_ = get_voxel_fun(points_, n)  # point cloud down sample
    # Sparse cloud: coarsen the voxel parameter until the downsampled cloud
    # keeps almost every original point.
    if len(points_) < 2000:  # should be a parameter
        while len(points_voxel_) < len(points_)-15:
            points_voxel_ = get_voxel_fun(points_, n)
            n = n + 100
            rospy.loginfo("the voxel has {} points, we want get {} points".format(len(points_voxel_), len(points_)))
    rospy.loginfo("the voxel has {} points, we want get {} points".format(len(points_voxel_), len(points_)))
    points_ = points_voxel_  # the downsampled cloud
    remove_points = False
    if remove_points:
        points_ = remove_table_points(points_, vis=True)
    point_cloud = pcl.PointCloud(points_)  # hand over to pcl
    # estimate surface normals
    norm = point_cloud.make_NormalEstimation()
    norm.set_KSearch(30)  # critical parameter when calculating the norms
    normals = norm.compute()
    surface_normal = normals.to_array()
    surface_normal = surface_normal[:, 0:3]
    # Flip any normal pointing away from the camera (angle > 90 degrees
    # between normal and point-to-camera vector).
    vector_p2cam = cam_pos_ - points_
    vector_p2cam = vector_p2cam / np.linalg.norm(vector_p2cam, axis=1).reshape(-1, 1)
    tmp = np.dot(vector_p2cam, surface_normal.T).diagonal()
    angel = np.arccos(np.clip(tmp, -1.0, 1.0))
    wrong_dir_norm = np.where(angel > np.pi * 0.5)[0]
    tmp = np.ones([len(angel), 3])
    tmp[wrong_dir_norm, :] = -1
    surface_normal = surface_normal * tmp
    select_point_above_table = 0.010
    # modify of gpg: make it as a parameter. avoid select points near the table.
    points_for_sample = points_[np.where(points_[:, 2] > select_point_above_table)[0]]
    if len(points_for_sample) == 0:
        rospy.loginfo("Can not select point, maybe the point cloud is too low?")
        return [], points_, surface_normal
    yaml_config['metrics']['robust_ferrari_canny']['friction_coef'] = value_fc
    if not using_mp:
        rospy.loginfo("Begin cal grasps using single thread, slow!")
        grasps_together_ = ags.sample_grasps(point_cloud, points_for_sample, surface_normal, num_grasps,
                                             max_num_samples=max_num_samples, show_final_grasp=show_final_grasp)
    else:
        # begin parallel grasp:
        rospy.loginfo("Begin cal grasps using parallel!")

        def grasp_task(num_grasps_, ags_, queue_):
            # Worker: sample a share of the grasps and push them onto the queue.
            ret = ags_.sample_grasps(point_cloud, points_for_sample, surface_normal, num_grasps_,
                                     max_num_samples=max_num_samples, show_final_grasp=show_final_grasp)
            queue_.put(ret)

        queue = mp.Queue()
        num_grasps_p_worker = int(num_grasps/num_workers)
        workers = [mp.Process(target=grasp_task, args=(num_grasps_p_worker, ags, queue)) for _ in range(num_workers)]
        [i.start() for i in workers]
        grasps_together_ = []
        for i in range(num_workers):
            grasps_together_ = grasps_together_ + queue.get()
        rospy.loginfo("Finish mp processing!")
    rospy.loginfo("Grasp sampler finish, generated {} grasps.".format(len(grasps_together_)))
    return grasps_together_, points_, surface_normal
def check_collision_square(grasp_bottom_center, approach_normal, binormal,
                           minor_pc, points_, p, way="p_open"):
    """Containment/collision check between a gripper region and the cloud.

    Transforms the cloud into the grasp frame (axes: approach, binormal,
    minor) and tests which points fall inside the region selected by ``way``.

    :param grasp_bottom_center: grasp bottom center in the cloud frame
    :param approach_normal: grasp approach axis
    :param binormal: grasp closing (finger) axis
    :param minor_pc: grasp minor axis
    :param points_: (N, 3) cloud points
    :param p: precomputed local hand points (from ags.get_hand_points)
    :param way: region to test: "p_open", "p_left", "p_right" or "p_bottom"
    :return: (has_points, indices of points inside, points in grasp frame)
    """
    # Normalize the three grasp axes and build the rotation into grasp frame.
    approach_normal = approach_normal.reshape(1, 3)
    approach_normal = approach_normal / np.linalg.norm(approach_normal)
    binormal = binormal.reshape(1, 3)
    binormal = binormal / np.linalg.norm(binormal)
    minor_pc = minor_pc.reshape(1, 3)
    minor_pc = minor_pc / np.linalg.norm(minor_pc)
    matrix_ = np.hstack([approach_normal.T, binormal.T, minor_pc.T])
    grasp_matrix = matrix_.T
    # Express the cloud in the grasp frame.
    points_ = points_ - grasp_bottom_center.reshape(1, 3)
    tmp = np.dot(grasp_matrix, points_.T)
    points_g = tmp.T
    use_dataset_py = True
    if not use_dataset_py:
        # Bound the region by specific precomputed hand points, chosen per
        # requested region.
        if way == "p_open":
            s1, s2, s4, s8 = p[1], p[2], p[4], p[8]
        elif way == "p_left":
            s1, s2, s4, s8 = p[9], p[1], p[10], p[12]
        elif way == "p_right":
            s1, s2, s4, s8 = p[2], p[13], p[3], p[7]
        elif way == "p_bottom":
            s1, s2, s4, s8 = p[11], p[15], p[12], p[20]
        else:
            raise ValueError('No way!')
        a1 = s1[1] < points_g[:, 1]
        a2 = s2[1] > points_g[:, 1]
        a3 = s1[2] > points_g[:, 2]
        a4 = s4[2] < points_g[:, 2]
        a5 = s4[0] > points_g[:, 0]
        a6 = s8[0] < points_g[:, 0]
        a = np.vstack([a1, a2, a3, a4, a5, a6])
        # A point is inside when all 6 half-space tests pass.
        points_in_area = np.where(np.sum(a, axis=0) == len(a))[0]
        if len(points_in_area) == 0:
            has_p = False
        else:
            has_p = True
    # for the way of pointGPD/dataset.py:
    else:
        # Axis-aligned box in grasp frame derived from gripper geometry.
        width = ags.gripper.hand_outer_diameter - 2 * ags.gripper.finger_width
        x_limit = ags.gripper.hand_depth
        z_limit = width / 4
        y_limit = width / 2
        x1 = points_g[:, 0] > 0
        x2 = points_g[:, 0] < x_limit
        y1 = points_g[:, 1] > -y_limit
        y2 = points_g[:, 1] < y_limit
        z1 = points_g[:, 2] > -z_limit
        z2 = points_g[:, 2] < z_limit
        a = np.vstack([x1, x2, y1, y2, z1, z2])
        points_in_area = np.where(np.sum(a, axis=0) == len(a))[0]
        if len(points_in_area) == 0:
            has_p = False
        else:
            has_p = True
    vis = False
    if vis:
        p = points_g
        mlab.points3d(p[:, 0], p[:, 1], p[:, 2], scale_factor=0.002, color=(0, 0, 1))
        p = points_g[points_in_area]
        mlab.points3d(p[:, 0], p[:, 1], p[:, 2], scale_factor=0.002, color=(1, 0, 0))
        p = ags.get_hand_points(np.array([0, 0, 0]), np.array([1, 0, 0]), np.array([0, 1, 0]))
        mlab.points3d(p[:, 0], p[:, 1], p[:, 2], scale_factor=0.005, color=(0, 1, 0))
        mlab.show()
    return has_p, points_in_area, points_g
def collect_pc(grasp_, pc):
    """Collect, for every grasp, the cloud points that fall inside the
    gripper-open region, expressed in that grasp's own frame.

    Each grasp is (grasp_bottom_center, normal, major_pc, minor_pc, center).

    :param grasp_: list/array of grasps
    :param pc: (N, 3) cloud points
    :return: (list of per-grasp point indices, list of per-grasp point arrays)
    """
    grasp_arr = np.array(grasp_)
    grasp_arr = grasp_arr.reshape(-1, 5, 3)  # prevent to have grasp that only have number 1
    bottom_centers = grasp_arr[:, 0]
    approaches = grasp_arr[:, 1]
    binormals = grasp_arr[:, 2]
    minor_pcs = grasp_arr[:, 3]
    hand_points = ags.get_hand_points(np.array([0, 0, 0]), np.array([1, 0, 0]), np.array([0, 1, 0]))
    in_ind_ = []
    in_ind_points_ = []
    for center, normal, binorm, minor in zip(bottom_centers, approaches, binormals, minor_pcs):
        _, inside_idx, points_g = check_collision_square(center, normal, binorm, minor, pc, hand_points)
        in_ind_.append(inside_idx)
        in_ind_points_.append(points_g[inside_idx])
    return in_ind_, in_ind_points_
def show_marker(marker_array_, pos_, ori_, scale_, color_, lifetime_):
    """Append one semi-transparent CUBE marker to an rviz MarkerArray.

    :param marker_array_: MarkerArray to append to
    :param pos_: (x, y, z) position in the /table_top frame
    :param ori_: quaternion in (w, x, y, z) order
    :param scale_: (x, y, z) cube dimensions
    :param color_: (r, g, b) color components
    :param lifetime_: marker lifetime in seconds
    """
    m = Marker()
    m.header.frame_id = "/table_top"
    m.type = m.CUBE
    m.action = m.ADD
    m.pose.position.x = pos_[0]
    m.pose.position.y = pos_[1]
    m.pose.position.z = pos_[2]
    # ori_ stores the scalar part first (w, x, y, z).
    m.pose.orientation.w = ori_[0]
    m.pose.orientation.x = ori_[1]
    m.pose.orientation.y = ori_[2]
    m.pose.orientation.z = ori_[3]
    m.lifetime = rospy.Duration.from_sec(lifetime_)
    m.scale.x = scale_[0]
    m.scale.y = scale_[1]
    m.scale.z = scale_[2]
    m.color.a = 0.5
    m.color.r, m.color.g, m.color.b = color_
    marker_array_.markers.append(m)
def show_grasp_marker(marker_array_, real_grasp_, gripper_, color_, lifetime_):
    """Render one grasp as three cube markers (palm plus two fingers) in rviz.

    :param marker_array_: marker array
    :param real_grasp_: [1] approach, [2] binormal, [3] minor pc, [4] center
    :param gripper_: gripper parameters of the grasp
    :param color_: color of the gripper
    :param lifetime_: time to keep showing the markers
    :return: appends the markers to the marker array
    """
    hand_h = gripper_.hand_height
    finger_w = gripper_.real_finger_width
    outer_d = gripper_.hand_outer_diameter
    hand_d = gripper_.real_hand_depth
    open_w = outer_d - finger_w * 2
    approach = real_grasp_[1]
    binormal = real_grasp_[2]
    minor_pc = real_grasp_[3]
    # Shift the center back along the approach axis to compensate for the
    # difference between the real and nominal hand depth.
    bottom = real_grasp_[4] - approach * (gripper_.real_hand_depth - gripper_.hand_depth)
    qua = Quaternion(matrix=np.vstack([approach, binormal, minor_pc]).T)
    palm_pos = bottom - approach * hand_h * 0.5
    left_pos = bottom - binormal * (open_w * 0.5 + finger_w * 0.5) + hand_d * 0.5 * approach
    right_pos = bottom + binormal * (open_w * 0.5 + finger_w * 0.5) + hand_d * 0.5 * approach
    show_marker(marker_array_, palm_pos, qua, np.array([hand_h, outer_d, hand_h]), color_, lifetime_)
    show_marker(marker_array_, left_pos, qua, np.array([hand_d, finger_w, hand_h]), color_, lifetime_)
    show_marker(marker_array_, right_pos, qua, np.array([hand_d, finger_w, hand_h]), color_, lifetime_)
def check_hand_points_fun(real_grasp_):
    """Count the cloud points inside the open-gripper region of every grasp
    and save the counts to a .npy file for offline analysis.

    NOTE(review): this reads the module-level global ``points`` (assigned in
    the __main__ loop) instead of taking the cloud as a parameter — confirm
    callers set it before invoking this.
    """
    ind_points_num = []
    for i in range(len(real_grasp_)):
        grasp_bottom_center = real_grasp_[i][4]
        approach_normal = real_grasp_[i][1]
        binormal = real_grasp_[i][2]
        minor_pc = real_grasp_[i][3]
        local_hand_points = ags.get_hand_points(np.array([0, 0, 0]), np.array([1, 0, 0]), np.array([0, 1, 0]))
        has_points_tmp, ind_points_tmp = ags.check_collision_square(grasp_bottom_center, approach_normal,
                                                                    binormal, minor_pc, points,
                                                                    local_hand_points, "p_open")
        ind_points_num.append(len(ind_points_tmp))
    print(ind_points_num)
    # Random prefix avoids overwriting earlier runs' files.
    file_name = "./generated_grasps/real_points/" + str(np.random.randint(300)) + str(len(real_grasp_)) + ".npy"
    np.save(file_name, np.array(ind_points_num))
def get_grasp_msg(real_good_grasp_, score_value_):
    """Build a GraspConfig ROS message from one grasp.

    :param real_good_grasp_: [1] approach, [2] binormal, [3] minor pc, [4] center
    :param score_value_: network score to embed in the message
    :return: populated GraspConfig
    """
    bottom = real_good_grasp_[4]
    approach = real_good_grasp_[1]
    binormal = real_good_grasp_[2]
    minor_pc = real_good_grasp_[3]
    msg = GraspConfig()
    # The "top" point sits one hand depth along the approach axis.
    top = bottom + approach * ags.gripper.hand_depth
    for field, vec in ((msg.sample, bottom), (msg.top, top), (msg.approach, approach),
                       (msg.binormal, binormal), (msg.axis, minor_pc)):
        field.x = vec[0]
        field.y = vec[1]
        field.z = vec[2]
    msg.score.data = score_value_
    return msg
def remove_grasp_outside_tray(grasps_, points_):
    """Keep only grasps whose finger points stay inside the tray's x/y box.

    The box is the axis-aligned x/y extent of the observed cloud.
    """
    x_lo, x_hi = points_[:, 0].min(), points_[:, 0].max()
    y_lo, y_hi = points_[:, 1].min(), points_[:, 1].max()
    grasps_inside_ = []
    for g in grasps_:
        hand_pts = ags.get_hand_points(g[4], g[1], g[2])
        finger_pts = hand_pts[[1, 2, 3, 4, 9, 10, 13, 14], :]
        # todo: work of remove outside grasp not finished.
        outside = ((finger_pts[:, 0] < x_lo) | (finger_pts[:, 0] > x_hi) |
                   (finger_pts[:, 1] < y_lo) | (finger_pts[:, 1] > y_hi))
        if not outside.any():
            grasps_inside_.append(g)
    rospy.loginfo("gpg got {} grasps, after remove grasp outside tray, {} grasps left".format(len(grasps_),
                                                                                              len(grasps_inside_)))
    return grasps_inside_
if __name__ == '__main__':
    """
    definition of gotten grasps:
    grasp_bottom_center = grasp_[0]
    approach_normal = grasp_[1]
    binormal = grasp_[2]
    """
    rospy.init_node('grasp_tf_broadcaster', anonymous=True)
    pub1 = rospy.Publisher('gripper_vis', MarkerArray, queue_size=1)
    pub2 = rospy.Publisher('/detect_grasps/clustered_grasps', GraspConfigList, queue_size=1)
    rate = rospy.Rate(10)
    rospy.set_param("/robot_at_home", "true")  # only use when in simulation test.
    rospy.loginfo("getting transform from kinect2 to table top")
    cam_pos = []
    listener = tf.TransformListener()
    get_transform = False
    # Block until tf can resolve the camera pose in the table frame.
    while not get_transform:
        try:
            cam_pos, _ = listener.lookupTransform('/table_top', '/kinect2_ir_optical_frame', rospy.Time(0))
            get_transform = True
            rospy.loginfo("got transform complete")
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            continue
    while not rospy.is_shutdown():
        if rospy.get_param("/robot_at_home") == "false":
            robot_at_home = False
        else:
            robot_at_home = True
        if not robot_at_home:
            rospy.loginfo("Robot is moving, waiting the robot go home.")
            continue
        else:
            rospy.loginfo("Robot is at home, safely catching point cloud data.")
        if single_obj_testing:
            input("Please put object on table and press any number to continue!")
        rospy.loginfo("rospy is waiting for message: /table_top_points")
        kinect_data = rospy.wait_for_message("/table_top_points", PointCloud2)
        real_good_grasp = []
        real_bad_grasp = []
        real_score_value = []
        repeat = 1  # speed up this try 10 time is too time consuming (repeated predictions per grasp)
        # begin of grasp detection
        # if there is no point cloud on table, waiting for point cloud.
        if kinect_data.data == '':
            rospy.loginfo("There is no points on the table, waiting...")
            continue
        real_grasp, points, normals_cal = cal_grasp(kinect_data, cam_pos)  # generate grasp candidates
        if tray_grasp:  # grasping inside a tray
            real_grasp = remove_grasp_outside_tray(real_grasp, points)  # drop grasps outside the tray
        check_grasp_points_num = True  # evaluate the number of points in a grasp
        check_hand_points_fun(real_grasp) if check_grasp_points_num else 0
        in_ind, in_ind_points = collect_pc(real_grasp, points)  # cloud points in each gripper's frame
        if save_grasp_related_file:
            np.save("./generated_grasps/points.npy", points)
            np.save("./generated_grasps/in_ind.npy", in_ind)
            np.save("./generated_grasps/real_grasp.npy", real_grasp)
            np.save("./generated_grasps/cal_norm.npy", normals_cal)
        score = []  # should be 0 or 1
        score_value = []  # should be float [0, 1]
        ind_good_grasp = []  # indices of grasps judged good
        ind_bad_grasp = []  # indices of grasps judged bad
        rospy.loginfo("Begin send grasp into pointnet, cal grasp score")
        for ii in range(len(in_ind_points)):
            # Abort scoring early if the robot started moving.
            if rospy.get_param("/robot_at_home") == "false":
                robot_at_home = False
            else:
                robot_at_home = True
            if not robot_at_home:
                rospy.loginfo("robot is not at home, stop calculating the grasp score")
                break
            if in_ind_points[ii].shape[0] < minimal_points_send_to_point_net:  # too few points for the network
                rospy.loginfo("Mark as bad grasp! Only {} points, should be at least {} points.".format(
                    in_ind_points[ii].shape[0], minimal_points_send_to_point_net))
                score.append(0)
                score_value.append(0.0)
                if show_bad_grasp:
                    ind_bad_grasp.append(ii)
            else:  # use pointnet to judge grasp quality
                predict = []
                grasp_score = []
                for _ in range(repeat):
                    if len(in_ind_points[ii]) >= input_points_num:  # too many points: downsample without replacement
                        points_modify = in_ind_points[ii][np.random.choice(len(in_ind_points[ii]),
                                                                           input_points_num, replace=False)]
                    else:  # too few points: upsample with replacement
                        points_modify = in_ind_points[ii][np.random.choice(len(in_ind_points[ii]),
                                                                           input_points_num, replace=True)]
                    if_good_grasp, grasp_score_tmp = test_network(model.eval(), points_modify)  # score with the network
                    predict.append(if_good_grasp.item())
                    grasp_score.append(grasp_score_tmp)
                predict_vote = mode(predict)[0][0]  # vote from all the "repeat" results (most frequent prediction)
                grasp_score = np.array(grasp_score)
                if args.model_type == "3class":  # the best in 3 class classification is the last column, third column
                    which_one_is_best = 2  # should set as 2
                else:  # for two class classification best is the second column (also the last column)
                    which_one_is_best = 1  # should set as 1
                score_vote = np.mean(grasp_score[np.where(predict == predict_vote)][:, 0, which_one_is_best])
                score.append(predict_vote)
                score_value.append(score_vote)
                if score[ii] == which_one_is_best:
                    ind_good_grasp.append(ii)
                else:
                    if show_bad_grasp:
                        ind_bad_grasp.append(ii)
        print("Got {} good grasps, and {} bad grasps".format(len(ind_good_grasp),
                                                             len(in_ind_points) - len(ind_good_grasp)))
        if len(ind_good_grasp) != 0:
            real_good_grasp = [real_grasp[i] for i in ind_good_grasp]
            real_score_value = [score_value[i] for i in ind_good_grasp]
            if show_bad_grasp:
                real_bad_grasp = [real_grasp[i] for i in ind_bad_grasp]
        # end of grasp detection
        # get sorted ind by the score values
        sorted_value_ind = list(index for index, item in sorted(enumerate(real_score_value),
                                                                key=lambda item: item[1],
                                                                reverse=True))
        # sort grasps using the ind
        sorted_real_good_grasp = [real_good_grasp[i] for i in sorted_value_ind]
        real_good_grasp = sorted_real_good_grasp
        # get the sorted score value, from high to low
        real_score_value = sorted(real_score_value, reverse=True)
        # visualize the grasp poses
        marker_array = MarkerArray()
        marker_array_single = MarkerArray()
        grasp_msg_list = GraspConfigList()
        for i in range(len(real_good_grasp)):
            grasp_msg = get_grasp_msg(real_good_grasp[i], real_score_value[i])
            grasp_msg_list.grasps.append(grasp_msg)
        for i in range(len(real_good_grasp)):
            show_grasp_marker(marker_array, real_good_grasp[i], gripper, (0, 1, 0), marker_life_time)
        if show_bad_grasp:
            for i in range(len(real_bad_grasp)):
                show_grasp_marker(marker_array, real_bad_grasp[i], gripper, (1, 0, 0), marker_life_time)
        id_ = 0
        for m in marker_array.markers:
            m.id = id_
            id_ += 1
        grasp_msg_list.header.stamp = rospy.Time.now()
        grasp_msg_list.header.frame_id = "/table_top"
        # from IPython import embed;embed()
        if len(real_good_grasp) != 0:
            # Publish only the best (highest-scoring) grasp.
            i = 0
            single_grasp_list_pub = GraspConfigList()
            single_grasp_list_pub.header.stamp = rospy.Time.now()
            single_grasp_list_pub.header.frame_id = "/table_top"
            grasp_msg = get_grasp_msg(real_good_grasp[i], real_score_value[i])
            single_grasp_list_pub.grasps.append(grasp_msg)
            show_grasp_marker(marker_array_single, real_good_grasp[i], gripper, (1, 0, 0), marker_life_time+20)
            for m in marker_array_single.markers:
                m.id = id_
                id_ += 1
            pub1.publish(marker_array)
            rospy.sleep(4)
            pub2.publish(single_grasp_list_pub)
            pub1.publish(marker_array_single)
        # pub2.publish(grasp_msg_list)
        rospy.loginfo(" Publishing grasp pose to rviz using marker array and good grasp pose")
        rate.sleep()
|
subprocess2.py | # coding=utf8
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Collection of subprocess wrapper functions.
In theory you shouldn't need anything else in subprocess, or this module failed.
"""
import cStringIO
import errno
import logging
import os
import Queue
import subprocess
import sys
import time
import threading
# Constants forwarded from subprocess.
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
# Sends stdout or stderr to os.devnull.
VOID = object()
# Error code when a process was killed because it timed out.
TIMED_OUT = -2001
# Globals.
# Set to True if you somehow need to disable this hack.
SUBPROCESS_CLEANUP_HACKED = False
class CalledProcessError(subprocess.CalledProcessError):
  """Augment the standard exception with more data."""
  def __init__(self, returncode, cmd, cwd, stdout, stderr):
    super(CalledProcessError, self).__init__(returncode, cmd, output=stdout)
    # Keep .stdout as an alias of .output for backward compatibility.
    self.stdout = self.output
    self.stderr = stderr
    self.cwd = cwd

  def __str__(self):
    summary = 'Command %s returned non-zero exit status %s' % (
        ' '.join(self.cmd), self.returncode)
    if self.cwd:
      summary += ' in ' + self.cwd
    pieces = (summary, self.stdout, self.stderr)
    return '\n'.join(p for p in pieces if p)
class CygwinRebaseError(CalledProcessError):
  """Occurs when cygwin's fork() emulation fails due to rebased dll."""
  # No extra state; the subclass exists so callers can distinguish this
  # specific failure mode from a generic CalledProcessError.
## Utility functions
def kill_pid(pid):
  """Kills a process by its process id.

  Best-effort: silently does nothing when the signal module is unavailable.
  NOTE(review): signal.SIGKILL is missing on Windows, which would raise
  AttributeError (not caught here) — presumably kill_win is used there
  instead; confirm.
  """
  try:
    # Unable to import 'module'
    # pylint: disable=E1101,F0401
    import signal
    return os.kill(pid, signal.SIGKILL)
  except ImportError:
    pass
def kill_win(process):
  """Kills a process with its windows handle.

  Has no effect on other platforms.
  """
  try:
    # Unable to import 'module'
    # pylint: disable=F0401
    import win32process
    # Access to a protected member _handle of a client class
    # pylint: disable=W0212
    # Exit code -1 mirrors TerminateProcess semantics for a forced kill.
    return win32process.TerminateProcess(process._handle, -1)
  except ImportError:
    # Not on Windows (or pywin32 missing): silently do nothing.
    pass
def add_kill():
  """Adds kill() method to subprocess.Popen for python <2.6"""
  if hasattr(subprocess.Popen, 'kill'):
    # Modern Python already provides Popen.kill(); nothing to patch.
    return
  # Pick the platform-appropriate implementation.
  if sys.platform == 'win32':
    impl = kill_win
  else:
    impl = lambda process: kill_pid(process.pid)
  subprocess.Popen.kill = impl
def hack_subprocess():
  """subprocess functions may throw exceptions when used in multiple threads.

  See http://bugs.python.org/issue1731717 for more information.
  """
  global SUBPROCESS_CLEANUP_HACKED
  if SUBPROCESS_CLEANUP_HACKED or threading.activeCount() == 1:
    # Already hacked, or still single-threaded: leaking with only one thread
    # is not a concern, so apply the hack lazily.
    return
  subprocess._cleanup = lambda: None
  SUBPROCESS_CLEANUP_HACKED = True
def get_english_env(env):
  """Forces LANG and/or LANGUAGE to be English.

  Forces encoding to utf-8 for subprocesses.

  Returns None if it is unnecessary.
  """
  if sys.platform == 'win32':
    return None
  env = env or os.environ

  # Missing variables default to 'en' and therefore count as English.
  def english(name):
    return env.get(name, 'en').startswith('en')

  if english('LANG') and english('LANGUAGE'):
    # Nothing to change.
    return None

  # Requires modifications: work on a copy so the caller's env is untouched.
  env = env.copy()
  for name in ('LANG', 'LANGUAGE'):
    if not english(name):
      env[name] = 'en_US.UTF-8'
  return env
class NagTimer(object):
  """
  Triggers a callback when a time interval passes without an event being fired.

  For example, the event could be receiving terminal output from a subprocess;
  and the callback could print a warning to stderr that the subprocess appeared
  to be hung.
  """
  def __init__(self, interval, cb):
    # interval: seconds of silence tolerated before cb fires.
    # cb: callable receiving the number of seconds since the last event.
    self.interval = interval
    self.cb = cb
    self.timer = threading.Timer(self.interval, self.fn)
    self.last_output = self.previous_last_output = 0

  def start(self):
    """Arms the timer; call once after construction."""
    self.last_output = self.previous_last_output = time.time()
    self.timer.start()

  def event(self):
    """Records that activity happened, postponing the nag callback."""
    self.last_output = time.time()

  def fn(self):
    """Timer body: nag if nothing happened since the last check, then re-arm
    so the next check lands one interval after the most recent event."""
    now = time.time()
    if self.last_output == self.previous_last_output:
      self.cb(now - self.previous_last_output)
    # Use 0.1 fudge factor, just in case
    # (self.last_output - now) is very close to zero.
    sleep_time = (self.last_output - now - 0.1) % self.interval
    self.previous_last_output = self.last_output
    self.timer = threading.Timer(sleep_time + 0.1, self.fn)
    self.timer.start()

  def cancel(self):
    """Stops the pending timer; no further callbacks fire."""
    self.timer.cancel()
class Popen(subprocess.Popen):
  """Wraps subprocess.Popen() with various workarounds.

  - Forces English output since it's easier to parse the stdout if it is always
    in English.
  - Sets shell=True on windows by default. You can override this by forcing
    shell parameter to a value.
  - Adds support for VOID to not buffer when not needed.
  - Adds self.start property.

  Note: Popen() can throw OSError when cwd or args[0] doesn't exist. Translate
  exceptions generated by cygwin when it fails trying to emulate fork().
  """
  def __init__(self, args, **kwargs):
    # Make sure we hack subprocess if necessary.
    hack_subprocess()
    add_kill()
    # Force English output so callers can parse it reliably.
    env = get_english_env(kwargs.get('env'))
    if env:
      kwargs['env'] = env
    if kwargs.get('shell') is None:
      # *Sigh*: Windows needs shell=True, or else it won't search %PATH% for
      # the executable, but shell=True makes subprocess on Linux fail when it's
      # called with a list because it only tries to execute the first item in
      # the list.
      kwargs['shell'] = bool(sys.platform=='win32')
    # Build a printable command string for logging and error messages.
    if isinstance(args, basestring):
      tmp_str = args
    elif isinstance(args, (list, tuple)):
      tmp_str = ' '.join(args)
    else:
      raise CalledProcessError(None, args, kwargs.get('cwd'), None, None)
    if kwargs.get('cwd', None):
      tmp_str += '; cwd=%s' % kwargs['cwd']
    logging.debug(tmp_str)
    # Per-stream bookkeeping: optional streaming callback, and whether the
    # stream was redirected to os.devnull (VOID).
    self.stdout_cb = None
    self.stderr_cb = None
    self.stdin_is_void = False
    self.stdout_is_void = False
    self.stderr_is_void = False
    self.cmd_str = tmp_str
    if kwargs.get('stdin') is VOID:
      kwargs['stdin'] = open(os.devnull, 'r')
      self.stdin_is_void = True
    for stream in ('stdout', 'stderr'):
      if kwargs.get(stream) in (VOID, os.devnull):
        kwargs[stream] = open(os.devnull, 'w')
        setattr(self, stream + '_is_void', True)
      if callable(kwargs.get(stream)):
        # A callable means "stream this output to the callback"; the actual
        # pipe is drained by _tee_threads().
        setattr(self, stream + '_cb', kwargs[stream])
        kwargs[stream] = PIPE
    # Wall-clock start time; used for timeout logging below.
    self.start = time.time()
    self.timeout = None
    self.nag_timer = None
    self.nag_max = None
    self.shell = kwargs.get('shell', None)
    # Silence pylint on MacOSX
    self.returncode = None
    try:
      super(Popen, self).__init__(args, **kwargs)
    except OSError, e:
      if e.errno == errno.EAGAIN and sys.platform == 'cygwin':
        # Convert fork() emulation failure into a CygwinRebaseError().
        raise CygwinRebaseError(
            e.errno,
            args,
            kwargs.get('cwd'),
            None,
            'Visit '
            'http://code.google.com/p/chromium/wiki/CygwinDllRemappingFailure '
            'to learn how to fix this error; you need to rebase your cygwin '
            'dlls')
      # Popen() can throw OSError when cwd or args[0] doesn't exist.
      raise OSError('Execution failed with error: %s.\n'
                    'Check that %s or %s exist and have execution permission.'
                    % (str(e), kwargs.get('cwd'), args[0]))

  def _tee_threads(self, input):  # pylint: disable=W0622
    """Does I/O for a process's pipes using threads.

    It's the simplest and slowest implementation. Expect very slow behavior.

    If there is a callback and it doesn't keep up with the calls, the timeout
    effectiveness will be delayed accordingly.
    """
    # Queue of either of <threadname> when done or (<threadname>, data). In
    # theory we would like to limit to ~64kb items to not cause large memory
    # usage when the callback blocks. It is not done because it slows down
    # processing on OSX10.6 by a factor of 2x, making it even slower than
    # Windows! Revisit this decision if it becomes a problem, e.g. crash
    # because of memory exhaustion.
    queue = Queue.Queue()
    done = threading.Event()
    nag = None
    def write_stdin():
      # Feeds |input| to the child 1KB at a time, then closes the pipe.
      try:
        stdin_io = cStringIO.StringIO(input)
        while True:
          data = stdin_io.read(1024)
          if data:
            self.stdin.write(data)
          else:
            self.stdin.close()
            break
      finally:
        # Sentinel: tells the main loop this worker thread finished.
        queue.put('stdin')
    def _queue_pipe_read(pipe, name):
      """Queues characters read from a pipe into a queue."""
      try:
        while True:
          data = pipe.read(1)
          if not data:
            break
          if nag:
            # Any output counts as activity for the nag timer.
            nag.event()
          queue.put((name, data))
      finally:
        queue.put(name)
    def timeout_fn():
      try:
        done.wait(self.timeout)
      finally:
        queue.put('timeout')
    def wait_fn():
      try:
        self.wait()
      finally:
        queue.put('wait')
    # Starts up to 5 threads:
    # Wait for the process to quit
    # Read stdout
    # Read stderr
    # Write stdin
    # Timeout
    threads = {
        'wait': threading.Thread(target=wait_fn),
    }
    if self.timeout is not None:
      threads['timeout'] = threading.Thread(target=timeout_fn)
    if self.stdout_cb:
      threads['stdout'] = threading.Thread(
          target=_queue_pipe_read, args=(self.stdout, 'stdout'))
    if self.stderr_cb:
      threads['stderr'] = threading.Thread(
          target=_queue_pipe_read, args=(self.stderr, 'stderr'))
    if input:
      threads['stdin'] = threading.Thread(target=write_stdin)
    elif self.stdin:
      # Pipe but no input, make sure it's closed.
      self.stdin.close()
    for t in threads.itervalues():
      t.start()
    if self.nag_timer:
      def _nag_cb(elapsed):
        logging.warn(' No output for %.0f seconds from command:' % elapsed)
        logging.warn(' %s' % self.cmd_str)
        # After nag_max consecutive nags with no output, treat it as a
        # timeout and stop waiting.
        if (self.nag_max and
            int('%.0f' % (elapsed / self.nag_timer)) >= self.nag_max):
          queue.put('timeout')
          done.set()  # Must do this so that timeout thread stops waiting.
      nag = NagTimer(self.nag_timer, _nag_cb)
      nag.start()
    timed_out = False
    try:
      # This thread needs to be optimized for speed.
      while threads:
        item = queue.get()
        if item[0] == 'stdout':
          self.stdout_cb(item[1])
        elif item[0] == 'stderr':
          self.stderr_cb(item[1])
        else:
          # A thread terminated.
          if item in threads:
            threads[item].join()
            del threads[item]
          if item == 'wait':
            # Terminate the timeout thread if necessary.
            done.set()
          elif item == 'timeout' and not timed_out and self.poll() is None:
            logging.debug('Timed out after %.0fs: killing' % (
                time.time() - self.start))
            self.kill()
            timed_out = True
    finally:
      # Stop the threads.
      done.set()
      if nag:
        nag.cancel()
      if 'wait' in threads:
        # Accelerate things, otherwise it would hang until the child process is
        # done.
        logging.debug('Killing child because of an exception')
        self.kill()
      # Join threads.
      for thread in threads.itervalues():
        thread.join()
      if timed_out:
        self.returncode = TIMED_OUT

  # pylint: disable=W0221,W0622
  def communicate(self, input=None, timeout=None, nag_timer=None,
                  nag_max=None):
    """Adds timeout and callbacks support.

    Returns (stdout, stderr) like subprocess.Popen().communicate().

    - The process will be killed after |timeout| seconds and returncode set to
      TIMED_OUT.
    - If the subprocess runs for |nag_timer| seconds without producing terminal
      output, print a warning to stderr.
    """
    self.timeout = timeout
    self.nag_timer = nag_timer
    self.nag_max = nag_max
    if (not self.timeout and not self.nag_timer and
        not self.stdout_cb and not self.stderr_cb):
      # Nothing fancy requested: defer to the stock implementation.
      return super(Popen, self).communicate(input)

    if self.timeout and self.shell:
      raise TypeError(
          'Using timeout and shell simultaneously will cause a process leak '
          'since the shell will be killed instead of the child process.')

    stdout = None
    stderr = None
    # Convert to a lambda to workaround python's deadlock.
    # http://docs.python.org/library/subprocess.html#subprocess.Popen.wait
    # When the pipe fills up, it would deadlock this process.
    if self.stdout and not self.stdout_cb and not self.stdout_is_void:
      stdout = []
      self.stdout_cb = stdout.append
    if self.stderr and not self.stderr_cb and not self.stderr_is_void:
      stderr = []
      self.stderr_cb = stderr.append
    self._tee_threads(input)
    if stdout is not None:
      stdout = ''.join(stdout)
    if stderr is not None:
      stderr = ''.join(stderr)
    return (stdout, stderr)
def communicate(args, timeout=None, nag_timer=None, nag_max=None, **kwargs):
  """Wraps subprocess.Popen().communicate() and add timeout support.

  Returns ((stdout, stderr), returncode).

  - The process will be killed after |timeout| seconds and returncode set to
    TIMED_OUT.
  - If the subprocess runs for |nag_timer| seconds without producing terminal
    output, print a warning to stderr; after |nag_max| such warnings the
    process is treated as timed out.
  - Automatically passes stdin content as input so do not specify stdin=PIPE.
  """
  stdin = kwargs.pop('stdin', None)
  if stdin is not None:
    if isinstance(stdin, basestring):
      # When stdin is passed as an argument, use it as the actual input data and
      # set the Popen() parameter accordingly.
      kwargs['stdin'] = PIPE
    else:
      kwargs['stdin'] = stdin
      stdin = None

  proc = Popen(args, **kwargs)
  # Bug fix: nag_max was accepted by this function but silently dropped at
  # both call sites below, so the "give up after N nags" behavior could
  # never trigger. Forward it through to Popen.communicate().
  if stdin:
    return proc.communicate(stdin, timeout, nag_timer, nag_max), proc.returncode
  else:
    return proc.communicate(None, timeout, nag_timer, nag_max), proc.returncode
def call(args, **kwargs):
  """Emulates subprocess.call().

  Automatically convert stdout=PIPE or stderr=PIPE to VOID.
  In no case they can be returned since no code path raises
  subprocess2.CalledProcessError.
  """
  # Buffering a pipe nobody will read is pointless; drop it to /dev/null.
  for stream in ('stdout', 'stderr'):
    if kwargs.get(stream) == PIPE:
      kwargs[stream] = VOID
  return communicate(args, **kwargs)[1]
def check_call_out(args, **kwargs):
  """Improved version of subprocess.check_call().

  Returns (stdout, stderr), unlike subprocess.check_call().
  Raises CalledProcessError on a non-zero exit code.
  """
  out, returncode = communicate(args, **kwargs)
  if not returncode:
    return out
  raise CalledProcessError(
      returncode, args, kwargs.get('cwd'), out[0], out[1])
def check_call(args, **kwargs):
  """Emulate subprocess.check_call().

  Discards the captured output; raises CalledProcessError via
  check_call_out() on failure, otherwise returns 0.
  """
  _ = check_call_out(args, **kwargs)
  return 0
def capture(args, **kwargs):
  """Captures stdout of a process call and returns it.

  Returns stdout.

  - Discards returncode.
  - Blocks stdin by default if not specified since no output will be visible.
  """
  kwargs.setdefault('stdin', VOID)
  # Like check_output, deny the caller from using stdout arg.
  streams, _ = communicate(args, stdout=PIPE, **kwargs)
  return streams[0]
def check_output(args, **kwargs):
  """Emulates subprocess.check_output().

  Captures stdout of a process call and returns stdout only.

  - Throws if return code is not 0.
  - Works even prior to python 2.7.
  - Blocks stdin by default if not specified since no output will be visible.
  - As per doc, "The stdout argument is not allowed as it is used internally."
  """
  # Reject a caller-supplied stdout up front; it is reserved for the capture.
  if 'stdout' in kwargs:
    raise ValueError('stdout argument not allowed, it would be overridden.')
  kwargs.setdefault('stdin', VOID)
  return check_call_out(args, stdout=PIPE, **kwargs)[0]
|
bm_threading.py | #!/usr/bin/env python
"""Some simple microbenchmarks for Python's threading support.
Current microbenchmarks:
- *_count: count down from a given large number. Example used by David
Beazley in his talk on the GIL (http://blip.tv/file/2232410). The
iterative version is named iterative_count, the threaded version
is threaded_count.
Example usage:
./bm_threading.py --num_threads=8 --check_interval=1000 threaded_count
"""
# Python imports
import optparse
import sys
import threading
import time
# Local imports
import util
def count(iterations=1000000):
    """Count down from a given starting point."""
    remaining = iterations
    while remaining > 0:
        remaining -= 1
def test_iterative_count(iterations, num_threads):
    """Time num_threads sequential count() runs, repeated 'iterations' times.

    Returns the list of per-iteration wall-clock durations in seconds.
    """
    # Warm up.
    count(1000)
    timings = []
    for _ in xrange(iterations):
        started = time.time()
        for _ in xrange(num_threads):
            count()
        timings.append(time.time() - started)
    return timings
def test_threaded_count(iterations, num_threads):
    """Time num_threads concurrent count() runs, repeated 'iterations' times.

    Returns the list of per-iteration wall-clock durations in seconds.
    """
    # Warm up.
    count(1000)
    timings = []
    for _ in xrange(iterations):
        workers = [threading.Thread(target=count) for _ in xrange(num_threads)]
        started = time.time()
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()
        timings.append(time.time() - started)
    return timings
if __name__ == "__main__":
parser = optparse.OptionParser(
usage="%prog [options] benchmark_name",
description="Test the performance of Python's threads.")
parser.add_option("--num_threads", action="store", type="int", default=2,
dest="num_threads", help="Number of threads to test.")
parser.add_option("--check_interval", action="store", type="int",
default=sys.getcheckinterval(),
dest="check_interval",
help="Value to pass to sys.setcheckinterval().")
util.add_standard_options_to(parser)
options, args = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
bm_name = args[0].lower()
func = globals().get("test_" + bm_name)
if not func:
parser.error("unknown benchmark: %s" % bm_name)
sys.setcheckinterval(options.check_interval)
util.run_benchmark(options, options.num_runs, func, options.num_threads)
|
WaagentLib.py | #!/usr/bin/env python
#
# Azure Linux Agent
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
# Implements parts of RFC 2131, 1541, 1497 and
# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx
# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx
#
import crypt
import random
import base64
try:
import httplib as httplibs
except ImportError:
import http.client as httplibs
import os
import os.path
import platform
import pwd
import re
import shutil
import socket
try:
import SocketServer as SocketServers
except ImportError:
import socketserver as SocketServers
import string
import subprocess
import sys
import tempfile
import textwrap
import threading
import time
import traceback
import xml.dom.minidom
import inspect
import zipfile
import json
import datetime
import xml.sax.saxutils
from distutils.version import LooseVersion
if not hasattr(subprocess, 'check_output'):
    # Python < 2.7: provide subprocess.check_output and a matching
    # CalledProcessError, mirroring the 2.7 stdlib behavior.
    def check_output(*popenargs, **kwargs):
        r"""Backport from subprocess module from python 2.7"""
        if 'stdout' in kwargs:
            raise ValueError('stdout argument not allowed, it will be overridden.')
        proc = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
        output = proc.communicate()[0]
        retcode = proc.poll()
        if not retcode:
            return output
        cmd = kwargs.get("args")
        if cmd is None:
            cmd = popenargs[0]
        raise subprocess.CalledProcessError(retcode, cmd, output=output)

    # Exception classes used by this module.
    class CalledProcessError(Exception):
        """Raised by check_output() when the child exits non-zero."""

        def __init__(self, returncode, cmd, output=None):
            self.returncode = returncode
            self.cmd = cmd
            self.output = output

        def __str__(self):
            return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)

    subprocess.check_output = check_output
    subprocess.CalledProcessError = CalledProcessError
GuestAgentName = "WALinuxAgent"
GuestAgentLongName = "Azure Linux Agent"
GuestAgentVersion = "WALinuxAgent-2.0.16"
ProtocolVersion = "2012-11-30" # WARNING this value is used to confirm the correct fabric protocol.
Config = None
WaAgent = None
DiskActivated = False
Openssl = "openssl"
Children = []
ExtensionChildren = []
VMM_STARTUP_SCRIPT_NAME = 'install'
VMM_CONFIG_FILE_NAME = 'linuxosconfiguration.xml'
global RulesFiles
RulesFiles = ["/lib/udev/rules.d/75-persistent-net-generator.rules",
"/etc/udev/rules.d/70-persistent-net.rules"]
VarLibDhcpDirectories = ["/var/lib/dhclient", "/var/lib/dhcpcd", "/var/lib/dhcp"]
EtcDhcpClientConfFiles = ["/etc/dhcp/dhclient.conf", "/etc/dhcp3/dhclient.conf"]
global LibDir
LibDir = "/var/lib/waagent"
global provisioned
provisioned = False
global provisionError
provisionError = None
HandlerStatusToAggStatus = {"installed": "Installing", "enabled": "Ready", "unintalled": "NotReady",
"disabled": "NotReady"}
WaagentConf = """\
#
# Azure Linux Agent Configuration
#
Role.StateConsumer=None # Specified program is invoked with the argument "Ready" when we report ready status
# to the endpoint server.
Role.ConfigurationConsumer=None # Specified program is invoked with XML file argument specifying role configuration.
Role.TopologyConsumer=None # Specified program is invoked with XML file argument specifying role topology.
Provisioning.Enabled=y #
Provisioning.DeleteRootPassword=y # Password authentication for root account will be unavailable.
Provisioning.RegenerateSshHostKeyPair=y # Generate fresh host key pair.
Provisioning.SshHostKeyPairType=rsa # Supported values are "rsa", "dsa" and "ecdsa".
Provisioning.MonitorHostName=y # Monitor host name changes and publish changes via DHCP requests.
ResourceDisk.Format=y # Format if unformatted. If 'n', resource disk will not be mounted.
ResourceDisk.Filesystem=ext4 # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here.
ResourceDisk.MountPoint=/mnt/resource #
ResourceDisk.EnableSwap=n # Create and use swapfile on resource disk.
ResourceDisk.SwapSizeMB=0 # Size of the swapfile.
LBProbeResponder=y # Respond to load balancer probes if requested by Azure.
Logs.Verbose=n # Enable verbose logs
OS.RootDeviceScsiTimeout=300 # Root device timeout in seconds.
OS.OpensslPath=None # If "None", the system default version is used.
"""
README_FILENAME = "DATALOSS_WARNING_README.txt"
README_FILECONTENT = """\
WARNING: THIS IS A TEMPORARY DISK.
Any data stored on this drive is SUBJECT TO LOSS and THERE IS NO WAY TO RECOVER IT.
Please do not use this disk for storing any personal or application data.
For additional details to please refer to the MSDN documentation at : http://msdn.microsoft.com/en-us/library/windowsazure/jj672979.aspx
"""
############################################################
# BEGIN DISTRO CLASS DEFS
############################################################
############################################################
# AbstractDistro
############################################################
class AbstractDistro(object):
    """
    AbstractDistro defines a skeleton necessary for a concrete Distro class.

    Generic methods and attributes are kept here, distribution specific attributes
    and behavior are to be placed in the concrete child named distroDistro, where
    distro is the string returned by calling python platform.linux_distribution()[0].
    So for CentOS the derived class is called 'centosDistro'.
    """

    def __init__(self):
        """
        Generic Attributes go here.  These are based on 'majority rules'.
        This __init__() may be called or overriden by the child.
        """
        self.agent_service_name = os.path.basename(sys.argv[0])
        self.selinux = None
        self.service_cmd = '/usr/sbin/service'
        self.ssh_service_restart_option = 'restart'
        self.ssh_service_name = 'ssh'
        self.ssh_config_file = '/etc/ssh/sshd_config'
        self.hostname_file_path = '/etc/hostname'
        self.dhcp_client_name = 'dhclient'
        self.requiredDeps = ['route', 'shutdown', 'ssh-keygen', 'useradd', 'usermod',
                             'openssl', 'sfdisk', 'fdisk', 'mkfs',
                             'sed', 'grep', 'sudo', 'parted']
        self.init_script_file = '/etc/init.d/waagent'
        self.agent_package_name = 'WALinuxAgent'
        self.fileBlackList = ["/root/.bash_history", "/var/log/waagent.log", '/etc/resolv.conf']
        self.agent_files_to_uninstall = ["/etc/waagent.conf", "/etc/logrotate.d/waagent"]
        self.grubKernelBootOptionsFile = '/etc/default/grub'
        self.grubKernelBootOptionsLine = 'GRUB_CMDLINE_LINUX_DEFAULT='
        self.getpidcmd = 'pidof'
        self.mount_dvd_cmd = 'mount'
        self.sudoers_dir_base = '/etc'
        self.waagent_conf_file = WaagentConf
        self.shadow_file_mode = 0o600
        self.shadow_file_path = "/etc/shadow"
        self.dhcp_enabled = False

    def isSelinuxSystem(self):
        """
        Checks and sets self.selinux = True if SELinux is available on system.
        """
        if self.selinux is None:
            # Run() returns non-zero when 'getenforce' is not found.
            if Run("which getenforce", chk_err=False):
                self.selinux = False
            else:
                self.selinux = True
        return self.selinux

    def isSelinuxRunning(self):
        """
        Calls shell command 'getenforce' and returns True if 'Enforcing'.
        """
        if self.isSelinuxSystem():
            return RunGetOutput("getenforce")[1].startswith("Enforcing")
        else:
            return False

    def setSelinuxEnforce(self, state):
        """
        Calls shell command 'setenforce' with 'state' and returns resulting exit code.
        """
        if self.isSelinuxSystem():
            if state:
                s = '1'
            else:
                s = '0'
            return Run("setenforce " + s)

    def setSelinuxContext(self, path, cn):
        """
        Calls shell 'chcon' with 'path' and 'cn' context.
        Returns exit result.
        """
        if self.isSelinuxSystem():
            if not os.path.exists(path):
                Error("Path does not exist: {0}".format(path))
                return 1
            return Run('chcon ' + cn + ' ' + path)

    def setHostname(self, name):
        """
        Shell call to hostname.
        Returns resulting exit code.
        """
        return Run('hostname ' + name)

    def publishHostname(self, name):
        """
        Set the contents of the hostname file to 'name' and make sure every
        existing dhclient config file advertises the new host name.
        Return 1 on failure.
        """
        try:
            r = SetFileContents(self.hostname_file_path, name)
            for f in EtcDhcpClientConfFiles:
                if os.path.exists(f) and FindStringInFile(f,
                                                          r'^[^#]*?send\s*host-name.*?(<hostname>|gethostname[(,)])') == None:
                    # Bug fix: this used to always rewrite
                    # /etc/dhcp/dhclient.conf even when the file being checked
                    # was a different entry (e.g. /etc/dhcp3/dhclient.conf).
                    r = ReplaceFileContentsAtomic(f, "send host-name \"" + name + "\";\n"
                                                  + "\n".join(filter(lambda a: not a.startswith("send host-name"),
                                                                     GetFileContents(f).split('\n'))))
        except:
            return 1
        return r

    def installAgentServiceScriptFiles(self):
        """
        Create the waagent support files for service installation.
        Called by registerAgentService()
        Abstract Virtual Function.  Over-ridden in concrete Distro classes.
        """
        pass

    def registerAgentService(self):
        """
        Calls installAgentService to create service files.
        Shell exec service registration commands. (e.g. chkconfig --add waagent)
        Abstract Virtual Function.  Over-ridden in concrete Distro classes.
        """
        pass

    def uninstallAgentService(self):
        """
        Call service subsystem to remove waagent script.
        Abstract Virtual Function.  Over-ridden in concrete Distro classes.
        """
        pass

    def unregisterAgentService(self):
        """
        Calls self.stopAgentService and call self.uninstallAgentService()
        """
        self.stopAgentService()
        self.uninstallAgentService()

    def startAgentService(self):
        """
        Service call to start the Agent service
        """
        return Run(self.service_cmd + ' ' + self.agent_service_name + ' start')

    def stopAgentService(self):
        """
        Service call to stop the Agent service
        """
        return Run(self.service_cmd + ' ' + self.agent_service_name + ' stop', False)

    def restartSshService(self):
        """
        Service call to re(start) the SSH service
        """
        sshRestartCmd = self.service_cmd + " " + self.ssh_service_name + " " + self.ssh_service_restart_option
        retcode = Run(sshRestartCmd)
        if retcode > 0:
            Error("Failed to restart SSH service with return code:" + str(retcode))
        return retcode

    def checkPackageInstalled(self, p):
        """
        Query package database for prescence of an installed package.
        Abstract Virtual Function.  Over-ridden in concrete Distro classes.
        """
        pass

    def checkPackageUpdateable(self, p):
        """
        Online check if updated package of walinuxagent is available.
        Abstract Virtual Function.  Over-ridden in concrete Distro classes.
        """
        pass

    def deleteRootPassword(self):
        """
        Generic root password removal.
        Locks the root account in /etc/shadow and restores file mode/context.
        """
        filepath = "/etc/shadow"
        ReplaceFileContentsAtomic(filepath, "root:*LOCK*:14600::::::\n"
                                  + "\n".join(
            filter(lambda a: not a.startswith("root:"), GetFileContents(filepath).split('\n'))))
        os.chmod(filepath, self.shadow_file_mode)
        if self.isSelinuxSystem():
            self.setSelinuxContext(filepath, 'system_u:object_r:shadow_t:s0')
        Log("Root password deleted.")
        return 0

    def changePass(self, user, password):
        """
        Change 'user' password, honoring the configured crypt id and salt
        length (defaults: SHA-512, 10-char salt). Returns an error string
        from chpasswd() on failure, None on success.
        """
        Log("Change user password")
        crypt_id = Config.get("Provisioning.PasswordCryptId")
        if crypt_id is None:
            crypt_id = "6"
        salt_len = Config.get("Provisioning.PasswordCryptSaltLength")
        try:
            salt_len = int(salt_len)
            if salt_len < 0 or salt_len > 10:
                salt_len = 10
        except (ValueError, TypeError):
            salt_len = 10
        return self.chpasswd(user, password, crypt_id=crypt_id,
                             salt_len=salt_len)

    def chpasswd(self, username, password, crypt_id=6, salt_len=10):
        """
        Set 'username' password to the hash of 'password' via usermod -p.
        Returns an error string on failure, None on success.
        """
        passwd_hash = self.gen_password_hash(password, crypt_id, salt_len)
        cmd = "usermod -p '{0}' {1}".format(passwd_hash, username)
        # log_cmd=False keeps the password hash out of the log file.
        ret, output = RunGetOutput(cmd, log_cmd=False)
        if ret != 0:
            return "Failed to set password for {0}: {1}".format(username, output)

    def gen_password_hash(self, password, crypt_id, salt_len):
        """
        Return a crypt(3)-style hash of 'password' using the given crypt id
        (e.g. "6" for SHA-512) and a random salt of 'salt_len' characters.
        """
        collection = string.ascii_letters + string.digits
        salt = ''.join(random.choice(collection) for _ in range(salt_len))
        salt = "${0}${1}".format(crypt_id, salt)
        return crypt.crypt(password, salt)

    def load_ata_piix(self):
        """Delegate ata_piix module load to the agent."""
        return WaAgent.TryLoadAtapiix()

    def unload_ata_piix(self):
        """
        Generic function to remove ata_piix.ko.
        """
        return WaAgent.TryUnloadAtapiix()

    def deprovisionWarnUser(self):
        """
        Generic user warnings used at deprovision.
        """
        print("WARNING! Nameserver configuration in /etc/resolv.conf will be deleted.")

    def deprovisionDeleteFiles(self):
        """
        Files to delete when VM is deprovisioned
        """
        for a in VarLibDhcpDirectories:
            Run("rm -f " + a + "/*")
        # Clear LibDir, remove nameserver and root bash history
        for f in os.listdir(LibDir) + self.fileBlackList:
            try:
                # Bug fix: os.listdir() yields bare names; resolve them against
                # LibDir instead of the current working directory. Blacklist
                # entries are already absolute paths.
                os.remove(f if os.path.isabs(f) else os.path.join(LibDir, f))
            except OSError:
                # Best-effort cleanup: ignore missing files / directories.
                pass
        return 0

    def uninstallDeleteFiles(self):
        """
        Files to delete when agent is uninstalled.
        """
        for f in self.agent_files_to_uninstall:
            try:
                os.remove(f)
            except OSError:
                # Best-effort cleanup: ignore files that are already gone.
                pass
        return 0

    def checkDependencies(self):
        """
        Generic dependency check.
        Return 1 unless all dependencies are satisfied.
        """
        if self.checkPackageInstalled('NetworkManager'):
            Error(GuestAgentLongName + " is not compatible with network-manager.")
            return 1
        try:
            __import__('pyasn1')
        except ImportError:
            Error(GuestAgentLongName + " requires python-pyasn1 for your Linux distribution.")
            return 1
        for a in self.requiredDeps:
            if Run("which " + a + " > /dev/null 2>&1", chk_err=False):
                Error("Missing required dependency: " + a)
                return 1
        return 0

    def packagedInstall(self, buildroot):
        """
        Called from setup.py for use by RPM.
        Copies generated files waagent.conf, under the buildroot.
        """
        if not os.path.exists(buildroot + '/etc'):
            os.mkdir(buildroot + '/etc')
        SetFileContents(buildroot + '/etc/waagent.conf', MyDistro.waagent_conf_file)
        if not os.path.exists(buildroot + '/etc/logrotate.d'):
            os.mkdir(buildroot + '/etc/logrotate.d')
        SetFileContents(buildroot + '/etc/logrotate.d/waagent', WaagentLogrotate)
        self.init_script_file = buildroot + self.init_script_file
        # this allows us to call installAgentServiceScriptFiles()
        if not os.path.exists(os.path.dirname(self.init_script_file)):
            os.mkdir(os.path.dirname(self.init_script_file))
        self.installAgentServiceScriptFiles()

    def RestartInterface(self, iface, max_retry=3):
        """
        Bounce network interface 'iface' (ifdown && ifup), retrying up to
        max_retry times with a 5 second pause between attempts.
        """
        for retry in range(1, max_retry + 1):
            ret = Run("ifdown " + iface + " && ifup " + iface)
            if ret == 0:
                return
            Log("Failed to restart interface: {0}, ret={1}".format(iface, ret))
            if retry < max_retry:
                Log("Retry restart interface in 5 seconds")
                time.sleep(5)

    def CreateAccount(self, user, password, expiration, thumbprint):
        """Delegate to the module-level CreateAccount helper."""
        return CreateAccount(user, password, expiration, thumbprint)

    def DeleteAccount(self, user):
        """Delegate to the module-level DeleteAccount helper."""
        return DeleteAccount(user)

    def Install(self):
        """Delegate to the module-level Install helper."""
        return Install()

    def mediaHasFilesystem(self, dsk):
        """
        Return True if fdisk reports a disk on device path 'dsk'.
        An empty device path is treated as "no filesystem".
        """
        if len(dsk) == 0:
            return False
        if Run("LC_ALL=C fdisk -l " + dsk + " | grep Disk"):
            return False
        return True

    def mountDVD(self, dvd, location):
        """Mount device 'dvd' at 'location'; returns (exitcode, output)."""
        return RunGetOutput(self.mount_dvd_cmd + ' ' + dvd + ' ' + location)

    def GetHome(self):
        """Delegate to the module-level GetHome helper."""
        return GetHome()

    def getDhcpClientName(self):
        """Return the name of this distro's DHCP client binary."""
        return self.dhcp_client_name

    def initScsiDiskTimeout(self):
        """
        Set the SCSI disk timeout when the agent starts running
        """
        self.setScsiDiskTimeout()

    def setScsiDiskTimeout(self):
        """
        Iterate all SCSI disks(include hot-add) and set their timeout if their value are different from the OS.RootDeviceScsiTimeout
        """
        try:
            scsiTimeout = Config.get("OS.RootDeviceScsiTimeout")
            for diskName in [disk for disk in os.listdir("/sys/block") if disk.startswith("sd")]:
                self.setBlockDeviceTimeout(diskName, scsiTimeout)
        except:
            # Best-effort: missing sysfs entries or config must not abort startup.
            pass

    def setBlockDeviceTimeout(self, device, timeout):
        """
        Set SCSI disk timeout by set /sys/block/sd*/device/timeout
        """
        if timeout is not None and device:
            filePath = "/sys/block/" + device + "/device/timeout"
            if (GetFileContents(filePath).splitlines()[0].rstrip() != timeout):
                SetFileContents(filePath, timeout)
                Log("SetBlockDeviceTimeout: Update the device " + device + " with timeout " + timeout)

    def waitForSshHostKey(self, path):
        """
        Provide a dummy waiting, since by default, ssh host key is created by waagent and the key
        should already been created.
        """
        if (os.path.isfile(path)):
            return True
        else:
            Error("Can't find host key: {0}".format(path))
            return False

    def isDHCPEnabled(self):
        """Return whether this distro runs its own DHCP client the agent must stop."""
        return self.dhcp_enabled

    def stopDHCP(self):
        """
        Stop the system DHCP client so that the agent can bind on its port. If
        the distro has set dhcp_enabled to True, it will need to provide an
        implementation of this method.
        """
        raise NotImplementedError('stopDHCP method missing')

    def startDHCP(self):
        """
        Start the system DHCP client. If the distro has set dhcp_enabled to
        True, it will need to provide an implementation of this method.
        """
        raise NotImplementedError('startDHCP method missing')

    def translateCustomData(self, data):
        """
        Translate the custom data from a Base64 encoding. Default to no-op.
        """
        decodeCustomData = Config.get("Provisioning.DecodeCustomData")
        if decodeCustomData is not None and decodeCustomData.lower().startswith("y"):
            return base64.b64decode(data)
        return data

    def getConfigurationPath(self):
        """Return the path of the agent configuration file."""
        return "/etc/waagent.conf"

    def getProcessorCores(self):
        """Return the number of processors listed in /proc/cpuinfo."""
        return int(RunGetOutput("grep 'processor.*:' /proc/cpuinfo |wc -l")[1])

    def getTotalMemory(self):
        """Return total system memory in MB, per /proc/meminfo."""
        return int(RunGetOutput("grep MemTotal /proc/meminfo |awk '{print $2}'")[1]) / 1024

    def getInterfaceNameByMac(self, mac):
        """
        Return the ethN interface name owning MAC address 'mac', parsed from
        'ifconfig -a' output. Raises Exception when no match is found.
        """
        ret, output = RunGetOutput("ifconfig -a")
        if ret != 0:
            raise Exception("Failed to get network interface info")
        output = output.replace('\n', '')
        match = re.search(r"(eth\d).*(HWaddr|ether) {0}".format(mac),
                          output, re.IGNORECASE)
        if match is None:
            raise Exception("Failed to get ifname with mac: {0}".format(mac))
        output = match.group(0)
        eths = re.findall(r"eth\d", output)
        if eths is None or len(eths) == 0:
            raise Exception("Failed to get ifname with mac: {0}".format(mac))
        # The last ethN token in the matched span is the owning interface.
        return eths[-1]

    def configIpV4(self, ifName, addr, netmask=24):
        """
        Bring 'ifName' up and assign it 'addr'/'netmask'.
        Raises Exception on either ifconfig failure.
        """
        ret, output = RunGetOutput("ifconfig {0} up".format(ifName))
        if ret != 0:
            raise Exception("Failed to bring up {0}: {1}".format(ifName,
                                                                 output))
        ret, output = RunGetOutput("ifconfig {0} {1}/{2}".format(ifName, addr,
                                                                 netmask))
        if ret != 0:
            raise Exception("Failed to config ipv4 for {0}: {1}".format(ifName,
                                                                        output))

    def setDefaultGateway(self, gateway):
        """
        Shell call to install 'gateway' as the default route.
        """
        # Bug fix: the command string was missing the space between "gw" and
        # the gateway address, producing "route add default gwW.X.Y.Z".
        Run("/sbin/route add default gw " + gateway, chk_err=False)

    def routeAdd(self, net, mask, gateway):
        """Add a static route for net/mask via gateway."""
        Run("/sbin/route add -net " + net + " netmask " + mask + " gw " + gateway,
            chk_err=False)
############################################################
# GentooDistro
############################################################
gentoo_init_file = """\
#!/sbin/runscript
command=/usr/sbin/waagent
pidfile=/var/run/waagent.pid
command_args=-daemon
command_background=true
name="Azure Linux Agent"
depend()
{
need localmount
use logger network
after bootmisc modules
}
"""
class gentooDistro(AbstractDistro):
    """
    Gentoo distro concrete class
    """

    def __init__(self):
        super(gentooDistro, self).__init__()
        self.service_cmd = '/sbin/service'
        self.ssh_service_name = 'sshd'
        self.hostname_file_path = '/etc/conf.d/hostname'
        self.dhcp_client_name = 'dhcpcd'
        self.shadow_file_mode = 0o640
        self.init_file = gentoo_init_file

    def publishHostname(self, name):
        """
        Persist 'name' into the Gentoo hostname file (hostname="name" form).
        Returns the ReplaceFileContentsAtomic() result on success, 1 on
        failure (including a missing hostname file).
        """
        # Bug fix: 'r' used to be unbound, raising NameError at 'return r'
        # when the hostname file did not exist; default to failure instead.
        r = 1
        try:
            if (os.path.isfile(self.hostname_file_path)):
                r = ReplaceFileContentsAtomic(self.hostname_file_path, "hostname=\"" + name + "\"\n"
                                              + "\n".join(filter(lambda a: not a.startswith("hostname="),
                                                                 GetFileContents(self.hostname_file_path).split("\n"))))
        except:
            return 1
        return r

    def installAgentServiceScriptFiles(self):
        """Write the OpenRC init script and make it executable."""
        SetFileContents(self.init_script_file, self.init_file)
        os.chmod(self.init_script_file, 0o755)

    def registerAgentService(self):
        """Install the init script, then add the service to the default runlevel."""
        self.installAgentServiceScriptFiles()
        return Run('rc-update add ' + self.agent_service_name + ' default')

    def uninstallAgentService(self):
        """Remove the service from the default runlevel."""
        return Run('rc-update del ' + self.agent_service_name + ' default')

    def unregisterAgentService(self):
        """Stop the service, then unregister it; return the unregister result."""
        self.stopAgentService()
        return self.uninstallAgentService()

    def checkPackageInstalled(self, p):
        """Return 1 if package 'p' is installed (per eix -I), else 0."""
        if Run('eix -I ^' + p + '$', chk_err=False):
            return 0
        else:
            return 1

    def checkPackageUpdateable(self, p):
        """Return 1 if an update for package 'p' is available (per eix -u), else 0."""
        if Run('eix -u ^' + p + '$', chk_err=False):
            return 0
        else:
            return 1

    def RestartInterface(self, iface, max_retry=3):
        # max_retry is accepted for signature compatibility with
        # AbstractDistro.RestartInterface(); this implementation performs a
        # single restart through the interface's init script.
        Run("/etc/init.d/net." + iface + " restart")
############################################################
# SuSEDistro
############################################################
# SysV init script for SLES/openSUSE (rc.status based); written verbatim
# to the init script path by SuSEDistro.installAgentServiceScriptFiles().
suse_init_file = """\
#! /bin/sh
#
# Azure Linux Agent sysV init script
#
# Copyright 2013 Microsoft Corporation
# Copyright SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# /etc/init.d/waagent
#
# and symbolic link
#
# /usr/sbin/rcwaagent
#
# System startup script for the waagent
#
### BEGIN INIT INFO
# Provides: AzureLinuxAgent
# Required-Start: $network sshd
# Required-Stop: $network sshd
# Default-Start: 3 5
# Default-Stop: 0 1 2 6
# Description: Start the AzureLinuxAgent
### END INIT INFO
PYTHON=/usr/bin/python
WAZD_BIN=/usr/sbin/waagent
WAZD_CONF=/etc/waagent.conf
WAZD_PIDFILE=/var/run/waagent.pid
test -x "$WAZD_BIN" || { echo "$WAZD_BIN not installed"; exit 5; }
test -e "$WAZD_CONF" || { echo "$WAZD_CONF not found"; exit 6; }
. /etc/rc.status
# First reset status of this service
rc_reset
# Return values acc. to LSB for all commands but status:
# 0 - success
# 1 - misc error
# 2 - invalid or excess args
# 3 - unimplemented feature (e.g. reload)
# 4 - insufficient privilege
# 5 - program not installed
# 6 - program not configured
#
# Note that starting an already running service, stopping
# or restarting a not-running service as well as the restart
# with force-reload (in case signalling is not supported) are
# considered a success.
case "$1" in
start)
echo -n "Starting AzureLinuxAgent"
## Start daemon with startproc(8). If this fails
## the echo return value is set appropriate.
startproc -f ${PYTHON} ${WAZD_BIN} -daemon
rc_status -v
;;
stop)
echo -n "Shutting down AzureLinuxAgent"
## Stop daemon with killproc(8) and if this fails
## set echo the echo return value.
killproc -p ${WAZD_PIDFILE} ${PYTHON} ${WAZD_BIN}
rc_status -v
;;
try-restart)
## Stop the service and if this succeeds (i.e. the
## service was running before), start it again.
$0 status >/dev/null && $0 restart
rc_status
;;
restart)
## Stop the service and regardless of whether it was
## running or not, start it again.
$0 stop
sleep 1
$0 start
rc_status
;;
force-reload|reload)
rc_status
;;
status)
echo -n "Checking for service AzureLinuxAgent "
## Check status with checkproc(8), if process is running
## checkproc will return with exit status 0.
checkproc -p ${WAZD_PIDFILE} ${PYTHON} ${WAZD_BIN}
rc_status -v
;;
probe)
;;
*)
echo "Usage: $0 {start|stop|status|try-restart|restart|force-reload|reload}"
exit 1
;;
esac
rc_exit
"""
class SuSEDistro(AbstractDistro):
    """
    SuSE Distro concrete class
    Put SuSE specific behavior here...
    """

    def __init__(self):
        super(SuSEDistro, self).__init__()
        self.service_cmd = '/sbin/service'
        self.ssh_service_name = 'sshd'
        self.kernel_boot_options_file = '/boot/grub/menu.lst'
        self.hostname_file_path = '/etc/HOSTNAME'
        self.requiredDeps += ["/sbin/insserv"]
        self.init_file = suse_init_file
        self.dhcp_client_name = 'dhcpcd'
        # SLES >= 12 and openSUSE >= 13.2 use wicked instead of dhcpcd.
        # NOTE(review): these are lexicographic string comparisons of the
        # version field, not numeric ones; they work for the releases
        # listed but would misorder e.g. '9' vs '12'.
        if ((DistInfo(fullname=1)[0] == 'SUSE Linux Enterprise Server' and DistInfo()[1] >= '12') or \
                (DistInfo(fullname=1)[0] == 'openSUSE' and DistInfo()[1] >= '13.2')):
            self.dhcp_client_name = 'wickedd-dhcp4'
        self.grubKernelBootOptionsFile = '/boot/grub/menu.lst'
        self.grubKernelBootOptionsLine = 'kernel'
        self.getpidcmd = 'pidof '
        self.dhcp_enabled = True

    def checkPackageInstalled(self, p):
        # rpm -q exits 0 when installed; Run() returns the exit code, so a
        # truthy result means the package is NOT present.
        # Returns 1 if installed, 0 otherwise.
        if Run("rpm -q " + p, chk_err=False):
            return 0
        else:
            return 1

    def checkPackageUpdateable(self, p):
        # grep exits 0 (falsy from Run) when an update line matched.
        # Returns 1 if an update is available, 0 otherwise.
        if Run("zypper list-updates | grep " + p, chk_err=False):
            return 1
        else:
            return 0

    def installAgentServiceScriptFiles(self):
        # Best effort: failures to write/chmod the init script are ignored.
        try:
            SetFileContents(self.init_script_file, self.init_file)
            os.chmod(self.init_script_file, 0o744)
        except:
            pass

    def registerAgentService(self):
        # insserv enables the sysV script written above.
        self.installAgentServiceScriptFiles()
        return Run('insserv ' + self.agent_service_name)

    def uninstallAgentService(self):
        return Run('insserv -r ' + self.agent_service_name)

    def unregisterAgentService(self):
        self.stopAgentService()
        return self.uninstallAgentService()

    def startDHCP(self):
        Run("service " + self.dhcp_client_name + " start", chk_err=False)

    def stopDHCP(self):
        Run("service " + self.dhcp_client_name + " stop", chk_err=False)
############################################################
# redhatDistro
############################################################
# chkconfig-style SysV init script for RHEL-family systems; written
# verbatim by redhatDistro.installAgentServiceScriptFiles().
redhat_init_file = """\
#!/bin/bash
#
# Init file for AzureLinuxAgent.
#
# chkconfig: 2345 60 80
# description: AzureLinuxAgent
#
# source function library
. /etc/rc.d/init.d/functions
RETVAL=0
FriendlyName="AzureLinuxAgent"
WAZD_BIN=/usr/sbin/waagent
start()
{
echo -n $"Starting $FriendlyName: "
$WAZD_BIN -daemon &
}
stop()
{
echo -n $"Stopping $FriendlyName: "
killproc -p /var/run/waagent.pid $WAZD_BIN
RETVAL=$?
echo
return $RETVAL
}
case "$1" in
start)
start
;;
stop)
stop
;;
restart)
stop
start
;;
reload)
;;
report)
;;
status)
status $WAZD_BIN
RETVAL=$?
;;
*)
echo $"Usage: $0 {start|stop|restart|status}"
RETVAL=1
esac
exit $RETVAL
"""
class redhatDistro(AbstractDistro):
    """
    Redhat Distro concrete class
    Put Redhat specific behavior here...
    """

    def __init__(self):
        super(redhatDistro, self).__init__()
        self.service_cmd = '/sbin/service'
        self.ssh_service_restart_option = 'condrestart'
        self.ssh_service_name = 'sshd'
        # /etc/hostname only exists on RHEL 7+; earlier releases keep the
        # hostname in /etc/sysconfig/network (see publishHostname).
        self.hostname_file_path = None if DistInfo()[1] < '7.0' else '/etc/hostname'
        self.init_file = redhat_init_file
        self.grubKernelBootOptionsFile = '/boot/grub/menu.lst'
        self.grubKernelBootOptionsLine = 'kernel'

    def publishHostname(self, name):
        """
        Publish *name* as the machine hostname: base-class handling first,
        then the RHEL-specific sysconfig files (HOSTNAME= on pre-7.0, and
        DHCP_HOSTNAME= in the active interface's ifcfg file).
        Always returns 0.
        """
        super(redhatDistro, self).publishHostname(name)
        if DistInfo()[1] < '7.0':
            filepath = "/etc/sysconfig/network"
            if os.path.isfile(filepath):
                ReplaceFileContentsAtomic(
                    filepath,
                    "HOSTNAME=" + name + "\n"
                    + "\n".join(filter(lambda a: not a.startswith("HOSTNAME"),
                                       GetFileContents(filepath).split('\n'))))
        ethernetInterface = MyDistro.GetInterfaceName()
        filepath = "/etc/sysconfig/network-scripts/ifcfg-" + ethernetInterface
        if os.path.isfile(filepath):
            ReplaceFileContentsAtomic(
                filepath,
                "DHCP_HOSTNAME=" + name + "\n"
                + "\n".join(filter(lambda a: not a.startswith("DHCP_HOSTNAME"),
                                   GetFileContents(filepath).split('\n'))))
        return 0

    def installAgentServiceScriptFiles(self):
        SetFileContents(self.init_script_file, self.init_file)
        os.chmod(self.init_script_file, 0o744)
        return 0

    def registerAgentService(self):
        # Fix: use the configured service name instead of the hard-coded
        # 'waagent' so registration matches uninstallAgentService().
        self.installAgentServiceScriptFiles()
        return Run('chkconfig --add ' + self.agent_service_name)

    def uninstallAgentService(self):
        return Run('chkconfig --del ' + self.agent_service_name)

    def unregisterAgentService(self):
        self.stopAgentService()
        return self.uninstallAgentService()

    def checkPackageInstalled(self, p):
        # Run() returns the exit code: falsy (0) means yum found the
        # package. Returns 1 if installed, 0 otherwise.
        if Run("yum list installed " + p, chk_err=False):
            return 0
        else:
            return 1

    def checkPackageUpdateable(self, p):
        # Returns 1 if an update for p is available, 0 otherwise.
        if Run("yum check-update | grep " + p, chk_err=False):
            return 1
        else:
            return 0

    def checkDependencies(self):
        """
        Generic dependency check.
        Return 1 unless all dependencies are satisfied.
        """
        if DistInfo()[1] < '7.0' and self.checkPackageInstalled('NetworkManager'):
            Error(GuestAgentLongName + " is not compatible with network-manager.")
            return 1
        try:
            __import__('pyasn1')  # presence check only; the module object is unused
        except ImportError:
            Error(GuestAgentLongName + " requires python-pyasn1 for your Linux distribution.")
            return 1
        for a in self.requiredDeps:
            if Run("which " + a + " > /dev/null 2>&1", chk_err=False):
                Error("Missing required dependency: " + a)
                return 1
        return 0
############################################################
# centosDistro
############################################################
class centosDistro(redhatDistro):
    """
    CentOS Distro concrete class
    Put CentOS specific behavior here...
    """
    # CentOS tracks RHEL; all behavior is inherited unchanged from redhatDistro.

    def __init__(self):
        super(centosDistro, self).__init__()
############################################################
# eulerosDistro
############################################################
class eulerosDistro(redhatDistro):
    """
    EulerOS Distro concrete class
    Put EulerOS specific behavior here...
    """
    # EulerOS is RHEL-compatible; all behavior is inherited from redhatDistro.

    def __init__(self):
        super(eulerosDistro, self).__init__()
############################################################
# oracleDistro
############################################################
class oracleDistro(redhatDistro):
    """
    Oracle Distro concrete class
    Put Oracle specific behavior here...
    """
    # Oracle Linux is RHEL-compatible; all behavior is inherited from redhatDistro.

    def __init__(self):
        super(oracleDistro, self).__init__()
############################################################
# asianuxDistro
############################################################
class asianuxDistro(redhatDistro):
    """
    Asianux Distro concrete class
    Put Asianux specific behavior here...
    """
    # Asianux is RHEL-compatible; all behavior is inherited from redhatDistro.

    def __init__(self):
        super(asianuxDistro, self).__init__()
############################################################
# CoreOSDistro
############################################################
class CoreOSDistro(AbstractDistro):
    """
    CoreOS Distro concrete class
    Put CoreOS specific behavior here...
    """
    # UID of the stock 'core' user shipped with CoreOS images; it lies below
    # the usual UID_MIN but must still be treated as a regular account.
    CORE_UID = 500

    def __init__(self):
        super(CoreOSDistro, self).__init__()
        self.requiredDeps += ["/usr/bin/systemctl"]
        self.agent_service_name = 'waagent'
        self.init_script_file = '/etc/systemd/system/waagent.service'
        # /etc/machine-id is instance-specific and must be removed at
        # deprovision time.
        self.fileBlackList.append("/etc/machine-id")
        self.dhcp_client_name = 'systemd-networkd'
        self.getpidcmd = 'pidof '
        self.shadow_file_mode = 0o640
        self.waagent_path = '/usr/share/oem/bin'
        self.python_path = '/usr/share/oem/python/bin'
        self.dhcp_enabled = True
        # CoreOS ships python and waagent under /usr/share/oem; extend PATH
        # and PYTHONPATH so this process and its children can find them.
        if 'PATH' in os.environ:
            os.environ['PATH'] = "{0}:{1}".format(os.environ['PATH'], self.python_path)
        else:
            os.environ['PATH'] = self.python_path
        if 'PYTHONPATH' in os.environ:
            os.environ['PYTHONPATH'] = "{0}:{1}".format(os.environ['PYTHONPATH'], self.waagent_path)
        else:
            os.environ['PYTHONPATH'] = self.waagent_path

    def checkPackageInstalled(self, p):
        """
        There is no package manager in CoreOS. Return 1 since it must be preinstalled.
        """
        return 1

    def checkDependencies(self):
        # Returns 1 on the first missing external tool, 0 otherwise.
        for a in self.requiredDeps:
            if Run("which " + a + " > /dev/null 2>&1", chk_err=False):
                Error("Missing required dependency: " + a)
                return 1
        return 0

    def checkPackageUpdateable(self, p):
        """
        There is no package manager in CoreOS. Return 0 since it can't be updated via package.
        """
        return 0

    def startAgentService(self):
        return Run('systemctl start ' + self.agent_service_name)

    def stopAgentService(self):
        return Run('systemctl stop ' + self.agent_service_name)

    def restartSshService(self):
        """
        SSH is socket activated on CoreOS. No need to restart it.
        """
        return 0

    def sshDeployPublicKey(self, fprint, path):
        """
        We support PKCS8.
        """
        # ssh-keygen converts the PKCS8 key and appends it to *path*;
        # returns 1 on failure, 0 on success.
        if Run("ssh-keygen -i -m PKCS8 -f " + fprint + " >> " + path):
            return 1
        else:
            return 0

    def RestartInterface(self, iface):
        # Networking is owned by systemd-networkd; restarting the unit
        # reconfigures all interfaces (iface argument unused).
        Run("systemctl restart systemd-networkd")

    def CreateAccount(self, user, password, expiration, thumbprint):
        """
        Create a user account, with 'user', 'password', 'expiration', ssh keys
        and sudo permissions.
        Returns None if successful, error string on failure.
        """
        userentry = None
        try:
            userentry = pwd.getpwnam(user)
        except:
            pass
        uidmin = None
        try:
            uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1])
        except:
            pass
        if uidmin == None:
            uidmin = 100
        # Refuse to repurpose system accounts, except the stock 'core' user.
        if userentry != None and userentry[2] < uidmin and userentry[2] != self.CORE_UID:
            Error("CreateAccount: " + user + " is a system user. Will not set password.")
            return "Failed to set password for system user: " + user + " (0x06)."
        if userentry == None:
            command = "useradd --create-home --password '*' " + user
            if expiration != None:
                # Strip any fractional-seconds suffix; useradd expects a plain date.
                command += " --expiredate " + expiration.split('.')[0]
            if Run(command):
                Error("Failed to create user account: " + user)
                return "Failed to create user account: " + user + " (0x07)."
        else:
            Log("CreateAccount: " + user + " already exists. Will update password.")
        if password != None:
            self.changePass(user, password)
        try:
            # Passwordless sudo only when no password was provisioned.
            if password == None:
                SetFileContents("/etc/sudoers.d/waagent", user + " ALL = (ALL) NOPASSWD: ALL\n")
            else:
                SetFileContents("/etc/sudoers.d/waagent", user + " ALL = (ALL) ALL\n")
            os.chmod("/etc/sudoers.d/waagent", 0o440)
        except:
            Error("CreateAccount: Failed to configure sudo access for user.")
            return "Failed to configure sudo privileges (0x08)."
        home = MyDistro.GetHome()
        if thumbprint != None:
            dir = home + "/" + user + "/.ssh"
            CreateDir(dir, user, 0o700)
            pub = dir + "/id_rsa.pub"
            prv = dir + "/id_rsa"
            # Derive the public key from the provisioned private key file.
            Run("ssh-keygen -y -f " + thumbprint + ".prv > " + pub)
            SetFileContents(prv, GetFileContents(thumbprint + ".prv"))
            for f in [pub, prv]:
                os.chmod(f, 0o600)
                ChangeOwner(f, user)
            SetFileContents(dir + "/authorized_keys", GetFileContents(pub))
            ChangeOwner(dir + "/authorized_keys", user)
        Log("Created user account: " + user)
        return None

    def startDHCP(self):
        Run("systemctl start " + self.dhcp_client_name, chk_err=False)

    def stopDHCP(self):
        Run("systemctl stop " + self.dhcp_client_name, chk_err=False)

    def translateCustomData(self, data):
        # CoreOS custom data arrives base64-encoded.
        return base64.b64decode(data)

    def getConfigurationPath(self):
        return "/usr/share/oem/waagent.conf"
############################################################
# debianDistro
############################################################
# LSB init script for Debian; written verbatim by
# debianDistro.installAgentServiceScriptFiles() (unless packaged).
debian_init_file = """\
#!/bin/sh
### BEGIN INIT INFO
# Provides: AzureLinuxAgent
# Required-Start: $network $syslog
# Required-Stop: $network $syslog
# Should-Start: $network $syslog
# Should-Stop: $network $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: AzureLinuxAgent
# Description: AzureLinuxAgent
### END INIT INFO
. /lib/lsb/init-functions
OPTIONS="-daemon"
WAZD_BIN=/usr/sbin/waagent
WAZD_PID=/var/run/waagent.pid
case "$1" in
start)
log_begin_msg "Starting AzureLinuxAgent..."
pid=$( pidofproc $WAZD_BIN )
if [ -n "$pid" ] ; then
log_begin_msg "Already running."
log_end_msg 0
exit 0
fi
start-stop-daemon --start --quiet --oknodo --background --exec $WAZD_BIN -- $OPTIONS
log_end_msg $?
;;
stop)
log_begin_msg "Stopping AzureLinuxAgent..."
start-stop-daemon --stop --quiet --oknodo --pidfile $WAZD_PID
ret=$?
rm -f $WAZD_PID
log_end_msg $ret
;;
force-reload)
$0 restart
;;
restart)
$0 stop
$0 start
;;
status)
status_of_proc $WAZD_BIN && exit 0 || exit $?
;;
*)
log_success_msg "Usage: /etc/init.d/waagent {start|stop|force-reload|restart|status}"
exit 1
;;
esac
exit 0
"""
class debianDistro(AbstractDistro):
    """
    debian Distro concrete class
    Put debian specific behavior here...
    """

    def __init__(self):
        super(debianDistro, self).__init__()
        self.requiredDeps += ["/usr/sbin/update-rc.d"]
        self.init_file = debian_init_file
        self.agent_package_name = 'walinuxagent'
        self.dhcp_client_name = 'dhclient'
        self.getpidcmd = 'pidof '
        self.shadow_file_mode = 0o640

    def checkPackageInstalled(self, p):
        """
        Check that the package is installed.
        Return 1 if installed, 0 if not installed.
        This method of using dpkg-query
        allows wildcards to be present in the
        package name.
        """
        # Run() returns the exit code: 0 (falsy) means grep matched, i.e.
        # the dpkg status line contains ' installed'.
        if not Run("dpkg-query -W -f='${Status}\n' '" + p + "' | grep ' installed' 2>&1", chk_err=False):
            return 1
        else:
            return 0

    def checkDependencies(self):
        """
        Debian dependency check. python-pyasn1 is NOT needed.
        Return 1 unless all dependencies are satisfied.
        NOTE: using network*manager will catch either package name in Ubuntu or debian.
        """
        if self.checkPackageInstalled('network*manager'):
            Error(GuestAgentLongName + " is not compatible with network-manager.")
            return 1
        for a in self.requiredDeps:
            if Run("which " + a + " > /dev/null 2>&1", chk_err=False):
                Error("Missing required dependency: " + a)
                return 1
        return 0

    def checkPackageUpdateable(self, p):
        # Refresh the index, then do a dry-run upgrade and grep for p.
        # Returns 1 if an update is available, 0 otherwise.
        if Run("apt-get update ; apt-get upgrade -us | grep " + p, chk_err=False):
            return 1
        else:
            return 0

    def installAgentServiceScriptFiles(self):
        """
        If we are packaged - the service name is walinuxagent, do nothing.
        """
        if self.agent_service_name == 'walinuxagent':
            return 0
        try:
            SetFileContents(self.init_script_file, self.init_file)
            os.chmod(self.init_script_file, 0o744)
        except OSError as e:
            # NOTE(review): on Python 2 an IOError raised here would not be
            # caught (IOError is not a subclass of OSError until Python 3).
            ErrorWithPrefix('installAgentServiceScriptFiles',
                            'Exception: ' + str(e) + ' occured creating ' + self.init_script_file)
            return 1
        return 0

    def registerAgentService(self):
        # Only register if the init script was installed successfully.
        if self.installAgentServiceScriptFiles() == 0:
            return Run('update-rc.d waagent defaults')
        else:
            return 1

    def uninstallAgentService(self):
        return Run('update-rc.d -f ' + self.agent_service_name + ' remove')

    def unregisterAgentService(self):
        self.stopAgentService()
        return self.uninstallAgentService()

    def sshDeployPublicKey(self, fprint, path):
        """
        We support PKCS8.
        """
        # Returns 1 on ssh-keygen failure, 0 on success.
        if Run("ssh-keygen -i -m PKCS8 -f " + fprint + " >> " + path):
            return 1
        else:
            return 0
############################################################
# KaliDistro - WIP
# Functioning on Kali 1.1.0a so far
############################################################
class KaliDistro(debianDistro):
    """
    Kali Distro concrete class
    Put Kali specific behavior here...
    """
    # Kali is Debian-based; all behavior is inherited from debianDistro.

    def __init__(self):
        super(KaliDistro, self).__init__()
############################################################
# UbuntuDistro
############################################################
# Upstart job for Ubuntu; written to /etc/init/waagent.conf by
# UbuntuDistro.installAgentServiceScriptFiles().
ubuntu_upstart_file = """\
#walinuxagent - start Azure agent
description "walinuxagent"
author "Ben Howard <ben.howard@canonical.com>"
start on (filesystem and started rsyslog)
pre-start script
WALINUXAGENT_ENABLED=1
[ -r /etc/default/walinuxagent ] && . /etc/default/walinuxagent
if [ "$WALINUXAGENT_ENABLED" != "1" ]; then
exit 1
fi
if [ ! -x /usr/sbin/waagent ]; then
exit 1
fi
#Load the udf module
modprobe -b udf
end script
exec /usr/sbin/waagent -daemon
"""
class UbuntuDistro(debianDistro):
    """
    Ubuntu Distro concrete class
    Put Ubuntu specific behavior here...
    """

    def __init__(self):
        super(UbuntuDistro, self).__init__()
        # Ubuntu uses an Upstart job instead of the Debian sysV script.
        self.init_script_file = '/etc/init/waagent.conf'
        self.init_file = ubuntu_upstart_file
        self.fileBlackList = ["/root/.bash_history", "/var/log/waagent.log"]
        # Resolved lazily by getDhcpClientName() (release-dependent).
        self.dhcp_client_name = None
        self.getpidcmd = 'pidof '

    def registerAgentService(self):
        # Upstart jobs need no explicit enable step; installing the job
        # file is sufficient.
        return self.installAgentServiceScriptFiles()

    def uninstallAgentService(self):
        """
        If we are packaged - the service name is walinuxagent, do nothing.
        """
        if self.agent_service_name == 'walinuxagent':
            return 0
        # NOTE(review): raises OSError if the job file is already gone,
        # and returns None (not 0) on success.
        os.remove('/etc/init/' + self.agent_service_name + '.conf')

    def unregisterAgentService(self):
        """
        If we are packaged - the service name is walinuxagent, do nothing.
        """
        if self.agent_service_name == 'walinuxagent':
            return
        self.stopAgentService()
        return self.uninstallAgentService()

    def deprovisionWarnUser(self):
        """
        Ubuntu specific warning string from Deprovision.
        """
        print("WARNING! Nameserver configuration in /etc/resolvconf/resolv.conf.d/{tail,original} will be deleted.")

    def deprovisionDeleteFiles(self):
        """
        Ubuntu uses resolv.conf by default, so removing /etc/resolv.conf will
        break resolvconf. Therefore, we check to see if resolvconf is in use,
        and if so, we remove the resolvconf artifacts.
        """
        if os.path.realpath('/etc/resolv.conf') != '/run/resolvconf/resolv.conf':
            Log("resolvconf is not configured. Removing /etc/resolv.conf")
            self.fileBlackList.append('/etc/resolv.conf')
        else:
            Log("resolvconf is enabled; leaving /etc/resolv.conf intact")
            resolvConfD = '/etc/resolvconf/resolv.conf.d/'
            self.fileBlackList.extend([resolvConfD + 'tail', resolvConfD + 'original'])
        # Best-effort removal of agent state and blacklisted files.
        for f in os.listdir(LibDir) + self.fileBlackList:
            try:
                os.remove(f)
            except:
                pass
        return 0

    def getDhcpClientName(self):
        # Cached after the first call; 12.04 shipped dhclient3, later
        # releases plain dhclient.
        if self.dhcp_client_name != None:
            return self.dhcp_client_name
        if DistInfo()[1] == '12.04':
            self.dhcp_client_name = 'dhclient3'
        else:
            self.dhcp_client_name = 'dhclient'
        return self.dhcp_client_name

    def waitForSshHostKey(self, path):
        """
        Wait until the ssh host key is generated by cloud init.
        """
        # Polls once per second for up to ~10 seconds.
        for retry in range(0, 10):
            if (os.path.isfile(path)):
                return True
            time.sleep(1)
        Error("Can't find host key: {0}".format(path))
        return False
############################################################
# LinuxMintDistro
############################################################
class LinuxMintDistro(UbuntuDistro):
    """
    LinuxMint Distro concrete class
    Put LinuxMint specific behavior here...
    """
    # Linux Mint is Ubuntu-based; all behavior is inherited from UbuntuDistro.

    def __init__(self):
        super(LinuxMintDistro, self).__init__()
############################################################
# fedoraDistro
############################################################
# systemd unit for Fedora; written verbatim by
# fedoraDistro.installAgentServiceScriptFiles().
fedora_systemd_service = """\
[Unit]
Description=Azure Linux Agent
After=network.target
After=sshd.service
ConditionFileIsExecutable=/usr/sbin/waagent
ConditionPathExists=/etc/waagent.conf
[Service]
Type=simple
ExecStart=/usr/sbin/waagent -daemon
[Install]
WantedBy=multi-user.target
"""
class fedoraDistro(redhatDistro):
    """
    FedoraDistro concrete class
    Put Fedora specific behavior here...
    """

    def __init__(self):
        super(fedoraDistro, self).__init__()
        # Fedora is systemd-based; override the RHEL sysV machinery.
        self.service_cmd = '/usr/bin/systemctl'
        self.hostname_file_path = '/etc/hostname'
        self.init_script_file = '/usr/lib/systemd/system/' + self.agent_service_name + '.service'
        self.init_file = fedora_systemd_service
        self.grubKernelBootOptionsFile = '/etc/default/grub'
        self.grubKernelBootOptionsLine = 'GRUB_CMDLINE_LINUX='

    def publishHostname(self, name):
        # Write /etc/hostname directly and mirror the name into the active
        # interface's DHCP_HOSTNAME setting. Always returns 0.
        SetFileContents(self.hostname_file_path, name + '\n')
        ethernetInterface = MyDistro.GetInterfaceName()
        filepath = "/etc/sysconfig/network-scripts/ifcfg-" + ethernetInterface
        if os.path.isfile(filepath):
            ReplaceFileContentsAtomic(
                filepath,
                "DHCP_HOSTNAME=" + name + "\n"
                + "\n".join(filter(lambda a: not a.startswith("DHCP_HOSTNAME"),
                                   GetFileContents(filepath).split('\n'))))
        return 0

    def installAgentServiceScriptFiles(self):
        # Install the unit file, then ask systemd to pick it up.
        SetFileContents(self.init_script_file, self.init_file)
        os.chmod(self.init_script_file, 0o644)
        return Run(self.service_cmd + ' daemon-reload')

    def registerAgentService(self):
        self.installAgentServiceScriptFiles()
        return Run(self.service_cmd + ' enable ' + self.agent_service_name)

    def uninstallAgentService(self):
        """
        Call service subsystem to remove waagent script.
        """
        return Run(self.service_cmd + ' disable ' + self.agent_service_name)

    def unregisterAgentService(self):
        """
        Calls self.stopAgentService and call self.uninstallAgentService()
        """
        # NOTE(review): unlike redhatDistro, this does not return the
        # uninstall result -- callers get None.
        self.stopAgentService()
        self.uninstallAgentService()

    def startAgentService(self):
        """
        Service call to start the Agent service
        """
        return Run(self.service_cmd + ' start ' + self.agent_service_name)

    def stopAgentService(self):
        """
        Service call to stop the Agent service
        """
        return Run(self.service_cmd + ' stop ' + self.agent_service_name, False)

    def restartSshService(self):
        """
        Service call to re(start) the SSH service
        """
        sshRestartCmd = self.service_cmd + " " + self.ssh_service_restart_option + " " + self.ssh_service_name
        retcode = Run(sshRestartCmd)
        if retcode > 0:
            Error("Failed to restart SSH service with return code:" + str(retcode))
        return retcode

    def deleteRootPassword(self):
        # '!!' locks the root password.
        return Run("/sbin/usermod root -p '!!'")

    def packagedInstall(self, buildroot):
        """
        Called from setup.py for use by RPM.
        Copies generated files waagent.conf, under the buildroot.
        """
        if not os.path.exists(buildroot + '/etc'):
            os.mkdir(buildroot + '/etc')
        SetFileContents(buildroot + '/etc/waagent.conf', MyDistro.waagent_conf_file)
        if not os.path.exists(buildroot + '/etc/logrotate.d'):
            os.mkdir(buildroot + '/etc/logrotate.d')
        SetFileContents(buildroot + '/etc/logrotate.d/WALinuxAgent', WaagentLogrotate)
        self.init_script_file = buildroot + self.init_script_file
        # this allows us to call installAgentServiceScriptFiles()
        if not os.path.exists(os.path.dirname(self.init_script_file)):
            os.mkdir(os.path.dirname(self.init_script_file))
        self.installAgentServiceScriptFiles()

    def CreateAccount(self, user, password, expiration, thumbprint):
        # Base-class creation plus membership in the 'wheel' sudo group.
        super(fedoraDistro, self).CreateAccount(user, password, expiration, thumbprint)
        Run('/sbin/usermod ' + user + ' -G wheel')

    def DeleteAccount(self, user):
        # Strip supplementary groups before the base-class removal.
        Run('/sbin/usermod ' + user + ' -G ""')
        super(fedoraDistro, self).DeleteAccount(user)
############################################################
# FreeBSD
############################################################
# Default /etc/waagent.conf content for FreeBSD (note ufs2 filesystem);
# assigned to FreeBSDDistro.waagent_conf_file.
FreeBSDWaagentConf = """\
#
# Azure Linux Agent Configuration
#
Role.StateConsumer=None # Specified program is invoked with the argument "Ready" when we report ready status
# to the endpoint server.
Role.ConfigurationConsumer=None # Specified program is invoked with XML file argument specifying role configuration.
Role.TopologyConsumer=None # Specified program is invoked with XML file argument specifying role topology.
Provisioning.Enabled=y #
Provisioning.DeleteRootPassword=y # Password authentication for root account will be unavailable.
Provisioning.RegenerateSshHostKeyPair=y # Generate fresh host key pair.
Provisioning.SshHostKeyPairType=rsa # Supported values are "rsa", "dsa" and "ecdsa".
Provisioning.MonitorHostName=y # Monitor host name changes and publish changes via DHCP requests.
ResourceDisk.Format=y # Format if unformatted. If 'n', resource disk will not be mounted.
ResourceDisk.Filesystem=ufs2 #
ResourceDisk.MountPoint=/mnt/resource #
ResourceDisk.EnableSwap=n # Create and use swapfile on resource disk.
ResourceDisk.SwapSizeMB=0 # Size of the swapfile.
LBProbeResponder=y # Respond to load balancer probes if requested by Azure.
Logs.Verbose=n # Enable verbose logs
OS.RootDeviceScsiTimeout=300 # Root device timeout in seconds.
OS.OpensslPath=None # If "None", the system default version is used.
"""
# FreeBSD rc.d script; written to /etc/rc.d/waagent by
# FreeBSDDistro.installAgentServiceScriptFiles().
bsd_init_file = """\
#! /bin/sh
# PROVIDE: waagent
# REQUIRE: DAEMON cleanvar sshd
# BEFORE: LOGIN
# KEYWORD: nojail
. /etc/rc.subr
export PATH=$PATH:/usr/local/bin
name="waagent"
rcvar="waagent_enable"
command="/usr/sbin/${name}"
command_interpreter="/usr/local/bin/python"
waagent_flags=" daemon &"
pidfile="/var/run/waagent.pid"
load_rc_config $name
run_rc_command "$1"
"""
# Stand-alone helper script (Python 2 source, kept verbatim as runtime
# data) that formats/mounts the FreeBSD resource disk and optionally
# enables a swapfile on it.
bsd_activate_resource_disk_txt = """\
#!/usr/bin/env python
import os
import sys
import imp
# waagent has no '.py' therefore create waagent module import manually.
__name__='setupmain' #prevent waagent.__main__ from executing
waagent=imp.load_source('waagent','/tmp/waagent')
waagent.LoggerInit('/var/log/waagent.log','/dev/console')
from waagent import RunGetOutput,Run
Config=waagent.ConfigurationProvider(None)
format = Config.get("ResourceDisk.Format")
if format == None or format.lower().startswith("n"):
sys.exit(0)
device_base = 'da1'
device = "/dev/" + device_base
for entry in RunGetOutput("mount")[1].split():
if entry.startswith(device + "s1"):
waagent.Log("ActivateResourceDisk: " + device + "s1 is already mounted.")
sys.exit(0)
mountpoint = Config.get("ResourceDisk.MountPoint")
if mountpoint == None:
mountpoint = "/mnt/resource"
waagent.CreateDir(mountpoint, "root", 0755)
fs = Config.get("ResourceDisk.Filesystem")
if waagent.FreeBSDDistro().mediaHasFilesystem(device) == False :
Run("newfs " + device + "s1")
if Run("mount " + device + "s1 " + mountpoint):
waagent.Error("ActivateResourceDisk: Failed to mount resource disk (" + device + "s1).")
sys.exit(0)
waagent.Log("Resource disk (" + device + "s1) is mounted at " + mountpoint + " with fstype " + fs)
waagent.SetFileContents(os.path.join(mountpoint,waagent.README_FILENAME), waagent.README_FILECONTENT)
swap = Config.get("ResourceDisk.EnableSwap")
if swap == None or swap.lower().startswith("n"):
sys.exit(0)
sizeKB = int(Config.get("ResourceDisk.SwapSizeMB")) * 1024
if os.path.isfile(mountpoint + "/swapfile") and os.path.getsize(mountpoint + "/swapfile") != (sizeKB * 1024):
os.remove(mountpoint + "/swapfile")
if not os.path.isfile(mountpoint + "/swapfile"):
Run("umask 0077 && dd if=/dev/zero of=" + mountpoint + "/swapfile bs=1024 count=" + str(sizeKB))
if Run("mdconfig -a -t vnode -f " + mountpoint + "/swapfile -u 0"):
waagent.Error("ActivateResourceDisk: Configuring swap - Failed to create md0")
if not Run("swapon /dev/md0"):
waagent.Log("Enabled " + str(sizeKB) + " KB of swap at " + mountpoint + "/swapfile")
else:
waagent.Error("ActivateResourceDisk: Failed to activate swap at " + mountpoint + "/swapfile")
"""
class FreeBSDDistro(AbstractDistro):
"""
"""
    def __init__(self):
        """
        Generic Attributes go here. These are based on 'majority rules'.
        This __init__() may be called or overriden by the child.
        """
        super(FreeBSDDistro, self).__init__()
        self.agent_service_name = os.path.basename(sys.argv[0])
        self.selinux = False
        self.ssh_service_name = 'sshd'
        self.ssh_config_file = '/etc/ssh/sshd_config'
        self.hostname_file_path = '/etc/hostname'
        self.dhcp_client_name = 'dhclient'
        self.requiredDeps = ['route', 'shutdown', 'ssh-keygen', 'pw'
            , 'openssl', 'fdisk', 'sed', 'grep', 'sudo']
        self.init_script_file = '/etc/rc.d/waagent'
        self.init_file = bsd_init_file
        self.agent_package_name = 'WALinuxAgent'
        self.fileBlackList = ["/root/.bash_history", "/var/log/waagent.log", '/etc/resolv.conf']
        self.agent_files_to_uninstall = ["/etc/waagent.conf"]
        self.grubKernelBootOptionsFile = '/boot/loader.conf'
        self.grubKernelBootOptionsLine = ''
        # pgrep -n: newest matching process only.
        self.getpidcmd = 'pgrep -n'
        self.mount_dvd_cmd = 'dd bs=2048 count=33 skip=295 if='  # custom data max len is 64k
        # FreeBSD keeps sudoers under /usr/local/etc, not /etc.
        self.sudoers_dir_base = '/usr/local/etc'
        self.waagent_conf_file = FreeBSDWaagentConf
def installAgentServiceScriptFiles(self):
SetFileContents(self.init_script_file, self.init_file)
os.chmod(self.init_script_file, 0o777)
AppendFileContents("/etc/rc.conf", "waagent_enable='YES'\n")
return 0
def registerAgentService(self):
self.installAgentServiceScriptFiles()
return Run("services_mkdb " + self.init_script_file)
def sshDeployPublicKey(self, fprint, path):
"""
We support PKCS8.
"""
if Run("ssh-keygen -i -m PKCS8 -f " + fprint + " >> " + path):
return 1
else:
return 0
    def deleteRootPassword(self):
        """
        BSD root password removal.
        """
        # Blank out root's password hash in the master password file, then
        # rebuild the password databases with pwd_mkdb(8). Returns 0.
        filepath = "/etc/master.passwd"
        ReplaceStringInFile(filepath, r'root:.*?:', 'root::')
        # ReplaceFileContentsAtomic(filepath,"root:*LOCK*:14600::::::\n"
        #  + "\n".join(filter(lambda a: not a.startswith("root:"),GetFileContents(filepath).split('\n'))))
        os.chmod(filepath, self.shadow_file_mode)
        if self.isSelinuxSystem():
            self.setSelinuxContext(filepath, 'system_u:object_r:shadow_t:s0')
        RunGetOutput("pwd_mkdb -u root /etc/master.passwd")
        Log("Root password deleted.")
        return 0
def changePass(self, user, password):
return RunSendStdin("pw usermod " + user + " -h 0 ", password, log_cmd=False)
def load_ata_piix(self):
return 0
def unload_ata_piix(self):
return 0
def checkDependencies(self):
"""
FreeBSD dependency check.
Return 1 unless all dependencies are satisfied.
"""
for a in self.requiredDeps:
if Run("which " + a + " > /dev/null 2>&1", chk_err=False):
Error("Missing required dependency: " + a)
return 1
return 0
    def packagedInstall(self, buildroot):
        # Intentionally a no-op on FreeBSD; packaged installs place their
        # own files.
        pass
def GetInterfaceName(self):
"""
Return the ip of the
active ethernet interface.
"""
iface, inet, mac = self.GetFreeBSDEthernetInfo()
return iface
    def RestartInterface(self, iface):
        # FreeBSD restarts networking via the netif rc script as a whole;
        # the iface argument is accepted for interface parity but unused.
        Run("service netif restart")
def GetIpv4Address(self):
"""
Return the ip of the
active ethernet interface.
"""
iface, inet, mac = self.GetFreeBSDEthernetInfo()
return inet
def GetMacAddress(self):
"""
Return the ip of the
active ethernet interface.
"""
iface, inet, mac = self.GetFreeBSDEthernetInfo()
l = mac.split(':')
r = []
for i in l:
r.append(string.atoi(i, 16))
return r
    def GetFreeBSDEthernetInfo(self):
        """
        There is no SIOCGIFCONF
        on freeBSD - just parse ifconfig.
        Returns strings: iface, inet4_addr, and mac
        or 'None,None,None' if unable to parse.
        We will sleep and retry as the network must be up.
        """
        # Log the full ifconfig output once for diagnostics.
        code, output = RunGetOutput("ifconfig", chk_err=False)
        Log(output)
        retries = 10
        # Grep chain selects an interface that is UP and has both an
        # ether (MAC) and an inet (IPv4) line.
        cmd = 'ifconfig | grep -A2 -B2 ether | grep -B3 inet | grep -A4 UP '
        code = 1
        while code > 0:
            if code > 0 and retries == 0:
                Error("GetFreeBSDEthernetInfo - Failed to detect ethernet interface")
                return None, None, None
            code, output = RunGetOutput(cmd, chk_err=False)
            retries -= 1
            if code > 0 and retries > 0:
                Log("GetFreeBSDEthernetInfo - Error: retry ethernet detection " + str(retries))
                # On the first failed retry, kick dhclient on the first
                # interface that at least has a MAC, then wait for the
                # network to come up.
                if retries == 9:
                    c, o = RunGetOutput("ifconfig | grep -A1 -B2 ether", chk_err=False)
                    if c == 0:
                        t = o.replace('\n', ' ')
                        t = t.split()
                        i = t[0][:-1]
                        Log(RunGetOutput('id')[1])
                        Run('dhclient ' + i)
                time.sleep(10)
        # Tokenize the matched block: first token is "<iface>:".
        j = output.replace('\n', ' ')
        j = j.split()
        iface = j[0][:-1]
        # NOTE(review): if the output unexpectedly lacks an 'inet' or
        # 'ether' token, the corresponding local stays unbound and the
        # return below raises NameError.
        for i in range(len(j)):
            if j[i] == 'inet':
                inet = j[i + 1]
            elif j[i] == 'ether':
                mac = j[i + 1]
        return iface, inet, mac
    def CreateAccount(self, user, password, expiration, thumbprint):
        """
        Create a user account, with 'user', 'password', 'expiration', ssh keys
        and sudo permissions.
        Returns None if successful, error string on failure.
        """
        userentry = None
        try:
            userentry = pwd.getpwnam(user)
        except:
            pass
        uidmin = None
        try:
            if os.path.isfile("/etc/login.defs"):
                uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1])
        except:
            pass
        if uidmin == None:
            uidmin = 100
        # Refuse to operate on system accounts (uid below UID_MIN).
        if userentry != None and userentry[2] < uidmin:
            Error("CreateAccount: " + user + " is a system user. Will not set password.")
            return "Failed to set password for system user: " + user + " (0x06)."
        if userentry == None:
            # FreeBSD user management goes through pw(8).
            command = "pw useradd " + user + " -m"
            if expiration != None:
                # pw -e expects a bare date; drop any fractional part.
                command += " -e " + expiration.split('.')[0]
            if Run(command):
                Error("Failed to create user account: " + user)
                return "Failed to create user account: " + user + " (0x07)."
        else:
            Log("CreateAccount: " + user + " already exists. Will update password.")
        if password != None:
            self.changePass(user, password)
        try:
            # for older distros create sudoers.d
            if not os.path.isdir(MyDistro.sudoers_dir_base + '/sudoers.d/'):
                # create the /etc/sudoers.d/ directory
                os.mkdir(MyDistro.sudoers_dir_base + '/sudoers.d')
                # add the include of sudoers.d to the /etc/sudoers
                SetFileContents(MyDistro.sudoers_dir_base + '/sudoers', GetFileContents(
                    MyDistro.sudoers_dir_base + '/sudoers') + '\n#includedir ' + MyDistro.sudoers_dir_base + '/sudoers.d\n')
            # Passwordless sudo only when no password was provisioned.
            if password == None:
                SetFileContents(MyDistro.sudoers_dir_base + "/sudoers.d/waagent", user + " ALL = (ALL) NOPASSWD: ALL\n")
            else:
                SetFileContents(MyDistro.sudoers_dir_base + "/sudoers.d/waagent", user + " ALL = (ALL) ALL\n")
            os.chmod(MyDistro.sudoers_dir_base + "/sudoers.d/waagent", 0o440)
        except:
            Error("CreateAccount: Failed to configure sudo access for user.")
            return "Failed to configure sudo privileges (0x08)."
        home = MyDistro.GetHome()
        if thumbprint != None:
            dir = home + "/" + user + "/.ssh"
            CreateDir(dir, user, 0o700)
            pub = dir + "/id_rsa.pub"
            prv = dir + "/id_rsa"
            # Regenerate the public key from the deployed private key.
            Run("ssh-keygen -y -f " + thumbprint + ".prv > " + pub)
            SetFileContents(prv, GetFileContents(thumbprint + ".prv"))
            for f in [pub, prv]:
                os.chmod(f, 0o600)
                ChangeOwner(f, user)
            SetFileContents(dir + "/authorized_keys", GetFileContents(pub))
            ChangeOwner(dir + "/authorized_keys", user)
        Log("Created user account: " + user)
        return None
    def DeleteAccount(self, user):
        """
        Delete the 'user'.
        Clear utmp first, to avoid error.
        Removes the /etc/sudoers.d/waagent file.
        """
        userentry = None
        try:
            userentry = pwd.getpwnam(user)
        except:
            pass
        if userentry == None:
            Error("DeleteAccount: " + user + " not found.")
            return
        uidmin = None
        try:
            if os.path.isfile("/etc/login.defs"):
                uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1])
        except:
            pass
        if uidmin == None:
            uidmin = 100
        # Never delete system accounts (uid below UID_MIN, default 100).
        if userentry[2] < uidmin:
            Error("DeleteAccount: " + user + " is a system user. Will not delete account.")
            return
        Run("> /var/run/utmp")  # Delete utmp to prevent error if we are the 'user' deleted
        # Fire-and-forget rmuser(8); the pid is captured but never waited on.
        pid = subprocess.Popen(['rmuser', '-y', user], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                               stdin=subprocess.PIPE).pid
        try:
            os.remove(MyDistro.sudoers_dir_base + "/sudoers.d/waagent")
        except:
            pass
        return
    def ActivateResourceDiskNoThread(self):
        """
        Format, mount, and if specified in the configuration
        set resource disk as swap.
        """
        global DiskActivated
        # The heavy lifting runs in a separate interpreter so this call
        # returns immediately; a copy of the agent is staged in /tmp for it.
        Run('cp /usr/sbin/waagent /tmp/')
        SetFileContents('/tmp/bsd_activate_resource_disk.py', bsd_activate_resource_disk_txt)
        Run('chmod +x /tmp/bsd_activate_resource_disk.py')
        pid = subprocess.Popen(["/tmp/bsd_activate_resource_disk.py", ""]).pid
        Log("Spawning bsd_activate_resource_disk.py")
        # NOTE(review): flagged as activated as soon as the helper is
        # spawned, not when it actually finishes.
        DiskActivated = True
        return
    def Install(self):
        """
        Install the agent service.
        Check dependencies.
        Create /etc/waagent.conf and move old version to
        /etc/waagent.conf.old
        Copy RulesFiles to /var/lib/waagent
        Create /etc/logrotate.d/waagent
        Set /etc/ssh/sshd_config ClientAliveInterval to 180
        Call ApplyVNUMAWorkaround()
        """
        if MyDistro.checkDependencies():
            return 1
        os.chmod(sys.argv[0], 0o755)
        SwitchCwd()
        # Move any udev rules files shipped next to the agent into LibDir.
        for a in RulesFiles:
            if os.path.isfile(a):
                if os.path.isfile(GetLastPathElement(a)):
                    os.remove(GetLastPathElement(a))
                shutil.move(a, ".")
                Warn("Moved " + a + " -> " + LibDir + "/" + GetLastPathElement(a))
        MyDistro.registerAgentService()
        # Preserve any existing config as .old before writing the default.
        if os.path.isfile("/etc/waagent.conf"):
            try:
                os.remove("/etc/waagent.conf.old")
            except:
                pass
            try:
                os.rename("/etc/waagent.conf", "/etc/waagent.conf.old")
                Warn("Existing /etc/waagent.conf has been renamed to /etc/waagent.conf.old")
            except:
                pass
        SetFileContents("/etc/waagent.conf", self.waagent_conf_file)
        if os.path.exists('/usr/local/etc/logrotate.d/'):
            SetFileContents("/usr/local/etc/logrotate.d/waagent", WaagentLogrotate)
        # Rewrite sshd_config with any previous ClientAliveInterval removed,
        # then pin it to 180 seconds.
        filepath = "/etc/ssh/sshd_config"
        ReplaceFileContentsAtomic(filepath, "\n".join(filter(lambda a: not
        a.startswith("ClientAliveInterval"),
                                                             GetFileContents(filepath).split(
                                                                 '\n'))) + "\nClientAliveInterval 180\n")
        Log("Configured SSH client probing to keep connections alive.")
        # ApplyVNUMAWorkaround()
        return 0
def mediaHasFilesystem(self, dsk):
if Run('LC_ALL=C fdisk -p ' + dsk + ' | grep "invalid fdisk partition table found" ', False):
return False
return True
    def mountDVD(self, dvd, location):
        # At this point we cannot read a joliet option udf DVD in freebsd10 - so we 'dd' it into our location
        retcode, out = RunGetOutput(self.mount_dvd_cmd + dvd + ' of=' + location + '/ovf-env.xml')
        if retcode != 0:
            return retcode, out
        # The raw dd copy needs sanitizing before it is valid XML.
        ovfxml = (GetFileContents(location + "/ovf-env.xml", asbin=False))
        if ord(ovfxml[0]) > 128 and ord(ovfxml[1]) > 128 and ord(ovfxml[2]) > 128:
            ovfxml = ovfxml[
                     3:]  # BOM is not stripped. First three bytes are > 128 and not unicode chars so we ignore them.
        # Drop NULs and any non-ASCII characters, then truncate everything
        # after the closing Environment tag and restore the tag itself.
        ovfxml = ovfxml.strip(chr(0x00))
        ovfxml = "".join(filter(lambda x: ord(x) < 128, ovfxml))
        ovfxml = re.sub(r'</Environment>.*\Z', '', ovfxml, 0, re.DOTALL)
        ovfxml += '</Environment>'
        SetFileContents(location + "/ovf-env.xml", ovfxml)
        return retcode, out
def GetHome(self):
return '/home'
def initScsiDiskTimeout(self):
"""
Set the SCSI disk timeout by updating the kernal config
"""
timeout = Config.get("OS.RootDeviceScsiTimeout")
if timeout:
Run("sysctl kern.cam.da.default_timeout=" + timeout)
def setScsiDiskTimeout(self):
return
def setBlockDeviceTimeout(self, device, timeout):
return
def getProcessorCores(self):
return int(RunGetOutput("sysctl hw.ncpu | awk '{print $2}'")[1])
def getTotalMemory(self):
return int(RunGetOutput("sysctl hw.realmem | awk '{print $2}'")[1]) / 1024
def setDefaultGateway(self, gateway):
Run("/sbin/route add default " + gateway, chk_err=False)
def routeAdd(self, net, mask, gateway):
Run("/sbin/route add -net " + net + " " + mask + " " + gateway, chk_err=False)
############################################################
# END DISTRO CLASS DEFS
############################################################
# This lets us index into a string or an array of integers transparently.
def Ord(a):
    """
    Return the ordinal of 'a' when it is a one-character string,
    or 'a' unchanged when it is already an integer.  Lets callers
    index into a string or an array of integers transparently.
    """
    return ord(a) if type(a) == str else a
def IsLinux():
    """
    Return True when the running platform reports itself as Linux.
    """
    return platform.uname()[0] == "Linux"
def GetLastPathElement(path):
    """
    Return the final '/'-separated component of 'path' (like basename).
    """
    head_tail = path.rsplit('/', 1)
    return head_tail[1]
def GetFileContents(filepath, asbin=False):
    """
    Read and return the contents of 'filepath' -- text by default,
    bytes when 'asbin' is True.  Return None on I/O failure.
    """
    mode = 'rb' if asbin else 'r'
    try:
        with open(filepath, mode) as fh:
            return fh.read()
    except IOError as e:
        ErrorWithPrefix('GetFileContents', 'Reading from file ' + filepath + ' Exception is ' + str(e))
        return None
def SetFileContents(filepath, contents):
    """
    Overwrite 'filepath' with 'contents' (str is encoded latin-1,
    lossy).  Return 0 on success, None on I/O failure.
    """
    payload = contents.encode('latin-1', 'ignore') if type(contents) == str else contents
    try:
        with open(filepath, "wb+") as fh:
            fh.write(payload)
    except IOError as e:
        ErrorWithPrefix('SetFileContents', 'Writing to file ' + filepath + ' Exception is ' + str(e))
        return None
    return 0
def AppendFileContents(filepath, contents):
    """
    Append 'contents' to 'filepath' (str is encoded latin-1).
    Return 0 on success, None on I/O failure.
    """
    if type(contents) == str:
        contents = contents.encode('latin-1')
    try:
        # 'contents' is bytes at this point, so the file must be opened in
        # binary mode ("ab+"); text mode ("a+") raises TypeError on Python 3.
        with open(filepath, "ab+") as F:
            F.write(contents)
    except IOError as e:
        ErrorWithPrefix('AppendFileContents', 'Appending to file ' + filepath + ' Exception is ' + str(e))
        return None
    return 0
def ReplaceFileContentsAtomic(filepath, contents):
    """
    Write 'contents' to 'filepath' by creating a temp file, and replacing original.
    """
    # Temp file is created in the destination directory so os.rename()
    # stays on one filesystem and can be atomic.
    handle, temp = tempfile.mkstemp(dir=os.path.dirname(filepath))
    if type(contents) == str:
        contents = contents.encode('latin-1')
    try:
        os.write(handle, contents)
    except IOError as e:
        ErrorWithPrefix('ReplaceFileContentsAtomic', 'Writing to file ' + filepath + ' Exception is ' + str(e))
        return None
    finally:
        os.close(handle)
    try:
        os.rename(temp, filepath)
        return None
    except IOError as e:
        ErrorWithPrefix('ReplaceFileContentsAtomic', 'Renaming ' + temp + ' to ' + filepath + ' Exception is ' + str(e))
    # Rename failed: remove the destination and retry the rename once.
    try:
        os.remove(filepath)
    except IOError as e:
        ErrorWithPrefix('ReplaceFileContentsAtomic', 'Removing ' + filepath + ' Exception is ' + str(e))
    try:
        # NOTE(review): this log message says 'Removing' but the operation
        # is a rename -- looks like a copy/paste slip in the log text.
        os.rename(temp, filepath)
    except IOError as e:
        ErrorWithPrefix('ReplaceFileContentsAtomic', 'Removing ' + filepath + ' Exception is ' + str(e))
        return 1
    return 0
def GetLineStartingWith(prefix, filepath):
    """
    Return the first line of 'filepath' that starts with 'prefix',
    or None when there is no match or the file cannot be read.
    """
    contents = GetFileContents(filepath)
    # GetFileContents returns None on I/O error; treat that as "no match"
    # instead of raising AttributeError on None.split().
    if contents is None:
        return None
    for line in contents.split('\n'):
        if line.startswith(prefix):
            return line
    return None
def Run(cmd, chk_err=True):
    """
    Execute 'cmd' via RunGetOutput and return only the exit code.
    If chk_err=True then errors will be reported in the log.
    If chk_err=False then errors will be suppressed from the log.
    """
    return RunGetOutput(cmd, chk_err)[0]
def RunGetOutput(cmd, chk_err=True, log_cmd=True):
    """
    Wrapper for subprocess.check_output.
    Execute 'cmd' through the shell and return (exit_code, stdout_text)
    with stderr folded into stdout.  Expected failures are trapped and,
    when both chk_err and log_cmd are set, reported through Error().
    """
    if log_cmd:
        LogIfVerbose(cmd)
    try:
        raw = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as e:
        if chk_err and log_cmd:
            Error('CalledProcessError. Error Code is ' + str(e.returncode))
            Error('CalledProcessError. Command string was ' + e.cmd)
            Error('CalledProcessError. Command result was ' + (e.output[:-1]).decode('latin-1'))
        return e.returncode, e.output.decode('latin-1')
    return 0, raw.decode('latin-1')
def RunSendStdin(cmd, input, chk_err=True, log_cmd=True):
    """
    Wrapper for subprocess.Popen.
    Execute 'cmd', sending 'input' to STDIN of 'cmd'.
    Returns return code and STDOUT, trapping expected exceptions.
    Reports exceptions to Error if chk_err parameter is True
    """
    if log_cmd:
        LogIfVerbose(cmd + input)
    # communicate() on a byte pipe requires bytes, not str (Python 3);
    # latin-1 mirrors the decoding applied to the output below.
    if isinstance(input, str):
        input = input.encode('latin-1')
    try:
        me = subprocess.Popen([cmd], shell=True, stdin=subprocess.PIPE, stderr=subprocess.STDOUT,
                              stdout=subprocess.PIPE)
        output = me.communicate(input)
    except OSError as e:
        if chk_err and log_cmd:
            Error('CalledProcessError. Error Code is ' + str(me.returncode))
            Error('CalledProcessError. Command string was ' + cmd)
            Error('CalledProcessError. Command result was ' + output[0].decode('latin-1'))
            return 1, output[0].decode('latin-1')
    # '!= 0' instead of 'is not 0': identity comparison of ints is
    # undefined behavior and a SyntaxWarning on modern Python.
    if me.returncode != 0 and chk_err is True and log_cmd:
        Error('CalledProcessError. Error Code is ' + str(me.returncode))
        Error('CalledProcessError. Command string was ' + cmd)
        Error('CalledProcessError. Command result was ' + output[0].decode('latin-1'))
    return me.returncode, output[0].decode('latin-1')
def GetNodeTextData(a):
    """
    Return the data of the first TEXT_NODE child of DOM node 'a',
    or None when the node has no text child.
    """
    text_children = (child for child in a.childNodes if child.nodeType == child.TEXT_NODE)
    return next((child.data for child in text_children), None)
def GetHome():
    """
    Attempt to guess the $HOME base location from /etc/default/useradd,
    falling back to "/home".  Return the path string.
    """
    home = None
    try:
        entry = GetLineStartingWith("HOME", "/etc/default/useradd")
        home = entry.split('=')[1].strip()
    except:
        pass
    if (home is None) or (not home.startswith("/")):
        home = "/home"
    return home
def ChangeOwner(filepath, user):
    """
    Look up 'user' in the passwd database and chown 'filepath' to it.
    Silently does nothing for an unknown user; logs an error when the
    path does not exist.
    """
    try:
        entry = pwd.getpwnam(user)
    except:
        entry = None
    if entry is not None:
        if os.path.exists(filepath):
            os.chown(filepath, entry[2], entry[3])
        else:
            Error("Path does not exist: {0}".format(filepath))
def CreateDir(dirpath, user, mode):
    """
    Create 'dirpath' (and parents) with 'mode', ignoring any failure,
    then hand ownership to 'user' via ChangeOwner.
    """
    try:
        os.makedirs(dirpath, mode)
    except:
        # Best-effort by design -- the directory may already exist.
        pass
    ChangeOwner(dirpath, user)
def CreateAccount(user, password, expiration, thumbprint):
    """
    Create a user account, with 'user', 'password', 'expiration', ssh keys
    and sudo permissions.
    Returns None if successful, error string on failure.
    """
    userentry = None
    try:
        userentry = pwd.getpwnam(user)
    except:
        pass
    uidmin = None
    try:
        uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1])
    except:
        pass
    if uidmin == None:
        uidmin = 100
    # Refuse to operate on system accounts (uid below UID_MIN).
    if userentry != None and userentry[2] < uidmin:
        Error("CreateAccount: " + user + " is a system user. Will not set password.")
        return "Failed to set password for system user: " + user + " (0x06)."
    if userentry == None:
        command = "useradd -m " + user
        if expiration != None:
            # useradd -e expects a bare date; drop any fractional part.
            command += " -e " + expiration.split('.')[0]
        if Run(command):
            Error("Failed to create user account: " + user)
            return "Failed to create user account: " + user + " (0x07)."
    else:
        Log("CreateAccount: " + user + " already exists. Will update password.")
    if password != None:
        MyDistro.changePass(user, password)
    try:
        # for older distros create sudoers.d
        if not os.path.isdir('/etc/sudoers.d/'):
            # create the /etc/sudoers.d/ directory
            os.mkdir('/etc/sudoers.d/')
            # add the include of sudoers.d to the /etc/sudoers
            SetFileContents('/etc/sudoers', GetFileContents('/etc/sudoers') + '\n#includedir /etc/sudoers.d\n')
        # Passwordless sudo only when no password was provisioned.
        if password == None:
            SetFileContents("/etc/sudoers.d/waagent", user + " ALL = (ALL) NOPASSWD: ALL\n")
        else:
            SetFileContents("/etc/sudoers.d/waagent", user + " ALL = (ALL) ALL\n")
        os.chmod("/etc/sudoers.d/waagent", 0o440)
    except:
        Error("CreateAccount: Failed to configure sudo access for user.")
        return "Failed to configure sudo privileges (0x08)."
    home = MyDistro.GetHome()
    if thumbprint != None:
        dir = home + "/" + user + "/.ssh"
        CreateDir(dir, user, 0o700)
        pub = dir + "/id_rsa.pub"
        prv = dir + "/id_rsa"
        # Regenerate the public key from the deployed private key.
        Run("ssh-keygen -y -f " + thumbprint + ".prv > " + pub)
        SetFileContents(prv, GetFileContents(thumbprint + ".prv"))
        for f in [pub, prv]:
            os.chmod(f, 0o600)
            ChangeOwner(f, user)
        SetFileContents(dir + "/authorized_keys", GetFileContents(pub))
        ChangeOwner(dir + "/authorized_keys", user)
    Log("Created user account: " + user)
    return None
def DeleteAccount(user):
    """
    Delete the 'user'.
    Clear utmp first, to avoid error.
    Removes the /etc/sudoers.d/waagent file.
    """
    userentry = None
    try:
        userentry = pwd.getpwnam(user)
    except:
        pass
    if userentry == None:
        Error("DeleteAccount: " + user + " not found.")
        return
    uidmin = None
    try:
        uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1])
    except:
        pass
    if uidmin == None:
        uidmin = 100
    # Never delete system accounts (uid below UID_MIN, default 100).
    if userentry[2] < uidmin:
        Error("DeleteAccount: " + user + " is a system user. Will not delete account.")
        return
    Run("> /var/run/utmp")  # Delete utmp to prevent error if we are the 'user' deleted
    Run("userdel -f -r " + user)
    try:
        os.remove("/etc/sudoers.d/waagent")
    except:
        pass
    return
def IsInRangeInclusive(a, low, high):
    """
    Return True when low <= a <= high.
    """
    return low <= a <= high
def IsPrintable(ch):
    """
    Return True if 'ch' (an ordinal, per Ord) is a displayable ASCII
    letter or digit.
    """
    for lo, hi in (('A', 'Z'), ('a', 'z'), ('0', '9')):
        if IsInRangeInclusive(ch, Ord(lo), Ord(hi)):
            return True
    return False
def HexDump(buffer, size):
    """
    Return a hex/ASCII formatted dump of the first 'size' elements of
    'buffer' (a str or byte sequence).  Negative 'size' dumps it all.
    """
    if size < 0:
        size = len(buffer)
    result = ""
    for i in range(0, size):
        if (i % 16) == 0:
            result += "%06X: " % i
        byte = buffer[i]
        # Indexing a str yields a 1-char str; take its ordinal directly.
        # (str has no .decode() on Python 3, so the old
        # ord(byte.decode('latin1')) raised AttributeError.)
        if type(byte) == str:
            byte = ord(byte)
        result += "%02X " % byte
        if (i & 15) == 7:
            result += " "
        if ((i + 1) % 16) == 0 or (i + 1) == size:
            j = i
            # Pad the hex column of a short final row.
            while ((j + 1) % 16) != 0:
                result += "   "
                if (j & 7) == 7:
                    result += " "
                j += 1
            result += " "
            # ASCII gutter for this row; non-printables show as '.'.
            for j in range(i - (i % 16), i + 1):
                byte = buffer[j]
                if type(byte) == str:
                    byte = ord(byte)
                k = '.'
                if IsPrintable(byte):
                    k = chr(byte)
                result += k
            if (i + 1) != size:
                result += "\n"
    return result
def SimpleLog(file_path, message):
    """
    Append 'message' to 'file_path', prefixing every line with a
    local-time timestamp and dropping non-printable characters.
    No-op when file_path is falsy or message is empty.
    """
    if not file_path or len(message) < 1:
        return
    t = time.localtime()
    t = "%04u/%02u/%02u %02u:%02u:%02u " % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)
    lines = re.sub(re.compile(r'^(.)', re.MULTILINE), t + r'\1', message)
    with open(file_path, "a") as F:
        # filter() returns an iterator on Python 3 (no .encode attribute);
        # join it back into a str and write text -- the file is open in
        # text mode, so the old bytes + "\n" concat raised TypeError.
        lines = "".join(filter(lambda x: x in string.printable, lines))
        F.write(lines + "\n")
class Logger(object):
    """
    The Agent's logging assumptions are:
    For Log, and LogWithPrefix all messages are logged to the
    self.file_path and to the self.con_path. Setting either path
    parameter to None skips that log. If Verbose is enabled, messages
    calling the LogIfVerbose method will be logged to file_path yet
    not to con_path. Error and Warn messages are normal log messages
    with the 'ERROR:' or 'WARNING:' prefix added.
    """
    def __init__(self, filepath, conpath, verbose=False):
        """
        Construct an instance of Logger.
        """
        self.file_path = filepath
        self.con_path = conpath
        self.verbose = verbose
    def ThrottleLog(self, counter):
        """
        Log everything up to 10, every 10 up to 100, then every 100.
        """
        return (counter < 10) or ((counter < 100) and ((counter % 10) == 0)) or ((counter % 100) == 0)
    def LogToFile(self, message):
        """
        Write 'message' to logfile.
        """
        if self.file_path:
            try:
                with open(self.file_path, "a") as F:
                    # filter() returns an iterator on Python 3; join it back
                    # into a str and write text (the file is in text mode --
                    # the old .encode() + "\n" raised on Python 3).
                    message = "".join(filter(lambda x: x in string.printable, message))
                    F.write(message + "\n")
            except IOError as e:
                ##print e
                pass
    def LogToCon(self, message):
        """
        Write 'message' to /dev/console.
        This supports serial port logging if the /dev/console
        is redirected to ttys0 in kernel boot options.
        """
        if self.con_path:
            try:
                with open(self.con_path, "w") as C:
                    # Same Python 3 fix as LogToFile.
                    message = "".join(filter(lambda x: x in string.printable, message))
                    C.write(message + "\n")
            except IOError as e:
                pass
    def Log(self, message):
        """
        Standard Log function.
        Logs to self.file_path, and con_path
        """
        self.LogWithPrefix("", message)
    def LogWithPrefix(self, prefix, message):
        """
        Prefix each line of 'message' with current time+'prefix'.
        """
        t = time.localtime()
        t = "%04u/%02u/%02u %02u:%02u:%02u " % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)
        t += prefix
        for line in message.split('\n'):
            line = t + line
            self.LogToFile(line)
            self.LogToCon(line)
    def NoLog(self, message):
        """
        Don't Log.
        """
        pass
    def LogIfVerbose(self, message):
        """
        Only log 'message' if global Verbose is True.
        """
        self.LogWithPrefixIfVerbose('', message)
    def LogWithPrefixIfVerbose(self, prefix, message):
        """
        Only log 'message' if global Verbose is True.
        Prefix each line of 'message' with current time+'prefix'.
        """
        if self.verbose == True:
            t = time.localtime()
            t = "%04u/%02u/%02u %02u:%02u:%02u " % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)
            t += prefix
            for line in message.split('\n'):
                line = t + line
                self.LogToFile(line)
                self.LogToCon(line)
    def Warn(self, message):
        """
        Prepend the text "WARNING:" to the prefix for each line in 'message'.
        """
        self.LogWithPrefix("WARNING:", message)
    def Error(self, message):
        """
        Call ErrorWithPrefix(message).
        """
        # NOTE(review): this intentionally calls the module-level
        # ErrorWithPrefix (bound by LoggerInit), not self.ErrorWithPrefix.
        ErrorWithPrefix("", message)
    def ErrorWithPrefix(self, prefix, message):
        """
        Prepend the text "ERROR:" to the prefix for each line in 'message'.
        Errors written to logfile, and /dev/console
        """
        self.LogWithPrefix("ERROR:", message)
def LoggerInit(log_file_path, log_con_path, verbose=False):
    """
    Create log object and export its methods to global scope.
    """
    global Log, LogWithPrefix, LogIfVerbose, LogWithPrefixIfVerbose, Error, ErrorWithPrefix, Warn, NoLog, ThrottleLog, myLogger
    myLogger = Logger(log_file_path, log_con_path, verbose)
    Log = myLogger.Log
    LogWithPrefix = myLogger.LogWithPrefix
    LogIfVerbose = myLogger.LogIfVerbose
    LogWithPrefixIfVerbose = myLogger.LogWithPrefixIfVerbose
    Error = myLogger.Error
    ErrorWithPrefix = myLogger.ErrorWithPrefix
    Warn = myLogger.Warn
    NoLog = myLogger.NoLog
    ThrottleLog = myLogger.ThrottleLog
class HttpResourceGoneError(Exception):
    """Raised when an HTTP request reports the resource as permanently gone."""
class Util(object):
    """
    Http communication class.
    Base of GoalState, and Agent classes.
    """
    # Seconds slept between HTTP retries.
    RetryWaitingInterval = 10
    def __init__(self):
        self.Endpoint = None
    def _ParseUrl(self, url):
        """
        Split 'url' into (host, port, secure, path).  Scheme-less urls
        fall back to self.Endpoint as the host.
        """
        secure = False
        host = self.Endpoint
        path = url
        port = None
        # "http[s]://hostname[:port][/]"
        if url.startswith("http://"):
            url = url[7:]
            if "/" in url:
                host = url[0: url.index("/")]
                path = url[url.index("/"):]
            else:
                host = url
                path = "/"
        elif url.startswith("https://"):
            secure = True
            url = url[8:]
            if "/" in url:
                host = url[0: url.index("/")]
                path = url[url.index("/"):]
            else:
                host = url
                path = "/"
        if host is None:
            raise ValueError("Host is invalid:{0}".format(url))
        if (":" in host):
            pos = host.rfind(":")
            port = int(host[pos + 1:])
            host = host[0:pos]
        return host, port, secure, path
    def GetHttpProxy(self, secure):
        """
        Get http_proxy and https_proxy from environment variables.
        Username and password is not supported now.
        """
        # NOTE(review): despite the docstring, the proxy actually comes
        # from the agent Config, and 'secure' is not consulted.
        host = Config.get("HttpProxy.Host")
        port = Config.get("HttpProxy.Port")
        return (host, port)
    def _HttpRequest(self, method, host, path, port=None, data=None, secure=False,
                     headers=None, proxyHost=None, proxyPort=None):
        """
        Issue a single HTTP(S) request and return the response object,
        or None when the connection/request fails.
        """
        resp = None
        conn = None
        try:
            if secure:
                port = 443 if port is None else port
                if proxyHost is not None and proxyPort is not None:
                    conn = httplibs.HTTPSConnection(proxyHost, proxyPort, timeout=10)
                    conn.set_tunnel(host, port)
                    # If proxy is used, full url is needed.
                    path = "https://{0}:{1}{2}".format(host, port, path)
                else:
                    conn = httplibs.HTTPSConnection(host, port, timeout=10)
            else:
                port = 80 if port is None else port
                if proxyHost is not None and proxyPort is not None:
                    conn = httplibs.HTTPConnection(proxyHost, proxyPort, timeout=10)
                    # If proxy is used, full url is needed.
                    path = "http://{0}:{1}{2}".format(host, port, path)
                else:
                    conn = httplibs.HTTPConnection(host, port, timeout=10)
            if headers == None:
                conn.request(method, path, data)
            else:
                conn.request(method, path, data, headers)
            resp = conn.getresponse()
        except httplibs.HTTPException as e:
            Error('HTTPException {0}, args:{1}'.format(e, repr(e.args)))
        except IOError as e:
            Error('Socket IOError {0}, args:{1}'.format(e, repr(e.args)))
        return resp
    def HttpRequest(self, method, url, data=None,
                    headers=None, maxRetry=3, chkProxy=False):
        """
        Sending http request to server
        On error, sleep 10 and maxRetry times.
        Return the output buffer or None.
        """
        LogIfVerbose("HTTP Req: {0} {1}".format(method, url))
        LogIfVerbose("HTTP Req: Data={0}".format(data))
        LogIfVerbose("HTTP Req: Header={0}".format(headers))
        try:
            host, port, secure, path = self._ParseUrl(url)
        except ValueError as e:
            Error("Failed to parse url:{0}".format(url))
            return None
        # Check proxy
        proxyHost, proxyPort = (None, None)
        if chkProxy:
            proxyHost, proxyPort = self.GetHttpProxy(secure)
        # If httplib module is not built with ssl support. Fallback to http
        if secure and not hasattr(httplibs, "HTTPSConnection"):
            Warn("httplib is not built with ssl support")
            secure = False
            proxyHost, proxyPort = self.GetHttpProxy(secure)
        # If httplib module doesn't support https tunnelling. Fallback to http
        if secure and \
                proxyHost is not None and \
                proxyPort is not None and \
                not hasattr(httplibs.HTTPSConnection, "set_tunnel"):
            Warn("httplib doesn't support https tunnelling(new in python 2.7)")
            secure = False
            proxyHost, proxyPort = self.GetHttpProxy(secure)
        resp = self._HttpRequest(method, host, path, port=port, data=data,
                                 secure=secure, headers=headers,
                                 proxyHost=proxyHost, proxyPort=proxyPort)
        for retry in range(0, maxRetry):
            if resp is not None and \
                    (resp.status == httplibs.OK or \
                     resp.status == httplibs.CREATED or \
                     resp.status == httplibs.ACCEPTED):
                return resp;
            # 410 Gone is permanent: surface it to the caller immediately
            # instead of retrying.
            if resp is not None and resp.status == httplibs.GONE:
                raise HttpResourceGoneError("Http resource gone.")
            Error("Retry={0}".format(retry))
            Error("HTTP Req: {0} {1}".format(method, url))
            Error("HTTP Req: Data={0}".format(data))
            Error("HTTP Req: Header={0}".format(headers))
            if resp is None:
                Error("HTTP Err: response is empty.".format(retry))
            else:
                Error("HTTP Err: Status={0}".format(resp.status))
                Error("HTTP Err: Reason={0}".format(resp.reason))
                Error("HTTP Err: Header={0}".format(resp.getheaders()))
                Error("HTTP Err: Body={0}".format(resp.read()))
            time.sleep(self.__class__.RetryWaitingInterval)
            resp = self._HttpRequest(method, host, path, port=port, data=data,
                                     secure=secure, headers=headers,
                                     proxyHost=proxyHost, proxyPort=proxyPort)
        return None
    def HttpGet(self, url, headers=None, maxRetry=3, chkProxy=False):
        return self.HttpRequest("GET", url, headers=headers,
                                maxRetry=maxRetry, chkProxy=chkProxy)
    def HttpHead(self, url, headers=None, maxRetry=3, chkProxy=False):
        return self.HttpRequest("HEAD", url, headers=headers,
                                maxRetry=maxRetry, chkProxy=chkProxy)
    def HttpPost(self, url, data, headers=None, maxRetry=3, chkProxy=False):
        return self.HttpRequest("POST", url, data=data, headers=headers,
                                maxRetry=maxRetry, chkProxy=chkProxy)
    def HttpPut(self, url, data, headers=None, maxRetry=3, chkProxy=False):
        return self.HttpRequest("PUT", url, data=data, headers=headers,
                                maxRetry=maxRetry, chkProxy=chkProxy)
    def HttpDelete(self, url, headers=None, maxRetry=3, chkProxy=False):
        return self.HttpRequest("DELETE", url, headers=headers,
                                maxRetry=maxRetry, chkProxy=chkProxy)
    def HttpGetWithoutHeaders(self, url, maxRetry=3, chkProxy=False):
        """
        Return data from an HTTP get on 'url'.
        """
        resp = self.HttpGet(url, headers=None, maxRetry=maxRetry,
                            chkProxy=chkProxy)
        return resp.read() if resp is not None else None
    def HttpGetWithHeaders(self, url, maxRetry=3, chkProxy=False):
        """
        Return data from an HTTP get on 'url' with
        x-ms-agent-name and x-ms-version
        headers.
        """
        resp = self.HttpGet(url, headers={
            "x-ms-agent-name": GuestAgentName,
            "x-ms-version": ProtocolVersion
        }, maxRetry=maxRetry, chkProxy=chkProxy)
        return resp.read() if resp is not None else None
    def HttpSecureGetWithHeaders(self, url, transportCert, maxRetry=3,
                                 chkProxy=False):
        """
        Return output of get using ssl cert.
        """
        resp = self.HttpGet(url, headers={
            "x-ms-agent-name": GuestAgentName,
            "x-ms-version": ProtocolVersion,
            "x-ms-cipher-name": "DES_EDE3_CBC",
            "x-ms-guest-agent-public-x509-cert": transportCert
        }, maxRetry=maxRetry, chkProxy=chkProxy)
        return resp.read() if resp is not None else None
    def HttpPostWithHeaders(self, url, data, maxRetry=3, chkProxy=False):
        """
        POST 'data' with the standard agent headers; a 410 Gone response
        is logged and reported as None rather than raised.
        """
        headers = {
            "x-ms-agent-name": GuestAgentName,
            "Content-Type": "text/xml; charset=utf-8",
            "x-ms-version": ProtocolVersion
        }
        try:
            return self.HttpPost(url, data=data, headers=headers,
                                 maxRetry=maxRetry, chkProxy=chkProxy)
        except HttpResourceGoneError as e:
            Error("Failed to post: {0} {1}".format(url, e))
            return None
# Azure Storage REST API version sent as the x-ms-version header by the
# status-blob helpers below.
__StorageVersion = "2014-02-14"
def GetBlobType(url):
    """
    HEAD the status blob at 'url' and return its x-ms-blob-type header
    ("BlockBlob" or "PageBlob"), or None when the request fails.
    """
    LogIfVerbose("Check blob type.")
    restutil = Util()
    timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    blobPropResp = restutil.HttpHead(url, {
        "x-ms-date": timestamp,
        'x-ms-version': __StorageVersion
    }, chkProxy=True)
    if blobPropResp is None:
        Error("Can't get status blob type.")
        return None
    blobType = blobPropResp.getheader("x-ms-blob-type")
    LogIfVerbose("Blob type={0}".format(blobType))
    return blobType
def PutBlockBlob(url, data):
    """
    PUT 'data' to 'url' as a single block blob.
    Return 0 on success, -1 on failure.
    """
    LogIfVerbose("Upload block blob")
    timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    headers = {
        "x-ms-date": timestamp,
        "x-ms-blob-type": "BlockBlob",
        "Content-Length": str(len(data)),
        "x-ms-version": __StorageVersion
    }
    if Util().HttpPut(url, data, headers, chkProxy=True) is None:
        Error("Failed to upload block blob for status.")
        return -1
    return 0
def PutPageBlob(url, data):
    """
    Replace the page blob at 'url' with 'data': allocate a zeroed blob
    of the 512-aligned size, then upload the content in pages of at
    most 4MB.  Return 0 on success, -1 on failure.
    """
    restutil = Util()
    LogIfVerbose("Replace old page blob")
    timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    # Align to 512 bytes ('//' keeps this an int; '/' yields a float on
    # Python 3 and corrupts the header value).
    pageBlobSize = ((len(data) + 511) // 512) * 512
    ret = restutil.HttpPut(url, "", {
        "x-ms-date": timestamp,
        "x-ms-blob-type": "PageBlob",
        "Content-Length": "0",
        "x-ms-blob-content-length": str(pageBlobSize),
        "x-ms-version": __StorageVersion
    }, chkProxy=True)
    if ret is None:
        Error("Failed to clean up page blob for status")
        return -1
    # str.index() raises ValueError when '?' is absent (it never returns
    # -1), so the original "index < 0" test could not work; use a
    # membership test to pick the query-string separator.
    if '?' not in url:
        url = "{0}?comp=page".format(url)
    else:
        url = "{0}&comp=page".format(url)
    LogIfVerbose("Upload page blob")
    pageMax = 4 * 1024 * 1024  # Max page size: 4MB
    start = 0
    end = 0
    while end < len(data):
        end = min(len(data), start + pageMax)
        contentSize = end - start
        # Align to 512 bytes
        pageEnd = ((end + 511) // 512) * 512
        bufSize = pageEnd - start
        buf = bytearray(bufSize)
        buf[0: contentSize] = data[start: end]
        if sys.version_info > (3,):
            # Python 3 has no 'buffer' builtin; memoryview is equivalent here.
            buffer = memoryview
        ret = restutil.HttpPut(url, buffer(buf), {
            "x-ms-date": timestamp,
            "x-ms-range": "bytes={0}-{1}".format(start, pageEnd - 1),
            "x-ms-page-write": "update",
            "x-ms-version": __StorageVersion,
            "Content-Length": str(pageEnd - start)
        }, chkProxy=True)
        if ret is None:
            Error("Failed to upload page blob for status")
            return -1
        start = end
    return 0
def UploadStatusBlob(url, data):
    """
    Dispatch a status upload to PutBlockBlob or PutPageBlob based on the
    blob type reported by the service.  Return 0 on success, -1 on
    failure or unknown blob type.
    """
    LogIfVerbose("Upload status blob")
    LogIfVerbose("Status={0}".format(data))
    blobType = GetBlobType(url)
    if blobType == "BlockBlob":
        return PutBlockBlob(url, data)
    if blobType == "PageBlob":
        return PutPageBlob(url, data)
    Error("Unknown blob type: {0}".format(blobType))
    return -1
class TCPHandler(SocketServers.BaseRequestHandler):
    """
    Callback object for LoadBalancerProbeServer.
    Recv and send LB probe messages.
    """
    def __init__(self, lb_probe):
        # NOTE(review): socketserver instantiates handlers as
        # Handler(request, client_address, server); this signature takes
        # 'lb_probe' instead, and super().__init__() is called with no
        # arguments -- confirm how this handler is actually constructed.
        super(TCPHandler, self).__init__()
        self.lb_probe = lb_probe
    def GetHttpDateTimeNow(self):
        """
        Return formatted gmtime "Date: Fri, 25 Mar 2011 04:53:10 GMT"
        """
        return time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
    def handle(self):
        """
        Log LB probe messages, read the socket buffer,
        send LB probe response back to server.
        """
        # Wrap the counter at one million to bound the throttling math.
        self.lb_probe.ProbeCounter = (self.lb_probe.ProbeCounter + 1) % 1000000
        # ThrottleLog returns a bool used to pick the log function:
        # verbose logging while throttling allows, NoLog otherwise.
        log = [NoLog, LogIfVerbose][ThrottleLog(self.lb_probe.ProbeCounter)]
        strCounter = str(self.lb_probe.ProbeCounter)
        if self.lb_probe.ProbeCounter == 1:
            Log("Receiving LB probes.")
        log("Received LB probe # " + strCounter)
        # Drain the probe request, then answer with a minimal 200 OK.
        self.request.recv(1024)
        # NOTE(review): send() of a str fails on Python 3 sockets (bytes
        # required) -- confirm the target Python version.
        self.request.send(
            "HTTP/1.1 200 OK\r\nContent-Length: 2\r\nContent-Type: text/html\r\nDate: " + self.GetHttpDateTimeNow() + "\r\n\r\nOK")
class LoadBalancerProbeServer(object):
    """
    Threaded object to receive and send LB probe messages.
    Load Balancer messages but be recv'd by
    the load balancing server, or this node may be shut-down.
    """
    def __init__(self, port):
        self.ProbeCounter = 0
        # Daemon thread: probe serving must never block agent shutdown.
        self.server = SocketServers.TCPServer((self.get_ip(), port), TCPHandler)
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        self.server_thread.setDaemon(True)
        self.server_thread.start()
    def shutdown(self):
        # Stops serve_forever(); safe to call from another thread.
        self.server.shutdown()
    def get_ip(self):
        """
        Poll MyDistro for the local IPv4 address, retrying up to 5 times
        with a 10 second sleep between attempts.
        """
        for retry in range(1, 6):
            ip = MyDistro.GetIpv4Address()
            if ip == None:
                Log("LoadBalancerProbeServer: GetIpv4Address() returned None, sleeping 10 before retry " + str(
                    retry + 1))
                time.sleep(10)
            else:
                return ip
        # NOTE(review): falls through and returns None when every retry
        # fails; TCPServer((None, port), ...) then binds the wildcard
        # address -- confirm that is intended.
class ConfigurationProvider(object):
    """
    Parse and store key:value pairs from waagent.conf.
    """
    def __init__(self, walaConfigFile):
        self.values = dict()
        # Late-bind the distro object: this may run before module init
        # has populated the global MyDistro.
        if 'MyDistro' not in globals():
            global MyDistro
            MyDistro = GetMyDistro()
        if walaConfigFile is None:
            walaConfigFile = MyDistro.getConfigurationPath()
        if os.path.isfile(walaConfigFile) == False:
            raise Exception("Missing configuration in {0}".format(walaConfigFile))
        try:
            for line in GetFileContents(walaConfigFile).split('\n'):
                if not line.startswith("#") and "=" in line:
                    # Keep only the first whitespace-delimited token (so
                    # trailing inline comments are discarded), then split
                    # on '='; surrounding quotes/spaces are stripped.
                    parts = line.split()[0].split('=')
                    value = parts[1].strip("\" ")
                    if value != "None":
                        self.values[parts[0]] = value
                    else:
                        # The literal string "None" maps to the None object.
                        self.values[parts[0]] = None
        except:
            Error("Unable to parse {0}".format(walaConfigFile))
            raise
        return
    def get(self, key):
        # Returns the configured value for 'key', or None when absent.
        return self.values.get(key)
class EnvMonitor(object):
    """
    Monitor changes to dhcp and hostname.
    If dhcp client process re-start has occurred, reset routes, dhcp with fabric.
    """
    def __init__(self):
        # Flag polled by monitor(); ShutdownService() flips it to stop the loop.
        self.shutdown = False
        self.HostName = socket.gethostname()
        self.server_thread = threading.Thread(target=self.monitor)
        self.server_thread.setDaemon(True)
        self.server_thread.start()
        self.published = False
    def monitor(self):
        """
        Monitor dhcp client pid and hostname.
        If dhcp client process re-start has occurred, reset routes, dhcp with fabric.
        """
        publish = Config.get("Provisioning.MonitorHostName")
        dhcpcmd = MyDistro.getpidcmd + ' ' + MyDistro.getDhcpClientName()
        dhcppid = RunGetOutput(dhcpcmd)[1]
        while not self.shutdown:
            # Move any udev rules files back into the agent lib dir,
            # replacing a previously-moved copy if one exists.
            for a in RulesFiles:
                if os.path.isfile(a):
                    if os.path.isfile(GetLastPathElement(a)):
                        os.remove(GetLastPathElement(a))
                    shutil.move(a, ".")
                    Log("EnvMonitor: Moved " + a + " -> " + LibDir)
            MyDistro.setScsiDiskTimeout()
            if publish != None and publish.lower().startswith("y"):
                try:
                    if socket.gethostname() != self.HostName:
                        Log("EnvMonitor: Detected host name change: " + self.HostName + " -> " + socket.gethostname())
                        self.HostName = socket.gethostname()
                        WaAgent.UpdateAndPublishHostName(self.HostName)
                        # Publishing the hostname may restart networking,
                        # so re-read the dhcp client pid afterwards.
                        dhcppid = RunGetOutput(dhcpcmd)[1]
                        self.published = True
                except:
                    pass
            else:
                self.published = True
            pid = ""
            # If the recorded dhcp client pid no longer exists in /proc,
            # look up the current pid; a change means the client restarted.
            if not os.path.isdir("/proc/" + dhcppid.strip()):
                pid = RunGetOutput(dhcpcmd)[1]
            if pid != "" and pid != dhcppid:
                Log("EnvMonitor: Detected dhcp client restart. Restoring routing table.")
                WaAgent.RestoreRoutes()
                dhcppid = pid
            # Reap finished child processes spawned elsewhere by the agent.
            # NOTE(review): removing from Children while iterating it can
            # skip entries; entries are retried on the next 5s pass.
            for child in Children:
                if child.poll() != None:
                    Children.remove(child)
            time.sleep(5)
    def SetHostName(self, name):
        """
        Generic call to MyDistro.setHostname(name).
        Complain to Log on error.
        """
        if socket.gethostname() == name:
            self.published = True
        elif MyDistro.setHostname(name):
            # setHostname returned a nonzero (truthy) exit status -> failure.
            Error("Error: SetHostName: Cannot set hostname to " + name)
            return ("Error: SetHostName: Cannot set hostname to " + name)
    def IsHostnamePublished(self):
        """
        Return self.published
        """
        return self.published
    def ShutdownService(self):
        """
        Stop server communication and join the thread to main thread.
        """
        self.shutdown = True
        self.server_thread.join()
class Certificates(object):
    """
    Object containing certificates of host and provisioned user.
    Parses and splits certificates into files.
    """
    # <CertificateFile xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="certificates10.xsd">
    # <Version>2010-12-15</Version>
    # <Incarnation>2</Incarnation>
    # <Format>Pkcs7BlobWithPfxContents</Format>
    # <Data>MIILTAY...
    # </Data>
    # </CertificateFile>
    def __init__(self):
        self.reinitialize()
    def reinitialize(self):
        """
        Reset the Role, Incarnation
        """
        self.Incarnation = None
        self.Role = None
    def Parse(self, xmlText):
        """
        Parse multiple certificates into separate files.

        Decrypts the CMS blob in xmlText with the transport key pair,
        splits the resulting PEM into numbered .prv (private key) and
        .crt (certificate) files, then renames each file after the
        certificate's thumbprint. Returns None on malformed XML and
        self otherwise (also on decrypt failure, matching the original
        contract).
        """
        self.reinitialize()
        SetFileContents("Certificates.xml", xmlText)
        dom = xml.dom.minidom.parseString(xmlText)
        for tag in ["CertificateFile", "Version", "Incarnation",
                    "Format", "Data", ]:
            if not dom.getElementsByTagName(tag):
                Error("Certificates.Parse: Missing " + tag)
                return None
        node = dom.childNodes[0]
        if node.localName != "CertificateFile":
            Error("Certificates.Parse: root not CertificateFile")
            return None
        SetFileContents("Certificates.p7m",
                        "MIME-Version: 1.0\n"
                        + "Content-Disposition: attachment; filename=\"Certificates.p7m\"\n"
                        + "Content-Type: application/x-pkcs7-mime; name=\"Certificates.p7m\"\n"
                        + "Content-Transfer-Encoding: base64\n\n"
                        + GetNodeTextData(dom.getElementsByTagName("Data")[0]))
        if Run(
                Openssl + " cms -decrypt -in Certificates.p7m -inkey TransportPrivate.pem -recip TransportCert.pem | " + Openssl + " pkcs12 -nodes -password pass: -out Certificates.pem"):
            Error("Certificates.Parse: Failed to extract certificates from CMS message.")
            return self
        # There may be multiple certificates in this package. Split them.
        # (Fix: the original opened Certificates.pem into a local named
        # `file` — shadowing the builtin — and never closed the handle;
        # use a context manager instead.)
        pindex = 1
        cindex = 1
        output = open("temp.pem", "w")
        with open("Certificates.pem") as pem_file:
            for line in pem_file:
                output.write(line)
                if re.match(r'[-]+END .*?(KEY|CERTIFICATE)[-]+$', line):
                    # One PEM section is complete; move it to a numbered
                    # .prv (private key) or .crt (certificate) file.
                    output.close()
                    if re.match(r'[-]+END .*?KEY[-]+$', line):
                        os.rename("temp.pem", str(pindex) + ".prv")
                        pindex += 1
                    else:
                        os.rename("temp.pem", str(cindex) + ".crt")
                        cindex += 1
                    output = open("temp.pem", "w")
        output.close()
        os.remove("temp.pem")
        # Rename each certificate after its thumbprint, remembering the
        # public-key -> thumbprint mapping to match private keys below.
        keys = dict()
        index = 1
        filename = str(index) + ".crt"
        while os.path.isfile(filename):
            thumbprint = \
                (RunGetOutput(Openssl + " x509 -in " + filename + " -fingerprint -noout")[1]).rstrip().split('=')[
                    1].replace(':', '').upper()
            pubkey = RunGetOutput(Openssl + " x509 -in " + filename + " -pubkey -noout")[1]
            keys[pubkey] = thumbprint
            os.rename(filename, thumbprint + ".crt")
            os.chmod(thumbprint + ".crt", 0o600)
            MyDistro.setSelinuxContext(thumbprint + '.crt', 'unconfined_u:object_r:ssh_home_t:s0')
            index += 1
            filename = str(index) + ".crt"
        # Rename each private key after the thumbprint of its matching
        # certificate (looked up via the extracted public key).
        index = 1
        filename = str(index) + ".prv"
        while os.path.isfile(filename):
            pubkey = RunGetOutput(Openssl + " rsa -in " + filename + " -pubout 2> /dev/null ")[1]
            # NOTE(review): raises KeyError if a key has no matching
            # certificate in `keys` — behavior preserved from the original.
            os.rename(filename, keys[pubkey] + ".prv")
            os.chmod(keys[pubkey] + ".prv", 0o600)
            MyDistro.setSelinuxContext(keys[pubkey] + '.prv', 'unconfined_u:object_r:ssh_home_t:s0')
            index += 1
            filename = str(index) + ".prv"
        return self
class ExtensionsConfig(object):
    """
    Parse ExtensionsConfig, downloading and unpacking them to /var/lib/waagent.
    Install if <enabled>true</enabled>, remove if it is set to false.
    """
    # <?xml version="1.0" encoding="utf-8"?>
    # <Extensions version="1.0.0.0" goalStateIncarnation="6"><Plugins>
    # <Plugin name="OSTCExtensions.ExampleHandlerLinux" version="1.5"
    # location="http://previewusnorthcache.blob.core.test-cint.azure-test.net/d84b216d00bf4d96982be531539e1513/OSTCExtensions_ExampleHandlerLinux_usnorth_manifest.xml"
    # config="" state="enabled" autoUpgrade="false" runAsStartupTask="false" isJson="true" />
    # </Plugins>
    # <PluginSettings>
    # <Plugin name="OSTCExtensions.ExampleHandlerLinux" version="1.5">
    # <RuntimeSettings seqNo="2">{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"1BE9A13AA1321C7C515EF109746998BAB6D86FD1",
    # "protectedSettings":"MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgR
    # Xh0ZW5zaW9ucwIQZi7dw+nhc6VHQTQpCiiV2zANBgkqhkiG9w0BAQEFAASCAQCKr09QKMGhwYe+O4/a8td+vpB4eTR+BQso84cV5KCAnD6iUIMcSYTrn9aveY6v6ykRLEw8GRKfri2d6
    # tvVDggUrBqDwIgzejGTlCstcMJItWa8Je8gHZVSDfoN80AEOTws9Fp+wNXAbSuMJNb8EnpkpvigAWU2v6pGLEFvSKC0MCjDTkjpjqciGMcbe/r85RG3Zo21HLl0xNOpjDs/qqikc/ri43Y76E/X
    # v1vBSHEGMFprPy/Hwo3PqZCnulcbVzNnaXN3qi/kxV897xGMPPC3IrO7Nc++AT9qRLFI0841JLcLTlnoVG1okPzK9w6ttksDQmKBSHt3mfYV+skqs+EOMDsGCSqGSIb3DQEHATAUBggqh
    # kiG9w0DBwQITgu0Nu3iFPuAGD6/QzKdtrnCI5425fIUy7LtpXJGmpWDUA==","publicSettings":{"port":"3000"}}}]}</RuntimeSettings>
    # </Plugin>
    # </PluginSettings>
    def __init__(self):
        self.reinitialize()
    def reinitialize(self):
        """
        Reset members.
        """
        self.Extensions = None
        self.Plugins = None
        self.Util = None
    def Parse(self, xmlText):
        """
        Write configuration to file ExtensionsConfig.xml.
        Log plugin specific activity to /var/log/azure/<Publisher>.<PluginName>/<Version>/CommandExecution.log.
        If state is enabled:
            if the plugin is installed:
                if the new plugin's version is higher
                if DisallowMajorVersionUpgrade is false or if true, the version is a minor version do upgrade:
                    download the new archive
                    do the updateCommand.
                    disable the old plugin and remove
                    enable the new plugin
                if the new plugin's version is the same or lower:
                    create the new .settings file from the configuration received
                    do the enableCommand
            if the plugin is not installed:
                download/unpack archive and call the installCommand/Enable
        if state is disabled:
            call disableCommand
        if state is uninstall:
            call uninstallCommand
            remove old plugin directory.
        """
        self.reinitialize()
        self.Util = Util()
        dom = xml.dom.minidom.parseString(xmlText)
        LogIfVerbose(xmlText)
        self.plugin_log_dir = '/var/log/azure'
        if not os.path.exists(self.plugin_log_dir):
            os.mkdir(self.plugin_log_dir)
        try:
            self.Extensions = dom.getElementsByTagName("Extensions")
            pg = dom.getElementsByTagName("Plugins")
            if len(pg) > 0:
                self.Plugins = pg[0].getElementsByTagName("Plugin")
            else:
                self.Plugins = []
            incarnation = self.Extensions[0].getAttribute("goalStateIncarnation")
            SetFileContents('ExtensionsConfig.' + incarnation + '.xml', xmlText)
        except Exception as e:
            Error('ERROR: Error parsing ExtensionsConfig: {0}.'.format(e))
            return None
        for p in self.Plugins:
            if len(p.getAttribute("location")) < 1:  # this plugin is inside the PluginSettings
                continue
            p.setAttribute('restricted', 'false')
            previous_version = None
            version = p.getAttribute("version")
            name = p.getAttribute("name")
            plog_dir = self.plugin_log_dir + '/' + name + '/' + version
            if not os.path.exists(plog_dir):
                os.makedirs(plog_dir)
            p.plugin_log = plog_dir + '/CommandExecution.log'
            handler = name + '-' + version
            if p.getAttribute("isJson") != 'true':
                Error("Plugin " + name + " version: " + version + " is not a JSON Extension. Skipping.")
                continue
            Log("Found Plugin: " + name + ' version: ' + version)
            if p.getAttribute("state") == 'disabled' or p.getAttribute("state") == 'uninstall':
                # disable
                zip_dir = LibDir + "/" + name + '-' + version
                mfile = None
                # NOTE(review): ('HandlerManifest.json') is a plain string, not a
                # tuple, so `in` below is a substring test — works for the exact
                # filename, but a tuple was probably intended. Same pattern
                # recurs in the other walks in this class.
                for root, dirs, files in os.walk(zip_dir):
                    for f in files:
                        if f in ('HandlerManifest.json'):
                            mfile = os.path.join(root, f)
                    if mfile != None:
                        break
                if mfile == None:
                    Error('HandlerManifest.json not found.')
                    continue
                manifest = GetFileContents(mfile)
                p.setAttribute('manifestdata', manifest)
                if self.launchCommand(p.plugin_log, name, version, 'disableCommand') == None:
                    self.SetHandlerState(handler, 'Enabled')
                    Error('Unable to disable ' + name)
                    SimpleLog(p.plugin_log, 'ERROR: Unable to disable ' + name)
                else:
                    self.SetHandlerState(handler, 'Disabled')
                    Log(name + ' is disabled')
                    SimpleLog(p.plugin_log, name + ' is disabled')
                # uninstall if needed
                if p.getAttribute("state") == 'uninstall':
                    if self.launchCommand(p.plugin_log, name, version, 'uninstallCommand') == None:
                        self.SetHandlerState(handler, 'Installed')
                        Error('Unable to uninstall ' + name)
                        SimpleLog(p.plugin_log, 'Unable to uninstall ' + name)
                    else:
                        self.SetHandlerState(handler, 'NotInstalled')
                        Log(name + ' uninstallCommand completed .')
                    # remove the plugin
                    Run('rm -rf ' + LibDir + '/' + name + '-' + version + '*')
                    Log(name + '-' + version + ' extension files deleted.')
                    SimpleLog(p.plugin_log, name + '-' + version + ' extension files deleted.')
                continue
            # state is enabled
            # if the same plugin exists and the version is newer or
            # does not exist then download and unzip the new plugin
            plg_dir = None
            latest_version_installed = LooseVersion("0.0")
            for item in os.listdir(LibDir):
                itemPath = os.path.join(LibDir, item)
                if os.path.isdir(itemPath) and name in item:
                    try:
                        # Split plugin dir name with '-' to get intalled plugin name and version
                        sperator = item.rfind('-')
                        if sperator < 0:
                            continue
                        installed_plg_name = item[0:sperator]
                        installed_plg_version = LooseVersion(item[sperator + 1:])
                        # Check installed plugin name and compare installed version to get the latest version installed
                        if installed_plg_name == name and installed_plg_version > latest_version_installed:
                            plg_dir = itemPath
                            previous_version = str(installed_plg_version)
                            latest_version_installed = installed_plg_version
                    except Exception as e:
                        Warn("Invalid plugin dir name: {0} {1}".format(item, e))
                        continue
            if plg_dir == None or LooseVersion(version) > LooseVersion(previous_version):
                location = p.getAttribute("location")
                Log("Downloading plugin manifest: " + name + " from " + location)
                SimpleLog(p.plugin_log, "Downloading plugin manifest: " + name + " from " + location)
                self.Util.Endpoint = location.split('/')[2]
                Log("Plugin server is: " + self.Util.Endpoint)
                SimpleLog(p.plugin_log, "Plugin server is: " + self.Util.Endpoint)
                manifest = self.Util.HttpGetWithoutHeaders(location, chkProxy=True)
                if manifest == None:
                    Error(
                        "Unable to download plugin manifest" + name + " from primary location. Attempting with failover location.")
                    SimpleLog(p.plugin_log,
                              "Unable to download plugin manifest" + name + " from primary location. Attempting with failover location.")
                    failoverlocation = p.getAttribute("failoverlocation")
                    self.Util.Endpoint = failoverlocation.split('/')[2]
                    Log("Plugin failover server is: " + self.Util.Endpoint)
                    SimpleLog(p.plugin_log, "Plugin failover server is: " + self.Util.Endpoint)
                    manifest = self.Util.HttpGetWithoutHeaders(failoverlocation, chkProxy=True)
                    # if failoverlocation also fail what to do then?
                    # NOTE(review): execution falls through with manifest == None,
                    # so the BOM check / parseString below would fail — verify.
                    if manifest == None:
                        AddExtensionEvent(name, WALAEventOperation.Download, False, 0, version,
                                          "Download mainfest fail " + failoverlocation)
                        Log("Plugin manifest " + name + " downloading failed from failover location.")
                        SimpleLog(p.plugin_log, "Plugin manifest " + name + " downloading failed from failover location.")
                filepath = LibDir + "/" + name + '.' + incarnation + '.manifest'
                if os.path.splitext(location)[-1] == '.xml':  # if this is an xml file we may have a BOM
                    if ord(manifest[0]) > 128 and ord(manifest[1]) > 128 and ord(manifest[2]) > 128:
                        manifest = manifest[3:]
                SetFileContents(filepath, manifest)
                # Get the bundle url from the manifest
                p.setAttribute('manifestdata', manifest)
                man_dom = xml.dom.minidom.parseString(manifest)
                bundle_uri = ""
                for mp in man_dom.getElementsByTagName("Plugin"):
                    if GetNodeTextData(mp.getElementsByTagName("Version")[0]) == version:
                        bundle_uri = GetNodeTextData(mp.getElementsByTagName("Uri")[0])
                        break
                    if len(mp.getElementsByTagName("DisallowMajorVersionUpgrade")):
                        if GetNodeTextData(mp.getElementsByTagName("DisallowMajorVersionUpgrade")[
                                               0]) == 'true' and previous_version != None and previous_version.split('.')[
                            0] != version.split('.')[0]:
                            Log('DisallowMajorVersionUpgrade is true, this major version is restricted from upgrade.')
                            SimpleLog(p.plugin_log,
                                      'DisallowMajorVersionUpgrade is true, this major version is restricted from upgrade.')
                            p.setAttribute('restricted', 'true')
                            continue
                if len(bundle_uri) < 1:
                    Error("Unable to fetch Bundle URI from manifest for " + name + " v " + version)
                    SimpleLog(p.plugin_log, "Unable to fetch Bundle URI from manifest for " + name + " v " + version)
                    continue
                Log("Bundle URI = " + bundle_uri)
                SimpleLog(p.plugin_log, "Bundle URI = " + bundle_uri)
                # Download the zipfile archive and save as '.zip'
                bundle = self.Util.HttpGetWithoutHeaders(bundle_uri, chkProxy=True)
                if bundle == None:
                    # NOTE(review): passes isSuccess=True for a FAILED download;
                    # the manifest-failure path above passes False — verify.
                    AddExtensionEvent(name, WALAEventOperation.Download, True, 0, version,
                                      "Download zip fail " + bundle_uri)
                    Error("Unable to download plugin bundle" + bundle_uri)
                    SimpleLog(p.plugin_log, "Unable to download plugin bundle" + bundle_uri)
                    continue
                AddExtensionEvent(name, WALAEventOperation.Download, True, 0, version, "Download Success")
                b = bytearray(bundle)
                filepath = LibDir + "/" + os.path.basename(bundle_uri) + '.zip'
                SetFileContents(filepath, b)
                Log("Plugin bundle" + bundle_uri + "downloaded successfully length = " + str(len(bundle)))
                SimpleLog(p.plugin_log,
                          "Plugin bundle" + bundle_uri + "downloaded successfully length = " + str(len(bundle)))
                # unpack the archive
                z = zipfile.ZipFile(filepath)
                zip_dir = LibDir + "/" + name + '-' + version
                z.extractall(zip_dir)
                Log('Extracted ' + bundle_uri + ' to ' + zip_dir)
                SimpleLog(p.plugin_log, 'Extracted ' + bundle_uri + ' to ' + zip_dir)
                # zip no file perms in .zip so set all the scripts to +x
                Run("find " + zip_dir + " -type f | xargs chmod u+x ")
                # write out the base64 config data so the plugin can process it.
                mfile = None
                for root, dirs, files in os.walk(zip_dir):
                    for f in files:
                        if f in ('HandlerManifest.json'):
                            mfile = os.path.join(root, f)
                    if mfile != None:
                        break
                if mfile == None:
                    Error('HandlerManifest.json not found.')
                    SimpleLog(p.plugin_log, 'HandlerManifest.json not found.')
                    continue
                manifest = GetFileContents(mfile)
                p.setAttribute('manifestdata', manifest)
                # create the status and config dirs
                # (`root` is the directory that contains HandlerManifest.json)
                Run('mkdir -p ' + root + '/status')
                Run('mkdir -p ' + root + '/config')
                # write out the configuration data to goalStateIncarnation.settings file in the config path.
                config = ''
                seqNo = '0'
                if len(dom.getElementsByTagName("PluginSettings")) != 0:
                    pslist = dom.getElementsByTagName("PluginSettings")[0].getElementsByTagName("Plugin")
                    for ps in pslist:
                        if name == ps.getAttribute("name") and version == ps.getAttribute("version"):
                            Log("Found RuntimeSettings for " + name + " V " + version)
                            SimpleLog(p.plugin_log, "Found RuntimeSettings for " + name + " V " + version)
                            config = GetNodeTextData(ps.getElementsByTagName("RuntimeSettings")[0])
                            seqNo = ps.getElementsByTagName("RuntimeSettings")[0].getAttribute("seqNo")
                            break
                if config == '':
                    Log("No RuntimeSettings for " + name + " V " + version)
                    SimpleLog(p.plugin_log, "No RuntimeSettings for " + name + " V " + version)
                SetFileContents(root + "/config/" + seqNo + ".settings", config)
                # create HandlerEnvironment.json
                handler_env = '[{ "name": "' + name + '", "seqNo": "' + seqNo + '", "version": 1.0, "handlerEnvironment": { "logFolder": "' + os.path.dirname(
                    p.plugin_log) + '", "configFolder": "' + root + '/config", "statusFolder": "' + root + '/status", "heartbeatFile": "' + root + '/heartbeat.log"}}]'
                SetFileContents(root + '/HandlerEnvironment.json', handler_env)
                self.SetHandlerState(handler, 'NotInstalled')
                cmd = ''
                getcmd = 'installCommand'
                # Upgrade path: an older version is installed and the new one is higher.
                if plg_dir != None and previous_version != None and LooseVersion(version) > LooseVersion(
                        previous_version):
                    previous_handler = name + '-' + previous_version
                    if self.GetHandlerState(previous_handler) != 'NotInstalled':
                        getcmd = 'updateCommand'
                        # disable the old plugin if it exists
                        if self.launchCommand(p.plugin_log, name, previous_version, 'disableCommand') == None:
                            self.SetHandlerState(previous_handler, 'Enabled')
                            Error('Unable to disable old plugin ' + name + ' version ' + previous_version)
                            SimpleLog(p.plugin_log,
                                      'Unable to disable old plugin ' + name + ' version ' + previous_version)
                        else:
                            self.SetHandlerState(previous_handler, 'Disabled')
                            Log(name + ' version ' + previous_version + ' is disabled')
                            SimpleLog(p.plugin_log, name + ' version ' + previous_version + ' is disabled')
                    try:
                        Log("Copy status file from old plugin dir to new")
                        old_plg_dir = plg_dir
                        new_plg_dir = os.path.join(LibDir, "{0}-{1}".format(name, version))
                        old_ext_status_dir = os.path.join(old_plg_dir, "status")
                        new_ext_status_dir = os.path.join(new_plg_dir, "status")
                        if os.path.isdir(old_ext_status_dir):
                            for status_file in os.listdir(old_ext_status_dir):
                                status_file_path = os.path.join(old_ext_status_dir, status_file)
                                if os.path.isfile(status_file_path):
                                    shutil.copy2(status_file_path, new_ext_status_dir)
                        # mrseq records the most-recently-executed sequence number.
                        mrseq_file = os.path.join(old_plg_dir, "mrseq")
                        if os.path.isfile(mrseq_file):
                            shutil.copy(mrseq_file, new_plg_dir)
                    except Exception as e:
                        Error("Failed to copy status file.")
                isupgradeSuccess = True
                if getcmd == 'updateCommand':
                    if self.launchCommand(p.plugin_log, name, version, getcmd, previous_version) == None:
                        Error('Update failed for ' + name + '-' + version)
                        SimpleLog(p.plugin_log, 'Update failed for ' + name + '-' + version)
                        isupgradeSuccess = False
                    else:
                        Log('Update complete' + name + '-' + version)
                        SimpleLog(p.plugin_log, 'Update complete' + name + '-' + version)
                    # if we updated - call unistall for the old plugin
                    if self.launchCommand(p.plugin_log, name, previous_version, 'uninstallCommand') == None:
                        self.SetHandlerState(previous_handler, 'Installed')
                        Error('Uninstall failed for ' + name + '-' + previous_version)
                        SimpleLog(p.plugin_log, 'Uninstall failed for ' + name + '-' + previous_version)
                        isupgradeSuccess = False
                    else:
                        self.SetHandlerState(previous_handler, 'NotInstalled')
                        Log('Uninstall complete' + previous_handler)
                        SimpleLog(p.plugin_log, 'Uninstall complete' + name + '-' + previous_version)
                    try:
                        # rm old plugin dir
                        if os.path.isdir(plg_dir):
                            shutil.rmtree(plg_dir)
                        Log(name + '-' + previous_version + ' extension files deleted.')
                        SimpleLog(p.plugin_log, name + '-' + previous_version + ' extension files deleted.')
                    except Exception as e:
                        Error("Failed to remove old plugin directory")
                    AddExtensionEvent(name, WALAEventOperation.Upgrade, isupgradeSuccess, 0, previous_version)
                else:  # run install
                    if self.launchCommand(p.plugin_log, name, version, getcmd) == None:
                        self.SetHandlerState(handler, 'NotInstalled')
                        Error('Installation failed for ' + name + '-' + version)
                        SimpleLog(p.plugin_log, 'Installation failed for ' + name + '-' + version)
                    else:
                        self.SetHandlerState(handler, 'Installed')
                        Log('Installation completed for ' + name + '-' + version)
                        SimpleLog(p.plugin_log, 'Installation completed for ' + name + '-' + version)
            # end if plg_dir == none or version > = prev
            # change incarnation of settings file so it knows how to name status...
            zip_dir = LibDir + "/" + name + '-' + version
            mfile = None
            for root, dirs, files in os.walk(zip_dir):
                for f in files:
                    if f in ('HandlerManifest.json'):
                        mfile = os.path.join(root, f)
                if mfile != None:
                    break
            if mfile == None:
                Error('HandlerManifest.json not found.')
                SimpleLog(p.plugin_log, 'HandlerManifest.json not found.')
                continue
            manifest = GetFileContents(mfile)
            p.setAttribute('manifestdata', manifest)
            config = ''
            seqNo = '0'
            if len(dom.getElementsByTagName("PluginSettings")) != 0:
                try:
                    pslist = dom.getElementsByTagName("PluginSettings")[0].getElementsByTagName("Plugin")
                except:
                    Error('Error parsing ExtensionsConfig.')
                    SimpleLog(p.plugin_log, 'Error parsing ExtensionsConfig.')
                    continue
                for ps in pslist:
                    if name == ps.getAttribute("name") and version == ps.getAttribute("version"):
                        Log("Found RuntimeSettings for " + name + " V " + version)
                        SimpleLog(p.plugin_log, "Found RuntimeSettings for " + name + " V " + version)
                        config = GetNodeTextData(ps.getElementsByTagName("RuntimeSettings")[0])
                        seqNo = ps.getElementsByTagName("RuntimeSettings")[0].getAttribute("seqNo")
                        break
            if config == '':
                Error("No RuntimeSettings for " + name + " V " + version)
                SimpleLog(p.plugin_log, "No RuntimeSettings for " + name + " V " + version)
            SetFileContents(root + "/config/" + seqNo + ".settings", config)
            # state is still enable
            if (self.GetHandlerState(handler) == 'NotInstalled'):  # run install first if true
                if self.launchCommand(p.plugin_log, name, version, 'installCommand') == None:
                    self.SetHandlerState(handler, 'NotInstalled')
                    Error('Installation failed for ' + name + '-' + version)
                    SimpleLog(p.plugin_log, 'Installation failed for ' + name + '-' + version)
                else:
                    self.SetHandlerState(handler, 'Installed')
                    Log('Installation completed for ' + name + '-' + version)
                    SimpleLog(p.plugin_log, 'Installation completed for ' + name + '-' + version)
            if (self.GetHandlerState(handler) != 'NotInstalled'):
                if self.launchCommand(p.plugin_log, name, version, 'enableCommand') == None:
                    self.SetHandlerState(handler, 'Installed')
                    Error('Enable failed for ' + name + '-' + version)
                    SimpleLog(p.plugin_log, 'Enable failed for ' + name + '-' + version)
                else:
                    self.SetHandlerState(handler, 'Enabled')
                    Log('Enable completed for ' + name + '-' + version)
                    SimpleLog(p.plugin_log, 'Enable completed for ' + name + '-' + version)
            # this plugin processing is complete
            Log('Processing completed for ' + name + '-' + version)
            SimpleLog(p.plugin_log, 'Processing completed for ' + name + '-' + version)
        # end plugin processing loop
        Log('Finished processing ExtensionsConfig.xml')
        try:
            # `p` is the last plugin iterated; the try guards the no-plugins case.
            SimpleLog(p.plugin_log, 'Finished processing ExtensionsConfig.xml')
        except:
            pass
        return self
    def launchCommand(self, plugin_log, name, version, command, prev_version=None):
        """
        Run one handler command (install/uninstall/update/enable/disable)
        and report its duration and success as an extension telemetry event.
        Returns 0 on success, None on failure (same as the wrapped call).
        """
        commandToEventOperation = {
            "installCommand": WALAEventOperation.Install,
            "uninstallCommand": WALAEventOperation.UnIsntall,
            "updateCommand": WALAEventOperation.Upgrade,
            "enableCommand": WALAEventOperation.Enable,
            "disableCommand": WALAEventOperation.Disable,
        }
        isSuccess = True
        start = datetime.datetime.now()
        r = self.__launchCommandWithoutEventLog(plugin_log, name, version, command, prev_version)
        if r == None:
            isSuccess = False
        Duration = int((datetime.datetime.now() - start).seconds)
        if commandToEventOperation.get(command):
            AddExtensionEvent(name, commandToEventOperation[command], isSuccess, Duration, version)
        return r
    def __launchCommandWithoutEventLog(self, plugin_log, name, version, command, prev_version=None):
        """
        Look up `command` in the handler's HandlerManifest.json and run it
        as a shell subprocess from the manifest's directory, waiting up to
        5 minutes. Returns 0 on success, None on any failure.
        """
        # get the manifest and read the command
        mfile = None
        zip_dir = LibDir + "/" + name + '-' + version
        for root, dirs, files in os.walk(zip_dir):
            for f in files:
                if f in ('HandlerManifest.json'):
                    mfile = os.path.join(root, f)
            if mfile != None:
                break
        if mfile == None:
            Error('HandlerManifest.json not found.')
            SimpleLog(plugin_log, 'HandlerManifest.json not found.')
            return None
        manifest = GetFileContents(mfile)
        try:
            jsn = json.loads(manifest)
        except:
            Error('Error parsing HandlerManifest.json.')
            SimpleLog(plugin_log, 'Error parsing HandlerManifest.json.')
            return None
        if type(jsn) == list:
            jsn = jsn[0]
        # NOTE(review): has_key is Python-2 only; also, if 'handlerManifest'
        # is missing, `cmd` is never bound and the len(cmd) below raises
        # NameError instead of returning None — verify intended.
        if jsn.has_key('handlerManifest'):
            cmd = jsn['handlerManifest'][command]
        else:
            Error('Key handlerManifest not found. Handler cannot be installed.')
            SimpleLog(plugin_log, 'Key handlerManifest not found. Handler cannot be installed.')
        if len(cmd) == 0:
            Error('Unable to read ' + command)
            SimpleLog(plugin_log, 'Unable to read ' + command)
            return None
        # for update we send the path of the old installation
        arg = ''
        if prev_version != None:
            arg = ' ' + LibDir + '/' + name + '-' + prev_version
        dirpath = os.path.dirname(mfile)
        LogIfVerbose('Command is ' + dirpath + '/' + cmd)
        # launch
        pid = None
        try:
            child = subprocess.Popen(dirpath + '/' + cmd + arg, shell=True, cwd=dirpath, stdout=subprocess.PIPE)
        except Exception as e:
            # NOTE(review): on Popen failure `child` is unbound, so the
            # child.pid access below raises NameError — verify intended.
            Error('Exception launching ' + cmd + str(e))
            SimpleLog(plugin_log, 'Exception launching ' + cmd + str(e))
        pid = child.pid
        if pid == None or pid < 1:
            ExtensionChildren.append((-1, root))
            Error('Error launching ' + cmd + '.')
            SimpleLog(plugin_log, 'Error launching ' + cmd + '.')
        else:
            ExtensionChildren.append((pid, root))
            Log("Spawned " + cmd + " PID " + str(pid))
            SimpleLog(plugin_log, "Spawned " + cmd + " PID " + str(pid))
        # wait until install/upgrade is finished
        timeout = 300  # 5 minutes
        retry = timeout / 5
        while retry > 0 and child.poll() == None:
            LogIfVerbose(cmd + ' still running with PID ' + str(pid))
            time.sleep(5)
            retry -= 1
        if retry == 0:
            Error('Process exceeded timeout of ' + str(timeout) + ' seconds. Terminating process ' + str(pid))
            SimpleLog(plugin_log,
                      'Process exceeded timeout of ' + str(timeout) + ' seconds. Terminating process ' + str(pid))
            os.kill(pid, 9)
            return None
        code = child.wait()
        if code == None or code != 0:
            Error('Process ' + str(pid) + ' returned non-zero exit code (' + str(code) + ')')
            SimpleLog(plugin_log, 'Process ' + str(pid) + ' returned non-zero exit code (' + str(code) + ')')
            return None
        Log(command + ' completed.')
        SimpleLog(plugin_log, command + ' completed.')
        return 0
    def ReportHandlerStatus(self):
        """
        Collect all status reports.
        """
        # { "version": "1.0", "timestampUTC": "2014-03-31T21:28:58Z",
        # "aggregateStatus": {
        # "guestAgentStatus": { "version": "2.0.4PRE", "status": "Ready", "formattedMessage": { "lang": "en-US", "message": "GuestAgent is running and accepting new configurations." } },
        # "handlerAggregateStatus": [{
        # "handlerName": "ExampleHandlerLinux", "handlerVersion": "1.0", "status": "Ready", "runtimeSettingsStatus": {
        # "sequenceNumber": "2", "settingsStatus": { "timestampUTC": "2014-03-31T23:46:00Z", "status": { "name": "ExampleHandlerLinux", "operation": "Command Execution Finished", "configurationAppliedTime": "2014-03-31T23:46:00Z", "status": "success", "formattedMessage": { "lang": "en-US", "message": "Finished executing command" },
        # "substatus": [
        # { "name": "StdOut", "status": "success", "formattedMessage": { "lang": "en-US", "message": "Goodbye world!" } },
        # { "name": "StdErr", "status": "success", "formattedMessage": { "lang": "en-US", "message": "" } }
        # ]
        # } } } }
        # ]
        # }}
        try:
            incarnation = self.Extensions[0].getAttribute("goalStateIncarnation")
        except:
            Error('Error parsing attribute "goalStateIncarnation". Unable to send status reports')
            return -1
        status = ''
        statuses = ''
        for p in self.Plugins:
            if p.getAttribute("state") == 'uninstall' or p.getAttribute("restricted") == 'true':
                continue
            version = p.getAttribute("version")
            name = p.getAttribute("name")
            if p.getAttribute("isJson") != 'true':
                LogIfVerbose("Plugin " + name + " version: " + version + " is not a JSON Extension. Skipping.")
                continue
            reportHeartbeat = False
            if len(p.getAttribute("manifestdata")) < 1:
                Error("Failed to get manifestdata.")
            else:
                reportHeartbeat = json.loads(p.getAttribute("manifestdata"))[0]['handlerManifest']['reportHeartbeat']
            if len(statuses) > 0:
                statuses += ','
            statuses += self.GenerateAggStatus(name, version, reportHeartbeat)
        tstamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
        # header
        # agent state
        if provisioned == False:
            if provisionError == None:
                agent_state = 'Provisioning'
                agent_msg = 'Guest Agent is starting.'
            else:
                agent_state = 'Provisioning Error.'
                agent_msg = provisionError
        else:
            agent_state = 'Ready'
            agent_msg = 'GuestAgent is running and accepting new configurations.'
        status = '{"version":"1.0","timestampUTC":"' + tstamp + '","aggregateStatus":{"guestAgentStatus":{"version":"' + GuestAgentVersion + '","status":"' + agent_state + '","formattedMessage":{"lang":"en-US","message":"' + agent_msg + '"}},"handlerAggregateStatus":[' + statuses + ']}}'
        try:
            # NOTE(review): this replace is a no-op; upstream used
            # .replace('&amp;', '&') — the literal may have been mangled
            # by an XML/HTML unescape in transit. Verify against upstream.
            uri = GetNodeTextData(self.Extensions[0].getElementsByTagName("StatusUploadBlob")[0]).replace('&', '&')
        except:
            Error('Error parsing element "StatusUploadBlob". Unable to send status reports')
            return -1
        LogIfVerbose('Status report ' + status + ' sent to ' + uri)
        return UploadStatusBlob(uri, status.encode("utf-8"))
    def GetCurrentSequenceNumber(self, plugin_base_dir):
        """
        Get the settings file with biggest file number in config folder
        """
        config_dir = os.path.join(plugin_base_dir, 'config')
        seq_no = 0
        for subdir, dirs, files in os.walk(config_dir):
            for file in files:
                try:
                    cur_seq_no = int(os.path.basename(file).split('.')[0])
                    if cur_seq_no > seq_no:
                        seq_no = cur_seq_no
                except ValueError:
                    continue
        return str(seq_no)
    def GenerateAggStatus(self, name, version, reportHeartbeat=False):
        """
        Generate the status which Azure can understand by the status and heartbeat reported by extension
        """
        plugin_base_dir = LibDir + '/' + name + '-' + version + '/'
        current_seq_no = self.GetCurrentSequenceNumber(plugin_base_dir)
        status_file = os.path.join(plugin_base_dir, 'status/', current_seq_no + '.status')
        heartbeat_file = os.path.join(plugin_base_dir, 'heartbeat.log')
        handler_state_file = os.path.join(plugin_base_dir, 'config', 'HandlerState')
        agg_state = 'NotReady'
        handler_state = None
        status_obj = None
        status_code = None
        formatted_message = None
        localized_message = None
        if os.path.exists(handler_state_file):
            handler_state = GetFileContents(handler_state_file).lower()
        if HandlerStatusToAggStatus.has_key(handler_state):
            agg_state = HandlerStatusToAggStatus[handler_state]
        if reportHeartbeat:
            if os.path.exists(heartbeat_file):
                d = int(time.time() - os.stat(heartbeat_file).st_mtime)
                if d > 600:  # not updated for more than 10 min
                    agg_state = 'Unresponsive'
                else:
                    try:
                        heartbeat = json.loads(GetFileContents(heartbeat_file))[0]["heartbeat"]
                        agg_state = heartbeat.get("status")
                        status_code = heartbeat.get("code")
                        formatted_message = heartbeat.get("formattedMessage")
                        localized_message = heartbeat.get("message")
                    except:
                        Error("Incorrect heartbeat file. Ignore it. ")
            else:
                agg_state = 'Unresponsive'
        # get status file reported by extension
        if os.path.exists(status_file):
            # raw status generated by extension is an array, get the first item and remove the unnecessary element
            try:
                status_obj = json.loads(GetFileContents(status_file))[0]
                del status_obj["version"]
            except:
                Error("Incorrect status file. Will NOT settingsStatus in settings. ")
        agg_status_obj = {"handlerName": name, "handlerVersion": version, "status": agg_state, "runtimeSettingsStatus":
            {"sequenceNumber": current_seq_no}}
        if status_obj:
            agg_status_obj["runtimeSettingsStatus"]["settingsStatus"] = status_obj
        if status_code != None:
            agg_status_obj["code"] = status_code
        if formatted_message:
            agg_status_obj["formattedMessage"] = formatted_message
        if localized_message:
            agg_status_obj["message"] = localized_message
        agg_status_string = json.dumps(agg_status_obj)
        LogIfVerbose("Handler Aggregated Status:" + agg_status_string)
        return agg_status_string
    def SetHandlerState(self, handler, state=''):
        """
        Persist `state` into <handler dir>/config/HandlerState.
        Returns None if HandlerManifest.json cannot be located.
        """
        zip_dir = LibDir + "/" + handler
        mfile = None
        for root, dirs, files in os.walk(zip_dir):
            for f in files:
                if f in ('HandlerManifest.json'):
                    mfile = os.path.join(root, f)
            if mfile != None:
                break
        if mfile == None:
            Error('SetHandlerState(): HandlerManifest.json not found, cannot set HandlerState.')
            return None
        Log("SetHandlerState: " + handler + ", " + state)
        return SetFileContents(os.path.dirname(mfile) + '/config/HandlerState', state)
    def GetHandlerState(self, handler):
        """
        Read the persisted handler state; 'NotInstalled' when absent.
        """
        # NOTE(review): the path is relative — assumes the process cwd is
        # the agent lib dir; verify against callers.
        handlerState = GetFileContents(handler + '/config/HandlerState')
        if (handlerState):
            return handlerState.rstrip('\r\n')
        else:
            return 'NotInstalled'
class HostingEnvironmentConfig(object):
    """
    Parse the hosting environment config and store it in
    HostingEnvironmentConfig.xml.
    """
    #
    # <HostingEnvironmentConfig version="1.0.0.0" goalStateIncarnation="1">
    #   <StoredCertificates>
    #     <StoredCertificate name="Stored0Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" certificateId="sha1:C093FA5CD3AAE057CB7C4E04532B2E16E07C26CA" storeName="My" configurationLevel="System" />
    #   </StoredCertificates>
    #   <Deployment name="db00a7755a5e4e8a8fe4b19bc3b330c3" guid="{ce5a036f-5c93-40e7-8adf-2613631008ab}" incarnation="2">
    #     <Service name="MyVMRoleService" guid="{00000000-0000-0000-0000-000000000000}" />
    #     <ServiceInstance name="db00a7755a5e4e8a8fe4b19bc3b330c3.1" guid="{d113f4d7-9ead-4e73-b715-b724b5b7842c}" />
    #   </Deployment>
    #   <Incarnation number="1" instance="MachineRole_IN_0" guid="{a0faca35-52e5-4ec7-8fd1-63d2bc107d9b}" />
    #   <Role guid="{73d95f1c-6472-e58e-7a1a-523554e11d46}" name="MachineRole" hostingEnvironmentVersion="1" software="" softwareType="ApplicationPackage" entryPoint="" parameters="" settleTimeSeconds="10" />
    #   <HostingEnvironmentSettings name="full" Runtime="rd_fabric_stable.110217-1402.RuntimePackage_1.0.0.8.zip">
    #     <CAS mode="full" />
    #     <PrivilegeLevel mode="max" />
    #     <AdditionalProperties><CgiHandlers></CgiHandlers></AdditionalProperties>
    #   </HostingEnvironmentSettings>
    #   <ApplicationSettings>
    #     <Setting name="__ModelData" value="..." />
    #     <Setting name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" value="DefaultEndpointsProtocol=http;AccountName=osimages;AccountKey=DNZQ..." />
    #     <Setting name="Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" value="true" />
    #   </ApplicationSettings>
    #   <ResourceReferences>
    #     <Resource name="DiagnosticStore" type="directory" request="Microsoft.Cis.Fabric.Controller.Descriptions.ServiceDescription.Data.Policy" sticky="true" size="1" path="db00a7755a5e4e8a8fe4b19bc3b330c3.MachineRole.DiagnosticStore\" disableQuota="false" />
    #   </ResourceReferences>
    # </HostingEnvironmentConfig>
    #
    def __init__(self):
        self.reinitialize()

    def reinitialize(self):
        """
        Reset members to their unparsed (None) state.
        """
        self.StoredCertificates = None
        self.Deployment = None
        self.Incarnation = None
        self.Role = None
        self.HostingEnvironmentSettings = None
        self.ApplicationSettings = None
        self.Certificates = None
        self.ResourceReferences = None

    def Parse(self, xmlText):
        """
        Parse xmlText and persist it as HostingEnvironmentConfig.xml.

        Returns self on success, or None (after logging) when any required
        element is missing or the root element is wrong.
        """
        self.reinitialize()
        SetFileContents("HostingEnvironmentConfig.xml", xmlText)
        dom = xml.dom.minidom.parseString(xmlText)
        # All of these elements must be present for a valid config.
        for a in ["HostingEnvironmentConfig", "Deployment", "Service",
                  "ServiceInstance", "Incarnation", "Role", ]:
            if not dom.getElementsByTagName(a):
                Error("HostingEnvironmentConfig.Parse: Missing " + a)
                return None
        node = dom.childNodes[0]
        if node.localName != "HostingEnvironmentConfig":
            Error("HostingEnvironmentConfig.Parse: root not HostingEnvironmentConfig")
            return None
        self.ApplicationSettings = dom.getElementsByTagName("Setting")
        self.Certificates = dom.getElementsByTagName("StoredCertificate")
        return self

    def DecryptPassword(self, e):
        """
        Return the decrypted password.

        Wraps the base64 blob 'e' in a PKCS#7 MIME envelope and decrypts it
        with openssl cms using the Certificates.pem key pair in the current
        directory; returns openssl's stdout.
        """
        SetFileContents("password.p7m",
                        "MIME-Version: 1.0\n"
                        + "Content-Disposition: attachment; filename=\"password.p7m\"\n"
                        + "Content-Type: application/x-pkcs7-mime; name=\"password.p7m\"\n"
                        + "Content-Transfer-Encoding: base64\n\n"
                        + textwrap.fill(e, 64))
        return RunGetOutput(Openssl + " cms -decrypt -in password.p7m -inkey Certificates.pem -recip Certificates.pem")[
            1]

    def ActivateResourceDisk(self):
        # Thin delegation; the distro object implements the actual logic.
        return MyDistro.ActivateResourceDisk()

    def Process(self):
        """
        Execute ActivateResourceDisk in a separate thread (or inline when
        the distro provides ActivateResourceDiskNoThread).
        Create the user account (see NOTE below).
        Launch ConfigurationConsumer if specified in the config.
        """
        no_thread = False
        if DiskActivated == False:
            # inspect.getmembers yields (name, value) tuples; the membership
            # test checks whether the distro exposes the no-thread variant.
            for m in inspect.getmembers(MyDistro):
                if 'ActivateResourceDiskNoThread' in m:
                    no_thread = True
                    break
            if no_thread == True:
                MyDistro.ActivateResourceDiskNoThread()
            else:
                diskThread = threading.Thread(target=self.ActivateResourceDisk)
                diskThread.start()
        User = None
        Pass = None
        Expiration = None
        Thumbprint = None
        for b in self.ApplicationSettings:
            sname = b.getAttribute("name")
            svalue = b.getAttribute("value")
        # NOTE(review): User/Pass are never assigned above (the loop reads
        # each setting's name/value but discards them), so CreateAccount can
        # never run. This looks like the credential-parsing logic was
        # deliberately removed -- confirm before relying on account creation.
        if User != None and Pass != None:
            if User != "root" and User != "" and Pass != "":
                CreateAccount(User, Pass, Expiration, Thumbprint)
            else:
                Error("Not creating user account: " + User)
        for c in self.Certificates:
            # certificateId has the form "sha1:<THUMBPRINT>".
            csha1 = c.getAttribute("certificateId").split(':')[1].upper()
            if os.path.isfile(csha1 + ".prv"):
                Log("Private key with thumbprint: " + csha1 + " was retrieved.")
            if os.path.isfile(csha1 + ".crt"):
                Log("Public cert with thumbprint: " + csha1 + " was retrieved.")
        program = Config.get("Role.ConfigurationConsumer")
        if program != None:
            try:
                Children.append(subprocess.Popen([program, LibDir + "/HostingEnvironmentConfig.xml"]))
            except OSError as e:
                ErrorWithPrefix('HostingEnvironmentConfig.Process',
                                'Exception: ' + str(e) + ' occured launching ' + program)
class WALAEvent(object):
    """Base telemetry event, serialized to the WALA XML wire format."""

    def __init__(self):
        # Attribute creation order matters: ToXml() walks __dict__ in
        # insertion order, so the emitted <Param> order follows this list.
        self.providerId = ""
        self.eventId = 1
        self.OpcodeName = ""
        self.KeywordName = ""
        self.TaskName = ""
        self.TenantName = ""
        self.RoleName = ""
        self.RoleInstanceName = ""
        self.ContainerId = ""
        self.ExecutionMode = "IAAS"
        self.OSVersion = ""
        self.GAVersion = ""
        self.RAM = 0
        self.Processors = 0

    def ToXml(self):
        """Render this event as a <Data> XML fragment (Provider, Event,
        then one <Param> per serializable attribute)."""
        event_xml = u'<Event id="{0}"/>'.format(self.eventId)
        provider_xml = u'<Provider id="{0}"/>'.format(self.providerId)
        quoted_param = u'<Param Name="{0}" Value="{1}" T="{2}" />'
        raw_param = u'<Param Name="{0}" Value={1} T="{2}" />'
        params_xml = u""
        for att_name, att_value in self.__dict__.items():
            # eventId/providerId are emitted separately; "filedCount" is a
            # historical exclusion kept for compatibility.
            if att_name in ["eventId", "filedCount", "providerId"]:
                continue
            if type(att_value) is int:
                params_xml += quoted_param.format(att_name, att_value, u'mt:uint64')
            elif type(att_value) is str:
                params_xml += raw_param.format(
                    att_name, xml.sax.saxutils.quoteattr(att_value), u'mt:wstr')
            elif str(type(att_value)).count("'unicode'") > 0:
                # Python 2 'unicode' values; unreachable on Python 3.
                params_xml += raw_param.format(
                    att_name, xml.sax.saxutils.quoteattr(att_value), u'mt:wstr')
            elif type(att_value) is bool:
                params_xml += quoted_param.format(att_name, att_value, u'mt:bool')
            elif type(att_value) is float:
                params_xml += quoted_param.format(att_name, att_value, u'mt:float64')
            else:
                Log("Warning: property " + att_name + ":" + str(type(att_value)) + ":type" + str(
                    type(att_value)) + "Can't convert to events data:" + ":type not supported")
        return u"<Data>{0}{1}{2}</Data>".format(provider_xml, event_xml, params_xml)

    def Save(self):
        """Persist this event as a .tld file under LibDir/events, writing to
        a .tmp file first and renaming for atomicity."""
        events_dir = LibDir + "/events"
        if not os.path.exists(events_dir):
            os.mkdir(events_dir)
            os.chmod(events_dir, 0o700)
        # Refuse to grow the spool without bound.
        if len(os.listdir(events_dir)) > 1000:
            raise Exception("WriteToFolder:Too many file under " + events_dir + " exit")
        base = os.path.join(events_dir, str(int(time.time() * 1000000)))
        with open(base + ".tmp", 'wb+') as handle:
            handle.write(self.ToXml().encode("utf-8"))
        os.rename(base + ".tmp", base + ".tld")
class WALAEventOperation:
    """
    Symbolic names for telemetry event operations.

    NOTE: 'UnIsntall' is a historical misspelling kept for backward
    compatibility with existing callers; 'UnInstall' is the correctly
    spelled alias. Both carry the same wire value.
    """
    HeartBeat = "HeartBeat"
    Provision = "Provision"
    Install = "Install"
    UnIsntall = "UnInstall"
    UnInstall = "UnInstall"  # correctly spelled alias of UnIsntall
    Disable = "Disable"
    Enable = "Enable"
    Download = "Download"
    Upgrade = "Upgrade"
    Update = "Update"
def AddExtensionEvent(name, op, isSuccess, duration=0, version="1.0", message="", type="", isInternal=False):
    """
    Build an ExtensionEvent from the given fields and persist it to the
    events spool via ExtensionEvent.Save().

    Save failures are logged and swallowed so telemetry can never take the
    caller down. ('type' shadows the builtin but is kept for interface
    compatibility with existing callers.)
    """
    event = ExtensionEvent()
    event.Name = name
    event.Version = version
    event.IsInternal = isInternal
    event.Operation = op
    event.OperationSuccess = isSuccess
    event.Message = message
    event.Duration = duration
    event.ExtensionType = type
    try:
        event.Save()
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed; save errors are still only logged.
        Error("Error " + traceback.format_exc())
class ExtensionEvent(WALAEvent):
    """
    Telemetry event describing a single extension operation.

    Attribute creation order is significant: WALAEvent.ToXml() serializes
    __dict__ in insertion order, so do not reorder these assignments.
    """
    def __init__(self):
        WALAEvent.__init__(self)
        # eventId/providerId are excluded from the <Param> list by ToXml()
        # and emitted as <Event>/<Provider> elements instead.
        self.eventId = 1
        self.providerId = "69B669B9-4AF8-4C50-BDC4-6006FA76E975"
        self.Name = ""
        self.Version = ""
        self.IsInternal = False
        self.Operation = ""
        self.OperationSuccess = True
        self.ExtensionType = ""
        self.Message = ""
        self.Duration = 0
class WALAEventMonitor(WALAEvent):
    """
    Background loop that drains the LibDir/events spool and uploads the
    batched telemetry through the supplied post method.
    """

    def __init__(self, postMethod):
        WALAEvent.__init__(self)
        self.post = postMethod  # callable(url_path, body) used for upload
        self.sysInfo = {}  # cached host facts merged into every event
        self.eventdir = LibDir + "/events"
        self.issysteminfoinitilized = False  # lazy one-shot init flag (sic)

    def StartEventsLoop(self):
        """Run EventsLoop on a daemon thread so it cannot block shutdown."""
        eventThread = threading.Thread(target=self.EventsLoop)
        eventThread.setDaemon(True)
        eventThread.start()

    def EventsLoop(self):
        """Forever: heartbeat event every 30 minutes, flush the spool every
        60 seconds. Any escaping exception terminates the loop (logged)."""
        LastReportHeartBeatTime = datetime.datetime.min
        try:
            while True:
                if (datetime.datetime.now() - LastReportHeartBeatTime) > \
                        datetime.timedelta(minutes=30):
                    LastReportHeartBeatTime = datetime.datetime.now()
                    AddExtensionEvent(op=WALAEventOperation.HeartBeat, name="WALA", isSuccess=True)
                # NOTE(review): set but never read anywhere visible -- looks dead.
                self.postNumbersInOneLoop = 0
                self.CollectAndSendWALAEvents()
                time.sleep(60)
        except:
            Error("Exception in events loop:" + traceback.format_exc())

    def SendEvent(self, providerid, events):
        """POST one provider's accumulated event XML to the telemetry endpoint."""
        dataFormat = u'<?xml version="1.0"?><TelemetryData version="1.0"><Provider id="{0}">{1}' \
                     '</Provider></TelemetryData>'
        data = dataFormat.format(providerid, events)
        self.post("/machine/?comp=telemetrydata", data)

    def CollectAndSendWALAEvents(self):
        """
        Read every .tld file in the spool, enrich it with system info,
        group payloads by provider id, and upload them in batches kept
        under 63 KB each.
        """
        if not os.path.exists(self.eventdir):
            return
        # Throttling: can't send more than 3 events in 15 seconds.
        eventSendNumber = 0
        eventFiles = os.listdir(self.eventdir)
        events = {}
        for file in eventFiles:
            if not file.endswith(".tld"):
                continue
            with open(os.path.join(self.eventdir, file), "rb") as hfile:
                # If opening or deleting the file fails, the exception
                # propagates (deliberate: the loop caller logs it).
                xmlStr = hfile.read().decode("utf-8", 'ignore')
                os.remove(os.path.join(self.eventdir, file))
            params = ""
            eventid = ""
            providerid = ""
            # If an exception happens while processing one event, log it
            # and continue with the next file.
            try:
                xmlStr = self.AddSystemInfo(xmlStr)
                for node in xml.dom.minidom.parseString(xmlStr.encode("utf-8")).childNodes[0].childNodes:
                    if node.tagName == "Param":
                        params += node.toxml()
                    if node.tagName == "Event":
                        eventid = node.getAttribute("id")
                    if node.tagName == "Provider":
                        providerid = node.getAttribute("id")
            except:
                Error(traceback.format_exc())
                continue
            if len(params) == 0 or len(eventid) == 0 or len(providerid) == 0:
                Error("Empty filed in params:" + params + " event id:" + eventid + " provider id:" + providerid)
                continue
            eventstr = u'<Event id="{0}"><![CDATA[{1}]]></Event>'.format(eventid, params)
            if not events.get(providerid):
                events[providerid] = ""
            # Flush this provider's batch before it would exceed 63 KB.
            if len(events[providerid]) > 0 and len(events.get(providerid) + eventstr) >= 63 * 1024:
                eventSendNumber += 1
                self.SendEvent(providerid, events.get(providerid))
                if eventSendNumber % 3 == 0:
                    time.sleep(15)
                events[providerid] = ""
            if len(eventstr) >= 63 * 1024:
                Error("Signle event too large abort " + eventstr[:300])
                continue
            events[providerid] = events.get(providerid) + eventstr
        # Flush whatever remains for each provider.
        for key in events.keys():
            if len(events[key]) > 0:
                eventSendNumber += 1
                self.SendEvent(key, events[key])
                if eventSendNumber % 3 == 0:
                    time.sleep(15)

    def AddSystemInfo(self, eventData):
        """Inject cached host facts into the matching <Param> values of one
        event XML string; returns the rewritten XML. Host facts are read
        once (lazily) from waagent's cached goal-state files."""
        if not self.issysteminfoinitilized:
            self.issysteminfoinitilized = True
            try:
                self.sysInfo["OSVersion"] = platform.system() + ":" + "-".join(DistInfo(1)) + ":" + platform.release()
                self.sysInfo["GAVersion"] = GuestAgentVersion
                self.sysInfo["RAM"] = MyDistro.getTotalMemory()
                self.sysInfo["Processors"] = MyDistro.getProcessorCores()
                sharedConfig = xml.dom.minidom.parse("/var/lib/waagent/SharedConfig.xml").childNodes[0]
                hostEnvConfig = xml.dom.minidom.parse("/var/lib/waagent/HostingEnvironmentConfig.xml").childNodes[0]
                # Newest GoalState file first ('ls -t').
                gfiles = RunGetOutput("ls -t /var/lib/waagent/GoalState.*.xml")[1]
                goalStateConfi = xml.dom.minidom.parse(gfiles.split("\n")[0]).childNodes[0]
                self.sysInfo["TenantName"] = hostEnvConfig.getElementsByTagName("Deployment")[0].getAttribute("name")
                self.sysInfo["RoleName"] = hostEnvConfig.getElementsByTagName("Role")[0].getAttribute("name")
                self.sysInfo["RoleInstanceName"] = sharedConfig.getElementsByTagName("Instance")[0].getAttribute("id")
                self.sysInfo["ContainerId"] = goalStateConfi.getElementsByTagName("ContainerId")[0].childNodes[
                    0].nodeValue
            except:
                Error(traceback.format_exc())
        eventObject = xml.dom.minidom.parseString(eventData.encode("utf-8")).childNodes[0]
        for node in eventObject.childNodes:
            if node.tagName == "Param":
                name = node.getAttribute("Name")
                if self.sysInfo.get(name):
                    node.setAttribute("Value", xml.sax.saxutils.escape(str(self.sysInfo[name])))
        return eventObject.toxml()
WaagentLogrotate = """\
/var/log/waagent.log {
monthly
rotate 6
notifempty
missingok
}
"""
def GetMountPoint(mountlist, device):
    """
    Return the mount point for 'device' from 'mountlist' (output of
    mount(8)), or None when absent. 'device' is used as a regex.

    Example of mountlist:
        /dev/sda1 on / type ext4 (rw)
        proc on /proc type proc (rw)
        /dev/sdb1 on /mnt/resource type ext4 (rw)
    """
    if not (mountlist and device):
        return None
    for line in mountlist.split('\n'):
        if not re.search(device, line):
            continue
        # mount output shape: "<device> on <mountpoint> type <fs> (<opts>)"
        fields = line.split()
        return fields[2] if len(fields) > 2 else None
    return None
def FindInLinuxKernelCmdline(option):
    """
    Return a match object when 'option' appears on the kernel boot options
    line of the grub configuration, else None (also on IOError, which is
    logged rather than raised).
    """
    pattern = r'^.*?' + MyDistro.grubKernelBootOptionsLine + r'.*?' + option + r'.*$'
    match = None
    try:
        match = FindStringInFile(MyDistro.grubKernelBootOptionsFile, pattern)
    except IOError as e:
        Error(
            'FindInLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + 'Exception:' + str(e))
    return match
def AppendToLinuxKernelCmdline(option):
    """
    Ensure 'option' is present on the grub kernel boot options line, then
    regenerate the grub configuration. Returns 0 on success, 1 when the
    grub file cannot be rewritten.
    """
    if not FindInLinuxKernelCmdline(option):
        # Capture (prefix)(existing options)(optional closing quote) and
        # splice the new option in before the quote.
        pattern = r'^(.*?' + MyDistro.grubKernelBootOptionsLine + r')(.*?)("?)$'
        replacement = r'\1\2 ' + option + r'\3'
        try:
            ReplaceStringInFile(MyDistro.grubKernelBootOptionsFile, pattern, replacement)
        except IOError as e:
            Error(
                'AppendToLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + 'Exception:' + str(
                    e))
            return 1
    Run("update-grub", chk_err=False)
    return 0
def RemoveFromLinuxKernelCmdline(option):
    """
    Remove 'option' from the grub kernel boot options line, then
    regenerate the grub configuration. Returns 0 on success, 1 when the
    grub file cannot be rewritten.
    """
    if FindInLinuxKernelCmdline(option):
        # Drop group 2 (the option itself), keeping everything around it.
        pattern = r'^(.*?' + MyDistro.grubKernelBootOptionsLine + r'.*?)(' + option + r')(.*?)("?)$'
        replacement = r'\1\3\4'
        try:
            ReplaceStringInFile(MyDistro.grubKernelBootOptionsFile, pattern, replacement)
        except IOError as e:
            Error(
                'RemoveFromLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + 'Exception:' + str(
                    e))
            return 1
    Run("update-grub", chk_err=False)
    return 0
def FindStringInFile(fname, matchs):
    """
    Return the first regex match of 'matchs' in file 'fname', else None.

    Exceptions (e.g. IOError on a missing file) propagate to the caller,
    as before; the file handle is now closed deterministically (the
    original leaked it), and the pointless 'try/except: raise' is gone.
    """
    ms = re.compile(matchs)
    with open(fname, 'r') as f:
        for l in f:
            m = re.search(ms, l)
            if m:
                return m
    return None
def ReplaceStringInFile(fname, src, repl):
    """
    Replace regex 'src' with 'repl' throughout file 'fname', writing the
    result back atomically. No-op when the pattern does not occur.

    Exceptions propagate to the caller, as before; the file handle is now
    closed deterministically (the original leaked it).
    """
    sr = re.compile(src)
    if FindStringInFile(fname, src):
        with open(fname, 'r') as f:
            updated = ''.join(re.sub(sr, repl, l) for l in f)
        ReplaceFileContentsAtomic(fname, updated)
    return
def ApplyVNUMAWorkaround():
    """
    If the kernel version has the NUMA bug (<= 2.6.37), add 'numa=off' to
    the kernel boot options; newer kernels are left untouched.
    """
    # NOTE(review): assumes a release string with at least three numeric
    # dot/dash-separated components -- odd release strings would raise here.
    VersionParts = platform.release().replace('-', '.').split('.')
    if int(VersionParts[0]) > 2:
        return
    if int(VersionParts[1]) > 6:
        return
    if int(VersionParts[2]) > 37:
        return
    if AppendToLinuxKernelCmdline("numa=off") == 0:
        Log("Your kernel version " + platform.release() + " has a NUMA-related bug: NUMA has been disabled.")
    else:
        # BUG FIX: this branch was a bare string expression (a no-op), so
        # the failure was silently dropped. Actually log it.
        Error("Error adding 'numa=off'. NUMA has not been disabled.")
def RevertVNUMAWorkaround():
    """Remove 'numa=off' from the kernel boot options and log the outcome."""
    removed = RemoveFromLinuxKernelCmdline("numa=off") == 0
    if removed:
        Log('NUMA has been re-enabled')
    else:
        Log('NUMA has not been re-enabled')
def Install():
    """
    Install the agent service. Returns 0 on success, 1 when dependency
    checks fail.

    Check dependencies.
    Create /etc/waagent.conf and move any old version to
    /etc/waagent.conf.old.
    Copy RulesFiles to /var/lib/waagent.
    Create /etc/logrotate.d/waagent.
    Set /etc/ssh/sshd_config ClientAliveInterval to 180.
    Call ApplyVNUMAWorkaround().
    """
    if MyDistro.checkDependencies():
        return 1
    # Make the agent script itself executable.
    os.chmod(sys.argv[0], 0o755)
    SwitchCwd()
    # Relocate udev rules files into LibDir (replacing stale copies).
    for a in RulesFiles:
        if os.path.isfile(a):
            if os.path.isfile(GetLastPathElement(a)):
                os.remove(GetLastPathElement(a))
            shutil.move(a, ".")
            Warn("Moved " + a + " -> " + LibDir + "/" + GetLastPathElement(a))
    MyDistro.registerAgentService()
    # Preserve an existing config as .old before writing the fresh one;
    # both steps are best-effort by design.
    if os.path.isfile("/etc/waagent.conf"):
        try:
            os.remove("/etc/waagent.conf.old")
        except:
            pass
        try:
            os.rename("/etc/waagent.conf", "/etc/waagent.conf.old")
            Warn("Existing /etc/waagent.conf has been renamed to /etc/waagent.conf.old")
        except:
            pass
    SetFileContents("/etc/waagent.conf", MyDistro.waagent_conf_file)
    SetFileContents("/etc/logrotate.d/waagent", WaagentLogrotate)
    # Strip any existing ClientAliveInterval lines, then append ours.
    filepath = "/etc/ssh/sshd_config"
    ReplaceFileContentsAtomic(filepath, "\n".join(filter(lambda a: not
    a.startswith("ClientAliveInterval"),
                                                         GetFileContents(filepath).split(
                                                             '\n'))) + "\nClientAliveInterval 180\n")
    Log("Configured SSH client probing to keep connections alive.")
    ApplyVNUMAWorkaround()
    return 0
def GetMyDistro(dist_class_name=''):
    """
    Return an instance of the distro class named '<Distro>Distro' from this
    module's globals, or None when unsupported.

    NOTE: Logging is not initialized at this point.
    """
    if dist_class_name == '':
        if 'Linux' in platform.system():
            Distro = DistInfo()[0]
        else:  # I know this is not Linux!
            if 'FreeBSD' in platform.system():
                Distro = platform.system()
        # NOTE(review): on a platform that is neither Linux nor FreeBSD,
        # 'Distro' is never bound and the next line raises NameError --
        # confirm that such platforms are out of scope.
        Distro = Distro.strip('"')
        Distro = Distro.strip(' ')
        dist_class_name = Distro + 'Distro'
    else:
        Distro = dist_class_name
    if dist_class_name not in globals():
        ##print Distro + ' is not a supported distribution.'
        return None
    return globals()[dist_class_name]()  # the distro class inside this module.
def DistInfo(fullname=0):
    """
    Return distribution info: ['FreeBSD', release] on FreeBSD, otherwise a
    [name, version, id] triple. 'fullname' selects the full distribution
    name where available.

    platform.linux_distribution() and platform.dist() were removed in
    Python 3.8; on such interpreters this now falls back to parsing
    /etc/os-release instead of raising AttributeError.
    """
    if 'FreeBSD' in platform.system():
        release = re.sub(r'\-.*\Z', '', str(platform.release()))
        distinfo = ['FreeBSD', release]
        return distinfo
    if 'linux_distribution' in dir(platform):
        distinfo = list(platform.linux_distribution(full_distribution_name=fullname))
        distinfo[0] = distinfo[0].strip()  # remove trailing whitespace in distro name
        if os.path.exists("/etc/euleros-release"):
            distinfo[0] = "euleros"
        return distinfo
    if 'dist' in dir(platform):
        return platform.dist()
    # Python 3.8+ fallback: parse the freedesktop os-release file.
    info = {}
    if os.path.exists('/etc/os-release'):
        with open('/etc/os-release') as fh:
            for line in fh:
                line = line.strip()
                if '=' in line and not line.startswith('#'):
                    k, _, v = line.partition('=')
                    info[k] = v.strip().strip('"').strip("'")
    name = info.get('NAME' if fullname else 'ID', '') or platform.system()
    distinfo = [name.strip(), info.get('VERSION_ID', platform.release()), info.get('ID', '')]
    if os.path.exists("/etc/euleros-release"):
        distinfo[0] = "euleros"
    return distinfo
def PackagedInstall(buildroot):
    """
    Called from setup.py for use by RPM.
    Generic implementation creates directories and files /etc/waagent.conf,
    /etc/init.d/waagent, /usr/sbin/waagent, /etc/logrotate.d/waagent,
    /etc/sudoers.d/waagent under buildroot, copies the generated
    waagent.conf into place, and exits on failure.
    """
    distro = GetMyDistro()
    if distro is None:
        sys.exit(1)
    distro.packagedInstall(buildroot)
def LibraryInstall(buildroot):
    """Placeholder for library-mode installation; intentionally a no-op."""
    pass
def Uninstall():
    """
    Uninstall the agent service. Always returns 0.

    Copy RulesFiles back to their original locations.
    Delete agent-related files.
    Call RevertVNUMAWorkaround().
    """
    SwitchCwd()
    for a in RulesFiles:
        if os.path.isfile(GetLastPathElement(a)):
            try:
                shutil.move(GetLastPathElement(a), a)
                Warn("Moved " + LibDir + "/" + GetLastPathElement(a) + " -> " + a)
            except:
                # Best-effort restore; a failed move must not abort uninstall.
                pass
    MyDistro.unregisterAgentService()
    MyDistro.uninstallDeleteFiles()
    RevertVNUMAWorkaround()
    return 0
def Deprovision(force, deluser):
    """
    Remove configuration created by provisioning. Returns 0 on success,
    1 when the user declines the confirmation prompt.

    force   -- skip the interactive confirmation when True.
    deluser -- accepted for interface compatibility; NOTE(review): not
               referenced anywhere in this function -- confirm whether user
               deletion was meant to happen here.

    Disables the root password if Provisioning.DeleteRootPassword = 'y'.
    Stops the agent service, removes SSH host keys (configurable), resets
    the hostname, and deletes cached configuration files.
    """
    # Append blank line at the end of file, so the ctime of this file is changed every time
    Run("echo ''>>" + MyDistro.getConfigurationPath())
    SwitchCwd()
    print("WARNING! The waagent service will be stopped.")
    print("WARNING! All SSH host key pairs will be deleted.")
    print("WARNING! Cached DHCP leases will be deleted.")
    MyDistro.deprovisionWarnUser()
    delRootPass = Config.get("Provisioning.DeleteRootPassword")
    if delRootPass != None and delRootPass.lower().startswith("y"):
        print("WARNING! root password will be disabled. You will not be able to login as root.")
    # Python 2 compatibility shim: make input() behave like raw_input().
    try:
        input = raw_input
    except NameError:
        pass
    if force == False and not input('Do you want to proceed (y/n)? ').startswith('y'):
        return 1
    MyDistro.stopAgentService()
    # Remove SSH host keys (default: yes, unless explicitly disabled).
    regenerateKeys = Config.get("Provisioning.RegenerateSshHostKeyPair")
    if regenerateKeys == None or regenerateKeys.lower().startswith("y"):
        Run("rm -f /etc/ssh/ssh_host_*key*")
    # Remove root password
    if delRootPass != None and delRootPass.lower().startswith("y"):
        MyDistro.deleteRootPassword()
    # Remove distribution specific networking configuration
    MyDistro.publishHostname('localhost.localdomain')
    MyDistro.deprovisionDeleteFiles()
    return 0
def SwitchCwd():
    """
    Switch the current working directory to /var/lib/waagent (LibDir),
    creating it root-owned with mode 0700 if not present.
    """
    CreateDir(LibDir, "root", 0o700)
    os.chdir(LibDir)
def Usage():
    """Print waagent's command-line usage string and return 0."""
    options = ("[-verbose] [-force] "
               "[-help|-install|-uninstall|-deprovision[+user]|-version|-serialconsole|-daemon]")
    print("usage: " + sys.argv[0] + " " + options)
    return 0
def main():
    """
    Instantiate MyDistro, exit if distro class is not defined.
    Parse command-line arguments, exit with usage() on error.
    Instantiate ConfigurationProvider.
    Call appropriate non-daemon methods and exit.
    If daemon mode, enter the restart loop.
    """
    if GuestAgentVersion == "":
        print("WARNING! This is a non-standard agent that does not include a valid version string.")
    if len(sys.argv) == 1:
        sys.exit(Usage())
    LoggerInit('/var/log/waagent.log', '/dev/console')
    global LinuxDistro
    LinuxDistro = DistInfo()[0]
    global MyDistro
    MyDistro = GetMyDistro()
    if MyDistro == None:
        sys.exit(1)
    args = []
    conf_file = None
    global force
    force = False
    # First pass: options that take effect immediately; everything else is
    # deferred to the second pass below. Flags accept '-' or '/' prefixes.
    for a in sys.argv[1:]:
        if re.match("^([-/]*)(help|usage|\?)", a):
            sys.exit(Usage())
        elif re.match("^([-/]*)version", a):
            print(GuestAgentVersion + " running on " + LinuxDistro)
            sys.exit(0)
        elif re.match("^([-/]*)verbose", a):
            myLogger.verbose = True
        elif re.match("^([-/]*)force", a):
            force = True
        elif re.match("^(?:[-/]*)conf=.+", a):
            conf_file = re.match("^(?:[-/]*)conf=(.+)", a).groups()[0]
        elif re.match("^([-/]*)(setup|install)", a):
            sys.exit(MyDistro.Install())
        elif re.match("^([-/]*)(uninstall)", a):
            sys.exit(Uninstall())
        else:
            args.append(a)
    global Config
    Config = ConfigurationProvider(conf_file)
    # Apply logging configuration from waagent.conf.
    logfile = Config.get("Logs.File")
    if logfile is not None:
        myLogger.file_path = logfile
    logconsole = Config.get("Logs.Console")
    if logconsole is not None and logconsole.lower().startswith("n"):
        myLogger.con_path = None
    verbose = Config.get("Logs.Verbose")
    if verbose != None and verbose.lower().startswith("y"):
        myLogger.verbose = True
    global daemon
    daemon = False
    # Second pass: commands that needed Config to be loaded first.
    for a in args:
        if re.match("^([-/]*)deprovision\+user", a):
            sys.exit(Deprovision(force, True))
        elif re.match("^([-/]*)deprovision", a):
            sys.exit(Deprovision(force, False))
        elif re.match("^([-/]*)daemon", a):
            daemon = True
        elif re.match("^([-/]*)serialconsole", a):
            AppendToLinuxKernelCmdline("console=ttyS0 earlyprintk=ttyS0")
            Log("Configured kernel to use ttyS0 as the boot console.")
            sys.exit(0)
        else:
            print("Invalid command line parameter:" + a)
            sys.exit(1)
    if daemon == False:
        sys.exit(Usage())
    global modloaded
    modloaded = False
    # Daemon restart loop: log, (re)start, and retry every 15 seconds on
    # failure. NOTE(review): the loop body does not appear to invoke the
    # agent's run method -- as written it only logs and sleeps; confirm
    # whether the agent-run call was removed from this view.
    while True:
        try:
            SwitchCwd()
            Log(GuestAgentLongName + " Version: " + GuestAgentVersion)
            if IsLinux():
                Log("Linux Distribution Detected : " + LinuxDistro)
        except Exception as e:
            Error(traceback.format_exc())
            Error("Exception: " + str(e))
        Log("Restart agent in 15 seconds")
        time.sleep(15)
if __name__ == '__main__':
main()
|
sensor.py | import datetime
import json
import logging
import queue
import random
import threading
import time
from abc import ABC, abstractmethod
from lib.logger import logger
from lib.sds011 import SDS011
class Measurement:
    """A single particulate-matter reading (PM2.5 / PM10) taken at a Unix
    timestamp."""

    def __init__(self, timestamp: int, pm25: float, pm10: float):
        self.timestamp = timestamp  # Unix epoch seconds
        self.pm25 = pm25
        self.pm10 = pm10

    def toJSON(self) -> str:
        """Serialize the instance attributes to an indented JSON object."""
        return json.dumps(self, default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)

    def __str__(self):
        return f"t:{self.timestamp} pm25:{self.pm25} pm10: {self.pm10}"

    def __repr__(self):
        return self.__str__()

    def __eq__(self, other):
        # Robustness fix: comparing against a non-Measurement used to raise
        # AttributeError; returning NotImplemented lets Python fall back to
        # its default handling (== yields False).
        if not isinstance(other, Measurement):
            return NotImplemented
        return (self.timestamp == other.timestamp
                and self.pm25 == other.pm25
                and self.pm10 == other.pm10)

    def __hash__(self):
        # Defining __eq__ alone made instances unhashable; provide a hash
        # consistent with equality so measurements work in sets/dicts.
        return hash((self.timestamp, self.pm25, self.pm10))
class AbstractSensor(ABC):
    """Interface for PM sensors that buffer measurements in the background."""

    @abstractmethod
    def getAllAvailableData(self) -> "list[Measurement]":
        """Return a snapshot list of all currently buffered measurements."""
        pass

    @abstractmethod
    def startGatheringDataInBackground(self):
        """Begin collecting measurements on a background thread."""
        pass

    @abstractmethod
    def setNewQueueSize(self, size: int):
        """Resize the measurement buffer."""
        pass

    @abstractmethod
    def setDelayBetweenMeasurements(self, delay: int):
        """Set the wait (in minutes) between successive measurements."""
        pass
class MeasurementConfiguration:
    """Shared state handed to the Sensor's background gathering thread."""

    def __init__(self, sdsConnection: SDS011, queue: queue.Queue, minutesToWaitBetweenMeasurements: int, breakLoopLock: threading.Lock, tests: bool):
        self.sdsConnection = sdsConnection  # SDS011 serial connection wrapper
        self.queue = queue  # bounded FIFO of Measurement objects
        self.minutesToWaitBetweenMeasurements = minutesToWaitBetweenMeasurements
        self.breakLoopLock = breakLoopLock  # acquired => stop the gather loop
        self.tests = tests  # True skips the real sleeps (used by unit tests)
class Sensor(AbstractSensor):
    """
    SDS011-backed sensor that polls on a background thread and keeps the
    most recent readings in a bounded queue (oldest entries are dropped).
    """

    def __init__(self, sdsConnection: SDS011, queueSize: int, minutesToWaitBetweenMeasurements: int, tests=False):
        self.measurementConfiguration = MeasurementConfiguration(sdsConnection, queue.Queue(maxsize=queueSize), minutesToWaitBetweenMeasurements, threading.Lock(), tests)

    def stop(self):
        """Signal the background loop to exit after its current iteration.

        NOTE(review): calling stop() a second time blocks forever on the
        already-held lock -- confirm single-call usage with callers.
        """
        logger.info("Stopping loop")
        self.measurementConfiguration.breakLoopLock.acquire()
        logger.info("Stopped loop")

    def getAllAvailableData(self) -> "list[Measurement]":
        """Return a snapshot list of all buffered measurements."""
        return list(self.measurementConfiguration.queue.queue)

    def startGatheringDataInBackground(self):
        """Start the gathering loop on a background thread."""
        logger.info("starting gathering data")
        self.thread = threading.Thread(target=self.__start, args=(self.measurementConfiguration,))
        self.thread.start()

    def __start(self, measurementConfiguration: MeasurementConfiguration):
        # Loop until stop() acquires the lock. Each cycle: wake the sensor,
        # let the fan spin up (30 s), sample, put the sensor back to sleep.
        while not measurementConfiguration.breakLoopLock.locked():
            measurementConfiguration.sdsConnection.sleep(sleep=False)
            if not measurementConfiguration.tests:
                time.sleep(30)
            meas = measurementConfiguration.sdsConnection.query()
            measurementConfiguration.sdsConnection.sleep()
            if meas is None:
                logger.info("Empty response from sensor, skipping")
            else:
                if measurementConfiguration.queue.full():
                    measurementConfiguration.queue.get()  # drop the oldest reading
                measurementConfiguration.queue.put(Measurement(int(datetime.datetime.now().timestamp()), meas[0], meas[1]))
            if not measurementConfiguration.tests:
                # BUG FIX: for intervals < 1 minute, (60*m)-30 is negative
                # and time.sleep() raises ValueError; clamp at zero.
                time.sleep(max(0, (60 * measurementConfiguration.minutesToWaitBetweenMeasurements) - 30))

    def setDelayBetweenMeasurements(self, minutesToWaitBetweenMeasurements):
        """Set the wait (in minutes) between successive measurements."""
        self.measurementConfiguration.minutesToWaitBetweenMeasurements = minutesToWaitBetweenMeasurements

    def setNewQueueSize(self, size: int):
        """Replace the buffer with one of the given size, keeping as many
        of the newest buffered measurements as fit."""
        newQueue = queue.Queue(size)
        logger.info("copying queue")
        for el in list(self.measurementConfiguration.queue.queue):
            if newQueue.full():
                newQueue.get()
            newQueue.put(el)
        self.measurementConfiguration.queue = newQueue
class MockedDynamicSensor(AbstractSensor):
    """Test double that fabricates random measurements on a fixed timer."""

    def __init__(self, queueSize: int, sleepTime: int, randomUpperRange: float, randomLowerRange: float):
        self.measurementsQueue = queue.Queue(maxsize=queueSize)
        self.sleepTime = sleepTime  # seconds between fabricated readings
        self.randomUpperRange = randomUpperRange
        self.randomLowerRange = randomLowerRange

    def getAllAvailableData(self) -> "list[Measurement]":
        """Return a snapshot list of everything currently buffered."""
        return list(self.measurementsQueue.queue)

    def startGatheringDataInBackground(self):
        """Spawn the fabrication loop on a background thread (runs forever)."""
        worker = threading.Thread(target=self.__start, args=(self.measurementsQueue, self.sleepTime, self.randomUpperRange, self.randomLowerRange))
        worker.start()

    def __start(self, buffer: queue.Queue, pause: int, upper: float, lower: float):
        # Endless producer: drop the oldest entry when full, then append a
        # reading with uniformly random PM values.
        while True:
            if buffer.full():
                buffer.get()
            reading = Measurement(
                int(datetime.datetime.now().timestamp()),
                random.uniform(lower, upper),
                random.uniform(lower, upper))
            self.measurementsQueue.put(reading)
            time.sleep(pause)

    def setNewQueueSize(self, size: int):
        """Not supported by the mock; intentionally a no-op."""
        pass

    def setDelayBetweenMeasurements(self, delay: int):
        """Not supported by the mock; intentionally a no-op."""
        pass
|
GUI.py | import matplotlib
matplotlib.use('TkAgg')
from numpy import arange, sin, pi
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
# implement the default mpl key bindings
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
from tkinter import *
from bot import *
import tkinter as tk
from threading import Thread
class BotGUI():
    def __init__(self):
        """Build the bot backend, construct the GUI, start the background
        refresh thread, and enter the Tk main loop (blocks until the
        window is closed)."""
        self.setup_backend()
        self.setup_frontend()
        # NOTE(review): automatic_update is defined outside this view --
        # presumably it periodically refreshes the charts; confirm.
        self._updater_thread = Thread(target=self.automatic_update)
        self._updater_thread.start()
        self._root.mainloop()
###################################################################################################
# function: setup_backend
# purpose: initialize the bot architecture.
#
# description: This method should only be called in the constructor for this class unless
# The backend is purposefully destroyed. It will completely re-create the backend
###################################################################################################
    def setup_backend(self):
        """Initialize the bot architecture: a market-data socket subscribed
        to the 'matches' channel for four products, and the Bot itself
        trading LTC-USD.

        Should only be called from the constructor unless the backend is
        purposefully destroyed; it completely re-creates the backend.
        """
        socket = BotSocket(product=["BTC-USD", "LTC-USD", "ETH-USD", "BCH-USD"], channels=["matches"])
        self._bot = Bot("Betty", "LTC-USD", socket)
    ###################################################################################################
    # function: setup_frontend
    # purpose: Creates the GUI for the user.
    #
    # description: This method should only be called in the constructor for this class with no
    #              exceptions. The GUI consists of:
    #                  start/stop buttons
    #                  portfolio pie chart
    #                  price line chart + checkboxes and radio buttons to show the moving averages
    #                  refresh button for pie chart and line chart
    #                  radio buttons to choose which currency to trade.
    ###################################################################################################
    def setup_frontend(self):
        """Build the Tk main window: frames, control widgets and embedded matplotlib charts.

        Requires setup_backend() to have run first, since several widget
        callbacks are bound directly to methods of self._bot.
        """
        ####################
        # MAIN-WINDOW SETUP
        ####################
        self._root = Tk()
        self._root.title("Betty the trade bot")
        # Create a top and bottom frame to divide the window into 2 parts. The division is
        # invisible, but it lets us lay the widgets out properly.
        self._topframe = Frame(self._root)
        self._bottomframe = Frame(self._root)
        self._topframe.pack(side=TOP)
        self._bottomframe.pack(side=BOTTOM)
        # each half is split again: charts on the right, dashboards (controls) on the left
        self._pie_chart_frame = Frame(self._topframe)
        self._line_chart_frame = Frame(self._bottomframe)
        self._upper_dash_board = Frame(self._topframe)
        self._lower_dash_board = Frame(self._bottomframe)
        self._pie_chart_frame.pack(side=RIGHT)
        self._line_chart_frame.pack(side=RIGHT)
        self._upper_dash_board.pack(side=LEFT)
        self._lower_dash_board.pack(side=LEFT)
        #######################
        # WIDGET SETUP
        #######################
        # start/stop buttons wired directly to the bot's start/stop methods
        self._startButton = Button(self._upper_dash_board, text="Start Bot", bg="green", fg="black", command=self._bot.start)
        self._stopButton = Button(self._upper_dash_board, text="Stop Bot" , bg="red" , fg="white", command=self._bot.stop )
        self._startButton.grid(row=0, column=0)
        self._stopButton.grid( row=0, column=1)
        ##########################################
        # Choose currency to trade (radio buttons)
        ##########################################
        v = tk.StringVar()
        v.set("LTC-USD")
        myList = [("BTC-USD"), ("BCH-USD"), ("LTC-USD"), ("ETH-USD")]
        tk.Radiobutton(self._upper_dash_board, text=myList[0], padx=20, variable=v, value=myList[0], command=lambda: self._bot.set_currency(myList[0])).grid(row=1, column=0)
        tk.Radiobutton(self._upper_dash_board, text=myList[1], padx=20, variable=v, value=myList[1], command=lambda: self._bot.set_currency(myList[1])).grid(row=2, column=0)
        tk.Radiobutton(self._upper_dash_board, text=myList[2], padx=20, variable=v, value=myList[2], command=lambda: self._bot.set_currency(myList[2])).grid(row=3, column=0)
        tk.Radiobutton(self._upper_dash_board, text=myList[3], padx=20, variable=v, value=myList[3], command=lambda: self._bot.set_currency(myList[3])).grid(row=4, column=0)
        ###############################################################################################################
        # Allows user to decide the duration of their investments. This is done by comparing different moving averages.
        ###############################################################################################################
        duration = tk.StringVar()
        duration.set("long")
        tk.Label(self._upper_dash_board, text="Trade Duration").grid(row=1, column=2)
        tk.Radiobutton(self._upper_dash_board, text="Short", variable=duration, value="short", command=lambda: self._bot._trade_hands.set_trade_duration(duration.get())).grid(row=2, column=2)
        tk.Radiobutton(self._upper_dash_board, text="Medium",variable=duration, value="medium",command=lambda: self._bot._trade_hands.set_trade_duration(duration.get())).grid(row=3, column=2)
        tk.Radiobutton(self._upper_dash_board, text="Long", variable=duration, value="long", command=lambda: self._bot._trade_hands.set_trade_duration(duration.get())).grid(row=4, column=2)
        ################################################################
        # Allows the user to decide how sensitive they want sells to be.
        ################################################################
        self._sell_cushion_slider = Scale(self._upper_dash_board, from_=0, to=1, length=300, tickinterval=0.5, resolution=0.01, orient=HORIZONTAL, command=self._bot._trade_hands.set_sell_cushion)
        self._sell_cushion_slider.grid(row=5, column=0, columnspan=3)
        self._sell_cushion_slider.set(.3)
        #####################################
        # show position history in a list box
        #####################################
        scrollbar = Scrollbar(self._upper_dash_board, orient=VERTICAL)
        scrollbar.grid(row=0, column=6, rowspan=5)
        self._position_history_box = tk.Listbox(self._upper_dash_board, yscrollcommand=scrollbar.set)
        self._position_history_box.grid(row=0, column=3, columnspan=3, rowspan=5)
        ######################################################
        # Choose which averages to show on graph (check boxes)
        ######################################################
        self._average_type = StringVar()
        self._average_type.set("simple")
        # NOTE(review): the fixed-size parallel lists below should eventually be generated
        # from a single description of the available averages.
        self._CheckVars = [IntVar(), IntVar(), IntVar(), IntVar()]
        self._averages = [(" SMA 30", 30), (" SMA 10", 10), (" SMA 5", 5), (" SMA 1", 1)]
        i=0;
        # these widgets are check boxes for showing the individual average sizes.
        for string, size in self._averages:
            x = tk.Checkbutton(self._lower_dash_board, text = string, variable = self._CheckVars[i], onvalue = 1, offvalue = 0, height=1, width = 6, command= lambda:self.update_line_charts(self._CheckVars, self._averages, self._average_type))
            x.pack(side=BOTTOM)
            i+=1
        ########################################################
        # Set up the price chart and portfolio/trading chart
        ########################################################
        # NOTE(review): crypto_history below is never used in this method -- candidate for removal.
        crypto_history = self._bot._data_center._crypto_history
        self._line_chart_figure = Figure(figsize=(20, 3))
        self._price_plot = self._line_chart_figure.add_subplot(111)
        self._price_plot.set_xlabel("Time")
        self._price_plot.set_ylabel("Dollars")
        self._price_plot.set_title("Price vs. Time")
        self._portfolio_chart_figure = Figure(figsize=(20,3))
        self._portfolio_plot = self._portfolio_chart_figure.add_subplot(111)
        self._portfolio_plot.set_xlabel("Time")
        self._portfolio_plot.set_ylabel("Dollars")
        self._portfolio_plot.set_title("Portfolio Value vs. Time")
        # Embed the portfolio figure in the window (canvas + navigation toolbar).
        # NOTE(review): canvas.show() and NavigationToolbar2TkAgg are removed in modern
        # matplotlib (use draw() / NavigationToolbar2Tk) -- confirm the pinned version.
        canvas3 = FigureCanvasTkAgg(self._portfolio_chart_figure, master=self._line_chart_frame)
        canvas3.show()
        canvas3.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
        toolbar3 = NavigationToolbar2TkAgg(canvas3, self._line_chart_frame)
        toolbar3.update()
        canvas3._tkcanvas.pack(side=BOTTOM, fill=BOTH, expand=1)
        # Embed the price figure the same way.
        canvas = FigureCanvasTkAgg(self._line_chart_figure, master=self._line_chart_frame)
        canvas.show()
        canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
        toolbar = NavigationToolbar2TkAgg(canvas, self._line_chart_frame)
        toolbar.update()
        canvas._tkcanvas.pack(side=BOTTOM, fill=BOTH, expand=1)
        ########################################################
        # Set up the pie chart
        ########################################################
        portfolio = self._bot._data_center.get_portfolio()
        portfolio_keys = portfolio.keys()
        # only "...USD" keys are currency entries carrying a nested {"value": ...} dict
        labels = [key for key in portfolio_keys if "USD" in key]
        amounts = [portfolio[key]["value"] for key in portfolio_keys if "USD" in key]
        colors = ["gold", "green", "blue", "red", "purple"]
        explode = [0,0,0,0,0]
        self._pie_chart_figure = Figure(figsize=(5, 3.5), dpi=100) #we keep the pie chart figure
        self._pie_plot = self._pie_chart_figure.add_subplot(111) #we also keep the sub plot
        self._pie_plot.pie(amounts, explode=explode, labels=labels, colors=colors, autopct='%5.2f%%', shadow=True, startangle=140)[0] #plot the pie chart
        self._pie_chart_figure.gca().add_artist(matplotlib.patches.Circle((0,0),0.75,color='black', fc='white',linewidth=1.25)) #plot a circle over it to make a donut
        self._pie_plot.axis('equal')
        # Embed the pie figure in its frame.
        canvas2 = FigureCanvasTkAgg(self._pie_chart_figure, master=self._pie_chart_frame)
        canvas2.show()
        canvas2.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
        toolbar2 = NavigationToolbar2TkAgg(canvas2, self._pie_chart_frame)
        toolbar2.update()
        canvas2._tkcanvas.pack(side=TOP, fill=BOTH, expand=1)
        # Refresh button: redraws the graphs and pie chart on demand.
        self._refresh_button = Button(self._upper_dash_board, text="refresh graphics", bg="blue", fg="white", command= lambda: self.refresh_graphics(self._CheckVars, self._averages, self._average_type))
        self._refresh_button.grid(row=0, column=2)
###################################################################################################
# function: automatic_update
# purpose: refresh graphics automatically
#
# description: This method will constantly call the refresh_graphics method while the bot is
# running. It will update the graphs a coule of times each second.
###################################################################################################
def automatic_update(self):
while True:
if self._bot._running:
time.sleep(5)
self.refresh_graphics(self._CheckVars, self._averages, self._average_type)
###################################################################################################
# function: refresh_graphics
# purpose: refresh both the line graph and the pie chart
#
# description: This method is called when the refresh button is clicked, and also should be
# called automatically by another thread causing the plots to update periodically
###################################################################################################
def refresh_graphics(self, CheckVars, Average_list, average_type):
self.update_line_charts(CheckVars, Average_list, average_type)
self.update_pie_chart()
self.update_positions_history()
###################################################################################################
# function: update_positions_history
# purpose: show all past and current holdings
#
# description: This method will check for any trades that have been posted in the trade
# history, but not posted in the listbox
###################################################################################################
def update_positions_history(self):
trade_history = self._bot._data_center._trade_history
current_position = self._bot._trade_hands._long_position
self._position_history_box.delete(0, END)
for past_position in trade_history:
entry = past_position["entry_price"]
exit = past_position["exit_price"]
gain = ((exit-entry)/entry) * 100
msg = "{} {} {}%".format(str(entry), str(exit), str(gain))
self._position_history_box.insert(END, msg)
if current_position != None:
msg = str(current_position["entry_price"])
self._position_history_box.insert(END, msg)
    ###################################################################################################
    # function: update_line_chart
    # purpose: shows new data that was not shown the last time the chart was updated, and
    #          reacts to the average checkboxes being selected/deselected.
    #
    # description: This will replot the entire graph, taking into account user preferences of
    #              averages they wish to see.
    ###################################################################################################
    def update_line_charts(self, CheckVars, Average_list, average_type):
        """Replot the price chart (with selected moving averages) and the portfolio chart.

        CheckVars: list of Tk IntVars, one per entry in Average_list.
        Average_list: list of (label, window-size) tuples.
        average_type: Tk StringVar naming the key into the MA collection.
        """
        try:
            ### stuff dealing with the price plot
            self._price_plot.clear()
            self._portfolio_plot.clear()
            ma_collection = self._bot._data_center._ma_collection
            crypto_history = self._bot._data_center._crypto_history
            portfolio_history = self._bot._data_center._portfolio_history
            trade_history = self._bot._data_center._trade_history
            # one line per checked moving-average; unchecked boxes plot an empty series
            for i in range(len(CheckVars)):
                if CheckVars[i].get() == 1:
                    times = [j["time"] for j in ma_collection[Average_list[i][1]]]
                    #times = matplotlib.dates.date2num(times)
                    values = [j[average_type.get()] for j in ma_collection[Average_list[i][1]]]
                    if len(times) != len(values):
                        print("Could not update graph because x and y dimensions were not the same for the ", Average_list[i][0], ".")
                        return
                    self._price_plot.plot_date(times, values)[0]
                else:
                    self._price_plot.plot_date([],[])
            times = [i["time"] for i in crypto_history[self._bot.currency()]]
            prices = [i["price"] for i in crypto_history[self._bot.currency()]]
            if len(times) != len(prices):
                print("Could not update graph because x and y dimensions were not the same for the price line")
                return
            self._prices_line = self._price_plot.plot_date(times, prices)[0]
            # plot horizontal sell line at the trailing-stop price of the open position
            current_position = self._bot._trade_hands._long_position
            if current_position != None:
                self._price_plot.axhline(y=current_position["high_price"] * (1-self._bot._trade_hands._sell_cushion/100))
            self._line_chart_figure.autofmt_xdate()
            ### stuff dealing with the portfolio plot
            portfolio_history = self._bot._data_center._portfolio_history
            # zero-total snapshots are placeholders; skip them
            portfolio_values = [element["total"] for element in portfolio_history if element["total"]!=0]
            times = [element["time" ] for element in portfolio_history if element["total"]!=0]
            if len(portfolio_values) != len(times):
                return
            self._portfolio_plot.clear()
            self._portfolio_line = self._portfolio_plot.plot_date(times, portfolio_values)
            self._portfolio_chart_figure.autofmt_xdate()
            trade_history = self._bot._data_center._trade_history
            # green/red vertical markers for trade entries/exits
            for trade in trade_history:
                self._portfolio_plot.axvline(x=trade["entry_time"], color="g")
                self._portfolio_plot.axvline(x=trade["exit_time"], color="r")
            if current_position != None:
                self._portfolio_plot.axvline(x=current_position["entry_time"], color="g")
        # NOTE(review): bare except hides real bugs, and if the failure happened before
        # crypto_history was assigned, the handler itself raises NameError -- narrow this.
        except:
            x_max = crypto_history[self._bot.currency()][-1]
            x_min = crypto_history[self._bot.currency()][0]
            self._portfolio_plot.set_xlim([x_min, x_max])
            self._price_plot.set_xlim([x_min, x_max])
            return
###################################################################################################
# function: update_pie_chart
# purpose: re-plots the portfolio pie-chart
#
# description: re-plots the pie-chart by first clearing all data and then plotting again.
###################################################################################################
def update_pie_chart(self):
#----------------------------Setup up pie chart ----------------------------
try:
portfolio = self._bot._data_center._portfolio_history[-1]
except:
return
portfolio_keys = portfolio.keys()
labels = [key for key in portfolio_keys if "USD" in key]
amounts = [portfolio[key]["value"] for key in portfolio_keys if "USD" in key]
colors = ["gold", "green", "blue", "red", "purple"]
explode = [0,0,0,0,0]
self._pie_plot.clear()
self._pie_plot.pie(amounts, explode=explode, labels=labels, colors=colors, autopct='%5.2f%%', shadow=True, startangle=140)[0]
self._pie_chart_figure.gca().add_artist(matplotlib.patches.Circle((0,0),0.75,color='black', fc='white',linewidth=1.25))
self._pie_plot.axis('equal')
def main():
    """Entry point: construct the GUI (which builds both backend and frontend)."""
    GUI = BotGUI()


# Guarded so that importing this module no longer launches the GUI as a side effect.
if __name__ == "__main__":
    main()
|
utils.py | try:
from Crypto import Random
from Crypto.Cipher import AES
except:
from Cryptodome import Random
from Cryptodome.Cipher import AES
from colorama import init, Fore, Back, Style
from datetime import datetime
from selenium.webdriver import Chrome, ChromeOptions
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from webhook import DiscordWebhook, DiscordEmbed
from chromedriver_py import binary_path as driver_path
import json, platform, darkdetect, random, settings, threading, hashlib, base64
normal_color = Fore.CYAN  # default console color used by BirdLogger.normal
# NOTE(review): hard-coded, base64-encoded key ("bruhmoment") ships with the source --
# anyone with the code can decrypt stored card numbers. Consider a user-supplied key.
e_key = "YnJ1aG1vbWVudA==".encode()
BLOCK_SIZE=16  # AES block size in bytes; also used as the IV length
# colorama needs convert=True on Windows to translate ANSI escapes into Win32 calls
if platform.system() == "Windows":
    init(convert=True)
else:
    init()
print(normal_color + "Welcome To Bird Bot")
class BirdLogger:
    """Colored console logger for per-task status messages."""

    def ts(self):
        """Return the current timestamp with the microseconds trimmed off."""
        return str(datetime.now())[:-7]

    def _emit(self, color, task_id, msg):
        # single formatting path shared by every log level
        print(color + "[{}][TASK {}] {}".format(self.ts(), task_id, msg))

    def normal(self, task_id, msg):
        """Log a routine status line in the default color."""
        self._emit(normal_color, task_id, msg)

    def alt(self, task_id, msg):
        """Log a secondary/alternate status line."""
        self._emit(Fore.MAGENTA, task_id, msg)

    def error(self, task_id, msg):
        """Log an error line in red."""
        self._emit(Fore.RED, task_id, msg)

    def success(self, task_id, msg):
        """Log a success line in green."""
        self._emit(Fore.GREEN, task_id, msg)
class Encryption:
    """AES-CFB encrypt/decrypt helpers keyed by an MD5 digest of e_key."""

    def encrypt(self, msg):
        """Encrypt *msg* (str) and return base64(IV || ciphertext)."""
        iv = Random.new().read(BLOCK_SIZE)
        cipher = AES.new(self.trans(e_key), AES.MODE_CFB, iv)
        return base64.b64encode(iv + cipher.encrypt(msg.encode("utf-8")))

    def decrypt(self, msg):
        """Reverse encrypt(): base64-decode, split off the IV, then decrypt."""
        raw = base64.b64decode(msg)
        iv, ciphertext = raw[:BLOCK_SIZE], raw[BLOCK_SIZE:]
        return AES.new(self.trans(e_key), AES.MODE_CFB, iv).decrypt(ciphertext)

    def trans(self, key):
        # MD5 collapses an arbitrary-length key into a fixed 16-byte AES key
        return hashlib.md5(key).digest()
def return_data(path):
    """Load and return the JSON document stored at *path*.

    The 'with' statement closes the file automatically; the original's
    explicit close() inside the with-block was redundant and is removed.
    """
    with open(path, "r") as file:
        return json.load(file)
def write_data(path,data):
    """Serialize *data* as JSON into the file at *path* (overwriting it).

    The 'with' statement closes the file automatically; the original's
    explicit close() inside the with-block was redundant and is removed.
    """
    with open(path, "w") as file:
        json.dump(data, file)
def get_profile(profile_name):
    """Return the stored profile matching *profile_name*, or None if absent.

    The card number is decrypted in place; a ValueError from decryption
    (e.g. the value was stored in plain text) leaves it untouched.
    """
    for profile in return_data("./data/profiles.json"):
        if profile["profile_name"] != profile_name:
            continue
        try:
            decrypted = Encryption().decrypt(profile["card_number"].encode("utf-8"))
            profile["card_number"] = decrypted.decode("utf-8")
        except ValueError:
            # value was not encrypted -- keep it as-is
            pass
        return profile
    return None
def get_proxy(list_name):
    """Pick a random proxy from the named proxy list.

    Returns False when no real list is selected ("Proxy List" placeholder or
    "None"), None when the named list does not exist, otherwise a
    requests-style proxy dict.
    """
    if list_name == "Proxy List" or list_name == "None":
        return False
    for proxy_list in return_data("./data/proxies.json"):
        if proxy_list["list_name"] != list_name:
            continue
        chosen = random.choice(proxy_list["proxies"].splitlines())
        return format_proxy(chosen)
    return None
def format_proxy(proxy):
    """Convert "ip:port" or "ip:port:user:pass" into a requests proxy dict."""
    parts = proxy.split(":")
    try:
        ip, port, user, passw = parts[0], parts[1], parts[2], parts[3]
    except IndexError:
        # no credentials supplied -- plain ip:port proxy
        return {"http": "http://" + proxy, "https": "https://" + proxy}
    return {
        "http": "http://{}:{}@{}:{}".format(user, passw, ip, port),
        "https": "https://{}:{}@{}:{}".format(user, passw, ip, port)
    }
def send_webhook(webhook_type,site,profile,task_id,image_url):
    """Post a status embed to the configured Discord webhook (best-effort).

    :param webhook_type: "OP" (order placed), "B" (complete order in browser)
                         or "PF" (payment failed); any other value is ignored.
    """
    if settings.webhook == "":
        return
    webhook = DiscordWebhook(url=settings.webhook, username="Bird Bot", avatar_url="https://i.imgur.com/fy26LbM.png")
    if webhook_type == "OP":
        if not settings.webhook_on_order:
            return
        embed = DiscordEmbed(title="Order Placed",color=0x34c693)
    elif webhook_type == "B":
        if not settings.webhook_on_browser:
            return
        embed = DiscordEmbed(title="Complete Order in Browser",color=0xf2a689)
    elif webhook_type == "PF":
        if not settings.webhook_on_failed:
            return
        embed = DiscordEmbed(title="Payment Failed",color=0xfc5151)
    else:
        # unknown type: previously fell through to a NameError on the unbound
        # 'embed' -- bail out instead
        return
    embed.set_footer(text="Via Bird Bot",icon_url="https://i.imgur.com/fy26LbM.png")
    embed.add_embed_field(name="Site", value=site,inline=True)
    embed.add_embed_field(name="Profile", value=profile,inline=True)
    embed.add_embed_field(name="Task ID", value=task_id,inline=True)
    embed.set_thumbnail(url=image_url)
    webhook.add_embed(embed)
    try:
        webhook.execute()
    except Exception:
        # delivery is best-effort; never let a webhook failure kill a task
        pass
def open_browser(link,cookies):
    """Launch the checkout browser on a background thread so the caller is not blocked."""
    browser_thread = threading.Thread(target=start_browser, args=(link, cookies))
    browser_thread.start()
def start_browser(link,cookies):
    """Open *link* in a stealth-configured Chrome, inject *cookies*, then reload.

    Intended to run on its own thread (see open_browser).
    """
    caps = DesiredCapabilities().CHROME
    # 'eager' returns control after DOMContentLoaded instead of full page load
    caps["pageLoadStrategy"] = "eager"
    chrome_options = ChromeOptions()
    # hide the "controlled by automated software" banner / automation switches
    chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
    chrome_options.add_experimental_option("useAutomationExtension", False)
    driver = Chrome(desired_capabilities=caps, executable_path=driver_path, options=chrome_options)
    # mask navigator.webdriver before any page script runs, to evade bot detection
    driver.execute_cdp_cmd(
        "Page.addScriptToEvaluateOnNewDocument",
        {
            "source": """
            Object.defineProperty(window, 'navigator', {
              value: new Proxy(navigator, {
                has: (target, key) => (key === 'webdriver' ? false : key in target),
                get: (target, key) =>
                  key === 'webdriver'
                    ? undefined
                    : typeof target[key] === 'function'
                      ? target[key].bind(target)
                      : target[key]
              })
            })
            """
        },
    )
    # first load establishes the domain so cookies can be set; second load applies them
    driver.get(link)
    for cookie in cookies:
        driver.add_cookie({
            "name": cookie["name"],
            "value" : cookie["value"],
            "domain" : cookie["domain"]
        })
    driver.get(link)
|
test_wrapper.py | import os
import sys
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from threading import Thread
from time import sleep
from finorch.wrapper.wrapper import run
from tests.unit.local.test_local_client import SCRIPT
from finorch.utils.cd import cd
def test_run():
    """End-to-end test of finorch.wrapper.wrapper.run() executed on a worker thread.

    Each scenario runs run() with patched sys.argv and captured stdout/stderr,
    then inspects the wrapper log and the captured output files.
    """
    exc, stdout, stderr, orig_stdout, orig_stderr = None, None, None, None, None

    def run_thread(argv, wd):
        # Invoke run() with the given argv inside working directory wd, capturing
        # stdout/stderr into temp files and recording any raised exception.
        nonlocal exc, stdout, stderr, orig_stdout, orig_stderr
        exc = None
        # Save argv and output fds
        orig_args = sys.argv
        orig_stdout = sys.stdout
        orig_stderr = sys.stderr
        with NamedTemporaryFile(delete=False) as out, NamedTemporaryFile(delete=False) as err:
            stdout = out.name
            stderr = err.name
            sys.stdout = open(out.name, 'w')
            sys.stderr = open(err.name, 'w')
        try:
            sys.argv = argv
            with cd(wd):
                run()
        except Exception as e:
            exc = e
        finally:
            # Make sure output is flushed
            sys.stdout.flush()
            sys.stderr.flush()
            # Restore argv and output fds
            sys.argv = orig_args
            sys.stdout = orig_stdout
            sys.stderr = orig_stderr
    # Scenario 1: wrong number of parameters (too few, then too many)
    for argv in [[None], [None, 'notreal', 'notreal']]:
        with TemporaryDirectory() as tmpdir:
            t = Thread(target=run_thread, args=(argv, tmpdir,))
            t.start()
            t.join()
            # Should be no exception as it's caught internally
            assert not exc
            with open(str(Path(tmpdir) / 'wrapper.log'), 'r') as f:
                lines = f.readlines()
                assert lines[0].split('-')[-1].strip() == "Error starting wrapper"
                assert lines[-2].split('-')[-1].strip() == "!! Exception: Incorrect number of parameters"
            # Make sure output is flushed
            sys.stdout.flush()
            sys.stderr.flush()
            # Read the stdout and stderr files
            out = open(stdout, 'r').read()
            # Clean up stdout/stderr
            os.unlink(stdout)
            os.unlink(stderr)
            # Check the stdout/stderr outputs
            out = out.splitlines()
            assert out[0] == 'error'
            assert out[-1] == '=EOF='
    # Scenario 2: unknown session type
    with TemporaryDirectory() as tmpdir:
        t = Thread(target=run_thread, args=([None, 'notreal'], tmpdir,))
        t.start()
        t.join()
        # Should be no exception as it's caught internally
        assert not exc
        with open(str(Path(tmpdir) / 'wrapper.log'), 'r') as f:
            lines = f.readlines()
            assert lines[0].split('-')[-1].strip() == "Error starting wrapper"
            assert lines[-2].split('-')[-1].strip() == "!! Exception: Session type notreal does not exist."
        # Read the stdout and stderr files
        out = open(stdout, 'r').read()
        # Clean up stdout/stderr
        os.unlink(stdout)
        os.unlink(stderr)
        # Check the stdout/stderr outputs
        out = out.splitlines()
        assert out[0] == 'error'
        assert out[-1] == '=EOF='
    # Scenario 3: valid local session running a real script
    with TemporaryDirectory() as tmpdir:
        t = Thread(target=run_thread, args=([None, 'local'], tmpdir,))
        with open(str(Path(tmpdir) / 'script.k'), 'w') as f:
            f.write(SCRIPT)
        t.start()
        t.join()
        # Wait for the session to complete
        sleep(0.5)
        # Read the stdout and stderr files
        out = open(stdout, 'r').read()
        err = open(stderr, 'r').read()
        # Clean up stdout/stderr
        os.unlink(stdout)
        os.unlink(stderr)
        # NOTE(review): these asserts require stderr to be NON-empty and stdout empty,
        # but the original comment said the opposite ("no errors") -- confirm intent.
        assert err
        assert not out
|
test_server.py | # -*- coding: utf-8 -*-
"""Tests for pyss3.server."""
import pyss3.server as s
import threading
import argparse
import socket
import pytest
import pyss3
import json
import sys
from os import path
from pyss3 import SS3
from pyss3.util import Dataset, Print
HTTP_REQUEST = "%s %s HTTP/1.1\r\nContent-Length: %d\r\n\r\n%s"  # method, path, content-length, body
RECV_BUFFER = 1024 * 1024  # 1MB
PYTHON3 = sys.version_info[0] >= 3
DATASET_FOLDER = "dataset"
DATASET_FOLDER_MR = "dataset_mr"
DATASET_MULTILABEL_FOLDER = "dataset_ml"
ADDRESS, PORT = "localhost", None  # PORT is assigned once the server starts listening
LT = s.Live_Test
dataset_path = path.join(path.abspath(path.dirname(__file__)), DATASET_FOLDER)
dataset_path_mr = path.join(path.abspath(path.dirname(__file__)), DATASET_FOLDER_MR)
dataset_path_multilabel = path.join(path.abspath(path.dirname(__file__)), DATASET_MULTILABEL_FOLDER)
x_train, y_train = None, None
clf = None
pyss3.set_verbosity(0)
# Module-level setup: train one small SS3 model so every test can share it.
x_train, y_train = Dataset.load_from_files(dataset_path_mr)
x_train, y_train = Dataset.load_from_files(dataset_path, folder_label=False)
clf = SS3()
clf.fit(x_train, y_train)
LT.serve() # no model error
LT.set_model(clf)
LT.get_port()
class MockCmdLineArgs:
    """Mocked command-line arguments."""
    quiet = True           # suppress console output
    MODEL = "name"         # model name passed on the command line
    path = dataset_path    # dataset location
    path_labels = None     # no separate labels file
    label = 'folder'       # labels come from folder names
    port = 0               # 0 = let the server pick a free port
@pytest.fixture()
def mockers(mocker):
    """Set mockers up."""
    # Stub out everything main() would touch: the browser, the server,
    # model loading, and argparse (which returns the MockCmdLineArgs stand-in).
    mocker.patch("webbrowser.open")
    mocker.patch.object(LT, "serve")
    mocker.patch.object(SS3, "load_model")
    mocker.patch.object(argparse.ArgumentParser, "add_argument")
    mocker.patch.object(argparse.ArgumentParser,
                        "parse_args").return_value = MockCmdLineArgs
@pytest.fixture(params=[0, 1, 2, 3, 4, 5, 6, 7, 8])
def test_case(request, mocker):
    """Argument values generator for test_live_test(test_case)."""
    mocker.patch("webbrowser.open")
    # Cases 0-2 and 8 pre-load a test set in different ways;
    # all other cases reset the server socket instead.
    if request.param == 0:
        LT.set_testset_from_files(dataset_path, folder_label=False)
    elif request.param == 1:
        LT.set_testset_from_files(dataset_path_mr, folder_label=True)
    elif request.param == 2:
        LT.set_testset(x_train, y_train)
    elif request.param == 8:
        LT.set_testset_from_files_multilabel(dataset_path_multilabel + "/train_files",
                                             dataset_path_multilabel + "/file_labels.tsv")
    else:
        LT.__server_socket__ = None
    yield request.param
def http_request(path, body='', get=False, as_bytes=False):
    """Create a basic HTTP request message.

    :param get: build a GET request instead of a POST.
    :param as_bytes: return the message encoded as bytes.
    """
    method = "GET" if get else "POST"
    message = HTTP_REQUEST % (method, path, len(body), body)
    if as_bytes:
        return message.encode()
    return message
def http_response_body(sock):
    """Return the complete body of the HTTP response read from *sock*."""
    chunk = sock.recv(RECV_BUFFER).decode()
    expected_length = s.get_http_contlength(chunk)
    body = s.get_http_body(chunk)
    # keep reading until the advertised Content-Length is satisfied;
    # an empty recv() (peer closed the connection) also ends the loop
    while len(body) < expected_length and chunk:
        chunk = sock.recv(RECV_BUFFER).decode()
        body += chunk
    return body  # url_decode(body)
def send_http_request(path, body='', get=False, json_rsp=True):
    """Send an HTTP request to the Live Test Server and return its body.

    The body is JSON-decoded when json_rsp is True and the body is non-empty.
    """
    connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    connection.connect((ADDRESS, PORT))
    connection.sendall(http_request(path, body, get, as_bytes=True))
    response_body = http_response_body(connection)
    connection.close()
    return json.loads(response_body) if json_rsp and response_body else response_body
def test_http_helper_functions():
    """Test for pyss3.server HTTP helper function."""
    assert s.content_type("js") == "application/javascript"
    assert s.content_type("non-existing") == "application/octet-stream"
    request_path = "/the/path"
    request_body = "the body"
    # path sanitizing must strip directory-traversal ("..") components
    assert s.parse_and_sanitize("../../a/path/../../")[0][-17:] == "a/path/index.html"
    assert s.parse_and_sanitize("/")[0][-10:] == "index.html"
    # round-trip: parse back what http_request() builds
    assert s.get_http_path(http_request(request_path)) == request_path
    assert s.get_http_body(http_request("", request_body)) == request_body
    assert s.get_http_contlength(http_request("", request_body)) == len(request_body)
def test_live_test(test_case):
    """Test the HTTP Live Test Server.

    test_case < 3 runs a real server and exercises the HTTP endpoints;
    cases >= 3 only exercise the serve() argument-validation paths.
    """
    global PORT
    if test_case < 3:
        PORT = LT.start_listening()
    else:
        Print.error = lambda _: None # do nothing
    serve_args = {
        "x_test": x_train if test_case >= 2 else None,
        "y_test": y_train if test_case == 2 else None,
        "quiet": test_case != 0,
        "browser": test_case == 0
    }
    if test_case == 4:
        # multilabel ground truth: half the docs labelled A+B, half labelled C
        serve_args["y_test"] = [["labelA", "labelB"]] * (len(x_train) // 2)
        serve_args["y_test"] += [["labelC"]] * (len(x_train) // 2)
    elif test_case == 5:
        # deliberately wrong-sized label list
        serve_args["y_test"] = ["label"]
    elif test_case == 6:
        serve_args["y_test"] = y_train
        serve_args["def_cat"] = 'most-probable'
    elif test_case == 7:
        serve_args["y_test"] = y_train
        # serve_args["def_cat"] = 'xxxxx' # raise ValueError
    elif test_case == 8:
        serve_args["x_test"] = None
    if PYTHON3:
        # daemon thread so a hung server cannot block test-suite shutdown
        threading.Thread(target=LT.serve, kwargs=serve_args, daemon=True).start()
    else:
        return
        # threading.Thread(target=LT.serve, kwargs=serve_args).start()
    if test_case >= 3:
        return
    # empty message
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((ADDRESS, PORT))
    sock.sendall(b'')
    sock.close()
    # decode error
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((ADDRESS, PORT))
    sock.sendall(b'\x01\x0E\xFF\xF0\x02\x0F\xE1')
    sock.close()
    # 404 error
    send_http_request("/404")
    # ack
    send_http_request("/ack")
    # get_info
    r = send_http_request("/get_info")
    assert r["model_name"] == clf.get_name()
    cats = r["categories"]
    docs = r["docs"]
    assert len(cats) == 8 + 1
    # assert len(docs) == len(cats) - 1
    # assert len(docs[cats[0]]["path"]) == 100
    # classify (case 0 uses an oversized body to exercise chunked reads)
    r = send_http_request(
        "/classify",
        "this is an android mobile " * (1024 * 4 if test_case == 0 else 1)
    )
    assert r["ci"][r["cvns"][0][0]] == "science&technology"
    # get_doc
    for c in docs:
        r = send_http_request("/get_doc", docs[c]["path"][1])
        assert len(r["content"][:2]) == 2
    # GET 404
    send_http_request("/404", get=True, json_rsp=False)
    # GET index.html
    r = send_http_request("/", get=True, json_rsp=False)
    assert "<html>" in r
def test_main(mockers, mocker):
    """Test the main() function.

    NOTE(review): the actual s.main() call is disabled because it made the
    suite flaky; the fixtures still verify the mocks can be installed.
    """
    if not PYTHON3:
        return
    # s.main() # <- causing the test to randomly fail
|
notify.py | import os
import threading
import time
class Notify():
    """Publishes status text to the GNOME taskbar extension over gdbus.

    In test mode (for_test=True) dbus commands are collected in self.messages
    instead of being executed, and no watchdog thread is started.
    """
    def __init__(self, context, for_test=False):
        if for_test:
            context.notify = self.fake_notify
            self.messages = []
        else:
            context.notify = self.notify
            self.last_notify = time.time()
            # daemon=True so this endless watchdog loop cannot keep the
            # interpreter alive after the main program exits (the original
            # non-daemon thread made a clean shutdown impossible)
            threading.Thread(target=self.guard_timeout, daemon=True).start()
    #TODO, bug where notify is called twice in loop and it overwrites
    #concat messages as easiest solution, can use the afterware function, gives all mw chance to call notify
    #also probably a good idea to use python package for dbus instead of this because of os.system (security or even accidental transcript of "&& rm file")
    def notify(self, msg, method="setText"):
        """Send *msg* to the taskbar via gdbus and remember when we last did so."""
        self.last_notify = time.time()
        # NOTE(review): msg is interpolated into a shell command unescaped -- a quote
        # or $(...) in msg reaches the shell. Prefer subprocess.run with an argument
        # list (or a real dbus binding) over os.system.
        dbusCommand = "gdbus call --session --dest org.gnome.Shell --object-path /com/gweisman/TextInTaskBar --method com.gweisman.TextInTaskBar.%s '%s'" % (method, msg,)
        os.system(dbusCommand)
    def fake_notify(self, msg, method="setText"):
        """Test double for notify(): record the command instead of running it."""
        dbusCommand = "gdbus call --session --dest org.gnome.Shell --object-path /com/gweisman/TextInTaskBar --method com.gweisman.TextInTaskBar.%s '%s'" % (method, msg,)
        self.messages.append(dbusCommand)
    def guard_timeout(self):
        """Background loop: clear the taskbar text 5 seconds after the last notify."""
        while True:
            #clear messages after 5 seconds
            if time.time() - self.last_notify > 5:
                self.notify('', method='setText')
            time.sleep(1)
    def middleware(self, nxt):
        """Return a middleware handler reacting to 'notify'/'clear_notify' keys in context.msg."""
        def handle(context):
            msg = context.msg
            if 'clear_notify' in msg:
                self.notify('', method='setText')
                self.notify('', method='setSecondary')
                self.notify('', method='setParser')
            if 'notify' in msg:
                self.notify(msg['notify'], method='setText')
            return nxt(context)
        return handle
|
dupe_image_lib.py | import collections
import copy
import datetime
import imagehash
import json
import traceback
import multiprocessing as mp
from pathlib import Path
from PIL import Image
class ImageStruct:
    def __init__(self, directory: Path, allowed_cpu_cores: int, pyqt_signals: dict):
        """
        An ImageStruct object, manages all image data and metadata.
        :param directory: pathlib Path object.
        :param allowed_cpu_cores: number of worker processes to use when hashing.
        :param pyqt_signals: Dictionary that contains PyQt pyqtSignal signallers.
        """
        self.directory = directory
        # populated by load_data()/generate_data(); both stay None until then
        self.metadata, self.image_data = None, None
        self.allowed_cpu_cores = allowed_cpu_cores
        self.pyqt_signal_dict = pyqt_signals
def load_data(self):
"""
Loads a json file from disk and stores the data in a ImageStruct object.
:return: No return value
"""
if Path(self.directory, "hash_data").is_dir() and \
Path(self.directory, "hash_data", "fp_hash_data.json").is_file():
try:
with open(Path(self.directory, "hash_data", "fp_hash_data.json"), "r") as json_file:
json_data = json.load(json_file)
json_file.close()
for image_data_dict in json_data["image_data"]:
json_data["image_data"][image_data_dict]["hash_list"] = \
list(map(lambda x: imagehash.hex_to_hash(x),
json_data["image_data"][image_data_dict]["hash_list"]))
# using imagehash.hex_to_hash to reverse the encoding.
self.metadata = json_data["metadata"]
self.image_data = copy.deepcopy(json_data["image_data"])
if Path(self.metadata["directory"]) != self.directory:
self.pyqt_signal_dict["text_log"].emit(
"Warning: the given directory and the loaded directory are not the same!")
except WindowsError as e:
print(f"Error loading hashes: {e}")
traceback.print_exc()
    def generate_data(self, file_list: list, grid_density=10):
        """
        Generates a dict that contains a list of hashes, and creates new metadata in the ImageStruct object.
        Multithreading processing of images!
        :param file_list: takes a list of files.
        :param grid_density: takes an integer value as the density of grid squares of hashes generated.
        :return: No return value
        """
        self.pyqt_signal_dict["text_log"].emit("Loading image hashes...")
        if mp.cpu_count() > 2:
            # parallel path: fan the file list out over worker processes
            # NOTE(review): ProcessManager / split_list / merge_dicts are defined elsewhere
            # in this module -- their behavior is assumed from usage here.
            proc_manager = ProcessManager(self.pyqt_signal_dict)
            num_proc = self.allowed_cpu_cores
            workload = split_list(file_list, num_proc)
            self.pyqt_signal_dict["text_log"].emit(f"""{"-" * 30}
CPU COUNT: {mp.cpu_count()}, PROCESS COUNT: {num_proc}""")
            for pid, work_list in enumerate(workload):
                self.pyqt_signal_dict["text_log"].emit(f"Starting process {pid}...")
                proc_manager.run(pid, self.generate_data_func, self.directory, work_list, grid_density)
            self.pyqt_signal_dict["text_log"].emit(f"""Returning from processes...
{"-"*30}""")
            return_dicts = proc_manager.wait()
            img_data = merge_dicts(return_dicts)
        else:
            # low-core machines hash inline on the calling thread
            img_data = self.generate_data_func(self.directory, file_list,
                                               grid_density, pyqt_signals=self.pyqt_signal_dict)
        self.image_data = img_data
        self.metadata = {"directory": str(self.directory),
                         "time_of_creation": f"{datetime.datetime.now().strftime('%d/%m/%Y %H:%M:%S')}",
                         # dd/mm/yyyy hh:mm:ss
                         "last_time_modified": f"{datetime.datetime.now().strftime('%d/%m/%Y %H:%M:%S')}",
                         "grid_density": grid_density}
    @staticmethod
    def generate_data_func(directory: Path, file_list: list, grid_density: int, queue=None, pid=None, pyqt_signals=None):
        """Hash every image in *file_list*: one average hash per grid cell plus a whole-image hash.

        Runs either inside a worker process (queue/pid given: results and progress go
        through the queue) or inline (pyqt_signals given: progress emitted directly).
        Returns {filename: {"filename", "size", "average_hash", "hash_list"}}.
        """
        output_hashes = dict()
        for num, image in enumerate(file_list, start=1):
            _ = Image.open(Path(directory, image))
            x, y = _.size
            hash_list = list()
            for x_grid in range(grid_density): # -1 since we're going by intersections of the grid, not the grid itself
                for y_grid in range(grid_density):
                    hash_list.append(imagehash.average_hash(_.crop(
                        ((x_grid * (x / grid_density)), (y_grid * (y / grid_density)),
                         ((x_grid + 1) * (x / grid_density)), ((y_grid + 1) * (y / grid_density)))
                    )))
                    # [left, upper, right, lower] for the PIL Image.crop() method. OLD
                    # Now it generates a grid, where each part of the grid is averaged. NEW
                    # looking at small sections of the image vs. the whole image
                    # since the bg shouldn't change that much
            output_hashes[image] = {"filename": image, "size": _.size, "average_hash": str(imagehash.average_hash(_)),
                                    "hash_list": hash_list}
            _.close()
            # report progress either through the IPC queue or the direct PyQt signal
            if queue is not None:
                queue.put(Msg("pyqt_signal", ["progress_bar", round(100 * num / len(file_list))]))
            else:
                pyqt_signals["progress_bar"].emit(round(100 * num / len(file_list)))
        if queue is not None:
            queue.put(Msg("return_data", output_hashes))
            queue.put(Msg("proc_terminate", pid))
        return output_hashes
def save_data(self):
    """
    Saves the ImageStruct's member data into a json file, and saves it onto disk.

    Writes <directory>/hash_data/fp_hash_data.json, creating the 'hash_data'
    subdirectory when needed. Hash objects are serialised as hex strings
    (via str()) so the json stays human-readable; the in-memory hash objects
    are left untouched by working on a deep copy.
    :return: No return value
    """
    self.metadata["last_time_modified"] = f"{datetime.datetime.now().strftime('%d/%m/%Y %H:%M:%S')}"
    output_dict = {"metadata": self.metadata,
                   "image_data": self.image_data}
    # Deep-copy so stringifying the hashes does not clobber the live image_data.
    temp_dict = copy.deepcopy(self.image_data)
    for image_data_dict in self.image_data:
        # converting binary arrays to hex for better readability in .json (with str())
        temp_dict[image_data_dict]["hash_list"] = list(map(str, self.image_data[image_data_dict]["hash_list"]))
    output_dict["image_data"] = copy.deepcopy(temp_dict)
    if not Path(self.directory, "hash_data").is_dir():
        try:
            Path(self.directory, "hash_data").mkdir(exist_ok=True)
        except OSError:
            print("Creation of the directory 'hash_data' failed.")
    try:
        self.pyqt_signal_dict["text_log"].emit("Saving json file...")
        # `with` closes the file on every path; no explicit close() needed.
        with open(Path(self.directory, "hash_data", "fp_hash_data.json"), "w") as json_file:
            json.dump(output_dict, json_file, indent=4)
        self.pyqt_signal_dict["text_log"].emit("Saved!")
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit still propagate.
        self.pyqt_signal_dict["text_log"].emit("Could not save json file.")
        traceback.print_exc()
# Lightweight message envelope sent over the ProcessManager queue; `event` is one of
# "pyqt_signal", "return_data" or "proc_terminate", and `data` is the matching payload.
Msg = collections.namedtuple('Msg', ['event', 'data'])
class ProcessManager:
    """Starts worker processes and relays their queued messages (progress, results) back to PyQt."""

    def __init__(self, pyqt_signal: dict):
        # pid -> {"pid", "process", "terminated"} bookkeeping for each worker.
        self.processes = {}
        # Single shared channel all workers put Msg tuples on.
        self.queue = mp.Queue()
        self.pyqt_signal = pyqt_signal

    @staticmethod
    def _wrapper(func, pid, queue, args, kwargs):
        # Runs inside the child process; injects the shared queue and pid into func.
        func(*args, pid=pid, queue=queue, **kwargs)  # function execution

    def run(self, pid, func, *args, **kwargs):
        """Spawn `func` in a new process identified by `pid`."""
        args2 = (func, pid, self.queue, args, kwargs)
        proc = mp.Process(target=self._wrapper, args=args2)
        self.processes[pid] = {"pid": pid, "process": proc, "terminated": False}  # saving processes in a dict
        self.processes[pid]["process"].start()

    def wait(self):  # waiting for processes to finish work.
        """Drain the queue until every worker sent proc_terminate; return their result payloads."""
        return_list = []
        terminated = False
        while not terminated:
            for _ in self.processes:
                # Blocks until some worker sends its next Msg.
                event, data = self.queue.get()
                if event == "return_data":  # event conditionals
                    return_list.append(data)
                elif event == "pyqt_signal":
                    self.pyqt_signal[data[0]].emit(data[1])  # can emit whatever PyQt signal depending on a list.
                elif event == "proc_terminate":
                    self.processes[data]["process"].join()  # process is terminated
                    self.processes[data]["terminated"] = True
                if all([self.processes[pid]["terminated"] for pid in self.processes]):
                    terminated = True
                    break
        return return_list
def split_list(item_list: list, divisor: int):
    """
    Splits a list into equal chunks, that works with remainders.
    :param item_list: List of items to be divided.
    :param divisor: Number of chunks the list is to be divided into.
    :return: Returns a list of divided chunks (empty list for empty input).
    """
    # Guard: an empty list previously caused divmod(0, 0) -> ZeroDivisionError.
    if not item_list:
        return []
    # Never make more chunks than there are items.
    divisor = min(divisor, len(item_list))
    k, m = divmod(len(item_list), divisor)
    # The first m chunks get one extra item to absorb the remainder.
    return [item_list[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(divisor)]
def merge_dicts(dict_args):
    """
    Merge an iterable of dictionaries into one new dict (shallow copy).
    On key clashes, entries from later dictionaries win.
    """
    merged = {}
    for mapping in dict_args:
        merged.update(mapping)
    return merged
def f_num0(value: int,
           n: int):
    """Left-pad *value* with zeros to a total width of *n* characters (fNum zero)."""
    try:
        digits = str(value)
        return "0" * int(n - len(digits)) + digits
    except TypeError as e:
        print(f"Error in f_num0: {e}")
def display_folders():
    """
    **Deprecated. PyQt5 allows the ability to use QComboBoxes which replace the functionality of this method.**
    Prints all the folders in the current working directory and the number of files in each folder.
    Returns a list of all the folders.
    :return: List of folders in the current working directory.
    """
    folders = [x.name for x in Path.cwd().glob("*") if x.is_dir()]
    try:
        for num, folder in enumerate(folders):
            if Path(Path.cwd(), folder).is_dir():
                file_num = len([_ for _ in Path(Path.cwd(), folder).glob("*") if _.is_file()])
                num_formatted = f_num0(num, 3)
                # Pad the name to a 30-char column so the table lines up.
                folder_formatted = folder + " " * (30 - len(str(folder)))
                print(f"{num_formatted} | {folder_formatted} | Number of files: {file_num}")
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit still propagate.
        traceback.print_exc()
    print("")
    return folders
def choose_int(text: str, **kwargs):
    """
    Repeatedly prompt on stdin until the user enters a valid non-negative integer.

    :param text: prompt shown to the user.
    :param kwargs: optional 'max_value' (exclusive upper bound for the entered int)
        and/or 'input_list' (the entered int is used as an index into this list).
    :return: the chosen int, or input_list[value] when 'input_list' was given.
    """
    max_value = kwargs.get("max_value")
    input_list = kwargs.get("input_list")
    while True:
        try:
            user_input = int(input(f"\n{text}"))
            if user_input < 0:
                print("Invalid input (value is less than 0!)")
            elif "max_value" in kwargs and user_input >= max_value:
                print("Invalid input (value exceeds the max value)!")
            else:
                # Resolve through input_list when one was supplied, else use the raw int.
                output_choice = input_list[user_input] if "input_list" in kwargs else user_input
                print("> " + str(output_choice))
                return output_choice
        except Exception as e:
            # Covers non-integer input and out-of-range input_list indices alike.
            print(f"Invalid input (error encountered)! \nError: {e}")
def confirm_choice(text: str):
    """Ask a yes/no question on stdin until a valid answer is given; True for yes, False for no."""
    yes_answers = ("y", "ye", "yes")
    no_answers = ("n", "no")
    while True:
        try:
            answer = input(f"{text} (Y/N)?:").lower()
            if answer in yes_answers:
                return True
            if answer in no_answers:
                return False
            print("Invalid input!")
        except Exception as e:
            print(e)
def f_type_return(file, file_type_list: list):  # takes string and file type list as input.
    """Return the first extension in file_type_list that `file` ends with, or None if none match."""
    name = str(file)
    for file_type in file_type_list:
        if name.endswith(file_type):
            return str(file_type)
    return None
def rename_to_num(directory: Path, file_list: list, format_string: str, file_type_list: list):
    """
    Rename every file in *file_list* to '<format_string><index><ext>' inside *directory*.

    When the primary rename fails with an OS error (e.g. target name collision),
    the file is parked under '_temp_<index><ext>' instead so a later pass can finish.
    """
    for num, file in enumerate(file_list):
        try:
            Path(directory, file).rename(
                Path(directory, format_string + str(num) + f_type_return(file, file_type_list)))
        except OSError:
            # Was `except WindowsError`: that name does not exist on non-Windows
            # Python and raised NameError there; OSError is the portable superset.
            try:
                Path(directory, file).rename(Path(directory, "_temp_" + str(num) + f_type_return(file, file_type_list)))
            except Exception as e:
                print(f"An exception has occured on iteration {num}: {e}")
        except Exception as e:
            print(f"An exception has occured on iteration {num}: {e}")
def strfex(expression: str, **kwargs) -> str:  # string format expression
    # using % as a special character marker,
    # %grp% marks the group series
    # %grp_num% marks the iteration of a group series
    # %num% marks the iteration of all the images. (not implemented yet, looking into making a metadata dict for this)
    # only limit with this system is that it doesn't account for open-ended tags (invalid),
    # or %tag%text_that_is_a_tag%tag%
    """
    **A string formatting function that can take a user input expression and format an input string.**
    %grp% - marks the group series
    %grp_num% - marks the iteration of a group series
    %num% - marks the iteration out of all the images
    :param expression: A strfex expression.
    :param kwargs: The data to be formatted into a string.
    :return: The formatted string
    """
    valid_kwargs = ["grp", "grp_num", "num"]
    output_str = str()
    # Splitting on '%' alternates literal text and tag names, so every odd chunk
    # that matches a valid tag gets substituted, everything else is copied verbatim.
    expression_list = expression.split("%")
    for str_chunk in expression_list:
        if str_chunk in valid_kwargs:
            if str_chunk in ["grp", "grp_num", "num"]:  # this is to format special data, other tags like date
                # wouldn't need to be formatted like this. (else)
                # Numeric tags are zero-padded to 3 digits for stable sort order.
                output_str += f_num0(kwargs[str_chunk], 3)
            else:
                # NOTE(review): currently unreachable since valid_kwargs equals the
                # list above; kept as the extension point for future non-numeric tags.
                output_str += kwargs[str_chunk]
        else:
            output_str += str_chunk
    return output_str
def check_json_exists(directory: Path):
    """Return True when <directory>/hash_data/fp_hash_data.json already exists on disk."""
    hash_dir = Path(directory, "hash_data")
    return hash_dir.is_dir() and Path(hash_dir, "fp_hash_data.json").is_file()
def compare_hashes(hash_input_1: dict, hash_input_2: dict, kwargs):
    """
    Compares two dicts of hashes, and returns a true value if the number of successful matches exceed the success ratio.
    The threshold for matching depends on the cutoff.
    There is two modes: similar and identical.
    :param hash_input_1: Hash dict 1 (keys: "average_hash", "hash_list")
    :param hash_input_2: Hash dict 2 (same shape as hash_input_1)
    :param kwargs: dict with optional "success_ratio" (default 0.3),
        "cutoff" (default 0) and "mode" ("similar" or "identical", default "similar")
    :return: True or False (implicitly None for an unknown mode)
    """
    success_ratio = kwargs.get("success_ratio", 0.3)
    cutoff = kwargs.get("cutoff", 0)
    mode = kwargs.get("mode", "similar")
    if mode == "identical":
        # Whole-image average-hash equality is enough for exact duplicates.
        return hash_input_1["average_hash"] == hash_input_2["average_hash"]
        # return hash_input_1["hash_list"] == hash_input_2["hash_list"] <- original, if identical checking fails just
        # reinstate this.
    elif mode == "similar":
        # Count grid cells whose hash distance falls under the cutoff.
        score = sum(
            1 for index in range(len(hash_input_1["hash_list"]))
            if abs(hash_input_1["hash_list"][index] - hash_input_2["hash_list"][index]) < cutoff
        )
        return score >= round(len(hash_input_1["hash_list"]) * success_ratio)
def cross_compare_list(image_struct: ImageStruct, comparison_function, **kwargs) -> list:
    # cutoff 0, a 12 to 10 density is ideal, crosschecking a nested list of hashes.
    # Potential improvements could be to rework this so it can take a function as the comparing function so it can
    # compare with different data sets. Input the file list.
    """
    Compares two lists for matches using a comparison function.
    O(n^2) pairwise scan over image_struct.image_data; each image joins at
    most one group because matched items are excluded from later comparisons.
    :param image_struct: Takes an ImageStruct object in
    :param comparison_function: Takes a comparison function that compares the inputs
    :param kwargs: Arguments that pass off into the comparison function.
    :return: Returns a 2 element list of [duplicate items, grouped duplicate items]
    """
    dupe_items = []  # flat list of every filename flagged as a duplicate so far
    g_dupe_items = []  # list of groups; each group holds filenames that matched one anchor
    item_list = image_struct.image_data
    image_struct.pyqt_signal_dict["text_log"].emit(f"Cross checking for {kwargs['mode']} duplicates...")
    for num, item_1 in enumerate(item_list, start=1):
        group = []
        if item_1 not in dupe_items:  # skip items already claimed by an earlier group
            for item_2 in item_list:
                if item_1 != item_2 and item_2 not in dupe_items:
                    if (comparison_function(item_list[item_1],
                                            item_list[item_2], kwargs)):
                        if item_1 not in dupe_items:  # the anchor joins its group only once
                            dupe_items.append(item_1)
                            group.append(item_1)
                        dupe_items.append(item_2)
                        group.append(item_2)
            if len(group) != 0:
                g_dupe_items.append(group)
        image_struct.pyqt_signal_dict["progress_bar"].emit(round(100 * num / len(item_list)))
    image_struct.pyqt_signal_dict["text_log"].emit("Done!")
    return [dupe_items, g_dupe_items]
def regroup_files(file_list: list, image_struct: ImageStruct,
                  type_list: list, expression="%grp%-%grp_num%"):
    """
    Renames grouped duplicate files on disk following a strfex expression and
    keeps the ImageStruct's image_data keys in sync with the new names.
    :param file_list: Takes a list of files (a list of groups, each a list of filenames).
    :param image_struct: Takes an ImageStruct object.
    :param type_list: Takes a list of file types.
    :param expression: A string formatted expression, or strfex. Check the strfex function for more info.
    :return: No return value
    """
    for i, file_list_2 in enumerate(file_list):
        for i2, filename in enumerate(file_list_2):
            try:
                # Only rename files whose extension is one we track.
                if f_type_return(filename, type_list) in type_list:
                    Path(image_struct.directory, filename).rename(Path(image_struct.directory,
                                                                       strfex(expression, grp=i,
                                                                              grp_num=i2) + f_type_return(filename,
                                                                                                          type_list)))
                    image_struct.image_data[
                        strfex(expression, grp=i, grp_num=i2) + f_type_return(filename, type_list)] = \
                        image_struct.image_data[filename]  # renaming the key associated with the filename.
                    del image_struct.image_data[filename]  # deleting old reference
            except Exception as e:
                image_struct.pyqt_signal_dict["text_log"].emit("regroup_files error: " + str(e))
        image_struct.pyqt_signal_dict["progress_bar"].emit(round(100 * (i + 1) / len(file_list)))
    # Persist the updated image_data so the json on disk matches the new filenames.
    image_struct.save_data()
def product(x):
    """Return x[0] * x[1] — e.g. the pixel area of a (width, height) tuple/list."""
    first, second = x[0], x[1]
    return first * second
def move_files(new_folder: str, file_list: list, image_struct: ImageStruct):
    """
    Moves a list of files to a subdirectory in the current working directory, then returns the altered ImageStruct
    object, without the moved files in the concerned image_data dict.
    In each duplicate group the highest-resolution file is kept in place; the rest are moved.
    :param new_folder: Name of the to-be-created subdirectory in the current working directory.
    :param file_list: Takes a list of files to be moved (a list of duplicate groups).
    :param image_struct: An ImageStruct object.
    :return: Returns an ImageStruct object.
    """
    try:
        Path(image_struct.directory, new_folder).mkdir(exist_ok=True)
    except OSError:
        image_struct.pyqt_signal_dict["text_log"].emit(f"Creation of the directory '{new_folder}' failed.")
    else:
        image_struct.pyqt_signal_dict["text_log"].emit("Successfully created the directory.")
    image_struct.pyqt_signal_dict["text_log"].emit("Moving duplicates...")
    try:
        if Path(image_struct.directory, new_folder).is_dir():
            for num, grouped_files in enumerate(file_list, start=1):
                HQ_image = grouped_files[0]  # setting HQ image as first
                for file in grouped_files:
                    # Keep the file with the largest pixel area (width * height).
                    if product(image_struct.image_data[file]["size"]) > product(
                            image_struct.image_data[HQ_image]["size"]):
                        HQ_image = file  # higher quality, set new HQ file
                for file in grouped_files:
                    if file != HQ_image:  # If the file isn't the HQ one, it moves all exact dupes to another folder.
                        Path(image_struct.directory, file).rename(Path(image_struct.directory, new_folder, file))
                        for filename in image_struct.image_data:  # deleting moved files' hashes
                            if filename == file:
                                del image_struct.image_data[filename]
                                # break immediately: the dict was mutated during iteration.
                                break
                image_struct.pyqt_signal_dict["progress_bar"].emit(round(100 * num / len(file_list)))
        image_struct.pyqt_signal_dict["text_log"].emit("Done!")
    except Exception as e:
        print(e)
    return image_struct
if __name__ == "__main__":
    # This file is meant to be imported as a library; running it directly only shows a notice.
    print("""dil: This is a module, made to be used to check for duplicate images, and identify and package images.
""")
    _ = input()
|
multiprocessing_xrh.py |
from multiprocessing import *
import os, time, random
# Code executed by the child process.
def run_proc(name):
    """Print the task name together with the child process's PID."""
    print('Run child process %s (%s)...' % (name, os.getpid()))
def long_time_task(name):
    """Simulate a long-running task: sleep up to 3 seconds, then report the elapsed time."""
    print('Run task %s (%s)...' % (name, os.getpid()))
    start = time.time()
    time.sleep(random.random() * 3)
    end = time.time()
    print('Task %s runs %0.2f seconds.' % (name, (end - start)))
# Code executed by the writer process:
def write(q):
    """Put 'A', 'B', 'C' on the queue with a small random delay between puts."""
    print('Process to write: %s' % os.getpid())
    for value in ['A', 'B', 'C']:
        print('Put %s to queue...' % value)
        q.put(value)
        time.sleep(random.random())
# Code executed by the reader process:
def read(q):
    """Consume values from the queue forever; the parent terminates this process externally."""
    print('Process to read: %s' % os.getpid())
    while True:
        value = q.get(True)  # block until a value is available
        print('Get %s from queue.' % value)
if __name__=='__main__':
    # 1. Basic multiprocessing with Process
    # print('Parent process %s.' % os.getpid())
    # p = Process(target=run_proc, args=('test',))
    # print('Child process will start.')
    # p.start()
    # p.join()
    # print('Child process end.')

    # 2. Process pool (Pool)
    # print('Parent process %s.' % os.getpid())
    # p = Pool()  # defaults to the number of CPU cores
    # # p = Pool(4)
    #
    # for i in range(10):
    #     p.apply_async(long_time_task, args=(i,))
    # print('Waiting for all subprocesses done...')
    # p.close()
    # p.join()
    # print('All subprocesses done.')

    # 3. Inter-process communication with Queue
    # The parent process creates the Queue and passes it to each child process:
    q = Queue()
    pw = Process(target=write, args=(q,))
    pr = Process(target=read, args=(q,))
    # Start the writer child process pw:
    pw.start()
    # Start the reader child process pr:
    pr.start()
    # Wait for pw to finish:
    pw.join()
    # read() loops forever, so pr can never be joined; it has to be terminated:
    pr.terminate()
|
test_wikification.py | # # %%
# import sys
# print(sys.path)
# # %%
# import os
# import datetime
# import numpy as np
# import multiprocessing
# from wikification import _sum_page_rank, _recompute_on_anchor_text, _extraxt_concepts_from_wikires, _call_wikifier, _sum_classic_page_rank
# # %%
# import os
# import datetime
# corpuspath = "../../all_text_transcriptions/"
# directory = os.scandir(corpuspath)
# directory = list(directory)
#
# def __get_text(alltexts, file):
# with open(file.path, "r") as f:
# text = "".join(f.readlines())
# alltexts.append(text)
#
# manager = multiprocessing.Manager()
# alltexts = manager.list()
# jobs = [multiprocessing.Process(target=__get_text, args=(alltexts, file)) for file in directory[:100]]
# _ = [p.start() for p in jobs]
# _ = [p.join() for p in jobs]
#
# alltexts = list(alltexts)
# # %%
# texts = filter(lambda text: len(text) > 10000, alltexts)
# text = next(texts)
# # print(text)
# print("sum_page_rank:")
# print([(c["title"], c["pageRank"]) for c in _sum_page_rank(text)][:10])
# print("recompute_on_anchor_text:")
# print([(c["title"], c["pageRank"]) for c in _recompute_on_anchor_text(text)][:10])
# # %%
# texts = list(filter(lambda text: len(text) < 10000 and len(text) > 200, alltexts))
# res = {"missed": {"f_sum_page_rank": [], "f_recompute_on_anchor_text": []},
# "new": {"f_sum_page_rank": [], "f_recompute_on_anchor_text": []}}
#
# def n_diff_concepts(gt, xp):
# gt, xp = set(c["url"] for c in gt), set(c["url"] for c in xp)
# missed = len(gt - xp)
# new = len(xp - gt)
# return dict(missed=missed, new=new)
#
# for i, text in enumerate(texts):
# print(i + 1, "/", len(texts))
# groundtruth = _extraxt_concepts_from_wikires(_call_wikifier(text))
# for method in [_recompute_on_anchor_text, _sum_page_rank]:
# xp = method(text, bunchs_settings=dict(chunk_size=100, window_seek=10))
# compare = n_diff_concepts(groundtruth, xp)
# for feature in ["missed", "new"]:
# res[feature][f"f_{method.__name__}"].append(compare[feature])
#
# print(f"On {len(texts)} texts")
# for method in [_recompute_on_anchor_text, _sum_page_rank]:
# print(f"{method.__name__} result")
# for feature in ["missed", "new"]:
# vals = res[feature][f"f_{method.__name__}"]
# print(f"avg_{feature}:{np.mean(vals)}")
# print(f"std_{feature}:{np.std(vals)}")
# print(f"sum_{feature}:{sum(vals)}")
# %%
import pprint
import time
import os
import datetime
import numpy as np
import multiprocessing
from wikification import _sum_page_rank, _recompute_on_anchor_text, _extraxt_concepts_from_wikires, _call_wikifier, _sum_classic_page_rank, _get_chunks
# Benchmark script: time one wikification strategy on a single Yale lecture transcript.
# path = "../../../predict-series/res/YaleSeriesCorpus/20-The_Early_Middle_Ages,_284–1000-Paul_Freedman/578-Transformation_of_the_Roman_Empire"
path = "../../../predict-series/res/YaleSeriesCorpus/32-Capitalism:_Success,_Crisis,_and_Reform-Douglas_W._Rae/882-Policy_Targets_for_Capitalist_Development"
print(multiprocessing.cpu_count())
# Load the raw lecture transcript for the selected course/lecture.
with open(os.path.join(path, "rawtext"), "r") as f:
    text = " ".join(f.readlines())
# Earlier timing experiments, kept for reference:
# print("*" * 10)
# print("test __call_wikifier")
# start_time = time.time()
# res1 = _call_wikifier(text[:10000])
# elapsed_time = time.time() - start_time
# print(f"elapsed time: {elapsed_time:.2f} seconds")
# print("*" * 10)
# print("_sum_classic_page_rank")
# start_time = time.time()
# res2 = _sum_classic_page_rank(text)
# elapsed_time = time.time() - start_time
# print(f"elapsed time: {elapsed_time:.2f} seconds")
# print("*" * 10)
# print("_recompute_on_anchor_text")
# start_time = time.time()
# res3 = _recompute_on_anchor_text(text)
# elapsed_time = time.time() - start_time
# print(f"elapsed time: {elapsed_time:.2f} seconds")
print("*" * 10)
print("_sum_page_rank")
# Time the _sum_page_rank strategy on the full transcript.
start_time = time.time()
res4 = _sum_page_rank(text)
elapsed_time = time.time() - start_time
print(f"elapsed time: {elapsed_time:.2f} seconds")
|
player.py | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import threading
import traceback
import subprocess
import audioop
import asyncio
import logging
import shlex
import time
import json
import sys
import re
import io
from typing import (
Any,
Callable,
Generic,
IO,
Optional,
TYPE_CHECKING,
Tuple,
Type,
TypeVar,
Union,
)
from .errors import ClientException
from .opus import Encoder as OpusEncoder
from .oggparse import OggStream
from .utils import MISSING
if TYPE_CHECKING:
from .voice_client import VoiceClient
AT = TypeVar("AT", bound="AudioSource")
FT = TypeVar("FT", bound="FFmpegOpusAudio")
_log = logging.getLogger(__name__)
__all__ = (
"AudioSource",
"PCMAudio",
"FFmpegAudio",
"FFmpegPCMAudio",
"FFmpegOpusAudio",
"PCMVolumeTransformer",
)
CREATE_NO_WINDOW: int

# On Windows, pass CREATE_NO_WINDOW to subprocess so the spawned ffmpeg
# does not flash a console window; the flag is meaningless elsewhere.
if sys.platform != "win32":
    CREATE_NO_WINDOW = 0
else:
    CREATE_NO_WINDOW = 0x08000000
class AudioSource:
    """Base class for audio streams fed to a voice client.

    A source either yields raw 16-bit 48KHz stereo PCM frames or
    already-Opus-encoded packets; :meth:`is_opus` tells the two apart.

    .. warning::

        The audio source reads are done in a separate thread.
    """

    def read(self) -> bytes:
        """Return the next 20ms of audio, or an empty bytes-like object at end of stream.

        Subclasses must override this. When :meth:`is_opus` returns ``True``
        the result is one 20ms Opus packet; otherwise it is 20ms of 16-bit
        48KHz stereo PCM, which is about 3,840 bytes per frame.

        Returns
        --------
        :class:`bytes`
            A bytes like object that represents the PCM or Opus data.
        """
        raise NotImplementedError

    def is_opus(self) -> bool:
        """Whether :meth:`read` yields Opus packets instead of raw PCM."""
        return False

    def cleanup(self) -> None:
        """Release buffers or processes once playback is done; the default is a no-op."""
        pass

    def __del__(self) -> None:
        # Best-effort cleanup when the source is garbage collected.
        self.cleanup()
class PCMAudio(AudioSource):
    """An :class:`AudioSource` wrapping a file-like object of raw 16-bit 48KHz stereo PCM.

    Attributes
    -----------
    stream: :term:`py:file object`
        A file-like object that reads byte data representing raw PCM.
    """

    def __init__(self, stream: io.BufferedIOBase) -> None:
        self.stream: io.BufferedIOBase = stream

    def read(self) -> bytes:
        frame = self.stream.read(OpusEncoder.FRAME_SIZE)
        # A short read means the stream is exhausted: signal end-of-source.
        return frame if len(frame) == OpusEncoder.FRAME_SIZE else b""
class FFmpegAudio(AudioSource):
    """Represents an FFmpeg (or AVConv) based AudioSource.
    User created AudioSources using FFmpeg differently from how :class:`FFmpegPCMAudio` and
    :class:`FFmpegOpusAudio` work should subclass this.
    .. versionadded:: 1.3
    """

    def __init__(
        self,
        source: Union[str, io.BufferedIOBase],
        *,
        executable: str = "ffmpeg",
        args: Any,
        **subprocess_kwargs: Any,
    ):
        # Piping mode: the caller supplies a file-like object that a background
        # thread feeds into ffmpeg's stdin.
        piping = subprocess_kwargs.get("stdin") == subprocess.PIPE
        if piping and isinstance(source, str):
            raise TypeError(
                "parameter conflict: 'source' parameter cannot be a string when piping to stdin"
            )
        args = [executable, *args]
        kwargs = {"stdout": subprocess.PIPE}
        kwargs.update(subprocess_kwargs)
        self._process: subprocess.Popen = self._spawn_process(args, **kwargs)
        self._stdout: IO[bytes] = self._process.stdout  # type: ignore
        # NOTE(review): annotation fixed from IO[Bytes] — 'Bytes' is not a defined name.
        self._stdin: Optional[IO[bytes]] = None
        self._pipe_thread: Optional[threading.Thread] = None
        if piping:
            n = f"popen-stdin-writer:{id(self):#x}"
            self._stdin = self._process.stdin
            # Daemon thread so a blocked write never prevents interpreter shutdown.
            self._pipe_thread = threading.Thread(
                target=self._pipe_writer, args=(source,), daemon=True, name=n
            )
            self._pipe_thread.start()

    def _spawn_process(self, args: Any, **subprocess_kwargs: Any) -> subprocess.Popen:
        # Launch ffmpeg, translating the two common failure modes into ClientException.
        process = None
        try:
            process = subprocess.Popen(
                args, creationflags=CREATE_NO_WINDOW, **subprocess_kwargs
            )
        except FileNotFoundError:
            # args may be a plain command string or an argv list; pull out the executable name.
            executable = args.partition(" ")[0] if isinstance(args, str) else args[0]
            raise ClientException(executable + " was not found.") from None
        except subprocess.SubprocessError as exc:
            raise ClientException(
                f"Popen failed: {exc.__class__.__name__}: {exc}"
            ) from exc
        else:
            return process

    def _kill_process(self) -> None:
        proc = self._process
        if proc is MISSING:
            # cleanup() already ran; nothing to do.
            return
        _log.info("Preparing to terminate ffmpeg process %s.", proc.pid)
        try:
            proc.kill()
        except Exception:
            _log.exception(
                "Ignoring error attempting to kill ffmpeg process %s", proc.pid
            )
        if proc.poll() is None:
            _log.info(
                "ffmpeg process %s has not terminated. Waiting to terminate...",
                proc.pid,
            )
            # communicate() drains the pipes and waits, avoiding a zombie process.
            proc.communicate()
            _log.info(
                "ffmpeg process %s should have terminated with a return code of %s.",
                proc.pid,
                proc.returncode,
            )
        else:
            _log.info(
                "ffmpeg process %s successfully terminated with return code of %s.",
                proc.pid,
                proc.returncode,
            )

    def _pipe_writer(self, source: io.BufferedIOBase) -> None:
        # Runs on the daemon writer thread: shuttles bytes from `source` into ffmpeg's stdin.
        while self._process:
            # arbitrarily large read size
            data = source.read(8192)
            if not data:
                self._process.terminate()
                return
            try:
                self._stdin.write(data)
            except Exception:
                _log.debug(
                    "Write error for %s, this is probably not a problem",
                    self,
                    exc_info=True,
                )
                # at this point the source data is either exhausted or the process is fubar
                self._process.terminate()
                return

    def cleanup(self) -> None:
        self._kill_process()
        # Mark everything MISSING so subsequent calls become no-ops.
        self._process = self._stdout = self._stdin = MISSING
class FFmpegPCMAudio(FFmpegAudio):
    """An audio source from FFmpeg (or AVConv).
    This launches a sub-process to a specific input file given.
    .. warning::
        You must have the ffmpeg or avconv executable in your path environment
        variable in order for this to work.
    Parameters
    ------------
    source: Union[:class:`str`, :class:`io.BufferedIOBase`]
        The input that ffmpeg will take and convert to PCM bytes.
        If ``pipe`` is ``True`` then this is a file-like object that is
        passed to the stdin of ffmpeg.
    executable: :class:`str`
        The executable name (and path) to use. Defaults to ``ffmpeg``.
    pipe: :class:`bool`
        If ``True``, denotes that ``source`` parameter will be passed
        to the stdin of ffmpeg. Defaults to ``False``.
    stderr: Optional[:term:`py:file object`]
        A file-like object to pass to the Popen constructor.
        Could also be an instance of ``subprocess.PIPE``.
    before_options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
    options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
    Raises
    --------
    ClientException
        The subprocess failed to be created.
    """

    def __init__(
        self,
        source: Union[str, io.BufferedIOBase],
        *,
        executable: str = "ffmpeg",
        pipe: bool = False,
        stderr: Optional[IO[str]] = None,
        before_options: Optional[str] = None,
        options: Optional[str] = None,
    ) -> None:
        args = []
        subprocess_kwargs = {
            "stdin": subprocess.PIPE if pipe else subprocess.DEVNULL,
            "stderr": stderr,
        }
        if isinstance(before_options, str):
            args.extend(shlex.split(before_options))
        args.append("-i")
        # "-" tells ffmpeg to read its input from stdin when piping.
        args.append("-" if pipe else source)
        # Output raw signed 16-bit little-endian PCM at 48KHz stereo.
        args.extend(("-f", "s16le", "-ar", "48000", "-ac", "2", "-loglevel", "warning"))
        if isinstance(options, str):
            args.extend(shlex.split(options))
        # Send the decoded audio to stdout so read() can consume it.
        args.append("pipe:1")
        super().__init__(source, executable=executable, args=args, **subprocess_kwargs)

    def read(self) -> bytes:
        ret = self._stdout.read(OpusEncoder.FRAME_SIZE)
        # A short read means ffmpeg's output is exhausted; signal end-of-source.
        if len(ret) != OpusEncoder.FRAME_SIZE:
            return b""
        return ret

    def is_opus(self) -> bool:
        # Output is raw PCM; the library will Opus-encode it itself.
        return False
class FFmpegOpusAudio(FFmpegAudio):
"""An audio source from FFmpeg (or AVConv).
This launches a sub-process to a specific input file given. However, rather than
producing PCM packets like :class:`FFmpegPCMAudio` does that need to be encoded to
Opus, this class produces Opus packets, skipping the encoding step done by the library.
Alternatively, instead of instantiating this class directly, you can use
:meth:`FFmpegOpusAudio.from_probe` to probe for bitrate and codec information. This
can be used to opportunistically skip pointless re-encoding of existing Opus audio data
for a boost in performance at the cost of a short initial delay to gather the information.
The same can be achieved by passing ``copy`` to the ``codec`` parameter, but only if you
know that the input source is Opus encoded beforehand.
.. versionadded:: 1.3
.. warning::
You must have the ffmpeg or avconv executable in your path environment
variable in order for this to work.
Parameters
------------
source: Union[:class:`str`, :class:`io.BufferedIOBase`]
The input that ffmpeg will take and convert to Opus bytes.
If ``pipe`` is ``True`` then this is a file-like object that is
passed to the stdin of ffmpeg.
bitrate: :class:`int`
The bitrate in kbps to encode the output to. Defaults to ``128``.
codec: Optional[:class:`str`]
The codec to use to encode the audio data. Normally this would be
just ``libopus``, but is used by :meth:`FFmpegOpusAudio.from_probe` to
opportunistically skip pointlessly re-encoding Opus audio data by passing
``copy`` as the codec value. Any values other than ``copy``, ``opus``, or
``libopus`` will be considered ``libopus``. Defaults to ``libopus``.
.. warning::
Do not provide this parameter unless you are certain that the audio input is
already Opus encoded. For typical use :meth:`FFmpegOpusAudio.from_probe`
should be used to determine the proper value for this parameter.
executable: :class:`str`
The executable name (and path) to use. Defaults to ``ffmpeg``.
pipe: :class:`bool`
If ``True``, denotes that ``source`` parameter will be passed
to the stdin of ffmpeg. Defaults to ``False``.
stderr: Optional[:term:`py:file object`]
A file-like object to pass to the Popen constructor.
Could also be an instance of ``subprocess.PIPE``.
before_options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
Raises
--------
ClientException
The subprocess failed to be created.
"""
def __init__(
self,
source: Union[str, io.BufferedIOBase],
*,
bitrate: int = 128,
codec: Optional[str] = None,
executable: str = "ffmpeg",
pipe=False,
stderr=None,
before_options=None,
options=None,
) -> None:
args = []
subprocess_kwargs = {
"stdin": subprocess.PIPE if pipe else subprocess.DEVNULL,
"stderr": stderr,
}
if isinstance(before_options, str):
args.extend(shlex.split(before_options))
args.append("-i")
args.append("-" if pipe else source)
codec = "copy" if codec in ("opus", "libopus") else "libopus"
args.extend(
(
"-map_metadata",
"-1",
"-f",
"opus",
"-c:a",
codec,
"-ar",
"48000",
"-ac",
"2",
"-b:a",
f"{bitrate}k",
"-loglevel",
"warning",
)
)
if isinstance(options, str):
args.extend(shlex.split(options))
args.append("pipe:1")
super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
self._packet_iter = OggStream(self._stdout).iter_packets()
@classmethod
async def from_probe(
cls: Type[FT],
source: str,
*,
method: Optional[
Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]
] = None,
**kwargs: Any,
) -> FT:
"""|coro|
A factory method that creates a :class:`FFmpegOpusAudio` after probing
the input source for audio codec and bitrate information.
Examples
----------
Use this function to create an :class:`FFmpegOpusAudio` instance instead of the constructor: ::
source = await discpy.FFmpegOpusAudio.from_probe("song.webm")
voice_client.play(source)
If you are on Windows and don't have ffprobe installed, use the ``fallback`` method
to probe using ffmpeg instead: ::
source = await discpy.FFmpegOpusAudio.from_probe("song.webm", method='fallback')
voice_client.play(source)
Using a custom method of determining codec and bitrate: ::
def custom_probe(source, executable):
# some analysis code here
return codec, bitrate
source = await discpy.FFmpegOpusAudio.from_probe("song.webm", method=custom_probe)
voice_client.play(source)
Parameters
------------
source
Identical to the ``source`` parameter for the constructor.
method: Optional[Union[:class:`str`, Callable[:class:`str`, :class:`str`]]]
The probing method used to determine bitrate and codec information. As a string, valid
values are ``native`` to use ffprobe (or avprobe) and ``fallback`` to use ffmpeg
(or avconv). As a callable, it must take two string arguments, ``source`` and
``executable``. Both parameters are the same values passed to this factory function.
``executable`` will default to ``ffmpeg`` if not provided as a keyword argument.
kwargs
The remaining parameters to be passed to the :class:`FFmpegOpusAudio` constructor,
excluding ``bitrate`` and ``codec``.
Raises
--------
AttributeError
Invalid probe method, must be ``'native'`` or ``'fallback'``.
TypeError
Invalid value for ``probe`` parameter, must be :class:`str` or a callable.
Returns
--------
:class:`FFmpegOpusAudio`
An instance of this class.
"""
executable = kwargs.get("executable")
codec, bitrate = await cls.probe(source, method=method, executable=executable)
return cls(source, bitrate=bitrate, codec=codec, **kwargs) # type: ignore
@classmethod
async def probe(
cls,
source: str,
*,
method: Optional[
Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]
] = None,
executable: Optional[str] = None,
) -> Tuple[Optional[str], Optional[int]]:
"""|coro|
Probes the input source for bitrate and codec information.
Parameters
------------
source
Identical to the ``source`` parameter for :class:`FFmpegOpusAudio`.
method
Identical to the ``method`` parameter for :meth:`FFmpegOpusAudio.from_probe`.
executable: :class:`str`
Identical to the ``executable`` parameter for :class:`FFmpegOpusAudio`.
Raises
--------
AttributeError
Invalid probe method, must be ``'native'`` or ``'fallback'``.
TypeError
Invalid value for ``probe`` parameter, must be :class:`str` or a callable.
Returns
---------
Optional[Tuple[Optional[:class:`str`], Optional[:class:`int`]]]
A 2-tuple with the codec and bitrate of the input source.
"""
method = method or "native"
executable = executable or "ffmpeg"
probefunc = fallback = None
if isinstance(method, str):
probefunc = getattr(cls, "_probe_codec_" + method, None)
if probefunc is None:
raise AttributeError(f"Invalid probe method {method!r}")
if probefunc is cls._probe_codec_native:
fallback = cls._probe_codec_fallback
elif callable(method):
probefunc = method
fallback = cls._probe_codec_fallback
else:
raise TypeError(
"Expected str or callable for parameter 'probe', "
f"not '{method.__class__.__name__}'"
)
codec = bitrate = None
loop = asyncio.get_event_loop()
try:
codec, bitrate = await loop.run_in_executor(None, lambda: probefunc(source, executable)) # type: ignore
except Exception:
if not fallback:
_log.exception("Probe '%s' using '%s' failed", method, executable)
return # type: ignore
_log.exception(
"Probe '%s' using '%s' failed, trying fallback", method, executable
)
try:
codec, bitrate = await loop.run_in_executor(None, lambda: fallback(source, executable)) # type: ignore
except Exception:
_log.exception("Fallback probe using '%s' failed", executable)
else:
_log.info("Fallback probe found codec=%s, bitrate=%s", codec, bitrate)
else:
_log.info("Probe found codec=%s, bitrate=%s", codec, bitrate)
finally:
return codec, bitrate
    @staticmethod
    def _probe_codec_native(
        source, executable: str = "ffmpeg"
    ) -> Tuple[Optional[str], Optional[int]]:
        # Probe with ffprobe/avprobe: "ffmpeg" -> "ffprobe", "avconv" -> "avprobe";
        # any other executable name is used as-is.
        exe = (
            executable[:2] + "probe"
            if executable in ("ffmpeg", "avconv")
            else executable
        )
        # Ask ffprobe for the first audio stream's metadata as JSON.
        args = [
            exe,
            "-v",
            "quiet",
            "-print_format",
            "json",
            "-show_streams",
            "-select_streams",
            "a:0",
            source,
        ]
        output = subprocess.check_output(args, timeout=20)
        codec = bitrate = None
        if output:
            data = json.loads(output)
            # Raises IndexError when no audio stream exists; the caller
            # (probe) catches any exception from this function.
            streamdata = data["streams"][0]
            codec = streamdata.get("codec_name")
            bitrate = int(streamdata.get("bit_rate", 0))
            # NOTE(review): max(..., 512) *floors* the bitrate at 512 kbps
            # (bit_rate is in bits/s, so this is kbps after /1000). A cap via
            # min(..., 512) looks more plausible — confirm the intent.
            bitrate = max(round(bitrate / 1000), 512)
        return codec, bitrate
@staticmethod
def _probe_codec_fallback(
source, executable: str = "ffmpeg"
) -> Tuple[Optional[str], Optional[int]]:
args = [executable, "-hide_banner", "-i", source]
proc = subprocess.Popen(
args,
creationflags=CREATE_NO_WINDOW,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
out, _ = proc.communicate(timeout=20)
output = out.decode("utf8")
codec = bitrate = None
codec_match = re.search(r"Stream #0.*?Audio: (\w+)", output)
if codec_match:
codec = codec_match.group(1)
br_match = re.search(r"(\d+) [kK]b/s", output)
if br_match:
bitrate = max(int(br_match.group(1)), 512)
return codec, bitrate
def read(self) -> bytes:
return next(self._packet_iter, b"")
def is_opus(self) -> bool:
return True
class PCMVolumeTransformer(AudioSource, Generic[AT]):
    """Wraps another :class:`AudioSource` and scales its PCM output by a volume factor.

    Only PCM sources are supported; sources whose :meth:`AudioSource.is_opus`
    returns ``True`` cannot be transformed.

    Parameters
    ------------
    original: :class:`AudioSource`
        The PCM audio source to wrap.
    volume: :class:`float`
        The starting volume (``1.0`` means 100%).
        See :attr:`volume` for more info.

    Raises
    -------
    TypeError
        Not an audio source.
    ClientException
        The audio source is opus encoded.
    """

    def __init__(self, original: AT, volume: float = 1.0):
        if not isinstance(original, AudioSource):
            raise TypeError(f"expected AudioSource not {original.__class__.__name__}.")
        if original.is_opus():
            raise ClientException("AudioSource must not be Opus encoded.")
        self.original: AT = original
        self.volume = volume

    @property
    def volume(self) -> float:
        """Retrieves or sets the volume as a floating point percentage (e.g. ``1.0`` for 100%)."""
        return self._volume

    @volume.setter
    def volume(self, value: float) -> None:
        # Negative volumes are clamped to 0.0 (silence).
        self._volume = max(value, 0.0)

    def cleanup(self) -> None:
        # Delegate resource cleanup to the wrapped source.
        self.original.cleanup()

    def read(self) -> bytes:
        # The multiplier is capped at 2.0 (200%) to limit clipping.
        frame = self.original.read()
        return audioop.mul(frame, 2, min(self._volume, 2.0))
class AudioPlayer(threading.Thread):
    """Daemon thread that reads 20ms frames from an AudioSource and sends
    them to the voice client at a steady cadence, with pause/resume support.
    """

    # Seconds per Opus frame (frame length is in milliseconds).
    DELAY: float = OpusEncoder.FRAME_LENGTH / 1000.0
    def __init__(self, source: AudioSource, client: VoiceClient, *, after=None):
        threading.Thread.__init__(self)
        self.daemon: bool = True
        self.source: AudioSource = source
        self.client: VoiceClient = client
        # Optional callback invoked once playback ends; receives the error
        # (or None) that stopped playback.
        self.after: Optional[Callable[[Optional[Exception]], Any]] = after
        self._end: threading.Event = threading.Event()
        self._resumed: threading.Event = threading.Event()
        self._resumed.set() # we are not paused
        self._current_error: Optional[Exception] = None
        # Shared with the voice client: set while the voice connection is up.
        self._connected: threading.Event = client._connected
        self._lock: threading.Lock = threading.Lock()
        if after is not None and not callable(after):
            raise TypeError('Expected a callable for the "after" parameter.')
    def _do_run(self) -> None:
        # Playback loop: one source.read() per DELAY-spaced tick.
        self.loops = 0
        self._start = time.perf_counter()
        # getattr lookup speed ups
        play_audio = self.client.send_audio_packet
        self._speak(True)
        while not self._end.is_set():
            # are we paused?
            if not self._resumed.is_set():
                # wait until we aren't
                self._resumed.wait()
                continue
            # are we disconnected from voice?
            if not self._connected.is_set():
                # wait until we are connected
                self._connected.wait()
                # reset our internal data
                self.loops = 0
                self._start = time.perf_counter()
            self.loops += 1
            data = self.source.read()
            if not data:
                # Empty read signals end of stream.
                self.stop()
                break
            play_audio(data, encode=not self.source.is_opus())
            # Schedule against the absolute start time so per-loop jitter
            # doesn't accumulate into drift.
            next_time = self._start + self.DELAY * self.loops
            delay = max(0, self.DELAY + (next_time - time.perf_counter()))
            time.sleep(delay)
    def run(self) -> None:
        # Thread entry point: capture any playback error, then always clean
        # up the source and fire the after-callback.
        try:
            self._do_run()
        except Exception as exc:
            self._current_error = exc
            self.stop()
        finally:
            self.source.cleanup()
            self._call_after()
    def _call_after(self) -> None:
        # Invoke the user callback (if any) with the terminal error; errors
        # raised by the callback itself are logged, not propagated.
        error = self._current_error
        if self.after is not None:
            try:
                self.after(error)
            except Exception as exc:
                _log.exception("Calling the after function failed.")
                exc.__context__ = error
                traceback.print_exception(type(exc), exc, exc.__traceback__)
        elif error:
            msg = f"Exception in voice thread {self.name}"
            _log.exception(msg, exc_info=error)
            print(msg, file=sys.stderr)
            traceback.print_exception(type(error), error, error.__traceback__)
    def stop(self) -> None:
        # Setting _resumed too unblocks a loop parked in _resumed.wait().
        self._end.set()
        self._resumed.set()
        self._speak(False)
    def pause(self, *, update_speaking: bool = True) -> None:
        self._resumed.clear()
        if update_speaking:
            self._speak(False)
    def resume(self, *, update_speaking: bool = True) -> None:
        # Reset the timing baseline so paused time isn't "caught up".
        self.loops = 0
        self._start = time.perf_counter()
        self._resumed.set()
        if update_speaking:
            self._speak(True)
    def is_playing(self) -> bool:
        return self._resumed.is_set() and not self._end.is_set()
    def is_paused(self) -> bool:
        return not self._end.is_set() and not self._resumed.is_set()
    def _set_source(self, source: AudioSource) -> None:
        # Swap the source atomically; speaking state is left untouched.
        with self._lock:
            self.pause(update_speaking=False)
            self.source = source
            self.resume(update_speaking=False)
    def _speak(self, speaking: bool) -> None:
        # Fire-and-forget speaking-state update on the client's event loop;
        # failures are logged but never break playback.
        try:
            asyncio.run_coroutine_threadsafe(
                self.client.ws.speak(speaking), self.client.loop
            )
        except Exception as e:
            _log.info("Speaking call in player failed: %s", e)
|
core.py | # -*- coding: utf-8 -*-
import traceback
import json
import requests
from string import capwords
from .request import threading, Request
from . import source_utils
from .source_utils import tools
from .utils import beautifulSoup, encode, decode, now, time, clock_time_ms, safe_list_get, get_caller_name, replace_text_with_int, b32toHex, database
from .utils import get_all_relative_py_files, wait_threads, quote_plus, quote, DEV_MODE, DEV_MODE_ALL, CACHE_LOG, AWS_ADMIN
from .common_types import namedtuple, SearchResult, UrlParts, Filter, HosterResult, CancellationToken
from .scrapers import re, NoResultsScraper, GenericTorrentScraper, GenericExtraQueryTorrentScraper, MultiUrlScraper
from .urls import trackers, hosters, get_urls, update_urls, deprioritize_url
from .cache import check_cache_result, get_cache, get_config, set_config
from .test_utils import test_torrent, test_hoster
def get_scraper(
    soup_filter,
    title_filter,
    info_request,
    search_request,
    cancellation_token,
    request,
    use_thread_for_info,
    custom_filter,
    caller_name,
    url,
    query_type
):
    """Build the scraper for a caller: a single CoreScraper for one URL, a
    MultiUrlScraper in DEV_MODE_ALL, or a NoResultsScraper when no URLs are
    configured for the caller.
    """
    if caller_name is None:
        caller_name = get_caller_name()
    if request is None:
        request = Request()

    def make_core_scraper(urls, single_url):
        # All per-caller configuration is closed over; only the URL mode varies.
        return CoreScraper(
            urls=urls,
            single_url=single_url,
            request=request,
            search_request=search_request,
            soup_filter=soup_filter,
            title_filter=title_filter,
            info_request=info_request,
            cancellation_token=cancellation_token,
            use_thread_for_info=use_thread_for_info,
            custom_filter=custom_filter,
            caller_name=caller_name
        )

    # An explicitly supplied URL wins over the configured URL list.
    if url:
        return make_core_scraper(None, url)
    scraper_urls = get_urls(caller_name, query_type)
    if scraper_urls is None:
        return NoResultsScraper()
    urls = [UrlParts(base=t['base'], search=t['search'], default_search=t['default_search']) for t in scraper_urls]
    if DEV_MODE_ALL:
        # Dev mode: scrape every mirror individually instead of failing over.
        return MultiUrlScraper([make_core_scraper(None, single_url) for single_url in urls])
    return make_core_scraper(urls, None)
class DefaultSources(object):
    """Base class for a torrent-scraper module: wires a CoreScraper to the
    module's configuration and exposes movie()/episode() entry points.
    """
    def __init__(self, module_name, request=None, single_query=False, url=None):
        # The caller name is the last dotted component of the module name.
        self._caller_name = module_name.split('.')[-1:][0]
        self._request = request
        self._single_query = single_query
        self._cancellation_token = CancellationToken(is_cancellation_requested=False)
        self._url = url
        self.query_type = None
    def _search_request(self, url, query):
        # Perform one search HTTP request; returns None on an empty query.
        if not query:
            tools.log('a4kScrapers.%s.%s: %s' % (self.query_type, self._caller_name, 'empty query'), 'notice')
            return None
        # Query-string style searches get URL-encoded; path-style searches are
        # decoded to text first.
        # NOTE(review): query.decode('utf-8') assumes a bytes query (Python 2
        # str) — on Python 3 str input this would raise; confirm which runtime
        # this targets.
        if '=%s' in url.search:
            query = quote_plus(query)
        else:
            query = query.decode('utf-8')
        if '%%' in url.search:
            query = query.replace(' ', '%2B')
        return self._request.get((url.base + url.search) % query)
    def _get_scraper(self, title, genericScraper=None, use_thread_for_info=False, custom_filter=None):
        # Assemble the CoreScraper, preferring hooks defined on the subclass
        # (_soup_filter/_title_filter/_info/_parse_*) over the generic ones.
        if genericScraper is None:
            genericScraper = GenericTorrentScraper(title)
        soup_filter = getattr(self, '_soup_filter', None)
        if soup_filter is None:
            soup_filter = genericScraper.soup_filter
        title_filter = getattr(self, '_title_filter', None)
        if title_filter is None:
            title_filter = genericScraper.title_filter
        info = getattr(self, '_info', None)
        if info is None:
            info = genericScraper.info
        parse_magnet = getattr(self, '_parse_magnet', None)
        if parse_magnet is not None:
            genericScraper.parse_magnet = parse_magnet
        parse_size = getattr(self, '_parse_size', None)
        if parse_size is not None:
            genericScraper.parse_size = parse_size
        parse_seeds = getattr(self, '_parse_seeds', None)
        if parse_seeds is not None:
            genericScraper.parse_seeds = parse_seeds
        self.genericScraper = genericScraper
        self.scraper = get_scraper(soup_filter=soup_filter,
                                   title_filter=title_filter,
                                   info_request=info,
                                   cancellation_token=self._cancellation_token,
                                   search_request=self._search_request,
                                   caller_name=self._caller_name,
                                   request=self._request,
                                   use_thread_for_info=use_thread_for_info,
                                   custom_filter=custom_filter,
                                   url=self._url,
                                   query_type=self.query_type)
        # Adopt the request object the scraper created when we had none.
        if self._request is None and not isinstance(self.scraper, NoResultsScraper):
            self._request = self.scraper._request
        return self.scraper
    def cancel_operations(self):
        # Cooperative cancellation: worker threads poll this token.
        if self.query_type is None:
            self.query_type = 'unknown'
        tools.log('a4kScrapers.%s.%s cancellation requested' % (self.query_type, self._caller_name), 'notice')
        self._cancellation_token.is_cancellation_requested = True
    def optimize_requests(self):
        # Benchmark every configured URL for this scraper and persist them
        # re-ordered by response time (fastest first).
        scraper = self._caller_name
        scraper_module = lambda: None
        if scraper in trackers:
            urls = trackers[scraper]
            scraper_module.sources = self.__class__
        else:
            urls = hosters[scraper]
            scraper_module.source = self.__class__
        url_tests = list()
        for raw_url in urls:
            # NOTE(review): UrlParts is built without default_search here but
            # with it in get_scraper — confirm the namedtuple makes that field
            # optional.
            url = UrlParts(base=raw_url['base'], search=raw_url['search'])
            if scraper in trackers:
                (results, time_ms) = test_torrent(None, scraper_module, scraper, url)
            else:
                (results, time_ms) = test_hoster(None, scraper_module, scraper, url)
            if len(results) > 0:
                url_tests.append((time_ms, raw_url))
        url_tests.sort(key=lambda e: e[0])
        update_urls(scraper, list(map(lambda e: e[1], url_tests)))
        return url_tests
    def is_movie_query(self):
        return self.query_type == 'movie'
    def movie(self, title, year, imdb=None, auto_query=True):
        # Entry point for movie searches; returns the scraper's result list.
        self.query_type = 'movie'
        return self._get_scraper(title) \
                   .movie_query(title,
                                year,
                                caller_name=self._caller_name,
                                auto_query=auto_query,
                                single_query=self._single_query)
    def episode(self, simple_info, all_info, auto_query=True, query_seasons=True, query_show_packs=True):
        # Entry point for episode searches; returns the scraper's result list.
        self.query_type = 'episode'
        return self._get_scraper(simple_info['show_title']) \
                   .episode_query(simple_info,
                                  caller_name=self._caller_name,
                                  single_query=self._single_query,
                                  auto_query=auto_query,
                                  query_seasons=query_seasons,
                                  query_show_packs=query_show_packs)
class DefaultExtraQuerySources(DefaultSources):
    """Sources variant whose scraper issues additional queries, using a
    sequential request object and a thread for info lookups.
    """
    def __init__(self, module_name, single_query=False, request_timeout=None, url=None):
        # Extra-query scrapers must not hammer the site, so requests run
        # sequentially with an optional timeout.
        sequental_request = Request(sequental=True, timeout=request_timeout)
        super(DefaultExtraQuerySources, self).__init__(module_name,
                                                       request=sequental_request,
                                                       single_query=single_query,
                                                       url=url)
    def _get_scraper(self, title, custom_filter=None):
        scraper_impl = GenericExtraQueryTorrentScraper(title,
                                                       context=self,
                                                       request=self._request)
        return super(DefaultExtraQuerySources, self)._get_scraper(title,
                                                                  genericScraper=scraper_impl,
                                                                  use_thread_for_info=True,
                                                                  custom_filter=custom_filter)
class DefaultHosterSources(DefaultSources):
    """Sources variant for file-hoster sites, exposing the movie/tvshow/
    episode/sources interface expected by the hoster framework.
    """
    def movie(self, imdb, title, localtitle, aliases, year):
        # Prepare a simple_info dict for a movie query; returns None when no
        # scraper is available.
        self.start_time = time.time()
        self.query_type = 'movie'
        if isinstance(self._get_scraper(title), NoResultsScraper):
            return None
        self._request = self.scraper._request
        simple_info = {}
        simple_info['title'] = source_utils.clean_title(title)
        simple_info['query_title'] = simple_info['title']
        simple_info['year'] = year
        return simple_info
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        # Prepare a simple_info dict for a show query; returns None when no
        # scraper is available.
        self.start_time = time.time()
        self.query_type = 'episode'
        if isinstance(self._get_scraper(tvshowtitle), NoResultsScraper):
            return None
        self._request = self.scraper._request
        simple_info = {}
        # Strip the year from the show title and collapse whitespace.
        simple_info['show_title'] = re.sub(r'\s+', ' ', source_utils.clean_title(tvshowtitle).replace(year, ''))
        simple_info['query_title'] = simple_info['show_title']
        simple_info['year'] = year
        return simple_info
    def episode(self, simple_info, imdb, tvdb, title, premiered, season, episode):
        # Enrich the show's simple_info with per-episode fields.
        if simple_info is None:
            return None
        simple_info['episode_title'] = title
        simple_info['episode_number'] = episode
        simple_info['season_number'] = season
        simple_info['episode_number_xx'] = episode.zfill(2)
        simple_info['season_number_xx'] = season.zfill(2)
        simple_info['show_aliases'] = []
        return simple_info
    def resolve(self, url):
        # Hoster URLs are already direct; no resolution needed.
        return url
    def sources(self, simple_info, hostDict, hostprDict):
        # Search the hoster site and convert matches into source dicts,
        # keeping only URLs on supported hosts and dropping archives.
        if simple_info is None:
            return []
        supported_hosts = hostDict + hostprDict
        sources = []
        try:
            if self.is_movie_query():
                query = '%s %s' % (source_utils.clean_title(simple_info['title']), simple_info['year'])
            else:
                query = '%s S%sE%s' % (source_utils.clean_title(simple_info['show_title']), simple_info['season_number_xx'], simple_info['episode_number_xx'])
            if len(supported_hosts) > 0:
                url = self.scraper._find_url()
                # Recursive retry: on request failure either deprioritize the
                # mirror or (if the failure was fast) fail over to the next URL.
                def search(url):
                    if self._cancellation_token.is_cancellation_requested:
                        return []
                    try:
                        result = self.search(url, query)
                        if result is None:
                            raise requests.exceptions.RequestException()
                        return result
                    except requests.exceptions.RequestException:
                        if self._request.exc_msg:
                            deprioritize_url(self._caller_name)
                            return []
                        if self._request.request_time < 2:
                            url = self.scraper._find_next_url(url)
                            if url is None:
                                return []
                            return search(url)
                        return []
                hoster_results = search(url) if url is not None else []
            else:
                hoster_results = []
            if self.query_type == 'episode':
                filter_single_episode_fn = source_utils.get_filter_single_episode_fn(simple_info)
            for result in hoster_results:
                quality = source_utils.get_quality(result.title)
                release_title = source_utils.clean_release_title_with_simple_info(result.title, simple_info)
                if self.query_type == 'movie' and not source_utils.filter_movie_title(result.title, release_title, simple_info['title'], simple_info):
                    continue
                if self.query_type == 'episode' and not filter_single_episode_fn(release_title):
                    continue
                for url in result.urls:
                    domain = re.findall(r"https?:\/\/(www\.)?(.*?)\/.*?", url)[0][1]
                    if domain not in supported_hosts:
                        continue
                    # Skip archive/disc images.
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        continue
                    # Prefer the quality encoded in the URL when it's better
                    # than the title-derived default.
                    quality_from_url = source_utils.get_quality(url)
                    if quality_from_url != 'SD':
                        quality = quality_from_url
                    release_title = source_utils.strip_non_ascii_and_unprintable(result.title)
                    if DEV_MODE and len(sources) == 0:
                        tools.log(release_title, 'info')
                    sources.append({
                        'release_title': release_title,
                        'source': domain,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': [],
                        'direct': False,
                        'debridonly': False
                    })
            sources.reverse()
            result_count = len(sources) if len(supported_hosts) > 0 else 'disabled'
            tools.log('a4kScrapers.%s.%s: %s' % (self.query_type, self._caller_name, result_count), 'notice')
            self.end_time = time.time()
            self.time_ms = clock_time_ms(self.start_time, self.end_time)
            tools.log('a4kScrapers.%s.%s: took %s ms' % (self.query_type, self._caller_name, self.time_ms), 'notice')
            return sources
        except:
            traceback.print_exc()
            return sources
    def search(self, hoster_url, query):
        # Overridden by concrete hoster scrapers; default finds nothing.
        return []
class CoreScraper(object):
    """Drives a full torrent-site scrape: URL failover, threaded queries,
    title filtering, per-result info fetching and result sanitization.
    """
    def __init__(self,
                 urls,
                 single_url,
                 request,
                 search_request,
                 soup_filter,
                 title_filter,
                 info_request,
                 cancellation_token,
                 use_thread_for_info,
                 custom_filter,
                 caller_name
    ):
        self._results = []
        self._cache_result = {}
        self._url = single_url
        self._urls = urls
        self._request = request
        self._search_request = search_request
        self._soup_filter = soup_filter
        self._title_filter = title_filter
        self._info = info_request
        self._use_thread_for_info = use_thread_for_info
        self._custom_filter = custom_filter
        self._cancellation_token = cancellation_token
        self.caller_name = caller_name
        self.start_time = None
        self.end_time = None
        self.time_ms = None
        # Pre-built Filter objects pairing a match function with the package
        # type it yields ('single' episode/movie, 'season' pack, 'show' pack).
        # The *_by_simple_info callables are bound later by episode_query.
        filter_movie_title = lambda t, clean_t: source_utils.filter_movie_title(t, clean_t, self.title, self.simple_info)
        self.filter_movie_title = Filter(fn=filter_movie_title, type='single')
        self.filter_single_episode_by_simple_info = None
        filter_single_episode = lambda t, clean_t: self.filter_single_episode_by_simple_info(clean_t)
        self.filter_single_episode = Filter(fn=filter_single_episode, type='single')
        filter_single_special_episode = lambda t, clean_t: source_utils.filter_single_special_episode(self.simple_info, clean_t)
        self.filter_single_special_episode = Filter(fn=filter_single_special_episode, type='single')
        self.filter_season_pack_by_simple_info = None
        filter_season_pack = lambda t, clean_t: self.filter_season_pack_by_simple_info(clean_t)
        self.filter_season_pack = Filter(fn=filter_season_pack, type='season')
        self.filter_show_pack_by_simple_info = None
        filter_show_pack = lambda t, clean_t: self.filter_show_pack_by_simple_info(clean_t)
        self.filter_show_pack = Filter(fn=filter_show_pack, type='show')
    def _search_core(self, query, url=None):
        # Run one search against `url`; on request failure, fail over to the
        # next configured URL recursively. Returns ([SearchResult], used_url).
        if url is None:
            url = self._url
        empty_result = ([], url)
        if self._cancellation_token.is_cancellation_requested:
            return empty_result
        try:
            response = self._search_request(url, query)
            if response is None:
                raise requests.exceptions.RequestException()
            try:
                status_code = response.status_code
            except:
                # Some search_request implementations return parsed content
                # without a status code; treat that as success.
                status_code = 200
            if status_code != 200:
                raise requests.exceptions.RequestException()
            if self._soup_filter is None:
                search_results = response
            else:
                search_results = self._soup_filter(response)
        except requests.exceptions.RequestException:
            if self._request.exc_msg:
                # The request layer flagged a hard failure: mark the mirror
                # for deprioritization instead of failing over.
                self._deprioritize_url = True
                return empty_result
            url = self._find_next_url(url)
            if url is None:
                return empty_result
            return self._search_core(query, url)
        except:
            # PreemptiveCancellation is expected during shutdown; anything
            # else gets its traceback printed.
            exc = traceback.format_exc(limit=1)
            if 'PreemptiveCancellation' not in exc:
                traceback.print_exc()
            return empty_result
        results = []
        for el in search_results:
            try:
                title = self._title_filter(el)
                results.append(SearchResult(el=el, title=title))
            except:
                continue
        return (results, url)
    def _info_core(self, el, torrent, url=None):
        # Fetch hash/magnet details for one search hit and, if a usable hash
        # or magnet is present, append it to the shared results list.
        if url is None:
            url = self._url
        try:
            result = self._info(el, url, torrent)
            if result is not None and (result['hash'] != '' or result.get('magnet', '').startswith('magnet:?')):
                if result['hash'] == '':
                    # Derive the info-hash from the magnet link.
                    result['hash'] = re.findall(r'btih:(.*?)\&', result['magnet'])[0]
                self._results.append(result)
        except:
            pass
    def _get(self, query, filters):
        # Search, apply the filters (or the custom filter) to every hit, and
        # collect info — either inline or on bounded worker threads.
        if self._cancellation_token.is_cancellation_requested:
            return
        (results, url) = self._search_core(query.encode('utf-8'))
        threads = []
        max_threads = 5
        # NOTE(review): `!= None` — PEP 8 prefers `is not None`; kept as-is.
        if self.simple_info.get('show_title', None) != None:
            max_threads = 2
        for result in results:
            el = result.el
            title = result.title
            clean_title = source_utils.clean_release_title_with_simple_info(title, self.simple_info)
            custom_filter = False
            packageType = None
            if self._custom_filter is not None:
                if self._custom_filter.fn(title, clean_title):
                    custom_filter = True
                    packageType = self._custom_filter.type
            # NOTE(review): `filter` shadows the builtin; kept as-is.
            for filter in filters:
                if self._cancellation_token.is_cancellation_requested:
                    return
                if custom_filter or filter.fn(title, clean_title):
                    torrent = {}
                    torrent['scraper'] = self.caller_name
                    torrent['hash'] = ''
                    torrent['package'] = packageType if custom_filter else filter.type
                    torrent['release_title'] = title
                    torrent['size'] = None
                    torrent['seeds'] = None
                    if self._use_thread_for_info:
                        threads.append(threading.Thread(target=self._info_core, args=(el, torrent, url)))
                        if DEV_MODE:
                            # Dev mode processes one result at a time.
                            wait_threads(threads)
                            threads = []
                        if len(threads) >= max_threads:
                            wait_threads(threads)
                            return
                    else:
                        self._info_core(el, torrent, url)
                        if DEV_MODE and len(self._results) > 0 or self._request.exc_msg:
                            return
                    # First matching filter wins for this result.
                    break
        wait_threads(threads)
    def _query_thread(self, query, filters):
        # Wrap one _get invocation in an unstarted thread.
        return threading.Thread(target=self._get, args=(query, filters))
    def _get_cache(self, query):
        # For the special 'cached' scraper only: load results from the cache.
        # Returns True when the caller should use (possibly empty) cached
        # results exclusively, False to scrape normally.
        if self.caller_name != 'cached':
            return False
        cache_result = get_cache(query)
        self._cache_result = cache_result
        if cache_result is None:
            return True
        if not check_cache_result(cache_result):
            return True
        parsed_result = cache_result['parsed_result']
        self._results = parsed_result['cached_results']
        if DEV_MODE and len(self._results) > 1:
            self._results = [self._results[0]]
        def filter_torrent(torrent):
            title = torrent['release_title']
            title = source_utils.clean_release_title_with_simple_info(title, self.simple_info)
            return self.filter_movie_title.fn(None, title)
        self._results = list(filter(filter_torrent, self._results))
        return True
    def _find_next_url(self, curr_url):
        # Return the configured URL after curr_url, or None when exhausted.
        if self._urls is None:
            return None
        url_index = None
        for idx, url in enumerate(self._urls):
            if curr_url.base == url.base:
                url_index = idx
                break
        if url_index is None or len(self._urls) <= url_index + 1:
            return None
        return self._urls[url_index + 1]
    def _find_url(self):
        # Pick the first reachable URL; some sites reject HEAD requests.
        if self._url is not None:
            return self._url
        if self.caller_name in ['showrss', 'skytorrents', 'bt4g', 'btscene', 'glodls', 'ext', 'torrentapi', 'torrentz2', 'scenerls', 'piratebay']:
            self._request.skip_head = True
        return self._request.find_url(self._urls)
    def _sanitize_and_get_status(self):
        # Normalize hashes/magnets, fill missing size/seeds, de-duplicate by
        # info-hash, stamp timing, and return a human-readable status string.
        additional_info = ''
        missing_size = 0
        missing_seeds = 0
        for torrent in self._results:
            torrent['hash'] = torrent['hash'].strip('"\'\\/')
            torrent['magnet'] = 'magnet:?xt=urn:btih:%s&' % torrent['hash']
            torrent['release_title'] = source_utils.strip_non_ascii_and_unprintable(torrent['release_title'])
            if DEV_MODE:
                tools.log(torrent['release_title'], 'notice')
            if torrent['size'] is None:
                missing_size += 1
                if not DEV_MODE:
                    torrent['size'] = 0
            if torrent['seeds'] is None:
                missing_seeds += 1
                if not DEV_MODE:
                    torrent['seeds'] = 0
        if missing_size > 0:
            additional_info += ', %s missing size info' % missing_size
        if missing_seeds > 0:
            additional_info += ', %s missing seeds info' % missing_seeds
        results = {}
        for result in self._results:
            # Base32 hashes (< 40 chars) are converted to hex for keying.
            item_key = result['hash']
            if len(item_key) < 40:
                item_key = b32toHex(item_key)
            # NOTE(review): lookup uses the raw hash while entries are stored
            # under item_key (hex) — base32 duplicates may not be merged;
            # confirm whether this should be results.get(item_key, None).
            item = results.get(result['hash'], None)
            if item is None:
                results[item_key] = result
                continue
            # Merge: keep the best-known size/seeds across duplicates.
            if item['size'] == 0 and result['size'] > 0:
                item['size'] = result['size']
            if item['seeds'] == 0 and result['seeds'] > 0:
                item['seeds'] = result['seeds']
        self._results = list(results.values())
        stats = str(len(self._results))
        if self.caller_name != 'showrss':
            stats += additional_info
        self.end_time = time.time()
        self.time_ms = clock_time_ms(self.start_time, self.end_time)
        return stats
    def _get_movie_results(self):
        # Log movie-query status/timing and return the sanitized results.
        tools.log('a4kScrapers.movie.%s: %s' % (self.caller_name, self._sanitize_and_get_status()), 'notice')
        tools.log('a4kScrapers.movie.%s: took %s ms' % (self.caller_name, self.time_ms), 'notice')
        return self._results
    def _get_episode_results(self):
        # Log episode-query status/timing and return the sanitized results.
        tools.log('a4kScrapers.episode.%s: %s' % (self.caller_name, self._sanitize_and_get_status()), 'notice')
        tools.log('a4kScrapers.episode.%s: took %s ms' % (self.caller_name, self.time_ms), 'notice')
        return self._results
    def _episode(self, query):
        return self._query_thread(query, [self.filter_single_episode])
    def _episode_special(self, query):
        return self._query_thread(query, [self.filter_single_special_episode])
    def _pack_and_season(self, query):
        return self._query_thread(query, [self.filter_show_pack, self.filter_season_pack])
    def movie_query(self, title, year, auto_query=True, single_query=False, caller_name=None):
        """Run the movie search pipeline and return the result list.

        Tries "<title> <year>", a numeral-variant of the title, then the bare
        title as a fallback; respects caching for the 'cached' scraper.
        """
        self.start_time = time.time()
        self._deprioritize_url = False
        if self.caller_name is None:
            if caller_name is None:
                caller_name = get_caller_name()
            self.caller_name = caller_name
        self.title = source_utils.clean_title(title)
        self.year = str(year)
        self.simple_info = {'query_title': self.title, 'year':self.year}
        self.full_query = '%s %s' % (source_utils.strip_accents(title), year)
        try:
            use_cache_only = self._get_cache(self.full_query)
            if use_cache_only:
                return
            self._url = self._find_url()
            if self._url is None:
                return
            movie = lambda query: self._query_thread(query, [self.filter_movie_title])
            if auto_query is False:
                wait_threads([movie('')])
                return
            queries = [movie(self.title + ' ' + self.year)]
            try:
                # Also search a variant with spelled-out numbers replaced
                # ("part two" -> "part 2") when it differs.
                alternative_title = replace_text_with_int(self.title)
                if not single_query and self.title != alternative_title:
                    queries.append(movie(alternative_title + ' ' + self.year))
            except:
                pass
            wait_threads(queries)
            # Last resort: search the bare title without the year.
            if not single_query and len(self._results) == 0 and not self._request.exc_msg:
                wait_threads([movie(self.title)])
        except:
            pass
        finally:
            # NOTE(review): returning from `finally` swallows any in-flight
            # exception (this also makes the early `return`s above yield the
            # sanitized results); kept as-is.
            if self._deprioritize_url:
                deprioritize_url(self.caller_name)
            return self._get_movie_results()
    def episode_query(self, simple_info, auto_query=True, single_query=False, caller_name=None, query_seasons=True, query_show_packs=True):
        """Run the episode search pipeline and return the result list.

        Queries the single episode, season packs and show packs (as enabled),
        handles specials (season 0) and anime absolute numbering, and retries
        with the year stripped from the show title when nothing is found.
        """
        self.start_time = time.time()
        self._deprioritize_url = False
        simple_info['show_title'] = source_utils.clean_title(simple_info['show_title'])
        simple_info['query_title'] = simple_info['show_title']
        simple_info['year'] = str(simple_info['year'])
        if self.caller_name is None:
            if caller_name is None:
                caller_name = get_caller_name()
            self.caller_name = caller_name
        # Add dot-less variants of aliases ("S.W.A.T." -> "SWAT").
        simple_info['show_aliases'] = list(set(simple_info['show_aliases']))
        for alias in simple_info['show_aliases']:
            if '.' in alias:
                simple_info['show_aliases'].append(alias.replace('.', ''))
        self.simple_info = simple_info
        self.filter_single_episode_by_simple_info = source_utils.get_filter_single_episode_fn(simple_info)
        self.filter_season_pack_by_simple_info = source_utils.get_filter_season_pack_fn(simple_info)
        self.filter_show_pack_by_simple_info = source_utils.get_filter_show_pack_fn(simple_info)
        self.year = simple_info['year']
        self.country = simple_info['country']
        self.show_title = simple_info['show_title']
        if self.year in self.show_title:
            # Fallback title with the year removed, tried when the full
            # title finds nothing.
            self.show_title_fallback = re.sub(r'\s+', ' ', self.show_title.replace(self.year, ''))
        else:
            self.show_title_fallback = None
        self.episode_title = source_utils.clean_title(simple_info['episode_title'])
        self.season_x = simple_info['season_number']
        self.episode_x = simple_info['episode_number']
        self.season_xx = self.season_x.zfill(2)
        self.episode_xx = self.episode_x.zfill(2)
        try:
            self._url = self._find_url()
            if self._url is None:
                return
            if auto_query is False:
                wait_threads([self._episode('')])
                return
            def query_results():
                single_episode_query = self.show_title + ' S%sE%s' % (self.season_xx, self.episode_xx)
                season_query = self.show_title + ' S%s' % self.season_xx
                if DEV_MODE:
                    if self.caller_name != 'eztv':
                        wait_threads([ self._pack_and_season(season_query) ])
                    else:
                        wait_threads([ self._episode(single_episode_query) ])
                    return
                # specials
                if self.season_x == '0':
                    wait_threads([self._episode_special(self.show_title + ' %s' % self.episode_title)])
                    return
                queries = [
                    self._episode(single_episode_query)
                ]
                # Airing episodes won't be in packs yet.
                if single_query or simple_info.get('is_airing', False):
                    wait_threads(queries)
                    return
                if query_seasons:
                    queries = queries + [
                        self._pack_and_season(season_query),
                    ]
                    if query_show_packs:
                        queries = queries + [
                            self._pack_and_season(self.show_title + ' Season %s' % self.season_x),
                            self._pack_and_season(self.show_title + ' Season'),
                            self._pack_and_season(self.show_title + ' Complete'),
                        ]
                if simple_info.get('isanime', False) and simple_info.get('absolute_number', None) is not None:
                    queries.insert(0, self._episode(self.show_title + ' %s' % simple_info['absolute_number']))
                if self._use_thread_for_info:
                    # Throttled mode: only run the two most valuable queries.
                    wait_threads([
                        queries[0],
                        queries[2] if simple_info.get('isanime', False) else queries[1]
                    ])
                else:
                    wait_threads(queries)
            query_results()
            if not single_query and len(self._results) == 0 and self.show_title_fallback is not None:
                self.show_title = self.show_title_fallback
                self.simple_info['show_title'] = self.show_title_fallback
                query_results()
        except:
            pass
        finally:
            # NOTE(review): return-in-finally, same caveat as movie_query.
            if self._deprioritize_url:
                deprioritize_url(self.caller_name)
            return self._get_episode_results()
|
collect_training_data.py | __author__ = 'yxt'
import numpy as np
import cv2
import pygame
from pygame.locals import *
import time
import os
import threading
import socketserver
class ControlHandler(socketserver.BaseRequestHandler):
    """Streams the current driving command (the global ``prediction``) to a
    connected client and prints a human-readable label for it."""
    def handle(self):
        print('New connection for Control:', self.client_address)
        # Label per command code; code 5 has no label (same as before).
        labels = {
            0: "stopping",
            1: "going forward",
            2: "going back",
            3: "right",
            4: "left",
            6: "forward_right",
            7: "forward_left",
            8: "back_right",
            9: "back_left",
        }
        try:
            while True:
                current = prediction
                self.request.send(str(current).encode())
                label = labels.get(current)
                if label is not None:
                    print(label)
                # interval between sending commands
                time.sleep(0.2)
        finally:
            print("Connection closed on control thread")
class DataHandler(socketserver.StreamRequestHandler):
    """Collects labelled training frames from a video-streaming client.

    Reads an MJPEG byte stream from the connection, displays each frame,
    captures the human driver's keyboard input via pygame, and records
    (lower-half-of-frame, one-hot-label) pairs.  On exit the pairs are saved
    to ``training_data/<timestamp>.npz``.  The module-level ``prediction``
    variable is updated as a side channel read by ControlHandler.
    """

    def handle(self):
        # One-hot label templates: self.k[i] is the 4-vector for class i.
        # Row usage below suggests 0=left, 1=right, 2=forward, 3=reverse
        # -- TODO confirm against the training script that consumes the .npz.
        self.k = np.zeros((4, 4), 'float')
        for i in range(4):
            self.k[i, i] = 1
        self.send_inst = True
        # A pygame window is required so keyboard events can be captured.
        pygame.init()
        pygame.display.set_mode((250, 250))
        self.collect_image()

    def collect_image(self):
        global prediction
        prediction = 0
        saved_frame = 0
        total_frame = 0
        # collect images for training
        print('Start collecting images...')
        print("Press 'q' or 'x' to finish...")
        e1 = cv2.getTickCount()
        # Placeholder first rows so np.vstack has something to stack onto;
        # they are stripped again before saving (the original code saved the
        # all-zero dummy row into the training set).
        image_array = np.zeros((1, 38400))
        label_array = np.zeros((1, 4), 'float')
        # stream video frames one by one
        try:
            stream_bytes = b' '
            frame = 1
            while self.send_inst:
                chunk = self.rfile.read(1024)
                if not chunk:
                    # Peer closed the stream; without this check the loop
                    # would busy-spin forever on empty reads.
                    break
                stream_bytes += chunk
                # JPEG frames are delimited by SOI (ff d8) / EOI (ff d9).
                first = stream_bytes.find(b'\xff\xd8')
                last = stream_bytes.find(b'\xff\xd9')
                if first != -1 and last != -1:
                    jpg = stream_bytes[first:last + 2]
                    stream_bytes = stream_bytes[last + 2:]
                    # np.fromstring on binary data is deprecated (removed in
                    # NumPy 2.0); frombuffer is the supported equivalent.
                    image = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8),
                                         cv2.IMREAD_GRAYSCALE)
                    # select lower half of the image (the road region)
                    height, width = image.shape
                    roi = image[int(height / 2):height, :]
                    cv2.imshow('image', image)
                    # reshape the roi image into one row array
                    temp_array = roi.reshape(1, 38400).astype(np.float32)
                    frame += 1
                    total_frame += 1
                    # get input from human driver
                    for event in pygame.event.get():
                        if event.type == KEYDOWN:
                            key_input = pygame.key.get_pressed()
                            # complex (diagonal) orders first so they take
                            # precedence over the single-key orders below
                            if key_input[pygame.K_UP] and key_input[pygame.K_RIGHT]:
                                print("Forward Right")
                                image_array = np.vstack((image_array, temp_array))
                                label_array = np.vstack((label_array, self.k[1]))
                                saved_frame += 1
                                prediction = 6
                            elif key_input[pygame.K_UP] and key_input[pygame.K_LEFT]:
                                print("Forward Left")
                                image_array = np.vstack((image_array, temp_array))
                                label_array = np.vstack((label_array, self.k[0]))
                                saved_frame += 1
                                prediction = 7
                            elif key_input[pygame.K_DOWN] and key_input[pygame.K_RIGHT]:
                                # reverse diagonals steer the car but are not
                                # recorded as training samples
                                print("Reverse Right")
                                prediction = 8
                            elif key_input[pygame.K_DOWN] and key_input[pygame.K_LEFT]:
                                print("Reverse Left")
                                prediction = 9
                            # simple orders
                            elif key_input[pygame.K_UP]:
                                print("Forward")
                                saved_frame += 1
                                image_array = np.vstack((image_array, temp_array))
                                label_array = np.vstack((label_array, self.k[2]))
                                prediction = 1
                            elif key_input[pygame.K_DOWN]:
                                print("Reverse")
                                saved_frame += 1
                                image_array = np.vstack((image_array, temp_array))
                                label_array = np.vstack((label_array, self.k[3]))
                                prediction = 2
                            elif key_input[pygame.K_RIGHT]:
                                print("Right")
                                image_array = np.vstack((image_array, temp_array))
                                label_array = np.vstack((label_array, self.k[1]))
                                saved_frame += 1
                                prediction = 3
                            elif key_input[pygame.K_LEFT]:
                                print("Left")
                                image_array = np.vstack((image_array, temp_array))
                                label_array = np.vstack((label_array, self.k[0]))
                                saved_frame += 1
                                prediction = 4
                            elif key_input[pygame.K_x] or key_input[pygame.K_q]:
                                print('exit')
                                self.send_inst = False
                                prediction = 0
                                break
                        elif event.type == pygame.KEYUP:
                            # no key held: stop the car
                            prediction = 0
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        cv2.destroyAllWindows()
                        break
            # save training data as a numpy file
            file_name = str(int(time.time()))
            directory = "training_data"
            if not os.path.exists(directory):
                os.makedirs(directory)
            try:
                # Drop the all-zero placeholder row so it does not pollute
                # the saved training set.
                np.savez(directory + '/' + file_name + '.npz',
                         train=image_array[1:], train_labels=label_array[1:])
            except IOError as e:
                print(e)
            e2 = cv2.getTickCount()
            # calculate streaming duration
            time0 = (e2 - e1) / cv2.getTickFrequency()
            print('Streaming duration:', time0)
            print(image_array.shape)
            print(label_array.shape)
            print('Total frame:', total_frame)
            print('Saved frame:', saved_frame)
            print('Dropped frame', total_frame - saved_frame)
        finally:
            print("Connection closed on data thread")
class Server(object):
    """Runs the data and control TCP servers, each on its own daemon thread."""

    def __init__(self, host, port1, port2):
        self.host = host
        self.port1 = port1  # video/data stream port
        self.port2 = port2  # control command port

    def collect_data(self, host, port):
        # Blocks forever serving DataHandler connections.
        socketserver.TCPServer((host, port), DataHandler).serve_forever()

    def control_stream(self, host, port):
        # Blocks forever serving ControlHandler connections.
        socketserver.TCPServer((host, port), ControlHandler).serve_forever()

    def start(self):
        """Launch both servers on background daemon threads."""
        for target, port in ((self.collect_data, self.port1),
                             (self.control_stream, self.port2)):
            worker = threading.Thread(target=target, args=(self.host, port))
            worker.daemon = True
            worker.start()
        #self.video_stream(self.host, self.port1)
if __name__ == '__main__':
    h, p1, p2 = "192.168.137.1", 8000, 8004
    ts = Server(h, p1, p2)
    ts.start()
    # Both server threads are daemon threads, so without blocking here the
    # main thread (and therefore the whole process) would exit immediately
    # after start() returns, killing the servers before they serve anything.
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass
tracker.py | """
from https://github.com/dmlc/dmlc-core/blob/master/tracker/dmlc_tracker/tracker.py
Tracker script for DMLC
Implements the tracker control protocol
- start dmlc jobs
- start ps scheduler and rabit tracker
- help nodes to establish links with each other
Tianqi Chen
"""
from __future__ import absolute_import
import os
import sys
import socket
import struct
import subprocess
import argparse
import time
import logging
from threading import Thread
class ExSocket(object):
    """
    Extension of socket to handle recv and send of special data

    Wraps a raw socket with exact-size receive plus length-prefixed framing
    for native-endian 4-byte signed integers and UTF-8 strings.
    """

    def __init__(self, sock):
        self.sock = sock

    def recvall(self, nbytes):
        """Receive exactly *nbytes* bytes.

        Raises EOFError if the peer closes the connection first; the
        original implementation spun forever on the empty reads that
        recv() returns after a close.
        """
        res = []
        nread = 0
        while nread < nbytes:
            chunk = self.sock.recv(min(nbytes - nread, 1024))
            if not chunk:
                # recv() returning b'' means the peer closed the socket.
                raise EOFError(
                    'socket closed after %d of %d bytes' % (nread, nbytes))
            nread += len(chunk)
            res.append(chunk)
        return b''.join(res)

    def recvint(self):
        """Receive one native-endian 32-bit signed integer."""
        return struct.unpack('@i', self.recvall(4))[0]

    def sendint(self, n):
        """Send one native-endian 32-bit signed integer."""
        self.sock.sendall(struct.pack('@i', n))

    def sendstr(self, s):
        """Send a string as a length prefix followed by its encoded bytes."""
        self.sendint(len(s))
        self.sock.sendall(s.encode())

    def recvstr(self):
        """Receive a length-prefixed string (inverse of sendstr)."""
        slen = self.recvint()
        return self.recvall(slen).decode()
# magic number used to verify existence of data
kMagic = 0xff99
def get_some_ip(host):
    """Resolve *host* and return the first address reported by getaddrinfo."""
    addrinfo = socket.getaddrinfo(host, None)
    return addrinfo[0][4][0]
def get_family(addr):
    """Return the socket address family (e.g. AF_INET, AF_INET6) of *addr*."""
    family = socket.getaddrinfo(addr, None)[0][0]
    return family
class SlaveEntry(object):
    """Tracker-side record of one connected worker (slave).

    The constructor performs the handshake: it verifies the magic number,
    echoes it back, then reads the worker's requested rank, world size,
    job id and command ('start', 'recover', 'print' or 'shutdown').
    """

    def __init__(self, sock, s_addr):
        slave = ExSocket(sock)
        self.sock = slave
        self.host = get_some_ip(s_addr[0])
        # handshake: both sides exchange kMagic to verify the protocol
        magic = slave.recvint()
        assert magic == kMagic, 'invalid magic number=%d from %s' % (magic, self.host)
        slave.sendint(kMagic)
        self.rank = slave.recvint()        # -1 means "assign me a rank"
        self.world_size = slave.recvint()  # -1 if unknown to the worker
        self.jobid = slave.recvstr()
        self.cmd = slave.recvstr()
        self.wait_accept = 0               # peers that will connect to us
        self.port = None                   # worker's listening port (set later)

    def decide_rank(self, job_map):
        """Pick the worker's rank: explicit rank first, then a previous
        assignment for the same job id, else -1 (needs a fresh rank)."""
        if self.rank >= 0:
            return self.rank
        if self.jobid != 'NULL' and self.jobid in job_map:
            return job_map[self.jobid]
        return -1

    def assign_rank(self, rank, wait_conn, tree_map, parent_map, ring_map):
        """Send the worker its rank and tree/ring neighbour links, then
        negotiate which neighbours it must connect to vs. wait for.

        Returns the list of ranks that can be removed from *wait_conn*
        because all of their expected incoming connections are now set up.
        NOTE: the send/recv sequence below mirrors the worker-side protocol
        exactly; the order of the calls must not change.
        """
        self.rank = rank
        nnset = set(tree_map[rank])
        rprev, rnext = ring_map[rank]
        self.sock.sendint(rank)
        # send parent rank
        self.sock.sendint(parent_map[rank])
        # send world size
        self.sock.sendint(len(tree_map))
        self.sock.sendint(len(nnset))
        # send the rprev and next link
        for r in nnset:
            self.sock.sendint(r)
        # send prev link
        if rprev != -1 and rprev != rank:
            nnset.add(rprev)
            self.sock.sendint(rprev)
        else:
            self.sock.sendint(-1)
        # send next link
        if rnext != -1 and rnext != rank:
            nnset.add(rnext)
            self.sock.sendint(rnext)
        else:
            self.sock.sendint(-1)
        while True:
            # the worker reports which neighbours it already has links to
            ngood = self.sock.recvint()
            goodset = set([])
            for _ in range(ngood):
                goodset.add(self.sock.recvint())
            assert goodset.issubset(nnset)
            badset = nnset - goodset
            # conset: missing neighbours that are already waiting for
            # connections, so this worker can dial them directly
            conset = []
            for r in badset:
                if r in wait_conn:
                    conset.append(r)
            self.sock.sendint(len(conset))
            # remaining missing neighbours will connect to this worker
            self.sock.sendint(len(badset) - len(conset))
            for r in conset:
                self.sock.sendstr(wait_conn[r].host)
                self.sock.sendint(wait_conn[r].port)
                self.sock.sendint(r)
            nerr = self.sock.recvint()
            if nerr != 0:
                # some outgoing connections failed; retry the negotiation
                continue
            self.port = self.sock.recvint()
            rmset = []
            # all connections were successfully set up
            for r in conset:
                wait_conn[r].wait_accept -= 1
                if wait_conn[r].wait_accept == 0:
                    rmset.append(r)
            for r in rmset:
                wait_conn.pop(r, None)
            self.wait_accept = len(badset) - len(conset)
            return rmset
class RabitTracker(object):
    """
    tracker for rabit

    Listens for worker connections, assigns ranks, and distributes the
    binary-tree / ring topology used by rabit for allreduce and recovery.
    """

    def __init__(self, hostIP, nslave, port=9091, port_end=9999):
        sock = socket.socket(get_family(hostIP), socket.SOCK_STREAM)
        # probe for a free port in [port, port_end)
        for port in range(port, port_end):
            try:
                sock.bind((hostIP, port))
                self.port = sock.getsockname()[1]
                break
            except socket.error as e:
                # errno 98 (Linux) / 48 (macOS): address in use; try next
                if e.errno in [98, 48]:
                    continue
                else:
                    raise
        sock.listen(256)
        self.sock = sock
        self.hostIP = hostIP
        self.thread = None
        self.start_time = None
        self.end_time = None
        self.nslave = nslave
        logging.info('start listen on %s:%d', hostIP, self.port)

    def __del__(self):
        self.sock.close()

    @staticmethod
    def get_neighbor(rank, nslave):
        """Return the 1-based binary-heap neighbours (parent, children) of
        *rank*, converted back to 0-based ranks."""
        rank = rank + 1
        ret = []
        if rank > 1:
            ret.append(rank // 2 - 1)
        if rank * 2 - 1 < nslave:
            ret.append(rank * 2 - 1)
        if rank * 2 < nslave:
            ret.append(rank * 2)
        return ret

    def slave_envs(self):
        """
        get environment variables for slaves
        can be passed in as args or envs
        """
        return {'DMLC_TRACKER_URI': self.hostIP,
                'DMLC_TRACKER_PORT': self.port}

    def get_tree(self, nslave):
        """Build the binary-tree topology: (tree_map, parent_map)."""
        tree_map = {}
        parent_map = {}
        for r in range(nslave):
            tree_map[r] = self.get_neighbor(r, nslave)
            parent_map[r] = (r + 1) // 2 - 1
        return tree_map, parent_map

    def find_share_ring(self, tree_map, parent_map, r):
        """
        get a ring structure that tends to share nodes with the tree
        return a list starting from r
        """
        nset = set(tree_map[r])
        cset = nset - set([parent_map[r]])
        if len(cset) == 0:
            return [r]
        rlst = [r]
        cnt = 0
        for v in cset:
            vlst = self.find_share_ring(tree_map, parent_map, v)
            cnt += 1
            # reverse the last child's sub-ring so the ring closes back
            # towards this node
            if cnt == len(cset):
                vlst.reverse()
            rlst += vlst
        return rlst

    def get_ring(self, tree_map, parent_map):
        """
        get a ring connection used to recover local data
        """
        assert parent_map[0] == -1
        rlst = self.find_share_ring(tree_map, parent_map, 0)
        assert len(rlst) == len(tree_map)
        ring_map = {}
        nslave = len(tree_map)
        for r in range(nslave):
            rprev = (r + nslave - 1) % nslave
            rnext = (r + 1) % nslave
            ring_map[rlst[r]] = (rlst[rprev], rlst[rnext])
        return ring_map

    def get_link_map(self, nslave):
        """
        get the link map, this is a bit hacky, call for better algorithm
        to place similar nodes together

        Relabels ranks so that consecutive ranks are adjacent on the ring,
        then remaps the tree and parent maps accordingly.
        """
        tree_map, parent_map = self.get_tree(nslave)
        ring_map = self.get_ring(tree_map, parent_map)
        rmap = {0: 0}
        k = 0
        for i in range(nslave - 1):
            k = ring_map[k][1]
            rmap[k] = i + 1
        ring_map_ = {}
        tree_map_ = {}
        parent_map_ = {}
        for k, v in ring_map.items():
            ring_map_[rmap[k]] = (rmap[v[0]], rmap[v[1]])
        for k, v in tree_map.items():
            tree_map_[rmap[k]] = [rmap[x] for x in v]
        for k, v in parent_map.items():
            if k != 0:
                parent_map_[rmap[k]] = rmap[v]
            else:
                parent_map_[rmap[k]] = -1
        return tree_map_, parent_map_, ring_map_

    def accept_slaves(self, nslave):
        """Main tracker loop: accept worker connections until all *nslave*
        workers have reported shutdown."""
        # set of nodes that finished the job
        shutdown = {}
        # set of nodes that is waiting for connections
        wait_conn = {}
        # maps job id to rank
        job_map = {}
        # list of workers that is pending to be assigned rank
        pending = []
        # lazy initialize tree_map
        tree_map = None
        logging.info("Waiting for workers")
        while len(shutdown) != nslave:
            fd, s_addr = self.sock.accept()
            logging.info("New worker")
            s = SlaveEntry(fd, s_addr)
            if s.cmd == 'print':
                msg = s.sock.recvstr()
                logging.info(msg.strip())
                continue
            if s.cmd == 'shutdown':
                assert s.rank >= 0 and s.rank not in shutdown
                assert s.rank not in wait_conn
                shutdown[s.rank] = s
                logging.debug('Recieve %s signal from %d', s.cmd, s.rank)
                continue
            assert s.cmd == 'start' or s.cmd == 'recover'
            # lazily initialize the slaves
            if tree_map is None:
                assert s.cmd == 'start'
                # the first worker may override the world size
                if s.world_size > 0:
                    nslave = s.world_size
                tree_map, parent_map, ring_map = self.get_link_map(nslave)
                # set of nodes that is pending for getting up
                todo_nodes = list(range(nslave))
            else:
                assert s.world_size == -1 or s.world_size == nslave
            if s.cmd == 'recover':
                assert s.rank >= 0
            rank = s.decide_rank(job_map)
            # batch assignment of ranks: collect all fresh workers first,
            # then assign ranks sorted by host so neighbours land together
            if rank == -1:
                assert len(todo_nodes) != 0
                pending.append(s)
                if len(pending) == len(todo_nodes):
                    pending.sort(key=lambda x: x.host)
                    for s in pending:
                        rank = todo_nodes.pop(0)
                        if s.jobid != 'NULL':
                            job_map[s.jobid] = rank
                        s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
                        if s.wait_accept > 0:
                            wait_conn[rank] = s
                        logging.debug('Recieve %s signal from %s; assign rank %d',
                                      s.cmd, s.host, s.rank)
                if len(todo_nodes) == 0:
                    logging.info('@tracker All of %d nodes getting started', nslave)
                    self.start_time = time.time()
            else:
                # worker already knows (or recovers) its rank
                s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
                logging.debug('Recieve %s signal from %d', s.cmd, s.rank)
                if s.wait_accept > 0:
                    wait_conn[rank] = s
        logging.info('@tracker All nodes finishes job')
        self.end_time = time.time()
        logging.info('@tracker %s secs between node start and job finish',
                     str(self.end_time - self.start_time))

    def start(self, nslave):
        """Run accept_slaves on a background daemon thread."""
        def run():
            self.accept_slaves(nslave)
        self.thread = Thread(target=run, args=())
        # Thread.setDaemon()/isAlive() were removed in Python 3.9; use the
        # daemon attribute and is_alive() instead.
        self.thread.daemon = True
        self.thread.start()

    def join(self):
        # join with a timeout in a loop so KeyboardInterrupt still works
        while self.thread.is_alive():
            self.thread.join(100)

    def alive(self):
        return self.thread.is_alive()
class PSTracker(object):
    """
    Tracker module for PS
    """

    def __init__(self, hostIP, cmd, port=9091, port_end=9999, envs=None):
        """
        Starts the PS scheduler

        If *cmd* is None the tracker is an inert shell: join() is a no-op,
        alive() is False and slave_envs() is empty.
        """
        self.cmd = cmd
        if cmd is None:
            return
        envs = {} if envs is None else envs
        self.hostIP = hostIP
        sock = socket.socket(get_family(hostIP), socket.SOCK_STREAM)
        # probe for a free port; the socket is only used to find the port
        # number, which is handed to the scheduler subprocess via env vars
        for port in range(port, port_end):
            try:
                sock.bind(('', port))
                self.port = port
                sock.close()
                break
            except socket.error:
                continue
        env = os.environ.copy()
        env['DMLC_ROLE'] = 'scheduler'
        env['DMLC_PS_ROOT_URI'] = str(self.hostIP)
        env['DMLC_PS_ROOT_PORT'] = str(self.port)
        for k, v in envs.items():
            env[k] = str(v)
        self.thread = Thread(
            target=(lambda: subprocess.check_call(self.cmd, env=env,
                                                  shell=True, executable='/bin/bash')),
            args=())
        # Thread.setDaemon() was removed in Python 3.9; set the attribute.
        self.thread.daemon = True
        self.thread.start()

    def join(self):
        if self.cmd is not None:
            # isAlive() was removed in Python 3.9; use is_alive()
            while self.thread.is_alive():
                self.thread.join(100)

    def slave_envs(self):
        """Environment variables the worker processes need to find the PS."""
        if self.cmd is None:
            return {}
        else:
            return {'DMLC_PS_ROOT_URI': self.hostIP,
                    'DMLC_PS_ROOT_PORT': self.port}

    def alive(self):
        if self.cmd is not None:
            return self.thread.is_alive()
        else:
            return False
def get_host_ip(hostIP=None):
    """Resolve the tracker host IP.

    *hostIP* may be None/'auto' (same as 'ip'), 'dns' (use the FQDN),
    'ip' (resolve the FQDN/hostname, falling back to an outbound-interface
    probe for loopback results), or an explicit address, which is returned
    unchanged.
    """
    if hostIP is None or hostIP == 'auto':
        hostIP = 'ip'
    if hostIP == 'dns':
        hostIP = socket.getfqdn()
    elif hostIP == 'ip':
        from socket import gaierror
        try:
            hostIP = socket.gethostbyname(socket.getfqdn())
        except gaierror:
            # logging.warn is a deprecated alias of logging.warning
            logging.warning('gethostbyname(socket.getfqdn()) failed... trying on hostname()')
            hostIP = socket.gethostbyname(socket.gethostname())
        if hostIP.startswith("127."):
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            try:
                # doesn't have to be reachable: connecting a UDP socket only
                # selects the outbound interface, no packet is sent
                s.connect(('10.255.255.255', 1))
                hostIP = s.getsockname()[0]
            finally:
                # the original leaked this socket
                s.close()
    return hostIP
def submit(nworker, nserver, fun_submit, hostIP='auto', pscmd=None):
    """Launch a DMLC job.

    Starts the rabit tracker (when nserver == 0) or the PS scheduler (when
    nserver > 0), calls *fun_submit* to launch the worker processes with the
    tracker environment, then waits for the tracker to finish.
    """
    if nserver == 0:
        pscmd = None
    envs = {'DMLC_NUM_WORKER': nworker,
            'DMLC_NUM_SERVER': nserver}
    hostIP = get_host_ip(hostIP)
    if nserver == 0:
        tracker = RabitTracker(hostIP=hostIP, nslave=nworker)
        envs.update(tracker.slave_envs())
        tracker.start(nworker)
    else:
        tracker = PSTracker(hostIP=hostIP, cmd=pscmd, envs=envs)
        envs.update(tracker.slave_envs())
    # only submit the workers if the tracker actually came up
    if tracker.alive():
        fun_submit(nworker, nserver, envs)
    tracker.join()
def start_rabit_tracker(args):
    """Standalone function to start rabit tracker.

    Parameters
    ----------
    args: arguments to start the rabit tracker.
    """
    envs = {'DMLC_NUM_WORKER': args.num_workers,
            'DMLC_NUM_SERVER': args.num_servers}
    rabit = RabitTracker(hostIP=get_host_ip(args.host_ip), nslave=args.num_workers)
    envs.update(rabit.slave_envs())
    rabit.start(args.num_workers)
    # write the configuration block to stdout for the launcher to parse
    lines = ['DMLC_TRACKER_ENV_START\n']
    for k, v in envs.items():
        lines.append('%s=%s\n' % (k, str(v)))
    lines.append('DMLC_TRACKER_ENV_END\n')
    sys.stdout.write(''.join(lines))
    sys.stdout.flush()
    rabit.join()
def main():
    """Main function if tracker is executed in standalone mode."""
    parser = argparse.ArgumentParser(description='Rabit Tracker start.')
    parser.add_argument('--num-workers', required=True, type=int,
                        help='Number of worker proccess to be launched.')
    parser.add_argument('--num-servers', default=0, type=int,
                        help='Number of server process to be launched. Only used in PS jobs.')
    parser.add_argument('--host-ip', default=None, type=str,
                        help=('Host IP addressed, this is only needed ' +
                              'if the host IP cannot be automatically guessed.'))
    parser.add_argument('--log-level', default='INFO', type=str,
                        choices=['INFO', 'DEBUG'],
                        help='Logging level of the logger.')
    args = parser.parse_args()
    # Map the level name to a logging constant (argparse already restricts
    # the choices, but keep the explicit failure for safety).
    level_by_name = {'INFO': logging.INFO, 'DEBUG': logging.DEBUG}
    if args.log_level not in level_by_name:
        raise RuntimeError("Unknown logging level %s" % args.log_level)
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        level=level_by_name[args.log_level])
    if args.num_servers == 0:
        start_rabit_tracker(args)
    else:
        raise RuntimeError("Do not yet support start ps tracker in standalone mode.")
# Allow running the tracker standalone: python tracker.py --num-workers N
if __name__ == "__main__":
    main()
|
data_plane_test.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for apache_beam.runners.worker.data_plane."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import sys
import threading
import unittest
from concurrent import futures
import grpc
from future.utils import raise_
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker.worker_id_interceptor import WorkerIdInterceptor
def timeout(timeout_secs):
    """Decorator that runs the wrapped function on a daemon thread and fails
    (via assert) if it does not finish within *timeout_secs* seconds.

    Any exception raised by the function is re-raised in the caller's thread
    with the worker thread's original traceback.
    """
    def decorate(fn):
        def wrapper(*args, **kwargs):
            # Per-call state.  The original shared one exc_info list across
            # all calls of the wrapper, so a failure in one call was
            # spuriously re-raised by every later (successful) call.
            exc_info = []

            def call_fn():
                try:
                    fn(*args, **kwargs)
                except:  # pylint: disable=bare-except
                    exc_info[:] = sys.exc_info()

            thread = threading.Thread(target=call_fn)
            thread.daemon = True
            thread.start()
            thread.join(timeout_secs)
            if exc_info:
                _, value, tb = exc_info  # pylint: disable=unbalanced-tuple-unpacking
                # Python 3 re-raise preserving the traceback (replaces the
                # third-party future.utils.raise_ helper).
                raise value.with_traceback(tb)
            assert not thread.is_alive(), 'timed out after %s seconds' % timeout_secs
        return wrapper
    return decorate
class DataChannelTest(unittest.TestCase):
    """Exercises the worker data plane over both gRPC and in-memory channels."""

    @timeout(5)
    def test_grpc_data_channel(self):
        # Server side: one data servicer with a per-worker connection.
        data_servicer = data_plane.BeamFnDataServicer()
        worker_id = 'worker_0'
        data_channel_service = \
            data_servicer.get_conn_by_worker_id(worker_id)
        server = grpc.server(futures.ThreadPoolExecutor(max_workers=2))
        beam_fn_api_pb2_grpc.add_BeamFnDataServicer_to_server(
            data_servicer, server)
        # port 0: let the OS pick a free port
        test_port = server.add_insecure_port('[::]:0')
        server.start()
        grpc_channel = grpc.insecure_channel('localhost:%s' % test_port)
        # Add workerId to the grpc channel
        grpc_channel = grpc.intercept_channel(
            grpc_channel, WorkerIdInterceptor(worker_id))
        data_channel_stub = beam_fn_api_pb2_grpc.BeamFnDataStub(grpc_channel)
        data_channel_client = data_plane.GrpcClientDataChannel(data_channel_stub)
        try:
            self._data_channel_test(data_channel_service, data_channel_client)
        finally:
            # Close both ends before waiting so neither side blocks forever.
            data_channel_client.close()
            data_channel_service.close()
            data_channel_client.wait()
            data_channel_service.wait()

    def test_in_memory_data_channel(self):
        channel = data_plane.InMemoryDataChannel()
        self._data_channel_test(channel, channel.inverse())

    def _data_channel_test(self, server, client):
        # The channel contract is symmetric: verify both directions.
        self._data_channel_test_one_direction(server, client)
        self._data_channel_test_one_direction(client, server)

    def _data_channel_test_one_direction(self, from_channel, to_channel):
        # Helper: write one data payload for (instruction, transform), then
        # close the stream so the reader sees end-of-data.
        def send(instruction_id, transform_id, data):
            stream = from_channel.output_stream(instruction_id, transform_id)
            stream.write(data)
            stream.close()
        transform_1 = '1'
        transform_2 = '2'
        # Single write.
        send('0', transform_1, b'abc')
        self.assertEqual(
            list(to_channel.input_elements('0', [transform_1])),
            [beam_fn_api_pb2.Elements.Data(
                instruction_reference='0',
                ptransform_id=transform_1,
                data=b'abc')])
        # Multiple interleaved writes to multiple instructions.
        send('1', transform_1, b'abc')
        send('2', transform_1, b'def')
        # Reading instruction '1' must not consume instruction '2''s data.
        self.assertEqual(
            list(to_channel.input_elements('1', [transform_1])),
            [beam_fn_api_pb2.Elements.Data(
                instruction_reference='1',
                ptransform_id=transform_1,
                data=b'abc')])
        send('2', transform_2, b'ghi')
        self.assertEqual(
            list(to_channel.input_elements('2', [transform_1, transform_2])),
            [beam_fn_api_pb2.Elements.Data(
                instruction_reference='2',
                ptransform_id=transform_1,
                data=b'def'),
             beam_fn_api_pb2.Elements.Data(
                instruction_reference='2',
                ptransform_id=transform_2,
                data=b'ghi')])
# Run the data plane tests directly with verbose logging.
if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    unittest.main()
|
mmalobj.py | # vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python header conversion
# Copyright (c) 2013-2017 Dave Jones <dave@waveform.org.uk>
#
# Original headers
# Copyright (c) 2012, Broadcom Europe Ltd
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import io
import ctypes as ct
import warnings
import weakref
from threading import Thread, Event
from collections import namedtuple
from fractions import Fraction
from itertools import cycle
from functools import reduce
from operator import mul
from . import bcm_host, mmal
from .streams import BufferIO
from .exc import (
mmal_check,
PiCameraValueError,
PiCameraRuntimeError,
PiCameraMMALError,
PiCameraPortDisabled,
PiCameraDeprecated,
)
# Old firmwares confuse the RGB24 and BGR24 encodings. This flag tracks whether
# the order needs fixing (it is set during MMALCamera.__init__).
FIX_RGB_BGR_ORDER = None
# Mapping of parameters to the C-structure they expect / return. If a parameter
# does not appear in this mapping, it cannot be queried / set with the
# MMALControlPort.params attribute.
PARAM_TYPES = {
mmal.MMAL_PARAMETER_ALGORITHM_CONTROL: mmal.MMAL_PARAMETER_ALGORITHM_CONTROL_T,
mmal.MMAL_PARAMETER_ANALOG_GAIN: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_ANNOTATE: None, # adjusted by MMALCamera.annotate_rev
mmal.MMAL_PARAMETER_ANTISHAKE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_AUDIO_LATENCY_TARGET: mmal.MMAL_PARAMETER_AUDIO_LATENCY_TARGET_T,
mmal.MMAL_PARAMETER_AWB_MODE: mmal.MMAL_PARAMETER_AWBMODE_T,
mmal.MMAL_PARAMETER_BLACK_LEVEL: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_BRIGHTNESS: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_BUFFER_FLAG_FILTER: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_BUFFER_REQUIREMENTS: mmal.MMAL_PARAMETER_BUFFER_REQUIREMENTS_T,
mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_CAMERA_CLOCKING_MODE: mmal.MMAL_PARAMETER_CAMERA_CLOCKING_MODE_T,
mmal.MMAL_PARAMETER_CAMERA_CONFIG: mmal.MMAL_PARAMETER_CAMERA_CONFIG_T,
mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_CAMERA_INFO: None, # adjusted by MMALCameraInfo.info_rev
mmal.MMAL_PARAMETER_CAMERA_INTERFACE: mmal.MMAL_PARAMETER_CAMERA_INTERFACE_T,
mmal.MMAL_PARAMETER_CAMERA_ISP_BLOCK_OVERRIDE: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_CAMERA_MIN_ISO: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_CAMERA_NUM: mmal.MMAL_PARAMETER_INT32_T,
mmal.MMAL_PARAMETER_CAMERA_RX_CONFIG: mmal.MMAL_PARAMETER_CAMERA_RX_CONFIG_T,
mmal.MMAL_PARAMETER_CAMERA_RX_TIMING: mmal.MMAL_PARAMETER_CAMERA_RX_TIMING_T,
mmal.MMAL_PARAMETER_CAMERA_SETTINGS: mmal.MMAL_PARAMETER_CAMERA_SETTINGS_T,
mmal.MMAL_PARAMETER_CAMERA_USE_CASE: mmal.MMAL_PARAMETER_CAMERA_USE_CASE_T,
mmal.MMAL_PARAMETER_CAPTURE_EXPOSURE_COMP: mmal.MMAL_PARAMETER_INT32_T,
mmal.MMAL_PARAMETER_CAPTURE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_CAPTURE_MODE: mmal.MMAL_PARAMETER_CAPTUREMODE_T,
mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_CAPTURE_STATUS: mmal.MMAL_PARAMETER_CAPTURE_STATUS_T,
mmal.MMAL_PARAMETER_CCM_SHIFT: mmal.MMAL_PARAMETER_INT32_T,
mmal.MMAL_PARAMETER_CHANGE_EVENT_REQUEST: mmal.MMAL_PARAMETER_CHANGE_EVENT_REQUEST_T,
mmal.MMAL_PARAMETER_CLOCK_ACTIVE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_CLOCK_DISCONT_THRESHOLD: mmal.MMAL_PARAMETER_CLOCK_DISCONT_THRESHOLD_T,
mmal.MMAL_PARAMETER_CLOCK_ENABLE_BUFFER_INFO: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_CLOCK_FRAME_RATE: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_CLOCK_LATENCY: mmal.MMAL_PARAMETER_CLOCK_LATENCY_T,
mmal.MMAL_PARAMETER_CLOCK_REQUEST_THRESHOLD: mmal.MMAL_PARAMETER_CLOCK_REQUEST_THRESHOLD_T,
mmal.MMAL_PARAMETER_CLOCK_SCALE: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_CLOCK_TIME: mmal.MMAL_PARAMETER_INT64_T,
mmal.MMAL_PARAMETER_CLOCK_UPDATE_THRESHOLD: mmal.MMAL_PARAMETER_CLOCK_UPDATE_THRESHOLD_T,
mmal.MMAL_PARAMETER_COLOUR_EFFECT: mmal.MMAL_PARAMETER_COLOURFX_T,
mmal.MMAL_PARAMETER_CONTRAST: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_CORE_STATISTICS: mmal.MMAL_PARAMETER_CORE_STATISTICS_T,
# mmal.MMAL_PARAMETER_CROP: mmal.MMAL_PARAMETER_CROP_T,
mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS: mmal.MMAL_PARAMETER_AWB_GAINS_T,
# mmal.MMAL_PARAMETER_CUSTOM_CCM: mmal.MMAL_PARAMETER_CUSTOM_CCM_T,
mmal.MMAL_PARAMETER_DIGITAL_GAIN: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_DISPLAYREGION: mmal.MMAL_DISPLAYREGION_T,
mmal.MMAL_PARAMETER_DPF_CONFIG: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION: mmal.MMAL_PARAMETER_DRC_T,
mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_EXIF_DISABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_EXIF: mmal.MMAL_PARAMETER_EXIF_T,
mmal.MMAL_PARAMETER_EXP_METERING_MODE: mmal.MMAL_PARAMETER_EXPOSUREMETERINGMODE_T,
mmal.MMAL_PARAMETER_EXPOSURE_COMP: mmal.MMAL_PARAMETER_INT32_T,
mmal.MMAL_PARAMETER_EXPOSURE_MODE: mmal.MMAL_PARAMETER_EXPOSUREMODE_T,
mmal.MMAL_PARAMETER_EXTRA_BUFFERS: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_FIELD_OF_VIEW: mmal.MMAL_PARAMETER_FIELD_OF_VIEW_T,
mmal.MMAL_PARAMETER_FLASH: mmal.MMAL_PARAMETER_FLASH_T,
mmal.MMAL_PARAMETER_FLASH_REQUIRED: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_FLASH_SELECT: mmal.MMAL_PARAMETER_FLASH_SELECT_T,
mmal.MMAL_PARAMETER_FLICKER_AVOID: mmal.MMAL_PARAMETER_FLICKERAVOID_T,
mmal.MMAL_PARAMETER_FOCUS: mmal.MMAL_PARAMETER_FOCUS_T,
mmal.MMAL_PARAMETER_FOCUS_REGIONS: mmal.MMAL_PARAMETER_FOCUS_REGIONS_T,
mmal.MMAL_PARAMETER_FOCUS_STATUS: mmal.MMAL_PARAMETER_FOCUS_STATUS_T,
mmal.MMAL_PARAMETER_FPS_RANGE: mmal.MMAL_PARAMETER_FPS_RANGE_T,
mmal.MMAL_PARAMETER_FRAME_RATE: mmal.MMAL_PARAMETER_RATIONAL_T, # actually mmal.MMAL_PARAMETER_FRAME_RATE_T but this only contains a rational anyway...
mmal.MMAL_PARAMETER_IMAGE_EFFECT: mmal.MMAL_PARAMETER_IMAGEFX_T,
mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS: mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T,
mmal.MMAL_PARAMETER_INPUT_CROP: mmal.MMAL_PARAMETER_INPUT_CROP_T,
mmal.MMAL_PARAMETER_INTRAPERIOD: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_ISO: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_JPEG_ATTACH_LOG: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_JPEG_Q_FACTOR: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_JPEG_RESTART_INTERVAL: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_LENS_SHADING_OVERRIDE: mmal.MMAL_PARAMETER_LENS_SHADING_T,
mmal.MMAL_PARAMETER_LOCKSTEP_ENABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_LOGGING: mmal.MMAL_PARAMETER_LOGGING_T,
mmal.MMAL_PARAMETER_MB_ROWS_PER_SLICE: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_MEM_USAGE: mmal.MMAL_PARAMETER_MEM_USAGE_T,
mmal.MMAL_PARAMETER_MINIMISE_FRAGMENTATION: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_MIRROR: mmal.MMAL_PARAMETER_UINT32_T, # actually mmal.MMAL_PARAMETER_MIRROR_T but this just contains a uint32
mmal.MMAL_PARAMETER_NALUNITFORMAT: mmal.MMAL_PARAMETER_VIDEO_NALUNITFORMAT_T,
mmal.MMAL_PARAMETER_NO_IMAGE_PADDING: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_OUTPUT_SHIFT: mmal.MMAL_PARAMETER_INT32_T,
mmal.MMAL_PARAMETER_POWERMON_ENABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_PRIVACY_INDICATOR: mmal.MMAL_PARAMETER_PRIVACY_INDICATOR_T,
mmal.MMAL_PARAMETER_PROFILE: mmal.MMAL_PARAMETER_VIDEO_PROFILE_T,
mmal.MMAL_PARAMETER_RATECONTROL: mmal.MMAL_PARAMETER_VIDEO_RATECONTROL_T,
mmal.MMAL_PARAMETER_REDEYE: mmal.MMAL_PARAMETER_REDEYE_T,
# mmal.MMAL_PARAMETER_RESIZE_PARAMS: mmal.MMAL_PARAMETER_RESIZE_T,
mmal.MMAL_PARAMETER_ROTATION: mmal.MMAL_PARAMETER_INT32_T,
mmal.MMAL_PARAMETER_SATURATION: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_SEEK: mmal.MMAL_PARAMETER_SEEK_T,
mmal.MMAL_PARAMETER_SENSOR_INFORMATION: mmal.MMAL_PARAMETER_SENSOR_INFORMATION_T,
mmal.MMAL_PARAMETER_SHARPNESS: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_SHUTTER_SPEED: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_STATISTICS: mmal.MMAL_PARAMETER_STATISTICS_T,
mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE: mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T,
mmal.MMAL_PARAMETER_STILLS_DENOISE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_SUPPORTED_ENCODINGS: mmal.MMAL_PARAMETER_ENCODING_T,
mmal.MMAL_PARAMETER_SUPPORTED_PROFILES: mmal.MMAL_PARAMETER_VIDEO_PROFILE_T,
mmal.MMAL_PARAMETER_SW_SATURATION_DISABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_SW_SHARPEN_DISABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_SYSTEM_TIME: mmal.MMAL_PARAMETER_UINT64_T,
mmal.MMAL_PARAMETER_THUMBNAIL_CONFIGURATION: mmal.MMAL_PARAMETER_THUMBNAIL_CONFIG_T,
mmal.MMAL_PARAMETER_URI: mmal.MMAL_PARAMETER_URI_T,
mmal.MMAL_PARAMETER_USE_STC: mmal.MMAL_PARAMETER_CAMERA_STC_MODE_T,
mmal.MMAL_PARAMETER_VIDEO_ALIGN_HORIZ: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ALIGN_VERT: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_BIT_RATE: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_DENOISE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_DROPPABLE_PFRAMES: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_EEDE_ENABLE: mmal.MMAL_PARAMETER_VIDEO_EEDE_ENABLE_T,
mmal.MMAL_PARAMETER_VIDEO_EEDE_LOSSRATE: mmal.MMAL_PARAMETER_VIDEO_EEDE_LOSSRATE_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_FRAME_LIMIT_BITS: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_INITIAL_QUANT: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_INLINE_VECTORS: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_MAX_QUANT: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_MIN_QUANT: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_PEAK_RATE: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_QP_P: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_RC_MODEL: mmal.MMAL_PARAMETER_VIDEO_ENCODE_RC_MODEL_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_RC_SLICE_DQUANT: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_SEI_ENABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_SPS_TIMING: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_FRAME_RATE: mmal.MMAL_PARAMETER_RATIONAL_T, # actually mmal.MMAL_PARAMETER_FRAME_RATE_T but this only contains a rational anyway...
mmal.MMAL_PARAMETER_VIDEO_IMMUTABLE_INPUT: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_INTERLACE_TYPE: mmal.MMAL_PARAMETER_VIDEO_INTERLACE_TYPE_T,
mmal.MMAL_PARAMETER_VIDEO_INTERPOLATE_TIMESTAMPS: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_INTRA_REFRESH: mmal.MMAL_PARAMETER_VIDEO_INTRA_REFRESH_T,
mmal.MMAL_PARAMETER_VIDEO_LEVEL_EXTENSION: mmal.MMAL_PARAMETER_VIDEO_LEVEL_EXTENSION_T,
mmal.MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_RENDER_STATS: mmal.MMAL_PARAMETER_VIDEO_RENDER_STATS_T,
mmal.MMAL_PARAMETER_VIDEO_REQUEST_I_FRAME: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_STABILISATION: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_ZERO_COPY: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_ZERO_SHUTTER_LAG: mmal.MMAL_PARAMETER_ZEROSHUTTERLAG_T,
mmal.MMAL_PARAMETER_ZOOM: mmal.MMAL_PARAMETER_SCALEFACTOR_T,
}
class PiCameraFraction(Fraction):
    """
    Extends :class:`~fractions.Fraction` so that it can also masquerade as a
    two-element (numerator, denominator) tuple for legacy code that expects
    framerates in tuple form. Tuple-style access triggers a deprecation
    warning; :attr:`numerator` and :attr:`denominator` should be used instead.
    """
    def _warn_tuple_access(self):
        # Framerate used to be exposed as a plain 2-tuple; warn any code
        # still relying on len()/indexing semantics
        warnings.warn(
            PiCameraDeprecated(
                'Accessing framerate as a tuple is deprecated; this value is '
                'now a Fraction, so you can query the numerator and '
                'denominator properties directly, convert to an int or float, '
                'or perform arithmetic operations and comparisons directly'))
    def __len__(self):
        """Pretend to be a 2-tuple (deprecated access pattern)."""
        self._warn_tuple_access()
        return 2
    def __getitem__(self, index):
        """Index 0 yields the numerator, index 1 the denominator."""
        self._warn_tuple_access()
        if index == 0:
            return self.numerator
        if index == 1:
            return self.denominator
        raise IndexError('invalid index %d' % index)
    def __contains__(self, value):
        """Membership against the (numerator, denominator) pair."""
        return value in (self.numerator, self.denominator)
class PiResolution(namedtuple('PiResolution', ('width', 'height'))):
    """
    A :func:`~collections.namedtuple` derivative which represents a resolution
    with a :attr:`width` and :attr:`height`.
    .. attribute:: width
        The width of the resolution in pixels
    .. attribute:: height
        The height of the resolution in pixels
    .. versionadded:: 1.11
    """
    __slots__ = () # workaround python issue #24931
    def pad(self, width=32, height=16):
        """
        Returns the resolution padded up to the nearest multiple of *width*
        and *height* which default to 32 and 16 respectively (the camera's
        native block size for most operations). For example:
        .. code-block:: pycon
            >>> PiResolution(1920, 1080).pad()
            PiResolution(width=1920, height=1088)
            >>> PiResolution(100, 100).pad(16, 8)
            PiResolution(width=112, height=104)
            >>> PiResolution(100, 100).pad(16, 16)
            PiResolution(width=112, height=112)
        """
        # Round each dimension up to the next multiple of its block size
        # (fixed docstring examples: the original showed pad(16, 16) on
        # 100x100 producing 128x112, which is incorrect)
        return PiResolution(
            width=((self.width + (width - 1)) // width) * width,
            height=((self.height + (height - 1)) // height) * height,
            )
    def transpose(self):
        """
        Returns the resolution with the width and height transposed. For
        example:
        .. code-block:: pycon
            >>> PiResolution(1920, 1080).transpose()
            PiResolution(width=1080, height=1920)
        """
        return PiResolution(self.height, self.width)
    def __str__(self):
        # Render as "WIDTHxHEIGHT", e.g. "1920x1080"
        return '%dx%d' % (self.width, self.height)
class PiFramerateRange(namedtuple('PiFramerateRange', ('low', 'high'))):
    """
    A :func:`~collections.namedtuple` derivative holding the limits of a
    framerate range. Prefer attribute access (``camera.framerate_range.low``)
    over positional access (``camera.framerate_range[0]``).
    .. attribute:: low
        The lowest framerate the camera is permitted to use (inclusive);
        normalized to a :class:`~fractions.Fraction` on construction.
    .. attribute:: high
        The highest framerate the camera is permitted to use (inclusive);
        normalized to a :class:`~fractions.Fraction` on construction.
    .. versionadded:: 1.13
    """
    __slots__ = () # workaround python issue #24931
    def __new__(cls, low, high):
        # Normalize both bounds so attribute access always yields Fractions
        low, high = to_fraction(low), to_fraction(high)
        return super(PiFramerateRange, cls).__new__(cls, low, high)
    def __str__(self):
        # Render as "low..high", e.g. "1/10..30"
        return '%s..%s' % (self.low, self.high)
class PiSensorMode(namedtuple('PiSensorMode', ('resolution', 'framerates',
        'video', 'still', 'full_fov'))):
    """
    A :func:`~collections.namedtuple` derivative describing a camera sensor
    mode.
    .. attribute:: resolution
        A :class:`PiResolution` giving the frame size output by the camera in
        this mode.
    .. attribute:: framerates
        A :class:`PiFramerateRange` giving the framerates supported by this
        mode. Typically the low value is exclusive and the high value
        inclusive.
    .. attribute:: video
        A :class:`bool` indicating whether the mode can record video.
        Currently this is always ``True``.
    .. attribute:: still
        A :class:`bool` indicating whether the mode can be used for still
        captures (capture methods called with ``use_video_port`` set to
        ``False``).
    .. attribute:: full_fov
        A :class:`bool` indicating whether the full sensor width is used to
        capture frames; may be ``True`` even below maximum resolution due to
        binning and skipping. See :ref:`camera_modes` for a diagram of the
        available fields of view.
    """
    __slots__ = () # workaround python issue #24931
    def __new__(cls, resolution, framerates, video=True, still=False,
            full_fov=True):
        # Coerce loosely-typed arguments into the canonical helper types
        if not isinstance(resolution, PiResolution):
            resolution = to_resolution(resolution)
        if not isinstance(framerates, PiFramerateRange):
            framerates = PiFramerateRange(*framerates)
        return super(PiSensorMode, cls).__new__(
            cls, resolution, framerates, video, still, full_fov)
def open_stream(stream, output=True, buffering=65536):
    """
    The core of picamerax's IO-semantics: normalizes *stream* into a file-like
    object, returning a ``(stream, opened)`` tuple where *opened* indicates
    whether the caller is responsible for closing the stream when finished.
    * A string (or ASCII-decodable bytes) is treated as a filename and opened
      ('wb' when *output* is ``True``, 'rb' otherwise, with the given
      *buffering*); returns ``(file, True)``.
    * An object with the relevant ``write`` (or ``read``) method is returned
      unchanged as ``(stream, False)``.
    * Anything else is assumed to be a writeable buffer and wrapped with
      :class:`BufferIO`; returns ``(wrapper, True)``.
    """
    if isinstance(stream, bytes):
        stream = stream.decode('ascii')
    if isinstance(stream, str):
        # Filename: open it ourselves, so the caller must close it
        return (io.open(stream, 'wb' if output else 'rb', buffering), True)
    if hasattr(stream, 'write' if output else 'read'):
        # Already a usable stream; caller retains ownership
        return (stream, False)
    # Assume the stream is actually a buffer
    stream = BufferIO(stream)
    if output and not stream.writable:
        raise IOError('writeable buffer required for output')
    return (stream, True)
def close_stream(stream, opened):
    """
    Finalizes the result of :func:`open_stream`: when *opened* is ``True`` the
    *stream* is closed; otherwise its ``flush`` method is called if it has
    one (streams without ``flush`` are silently left alone).
    """
    if opened:
        stream.close()
        return
    flush = getattr(stream, 'flush', None)
    if flush is not None:
        flush()
def to_resolution(value):
    """
    Converts *value* -- a (width, height) tuple, or a string such as
    "1024x768" or a named resolution like "1080p" -- into a
    :class:`PiResolution`.
    """
    if isinstance(value, bytes):
        value = value.decode('utf-8')
    if isinstance(value, str):
        # A selection from https://en.wikipedia.org/wiki/Graphics_display_resolution
        # Feel free to suggest additions
        named = {
            'VGA': (640, 480),
            'SVGA': (800, 600),
            'XGA': (1024, 768),
            'SXGA': (1280, 1024),
            'UXGA': (1600, 1200),
            'HD': (1280, 720),
            'FHD': (1920, 1080),
            '1080P': (1920, 1080),
            '720P': (1280, 720),
            }
        key = value.strip().upper()
        if key in named:
            w, h = named[key]
        else:
            # Fall back to parsing "WIDTHxHEIGHT"
            w, h = (int(part.strip()) for part in value.upper().split('X', 1))
        return PiResolution(w, h)
    try:
        w, h = value
    except (TypeError, ValueError):
        raise PiCameraValueError("Invalid resolution tuple: %r" % value)
    return PiResolution(w, h)
def to_fraction(value, den_limit=65536):
    """
    Converts *value* -- any numeric type, an MMAL_RATIONAL_T, or a deprecated
    (numerator, denominator) tuple -- to a :class:`~fractions.Fraction`,
    limiting the denominator to the range 0 < n <= *den_limit* (default
    65536).
    """
    if hasattr(value, 'numerator') and hasattr(value, 'denominator'):
        # int, long, or fraction
        n, d = value.numerator, value.denominator
    elif hasattr(value, 'as_integer_ratio'):
        # float
        n, d = value.as_integer_ratio()
    elif hasattr(value, 'num') and hasattr(value, 'den'):
        # MMAL_RATIONAL_T-style structure
        n, d = value.num, value.den
    else:
        try:
            # tuple
            n, d = value
            warnings.warn(
                PiCameraDeprecated(
                    "Setting framerate or gains as a tuple is "
                    "deprecated; please use one of Python's many "
                    "numeric classes like int, float, Decimal, or "
                    "Fraction instead"))
        except (TypeError, ValueError):
            # try and convert anything else to a Fraction directly
            value = Fraction(value)
            n, d = value.numerator, value.denominator
    # Ensure denominator is reasonable
    if d == 0:
        raise PiCameraValueError("Denominator cannot be 0")
    result = Fraction(n, d)
    if d > den_limit:
        result = result.limit_denominator(den_limit)
    return result
def to_rational(value):
    """
    Converts *value* (anything accepted by :func:`to_fraction`) to an
    MMAL_RATIONAL_T structure.
    """
    frac = to_fraction(value)
    return mmal.MMAL_RATIONAL_T(frac.numerator, frac.denominator)
def buffer_bytes(buf):
    """
    Returns the size in bytes of *buf*, an object implementing the
    :ref:`buffer protocol <bufferobjects>`. Works for multi-dimensional
    buffers and items larger than one byte.
    """
    view = buf if isinstance(buf, memoryview) else memoryview(buf)
    # total bytes = bytes-per-item times the product of all dimensions
    return view.itemsize * reduce(mul, view.shape)
def debug_pipeline(port):
    """
    Given an :class:`MMALVideoPort` *port*, this traces all objects in the
    pipeline feeding it (including components and connections) and yields each
    object in turn. Hence the generator typically yields something like:
    * :class:`MMALVideoPort` (the specified output port)
    * :class:`MMALEncoder` (the encoder which owns the output port)
    * :class:`MMALVideoPort` (the encoder's input port)
    * :class:`MMALConnection` (the connection between the splitter and encoder)
    * :class:`MMALVideoPort` (the splitter's output port)
    * :class:`MMALSplitter` (the splitter on the camera's video port)
    * :class:`MMALVideoPort` (the splitter's input port)
    * :class:`MMALConnection` (the connection between the splitter and camera)
    * :class:`MMALVideoPort` (the camera's video port)
    * :class:`MMALCamera` (the camera component)
    """
    def find_port(addr):
        # Locate the wrapper whose underlying MMAL port structure lives at
        # C address *addr*, by scanning the weak registry of live wrappers
        for obj in MMALObject.REGISTRY:
            if isinstance(obj, MMALControlPort):
                if ct.addressof(obj._port[0]) == addr:
                    return obj
        raise IndexError('unable to locate port with address %x' % addr)
    def find_component(addr):
        # Same as find_port, but for component wrappers (skipping any whose
        # underlying component has already been destroyed)
        for obj in MMALObject.REGISTRY:
            if isinstance(obj, MMALBaseComponent) and obj._component is not None:
                if ct.addressof(obj._component[0]) == addr:
                    return obj
        raise IndexError('unable to locate component with address %x' % addr)
    assert isinstance(port, (MMALControlPort, MMALPythonPort))
    # Walk upstream from *port*: yield the output port, its owning component,
    # then (while a connection exists) the connected input port and the
    # connection itself, continuing from the connection's source port.
    # Python-implemented ports/components are introspected directly; native
    # ones are resolved by matching ctypes structure addresses.
    while True:
        if port.type == mmal.MMAL_PORT_TYPE_OUTPUT:
            yield port
        if isinstance(port, MMALPythonPort):
            comp = port._owner()
        else:
            comp = find_component(ct.addressof(port._port[0].component[0]))
        yield comp
        if not isinstance(comp, (MMALComponent, MMALPythonComponent)):
            break
        if comp.connection is None:
            break
        if isinstance(comp.connection, MMALPythonConnection):
            port = comp.connection._target
        else:
            port = find_port(ct.addressof(comp.connection._connection[0].in_[0]))
        yield port
        yield comp.connection
        if isinstance(comp.connection, MMALPythonConnection):
            port = comp.connection._source
        else:
            port = find_port(ct.addressof(comp.connection._connection[0].out[0]))
def print_pipeline(port):
    """
    Prints a human readable representation of the pipeline feeding the
    specified :class:`MMALVideoPort` *port*.
    """
    # Six display rows: component/port names, encoding, buffer config,
    # bitrate, frame geometry, and colorspace. Rows are built column by
    # column while walking the pipeline source-to-sink, then transposed
    # for width calculation.
    rows = [[], [], [], [], [], []]
    # under_comp is True immediately after emitting a component's name
    # column; it causes the *next* port column to be prefixed with the
    # row-label cells ('encoding', 'buf', ...)
    under_comp = False
    for obj in reversed(list(debug_pipeline(port))):
        if isinstance(obj, (MMALBaseComponent, MMALPythonBaseComponent)):
            rows[0].append(obj.name)
            under_comp = True
        elif isinstance(obj, MMALVideoPort):
            rows[0].append('[%d]' % obj._port[0].index)
            if under_comp:
                rows[1].append('encoding')
            if obj.format == mmal.MMAL_ENCODING_OPAQUE:
                rows[1].append(obj.opaque_subformat)
            else:
                rows[1].append(mmal.FOURCC_str(obj._port[0].format[0].encoding))
            if under_comp:
                rows[2].append('buf')
            rows[2].append('%dx%d' % (obj._port[0].buffer_num, obj._port[0].buffer_size))
            if under_comp:
                rows[3].append('bitrate')
            rows[3].append('%dbps' % (obj._port[0].format[0].bitrate,))
            if under_comp:
                rows[4].append('frame')
            rows[4].append('%dx%d@%sfps' % (
                obj._port[0].format[0].es[0].video.width,
                obj._port[0].format[0].es[0].video.height,
                obj.framerate))
            if under_comp:
                rows[5].append('colorspc')
                # labels emitted; subsequent ports of this component get none
                under_comp = False
            rows[5].append(mmal.FOURCC_str(obj._port[0].format[0].es[0].video.color_space))
        elif isinstance(obj, MMALPythonPort):
            rows[0].append('[%d]' % obj._index)
            if under_comp:
                rows[1].append('encoding')
            if obj.format == mmal.MMAL_ENCODING_OPAQUE:
                rows[1].append(obj.opaque_subformat)
            else:
                rows[1].append(mmal.FOURCC_str(obj._format[0].encoding))
            if under_comp:
                rows[2].append('buf')
            rows[2].append('%dx%d' % (obj.buffer_count, obj.buffer_size))
            if under_comp:
                rows[3].append('bitrate')
            rows[3].append('%dbps' % (obj._format[0].bitrate,))
            if under_comp:
                rows[4].append('frame')
                under_comp = False
            rows[4].append('%dx%d@%sfps' % (
                obj._format[0].es[0].video.width,
                obj._format[0].es[0].video.height,
                obj.framerate))
            if under_comp:
                rows[5].append('colorspc')
            # Python ports have no queryable colorspace
            rows[5].append('???')
        elif isinstance(obj, (MMALConnection, MMALPythonConnection)):
            # Connections render as a single arrow column
            rows[0].append('')
            rows[1].append('')
            rows[2].append('-->')
            rows[3].append('')
            rows[4].append('')
            rows[5].append('')
    # If the walk ended right after a component (no trailing port), the
    # row labels were never emitted; add them so the rows stay rectangular
    if under_comp:
        rows[1].append('encoding')
        rows[2].append('buf')
        rows[3].append('bitrate')
        rows[4].append('frame')
        rows[5].append('colorspc')
    # Transpose to compute per-column widths, then render with alternating
    # alignment (centered labels, left/right-aligned values)
    cols = list(zip(*rows))
    max_lens = [max(len(s) for s in col) + 2 for col in cols]
    rows = [
        ''.join('{0:{align}{width}s}'.format(s, align=align, width=max_len)
            for s, max_len, align in zip(row, max_lens, cycle('^<^>')))
        for row in rows
    ]
    for row in rows:
        print(row)
class MMALObject(object):
    """
    Abstract base for Python wrappers around MMAL entities (components,
    ports, connections, etc). Maintains a weak registry of every live
    wrapper, permitting lookup by name and enumeration of all MMAL objects
    currently in use.
    """
    # the __weakref__ slot is required so instances can live in the WeakSet
    __slots__ = ('__weakref__',)
    REGISTRY = weakref.WeakSet()
    def __init__(self):
        super(MMALObject, self).__init__()
        # Register every new wrapper; weak references mean entries vanish
        # automatically once the wrapper is garbage collected
        MMALObject.REGISTRY.add(self)
class MMALBaseComponent(MMALObject):
    """
    Represents a generic MMAL component. Class attributes are read to determine
    the component type, and the OPAQUE sub-formats of each connectable port.
    """
    __slots__ = ('_component', '_control', '_inputs', '_outputs')
    # The name passed to mmal_component_create; subclasses override this
    component_type = b'none'
    # One entry per expected input/output port, giving the OPAQUE sub-format
    # that port speaks; the tuple lengths also define how many ports the
    # created component is required to expose
    opaque_input_subformats = ()
    opaque_output_subformats = ()
    def __init__(self):
        """
        Creates the underlying MMAL component and wraps its control, input,
        and output ports in the appropriate port classes.
        """
        super(MMALBaseComponent, self).__init__()
        self._component = ct.POINTER(mmal.MMAL_COMPONENT_T)()
        mmal_check(
            mmal.mmal_component_create(self.component_type, self._component),
            prefix="Failed to create MMAL component %s" % self.component_type)
        # Sanity check: the firmware component must expose exactly as many
        # ports as the subformat tuples declare
        if self._component[0].input_num != len(self.opaque_input_subformats):
            raise PiCameraRuntimeError(
                'Expected %d inputs but found %d on component %s' % (
                    len(self.opaque_input_subformats),
                    self._component[0].input_num,
                    self.component_type))
        if self._component[0].output_num != len(self.opaque_output_subformats):
            raise PiCameraRuntimeError(
                'Expected %d outputs but found %d on component %s' % (
                    len(self.opaque_output_subformats),
                    self._component[0].output_num,
                    self.component_type))
        self._control = MMALControlPort(self._component[0].control)
        # Pick the Python wrapper class for each port from its elementary
        # stream type
        port_class = {
            mmal.MMAL_ES_TYPE_UNKNOWN: MMALPort,
            mmal.MMAL_ES_TYPE_CONTROL: MMALControlPort,
            mmal.MMAL_ES_TYPE_VIDEO: MMALVideoPort,
            mmal.MMAL_ES_TYPE_AUDIO: MMALAudioPort,
            mmal.MMAL_ES_TYPE_SUBPICTURE: MMALSubPicturePort,
            }
        self._inputs = tuple(
            port_class[self._component[0].input[n][0].format[0].type](
                self._component[0].input[n], opaque_subformat)
            for n, opaque_subformat in enumerate(self.opaque_input_subformats))
        self._outputs = tuple(
            port_class[self._component[0].output[n][0].format[0].type](
                self._component[0].output[n], opaque_subformat)
            for n, opaque_subformat in enumerate(self.opaque_output_subformats))
    def close(self):
        """
        Close the component and release all its resources. After this is
        called, most methods will raise exceptions if called.
        """
        if self._component is not None:
            # ensure we free any pools associated with input/output ports
            for output in self.outputs:
                output.disable()
            for input in self.inputs:
                input.disable()
            mmal.mmal_component_destroy(self._component)
            self._component = None
            self._inputs = ()
            self._outputs = ()
            self._control = None
    @property
    def name(self):
        # The name reported by the underlying MMAL component
        return self._component[0].name.decode('ascii')
    @property
    def control(self):
        """
        The :class:`MMALControlPort` control port of the component which can be
        used to configure most aspects of the component's behaviour.
        """
        return self._control
    @property
    def inputs(self):
        """
        A sequence of :class:`MMALPort` objects representing the inputs
        of the component.
        """
        return self._inputs
    @property
    def outputs(self):
        """
        A sequence of :class:`MMALPort` objects representing the outputs
        of the component.
        """
        return self._outputs
    @property
    def enabled(self):
        """
        Returns ``True`` if the component is currently enabled. Use
        :meth:`enable` and :meth:`disable` to control the component's state.
        """
        return bool(self._component[0].is_enabled)
    def enable(self):
        """
        Enable the component. When a component is enabled it will process data
        sent to its input port(s), sending the results to buffers on its output
        port(s). Components may be implicitly enabled by connections.
        """
        mmal_check(
            mmal.mmal_component_enable(self._component),
            prefix="Failed to enable component")
    def disable(self):
        """
        Disables the component.
        """
        mmal_check(
            mmal.mmal_component_disable(self._component),
            prefix="Failed to disable component")
    def __enter__(self):
        # Support "with component:" blocks; close() runs on exit
        return self
    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()
    def __repr__(self):
        if self._component is not None:
            return '<%s "%s": %d inputs %d outputs>' % (
                self.__class__.__name__, self.name,
                len(self.inputs), len(self.outputs))
        else:
            return '<%s closed>' % self.__class__.__name__
class MMALControlPort(MMALObject):
    """
    Represents an MMAL port with properties to configure the port's parameters.
    """
    __slots__ = ('_port', '_params', '_wrapper')
    def __init__(self, port):
        """
        Wraps the MMAL port pointer *port* and exposes its parameters via
        :attr:`params`.
        """
        super(MMALControlPort, self).__init__()
        self._port = port
        self._params = MMALPortParams(port)
        # Holds the ctypes callback wrapper while the port is enabled, so it
        # isn't garbage collected while MMAL still references it
        self._wrapper = None
    @property
    def index(self):
        """
        Returns an integer indicating the port's position within its owning
        list (inputs, outputs, etc.)
        """
        return self._port[0].index
    @property
    def enabled(self):
        """
        Returns a :class:`bool` indicating whether the port is currently
        enabled. Unlike other classes, this is a read-only property. Use
        :meth:`enable` and :meth:`disable` to modify the value.
        """
        return bool(self._port[0].is_enabled)
    def enable(self, callback=None):
        """
        Enable the port with the specified callback function (this must be
        ``None`` for connected ports, and a callable for disconnected ports).
        The callback function must accept two parameters which will be this
        :class:`MMALControlPort` (or descendent) and an :class:`MMALBuffer`
        instance. Any return value will be ignored.
        """
        def wrapper(port, buf):
            # Invoked from MMAL: wrap the raw buffer and guarantee its
            # release even if the user callback raises
            buf = MMALBuffer(buf)
            try:
                callback(self, buf)
            finally:
                buf.release()
        if callback:
            self._wrapper = mmal.MMAL_PORT_BH_CB_T(wrapper)
        else:
            # No user callback: pass a NULL function pointer to MMAL
            self._wrapper = ct.cast(None, mmal.MMAL_PORT_BH_CB_T)
        mmal_check(
            mmal.mmal_port_enable(self._port, self._wrapper),
            prefix="Unable to enable port %s" % self.name)
    def disable(self):
        """
        Disable the port.
        """
        # NOTE: The test here only exists to avoid spamming the console; when
        # disabling an already disabled port MMAL dumps errors to stderr. If
        # this test isn't here closing a camera results in half a dozen lines
        # of ignored errors
        if self.enabled:
            try:
                mmal_check(
                    mmal.mmal_port_disable(self._port),
                    prefix="Unable to disable port %s" % self.name)
            except PiCameraMMALError as e:
                # Ignore the error if we're disabling an already disabled port
                if not (e.status == mmal.MMAL_EINVAL and not self.enabled):
                    raise e
        self._wrapper = None
    @property
    def name(self):
        # The port's name as reported by MMAL, minus any "(format)" suffix
        result = self._port[0].name.decode('ascii')
        if result.endswith(')'):
            try:
                # strip (format) from port names as it doesn't really belong
                # there (it doesn't identify the port in any way) and makes
                # matching some of the correctional cases a pain
                return result[:result.rindex('(')]
            except ValueError:
                return result
        else:
            return result
    @property
    def type(self):
        """
        The type of the port. One of:
        * MMAL_PORT_TYPE_OUTPUT
        * MMAL_PORT_TYPE_INPUT
        * MMAL_PORT_TYPE_CONTROL
        * MMAL_PORT_TYPE_CLOCK
        """
        return self._port[0].type
    @property
    def capabilities(self):
        """
        The capabilities of the port. A bitfield of the following:
        * MMAL_PORT_CAPABILITY_PASSTHROUGH
        * MMAL_PORT_CAPABILITY_ALLOCATION
        * MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE
        """
        return self._port[0].capabilities
    @property
    def params(self):
        """
        The configurable parameters for the port. This is presented as a
        mutable mapping of parameter numbers to values, implemented by the
        :class:`MMALPortParams` class.
        """
        return self._params
    def __repr__(self):
        if self._port is not None:
            return '<MMALControlPort "%s">' % self.name
        else:
            return '<MMALControlPort closed>'
class MMALPort(MMALControlPort):
    """
    Represents an MMAL port with properties to configure and update the port's
    format. This is the base class of :class:`MMALVideoPort`,
    :class:`MMALAudioPort`, and :class:`MMALSubPicturePort`.
    """
    __slots__ = ('_opaque_subformat', '_pool', '_stopped', '_connection')
    # A mapping of corrected definitions of supported_formats for ports with
    # particular names. Older firmwares either raised EINVAL, ENOSYS, or just
    # reported the wrong things for various ports; these lists are derived from
    # querying newer firmwares or in some cases guessing sensible defaults
    # (for ports where even the newer firmwares get stuff wrong). Keys are
    # port names as returned by the :attr:`name` property.
    _supported_formats_patch = {
        'vc.ril.camera:out:2': [
            mmal.MMAL_ENCODING_I420,
            mmal.MMAL_ENCODING_NV12,
            mmal.MMAL_ENCODING_I422,
            mmal.MMAL_ENCODING_YUYV,
            mmal.MMAL_ENCODING_YVYU,
            mmal.MMAL_ENCODING_VYUY,
            mmal.MMAL_ENCODING_UYVY,
            mmal.MMAL_ENCODING_BGR24,
            mmal.MMAL_ENCODING_BGRA,
            mmal.MMAL_ENCODING_RGB16,
            mmal.MMAL_ENCODING_YV12,
            mmal.MMAL_ENCODING_NV21,
            mmal.MMAL_ENCODING_RGB24,
            mmal.MMAL_ENCODING_RGBA,
        ],
        'vc.ril.image_encode:in:0': [
            mmal.MMAL_ENCODING_RGB16,
            mmal.MMAL_ENCODING_RGB24,
            mmal.MMAL_ENCODING_RGBA,
            mmal.MMAL_ENCODING_BGRA,
            mmal.MMAL_ENCODING_I420,
            mmal.MMAL_ENCODING_I422,
            mmal.MMAL_ENCODING_NV12,
            mmal.MMAL_ENCODING_YUYV,
            mmal.MMAL_ENCODING_YVYU,
            mmal.MMAL_ENCODING_VYUY,
        ],
        'vc.ril.image_encode:out:0': [
            mmal.MMAL_ENCODING_JPEG,
            mmal.MMAL_ENCODING_GIF,
            mmal.MMAL_ENCODING_PNG,
            mmal.MMAL_ENCODING_BMP,
            mmal.MMAL_ENCODING_PPM,
            mmal.MMAL_ENCODING_TGA,
        ],
        'vc.ril.resize:in:0': [
            mmal.MMAL_ENCODING_RGBA,
            mmal.MMAL_ENCODING_BGRA,
            mmal.MMAL_ENCODING_RGB16,
            mmal.MMAL_ENCODING_I420,
            # several invalid encodings (lowercase versions of the priors)
            # appear here in modern firmwares but since they don't map to any
            # constants they're excluded
            mmal.MMAL_ENCODING_I420_SLICE,
        ],
        'vc.ril.resize:out:0': [
            mmal.MMAL_ENCODING_RGBA,
            mmal.MMAL_ENCODING_BGRA,
            mmal.MMAL_ENCODING_RGB16,
            mmal.MMAL_ENCODING_I420,
            # same invalid encodings as above here
            mmal.MMAL_ENCODING_I420_SLICE,
        ],
        'vc.ril.isp:in:0': [
            mmal.MMAL_ENCODING_BAYER_SBGGR8,
            mmal.MMAL_ENCODING_BAYER_SBGGR10DPCM8,
            mmal.MMAL_ENCODING_BAYER_SBGGR10P,
            mmal.MMAL_ENCODING_BAYER_SBGGR12P,
            mmal.MMAL_ENCODING_YUYV,
            mmal.MMAL_ENCODING_YVYU,
            mmal.MMAL_ENCODING_VYUY,
            mmal.MMAL_ENCODING_UYVY,
            mmal.MMAL_ENCODING_I420,
            mmal.MMAL_ENCODING_YV12,
            mmal.MMAL_ENCODING_I422,
            mmal.MMAL_ENCODING_RGB24,
            mmal.MMAL_ENCODING_BGR24,
            mmal.MMAL_ENCODING_RGBA,
            mmal.MMAL_ENCODING_BGRA,
            mmal.MMAL_ENCODING_RGB16,
            mmal.MMAL_ENCODING_YUVUV128,
            mmal.MMAL_ENCODING_NV12,
            mmal.MMAL_ENCODING_NV21,
        ],
        'vc.ril.isp:out:0': [
            mmal.MMAL_ENCODING_YUYV,
            mmal.MMAL_ENCODING_YVYU,
            mmal.MMAL_ENCODING_VYUY,
            mmal.MMAL_ENCODING_UYVY,
            mmal.MMAL_ENCODING_I420,
            mmal.MMAL_ENCODING_YV12,
            mmal.MMAL_ENCODING_I422,
            mmal.MMAL_ENCODING_RGB24,
            mmal.MMAL_ENCODING_BGR24,
            mmal.MMAL_ENCODING_RGBA,
            mmal.MMAL_ENCODING_BGRA,
            mmal.MMAL_ENCODING_RGB16,
            mmal.MMAL_ENCODING_YUVUV128,
            mmal.MMAL_ENCODING_NV12,
            mmal.MMAL_ENCODING_NV21,
        ],
        'vc.null_sink:in:0': [
            mmal.MMAL_ENCODING_I420,
            mmal.MMAL_ENCODING_RGB24,
            mmal.MMAL_ENCODING_BGR24,
            mmal.MMAL_ENCODING_RGBA,
            mmal.MMAL_ENCODING_BGRA,
        ],
    }
def __init__(self, port, opaque_subformat='OPQV'):
super(MMALPort, self).__init__(port)
self.opaque_subformat = opaque_subformat
self._pool = None
self._stopped = True
self._connection = None
def __repr__(self):
if self._port is not None:
return '<MMALPort "%s": format=MMAL_FOURCC(%r) buffers=%dx%d>' % (
self.name, mmal.FOURCC_str(self.format),
self.buffer_count, self.buffer_size)
else:
return '<MMALPort closed>'
def _get_opaque_subformat(self):
return self._opaque_subformat
def _set_opaque_subformat(self, value):
self._opaque_subformat = value
opaque_subformat = property(
_get_opaque_subformat, _set_opaque_subformat, doc="""\
Retrieves or sets the opaque sub-format that the port speaks. While
most formats (I420, RGBA, etc.) mean one thing, the opaque format is
special; different ports produce different sorts of data when
configured for OPQV format. This property stores a string which
uniquely identifies what the associated port means for OPQV format.
If the port does not support opaque format at all, set this property to
``None``.
:class:`MMALConnection` uses this information when negotiating formats
for a connection between two ports.
""")
def _get_format(self):
result = self._port[0].format[0].encoding
if FIX_RGB_BGR_ORDER:
return {
mmal.MMAL_ENCODING_RGB24: mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_BGR24: mmal.MMAL_ENCODING_RGB24,
}.get(result, result)
else:
return result
def _set_format(self, value):
if FIX_RGB_BGR_ORDER:
value = {
mmal.MMAL_ENCODING_RGB24: mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_BGR24: mmal.MMAL_ENCODING_RGB24,
}.get(value, value)
self._port[0].format[0].encoding = value
if value == mmal.MMAL_ENCODING_OPAQUE:
self._port[0].format[0].encoding_variant = mmal.MMAL_ENCODING_I420
format = property(_get_format, _set_format, doc="""\
Retrieves or sets the encoding format of the port. Setting this
attribute implicitly sets the encoding variant to a sensible value
(I420 in the case of OPAQUE).
After setting this attribute, call :meth:`commit` to make the changes
effective.
""")
    @property
    def supported_formats(self):
        """
        Retrieves a sequence of supported encodings on this port.
        """
        try:
            mp = self.params[mmal.MMAL_PARAMETER_SUPPORTED_ENCODINGS]
        except PiCameraMMALError as e:
            if e.status in (mmal.MMAL_EINVAL, mmal.MMAL_ENOSYS):
                # Workaround: old firmwares raise EINVAL or ENOSYS when various
                # ports are queried for supported formats. The following is the
                # correct sequence for old firmwares (note: swapped RGB24 and
                # BGR24 order in still port) ... probably (vc.ril.camera:out:2
                # is definitely right, the rest are largely guessed based on
                # queries of later firmwares)
                try:
                    return MMALPort._supported_formats_patch[self.name]
                except KeyError:
                    # No patched list for this port; re-raise the original
                    # firmware error
                    raise e
            else:
                raise
        else:
            # The parameter is a fixed-size array; trim trailing zero slots
            # and truncate to the entry count implied by the header size
            result = [
                v for v in mp.encoding if v != 0
            ][:mp.hdr.size // ct.sizeof(ct.c_uint32)]
            # Workaround: Fix incorrect result on MMALImageEncoder.outputs[0]
            # from modern firmwares
            if self.name == 'vc.ril.image_encode:out:0' and result == [
                    mmal.MMAL_ENCODING_MP2V, mmal.MMAL_ENCODING_MP2V,
                    mmal.MMAL_ENCODING_H264, mmal.MMAL_ENCODING_H264,
                    mmal.MMAL_ENCODING_VP7, mmal.MMAL_ENCODING_VP7,
                    mmal.MMAL_ENCODING_VP6, mmal.MMAL_ENCODING_VP6]:
                return MMALPort._supported_formats_patch[self.name]
            else:
                return result
def _get_bitrate(self):
return self._port[0].format[0].bitrate
def _set_bitrate(self, value):
self._port[0].format[0].bitrate = value
bitrate = property(_get_bitrate, _set_bitrate, doc="""\
Retrieves or sets the bitrate limit for the port's format.
""")
def copy_from(self, source):
"""
Copies the port's :attr:`format` from the *source*
:class:`MMALControlPort`.
"""
if isinstance(source, MMALPythonPort):
mmal.mmal_format_copy(self._port[0].format, source._format)
else:
mmal.mmal_format_copy(self._port[0].format, source._port[0].format)
    def commit(self):
        """
        Commits the port's configuration and automatically updates the number
        and size of associated buffers according to the recommendations of the
        MMAL library. This is typically called after adjusting the port's
        format and/or associated settings (like width and height for video
        ports).
        """
        mmal_check(
            mmal.mmal_port_format_commit(self._port),
            prefix="Format couldn't be set on port %s" % self.name)
        # Workaround: Unfortunately, there is an upstream issue with the
        # buffer_num_recommended which means it can't currently be used (see
        # discussion in raspberrypi/userland#167). There's another upstream
        # issue with buffer_num_min which means we need to guard against 0
        # values...
        self._port[0].buffer_num = max(1, self._port[0].buffer_num_min)
        # Prefer the recommended buffer size, falling back to the minimum
        # when the firmware reports no recommendation (zero)
        self._port[0].buffer_size = (
            self._port[0].buffer_size_recommended
            if self._port[0].buffer_size_recommended > 0 else
            self._port[0].buffer_size_min)
    @property
    def pool(self):
        """
        Returns the :class:`MMALPool` associated with the port, if any
        (allocated by :meth:`enable` when a callback is given).
        """
        return self._pool
def get_buffer(self, block=True, timeout=None):
"""
Returns a :class:`MMALBuffer` from the associated :attr:`pool`. *block*
and *timeout* act as they do in the corresponding
:meth:`MMALPool.get_buffer`.
"""
if not self.enabled:
raise PiCameraPortDisabled(
'cannot get buffer from disabled port %s' % self.name)
return self.pool.get_buffer(block, timeout)
    def send_buffer(self, buf):
        """
        Send :class:`MMALBuffer` *buf* to the port.
        """
        # For input ports fed by a Python-implemented connection with a
        # filter callback, give the callback a chance to modify (or drop)
        # the buffer before it reaches the native port
        if (
                self.type == mmal.MMAL_PORT_TYPE_INPUT and
                isinstance(self._connection, MMALPythonConnection) and
                self._connection._callback is not None):
            try:
                modified_buf = self._connection._callback(self._connection, buf)
            except:
                buf.release()
                raise
            else:
                if modified_buf is None:
                    # Callback consumed/dropped the buffer; nothing to send
                    buf.release()
                    return
                else:
                    buf = modified_buf
        try:
            mmal_check(
                mmal.mmal_port_send_buffer(self._port, buf._buf),
                prefix="cannot send buffer to port %s" % self.name)
        except PiCameraMMALError as e:
            # If port is disabled, convert exception for convenience
            if e.status == mmal.MMAL_EINVAL and not self.enabled:
                raise PiCameraPortDisabled(
                    'cannot send buffer to disabled port %s' % self.name)
            else:
                raise
    def flush(self):
        """
        Flush the port.
        """
        # NOTE(review): mmal_port_flush presumably returns any in-flight
        # buffers to their owners -- confirm against the MMAL headers
        mmal_check(
            mmal.mmal_port_flush(self._port),
            prefix="Unable to flush port %s" % self.name)
def _get_buffer_count(self):
return self._port[0].buffer_num
def _set_buffer_count(self, value):
if value < 1:
raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer count <1')
self._port[0].buffer_num = value
buffer_count = property(_get_buffer_count, _set_buffer_count, doc="""\
The number of buffers allocated (or to be allocated) to the port.
The ``mmalobj`` layer automatically configures this based on
recommendations from the MMAL library.
""")
def _get_buffer_size(self):
return self._port[0].buffer_size
def _set_buffer_size(self, value):
if value < 0:
raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer size <0')
self._port[0].buffer_size = value
buffer_size = property(_get_buffer_size, _set_buffer_size, doc="""\
The size of buffers allocated (or to be allocated) to the port. The
size of buffers is typically dictated by the port's format. The
``mmalobj`` layer automatically configures this based on
recommendations from the MMAL library.
""")
    def enable(self, callback=None):
        """
        Enable the port with the specified callback function (this must be
        ``None`` for connected ports, and a callable for disconnected ports).
        The callback function must accept two parameters which will be this
        :class:`MMALControlPort` (or descendent) and an :class:`MMALBuffer`
        instance. The callback should return ``True`` when processing is
        complete and no further calls are expected (e.g. at frame-end for an
        image encoder), and ``False`` otherwise.
        """
        def wrapper(port, buf):
            # Runs on MMAL's callback thread: hand the buffer to the user
            # callback until it signals completion, then stop; the buffer is
            # always released and recycled back into the pool
            buf = MMALBuffer(buf)
            try:
                if not self._stopped and callback(self, buf):
                    self._stopped = True
            finally:
                buf.release()
                try:
                    self._pool.send_buffer(block=False)
                except PiCameraPortDisabled:
                    # The port was disabled, no point trying again
                    pass
        # Workaround: There is a bug in the MJPEG encoder that causes a
        # deadlock if the FIFO is full on shutdown. Increasing the encoder
        # buffer size makes this less likely to happen. See
        # raspberrypi/userland#208. Connecting the encoder component resets the
        # output port's buffer size, hence why we correct this here, just
        # before enabling the port.
        if self._port[0].format[0].encoding == mmal.MMAL_ENCODING_MJPEG:
            self._port[0].buffer_size = max(512 * 1024, self._port[0].buffer_size_recommended)
        if callback:
            assert self._stopped
            assert self._pool is None
            self._stopped = False
            # Allocate a pool of buffers for the port; the ctypes wrapper is
            # stored on self so it isn't garbage collected while enabled
            self._pool = MMALPortPool(self)
            try:
                self._wrapper = mmal.MMAL_PORT_BH_CB_T(wrapper)
                mmal_check(
                    mmal.mmal_port_enable(self._port, self._wrapper),
                    prefix="Unable to enable port %s" % self.name)
                # If this port is an output port, send it all the buffers
                # in the pool. If it's an input port, don't bother: the user
                # will presumably want to feed buffers to it manually
                if self._port[0].type == mmal.MMAL_PORT_TYPE_OUTPUT:
                    self._pool.send_all_buffers(block=False)
            except:
                # Roll back to the disabled state on any failure
                self._pool.close()
                self._pool = None
                self._stopped = True
                raise
        else:
            super(MMALPort, self).enable()
def disable(self):
"""
Disable the port.
"""
self._stopped = True
super(MMALPort, self).disable()
if self._pool is not None:
self._pool.close()
self._pool = None
    @property
    def connection(self):
        """
        If this port is connected to another, this property holds the
        :class:`MMALConnection` or :class:`MMALPythonConnection` object which
        represents that connection. If this port is not connected, this
        property is ``None``.
        """
        # Set/cleared by the connection classes themselves (see
        # MMALBaseConnection.__init__ and close)
        return self._connection
def connect(self, other, **options):
"""
Connect this port to the *other* :class:`MMALPort` (or
:class:`MMALPythonPort`). The type and configuration of the connection
will be automatically selected.
Various connection *options* can be specified as keyword arguments.
These will be passed onto the :class:`MMALConnection` or
:class:`MMALPythonConnection` constructor that is called (see those
classes for an explanation of the available options).
"""
# Always construct connections from the output end
if self.type != mmal.MMAL_PORT_TYPE_OUTPUT:
return other.connect(self, **options)
if other.type != mmal.MMAL_PORT_TYPE_INPUT:
raise PiCameraValueError(
'A connection can only be established between an output and '
'an input port')
if isinstance(other, MMALPythonPort):
return MMALPythonConnection(self, other, **options)
else:
return MMALConnection(self, other, **options)
def disconnect(self):
"""
Destroy the connection between this port and another port.
"""
if self.connection is not None:
self.connection.close()
class MMALVideoPort(MMALPort):
    """
    Represents an MMAL port used to pass video data.
    """
    __slots__ = ()
    def __repr__(self):
        if self._port is not None:
            return (
                '<MMALVideoPort "%s": format=MMAL_FOURCC("%s") buffers=%dx%d '
                'frames=%s@%sfps colorspace=MMAL_FOURCC("%s")>' % (
                    self.name, mmal.FOURCC_str(self.format),
                    self._port[0].buffer_num, self._port[0].buffer_size,
                    self.framesize, self.framerate,
                    mmal.FOURCC_str(self.colorspace)))
        else:
            return '<MMALVideoPort closed>'
    def _get_framesize(self):
        # The crop rectangle holds the *visible* frame size; width/height
        # hold the block-aligned allocation size
        return PiResolution(
            self._port[0].format[0].es[0].video.crop.width,
            self._port[0].format[0].es[0].video.crop.height,
        )
    def _set_framesize(self, value):
        value = to_resolution(value)
        video = self._port[0].format[0].es[0].video
        # Allocated dimensions must be rounded up to the camera block size
        # (32 horizontally, 16 vertically); the crop records the real size
        video.width = bcm_host.VCOS_ALIGN_UP(value.width, 32)
        video.height = bcm_host.VCOS_ALIGN_UP(value.height, 16)
        video.crop.width = int(value.width)
        video.crop.height = int(value.height)
    framesize = property(_get_framesize, _set_framesize, doc="""\
        Retrieves or sets the size of the port's video frames as a (width,
        height) tuple. This attribute implicitly handles scaling the given
        size up to the block size of the camera (32x16).
        After setting this attribute, call :meth:`~MMALPort.commit` to make the
        changes effective.
        """)
    def _get_framerate(self):
        video = self._port[0].format[0].es[0].video
        try:
            return Fraction(
                video.frame_rate.num,
                video.frame_rate.den)
        except ZeroDivisionError:
            # A 0/0 frame-rate means "unknown/variable"; report it as 0 fps
            assert video.frame_rate.num == 0
            return Fraction(0, 1)
    def _set_framerate(self, value):
        value = to_fraction(value)
        video = self._port[0].format[0].es[0].video
        video.frame_rate.num = value.numerator
        video.frame_rate.den = value.denominator
    framerate = property(_get_framerate, _set_framerate, doc="""\
        Retrieves or sets the framerate of the port's video frames in fps.
        After setting this attribute, call :meth:`~MMALPort.commit` to make the
        changes effective.
        """)
    def _get_colorspace(self):
        return self._port[0].format[0].es[0].video.color_space
    def _set_colorspace(self, value):
        self._port[0].format[0].es[0].video.color_space = value
    colorspace = property(_get_colorspace, _set_colorspace, doc="""\
        Retrieves or sets the color-space of the port's frames.
        After setting this attribute, call :meth:`~MMALPort.commit` to make the
        changes effective.
        """)
class MMALAudioPort(MMALPort):
    """
    Represents an MMAL port used to pass audio data.
    """
    __slots__ = ()
    def __repr__(self):
        # Report a closed port distinctly; otherwise show format and buffers
        if self._port is None:
            return '<MMALAudioPort closed>'
        port = self._port[0]
        return '<MMALAudioPort "%s": format=MMAL_FOURCC(%r) buffers=%dx%d>' % (
            self.name, mmal.FOURCC_str(self.format),
            port.buffer_num, port.buffer_size)
class MMALSubPicturePort(MMALPort):
    """
    Represents an MMAL port used to pass sub-picture (caption) data.
    """
    __slots__ = ()
    def __repr__(self):
        # Report a closed port distinctly; otherwise show format and buffers
        if self._port is None:
            return '<MMALSubPicturePort closed>'
        port = self._port[0]
        return '<MMALSubPicturePort "%s": format=MMAL_FOURCC(%r) buffers=%dx%d>' % (
            self.name, mmal.FOURCC_str(self.format),
            port.buffer_num, port.buffer_size)
class MMALPortParams(object):
    """
    Represents the parameters of an MMAL port. This class implements the
    :attr:`MMALControlPort.params` attribute.
    Internally, the class understands how to convert certain structures to more
    common Python data-types. For example, parameters that expect an
    MMAL_RATIONAL_T type will return and accept Python's
    :class:`~fractions.Fraction` class (or any other numeric types), while
    parameters that expect an MMAL_BOOL_T type will treat anything as a truthy
    value. Parameters that expect the MMAL_PARAMETER_STRING_T structure will be
    treated as plain strings, and likewise MMAL_PARAMETER_INT32_T and similar
    structures will be treated as plain ints.
    Parameters that expect more complex structures will return and expect
    those structures verbatim.
    """
    __slots__ = ('_port',)
    def __init__(self, port):
        super(MMALPortParams, self).__init__()
        self._port = port
    def __getitem__(self, key):
        # Look up the parameter's ctypes structure type; KeyError here means
        # the parameter id is unknown to the mmalobj layer
        dtype = PARAM_TYPES[key]
        # Use the short-cut functions where possible (teeny bit faster if we
        # get some C to do the structure wrapping for us)
        func = {
            mmal.MMAL_PARAMETER_RATIONAL_T: mmal.mmal_port_parameter_get_rational,
            mmal.MMAL_PARAMETER_BOOLEAN_T: mmal.mmal_port_parameter_get_boolean,
            mmal.MMAL_PARAMETER_INT32_T: mmal.mmal_port_parameter_get_int32,
            mmal.MMAL_PARAMETER_INT64_T: mmal.mmal_port_parameter_get_int64,
            mmal.MMAL_PARAMETER_UINT32_T: mmal.mmal_port_parameter_get_uint32,
            mmal.MMAL_PARAMETER_UINT64_T: mmal.mmal_port_parameter_get_uint64,
        }.get(dtype, mmal.mmal_port_parameter_get)
        # conv maps the raw C result onto a friendly Python value; the
        # default (identity) returns complex structures verbatim
        conv = {
            mmal.MMAL_PARAMETER_RATIONAL_T: lambda v: Fraction(v.num, v.den),
            mmal.MMAL_PARAMETER_BOOLEAN_T: lambda v: v.value != mmal.MMAL_FALSE,
            mmal.MMAL_PARAMETER_INT32_T: lambda v: v.value,
            mmal.MMAL_PARAMETER_INT64_T: lambda v: v.value,
            mmal.MMAL_PARAMETER_UINT32_T: lambda v: v.value,
            mmal.MMAL_PARAMETER_UINT64_T: lambda v: v.value,
            mmal.MMAL_PARAMETER_STRING_T: lambda v: v.str.decode('ascii'),
        }.get(dtype, lambda v: v)
        if func == mmal.mmal_port_parameter_get:
            # Generic getter: allocate the full structure with its header
            # initialized to the parameter id and structure size
            result = dtype(
                mmal.MMAL_PARAMETER_HEADER_T(key, ct.sizeof(dtype))
            )
            mmal_check(
                func(self._port, result.hdr),
                prefix="Failed to get parameter %d" % key)
        else:
            # Short-cut getter: allocate just the plain C value the short-cut
            # function fills in (not the full parameter structure)
            dtype = {
                mmal.MMAL_PARAMETER_RATIONAL_T: mmal.MMAL_RATIONAL_T,
                mmal.MMAL_PARAMETER_BOOLEAN_T: mmal.MMAL_BOOL_T,
                mmal.MMAL_PARAMETER_INT32_T: ct.c_int32,
                mmal.MMAL_PARAMETER_INT64_T: ct.c_int64,
                mmal.MMAL_PARAMETER_UINT32_T: ct.c_uint32,
                mmal.MMAL_PARAMETER_UINT64_T: ct.c_uint64,
            }[dtype]
            result = dtype()
            mmal_check(
                func(self._port, key, result),
                prefix="Failed to get parameter %d" % key)
        return conv(result)
    def __setitem__(self, key, value):
        # Mirror image of __getitem__: pick the short-cut setter if one
        # exists for the parameter's structure type
        dtype = PARAM_TYPES[key]
        func = {
            mmal.MMAL_PARAMETER_RATIONAL_T: mmal.mmal_port_parameter_set_rational,
            mmal.MMAL_PARAMETER_BOOLEAN_T: mmal.mmal_port_parameter_set_boolean,
            mmal.MMAL_PARAMETER_INT32_T: mmal.mmal_port_parameter_set_int32,
            mmal.MMAL_PARAMETER_INT64_T: mmal.mmal_port_parameter_set_int64,
            mmal.MMAL_PARAMETER_UINT32_T: mmal.mmal_port_parameter_set_uint32,
            mmal.MMAL_PARAMETER_UINT64_T: mmal.mmal_port_parameter_set_uint64,
            mmal.MMAL_PARAMETER_STRING_T: mmal.mmal_port_parameter_set_string,
        }.get(dtype, mmal.mmal_port_parameter_set)
        # conv maps friendly Python values onto what the C function expects
        conv = {
            mmal.MMAL_PARAMETER_RATIONAL_T: lambda v: to_rational(v),
            mmal.MMAL_PARAMETER_BOOLEAN_T: lambda v: mmal.MMAL_TRUE if v else mmal.MMAL_FALSE,
            mmal.MMAL_PARAMETER_STRING_T: lambda v: v.encode('ascii'),
        }.get(dtype, lambda v: v)
        if func == mmal.mmal_port_parameter_set:
            # Generic setter: caller must supply a fully-formed structure
            # whose header already matches the parameter id and size
            mp = conv(value)
            assert mp.hdr.id == key
            assert mp.hdr.size >= ct.sizeof(dtype)
            mmal_check(
                func(self._port, mp.hdr),
                prefix="Failed to set parameter %d to %r" % (key, value))
        else:
            mmal_check(
                func(self._port, key, conv(value)),
                prefix="Failed to set parameter %d to %r" % (key, value))
class MMALBuffer(object):
    """
    Represents an MMAL buffer header. This is usually constructed from the
    buffer header pointer and is largely supplied to make working with
    the buffer's data a bit simpler. Using the buffer as a context manager
    implicitly locks the buffer's memory and returns the :mod:`ctypes`
    buffer object itself::
        def callback(port, buf):
            with buf as data:
                # data is a ctypes uint8 array with size entries
                print(len(data))
    Alternatively you can use the :attr:`data` property directly, which returns
    and modifies the buffer's data as a :class:`bytes` object (note this is
    generally slower than using the buffer object unless you are simply
    replacing the entire buffer)::
        def callback(port, buf):
            # the buffer contents as a byte-string
            print(buf.data)
    """
    __slots__ = ('_buf',)
    def __init__(self, buf):
        super(MMALBuffer, self).__init__()
        # _buf is a ctypes pointer to the underlying MMAL buffer header
        self._buf = buf
    def _get_command(self):
        return self._buf[0].cmd
    def _set_command(self, value):
        self._buf[0].cmd = value
    command = property(_get_command, _set_command, doc="""\
        The command set in the buffer's meta-data. This is usually 0 for
        buffers returned by an encoder; typically this is only used by buffers
        sent to the callback of a control port.
        """)
    def _get_flags(self):
        return self._buf[0].flags
    def _set_flags(self, value):
        self._buf[0].flags = value
    flags = property(_get_flags, _set_flags, doc="""\
        The flags set in the buffer's meta-data, returned as a bitmapped
        integer. Typical flags include:
        * ``MMAL_BUFFER_HEADER_FLAG_EOS`` -- end of stream
        * ``MMAL_BUFFER_HEADER_FLAG_FRAME_START`` -- start of frame data
        * ``MMAL_BUFFER_HEADER_FLAG_FRAME_END`` -- end of frame data
        * ``MMAL_BUFFER_HEADER_FLAG_KEYFRAME`` -- frame is a key-frame
        * ``MMAL_BUFFER_HEADER_FLAG_FRAME`` -- frame data
        * ``MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO`` -- motion estimatation data
        """)
    def _get_pts(self):
        return self._buf[0].pts
    def _set_pts(self, value):
        self._buf[0].pts = value
    pts = property(_get_pts, _set_pts, doc="""\
        The presentation timestamp (PTS) of the buffer, as an integer number
        of microseconds or ``MMAL_TIME_UNKNOWN``.
        """)
    def _get_dts(self):
        return self._buf[0].dts
    def _set_dts(self, value):
        self._buf[0].dts = value
    dts = property(_get_dts, _set_dts, doc="""\
        The decoding timestamp (DTS) of the buffer, as an integer number of
        microseconds or ``MMAL_TIME_UNKNOWN``.
        """)
    @property
    def size(self):
        """
        Returns the length of the buffer's data area in bytes. This will be
        greater than or equal to :attr:`length` and is fixed in value.
        """
        return self._buf[0].alloc_size
    def _get_offset(self):
        return self._buf[0].offset
    def _set_offset(self, value):
        assert 0 <= value <= self.size
        self._buf[0].offset = value
        # Clamp length so offset + length never exceeds the allocated size
        self.length = min(self.size - self.offset, self.length)
    offset = property(_get_offset, _set_offset, doc="""\
        The offset from the start of the buffer at which the data actually
        begins. Defaults to 0. If this is set to a value which would force the
        current :attr:`length` off the end of the buffer's :attr:`size`, then
        :attr:`length` will be decreased automatically.
        """)
    def _get_length(self):
        return self._buf[0].length
    def _set_length(self, value):
        assert 0 <= value <= self.size - self.offset
        self._buf[0].length = value
    length = property(_get_length, _set_length, doc="""\
        The length of data held in the buffer. Must be less than or equal to
        the allocated size of data held in :attr:`size` minus the data
        :attr:`offset`. This attribute can be used to effectively blank the
        buffer by setting it to zero.
        """)
    def _get_data(self):
        # Lock the buffer memory (via the context manager) and copy the
        # valid region (offset..offset+length) out as a bytes string
        with self as buf:
            return ct.string_at(
                ct.byref(buf, self._buf[0].offset),
                self._buf[0].length)
    def _set_data(self, value):
        value_len = buffer_bytes(value)
        if value_len:
            if value_len > self.size:
                raise PiCameraValueError(
                    'data is too large for buffer (%d > %d)' % (
                        value_len, self.size))
            bp = ct.c_uint8 * value_len
            try:
                # Try a zero-copy view over the source first; fall back to a
                # copy for read-only sources (e.g. bytes)
                sp = bp.from_buffer(value)
            except TypeError:
                sp = bp.from_buffer_copy(value)
            with self as buf:
                ct.memmove(buf, sp, value_len)
        # Setting data always resets offset to 0 and length to the new size
        self._buf[0].offset = 0
        self._buf[0].length = value_len
    data = property(_get_data, _set_data, doc="""\
        The data held in the buffer as a :class:`bytes` string. You can set
        this attribute to modify the data in the buffer. Acceptable values
        are anything that supports the buffer protocol, and which contains
        :attr:`size` bytes or less. Setting this attribute implicitly modifies
        the :attr:`length` attribute to the length of the specified value and
        sets :attr:`offset` to zero.
        .. note::
            Accessing a buffer's data via this attribute is relatively slow
            (as it copies the buffer's data to/from Python objects). See the
            :class:`MMALBuffer` documentation for details of a faster (but
            more complex) method.
        """)
    def replicate(self, source):
        """
        Replicates the *source* :class:`MMALBuffer`. This copies all fields
        from the *source* buffer, including the internal :attr:`data` pointer.
        In other words, after replication this buffer and the *source* buffer
        will share the same block of memory for *data*.
        The *source* buffer will also be referenced internally by this buffer
        and will only be recycled once this buffer is released.
        .. note::
            This is fundamentally different to the operation of the
            :meth:`copy_from` method. It is much faster, but imposes the burden
            that two buffers now share data (the *source* cannot be released
            until the replicant has been released).
        """
        mmal_check(
            mmal.mmal_buffer_header_replicate(self._buf, source._buf),
            prefix='unable to replicate buffer')
    def copy_from(self, source):
        """
        Copies all fields (including data) from the *source*
        :class:`MMALBuffer`. This buffer must have sufficient :attr:`size` to
        store :attr:`length` bytes from the *source* buffer. This method
        implicitly sets :attr:`offset` to zero, and :attr:`length` to the
        number of bytes copied.
        .. note::
            This is fundamentally different to the operation of the
            :meth:`replicate` method. It is much slower, but afterward the
            copied buffer is entirely independent of the *source*.
        """
        assert self.size >= source.length
        source_len = source._buf[0].length
        if source_len:
            # Lock both buffers' memory while copying the valid region
            with self as target_buf, source as source_buf:
                ct.memmove(target_buf, ct.byref(source_buf, source.offset), source_len)
        self._buf[0].offset = 0
        self._buf[0].length = source_len
        self.copy_meta(source)
    def copy_meta(self, source):
        """
        Copy meta-data from the *source* :class:`MMALBuffer`; specifically this
        copies all buffer fields with the exception of :attr:`data`,
        :attr:`length` and :attr:`offset`.
        """
        self._buf[0].cmd = source._buf[0].cmd
        self._buf[0].flags = source._buf[0].flags
        self._buf[0].dts = source._buf[0].dts
        self._buf[0].pts = source._buf[0].pts
        self._buf[0].type[0] = source._buf[0].type[0]
    def acquire(self):
        """
        Acquire a reference to the buffer. This will prevent the buffer from
        being recycled until :meth:`release` is called. This method can be
        called multiple times in which case an equivalent number of calls
        to :meth:`release` must be made before the buffer will actually be
        released.
        """
        mmal.mmal_buffer_header_acquire(self._buf)
    def release(self):
        """
        Release a reference to the buffer. This is the opposing call to
        :meth:`acquire`. Once all references have been released, the buffer
        will be recycled.
        """
        mmal.mmal_buffer_header_release(self._buf)
    def reset(self):
        """
        Resets all buffer header fields to default values.
        """
        mmal.mmal_buffer_header_reset(self._buf)
    def __enter__(self):
        # Lock the buffer's memory for the duration of the with-block and
        # expose the payload as a fixed-size ctypes uint8 array
        mmal_check(
            mmal.mmal_buffer_header_mem_lock(self._buf),
            prefix='unable to lock buffer header memory')
        return ct.cast(
            self._buf[0].data,
            ct.POINTER(ct.c_uint8 * self._buf[0].alloc_size)).contents
    def __exit__(self, *exc):
        mmal.mmal_buffer_header_mem_unlock(self._buf)
        # Returning False propagates any exception from the with-block
        return False
    def __repr__(self):
        if self._buf is not None:
            # Flags are rendered as a compact S/E/K/C/M/X legend
            return '<MMALBuffer object: flags=%s command=%s length=%d>' % (
                ''.join((
                'S' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_START else '_',
                'E' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END else '_',
                'K' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_KEYFRAME else '_',
                'C' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_CONFIG else '_',
                'M' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO else '_',
                'X' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_EOS else '_',
                )), {
                    0:                                 'none',
                    mmal.MMAL_EVENT_ERROR:             'error',
                    mmal.MMAL_EVENT_FORMAT_CHANGED:    'format-change',
                    mmal.MMAL_EVENT_PARAMETER_CHANGED: 'param-change',
                    mmal.MMAL_EVENT_EOS:               'end-of-stream',
                    }[self.command], self.length)
        else:
            return '<MMALBuffer object: ???>'
class MMALQueue(object):
    """
    Represents an MMAL buffer queue. Buffers can be added to the queue with the
    :meth:`put` method, and retrieved from the queue (with optional wait
    timeout) with the :meth:`get` method.
    """
    __slots__ = ('_queue', '_created')
    def __init__(self, queue):
        # _created tracks whether this instance owns the underlying queue
        # (and must destroy it on close) or merely wraps one owned elsewhere,
        # e.g. a pool's queue
        self._created = False
        self._queue = queue
    @classmethod
    def create(cls):
        """
        Construct a new, empty queue owned by this instance; it will be
        destroyed when :meth:`close` is called.
        """
        self = cls(mmal.mmal_queue_create())
        self._created = True
        return self
    def close(self):
        """
        Destroy the underlying queue if it was created by :meth:`create`.
        """
        if self._created:
            # Bug fix: the destroy function must be referenced via the mmal
            # module; the bare name previously raised NameError here
            mmal.mmal_queue_destroy(self._queue)
        self._queue = None
    def __len__(self):
        return mmal.mmal_queue_length(self._queue)
    def get(self, block=True, timeout=None):
        """
        Get the next buffer from the queue. If *block* is ``True`` (the default)
        and *timeout* is ``None`` (the default) then the method will block
        until a buffer is available. Otherwise *timeout* is the maximum time to
        wait (in seconds) for a buffer to become available. If a buffer is not
        available before the timeout expires, the method returns ``None``.
        Likewise, if *block* is ``False`` and no buffer is immediately
        available then ``None`` is returned.
        """
        if block and timeout is None:
            buf = mmal.mmal_queue_wait(self._queue)
        elif block and timeout is not None:
            # MMAL expects the timeout in milliseconds
            buf = mmal.mmal_queue_timedwait(self._queue, int(timeout * 1000))
        else:
            buf = mmal.mmal_queue_get(self._queue)
        # A NULL pointer (falsy) means no buffer; implicitly return None
        if buf:
            return MMALBuffer(buf)
    def put(self, buf):
        """
        Place :class:`MMALBuffer` *buf* at the back of the queue.
        """
        mmal.mmal_queue_put(self._queue, buf._buf)
    def put_back(self, buf):
        """
        Place :class:`MMALBuffer` *buf* at the front of the queue. This is
        used when a buffer was removed from the queue but needs to be put
        back at the front where it was originally taken from.
        """
        mmal.mmal_queue_put_back(self._queue, buf._buf)
class MMALPool(object):
    """
    Represents an MMAL pool containing :class:`MMALBuffer` objects. All active
    ports are associated with a pool of buffers, and a queue. Instances can be
    treated as a sequence of :class:`MMALBuffer` objects but this is only
    recommended for debugging purposes; otherwise, use the :meth:`get_buffer`,
    :meth:`send_buffer`, and :meth:`send_all_buffers` methods which work with
    the encapsulated :class:`MMALQueue`.
    """
    __slots__ = ('_pool', '_queue')
    def __init__(self, pool):
        self._pool = pool
        super(MMALPool, self).__init__()
        # Wrap the pool's own queue; MMALQueue won't destroy it on close
        # because it wasn't created via MMALQueue.create
        self._queue = MMALQueue(pool[0].queue)
    def __len__(self):
        return self._pool[0].headers_num
    def __getitem__(self, index):
        return MMALBuffer(self._pool[0].header[index])
    @property
    def queue(self):
        """
        The :class:`MMALQueue` associated with the pool.
        """
        return self._queue
    def close(self):
        # Idempotent: destroying the pool once and nulling the pointer
        if self._pool is not None:
            mmal.mmal_pool_destroy(self._pool)
            self._pool = None
    def resize(self, new_count, new_size):
        """
        Resizes the pool to contain *new_count* buffers with *new_size* bytes
        allocated to each buffer.
        *new_count* must be 1 or more (you cannot resize a pool to contain
        no headers). However, *new_size* can be 0 which causes all payload
        buffers to be released.
        .. warning::
            If the pool is associated with a port, the port must be disabled
            when resizing the pool.
        """
        mmal_check(
            mmal.mmal_pool_resize(self._pool, new_count, new_size),
            prefix='unable to resize pool')
    def get_buffer(self, block=True, timeout=None):
        """
        Get the next buffer from the pool's queue. See :meth:`MMALQueue.get`
        for the meaning of the parameters.
        """
        return self._queue.get(block, timeout)
    def send_buffer(self, port, block=True, timeout=None):
        """
        Get a buffer from the pool's queue and send it to *port*. *block* and
        *timeout* act as they do in :meth:`get_buffer`. If no buffer is
        available (for the values of *block* and *timeout*,
        :exc:`~picamerax.PiCameraMMALError` is raised).
        """
        buf = self.get_buffer(block, timeout)
        if buf is None:
            raise PiCameraMMALError(mmal.MMAL_EAGAIN, 'no buffers available')
        port.send_buffer(buf)
    def send_all_buffers(self, port, block=True, timeout=None):
        """
        Send all buffers from the queue to *port*. *block* and *timeout* act as
        they do in :meth:`get_buffer`. If no buffer is available (for the
        values of *block* and *timeout*, :exc:`~picamerax.PiCameraMMALError` is
        raised).
        """
        # Snapshot the queue length once; buffers sent to the port may be
        # returned to the queue while we iterate
        for i in range(len(self._queue)):
            self.send_buffer(port, block, timeout)
class MMALPortPool(MMALPool):
    """
    Construct an MMAL pool for the number and size of buffers required by
    the :class:`MMALPort` *port*.
    """
    __slots__ = ('_port',)
    def __init__(self, port):
        # Size the pool from the port's currently configured buffer count
        # and size
        pool = mmal.mmal_port_pool_create(
            port._port, port._port[0].buffer_num, port._port[0].buffer_size)
        if not pool:
            raise PiCameraMMALError(
                mmal.MMAL_ENOSPC,
                'failed to create buffer header pool for port %s' % port.name)
        super(MMALPortPool, self).__init__(pool)
        self._port = port
    def close(self):
        # Port-associated pools must be destroyed with the port-specific
        # call; null _pool first so the base close() becomes a no-op
        if self._pool is not None:
            mmal.mmal_port_pool_destroy(self._port._port, self._pool)
            self._port = None
            self._pool = None
        super(MMALPortPool, self).close()
    @property
    def port(self):
        # The MMALPort this pool was created for
        return self._port
    def send_buffer(self, port=None, block=True, timeout=None):
        """
        Get a buffer from the pool and send it to *port* (or the port the pool
        is associated with by default). *block* and *timeout* act as they do in
        :meth:`MMALPool.get_buffer`.
        """
        if port is None:
            port = self._port
        super(MMALPortPool, self).send_buffer(port, block, timeout)
    def send_all_buffers(self, port=None, block=True, timeout=None):
        """
        Send all buffers from the pool to *port* (or the port the pool is
        associated with by default). *block* and *timeout* act as they do in
        :meth:`MMALPool.get_buffer`.
        """
        if port is None:
            port = self._port
        super(MMALPortPool, self).send_all_buffers(port, block, timeout)
class MMALBaseConnection(MMALObject):
    """
    Abstract base class for :class:`MMALConnection` and
    :class:`MMALPythonConnection`. Handles weakrefs to the source and
    target ports, and format negotiation. All other connection details are
    handled by the descendent classes.
    """
    __slots__ = ('_source', '_target')
    default_formats = ()
    # Pairs of (source, target) OPAQUE sub-formats that can be connected
    # directly; OPAQUE ports never advertise support so this table is the
    # only compatibility information available
    compatible_opaque_formats = {
        ('OPQV-single', 'OPQV-single'),
        ('OPQV-dual', 'OPQV-dual'),
        ('OPQV-strips', 'OPQV-strips'),
        ('OPQV-dual', 'OPQV-single'),
        ('OPQV-single', 'OPQV-dual'), # recent firmwares permit this
        }
    def __init__(
            self, source, target, formats=default_formats):
        # NOTE: the default binds this base class' (empty) default_formats
        # at definition time; descendents pass their own tuple explicitly
        super(MMALBaseConnection, self).__init__()
        if not isinstance(source, (MMALPort, MMALPythonPort)):
            raise PiCameraValueError('source is not a port')
        if not isinstance(target, (MMALPort, MMALPythonPort)):
            raise PiCameraValueError('target is not a port')
        if source.type != mmal.MMAL_PORT_TYPE_OUTPUT:
            raise PiCameraValueError('source is not an output port')
        if target.type != mmal.MMAL_PORT_TYPE_INPUT:
            raise PiCameraValueError('target is not an input port')
        if source.connection is not None:
            raise PiCameraValueError('source port is already connected')
        if target.connection is not None:
            raise PiCameraValueError('target port is already connected')
        if formats is None:
            formats = ()
        self._source = source
        self._target = target
        # Accept a single format as well as an iterable of formats
        try:
            iter(formats)
        except TypeError:
            formats = (formats,)
        self._negotiate_format(formats)
        # Register this connection on both ports (see MMALPort.connection)
        source._connection = self
        target._connection = self
        # Descendents continue with connection implementation...
    def close(self):
        # De-register from both ports; idempotent
        if self._source is not None:
            self._source._connection = None
        self._source = None
        if self._target is not None:
            self._target._connection = None
        self._target = None
    def _negotiate_format(self, formats):
        def copy_format():
            # Commit the source format, mirror it onto the target, and
            # commit that too; raises MMAL_EINVAL if either port rejects it
            self._source.commit()
            self._target.copy_from(self._source)
            self._target.commit()
        def max_buffers():
            # Equalize buffer counts/sizes on both ports to the larger of
            # the two recommendations
            self._source.buffer_count = self._target.buffer_count = max(
                self._source.buffer_count, self._target.buffer_count)
            self._source.buffer_size = self._target.buffer_size = max(
                self._source.buffer_size, self._target.buffer_size)
        # Filter out formats that aren't supported on both source and target
        # ports. This is a little tricky as ports that support OPAQUE never
        # claim they do (so we have to assume it's mutually supported)
        mutually_supported = (
            set(self._source.supported_formats) &
            set(self._target.supported_formats)
            ) | {mmal.MMAL_ENCODING_OPAQUE}
        formats = [f for f in formats if f in mutually_supported]
        if formats:
            # If there are any formats left to try, perform the negotiation
            # with the filtered list. Again, there's some special casing to
            # deal with the incompatible OPAQUE sub-formats
            for f in formats:
                if f == mmal.MMAL_ENCODING_OPAQUE:
                    if (self._source.opaque_subformat,
                            self._target.opaque_subformat) in self.compatible_opaque_formats:
                        self._source.format = mmal.MMAL_ENCODING_OPAQUE
                    else:
                        continue
                else:
                    self._source.format = f
                try:
                    copy_format()
                except PiCameraMMALError as e:
                    if e.status != mmal.MMAL_EINVAL:
                        raise
                    # EINVAL just means "this format won't do"; try the next
                    continue
                else:
                    max_buffers()
                    return
            raise PiCameraMMALError(
                mmal.MMAL_EINVAL, 'failed to negotiate port format')
        else:
            # If no formats are available to try (either from filtering or
            # because none were given), assume the source port is set up
            # properly. Just copy the format to the target and hope the caller
            # knows what they're doing
            try:
                copy_format()
            except PiCameraMMALError as e:
                if e.status != mmal.MMAL_EINVAL:
                    raise
                raise PiCameraMMALError(
                    mmal.MMAL_EINVAL, 'failed to copy source format to target port')
            else:
                max_buffers()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()
    @property
    def source(self):
        """
        The source :class:`MMALPort` or :class:`MMALPythonPort` of the
        connection.
        """
        return self._source
    @property
    def target(self):
        """
        The target :class:`MMALPort` or :class:`MMALPythonPort` of the
        connection.
        """
        return self._target
class MMALConnection(MMALBaseConnection):
    """
    Represents an MMAL internal connection between two components. The
    constructor accepts arguments providing the *source* :class:`MMALPort` and
    *target* :class:`MMALPort`.
    The *formats* parameter specifies an iterable of formats (in preference
    order) that the connection may attempt when negotiating formats between
    the two ports. If this is ``None``, or an empty iterable, no negotiation
    will take place and the source port's format will simply be copied to the
    target port. Otherwise, the iterable will be worked through in order until
    a format acceptable to both ports is discovered.
    .. note::
        The default *formats* list starts with OPAQUE; the class understands
        the different OPAQUE sub-formats (see :ref:`mmal` for more information)
        and will only select OPAQUE if compatible sub-formats can be used on
        both ports.
    The *callback* parameter can optionally specify a callable which will be
    executed for each buffer that traverses the connection (providing an
    opportunity to manipulate or drop that buffer). If specified, it must be a
    callable which accepts two parameters: the :class:`MMALConnection` object
    sending the data, and the :class:`MMALBuffer` object containing data. The
    callable may optionally manipulate the :class:`MMALBuffer` and return it
    to permit it to continue traversing the connection, or return ``None``
    in which case the buffer will be released.
    .. note::
        There is a significant performance penalty for specifying a
        callback between MMAL components as it requires buffers to be
        copied from the GPU's memory to the CPU's memory and back again.
    .. data:: default_formats
        :annotation: = (MMAL_ENCODING_OPAQUE, MMAL_ENCODING_I420, MMAL_ENCODING_RGB24, MMAL_ENCODING_BGR24, MMAL_ENCODING_RGBA, MMAL_ENCODING_BGRA)
        Class attribute defining the default formats used to negotiate
        connections between MMAL components.
    """
    __slots__ = ('_connection', '_callback', '_wrapper')
    default_formats = (
        mmal.MMAL_ENCODING_OPAQUE,
        mmal.MMAL_ENCODING_I420,
        mmal.MMAL_ENCODING_RGB24,
        mmal.MMAL_ENCODING_BGR24,
        mmal.MMAL_ENCODING_RGBA,
        mmal.MMAL_ENCODING_BGRA,
        )
    def __init__(
            self, source, target, formats=default_formats, callback=None):
        if not isinstance(source, MMALPort):
            raise PiCameraValueError('source is not an MMAL port')
        if not isinstance(target, MMALPort):
            raise PiCameraValueError('target is not an MMAL port')
        super(MMALConnection, self).__init__(source, target, formats)
        self._connection = ct.POINTER(mmal.MMAL_CONNECTION_T)()
        self._callback = callback
        flags = mmal.MMAL_CONNECTION_FLAG_ALLOCATION_ON_INPUT
        if callback is None:
            # Without a callback, let MMAL tunnel buffers entirely on the
            # GPU side (fast path)
            flags |= mmal.MMAL_CONNECTION_FLAG_TUNNELLING
        try:
            mmal_check(
                mmal.mmal_connection_create(
                    self._connection, source._port, target._port, flags),
                prefix="Failed to create connection")
        except:
            self._connection = None
            raise
    def close(self):
        if self._connection is not None:
            mmal.mmal_connection_destroy(self._connection)
            self._connection = None
        self._wrapper = None
        super(MMALConnection, self).close()
    @property
    def enabled(self):
        """
        Returns ``True`` if the connection is enabled. Use :meth:`enable`
        and :meth:`disable` to control the state of the connection.
        """
        return bool(self._connection[0].is_enabled)
    def enable(self):
        """
        Enable the connection. When a connection is enabled, data is
        continually transferred from the output port of the source to the input
        port of the target component.
        """
        def wrapper(connection):
            # Connection-level callback (only registered when a user
            # callback was given). First, pass any buffer from the
            # connection queue through the user callback and on to the
            # target (or release it if the callback returns None)
            buf = mmal.mmal_queue_get(connection[0].queue)
            if buf:
                buf = MMALBuffer(buf)
                try:
                    modified_buf = self._callback(self, buf)
                except:
                    buf.release()
                    raise
                else:
                    if modified_buf is not None:
                        try:
                            self._target.send_buffer(modified_buf)
                        except PiCameraPortDisabled:
                            # Target port disabled; ignore the error
                            pass
                    else:
                        buf.release()
                    return
            # Second, return any spare buffer from the connection's pool to
            # the source port so it can keep producing
            buf = mmal.mmal_queue_get(connection[0].pool[0].queue)
            if buf:
                buf = MMALBuffer(buf)
                try:
                    self._source.send_buffer(buf)
                except PiCameraPortDisabled:
                    # Source port has been disabled; ignore the error
                    pass
        if self._callback is not None:
            # Keep a reference to the ctypes wrapper so it isn't garbage
            # collected while MMAL holds the function pointer
            self._wrapper = mmal.MMAL_CONNECTION_CALLBACK_T(wrapper)
            self._connection[0].callback = self._wrapper
            # Zero-copy avoids a redundant GPU<->CPU copy on both ports
            self._source.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True
            self._target.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True
        mmal_check(
            mmal.mmal_connection_enable(self._connection),
            prefix="Failed to enable connection")
        if self._callback is not None:
            # Prime the source port with all of the connection pool's buffers
            MMALPool(self._connection[0].pool).send_all_buffers(self._source)
    def disable(self):
        """
        Disables the connection.
        """
        mmal_check(
            mmal.mmal_connection_disable(self._connection),
            prefix="Failed to disable connection")
        self._wrapper = None
    @property
    def name(self):
        return self._connection[0].name.decode('ascii')
    def __repr__(self):
        if self._connection is not None:
            return '<MMALConnection "%s">' % self.name
        else:
            return '<MMALConnection closed>'
class MMALRawCamera(MMALBaseComponent):
    """
    The MMAL "raw camera" component.

    Don't use this! If you insist on using this anyway, read the forum post
    about `raw sensor access`_ first.

    .. _raw sensor access: https://www.raspberrypi.org/forums/viewtopic.php?f=43&t=109137
    """
    __slots__ = ()
    component_type = mmal.MMAL_COMPONENT_RAW_CAMERA
    # No inputs; a single output carrying the single-image opaque sub-format
    opaque_input_subformats = ()
    opaque_output_subformats = ('OPQV-single',)
class MMALCamera(MMALBaseComponent):
    """
    Represents the MMAL camera component. This component has 0 input ports and
    3 output ports. The intended use of the output ports (which in turn
    determines the behaviour of those ports) is as follows:

    * Port 0 is intended for preview renderers
    * Port 1 is intended for video recording
    * Port 2 is intended for still image capture

    Use the ``MMAL_PARAMETER_CAMERA_CONFIG`` parameter on the control port to
    obtain and manipulate the camera's configuration.
    """
    __slots__ = ()
    component_type = mmal.MMAL_COMPONENT_DEFAULT_CAMERA
    opaque_output_subformats = ('OPQV-single', 'OPQV-dual', 'OPQV-strips')
    # Known revisions of the annotation structure, oldest first
    annotate_structs = (
        mmal.MMAL_PARAMETER_CAMERA_ANNOTATE_T,
        mmal.MMAL_PARAMETER_CAMERA_ANNOTATE_V2_T,
        mmal.MMAL_PARAMETER_CAMERA_ANNOTATE_V3_T,
        )

    def __init__(self):
        global FIX_RGB_BGR_ORDER
        super(MMALCamera, self).__init__()
        if PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] is None:
            found = False
            # try largest struct to smallest as later firmwares still happily
            # accept earlier revision structures
            # XXX do old firmwares reject too-large structs?
            for struct in reversed(MMALCamera.annotate_structs):
                try:
                    PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] = struct
                    # Reading the parameter probes whether the firmware
                    # accepts this structure revision
                    self.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
                except PiCameraMMALError:
                    pass
                else:
                    found = True
                    break
            if not found:
                PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] = None
                raise PiCameraMMALError(
                    mmal.MMAL_EINVAL, "unknown camera annotation structure revision")
        if FIX_RGB_BGR_ORDER is None:
            # old firmware lists BGR24 before RGB24 in supported_formats
            for f in self.outputs[1].supported_formats:
                if f == mmal.MMAL_ENCODING_BGR24:
                    FIX_RGB_BGR_ORDER = True
                    break
                elif f == mmal.MMAL_ENCODING_RGB24:
                    FIX_RGB_BGR_ORDER = False
                    break

    def _get_annotate_rev(self):
        try:
            return MMALCamera.annotate_structs.index(
                PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE]) + 1
        except ValueError:
            # BUGFIX: tuple.index raises ValueError (not IndexError) when the
            # current structure is not one of the known revisions; catching
            # IndexError let the ValueError escape uncaught
            raise PiCameraMMALError(
                mmal.MMAL_EINVAL, "unknown camera annotation structure revision")

    def _set_annotate_rev(self, value):
        # NOTE(review): a value of 0 yields index -1 which silently selects
        # the newest revision rather than raising; retained for compatibility
        try:
            PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] = \
                MMALCamera.annotate_structs[value - 1]
        except IndexError:
            raise PiCameraMMALError(
                mmal.MMAL_EINVAL, "invalid camera annotation structure revision")

    annotate_rev = property(_get_annotate_rev, _set_annotate_rev, doc="""\
        The annotation capabilities of the firmware have evolved over time and
        several structures are available for querying and setting video
        annotations. By default the :class:`MMALCamera` class will pick the
        latest annotation structure supported by the current firmware but you
        can select older revisions with :attr:`annotate_rev` for other purposes
        (e.g. testing).
        """)
class MMALCameraInfo(MMALBaseComponent):
    """
    Represents the MMAL camera-info component. Query the
    ``MMAL_PARAMETER_CAMERA_INFO`` parameter on the control port to obtain
    information about the connected camera module.
    """
    __slots__ = ()
    component_type = mmal.MMAL_COMPONENT_DEFAULT_CAMERA_INFO
    # Known revisions of the camera-info structure, oldest first
    info_structs = (
        mmal.MMAL_PARAMETER_CAMERA_INFO_T,
        mmal.MMAL_PARAMETER_CAMERA_INFO_V2_T,
        )

    def __init__(self):
        super(MMALCameraInfo, self).__init__()
        if PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] is None:
            found = False
            # try smallest structure to largest as later firmwares reject
            # older structures
            for struct in MMALCameraInfo.info_structs:
                try:
                    PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] = struct
                    # Reading the parameter probes whether the firmware
                    # accepts this structure revision
                    self.control.params[mmal.MMAL_PARAMETER_CAMERA_INFO]
                except PiCameraMMALError:
                    pass
                else:
                    found = True
                    break
            if not found:
                PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] = None
                raise PiCameraMMALError(
                    mmal.MMAL_EINVAL, "unknown camera info structure revision")

    def _get_info_rev(self):
        try:
            return MMALCameraInfo.info_structs.index(
                PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO]) + 1
        except ValueError:
            # BUGFIX: tuple.index raises ValueError (not IndexError) when the
            # value is absent; catching IndexError let the error escape
            raise PiCameraMMALError(
                mmal.MMAL_EINVAL, "unknown camera info structure revision")

    def _set_info_rev(self, value):
        try:
            PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] = \
                MMALCameraInfo.info_structs[value - 1]
        except IndexError:
            raise PiCameraMMALError(
                mmal.MMAL_EINVAL, "invalid camera info structure revision")

    info_rev = property(_get_info_rev, _set_info_rev, doc="""\
        The camera information capabilities of the firmware have evolved over
        time and several structures are available for querying camera
        information. When initialized, :class:`MMALCameraInfo` will attempt
        to discover which structure is in use by the extant firmware. This
        property can be used to discover the structure version and to modify
        the version in use for other purposes (e.g. testing).
        """)
class MMALComponent(MMALBaseComponent):
    """
    Represents an MMAL component that acts as a filter of some sort, with a
    single input that connects to an upstream source port. This is an abstract
    base class.
    """
    __slots__ = ()

    def __init__(self):
        super(MMALComponent, self).__init__()
        # Filters have exactly one input by definition
        assert len(self.opaque_input_subformats) == 1

    def close(self):
        """Break the upstream connection, then close the component."""
        self.disconnect()
        super(MMALComponent, self).close()

    def enable(self):
        """Enable the component and its upstream connection (if any)."""
        super(MMALComponent, self).enable()
        if self.connection is not None:
            self.connection.enable()

    def disable(self):
        """Disable the upstream connection (if any), then the component."""
        if self.connection is not None:
            self.connection.disable()
        super(MMALComponent, self).disable()

    def connect(self, source, **options):
        """
        Connects the input port of this component to the specified *source*
        :class:`MMALPort` or :class:`MMALPythonPort`. Alternatively, as a
        convenience (primarily intended for command line experimentation; don't
        use this in scripts), *source* can be another component in which case
        the first unconnected output port will be selected as *source*.

        Keyword arguments will be passed along to the connection constructor.
        See :class:`MMALConnection` and :class:`MMALPythonConnection` for
        further information.
        """
        if isinstance(source, (MMALPort, MMALPythonPort)):
            # BUGFIX: forward *options* on this branch too; they were
            # previously dropped despite the docstring's promise
            return self.inputs[0].connect(source, **options)
        else:
            for port in source.outputs:
                if not port.connection:
                    return self.inputs[0].connect(port, **options)
            raise PiCameraMMALError(
                mmal.MMAL_EINVAL, 'no free output ports on %r' % source)

    def disconnect(self):
        """
        Destroy the connection between this component's input port and the
        upstream component.
        """
        self.inputs[0].disconnect()

    @property
    def connection(self):
        """
        The :class:`MMALConnection` or :class:`MMALPythonConnection` object
        linking this component to the upstream component.
        """
        return self.inputs[0].connection
class MMALSplitter(MMALComponent):
    """
    Represents the MMAL splitter component. This component has 1 input port
    and 4 output ports which all generate duplicates of buffers passed to the
    input port.
    """
    __slots__ = ()
    component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_SPLITTER
    # Single-image opaque sub-format on the input and on all four outputs
    opaque_input_subformats = ('OPQV-single',)
    opaque_output_subformats = ('OPQV-single',) * 4
class MMALISPResizer(MMALComponent):
    """
    Represents the MMAL ISP resizer component. This component has 1 input port
    and 1 output port, and supports resizing via the VideoCore ISP, along with
    conversion of numerous formats into numerous other formats (e.g. OPAQUE to
    RGB, etc). This is more efficient than :class:`MMALResizer` but is only
    available on later firmware versions.
    """
    __slots__ = ()
    component_type = mmal.MMAL_COMPONENT_DEFAULT_ISP
    # Opaque input accepted; output has no opaque sub-format
    opaque_input_subformats = ('OPQV-single',)
    opaque_output_subformats = (None,)
class MMALResizer(MMALComponent):
    """
    Represents the MMAL VPU resizer component. This component has 1 input port
    and 1 output port. This supports resizing via the VPU. This is not as
    efficient as :class:`MMALISPResizer` but is available on all firmware
    versions. The output port can (and usually should) have a different frame
    size to the input port.
    """
    __slots__ = ()
    component_type = mmal.MMAL_COMPONENT_DEFAULT_RESIZER
    # No opaque sub-format on either port
    opaque_input_subformats = (None,)
    opaque_output_subformats = (None,)
class MMALEncoder(MMALComponent):
    """
    Represents a generic MMAL encoder. This is an abstract base class.
    """
    __slots__ = ()
class MMALVideoEncoder(MMALEncoder):
    """
    Represents the MMAL video encoder component. This component has 1 input
    port and 1 output port. The output port is usually configured with
    ``MMAL_ENCODING_H264`` or ``MMAL_ENCODING_MJPEG``.
    """
    __slots__ = ()
    component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_ENCODER
    # Dual-image opaque input (as produced by the camera's video port);
    # encoded output has no opaque sub-format
    opaque_input_subformats = ('OPQV-dual',)
    opaque_output_subformats = (None,)
class MMALImageEncoder(MMALEncoder):
    """
    Represents the MMAL image encoder component. This component has 1 input
    port and 1 output port. The output port is typically configured with
    ``MMAL_ENCODING_JPEG`` but can also use ``MMAL_ENCODING_PNG``,
    ``MMAL_ENCODING_GIF``, etc.
    """
    __slots__ = ()
    component_type = mmal.MMAL_COMPONENT_DEFAULT_IMAGE_ENCODER
    # Strips opaque input (as produced by the camera's still port);
    # encoded output has no opaque sub-format
    opaque_input_subformats = ('OPQV-strips',)
    opaque_output_subformats = (None,)
class MMALDecoder(MMALComponent):
    """
    Represents a generic MMAL decoder. This is an abstract base class.
    """
    __slots__ = ()
class MMALVideoDecoder(MMALDecoder):
    """
    Represents the MMAL video decoder component. This component has 1 input
    port and 1 output port. The input port is usually configured with
    ``MMAL_ENCODING_H264`` or ``MMAL_ENCODING_MJPEG``.
    """
    __slots__ = ()
    component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_DECODER
    # Encoded input has no opaque sub-format; decoded output is opaque
    opaque_input_subformats = (None,)
    opaque_output_subformats = ('OPQV-single',)
class MMALImageDecoder(MMALDecoder):
    """
    Represents the MMAL image decoder component. This component has 1 input
    port and 1 output port. The input port is usually configured with
    ``MMAL_ENCODING_JPEG``.
    """
    __slots__ = ()
    component_type = mmal.MMAL_COMPONENT_DEFAULT_IMAGE_DECODER
    # Encoded input has no opaque sub-format; decoded output is opaque
    opaque_input_subformats = (None,)
    opaque_output_subformats = ('OPQV-single',)
class MMALRenderer(MMALComponent):
    """
    Represents the MMAL renderer component. This component has 1 input port and
    0 output ports. It is used to implement the camera preview and overlays.
    """
    __slots__ = ()
    component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_RENDERER
    opaque_input_subformats = ('OPQV-single',)
class MMALNullSink(MMALComponent):
    """
    Represents the MMAL null-sink component. This component has 1 input port
    and 0 output ports. It is used to keep the preview port "alive" (and thus
    calculating white-balance and exposure) when the camera preview is not
    required.
    """
    __slots__ = ()
    component_type = mmal.MMAL_COMPONENT_DEFAULT_NULL_SINK
    opaque_input_subformats = ('OPQV-single',)
class MMALPythonPort(MMALObject):
    """
    Implements ports for Python-based MMAL components.
    """
    __slots__ = (
        '_buffer_count',
        '_buffer_size',
        '_connection',
        '_enabled',
        '_owner',
        '_pool',
        '_type',
        '_index',
        '_supported_formats',
        '_format',
        '_callback',
        )

    # Bytes per pixel for the uncompressed full-frame formats the port
    # supports by default; used by commit() to derive buffer sizes from
    # frame dimensions
    _FORMAT_BPP = {
        'I420': 1.5,
        'RGB3': 3,
        'RGBA': 4,
        'BGR3': 3,
        'BGRA': 4,
        }

    def __init__(self, owner, port_type, index):
        self._buffer_count = 2
        self._buffer_size = 0
        self._connection = None
        self._enabled = False
        # weak ref avoids a reference cycle between port and owning component
        self._owner = weakref.ref(owner)
        self._pool = None
        self._callback = None
        self._type = port_type
        self._index = index
        self._supported_formats = {
            mmal.MMAL_ENCODING_I420,
            mmal.MMAL_ENCODING_RGB24,
            mmal.MMAL_ENCODING_BGR24,
            mmal.MMAL_ENCODING_RGBA,
            mmal.MMAL_ENCODING_BGRA,
            }
        self._format = ct.pointer(mmal.MMAL_ES_FORMAT_T(
            type=mmal.MMAL_ES_TYPE_VIDEO,
            encoding=mmal.MMAL_ENCODING_I420,
            es=ct.pointer(mmal.MMAL_ES_SPECIFIC_FORMAT_T())))

    def close(self):
        # Break any connection and release resources; the port cannot be
        # used afterwards
        self.disconnect()
        self.disable()
        self._format = None

    def __repr__(self):
        return '<MMALPythonPort "%s": format=MMAL_FOURCC(%r) buffers=%dx%d frames=%s@%sfps>' % (
            self.name, mmal.FOURCC_str(self.format), self.buffer_count,
            self.buffer_size, self.framesize, self.framerate)

    def _get_bitrate(self):
        return self._format[0].bitrate
    def _set_bitrate(self, value):
        self._format[0].bitrate = value
    bitrate = property(_get_bitrate, _set_bitrate, doc="""\
        Retrieves or sets the bitrate limit for the port's format.
        """)

    def _get_supported_formats(self):
        return self._supported_formats
    def _set_supported_formats(self, value):
        try:
            value = {f for f in value}
        except TypeError:
            # A single (non-iterable) format was given
            value = {value}
        if not value:
            raise PiCameraMMALError(
                mmal.MMAL_EINVAL, "port must have at least one valid format")
        self._supported_formats = value
    supported_formats = property(_get_supported_formats, _set_supported_formats, doc="""\
        Retrieves or sets the set of valid formats for this port. The set must
        always contain at least one valid format. A single format can be
        specified; it will be converted implicitly to a singleton set.

        If the current port :attr:`format` is not a member of the new set, no
        error is raised. An error will be raised when :meth:`commit` is next
        called if :attr:`format` is still not a member of the set.
        """)

    def _get_format(self):
        return self._format[0].encoding
    def _set_format(self, value):
        self._format[0].encoding = value
    format = property(_get_format, _set_format, doc="""\
        Retrieves or sets the encoding format of the port. Setting this
        attribute implicitly sets the encoding variant to a sensible value
        (I420 in the case of OPAQUE).
        """)

    def _get_framesize(self):
        return PiResolution(
            self._format[0].es[0].video.crop.width,
            self._format[0].es[0].video.crop.height,
            )
    def _set_framesize(self, value):
        value = to_resolution(value)
        video = self._format[0].es[0].video
        # The full frame is padded up to camera block size; the crop
        # rectangle records the caller's requested size
        video.width = bcm_host.VCOS_ALIGN_UP(value.width, 32)
        video.height = bcm_host.VCOS_ALIGN_UP(value.height, 16)
        video.crop.width = value.width
        video.crop.height = value.height
    framesize = property(_get_framesize, _set_framesize, doc="""\
        Retrieves or sets the size of the source's video frames as a (width,
        height) tuple. This attribute implicitly handles scaling the given
        size up to the block size of the camera (32x16).
        """)

    def _get_framerate(self):
        video = self._format[0].es[0].video
        try:
            return Fraction(
                video.frame_rate.num,
                video.frame_rate.den)
        except ZeroDivisionError:
            # A zero denominator indicates "unknown" framerate
            return Fraction(0, 1)
    def _set_framerate(self, value):
        value = to_fraction(value)
        video = self._format[0].es[0].video
        video.frame_rate.num = value.numerator
        video.frame_rate.den = value.denominator
    framerate = property(_get_framerate, _set_framerate, doc="""\
        Retrieves or sets the framerate of the port's video frames in fps.
        """)

    @property
    def pool(self):
        """
        Returns the :class:`MMALPool` associated with the buffer, if any.
        """
        return self._pool

    @property
    def opaque_subformat(self):
        # Python ports never carry opaque formats
        return None

    def _get_buffer_count(self):
        return self._buffer_count
    def _set_buffer_count(self, value):
        if value < 1:
            raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer count <1')
        self._buffer_count = int(value)
    buffer_count = property(_get_buffer_count, _set_buffer_count, doc="""\
        The number of buffers allocated (or to be allocated) to the port. The
        default is 2 but more may be required in the case of long pipelines
        with replicated buffers.
        """)

    def _get_buffer_size(self):
        return self._buffer_size
    def _set_buffer_size(self, value):
        if value < 0:
            raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer size <0')
        self._buffer_size = value
    buffer_size = property(_get_buffer_size, _set_buffer_size, doc="""\
        The size of buffers allocated (or to be allocated) to the port. The
        size of buffers defaults to a value dictated by the port's format.
        """)

    def copy_from(self, source):
        """
        Copies the port's :attr:`format` from the *source*
        :class:`MMALControlPort`.
        """
        if isinstance(source, MMALPythonPort):
            mmal.mmal_format_copy(self._format, source._format)
        else:
            mmal.mmal_format_copy(self._format, source._port[0].format)

    def commit(self):
        """
        Commits the port's configuration and automatically updates the number
        and size of associated buffers. This is typically called after
        adjusting the port's format and/or associated settings (like width and
        height for video ports).
        """
        if self.format not in self.supported_formats:
            raise PiCameraMMALError(
                mmal.MMAL_EINVAL, 'invalid format for port %r' % self)
        self._buffer_count = 2
        video = self._format[0].es[0].video
        try:
            self._buffer_size = int(
                MMALPythonPort._FORMAT_BPP[str(self.format)]
                * video.width
                * video.height)
        except KeyError:
            # If it's an unknown / encoded format just leave the buffer size
            # alone and hope the owning component knows what to set
            pass
        # Give the owning component a chance to react (e.g. reconfigure
        # its output ports)
        self._owner()._commit_port(self)

    @property
    def enabled(self):
        """
        Returns a :class:`bool` indicating whether the port is currently
        enabled. Unlike other classes, this is a read-only property. Use
        :meth:`enable` and :meth:`disable` to modify the value.
        """
        return self._enabled

    def enable(self, callback=None):
        """
        Enable the port with the specified callback function (this must be
        ``None`` for connected ports, and a callable for disconnected ports).

        The callback function must accept two parameters which will be this
        :class:`MMALControlPort` (or descendent) and an :class:`MMALBuffer`
        instance. Any return value will be ignored.
        """
        if self._connection is not None:
            if callback is not None:
                raise PiCameraMMALError(
                    mmal.MMAL_EINVAL,
                    'connected ports must be enabled without callback')
        else:
            if callback is None:
                raise PiCameraMMALError(
                    mmal.MMAL_EINVAL,
                    'unconnected ports must be enabled with callback')
        if self.type == mmal.MMAL_PORT_TYPE_INPUT or self._connection is None:
            # Only input ports and unconnected ports own a pool; connected
            # output ports proxy buffers from the target input port
            self._pool = MMALPythonPortPool(self)
        self._callback = callback
        self._enabled = True

    def disable(self):
        """
        Disable the port.
        """
        self._enabled = False
        if self._pool is not None:
            # Release any unprocessed buffers from the owner's queue before
            # we destroy them all
            while True:
                buf = self._owner()._queue.get(False)
                if buf:
                    buf.release()
                else:
                    break
            self._pool.close()
            self._pool = None
        self._callback = None

    def get_buffer(self, block=True, timeout=None):
        """
        Returns a :class:`MMALBuffer` from the associated :attr:`pool`. *block*
        and *timeout* act as they do in the corresponding
        :meth:`MMALPool.get_buffer`.
        """
        if not self._enabled:
            raise PiCameraPortDisabled(
                'cannot get buffer from disabled port %s' % self.name)
        if self._pool is not None:
            # Unconnected port or input port case; retrieve buffer from the
            # allocated pool
            return self._pool.get_buffer(block, timeout)
        else:
            # Connected output port case; get a buffer from the target input
            # port (in this case the port is just a thin proxy for the
            # corresponding input port)
            assert self.type == mmal.MMAL_PORT_TYPE_OUTPUT
            return self._connection.target.get_buffer(block, timeout)

    def send_buffer(self, buf):
        """
        Send :class:`MMALBuffer` *buf* to the port.
        """
        # NOTE: The MMALPythonConnection callback must occur *before* the test
        # for the port being enabled; it's meant to be the connection making
        # the callback prior to the buffer getting to the port after all
        if (
                self.type == mmal.MMAL_PORT_TYPE_INPUT and
                self._connection._callback is not None):
            try:
                modified_buf = self._connection._callback(self._connection, buf)
            except:
                buf.release()
                raise
            else:
                if modified_buf is None:
                    # The callback consumed the buffer; release it and stop.
                    # BUGFIX: previously execution fell through here and went
                    # on to queue / forward the already-released buffer
                    buf.release()
                    return
                else:
                    buf = modified_buf
        if not self._enabled:
            raise PiCameraPortDisabled(
                'cannot send buffer to disabled port %s' % self.name)
        if self._callback is not None:
            try:
                # XXX Return value? If it's an input port we should ignore it,
                # but what about output ports?
                self._callback(self, buf)
            except:
                buf.release()
                raise
        if self._type == mmal.MMAL_PORT_TYPE_INPUT:
            # Input port case; queue the buffer for processing on the
            # owning component
            self._owner()._queue.put(buf)
        elif self._connection is None:
            # Unconnected output port case; release the buffer back to the
            # pool
            buf.release()
        else:
            # Connected output port case; forward the buffer to the
            # connected component's input port
            # XXX If it's a format-change event?
            self._connection.target.send_buffer(buf)

    @property
    def name(self):
        return '%s:%s:%d' % (self._owner().name, {
            mmal.MMAL_PORT_TYPE_OUTPUT:  'out',
            mmal.MMAL_PORT_TYPE_INPUT:   'in',
            mmal.MMAL_PORT_TYPE_CONTROL: 'control',
            mmal.MMAL_PORT_TYPE_CLOCK:   'clock',
            }[self.type], self._index)

    @property
    def type(self):
        """
        The type of the port. One of:

        * MMAL_PORT_TYPE_OUTPUT
        * MMAL_PORT_TYPE_INPUT
        * MMAL_PORT_TYPE_CONTROL
        * MMAL_PORT_TYPE_CLOCK
        """
        return self._type

    @property
    def capabilities(self):
        """
        The capabilities of the port. A bitfield of the following:

        * MMAL_PORT_CAPABILITY_PASSTHROUGH
        * MMAL_PORT_CAPABILITY_ALLOCATION
        * MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE
        """
        return mmal.MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE

    @property
    def index(self):
        """
        Returns an integer indicating the port's position within its owning
        list (inputs, outputs, etc.)
        """
        return self._index

    @property
    def connection(self):
        """
        If this port is connected to another, this property holds the
        :class:`MMALConnection` or :class:`MMALPythonConnection` object which
        represents that connection. If this port is not connected, this
        property is ``None``.
        """
        return self._connection

    def connect(self, other, **options):
        """
        Connect this port to the *other* :class:`MMALPort` (or
        :class:`MMALPythonPort`). The type and configuration of the connection
        will be automatically selected.

        Various connection options can be specified as keyword arguments. These
        will be passed onto the :class:`MMALConnection` or
        :class:`MMALPythonConnection` constructor that is called (see those
        classes for an explanation of the available options).
        """
        # Always construct connections from the output end
        if self.type != mmal.MMAL_PORT_TYPE_OUTPUT:
            return other.connect(self, **options)
        if other.type != mmal.MMAL_PORT_TYPE_INPUT:
            raise PiCameraValueError(
                'A connection can only be established between an output and '
                'an input port')
        return MMALPythonConnection(self, other, **options)

    def disconnect(self):
        """
        Destroy the connection between this port and another port.
        """
        if self.connection is not None:
            self.connection.close()
class MMALPythonPortPool(MMALPool):
    """
    A pool of buffer headers serving a single :class:`MMALPythonPort`. Only
    used when a Python port operates stand-alone, i.e. without an associated
    :class:`MMALPythonConnection`.
    """
    __slots__ = ('_port',)

    def __init__(self, port):
        pool = mmal.mmal_pool_create(port.buffer_count, port.buffer_size)
        super(MMALPythonPortPool, self).__init__(pool)
        self._port = port

    @property
    def port(self):
        """The :class:`MMALPythonPort` this pool was created for."""
        return self._port

    def send_buffer(self, port=None, block=True, timeout=None):
        """
        Fetch one buffer from the pool and hand it to *port*, defaulting to
        the pool's own port. *block* and *timeout* behave as in
        :meth:`MMALPool.get_buffer`.
        """
        target = self._port if port is None else port
        super(MMALPythonPortPool, self).send_buffer(target, block, timeout)

    def send_all_buffers(self, port=None, block=True, timeout=None):
        """
        Hand every buffer in the pool to *port*, defaulting to the pool's own
        port. *block* and *timeout* behave as in :meth:`MMALPool.get_buffer`.
        """
        target = self._port if port is None else port
        super(MMALPythonPortPool, self).send_all_buffers(target, block, timeout)
class MMALPythonBaseComponent(MMALObject):
    """
    Base class for Python-implemented MMAL components. It supplies the
    :attr:`enabled` state machine and the :meth:`_commit_port` hook that
    descendents override to control their ports' behaviour. Users will
    rarely sub-class this directly; :class:`MMALPythonComponent` is the
    more useful starting point.
    """
    __slots__ = ('_inputs', '_outputs', '_enabled',)

    def __init__(self):
        super(MMALPythonBaseComponent, self).__init__()
        self._inputs = ()
        self._outputs = ()
        self._enabled = False
        # TODO Control port?

    def close(self):
        """
        Release the component's resources. Most methods will raise once
        this has been called.
        """
        self.disable()

    @property
    def enabled(self):
        """
        ``True`` while the component is enabled; toggle the state with
        :meth:`enable` and :meth:`disable`.
        """
        return self._enabled

    def enable(self):
        """
        Enable the component so it processes data arriving on its input
        port(s), emitting results on its output port(s). Connections may
        enable a component implicitly.
        """
        self._enabled = True

    def disable(self):
        """
        Disable the component, stopping processing.
        """
        self._enabled = False

    @property
    def control(self):
        """
        The control port of the component. Python components have no
        control port, so this is always ``None``.
        """
        return None

    @property
    def inputs(self):
        """
        The tuple of ports acting as the component's inputs.
        """
        return self._inputs

    @property
    def outputs(self):
        """
        The tuple of ports acting as the component's outputs.
        """
        return self._outputs

    def _commit_port(self, port):
        """
        Hook invoked whenever one of the component's ports commits its
        format. Descendents may reconfigure *output* ports here in response
        to an input commit, or raise if the new configuration is
        unacceptable.

        .. warning::

            This method must *not* reconfigure input ports when called.
        """
        pass

    def __repr__(self):
        if not self._outputs:
            return '<%s closed>' % self.__class__.__name__
        return '<%s "%s": %d inputs %d outputs>' % (
            self.__class__.__name__, self.name,
            len(self.inputs), len(self.outputs))
class MMALPythonSource(MMALPythonBaseComponent):
    """
    Provides a source for other :class:`MMALComponent` instances. The
    specified *input* is read in chunks the size of the configured output
    buffer(s) until the input is exhausted. The :meth:`wait` method can be
    used to block until this occurs. If the output buffer is configured to
    use a full-frame unencoded format (like I420 or RGB), frame-end flags will
    be automatically generated by the source. When the input is exhausted an
    empty buffer with the End Of Stream (EOS) flag will be sent.

    The component provides all picamerax's usual IO-handling characteristics; if
    *input* is a string, a file with that name will be opened as the input and
    closed implicitly when the component is closed. Otherwise, the input will
    not be closed implicitly (the component did not open it, so the assumption
    is that closing *input* is the caller's responsibility). If *input* is an
    object with a ``read`` method it is assumed to be a file-like object and is
    used as is. Otherwise, *input* is assumed to be a readable object
    supporting the buffer protocol (which is wrapped in a :class:`BufferIO`
    stream).
    """
    __slots__ = ('_stream', '_opened', '_thread')

    def __init__(self, input):
        super(MMALPythonSource, self).__init__()
        # A pure source: no inputs, a single output port
        self._inputs = ()
        self._outputs = (MMALPythonPort(self, mmal.MMAL_PORT_TYPE_OUTPUT, 0),)
        # _opened records whether *we* opened the stream (and must close it)
        self._stream, self._opened = open_stream(input, output=False)
        self._thread = None

    def close(self):
        # Close the output port, then the stream (only if we opened it)
        super(MMALPythonSource, self).close()
        if self._outputs:
            self._outputs[0].close()
            self._outputs = ()
        if self._stream:
            close_stream(self._stream, self._opened)
            self._stream = None

    def enable(self):
        # Start the background pump that copies the stream into buffers
        super(MMALPythonSource, self).enable()
        self._thread = Thread(target=self._send_run)
        self._thread.daemon = True
        self._thread.start()

    def disable(self):
        # Stop the pump; _send_run exits when self.enabled goes False
        super(MMALPythonSource, self).disable()
        if self._thread:
            self._thread.join()
            self._thread = None

    def wait(self, timeout=None):
        """
        Wait for the source to send all bytes from the specified input. If
        *timeout* is specified, it is the number of seconds to wait for
        completion. The method returns ``True`` if the source completed within
        the specified timeout and ``False`` otherwise.
        """
        if not self.enabled:
            raise PiCameraMMALError(
                mmal.MMAL_EINVAL, 'cannot wait on disabled component')
        self._thread.join(timeout)
        return not self._thread.is_alive()

    def _send_run(self):
        # Calculate the size of a frame if possible (i.e. when the output
        # format is an unencoded full frame format). If it's an unknown /
        # encoded format, we've no idea what the framesize is (this would
        # presumably require decoding the stream) so leave framesize as None.
        video = self._outputs[0]._format[0].es[0].video
        try:
            framesize = (
                MMALPythonPort._FORMAT_BPP[str(self._outputs[0].format)]
                * video.width
                * video.height)
        except KeyError:
            framesize = None
        # frameleft counts down the bytes remaining in the current frame
        frameleft = framesize
        while self.enabled:
            # Short timeout so the loop notices disable() promptly
            buf = self._outputs[0].get_buffer(timeout=0.1)
            if buf:
                try:
                    if frameleft is None:
                        send = buf.size
                    else:
                        send = min(frameleft, buf.size)
                    with buf as data:
                        if send == buf.size:
                            try:
                                # readinto() is by far the fastest method of
                                # getting data into the buffer
                                buf.length = self._stream.readinto(data)
                            except AttributeError:
                                # if there's no readinto() method, fallback on
                                # read() and the data setter (memmove)
                                buf.data = self._stream.read(buf.size)
                        else:
                            buf.data = self._stream.read(send)
                    if frameleft is not None:
                        frameleft -= buf.length
                        if not frameleft:
                            # Completed a full frame; flag it and reset the
                            # countdown for the next frame
                            buf.flags |= mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END
                            frameleft = framesize
                    if not buf.length:
                        # Stream exhausted; emit EOS and stop
                        buf.flags |= mmal.MMAL_BUFFER_HEADER_FLAG_EOS
                        break
                finally:
                    # Always forward the buffer, even on error / EOS
                    self._outputs[0].send_buffer(buf)

    @property
    def name(self):
        return 'py.source'
class MMALPythonComponent(MMALPythonBaseComponent):
"""
Provides a Python-based MMAL component with a *name*, a single input and
the specified number of *outputs* (default 1). The :meth:`connect` and
:meth:`disconnect` methods can be used to establish or break a connection
from the input port to an upstream component.
Typically descendents will override the :meth:`_handle_frame` method to
respond to buffers sent to the input port, and will set
:attr:`MMALPythonPort.supported_formats` in the constructor to define the
formats that the component will work with.
"""
__slots__ = ('_name', '_thread', '_queue', '_error')
def __init__(self, name='py.component', outputs=1):
super(MMALPythonComponent, self).__init__()
self._name = name
self._thread = None
self._error = None
self._queue = MMALQueue.create()
self._inputs = (MMALPythonPort(self, mmal.MMAL_PORT_TYPE_INPUT, 0),)
self._outputs = tuple(
MMALPythonPort(self, mmal.MMAL_PORT_TYPE_OUTPUT, n)
for n in range(outputs)
)
def close(self):
super(MMALPythonComponent, self).close()
self.disconnect()
if self._inputs:
self._inputs[0].close()
self._inputs = ()
for output in self._outputs:
output.disable()
self._outputs = ()
self._queue.close()
self._queue = None
def connect(self, source, **options):
"""
Connects the input port of this component to the specified *source*
:class:`MMALPort` or :class:`MMALPythonPort`. Alternatively, as a
convenience (primarily intended for command line experimentation; don't
use this in scripts), *source* can be another component in which case
the first unconnected output port will be selected as *source*.
Keyword arguments will be passed along to the connection constructor.
See :class:`MMALConnection` and :class:`MMALPythonConnection` for
further information.
"""
if isinstance(source, (MMALPort, MMALPythonPort)):
return self.inputs[0].connect(source)
else:
for port in source.outputs:
if not port.connection:
return self.inputs[0].connect(port, **options)
raise PiCameraMMALError(
mmal.MMAL_EINVAL, 'no free output ports on %r' % source)
def disconnect(self):
"""
Destroy the connection between this component's input port and the
upstream component.
"""
self.inputs[0].disconnect()
@property
def connection(self):
"""
The :class:`MMALConnection` or :class:`MMALPythonConnection` object
linking this component to the upstream component.
"""
return self.inputs[0].connection
@property
def name(self):
return self._name
def _commit_port(self, port):
"""
Overridden to to copy the input port's configuration to the output
port(s), and to ensure that the output port(s)' format(s) match
the input port's format.
"""
super(MMALPythonComponent, self)._commit_port(port)
if port.type == mmal.MMAL_PORT_TYPE_INPUT:
for output in self.outputs:
output.copy_from(port)
elif port.type == mmal.MMAL_PORT_TYPE_OUTPUT:
if port.format != self.inputs[0].format:
raise PiCameraMMALError(mmal.MMAL_EINVAL, 'output format mismatch')
def enable(self):
super(MMALPythonComponent, self).enable()
if not self._thread:
self._thread = Thread(target=self._thread_run)
self._thread.daemon = True
self._thread.start()
def disable(self):
    """
    Disable the component, wait for the worker thread to finish, and
    re-raise any exception the worker captured while running.
    """
    super(MMALPythonComponent, self).disable()
    worker, self._thread = self._thread, None
    if worker is not None:
        worker.join()
    if self._error:
        raise self._error
def _thread_run(self):
    """
    Main loop of the component's background thread.

    Pulls buffers off the internal queue and dispatches each to the
    handler matching its command; a handler returning ``True`` halts
    processing. Any exception is captured in ``_error`` so that
    :meth:`disable` can re-raise it in the caller's thread.
    """
    try:
        while self._enabled:
            # Short timeout so the loop notices _enabled being cleared
            buf = self._queue.get(timeout=0.1)
            if buf:
                try:
                    # Command 0 is plain frame data; everything else is
                    # an MMAL event routed to its specific handler
                    handler = {
                        0: self._handle_frame,
                        mmal.MMAL_EVENT_PARAMETER_CHANGED: self._handle_parameter_changed,
                        mmal.MMAL_EVENT_FORMAT_CHANGED: self._handle_format_changed,
                        mmal.MMAL_EVENT_ERROR: self._handle_error,
                        mmal.MMAL_EVENT_EOS: self._handle_end_of_stream,
                    }[buf.command]
                    # Handlers return True when processing should stop
                    if handler(self.inputs[0], buf):
                        self._enabled = False
                finally:
                    buf.release()
    except Exception as e:
        # Stash the failure for disable() to re-raise
        self._error = e
        self._enabled = False
def _handle_frame(self, port, buf):
    """
    Handles frame data buffers (where :attr:`MMALBuffer.command` is set to
    0).

    Typically, if the component has output ports, the method is expected to
    fetch a buffer from the output port(s), write data into them, and send
    them back to their respective ports.

    Return values are as for normal event handlers (``True`` when no more
    buffers are expected, ``False`` otherwise).
    """
    # Default implementation discards the frame and keeps processing
    return False
def _handle_format_changed(self, port, buf):
    """
    Handles format change events passed to the component (where
    :attr:`MMALBuffer.command` is set to MMAL_EVENT_FORMAT_CHANGED).

    The default implementation re-configures the input port of the
    component and emits the event on all output ports for downstream
    processing. Override this method if you wish to do something else in
    response to format change events.

    The *port* parameter is the port into which the event arrived, and
    *buf* contains the event itself (a MMAL_EVENT_FORMAT_CHANGED_T
    structure). Use ``mmal_event_format_changed_get`` on the buffer's data
    to extract the event.
    """
    with buf as data:
        event = mmal.mmal_event_format_changed_get(buf._buf)
        if port.connection:
            # Handle format change on the source output port, if any. We
            # don't check the output port capabilities because it was the
            # port that emitted the format change in the first case so it'd
            # be odd if it didn't support them (or the format requested)!
            output = port.connection._source
            output.disable()
            if isinstance(output, MMALPythonPort):
                mmal.mmal_format_copy(output._format, event[0].format)
            else:
                mmal.mmal_format_copy(output._port[0].format, event[0].format)
            output.commit()
            # Prefer the recommended buffer geometry; fall back to the
            # bare minimum when no recommendation is given
            output.buffer_count = (
                event[0].buffer_num_recommended
                if event[0].buffer_num_recommended > 0 else
                event[0].buffer_num_min)
            output.buffer_size = (
                event[0].buffer_size_recommended
                if event[0].buffer_size_recommended > 0 else
                event[0].buffer_size_min)
            if isinstance(output, MMALPythonPort):
                output.enable()
            else:
                output.enable(port.connection._transfer)
        # Now deal with the format change on this input port (this is only
        # called from _thread_run so port must be an input port)
        try:
            if not (port.capabilities & mmal.MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE):
                raise PiCameraMMALError(
                    mmal.MMAL_EINVAL,
                    'port %s does not support event change' % self.name)
            mmal.mmal_format_copy(port._format, event[0].format)
            self._commit_port(port)
            # Resize the pool to match the new buffer requirements
            port.pool.resize(
                event[0].buffer_num_recommended
                if event[0].buffer_num_recommended > 0 else
                event[0].buffer_num_min,
                event[0].buffer_size_recommended
                if event[0].buffer_size_recommended > 0 else
                event[0].buffer_size_min)
            port.buffer_count = len(port.pool)
            port.buffer_size = port.pool[0].size
        except:
            # If this port can't handle the format change, or if anything goes
            # wrong (like the owning component doesn't like the new format)
            # stop the pipeline (from here at least)
            if port.connection:
                port.connection.disable()
            raise
    # Chain the format-change onward so everything downstream sees it.
    # NOTE: the callback isn't given the format-change because there's no
    # image data in it
    for output in self.outputs:
        out_buf = output.get_buffer()
        out_buf.copy_from(buf)
        output.send_buffer(out_buf)
    return False
def _handle_parameter_changed(self, port, buf):
    """
    Handles parameter change events passed to the component (where
    :attr:`MMALBuffer.command` is set to MMAL_EVENT_PARAMETER_CHANGED).

    The default implementation does nothing but return ``False``
    (indicating that processing should continue). Override this in
    descendents to respond to parameter changes.

    The *port* parameter is the port into which the event arrived, and
    *buf* contains the event itself (a MMAL_EVENT_PARAMETER_CHANGED_T
    structure).
    """
    # Default: ignore the change and keep processing
    return False
def _handle_error(self, port, buf):
    """
    Handles error notifications passed to the component (where
    :attr:`MMALBuffer.command` is set to MMAL_EVENT_ERROR).

    The default implementation does nothing but return ``True`` (indicating
    that processing should halt). Override this in descendents to respond
    to error events.

    The *port* parameter is the port into which the event arrived.
    """
    # Default: treat any error event as fatal for this component
    return True
def _handle_end_of_stream(self, port, buf):
    """
    Handles end-of-stream notifications passed to the component (where
    :attr:`MMALBuffer.command` is set to MMAL_EVENT_EOS).

    The default implementation does nothing but return ``True`` (indicating
    that processing should halt). Override this in descendents to respond
    to the end of stream.

    The *port* parameter is the port into which the event arrived.
    """
    # Default: stop processing once the stream ends
    return True
class MMALPythonTarget(MMALPythonComponent):
    """
    Provides a simple component that writes all received buffers to the
    specified *output* until a frame with the *done* flag is seen (defaults to
    MMAL_BUFFER_HEADER_FLAG_EOS indicating End Of Stream).

    The component provides all picamerax's usual IO-handling characteristics; if
    *output* is a string, a file with that name will be opened as the output
    and closed implicitly when the component is closed. Otherwise, the output
    will not be closed implicitly (the component did not open it, so the
    assumption is that closing *output* is the caller's responsibility). If
    *output* is an object with a ``write`` method it is assumed to be a
    file-like object and is used as is. Otherwise, *output* is assumed to be a
    writeable object supporting the buffer protocol (which is wrapped in a
    :class:`BufferIO` stream).
    """
    # _stream: the file-like object buffers are written to
    # _opened: True when this component opened the stream (close() must close it)
    # _done:   buffer flag that marks the final frame
    # _event:  set once a buffer carrying the _done flag has been written
    __slots__ = ('_opened', '_stream', '_done', '_event')

    def __init__(self, output, done=mmal.MMAL_BUFFER_HEADER_FLAG_EOS):
        super(MMALPythonTarget, self).__init__(name='py.target', outputs=0)
        self._stream, self._opened = open_stream(output)
        self._done = done
        self._event = Event()
        # Accept all the formats picamerax generally produces (user can add
        # other esoteric stuff if they need to)
        self.inputs[0].supported_formats = {
            mmal.MMAL_ENCODING_MJPEG,
            mmal.MMAL_ENCODING_H264,
            mmal.MMAL_ENCODING_JPEG,
            mmal.MMAL_ENCODING_GIF,
            mmal.MMAL_ENCODING_PNG,
            mmal.MMAL_ENCODING_BMP,
            mmal.MMAL_ENCODING_I420,
            mmal.MMAL_ENCODING_RGB24,
            mmal.MMAL_ENCODING_BGR24,
            mmal.MMAL_ENCODING_RGBA,
            mmal.MMAL_ENCODING_BGRA,
        }

    def close(self):
        """Close the component; close the stream only if we opened it."""
        super(MMALPythonTarget, self).close()
        close_stream(self._stream, self._opened)

    def enable(self):
        """Enable the component, resetting the completion event first."""
        self._event.clear()
        super(MMALPythonTarget, self).enable()

    def wait(self, timeout=None):
        """
        Wait for the output to be "complete" as defined by the constructor's
        *done* parameter. If *timeout* is specified it is the number of seconds
        to wait for completion. The method returns ``True`` if the target
        completed within the specified timeout and ``False`` otherwise.
        """
        return self._event.wait(timeout)

    def _handle_frame(self, port, buf):
        # Write every buffer verbatim; the *done* flag marks the last one
        self._stream.write(buf.data)
        if buf.flags & self._done:
            self._event.set()
            return True
        return False
class MMALPythonConnection(MMALBaseConnection):
    """
    Represents a connection between an :class:`MMALPythonBaseComponent` and a
    :class:`MMALBaseComponent` or another :class:`MMALPythonBaseComponent`.

    The constructor accepts arguments providing the *source* :class:`MMALPort`
    (or :class:`MMALPythonPort`) and *target* :class:`MMALPort` (or
    :class:`MMALPythonPort`).

    The *formats* parameter specifies an iterable of formats (in preference
    order) that the connection may attempt when negotiating formats between
    the two ports. If this is ``None``, or an empty iterable, no negotiation
    will take place and the source port's format will simply be copied to the
    target port. Otherwise, the iterable will be worked through in order until
    a format acceptable to both ports is discovered.

    The *callback* parameter can optionally specify a callable which will be
    executed for each buffer that traverses the connection (providing an
    opportunity to manipulate or drop that buffer). If specified, it must be a
    callable which accepts two parameters: the :class:`MMALPythonConnection`
    object sending the data, and the :class:`MMALBuffer` object containing
    data. The callable may optionally manipulate the :class:`MMALBuffer` and
    return it to permit it to continue traversing the connection, or return
    ``None`` in which case the buffer will be released.

    .. data:: default_formats
        :annotation: = (MMAL_ENCODING_I420, MMAL_ENCODING_RGB24, MMAL_ENCODING_BGR24, MMAL_ENCODING_RGBA, MMAL_ENCODING_BGRA)

        Class attribute defining the default formats used to negotiate
        connections between Python and MMAL components, in preference
        order. Note that OPAQUE is not present in contrast with the default
        formats in :class:`MMALConnection`.
    """
    # _enabled: True while buffers flow across the connection
    # _callback: optional per-buffer hook (see class docstring)
    __slots__ = ('_enabled', '_callback')

    default_formats = (
        mmal.MMAL_ENCODING_I420,
        mmal.MMAL_ENCODING_RGB24,
        mmal.MMAL_ENCODING_BGR24,
        mmal.MMAL_ENCODING_RGBA,
        mmal.MMAL_ENCODING_BGRA,
    )

    def __init__(
            self, source, target, formats=default_formats, callback=None):
        # At least one end must be a Python port; two native MMAL ports
        # should use a real MMALConnection instead
        if not (
                isinstance(source, MMALPythonPort) or
                isinstance(target, MMALPythonPort)
        ):
            raise PiCameraValueError('use a real MMAL connection')
        super(MMALPythonConnection, self).__init__(source, target, formats)
        self._enabled = False
        self._callback = callback

    def close(self):
        """Disable and close the connection."""
        self.disable()
        super(MMALPythonConnection, self).close()

    @property
    def enabled(self):
        """
        Returns ``True`` if the connection is enabled. Use :meth:`enable`
        and :meth:`disable` to control the state of the connection.
        """
        return self._enabled

    def enable(self):
        """
        Enable the connection. When a connection is enabled, data is
        continually transferred from the output port of the source to the input
        port of the target component.
        """
        if not self._enabled:
            self._enabled = True
            if isinstance(self._target, MMALPythonPort):
                # Connected python input ports require no callback
                self._target.enable()
            else:
                # Connected MMAL input ports don't know they're connected so
                # provide a dummy callback
                self._target.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True
                self._target.enable(lambda port, buf: True)
            if isinstance(self._source, MMALPythonPort):
                # Connected python output ports are nothing more than thin
                # proxies for the target input port; no callback required
                self._source.enable()
            else:
                # Connected MMAL output ports are made to transfer their
                # data to the Python input port
                self._source.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True
                self._source.enable(self._transfer)

    def disable(self):
        """
        Disables the connection.
        """
        self._enabled = False
        self._source.disable()
        self._target.disable()

    def _transfer(self, port, buf):
        # Copy *buf* into a buffer obtained from the target port; retry with
        # a short timeout until a buffer is available or we're disabled
        while self._enabled:
            try:
                dest = self._target.get_buffer(timeout=0.01)
            except PiCameraPortDisabled:
                dest = None
            if dest:
                dest.copy_from(buf)
                try:
                    self._target.send_buffer(dest)
                except PiCameraPortDisabled:
                    pass
                return
        return False

    @property
    def name(self):
        """The connection's name: ``"<source name>/<target name>"``."""
        return '%s/%s' % (self._source.name, self._target.name)

    def __repr__(self):
        try:
            return '<MMALPythonConnection "%s">' % self.name
        # BUGFIX: once the connection is closed, the name property raises
        # AttributeError (the source/target are gone), not NameError, so
        # the original "closed" branch was unreachable and repr() of a
        # closed connection blew up. Catch both to restore the intent.
        except (NameError, AttributeError):
            return '<MMALPythonConnection closed>'
|
uart_provider.py | import os
import re
import time
import json
import struct
import datetime
import threading
from ..base import OpenDeviceBase
from ..decorator import with_device_message
from ...framework.utils import (helper, resource)
from . import dmu_helper
from .configuration_field import CONFIGURATION_FIELD_DEFINES_SINGLETON
from .eeprom_field import EEPROM_FIELD_DEFINES_SINGLETON
from ..upgrade_workers import (
FirmwareUpgradeWorker,
JumpBootloaderWorker,
JumpApplicationWorker,
UPGRADE_EVENT
)
# Two-byte ASCII command codes for the DMU serial protocol
ID = [0x49, 0x44]  # 'I', 'D' — presumably the device-identification command; confirm against protocol docs
VR = [0x56, 0x52]  # 'V', 'R' — presumably the version-request command; confirm against protocol docs
S0 = [0x53, 0x30]  # 'S', '0' — presumably a sensor-data packet code; confirm against protocol docs
class Provider(OpenDeviceBase):
    '''
    DMU UART provider.

    Wraps a serial communicator to a DMU inertial device: parses device and
    app identity, reads/writes configuration fields, and drives firmware
    upgrades through the jump-bootloader / write / jump-application workers.
    '''

    def __init__(self, communicator, *args):
        super(Provider, self).__init__(communicator)
        self.type = 'DMU'
        self.server_update_rate = 50
        self.is_logging = False
        self.is_mag_align = False
        # The DMU bootloader always talks at this fixed baudrate
        self.bootloader_baudrate = 57600
        self.app_config_folder = ''
        self.parameters = None
        self.enable_data_log = True
        self.is_backup = False
        self.is_restore = False
        self.is_app_matched = False
        self.is_conf_loaded = False
        self.connected = True
        self.device_info = None
        self.app_info = None
        self.prepare_folders()

    def prepare_folders(self):
        '''
        Prepare folders for data storage and configuration, extracting the
        bundled dmu.json configuration on first run.
        '''
        executor_path = resource.get_executor_path()
        setting_folder_name = 'setting'
        config_file_name = 'dmu.json'
        data_folder_path = os.path.join(executor_path, 'data')
        if not os.path.isdir(data_folder_path):
            os.makedirs(data_folder_path)
        self.setting_folder_path = os.path.join(
            executor_path, setting_folder_name, 'dmu')
        config_file_path = os.path.join(
            self.setting_folder_path, config_file_name)
        if not os.path.isfile(config_file_path):
            if not os.path.isdir(self.setting_folder_path):
                os.makedirs(self.setting_folder_path)
            # First run: extract the default configuration from the bundle
            app_config_content = resource.get_content_from_bundle(
                setting_folder_name, os.path.join('dmu', config_file_name))
            with open(config_file_path, "wb") as code:
                code.write(app_config_content)

    @property
    def is_in_bootloader(self):
        ''' Check if the connected device is in bootloader mode '''
        if not self.device_info or 'name' not in self.device_info:
            return False
        return 'bootloader' in self.device_info['name'].lower()

    def bind_device_info(self, device_access, device_info, app_info):
        '''Store parsed device/app identity and return a banner string.'''
        self._build_device_info(device_info)
        self._build_app_info(app_info)
        self.connected = True
        device_string = '{0} {1} {2}'.format(
            self.device_info['name'], self.device_info['pn'], self.device_info['sn'])
        return '# Connected {0} #\n\rDevice: {1} \n\rFirmware: {2}'\
            .format('DMU', device_string, self.device_info['firmware_version'])

    def _build_device_info(self, data_buffer):
        '''
        Build device info from the raw ID payload: 4 big-endian serial
        number bytes followed by an ASCII string "<name> <pn> <firmware>".
        '''
        if data_buffer is None:
            return False
        serial_num = int.from_bytes(struct.pack(
            '4B', *data_buffer[0:4]), byteorder='big')
        mode_string_len = len(data_buffer[4:])
        model_string = struct.pack('{0}B'.format(
            mode_string_len), *data_buffer[4:]).decode()
        split_text = model_string.split(' ')
        self.device_info = {
            'name': split_text[0],
            'pn': split_text[1],
            'firmware_version': split_text[2],
            'sn': serial_num
        }
        # Return True for consistency with _build_app_info
        return True

    def _build_app_info(self, data_buffer):
        '''
        Build app info from the 5-byte version payload.
        '''
        if data_buffer is None:
            return False
        version_string = '{0}.{1}.{2}.{3}.{4}'.format(*data_buffer)
        self.app_info = {
            'app_name': 'DMU',
            'version': version_string
        }
        return True

    def load_properties(self):
        '''Load dmu.json, preferring a copy in the current working path.'''
        local_config_file_path = os.path.join(os.getcwd(), 'dmu.json')
        if os.path.isfile(local_config_file_path):
            with open(local_config_file_path) as json_data:
                self.properties = json.load(json_data)
            # NOTE(review): the field-define singletons below are only
            # loaded for the bundled config, not this local override —
            # confirm that asymmetry is intentional
            return
        app_file_path = os.path.join(
            self.setting_folder_path, 'dmu.json')
        with open(app_file_path) as json_data:
            self.properties = json.load(json_data)
        CONFIGURATION_FIELD_DEFINES_SINGLETON.load(
            self.properties['userConfiguration'])
        EEPROM_FIELD_DEFINES_SINGLETON.load()

    def after_setup(self):
        '''Reset per-connection state and remember the application baudrate.'''
        self.is_conf_loaded = False
        if hasattr(self.communicator, 'serial_port'):
            self.original_baudrate = self.communicator.serial_port.baudrate

    def on_read_raw(self, data):
        '''Raw stream hook; the DMU provider ignores raw data.'''
        pass

    def on_receive_output_packet(self, packet_type, data, *args, **kwargs):
        '''
        Listener for getting output packet
        '''
        self.add_output_packet(packet_type, data)

    def get_log_info(self):
        '''
        Build information for log
        '''
        if not self.parameters:
            self.get_params()
        input_params = self.properties['userConfiguration']
        # Map the device's raw packet-rate key to its display value
        packet_rate = next(
            (item['value'] for item in self.parameters if item['name'] == 'Packet Rate'), '100')
        value_mapping = next(
            (item['options'] for item in input_params if item['name'] == 'Packet Rate'), [])
        packet_rate_value = next(
            (item['value'] for item in value_mapping if item['key'] == str(packet_rate)), '0')
        return {
            "type": 'IMU',
            "model": self.device_info['name'],
            "logInfo": {
                "pn": self.device_info['pn'],
                "sn": self.device_info['sn'],
                "sampleRate": packet_rate_value,
                "appVersion": self.device_info['firmware_version'],
                "imuProperties": json.dumps(self.properties)
            }
        }

    def before_jump_app_command(self):
        '''Drop to the bootloader baudrate before issuing the JA command.'''
        self.communicator.serial_port.baudrate = self.bootloader_baudrate

    def after_jump_app_command(self):
        '''Restore the application baudrate captured in after_setup.'''
        self.communicator.serial_port.baudrate = self.original_baudrate

    def before_write_content(self):
        '''Switch to the bootloader baudrate and flush stale input.'''
        self.communicator.serial_port.baudrate = self.bootloader_baudrate
        self.communicator.serial_port.reset_input_buffer()

    def firmware_write_command_generator(self, data_len, current, data):
        '''Build a 'WA' packet: big-endian offset, 1-byte length, payload.'''
        command_WA = 'WA'
        message_bytes = []
        message_bytes.extend(struct.pack('>I', current))
        message_bytes.extend(struct.pack('B', data_len))
        message_bytes.extend(data)
        return helper.build_packet(command_WA, message_bytes)

    def get_upgrade_workers(self, firmware_content):
        '''Assemble the jump-bootloader / write / jump-application pipeline.'''
        firmware_worker = FirmwareUpgradeWorker(
            self.communicator, firmware_content,
            self.firmware_write_command_generator)
        firmware_worker.on(UPGRADE_EVENT.BEFORE_WRITE,
                           lambda: self.before_write_content())
        # Give the device time to erase flash after the first packet
        firmware_worker.on(
            UPGRADE_EVENT.FIRST_PACKET, lambda: time.sleep(8))
        jump_bootloader_command = helper.build_bootloader_input_packet(
            'JI')
        jump_bootloader_worker = JumpBootloaderWorker(
            self.communicator,
            command=jump_bootloader_command,
            listen_packet='JI',
            wait_timeout_after_command=3)
        jump_application_command = helper.build_bootloader_input_packet('JA')
        jump_application_worker = JumpApplicationWorker(
            self.communicator,
            command=jump_application_command,
            listen_packet='JA',
            wait_timeout_after_command=3)
        jump_application_worker.on(UPGRADE_EVENT.BEFORE_COMMAND, self.before_jump_app_command)
        jump_application_worker.on(UPGRADE_EVENT.AFTER_COMMAND, self.after_jump_app_command)
        return [jump_bootloader_worker, firmware_worker, jump_application_worker]

    def get_device_connection_info(self):
        '''Connection summary for the client UI.'''
        return {
            'modelName': self.device_info['name'],
            'deviceType': self.type,
            'serialNumber': self.device_info['sn'],
            'partNumber': self.device_info['pn'],
            'firmware': self.device_info['firmware_version']
        }

    def get_operation_status(self):
        '''Current high-level state for the client UI.'''
        if self.is_logging:
            return 'LOGGING'
        if self.is_upgrading:
            return 'UPGRADING'
        if self.is_mag_align:
            return 'MAG_ALIGN'
        return 'IDLE'

    def get_device_info(self, *args):  # pylint: disable=unused-argument
        '''
        Get device information
        '''
        return {
            'packetType': 'deviceInfo',
            'data': [
                {'name': 'Product Name',
                 'value': self.device_info['name']},
                {'name': 'PN', 'value': self.device_info['pn']},
                {'name': 'Firmware Version',
                 'value': self.device_info['firmware_version']},
                {'name': 'SN', 'value': self.device_info['sn']}
            ]
        }

    @with_device_message
    def get_conf(self, *args):  # pylint: disable=unused-argument
        '''
        Get json configuration
        '''
        outputs = self.properties['userMessages']['outputPackets']
        input_params = self.properties['userConfiguration']
        if self.is_conf_loaded:
            # Packet types already merged in from the device: serve cached
            yield {
                'packetType': 'conf',
                'data': {
                    'outputs': outputs,
                    'inputParams': input_params
                }
            }
        # read product configuration
        eeprom_field = EEPROM_FIELD_DEFINES_SINGLETON.find(0x71C)
        command_line = dmu_helper.build_read_eeprom_cli(eeprom_field)
        result = yield self._message_center.build(command=command_line, timeout=3)
        data = result['data']
        if data:
            packet_types = dmu_helper.build_continous_packet_types(
                data['value']['architechture'],
                data['value']['algorithm'],
                data['value']['mags'])
            if 'INS330BI' in self.device_info['name']:
                packet_types.append('E3')
            for item in input_params:
                if item['name'] == 'Packet Type':
                    item['options'] = packet_types
                    self.is_conf_loaded = True
                    break
        yield {
            'packetType': 'conf',
            'data': {
                'outputs': outputs,
                'inputParams': input_params
            }
        }

    @with_device_message
    def get_params(self, *args):  # pylint: disable=unused-argument
        '''
        Get all parameters
        '''
        fields = CONFIGURATION_FIELD_DEFINES_SINGLETON.get_fields()
        command_line = dmu_helper.build_read_fields_packets(fields)
        result = yield self._message_center.build(command=command_line, timeout=3)
        data = result['data']
        if data:
            self.parameters = data
            yield {
                'packetType': 'inputParams',
                'data': data
            }
        yield {
            'packetType': 'error',
            'data': 'No Response'
        }

    @with_device_message
    def get_param(self, params, *args):  # pylint: disable=unused-argument
        '''
        Get a single parameter value
        '''
        field = CONFIGURATION_FIELD_DEFINES_SINGLETON.find(params['paramId'])
        if field is None:
            yield {
                'packetType': 'error',
                'data': 'Invalid Parameter'
            }
        command_line = dmu_helper.build_read_fields_packets([field])
        result = yield self._message_center.build(command=command_line)
        data = result['data']
        error = result['error']
        if error:
            yield {
                'packetType': 'error',
                'data': 'No Response'
            }
        if data:
            yield {
                'packetType': 'inputParam',
                'data': data[0]
            }
        yield {
            'packetType': 'error',
            'data': 'No Response'
        }

    def set_params(self, params, *args):  # pylint: disable=unused-argument
        '''Bulk parameter update is not supported on the DMU.'''
        raise Exception('Not implement set params.')

    @with_device_message
    def set_param(self, params, *args):  # pylint: disable=unused-argument
        '''
        Update parameter value
        '''
        configuration_field = CONFIGURATION_FIELD_DEFINES_SINGLETON.find(
            params['paramId'])
        # BUGFIX: check for None before dereferencing .name — the original
        # read configuration_field.name first, which raised AttributeError
        # for unknown parameter ids instead of yielding an error packet
        if configuration_field is None:
            yield {
                'packetType': 'error'
            }
        if configuration_field.name == 'Unknown':
            yield {
                'packetType': 'error'
            }
        command_line = dmu_helper.build_write_filed_cli(
            configuration_field, params['value'])
        result = yield self._message_center.build(command=command_line)
        error = result['error']
        data = result['data']
        if error:
            yield {
                'packetType': 'error',
                'data': {
                    'error': error
                }
            }
        yield {
            'packetType': 'success',
            'data': {
                'error': data
            }
        }

    @with_device_message
    def save_config(self, *args):  # pylint: disable=unused-argument
        '''
        Save configuration
        '''
        # read current configuration fields, then write fields to eeprom
        fields = CONFIGURATION_FIELD_DEFINES_SINGLETON.get_fields()
        command_line = dmu_helper.build_read_fields_packets(fields)
        result = yield self._message_center.build(command=command_line, timeout=3)
        data = result['data']
        values = [item['value'] for item in data]
        command_line = dmu_helper.build_write_fileds_cli(fields, values, True)
        result = yield self._message_center.build(command=command_line, timeout=3)
        data = result['data']
        error = result['error']
        if data:
            yield {
                'packetType': 'success',
                'data': data
            }
        yield {
            'packetType': 'success',
            'data': error
        }

    @with_device_message
    def run_command(self, params, *args):
        ''' run raw command
        '''
        # BUGFIX: the original character class [a-f|0-9|A-F] also matched
        # the literal '|' character, so a stray '||' pair would reach
        # int(item, 16) and raise ValueError; only hex digit pairs match now
        bytes_str_in_array = re.findall('([0-9a-fA-F]{2})', params)
        command_line = bytes([int(item, 16) for item in bytes_str_in_array])
        result = yield self._message_center.build(command=command_line, timeout=2)
        error = result['error']
        raw = result['raw']
        if error:
            yield {
                'packetType': 'error',
                'data': {
                    'error': 'Runtime Error',
                    'message': 'The device cannot response the command'
                }
            }
        yield {
            'packetType': 'success',
            'data': raw
        }

    def upgrade_framework(self, params, *args):  # pylint: disable=invalid-name
        '''
        upgrade framework (firmware) in a background thread
        '''
        file = ''
        if isinstance(params, str):
            file = params
        if isinstance(params, dict):
            file = params['file']
        # start a thread to do upgrade
        if not self.is_upgrading:
            self.is_upgrading = True
            self._message_center.pause()
            if self._logger is not None:
                self._logger.stop_user_log()
            thread = threading.Thread(
                target=self.thread_do_upgrade_framework, args=(file,))
            thread.start()
            print("Upgrade DMU firmware started at:[{0}].".format(
                datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
        return {
            'packetType': 'success'
        }
|
main.py | from subprocess import Popen, PIPE
from threading import Thread
from queue import Queue, Empty
import atexit
import os
import sys
# One subprocess slot per player; populated lazily by dotnet_agent
agent_processes = [None, None]
# Background stderr-reader thread and its queue (set on first spawn)
t = None
q = None
def cleanup_process():
    """Kill every spawned agent subprocess (registered with atexit)."""
    global agent_processes
    alive = (proc for proc in agent_processes if proc is not None)
    for proc in alive:
        proc.kill()
def enqueue_output(out, queue):
    """Drain *out* line by line into *queue*, then close the stream."""
    while True:
        line = out.readline()
        if line == b'':
            # EOF on a binary stream
            break
        queue.put(line)
    out.close()
def dotnet_agent(observation, configuration):
    """
    a wrapper around a dotnet agent

    Lazily spawns the per-player dotnet process on first call, feeds it the
    observation updates over stdin, and parses its actions from stdout.
    Stderr is drained through a daemon thread so reads never block.
    """
    global agent_processes, t, q

    agent_process = agent_processes[observation.player]
    ### Do not edit ###
    if agent_process is None:
        # Resolve the working directory containing run.sh
        if "__raw_path__" in configuration:
            cwd = os.path.dirname(configuration["__raw_path__"])
        else:
            cwd = os.path.dirname(__file__)
        agent_process = Popen(["./run.sh"], stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=cwd)
        agent_processes[observation.player] = agent_process
        atexit.register(cleanup_process)

        # following 4 lines from https://stackoverflow.com/questions/375427/a-non-blocking-read-on-a-subprocess-pipe-in-python
        q = Queue()
        t = Thread(target=enqueue_output, args=(agent_process.stderr, q))
        t.daemon = True  # thread dies with the program
        t.start()

    if observation.step == 0:
        # fixes bug where updates array is shared, but the first update is agent dependent actually
        observation["updates"][0] = f"{observation.player}"

    # print observations to agent
    agent_process.stdin.write(("\n".join(observation["updates"]) + "\n").encode())
    agent_process.stdin.flush()

    # wait for data written to stdout (action line, then a terminator line)
    agent1res = (agent_process.stdout.readline()).decode()
    _end_res = (agent_process.stdout.readline()).decode()

    # Forward anything the agent wrote to stderr, without blocking
    while True:
        try: line = q.get_nowait()
        except Empty:
            # no standard error received, break
            break
        else:
            # standard error output received, print it out
            print(line.decode(), file=sys.stderr, end='')

    # First stdout line is a comma-separated action list; drop empties
    outputs = agent1res.split("\n")[0].split(",")
    actions = []
    for cmd in outputs:
        if cmd != "":
            actions.append(cmd)
    return actions
count_down_latch_test.py | import threading
from time import sleep
from tests.base import SingleMemberTestCase
from tests.util import random_string
from hazelcast import six
from hazelcast.six.moves import range
class CountDownLatchTest(SingleMemberTestCase):
    """Integration tests for the client-side CountDownLatch proxy."""

    def setUp(self):
        # Fresh, randomly-named latch per test; .blocking() makes every
        # proxy operation synchronous
        self.latch = self.client.get_count_down_latch(random_string()).blocking()

    def test_latch(self):
        self.latch.try_set_count(20)
        self.assertEqual(self.latch.get_count(), 20)

        def test_run():
            # Background thread counts the latch all the way down, pacing
            # each decrement so the 1-second await below times out first
            for i in range(0, 20):
                self.latch.count_down()
                sleep(0.06)

        _thread = threading.Thread(target=test_run)
        _thread.start()

        if six.PY2:
            # 'await' is a plain identifier on py2 but a reserved keyword
            # on py3, so the py2-only calls must be hidden inside exec_
            # strings to keep this file importable on py3
            six.exec_("""self.assertFalse(self.latch.await(1))""")
            six.exec_("""self.assertTrue(self.latch.await(15))""")
        else:
            self.assertFalse(self.latch.await_latch(1))
            self.assertTrue(self.latch.await_latch(15))

    def test_str(self):
        self.assertTrue(str(self.latch).startswith("CountDownLatch"))
|
ionosphere.py | from __future__ import division
import logging
import os
from os import kill, getpid, listdir
from os.path import join, isfile
from sys import version_info
# @modified 20191115 - Branch #3262: py3
# try:
# from Queue import Empty
# except:
# from queue import Empty
from time import time, sleep
from threading import Thread
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets in place of Manager().list to reduce memory and number of
# processes
# from multiprocessing import Process, Manager
from multiprocessing import Process
import re
from shutil import rmtree
# @added 20200813 - Feature #3670: IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
from shutil import move as shutil_move
# import csv
from ast import literal_eval
from datetime import datetime
# from redis import StrictRedis
import traceback
from timeit import default_timer as timer
import mysql.connector
# from mysql.connector import errorcode
from sqlalchemy.sql import select
# @added 20180715 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
from sqlalchemy.sql import desc
# @added 20161213 - Branch #1790: test_tsfresh
# To match the new order introduced via the test_tsfresh method
import numpy as np
# import pandas as pd
# @added 20170107 - Feature #1852: Ionosphere - features_profile matched graphite graphs
# Feature #1844: ionosphere_matched DB table
from tsfresh import __version__ as tsfresh_version
# @added 20170809 - Task #2132: Optimise Ionosphere DB usage
from pymemcache.client.base import Client as pymemcache_Client
# @added 20180617 - Feature #2404: Ionosphere - fluid approximation
import pandas as pd
from tsfresh.feature_extraction import (
# @modified 20210101 - Task #3928: Update Skyline to use new tsfresh feature extraction method
# extract_features, ReasonableFeatureExtractionSettings)
extract_features, EfficientFCParameters)
import settings
from skyline_functions import (
fail_check, mysql_select, write_data_to_file, send_graphite_metric,
# @added 20170825 - Task #2132: Optimise Ionosphere DB usage
# @modified 20190408 - Feature #2484: FULL_DURATION feature profiles
# Moved to common_functions
# get_memcache_metric_object)
mkdir_p,
# @added 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# Added a single functions to deal with Redis connection and the
# charset='utf-8', decode_responses=True arguments required in py3
get_redis_conn, get_redis_conn_decoded,
# @added 20200714 - Bug #3644: Do not apply ionosphere_busy to batch processing metrics
# Feature #3480: batch_processing
is_batch_metric)
# @added 20161221 - calculate features for every anomaly, instead of making the
# user do it in the frontend or calling the webapp constantly in a cron like
# manner. Decouple Ionosphere from the webapp.
from features_profile import calculate_features_profile
# @modified 20170107 - Feature #1844: ionosphere_matched DB table
# Added ionosphere_matched_meta
from database import (
get_engine, ionosphere_table_meta,
# @added 20170825 - Task #2132: Optimise Ionosphere DB usage
# @modified 20190408 - Feature #2484: FULL_DURATION feature profiles
# Moved to common_functions
# metrics_table_meta,
ionosphere_matched_table_meta,
# @added 20200516 - Bug #3546: Change ionosphere_enabled if all features profiles are disabled
# Readded metrics_table to set ionosphere_enabled to 0 if a metric has no
# fps enabled and has been willy nillied
metrics_table_meta,
# @added 20210412 - Feature #4014: Ionosphere - inference
# Branch #3590: inference
motifs_matched_table_meta,
# @added 20210414 - Feature #4014: Ionosphere - inference
# Branch #3590: inference
not_anomalous_motifs_table_meta,
)
# @added 20210425 - Task #4030: refactoring
# Feature #4014: Ionosphere - inference
from functions.numpy.percent_different import get_percent_different
from tsfresh_feature_names import TSFRESH_FEATURES
# @added 20170114 - Feature #1854: Ionosphere learn
# @modified 20170117 - Feature #1854: Ionosphere learn - generations
# Renamed the function from simple learn to the meme it has become
# from learn import learn
from learn import ionosphere_learn
# @added 20170306 - Feature #1960: ionosphere_layers
from layers import run_layer_algorithms
# @added 20190322 - Feature #2484: FULL_DURATION feature profiles
from common_functions import (
get_metrics_db_object, get_calculated_features)
# @added 20190327 - Feature #2484
from echo import ionosphere_echo
# Module-level configuration for the ionosphere app. Every settings read is
# wrapped in a broad try/except with a fallback default so that a partially
# configured settings.py cannot prevent the daemon from starting.
skyline_app = 'ionosphere'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
skyline_app_loglock = '%s.lock' % skyline_app_logfile
skyline_app_logwait = '%s.wait' % skyline_app_logfile
python_version = int(version_info[0])
this_host = str(os.uname()[1])
# Converting one settings variable into a local variable, just because it is a
# long string otherwise.
try:
    ENABLE_IONOSPHERE_DEBUG = settings.ENABLE_IONOSPHERE_DEBUG
except:
    logger.error('error :: cannot determine ENABLE_IONOSPHERE_DEBUG from settings')
    ENABLE_IONOSPHERE_DEBUG = False
# SERVER_METRIC_PATH is used as a suffix in the Graphite namespace below
try:
    SERVER_METRIC_PATH = '.%s' % settings.SERVER_METRICS_NAME
    if SERVER_METRIC_PATH == '.':
        SERVER_METRIC_PATH = ''
except:
    SERVER_METRIC_PATH = ''
# @added 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# Number of processes to assign to Ionosphere, however Ionosphere should never
# need more than 1 and is effectively hard coded as such currently. This
# variable is only declared for the purpose of maintaining a standard set up in
# each module and to possibly enable more than one processor on Ionosphere in
# the future, should there be a requirement for Ionosphere to analyse the
# metrics quicker. Running Ionosphere with more than one process is untested
# and currently it is hard coded to be 1
# (https://github.com/earthgecko/skyline/issues/69)
try:
    IONOSPHERE_PROCESSES = settings.IONOSPHERE_PROCESSES
    if IONOSPHERE_PROCESSES != 1:
        IONOSPHERE_PROCESSES = 1
except:
    IONOSPHERE_PROCESSES = 1
# @added 20190524 - Bug #3050: Ionosphere - Skyline and Graphite feedback
# Do not run checks if namespace has matched multiple times in the
# last 10 minutes. However determining which Skyline related metrics
# are feeding back are quite difficult to ascertain. So use the
# ionosphere_busy logic again and use or find the skyline host namespace
# and if busy do not analyse the Skyline host namespace while
# ionosphere is busy.
try:
    # @modified 20200606 - Bug #3572: Apply list to settings import
    SKYLINE_FEEDBACK_NAMESPACES = list(settings.SKYLINE_FEEDBACK_NAMESPACES)
except:
    # Let us take a guess
    try:
        graphite_host = str(settings.GRAPHITE_HOST)
        graphite_hostname = graphite_host.split('.', -1)[0]
        SKYLINE_FEEDBACK_NAMESPACES = [settings.SERVER_METRICS_NAME, graphite_hostname]
    except:
        SKYLINE_FEEDBACK_NAMESPACES = [this_host]
# @added 20200330 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
try:
    IONOSPHERE_MANAGE_PURGE = settings.IONOSPHERE_MANAGE_PURGE
except:
    IONOSPHERE_MANAGE_PURGE = True
# @added 20200413 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
try:
    from settings import BATCH_PROCESSING
except:
    BATCH_PROCESSING = None
try:
    # @modified 20200606 - Bug #3572: Apply list to settings import
    # from settings import BATCH_PROCESSING_NAMESPACES
    BATCH_PROCESSING_NAMESPACES = list(settings.BATCH_PROCESSING_NAMESPACES)
except:
    BATCH_PROCESSING_NAMESPACES = []
# @added 20200813 - Feature #3670: IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
try:
    IONOSPHERE_HISTORICAL_DATA_FOLDER = settings.IONOSPHERE_HISTORICAL_DATA_FOLDER
except:
    IONOSPHERE_HISTORICAL_DATA_FOLDER = '/opt/skyline/ionosphere/historical_data'
try:
    IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR = settings.IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
except:
    IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR = []
# @added 20210412 - Feature #4014: Ionosphere - inference
# Branch #3590: inference
try:
    IONOSPHERE_INFERENCE_MOTIFS_ENABLED = settings.IONOSPHERE_INFERENCE_MOTIFS_ENABLED
except:
    IONOSPHERE_INFERENCE_MOTIFS_ENABLED = True
# The inference import is conditional; ionosphere_motif_inference is set to
# None when inference is disabled so callers can test its truthiness
if IONOSPHERE_INFERENCE_MOTIFS_ENABLED:
    from inference import ionosphere_motif_inference
else:
    ionosphere_motif_inference = None
try:
    IONOSPHERE_INFERENCE_MOTIFS_TEST_ONLY = settings.IONOSPHERE_INFERENCE_MOTIFS_TEST_ONLY
except:
    IONOSPHERE_INFERENCE_MOTIFS_TEST_ONLY = False
# @added 20210419 - Feature #4014: Ionosphere - inference
# Only store motif data in the database if specifically enabled
try:
    IONOSPHERE_INFERENCE_STORE_MATCHED_MOTIFS = settings.IONOSPHERE_INFERENCE_STORE_MATCHED_MOTIFS
except:
    IONOSPHERE_INFERENCE_STORE_MATCHED_MOTIFS = False
# @added 20210512 - Feature #4064: VERBOSE_LOGGING
try:
    VERBOSE_LOGGING = settings.IONOSPHERE_VERBOSE_LOGGING
except:
    VERBOSE_LOGGING = False
skyline_app_graphite_namespace = 'skyline.%s%s' % (skyline_app, SERVER_METRIC_PATH)
max_age_seconds = settings.IONOSPHERE_CHECK_MAX_AGE
# Database configuration
config = {'user': settings.PANORAMA_DBUSER,
          'password': settings.PANORAMA_DBUSERPASS,
          'host': settings.PANORAMA_DBHOST,
          'port': settings.PANORAMA_DBPORT,
          'database': settings.PANORAMA_DATABASE,
          'raise_on_warnings': True}
failed_checks_dir = '%s_failed' % settings.IONOSPHERE_CHECK_PATH
last_purge_key = '%s.last_purge_ts' % skyline_app
LOCAL_DEBUG = False
class Ionosphere(Thread):
"""
The Ionosphere class which controls the ionosphere thread and spawned
processes.
"""
    def __init__(self, parent_pid):
        """
        Initialize Ionosphere

        Define Redis, mysql and memcached connections

        :param parent_pid: the pid of the parent (agent) process, used by
            check_if_parent_is_alive to detect an orphaned worker
        """
        super(Ionosphere, self).__init__()
        # @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow
        # @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
        # Branch #3262: py3
        # Use get_redis_conn and get_redis_conn_decoded
        # if settings.REDIS_PASSWORD:
        #     self.redis_conn = StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH)
        # else:
        #     self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
        # @added 20191030 - Bug #3266: py3 Redis binary objects not strings
        # Branch #3262: py3
        # Added a single functions to deal with Redis connection and the
        # charset='utf-8', decode_responses=True arguments required in py3
        # redis_conn returns raw (bytes in py3) responses, redis_conn_decoded
        # returns str responses
        self.redis_conn = get_redis_conn(skyline_app)
        self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
        self.daemon = True
        self.parent_pid = parent_pid
        self.current_pid = getpid()
        # NOTE(review): this connection is opened at init; mysql_insert opens
        # its own per-call connection rather than reusing this one
        self.mysql_conn = mysql.connector.connect(**config)
        # @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
        # Task #3032: Debug number of Python processes and memory use
        # Branch #3002: docker
        # Reduce amount of Manager instances that are used as each requires a
        # copy of entire memory to be copied into each subprocess so this
        # results in a python process per Manager instance, using as much
        # memory as the parent. OK on a server, not so much in a container.
        # Disabled all the Manager().list() below and replaced with Redis sets
        # self.anomalous_metrics = Manager().list()
        # self.not_anomalous = Manager().list()
        # self.features_profiles_checked = Manager().list()
        # self.training_metrics = Manager().list()
        # self.sent_to_panorama = Manager().list()
        # @added 20170108 - Feature #1830: Ionosphere alerts
        # Added lists of ionosphere_smtp_alerter_metrics and
        # ionosphere_non_smtp_alerter_metrics
        # self.ionosphere_smtp_alerter_metrics = Manager().list()
        # self.ionosphere_non_smtp_alerter_metrics = Manager().list()
        # @added 20170306 - Feature #1960: ionosphere_layers
        # self.layers_checked = Manager().list()
        # @added 20170809 - Task #2132: Optimise Ionosphere DB usage
        # memcache_client is None when memcache is disabled; callers must
        # check before use
        if settings.MEMCACHE_ENABLED:
            self.memcache_client = pymemcache_Client((settings.MEMCACHED_SERVER_IP, settings.MEMCACHED_SERVER_PORT), connect_timeout=0.1, timeout=0.2)
        else:
            self.memcache_client = None
def check_if_parent_is_alive(self):
"""
Self explanatory
"""
try:
kill(self.current_pid, 0)
kill(self.parent_pid, 0)
except:
# @added 20201203 - Bug #3856: Handle boring sparsely populated metrics in derivative_metrics
# Log warning
logger.warn('warning :: parent or current process dead')
exit(0)
"""
These are the ionosphere mysql functions used to surface and input
ionosphere data for timeseries.
"""
def mysql_insert(self, insert):
"""
Insert data into mysql table
:param insert: the insert string
:type insert: str
:return: int
:rtype: int or boolean
- **Example usage**::
query = 'insert into host (host) VALUES (\'this_host\')'
result = self.mysql_insert(query)
.. note::
- If the MySQL query fails a boolean will be returned not a tuple
* ``False``
* ``None``
"""
try:
cnx = mysql.connector.connect(**config)
if ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: connected to mysql')
except mysql.connector.Error as err:
logger.error('error :: mysql error - %s' % str(err))
logger.error('error :: failed to connect to mysql')
raise
if cnx:
try:
cursor = cnx.cursor()
cursor.execute(insert)
inserted_id = cursor.lastrowid
# Make sure data is committed to the database
cnx.commit()
cursor.close()
cnx.close()
return inserted_id
except mysql.connector.Error as err:
logger.error('error :: mysql error - %s' % str(err))
logger.error('Failed to insert record')
cnx.close()
raise
else:
cnx.close()
return False
return False
    def purge_old_data_dirs(self, dir_path, older_than):
        """
        Purge timestamped training data directories under ``dir_path`` that
        are older than ``older_than`` seconds and rebuild the
        ``ionosphere.training_data`` Redis set from the directories that
        remain, plus any historical training data that was moved aside.

        :param dir_path: the training data directory to walk (presumably
            settings.IONOSPHERE_DATA_FOLDER - confirm against caller)
        :param older_than: the age in seconds (by mtime) past which a
            timestamped directory is purged
        :type dir_path: str
        :type older_than: int
        :return: None
        """
        time_now = time()
        # @added 20200731 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
        # Still manage ionsphere.training_data
        if IONOSPHERE_MANAGE_PURGE:
            logger.info('Cleaning old training data from %s older than %s seconds' % (
                dir_path, str(older_than)))
        else:
            logger.info('IONOSPHERE_MANAGE_PURGE set to False managing ionosphere.training_data only, not purging')
        # @added 20200409 - Feature #3472: ionosphere.training_data Redis set
        # Feature #3474: webapp api - training_data
        # If training_data is not purged and contains the correct training_data
        # files, add it to the list to be added to the Redis set
        training_data_list = []
        # @added 20200625 - Feature #3472: ionosphere.training_data Redis set
        # Feature #3474: webapp api - training_data
        # Added occassional logging for monitoring
        last_log_time = int(time_now)
        try:
            for path, folders, files in os.walk(dir_path):
                for folder in folders[:]:
                    # @added 20200625 - Feature #3472: ionosphere.training_data Redis set
                    # Feature #3474: webapp api - training_data
                    # Added occassional logging for monitoring
                    current_time = int(time())
                    last_logged = current_time - last_log_time
                    if last_logged > 29:
                        # @modified 20200731 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
                        # Still manage ionsphere.training_data
                        if IONOSPHERE_MANAGE_PURGE:
                            logger.info('still purging')
                        else:
                            logger.info('still managing ionosphere.training_data')
                        last_log_time = current_time
                        # @added 20200626 - Feature #3472: ionosphere.training_data Redis set
                        # Report app up to stop other apps not finding the
                        # ionosphere key in Redis
                        try:
                            self.redis_conn.setex(skyline_app, 120, current_time)
                            logger.info('updated Redis key for %s up' % skyline_app)
                        except:
                            logger.error('error :: failed to update Redis key for %s up' % skyline_app)
                    folder_path = os.path.join(path, folder)
                    # Only timestamped directories are removed
                    if re.match('\d{10}', folder):
                        if ENABLE_IONOSPHERE_DEBUG:
                            logger.info('debug :: matched - %s' % folder_path)
                        # @modified 20200731 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
                        # Still manage ionosphere.training_data
                        if IONOSPHERE_MANAGE_PURGE:
                            if (time_now - os.path.getmtime(folder_path)) > older_than:
                                # @added 20200813 - Feature #3670: IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
                                # Before purging, move any training data that
                                # matches a configured keep-namespace into
                                # IONOSPHERE_HISTORICAL_DATA_FOLDER
                                if IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR:
                                    for rpath, rfolders, rfiles in os.walk(folder_path):
                                        for rfolder in rfolders[:]:
                                            current_folder = os.path.join(rpath, rfolder)
                                            for rrpath, rrfolders, rrfiles in os.walk(current_folder):
                                                move_files = False
                                                training_files_dirs = []
                                                if len(rrfiles) > 0:
                                                    for rfile in rrfiles:
                                                        for include_namespace in IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR:
                                                            if include_namespace in rfile:
                                                                move_files = True
                                                if move_files:
                                                    training_files_dirs.append(rrpath)
                                                if training_files_dirs:
                                                    try:
                                                        dest_path = rrpath.replace(dir_path, IONOSPHERE_HISTORICAL_DATA_FOLDER)
                                                        if not os.path.exists(dest_path):
                                                            mkdir_p(dest_path)
                                                        training_files = []
                                                        for training_files_dir in training_files_dirs:
                                                            training_files = os.listdir(training_files_dir)
                                                        for f in training_files:
                                                            src_file = '%s/%s' % (training_files_dir, f)
                                                            dest_file = '%s/%s' % (dest_path, f)
                                                            shutil_move(src_file, dest_file)
                                                        files_moved = True
                                                    except:
                                                        logger.error(traceback.format_exc())
                                                        logger.error('error :: failed to move files from %s to %s' % (current_folder, IONOSPHERE_HISTORICAL_DATA_FOLDER))
                                                        files_moved = False
                                                    if files_moved:
                                                        try:
                                                            rmtree(rrpath)
                                                            logger.info('removed - %s as files were moved to %s' % (rrpath, dest_path))
                                                        except:
                                                            logger.error(traceback.format_exc())
                                                            logger.error('error :: failed to rmtree %s' % rrpath)
                                try:
                                    rmtree(folder_path)
                                    logger.info('removed - %s' % folder_path)
                                except:
                                    logger.error('error :: failed to rmtree %s' % folder_path)
                            # @added 20200409 - Feature #3472: ionosphere.training_data Redis set
                            # Feature #3474: webapp api - training_data
                            else:
                                if settings.IONOSPHERE_DATA_FOLDER in folder_path:
                                    training_data_list.append(folder_path)
                        # @added 20200731 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
                        # Still manage ionosphere.training_data
                        else:
                            if settings.IONOSPHERE_DATA_FOLDER in folder_path:
                                training_data_list.append(folder_path)
        except:
            logger.info(traceback.format_exc())
            logger.error('error :: purge_old_data_dirs - os.walk')
        # @added 20200529 - Feature #3472: ionosphere.training_data Redis set
        # Feature #3474: webapp api - training_data
        # @modified 20200731 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
        # Still manage ionosphere.training_data
        if IONOSPHERE_MANAGE_PURGE:
            logger.info('cleaned old training data')
        # @added 20200813 - Feature #3670: IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
        # Declare training_data_instances even if no training_data_list exists
        # as it can be appended to by the historical training data
        training_data_instances = []
        # @added 20200409 - Feature #3472: ionosphere.training_data Redis set
        # Feature #3474: webapp api - training_data
        # Evaluate each surviving training data dir and build the list of
        # [metric, timestamp, resolution_seconds] entries for the Redis set
        if training_data_list:
            training_data_instances = []
            for training_data_dir in training_data_list:
                # @added 20200625 - Feature #3472: ionosphere.training_data Redis set
                # Feature #3474: webapp api - training_data
                # Added occassional logging for monitoring
                current_time = int(time())
                last_logged = current_time - last_log_time
                if last_logged > 29:
                    logger.info('still creating training_data Redis set')
                    last_log_time = current_time
                    # @added 20200626 - Feature #3472: ionosphere.training_data Redis set
                    # Report app up to stop other apps not finding the
                    # ionosphere key in Redis
                    try:
                        self.redis_conn.setex(skyline_app, 120, current_time)
                        logger.info('updated Redis key for %s up' % skyline_app)
                    except:
                        logger.error('error :: failed to update Redis key for %s up' % skyline_app)
                for path, folders, files in os.walk(training_data_dir):
                    # @modified 20200529 - Feature #3472: ionosphere.training_data Redis set
                    # Feature #3474: webapp api - training_data
                    # Wrapped in try and except
                    try:
                        add_folder = False
                        metric = None
                        timestamp = None
                        if files:
                            add_folder = False
                            metric = None
                            timestamp = None
                            # @added 20200815 - Feature #3670: IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
                            # Declared these first for all
                            metric_file = None
                            metric_file_path = None
                            if '/learn/' in path:
                                # @modified 20200815 - Feature #3670: IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
                                # metric_file = None
                                # metric_file_path = None
                                continue
                            for ifile in files:
                                if ifile.endswith('.png'):
                                    # @added 20210329 - Feature #3978: luminosity - classify_metrics
                                    # Feature #3642: Anomaly type classification
                                    if ifile.startswith('adtk_'):
                                        continue
                                    add_folder = True
                                if ifile.endswith('.txt'):
                                    if ifile.endswith('.fp.details.txt'):
                                        continue
                                    elif ifile.endswith('.fp.created.txt'):
                                        continue
                                    # @added 20210329 - Feature #3978: luminosity - classify_metrics
                                    # Feature #3642: Anomaly type classification
                                    elif ifile.startswith('adtk_'):
                                        continue
                                    elif ifile == 'data.txt':
                                        continue
                                    else:
                                        metric_file = ifile
                                        metric_file_path = path
                            if add_folder:
                                if metric_file and metric_file_path:
                                    # The metric name is the txt filename and the
                                    # timestamp is the 10 digit path element
                                    metric = metric_file.replace('.txt', '', 1)
                                    path_elements = metric_file_path.split(os.sep)
                                    for element in path_elements:
                                        if re.match('\d{10}', element):
                                            timestamp = int(element)
                                if metric and timestamp:
                                    # @added 20200425 - Feature #3508: ionosphere.untrainable_metrics
                                    # Determine and add resolution
                                    resolution_seconds = settings.FULL_DURATION
                                    for ifile in files:
                                        # @added 20210329 - Feature #3978: luminosity - classify_metrics
                                        # Feature #3642: Anomaly type classification
                                        if ifile.startswith('adtk_'):
                                            continue
                                        if ifile.endswith('.png') and 'mirage' in ifile and 'graphite' in ifile:
                                            try:
                                                ifile_resolution_elements = ifile.replace('.png', '', 1).split('.')
                                                ifile_resolution_str = ifile_resolution_elements[-1]
                                                ifile_resolution = int(ifile_resolution_str.replace('h', '', 1))
                                                resolution_seconds = ifile_resolution * 3600
                                            except:
                                                pass
                                    # @modified 20200425 - Feature #3508: ionosphere.untrainable_metrics
                                    # Added resolution_seconds
                                    # training_data_instances.append([metric, timestamp])
                                    training_data_instances.append([metric, timestamp, resolution_seconds])
                    except:
                        logger.error(traceback.format_exc())
                        logger.error('error :: failed to evaluate training_dir - %s' % str(training_data_dir))
        # @added 20200813 - Feature #3670: IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
        # If the IONOSPHERE_HISTORICAL_DATA_FOLDER dir exist iterate it and
        # and historical training data to the list.
        if os.path.exists(IONOSPHERE_HISTORICAL_DATA_FOLDER) and IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR:
            historical_training_data_added = 0
            if training_data_instances:
                training_data_count = len(training_data_instances)
                logger.info('There are %s training_data instances before iterating histroical training data' % (str(training_data_count)))
            current_time = int(time())
            last_logged = current_time - last_log_time
            if last_logged > 29:
                logger.info('still creating training_data Redis set')
                last_log_time = current_time
                try:
                    self.redis_conn.setex(skyline_app, 120, current_time)
                    logger.info('updated Redis key for %s up' % skyline_app)
                except:
                    logger.error('error :: failed to update Redis key for %s up' % skyline_app)
            for path, folders, files in os.walk(IONOSPHERE_HISTORICAL_DATA_FOLDER):
                try:
                    add_folder = False
                    metric = None
                    timestamp = None
                    historical_metric_data = False
                    if files:
                        # Only evaluate dirs whose files match a configured
                        # keep-namespace
                        for historical_metric_namespace in IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR:
                            if historical_metric_data:
                                continue
                            for ifile in files:
                                if historical_metric_namespace in ifile:
                                    historical_metric_data = True
                                    break
                        if historical_metric_data:
                            add_folder = False
                            metric = None
                            timestamp = None
                            if '/learn/' in path:
                                metric_file = None
                                metric_file_path = None
                                continue
                            for ifile in files:
                                if ifile.endswith('.png'):
                                    add_folder = True
                                if ifile.endswith('.txt'):
                                    if ifile.endswith('.fp.details.txt'):
                                        continue
                                    if ifile.endswith('.fp.created.txt'):
                                        continue
                                    else:
                                        metric_file = ifile
                                        metric_file_path = path
                            if add_folder:
                                if metric_file and metric_file_path:
                                    metric = metric_file.replace('.txt', '', 1)
                                    path_elements = metric_file_path.split(os.sep)
                                    for element in path_elements:
                                        if re.match('\d{10}', element):
                                            timestamp = int(element)
                                if metric and timestamp:
                                    resolution_seconds = settings.FULL_DURATION
                                    for ifile in files:
                                        if ifile.endswith('.png') and 'mirage' in ifile and 'graphite' in ifile:
                                            try:
                                                ifile_resolution_elements = ifile.replace('.png', '', 1).split('.')
                                                ifile_resolution_str = ifile_resolution_elements[-1]
                                                ifile_resolution = int(ifile_resolution_str.replace('h', '', 1))
                                                resolution_seconds = ifile_resolution * 3600
                                            except:
                                                pass
                                    training_data_instances.append([metric, timestamp, resolution_seconds])
                                    historical_training_data_added += 1
                except:
                    logger.error(traceback.format_exc())
                    # NOTE(review): this logs training_data_dir from the
                    # previous loop, not the historical path being walked -
                    # presumably a copy/paste of the earlier handler
                    logger.error('error :: failed to evaluate training_dir - %s' % str(training_data_dir))
            logger.info('added %s historical training data instances' % (str(historical_training_data_added)))
        # Build the new set under a temporary key then atomically rename it
        # over ionosphere.training_data
        if training_data_instances:
            training_data_count = len(training_data_instances)
            redis_set = 'ionosphere.training_data.new'
            logger.info('creating Redis set %s with %s training_data instances' % (redis_set, str(training_data_count)))
            try:
                # Delete it if it exists and was not renamed for some reason
                self.redis_conn.delete(redis_set)
                logger.info(
                    'deleted Redis set - %s' % (redis_set))
            except:
                pass
            # @modified 20200425 - Feature #3508: ionosphere.untrainable_metrics
            # Added resolution_seconds
            # for metric, timestamp in training_data_instances:
            for metric, timestamp, resolution_seconds in training_data_instances:
                try:
                    # @modified 20200425 - Feature #3508: ionosphere.untrainable_metrics
                    # Added resolution_seconds
                    # data = [metric, int(timestamp)]
                    data = [metric, int(timestamp), resolution_seconds]
                    self.redis_conn.sadd(redis_set, str(data))
                except:
                    logger.error(traceback.format_exc())
                    logger.error('error :: failed to add %s to %s Redis set' % (str(data), redis_set))
            try:
                # Rename works to overwrite existing key fine
                # and ... https://redis.io/commands/rename
                # > when this happens RENAME executes an implicit DEL operation, so if the
                # > deleted key contains a very big value it may cause high latency even if RENAME
                # > itself is usually a constant-time operation.
                # Does not apply, not as it is not MASSIVE set
                self.redis_conn.rename('ionosphere.training_data.new', 'ionosphere.training_data')
                logger.info('replaced Redis ionosphere.training_data via a rename of ionosphere.training_data.new')
            except:
                logger.error(traceback.format_exc())
                logger.error(
                    'error :: failed to rename ionosphere.training_data.new to ionosphere.training_data')
        # Record when this purge ran, in Redis and in a backup file
        last_purge_ts = int(time())
        try:
            self.redis_conn.setex(last_purge_key, 1800, last_purge_ts)
            logger.info('updated Redis key for %s' % last_purge_key)
        except:
            logger.error('error :: failed to update Redis key for %s' % last_purge_key)
        backup_purge_ts_file = '%s/last_purge_ts.txt' % (settings.IONOSPHERE_DATA_FOLDER)
        try:
            write_data_to_file(skyline_app, backup_purge_ts_file, 'w', last_purge_ts)
            logger.info('updated the backup_purge_ts_file with %s' % str(last_purge_ts))
        except:
            logger.error('error :: failed to update the backup_purge_ts_file - %s' % backup_purge_ts_file)
        return
def remove_metric_check_file(self, metric_check_file):
if os.path.isfile(str(metric_check_file)):
try:
os.remove(str(metric_check_file))
logger.info('metric_check_file removed - %s' % str(metric_check_file))
except OSError:
pass
return
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# Branch #922: Ionosphere
# Bringing Ionosphere online - do alert on Ionosphere metrics
    def manage_ionosphere_unique_metrics(self):
        """
        - Create a Redis set of all Ionosphere enabled metrics.
        - Manage the ionosphere.untrainable_metrics set, removing items when
          they 'expire'

        The rebuild of ionosphere.unique_metrics is rate limited by the
        ionosphere.manage_ionosphere_unique_metrics Redis key (set with a 300
        second TTL below) - while the key exists the set is not rebuilt.

        :return: ``True``, or ``False`` when no MySQL engine could be obtained
        :rtype: boolean
        """
        # Local helper wrapping get_engine so failures return a
        # (None, log_msg, trace) tuple rather than raising
        def get_an_engine():
            try:
                engine, log_msg, trace = get_engine(skyline_app)
                return engine, log_msg, trace
            except:
                trace = traceback.format_exc()
                logger.error('%s' % trace)
                log_msg = 'error :: failed to get MySQL engine for manage_ionosphere_unique_metrics'
                logger.error('%s' % log_msg)
                return None, log_msg, trace
        ionosphere_unique_metrics_count = 0
        redis_ionosphere_unique_metrics = None
        ionosphere_unique_metrics = []
        try:
            # ionosphere_unique_metrics = list(self.redis_conn.smembers('ionosphere.unique_metrics'))
            # @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
            # Branch #3262: py3
            # redis_ionosphere_unique_metrics = self.redis_conn.smembers('ionosphere.unique_metrics')
            redis_ionosphere_unique_metrics = self.redis_conn_decoded.smembers('ionosphere.unique_metrics')
        except:
            logger.error(traceback.format_exc())
            logger.error('error :: failed to get ionosphere.unique_metrics from Redis')
            # ionosphere_unique_metrics = []
        if redis_ionosphere_unique_metrics is not None:
            ionosphere_unique_metrics = list(redis_ionosphere_unique_metrics)
            ionosphere_unique_metrics_count = len(ionosphere_unique_metrics)
            logger.info('Redis ionosphere.unique_metrics set has %s metrics' % (str(ionosphere_unique_metrics_count)))
        else:
            logger.info('Redis ionosphere.unique_metrics unknown setting to []')
            ionosphere_unique_metrics = []
        manage_ionosphere_unique_metrics = True
        manage_ionosphere_unique_metrics_key = []
        try:
            manage_ionosphere_unique_metrics_key = self.redis_conn.get('ionosphere.manage_ionosphere_unique_metrics')
        except Exception as e:
            if LOCAL_DEBUG:
                logger.error('error :: could not query Redis for ionosphere.manage_ionosphere_unique_metrics key: %s' % str(e))
        # NOTE(review): if the Redis get raises, the initial [] (which is also
        # not None) disables management too - presumably an intended fail-safe,
        # confirm
        if manage_ionosphere_unique_metrics_key is not None:
            manage_ionosphere_unique_metrics = False
        logger.info('getting MySQL engine for ionosphere_enabled_metrics')
        try:
            engine, log_msg, trace = get_an_engine()
            logger.info(log_msg)
        except:
            logger.error(traceback.format_exc())
            logger.error('error :: could not get a MySQL engine for ionosphere_enabled_metrics')
            return False
        if not engine:
            logger.error('error :: MySQL engine not obtained for ionosphere_enabled_metrics')
            return False
        # Determine the metrics that have ionosphere_enabled
        # @added 20170103 - Task #1658: Patterning Skyline Ionosphere
        # TODO: We need 2 sets not just ionosphere.unique_metrics otherwise
        # if a metric is switch from Analyzer to Mirage will send all
        # matched anomalies to Ionosphere even if there is no features
        # profile at the specified duration.
        # ionosphere.analyzer.unique_metrics (at FULL_DURATION)
        # ionosphere.mirage.unique_metrics (NOT at FULL_DURATION)
        # @modified 20170108 - Feature #1852: Ionosphere - features_profile matched graphite graphs
        # Yes those ^^ are needed, MySQL join?
        ionosphere_enabled_metrics = []
        ionosphere_metrics_count = 0
        query_ok = False
        try:
            stmt = 'select metric from metrics where ionosphere_enabled=1'
            connection = engine.connect()
            for row in engine.execute(stmt):
                metric_basename = row['metric']
                metric_name = '%s%s' % (str(settings.FULL_NAMESPACE), str(metric_basename))
                ionosphere_enabled_metrics.append(metric_name)
            connection.close()
            query_ok = True
        except:
            logger.error(traceback.format_exc())
            logger.error('error :: could not determine ionosphere_enabled metrics from the DB to manage ionosphere.unique_metrics Redis set')
        ionosphere_metrics_count = len(ionosphere_enabled_metrics)
        logger.info('db has %s ionosphere_enabled metrics' % (str(ionosphere_metrics_count)))
        # @added 20190528 - Branch #3002: docker
        # A placeholder entry so the new Redis set is never empty
        if ionosphere_metrics_count == 0:
            ionosphere_enabled_metrics = ['none']
        if manage_ionosphere_unique_metrics:
            # Testing the query was fine and Ionosphere metrics can go to 0 if
            # all were disabled
            if query_ok:
                manage_ionosphere_unique_metrics = True
            else:
                manage_ionosphere_unique_metrics = False
        if manage_ionosphere_unique_metrics:
            # Build ionosphere.new_unique_metrics then rename it over
            # ionosphere.unique_metrics
            for metric_name in ionosphere_enabled_metrics:
                try:
                    self.redis_conn.sadd('ionosphere.new_unique_metrics', metric_name)
                    # logger.info('added %s to ionosphere.new_unique_metrics Redis set' % metric_name)
                except:
                    logger.error(traceback.format_exc())
                    logger.info('error :: failed to add %s to ionosphere.new_unique_metrics Redis set' % metric_name)
            try:
                self.redis_conn.setex('ionosphere.manage_ionosphere_unique_metrics', 300, time())
            except:
                logger.error('error :: failed to set key :: ionosphere.manage_ionosphere_unique_metrics')
            try:
                logger.info('replacing Redis ionosphere.unique_metrics via rename of ionosphere.new_unique_metrics')
                self.redis_conn.rename('ionosphere.new_unique_metrics', 'ionosphere.unique_metrics')
                manage_ionosphere_unique_metrics = False
                ionosphere_unique_metrics = []
            except Exception as e:
                if str(e) == 'no such key':
                    logger.info('could not rename Redis set ionosphere.new_unique_metrics to ionosphere.unique_metrics: %s' % str(e))
                else:
                    logger.error('error :: could not rename Redis set ionosphere.new_unique_metrics to ionosphere.unique_metrics: %s' % str(e))
            try:
                self.redis_conn.setex('ionosphere.manage_ionosphere_unique_metrics', 300, time())
            except:
                logger.error('error :: failed to set key :: ionosphere.manage_ionosphere_unique_metrics')
            # Re-read the set to log the post-rename count
            redis_ionosphere_unique_metrics = []
            try:
                # ionosphere_unique_metrics = list(self.redis_conn.smembers('ionosphere.unique_metrics'))
                # @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
                # Branch #3262: py3
                # redis_ionosphere_unique_metrics = self.redis_conn.smembers('ionosphere.unique_metrics')
                redis_ionosphere_unique_metrics = self.redis_conn_decoded.smembers('ionosphere.unique_metrics')
            except:
                logger.error(traceback.format_exc())
                logger.error('error :: failed to get ionosphere.unique_metrics from Redis')
                # ionosphere_unique_metrics = []
            if redis_ionosphere_unique_metrics is not None:
                ionosphere_unique_metrics = list(redis_ionosphere_unique_metrics)
                ionosphere_unique_metrics_count = len(ionosphere_unique_metrics)
                logger.info('the new Redis ionosphere.unique_metrics set has %s metrics' % (str(ionosphere_unique_metrics_count)))
            else:
                logger.info('Redis ionosphere.unique_metrics unknown setting to []')
                ionosphere_unique_metrics = []
        if engine:
            try:
                engine.dispose()
            except:
                logger.error(traceback.format_exc())
                logger.error('error :: calling engine.dispose()')
        # @added 20200425 - Feature #3508: ionosphere.untrainable_metrics
        # Manage ionosphere_untrainable_metrics Redis set
        ionosphere_untrainable_metrics = []
        ionosphere_untrainable_metrics_redis_set = 'ionosphere.untrainable_metrics'
        try:
            ionosphere_untrainable_metrics = list(self.redis_conn_decoded.smembers(ionosphere_untrainable_metrics_redis_set))
        except:
            logger.error(traceback.format_exc())
            logger.error('error :: could not get the ionosphere.untrainable_metrics set from Redis')
        if ionosphere_untrainable_metrics:
            ionosphere_untrainable_metrics_check_time = int(time())
            for ionosphere_untrainable_metric_str in ionosphere_untrainable_metrics:
                try:
                    # Each item is a literal list string; element 6 holds the
                    # remove-after timestamp
                    ionosphere_untrainable_metric = literal_eval(ionosphere_untrainable_metric_str)
                    ium_remove_after_timestamp = int(ionosphere_untrainable_metric[6])
                    if ionosphere_untrainable_metrics_check_time >= ium_remove_after_timestamp:
                        try:
                            self.redis_conn.srem(ionosphere_untrainable_metrics_redis_set, str(ionosphere_untrainable_metric))
                            logger.info('removed item - %s - from Redis set - %s' % (str(ionosphere_untrainable_metric), ionosphere_untrainable_metrics_redis_set))
                        except:
                            logger.error(traceback.format_exc())
                            logger.error('error :: failed to remove item list from Redis set - %s' % ionosphere_untrainable_metrics_redis_set)
                except:
                    logger.error(traceback.format_exc())
                    logger.error('error :: failed to manage Redis set %s' % ionosphere_untrainable_metrics_redis_set)
        return True
# @added 20161230 - Feature #1830: Ionosphere alerts
# Bug #1460: panorama check file fails
# Panorama check file fails #24
# Get rid of the skyline_functions imp as imp is deprecated in py3 anyway
def new_load_metric_vars(self, metric_vars_file):
"""
Load the metric variables for a check from a metric check variables file
:param metric_vars_file: the path and filename to the metric variables files
:type metric_vars_file: str
:return: the metric_vars list or ``False``
:rtype: list
"""
if os.path.isfile(metric_vars_file):
logger.info(
'loading metric variables from metric_check_file - %s' % (
str(metric_vars_file)))
else:
logger.error(
'error :: loading metric variables from metric_check_file - file not found - %s' % (
str(metric_vars_file)))
return False
metric_vars = []
with open(metric_vars_file) as f:
for line in f:
no_new_line = line.replace('\n', '')
no_equal_line = no_new_line.replace(' = ', ',')
array = str(no_equal_line.split(',', 1))
add_line = literal_eval(array)
metric_vars.append(add_line)
string_keys = ['metric', 'anomaly_dir', 'added_by', 'app', 'source']
float_keys = ['value']
# @modified 20170127 - Feature #1886: Ionosphere learn - child like parent with evolutionary maturity
# Added ionosphere_parent_id, always zero from Analyzer and Mirage
int_keys = [
'from_timestamp', 'metric_timestamp', 'added_at', 'full_duration',
'ionosphere_parent_id']
# @added 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
array_keys = ['triggered_algorithms', 'algorithms', 'algorithms_run']
boolean_keys = ['graphite_metric', 'run_crucible_tests']
metric_vars_array = []
for var_array in metric_vars:
key = None
value = None
if var_array[0] in string_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", '')
value = str(value_str)
if var_array[0] == 'metric':
metric = value
if var_array[0] in float_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", '')
value = float(value_str)
if var_array[0] in int_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", '')
value = int(value_str)
if var_array[0] in array_keys:
key = var_array[0]
value = literal_eval(str(var_array[1]))
if var_array[0] in boolean_keys:
key = var_array[0]
if str(var_array[1]) == 'True':
value = True
else:
value = False
if key:
metric_vars_array.append([key, value])
if len(metric_vars_array) == 0:
logger.error(
'error :: loading metric variables - none found' % (
str(metric_vars_file)))
return False
logger.info('debug :: metric_vars for %s' % str(metric))
logger.info('debug :: %s' % str(metric_vars_array))
return metric_vars_array
# @added 20170109 - Feature #1854: Ionosphere learn
# Added the spawn_learn_process after determining to is not fit to bolt learn
# inside of ionosphere.py in its entirety, no point in more conditional nesting
# and bulking up ionosphere.py with more learn parameter to spin_process etc
# ionosphere.py works, as good as it gets, so extended with learn.py. This uses
# the same no memory leak pattern that was adopted for smtp_alerts.
def spawn_learn_process(self, i, timestamp):
"""
Spawn a process to learn.
This is used for Ionosphere to learn if anomalous metrics remain
anomalous over time, as the resolution decreases. It follows the
multiprocessing methodology the was introduced in Analyzer and Mirage
in the context of the process objects being cleared down and the learn
processes cannot create memory leaks as the process always terminates or
is terminated this prevents any memory leaks in the parent.
"""
# @modified 20170117 - Feature #1854: Ionosphere learn - generations
# Renamed the function from simple learn to the meme it has become
# learn(timestamp)
ionosphere_learn(timestamp)
# @added 20190326 - Feature #2484: FULL_DURATION feature profiles
    def process_ionosphere_echo(self, i, metric_check_file):
        """
        Spawn a process_ionosphere_echo check to create features profiles at
        settings.FULL_DURATION for Mirage metrics

        The check file is parsed and the check is only processed when it was
        added by mirage (or the webapp) and the metric is an ionosphere
        enabled metric, after which ionosphere_echo is called with the
        metric and its full_duration.

        :param i: python process id
        :param metric_check_file: full path to the metric check file
        :type i: object
        :type metric_check_file: str
        :return: boolean
        :rtype: boolean
        """
        try:
            # Load and validate metric variables
            metric_vars_array = self.new_load_metric_vars(str(metric_check_file))
        except:
            logger.info(traceback.format_exc())
            logger.error('error :: process_ionosphere_echo :: failed to load metric variables from check file - %s' % (metric_check_file))
            return
        # Determine which app submitted the check, echo only handles checks
        # from mirage (and the webapp, see below)
        added_by = None
        try:
            key = 'added_by'
            value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
            added_by = str(value_list[0])
            if settings.ENABLE_IONOSPHERE_DEBUG:
                logger.info('debug :: metric variable - added_by - %s' % added_by)
        except:
            logger.error('error :: process_ionosphere_echo failed to read added_by variable from check file - %s' % (metric_check_file))
            added_by = None
        if not added_by:
            return
        if added_by != 'mirage':
            # @modified 20200109 - Feature #3380: Create echo features profile when a Mirage features profile is created
            # Allow to be added by webapp
            if added_by == 'webapp':
                logger.info('process_ionosphere_echo :: metric added_by %s OK' % added_by)
            else:
                logger.info('process_ionosphere_echo :: only mirage metrics are processed not metrics added_by %s' % added_by)
                return
        # Determine the metric name from the check file variables
        metric = None
        try:
            # metric_vars.metric
            key = 'metric'
            value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
            metric = str(value_list[0])
            if settings.ENABLE_IONOSPHERE_DEBUG:
                logger.info('debug :: metric variable - metric - %s' % metric)
        except:
            logger.info(traceback.format_exc())
            logger.error('error :: failed to read metric variable from check file - %s' % (metric_check_file))
            metric = None
        if not metric:
            logger.error('error :: process_ionosphere_echo failed to load metric variable from check file - %s' % (metric_check_file))
            return
        # @added 20190413 - Feature #2484: FULL_DURATION feature profiles
        # Only process if it is an ionosphere enabled metric
        try:
            # @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
            # Branch #3262: py3
            # ionosphere_unique_metrics = list(self.redis_conn.smembers('ionosphere.unique_metrics'))
            ionosphere_unique_metrics = list(self.redis_conn_decoded.smembers('ionosphere.unique_metrics'))
        except:
            logger.error(traceback.format_exc())
            logger.error('error :: failed to get ionosphere.unique_metrics from Redis')
            ionosphere_unique_metrics = []
        if ionosphere_unique_metrics:
            # @modified 20190413 - Bug #2942: process_ionosphere_echo metric mismatch
            # Feature #2484: FULL_DURATION feature profiles
            # Matching bug for not in list comprehension it must be an absolute
            # match
            # if not metric in ionosphere_unique_metrics:
            metric_name = '%s%s' % (str(settings.FULL_NAMESPACE), str(metric))
            # @modified 20190522: Task #3034: Reduce multiprocessing Manager list usage
            # if not metric_name in ionosphere_unique_metrics:
            if metric_name not in ionosphere_unique_metrics:
                # @modified 20200109 - Feature #3380: Create echo features profile when a Mirage features profile is created
                # Allow metrics added by webapp to skip this check as they may
                # be new ionosphere metrics and not be in the ionosphere.unique_metrics
                # set yet
                if added_by == 'webapp':
                    logger.info('process_ionosphere_echo :: %s is not in ionosphere.unique_metrics but added by webapp so possibly a new metric' % metric)
                else:
                    logger.info('process_ionosphere_echo :: only ionosphere enabled metrics are processed, skipping %s' % metric)
                    return
        # Determine the full_duration of the check, without it no echo
        # features profile can be calculated
        full_duration = None
        try:
            # metric_vars.full_duration
            key = 'full_duration'
            value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
            full_duration = int(value_list[0])
            if settings.ENABLE_IONOSPHERE_DEBUG:
                logger.info('debug :: metric variable - full_duration - %s' % str(full_duration))
        except:
            logger.error('error :: process_ionosphere_echo failed to read full_duration variable from check file - %s' % (metric_check_file))
            full_duration = None
        if not full_duration:
            return
        logger.info('process_ionosphere_echo :: processing - %s' % (metric))
        ionosphere_echo(metric, full_duration)
# @modified 20190404 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# Added the ionosphere_busy parameter
# def spin_process(self, i, metric_check_file):
def spin_process(self, i, metric_check_file, ionosphere_busy):
"""
Assign an anomalous metric to check against features profiles.
:param i: python process id
:param metric_check_file: full path to the metric check file
:param ionosphere_busy: whether to Ionosphere manage and alternate
between normal Ionosphere and echo analysis
:type i: object
:type metric_check_file: str
:type ionosphere_busy: boolen
:return: int
:rtype: int or boolean
"""
dev_null = None
def get_an_engine():
try:
engine, log_msg, trace = get_engine(skyline_app)
return engine, log_msg, trace
except:
logger.error(traceback.format_exc())
log_msg = 'error :: failed to get MySQL engine in spin_process'
logger.error('error :: failed to get MySQL engine in spin_process')
return None, log_msg, trace
def engine_disposal(engine):
if engine:
try:
engine.dispose()
except:
logger.error(traceback.format_exc())
logger.error('error :: calling engine.dispose()')
return
# @added 20200904 - Feature #3734: waterfall alerts
# Remove the metric from the waterfall_alerts Redis set
# [metric, timestamp, value, added_to_waterfall_timestamp]
# waterfall_data = [metric[1], metric[2], metric[0], added_to_waterfall_timestamp]
def remove_waterfall_alert(added_by, metric_timestamp, base_name):
redis_waterfall_alert_set = '%s.waterfall_alerts.sent_to_ionosphere' % added_by
literal_waterfall_alerts = []
try:
literal_waterfall_alerts = list(self.redis_conn_decoded.smembers(redis_waterfall_alert_set))
except:
literal_waterfall_alerts = []
waterfall_alerts = []
for literal_waterfall_alert in literal_waterfall_alerts:
waterfall_alert = literal_eval(literal_waterfall_alert)
waterfall_alerts.append(waterfall_alert)
for waterfall_alert in waterfall_alerts:
if waterfall_alert[0] == base_name:
if int(waterfall_alert[1]) == int(metric_timestamp):
try:
self.redis_conn.srem(redis_waterfall_alert_set, str(waterfall_alert))
logger.info('removed waterfall alert item from Redis set %s - %s' % (
redis_waterfall_alert_set, str(waterfall_alert)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove waterfall alert item for %s at %s from Redis set %s' % (
base_name, str(metric_timestamp), redis_waterfall_alert_set))
# @added 20201128 - Feature #3734: waterfall alerts
# If the check just done is newer than an existing mirage
# waterfall alert metric timestamp remove those keys as well
if int(waterfall_alert[1]) < metric_timestamp:
try:
self.redis_conn.srem(redis_waterfall_alert_set, str(waterfall_alert))
logger.info('removed waterfall alert item with older timestamp from Redis set %s - %s' % (
redis_waterfall_alert_set, str(waterfall_alert)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove waterfall alert item for %s at %s from Redis set %s' % (
base_name, str(metric_timestamp), redis_waterfall_alert_set))
return
# @added 20200908 - Feature #3734: waterfall alerts
# Added a common return_to_sender_to_alert function
# @added 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
def return_to_sender_to_alert(added_by, metric_timestamp, base_name, anomalous_value, triggered_algorithms, full_duration, algorithms_run):
cache_key = 'ionosphere.%s.alert.%s.%s' % (added_by, metric_timestamp, base_name)
cache_key_value = [float(anomalous_value), base_name, int(metric_timestamp), triggered_algorithms, full_duration, algorithms_run]
try:
self.redis_conn.setex(cache_key, 300, str(cache_key_value))
logger.info('added Redis alert key - %s - %s' % (
cache_key, str(cache_key_value)))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed to add Redis key - %s - [%s, \'%s\', %s, %s, %s, %s]' %
(cache_key, str(anomalous_value), base_name, str(int(metric_timestamp)),
str(triggered_algorithms), str(full_duration), str(algorithms_run)))
return
# @added 20210429 - Feature #3994: Panorama - mirage not anomalous
# A hash is added to the ionosphere.panorama.not_anomalous_metrics for
# every metric that is found to be not anomalous. This provides
# data for /panorama?not_anomalous and /panorama?not_anomalous_metric
# method which are used for plots in the webapp and json response.
# The ionosphere.panorama.not_anomalous_metrics Redis hash is managed in
# analyzer/metrics_manager
def add_not_anomalous_to_redis_hash(base_name, timestamp, value, full_duration):
redis_hash = 'ionosphere.panorama.not_anomalous_metrics'
try:
data = {
base_name: {
'timestamp': timestamp,
'value': value,
'hours_to_resolve': int(full_duration / 3600),
}
}
self.redis_conn.hset(redis_hash, time(), str(data))
logger.info('added entry to the %s Redis hash' % redis_hash)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis hash %s - %s' % (
str(data), str(redis_hash), e))
child_process_pid = os.getpid()
logger.info('child_process_pid - %s' % str(child_process_pid))
try:
ionosphere_max_runtime = settings.IONOSPHERE_MAX_RUNTIME
except:
ionosphere_max_runtime = 120
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: processing metric check - %s' % metric_check_file)
if not os.path.isfile(str(metric_check_file)):
logger.error('error :: file not found - metric_check_file - %s' % (str(metric_check_file)))
return
engine = None
anomalous_timeseries = False
dev_null = None
check_file_name = os.path.basename(str(metric_check_file))
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_name - %s' % check_file_name)
check_file_timestamp = check_file_name.split('.', 1)[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_timestamp - %s' % str(check_file_timestamp))
check_file_metricname_txt = check_file_name.split('.', 1)[1]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname_txt - %s' % check_file_metricname_txt)
check_file_metricname = check_file_metricname_txt.replace('.txt', '')
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname - %s' % check_file_metricname)
check_file_metricname_dir = check_file_metricname.replace('.', '/')
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname_dir - %s' % check_file_metricname_dir)
metric_failed_check_dir = '%s/%s/%s' % (failed_checks_dir, check_file_metricname_dir, check_file_timestamp)
failed_check_file = '%s/%s' % (metric_failed_check_dir, check_file_name)
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: failed_check_file - %s' % failed_check_file)
# @added 20200807 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# From batch_processing metrics the learn check is being added and
# removed as the learn check for batch metrics happens immediately as
# the learn after duration can have passed. To the check file needs to
# be loaded to determine if was added by ionosphere_learn before the
# check is just removed.
removed_check_file_work_done = False
# @added 20170307 - Feature #1960: ionosphere_layers - ionosphere_check_cache_key
# This Redis cache key check was added to prevent Ionosphere from
# running riot on checks if for some reason the check_file is not
# removed which happens if some exception is not handled as found out
# again during yesterday's development of run_layer_algorithms. It was
# a good reminder of how fast Skyline can iterate.
ionosphere_check_cache_key = 'ionosphere.check.%s' % check_file_name
check_done = False
try:
check_done = self.redis_conn.get(ionosphere_check_cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
if not check_done:
logger.info('check done check - no check cache key - %s' % ionosphere_check_cache_key)
else:
# @modified 20181113 - Task #2680: Remove Ionosphere check files is key exists
# This was here for initially debugging, no longer needed
# logger.error('error :: a check cache key exists - %s' % ionosphere_check_cache_key)
# logger.error('error :: failing check to prevent multiple iterations over this check')
# fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
logger.info('a check cache key exists - %s' % (ionosphere_check_cache_key))
# @modified 20200807 - Feature #3480: batch_processing
# logger.info('to prevent multiple iterations over this check removing %s' % (
logger.info('to prevent multiple iterations over this check it will be removed if not added by ionosphere_learn - %s' % (
str(metric_check_file)))
# self.remove_metric_check_file(str(metric_check_file))
# return
# @added 20200807 - Feature #3480: batch_processing
removed_check_file_work_done = True
try:
check_process_start = int(time())
# @modified 20190412 - Task #2824: Test redis-py upgrade
# Task #2926: Update dependencies
# redis-py 3.x only accepts user data as bytes, strings or
# numbers (ints, longs and floats). All 2.X users should
# make sure that the keys and values they pass into redis-py
# are either bytes, strings or numbers. Added cache_key_value
# self.redis_conn.setex(
# ionosphere_check_cache_key, 300, [check_process_start])
self.redis_conn.setex(
ionosphere_check_cache_key, 300, check_process_start)
logger.info(
'added Redis check key - %s' % (ionosphere_check_cache_key))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed to add Redis check key - %s' % (ionosphere_check_cache_key))
logger.error('error :: failing check to prevent multiple iterations over this check')
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
# Load and validate metric variables
# @modified 20161231 - Feature #1830: Ionosphere alerts
# Bug #1460: panorama check file fails
# Panorama check file fails #24
# Get rid of the skyline_functions imp as imp is deprecated in py3 anyway
# Use def new_load_metric_vars(self, metric_vars_file):
# metric_vars = load_metric_vars(skyline_app, str(metric_check_file))
metric_vars_array = self.new_load_metric_vars(str(metric_check_file))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to load metric variables from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# Test metric variables
# We use a pythonic methodology to test if the variables are defined,
# this ensures that if any of the variables are not set for some reason
# we can handle unexpected data or situations gracefully and try and
# ensure that the process does not hang.
metric = None
try:
# metric_vars.metric
# metric = str(metric_vars.metric)
key = 'metric'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
metric = str(value_list[0])
base_name = metric
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - metric - %s' % metric)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to read metric variable from check file - %s' % (metric_check_file))
metric = None
if not metric:
logger.error('error :: failed to load metric variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
value = None
try:
# metric_vars.value
# value = str(metric_vars.value)
key = 'value'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
value = float(value_list[0])
anomalous_value = value
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - value - %s' % str(value))
except:
logger.error('error :: failed to read value variable from check file - %s' % (metric_check_file))
value = None
if not value:
# @modified 20181119 - Bug #2708: Failing to load metric vars
if value == 0.0:
pass
else:
logger.error('error :: failed to load value variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
from_timestamp = None
try:
# metric_vars.from_timestamp
# from_timestamp = str(metric_vars.from_timestamp)
key = 'from_timestamp'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
from_timestamp = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - from_timestamp - %s' % str(from_timestamp))
except:
# @added 20160822 - Bug #1460: panorama check file fails
# Added exception handling here
logger.info(traceback.format_exc())
logger.error('error :: failed to read from_timestamp variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
if not from_timestamp:
logger.error('error :: failed to load from_timestamp variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
metric_timestamp = None
try:
# metric_vars.metric_timestamp
# metric_timestamp = str(metric_vars.metric_timestamp)
key = 'metric_timestamp'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
metric_timestamp = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - metric_timestamp - %s' % str(metric_timestamp))
except:
logger.error('error :: failed to read metric_timestamp variable from check file - %s' % (metric_check_file))
metric_timestamp = None
if not metric_timestamp:
logger.error('error :: failed to load metric_timestamp variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
# metric_vars.algorithms
# algorithms = metric_vars.algorithms
key = 'algorithms'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
algorithms = value_list[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - algorithms - %s' % str(algorithms))
except:
logger.error('error :: failed to read algorithms variable from check file setting to all - %s' % (metric_check_file))
algorithms = 'all'
try:
# metric_vars.triggered_algorithms
# triggered_algorithms = metric_vars.triggered_algorithms
key = 'triggered_algorithms'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
triggered_algorithms = value_list[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - triggered_algorithms - %s' % str(triggered_algorithms))
except:
logger.error('error :: failed to read triggered_algorithms variable from check file setting to all - %s' % (metric_check_file))
triggered_algorithms = 'all'
added_by = None
try:
# metric_vars.added_by
# added_by = str(metric_vars.added_by)
key = 'added_by'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
added_by = str(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - added_by - %s' % added_by)
except:
logger.error('error :: failed to read added_by variable from check file - %s' % (metric_check_file))
added_by = None
if not added_by:
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# @added 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
try:
key = 'algorithms_run'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
algorithms_run = value_list[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - algorithms_run - %s' % str(algorithms_run))
except:
logger.error('error :: failed to read algorithms_run variable from check file setting to all - %s' % (metric_check_file))
if added_by == 'mirage':
algorithms_run = settings.MIRAGE_ALGORITHMS
else:
algorithms_run = settings.ALGORITHMS
# @added 20170117 - Feature #1854: Ionosphere learn - generations
if str(added_by) == 'ionosphere_learn':
logger.info('debug :: metric variable - added_by - %s' % added_by)
# @added 20200807 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
if removed_check_file_work_done:
logger.info('this check was added by ionosphere_learn so not removing check even though a check done Redis key exists')
removed_check_file_work_done = False
# @added 20200807 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
if removed_check_file_work_done:
logger.info('a check cache key exists and the check was not added by ionosphere_learn - %s' % (ionosphere_check_cache_key))
logger.info('to prevent multiple iterations over this check removing %s' % (
str(metric_check_file)))
# @added 20200908 - Feature #3734: waterfall alerts
# Remove waterfall alert item
if added_by != 'ionosphere_learn':
remove_waterfall_alert(added_by, metric_timestamp, base_name)
self.remove_metric_check_file(str(metric_check_file))
return
# @added 20200413 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# If added_by is analyzer_batch, log and change to analyzer so that
# Ionosphere routes any alerts back to anaylzer
if str(added_by) == 'analyzer_batch':
logger.info('metric variable - added_by - %s, now switching to analyzer to route alerts to anlayzer, thanks analyzer_batch' % added_by)
added_by = 'analzyer'
logger.info('metric variable - added_by - %s, analyzer_batch checks will have alerts routed to analyzer' % added_by)
try:
# metric_vars.added_at
# added_at = str(metric_vars.added_at)
key = 'added_at'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
added_at = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - added_at - %s' % str(added_at))
except:
logger.error('error :: failed to read added_at variable from check file setting to all - %s' % (metric_check_file))
added_at = metric_timestamp
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# Added full_duration which needs to be recorded to allow Mirage metrics
# to be profiled on Redis timeseries data at FULL_DURATION
full_duration = None
try:
# metric_vars.full_duration
# full_duration = str(metric_vars.full_duration)
key = 'full_duration'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
full_duration = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - full_duration - %s' % str(full_duration))
except:
logger.error('error :: failed to read full_duration variable from check file - %s' % (metric_check_file))
full_duration = None
if not full_duration:
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# @added 20170127 - Feature #1886: Ionosphere learn - child like parent with evolutionary maturity
# Added ionosphere_parent_id, always zero from Analyzer and Mirage
ionosphere_parent_id = None
ionosphere_parent_id_determined = False
try:
key = 'ionosphere_parent_id'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
ionosphere_parent_id = int(value_list[0])
ionosphere_parent_id_determined = True
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - ionosphere_parent_id - %s' % str(ionosphere_parent_id))
except:
logger.error('error :: failed to read ionosphere_parent_id variable from check file - %s' % (metric_check_file))
ionosphere_parent_id = None
if not ionosphere_parent_id_determined:
logger.error('error :: failed to determine ionosphere_parent_id variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# @modified 20170116 - Feature #1854: Ionosphere learn
# Do not check the cache key or anomaly age if added by ionosphere_learn
if added_by != 'ionosphere_learn':
# @added 20170101 - Feature #1830: Ionosphere alerts
# Remove check file if an alert key exists
cache_key = 'ionosphere.%s.alert.%s.%s' % (added_by, metric_timestamp, base_name)
last_alert = False
try:
last_alert = self.redis_conn.get(cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
if not last_alert:
logger.info('no alert cache key - %s' % cache_key)
else:
logger.info('removing check - alert cache key exists - %s' % cache_key)
# @added 20200908 - Feature #3734: waterfall alerts
# Remove any waterfall_alert items
remove_waterfall_alert(added_by, metric_timestamp, base_name)
self.remove_metric_check_file(str(metric_check_file))
return
# @added 20200413 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Do not evaluate batch metrics against max_age_seconds
analyzer_batch_anomaly = None
if BATCH_PROCESSING:
# Is this a analyzer_batch related anomaly?
analyzer_batch_anomaly = None
analyzer_batch_metric_anomaly_key = 'analyzer_batch.anomaly.%s.%s' % (
str(metric_timestamp), metric)
try:
analyzer_batch_anomaly = self.redis_conn.get(analyzer_batch_metric_anomaly_key)
except Exception as e:
logger.error(
'error :: could not query cache_key - %s - %s' % (
analyzer_batch_metric_anomaly_key, e))
analyzer_batch_anomaly = None
if analyzer_batch_anomaly:
logger.info('batch processing - identified as an analyzer_batch triggered anomaly from the presence of the Redis key %s' % analyzer_batch_metric_anomaly_key)
else:
logger.info('batch processing - not identified as an analyzer_batch triggered anomaly as no Redis key found - %s' % analyzer_batch_metric_anomaly_key)
if analyzer_batch_anomaly:
logger.info('batch anomaly not checking max_age_seconds for %s' % analyzer_batch_metric_anomaly_key)
else:
# @modified 20200413 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Wrapped in if analyzer_batch_anomaly
now = time()
anomaly_age = int(now) - int(metric_timestamp)
if anomaly_age > max_age_seconds:
logger.info(
'Ionosphere check max age exceeded - %s - %s seconds old, older than %s seconds discarding' % (
metric, str(anomaly_age), str(max_age_seconds)))
with open(metric_check_file, 'rt') as fr:
metric_check_file_contents = fr.readlines()
logger.info(
'debug :: metric check file contents\n%s' % (str(metric_check_file_contents)))
self.remove_metric_check_file(str(metric_check_file))
return
else:
logger.info('processing check_file for ionosphere_learn - %s' % str(metric_check_file))
# @added 20161222 - ionosphere should extract features for every anomaly
# check that is sent through and calculate a feature_profile ready for
# submission by the user if they so choose. Further ionosphere could
# make itself more useful by comparing any training data profiles to
# further anomalies, however the feature profiles for subsequent
# anomalies may be similar enough to match a few times and each a closer
# match to the next.
training_metric = False
metrics_id = None
metric_ionosphere_enabled = None
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# Create the metrics_db_object so it is available to determine all
# the details of all features profiles for the metric, this has all
# the generations values avaialble in it. Here we go! Learn!
metrics_db_object = None
# @modified 20190325 - Feature #2484: FULL_DURATION feature profiles
# Moved get_metrics_db_object block to common_functions.py
try:
metrics_db_object = get_metrics_db_object(base_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine metrics_db_object from get_metrics_db_object for %s' % base_name)
if metrics_db_object:
metrics_id = None
try:
metrics_id = int(metrics_db_object['id'])
except:
# @added 20190509 - Bug #2984: Ionosphere - could not determine values from metrics_db_object
# Added a traceback here to debug an issue
logger.error(traceback.format_exc())
logger.error('error :: could not determine id from metrics_db_object for %s' % base_name)
metrics_id = None
metric_ionosphere_enabled = None
training_metric = True
if metrics_id:
# @modified 20190510 - Bug #2984: Ionosphere - could not determine values from metrics_db_object
# metric_ionosphere_enabled = int(metrics_db_object['ionosphere_enabled'])
metric_ionosphere_enabled = None
try:
metric_ionosphere_enabled = metrics_db_object['ionosphere_enabled']
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine ionosphere_enabled from metrics_db_object for %s' % base_name)
if metric_ionosphere_enabled is not None:
training_metric = False
else:
training_metric = True
if metric_ionosphere_enabled == 1:
training_metric = False
if metric_ionosphere_enabled == 0:
training_metric = True
else:
metrics_id = None
metric_ionosphere_enabled = None
training_metric = True
logger.error('error :: could not determine metric id from memcache or metrics tables for %s' % base_name)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# @added 20190524 - Bug #3050: Ionosphere - Skyline and Graphite feedback
# Do not run checks if namespace has matched multiple times in the
# last 10 minutes. However determining which Skyline related metrics
# are feeding back are quite difficult to ascetain. So use the
# ionosphere_busy logic again and use or find the skyline host namespace
# and if busy do not analyse the Skyline host namespace while
# ionosphere is busy.
feedback_metric = False
if ionosphere_busy:
metric_namespace_elements = base_name.split('.')
for to_skip in SKYLINE_FEEDBACK_NAMESPACES:
if to_skip in base_name:
feedback_metric = True
break
to_skip_namespace_elements = to_skip.split('.')
elements_matched = set(metric_namespace_elements) & set(to_skip_namespace_elements)
if len(elements_matched) == len(to_skip_namespace_elements):
feedback_metric = True
break
if feedback_metric:
cache_key = 'ionosphere.feedback_metric.checked.%s' % (base_name)
logger.info('feedback metric identified adding Redis key with 600 TTL - %s' % cache_key)
try:
self.redis_conn.setex(cache_key, 600, int(time()))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s key to Redis' % (
str(cache_key)))
# @added 20170116 - Feature #1854: Ionosphere learn - generations
# If this is added_by ionosphere_learn the id is only
# added if the use_full_duration_days features profile
# is less than max_generations as if it is at the max
# then a new features profile cannot be created from it
# even if it is a match.
metric_max_generations = None
if added_by == 'ionosphere_learn':
try:
metric_max_generations = int(metrics_db_object['max_generations'])
logger.info('determing max_generations for ionosphere_learn check - %s - %s' % (str(metric_max_generations), base_name))
except:
logger.error(traceback.format_exc())
logger.error('error ::ionosphere_learn check could not determine the metric max_generations from the metrics_db_object for %s' % base_name)
if not metric_max_generations:
logger.error('error ::ionosphere_learn check cannot continue without max_generations for %s' % base_name)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# The Redis ionosphere.ionosphere_non_smtp_alerter_metrics list is created here to
# replace the self.ionosphere_non_smtp_alerter_metrics Manager.list in the below
# section
ionosphere_non_smtp_alerter_metrics = []
try:
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# ionosphere_non_smtp_alerter_metrics = list(self.redis_conn.smembers('ionosphere.ionosphere_non_smtp_alerter_metrics'))
ionosphere_non_smtp_alerter_metrics = list(self.redis_conn_decoded.smembers('ionosphere.ionosphere_non_smtp_alerter_metrics'))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to generate a list from the ionosphere.ionosphere_non_smtp_alerter_metrics Redis set')
ionosphere_non_smtp_alerter_metrics = []
# @added 20170108 - Feature #1830: Ionosphere alerts
# Only process smtp_alerter_metrics
if training_metric:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# if base_name in self.ionosphere_non_smtp_alerter_metrics:
if base_name in ionosphere_non_smtp_alerter_metrics:
# @modified 20191114 - Feature #: forward_alert
# Allow ionosphere to check any metrics that have an alerter other than smtp set, apart from syslog
# logger.error('error :: Ionosphere does not handle metrics that do not have a smtp alert context removing check for %s' % (base_name))
logger.info('Ionosphere does not handle metrics that do not have a smtp alert context removing check for %s which is a training_metric' % (base_name))
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
else:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.training_metrics.append(base_name)
redis_set = 'ionosphere.training_metrics'
data = str(base_name)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
logger.info(
'ionosphere_enabled is %s for metric id %s - %s' % (
str(metric_ionosphere_enabled), str(metrics_id),
base_name))
if training_metric:
logger.info('Ionosphere is not enabled on %s' % (base_name))
else:
logger.info('Ionosphere is enabled on %s' % (base_name))
# @added 20161210 - Branch #922: ionosphere
# Task #1658: Patterning Skyline Ionosphere
# Only continue if there is a training data json timeseries file
metric_timeseries_dir = base_name.replace('.', '/')
# @modified 20170115 - Feature #1854: Ionosphere learn
# Allowing the bifurcation of the metric_training_data_dir based on
# whether added_by is ionosphere_learn or not, this allows Ionosphere to
# be brought online to start evaluating the learn features profiles at
# 30 days or whatever the learn_full_duration_days is for the metric
# that is being automatically learnt uses these fuller duration features
# to determine if a new training data set has been created for an
# ionosphere_enabled metric. Here Ionosphere starts to try and get
# clever, let us hope not too clever, but this is where the
# max_percent_diff_from_origin and max_generations comes in. So ...
# here we go, a really "Crazy feedback loop" @astanway :) I would say
# that this is going to be way more useful than the last referenced one
# in https://github.com/etsy/skyline/pull/90#r13592782 ;) This is it
# 20170115202500 UTC Ionosphere really is now really going to begin.
# Here we go! Learn!
# metric_training_data_dir = '%s/%s/%s' % (
# settings.IONOSPHERE_DATA_FOLDER, metric_timestamp,
# metric_timeseries_dir)
if added_by != 'ionosphere_learn':
metric_training_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_DATA_FOLDER, metric_timestamp,
metric_timeseries_dir)
else:
# Here we go! Learn you bugger! SUCH A BIG THANKS TO tsfresh!
# And flowjob and The White Stripes, @matzhouse, her and the Dude.
metric_training_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_LEARN_FOLDER, metric_timestamp,
metric_timeseries_dir)
anomaly_json = '%s/%s.json' % (metric_training_data_dir, base_name)
if os.path.isfile(anomaly_json):
logger.info('training data ts json available - %s' % (anomaly_json))
else:
logger.error('error :: training data ts json was not found - %s' % (anomaly_json))
# @added 20200908 - Feature #3734: waterfall alerts
# Return to sender to alert
if added_by != 'ionosphere_learn':
remove_waterfall_alert(added_by, metric_timestamp, base_name)
logger.info('sending %s back to %s to alert' % (base_name, added_by))
# @added 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
return_to_sender_to_alert(added_by, metric_timestamp, base_name, anomalous_value, triggered_algorithms, full_duration, algorithms_run)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# The timeseries full_duration needs to be recorded to allow Mirage metrics to
# be profiled on Redis timeseries data at FULL_DURATION
# e.g. mirage.redis.24h.json
if training_metric:
logger.info('training metric - %s' % (base_name))
redis_anomaly_json = False
if added_by == 'mirage':
logger.info('checking training data Redis json is available')
# Always calculate features for both the SECOND_ORDER_RESOLUTION_SECONDS
# timeseries data and the FULL_DURATION Redis timeseries data.
# It is always preferable to create a features profile on a FULL_DURATION
# data set, unless the user is flagging the actual Mirage timeseries as
# not anomalous. In the Mirage context the not anomalous may often be more
# "visible" in the FULL_DURATION view and if so should be matched on the
# FULL_DURATION timeseries data, even if it is a Mirage metric.
# Features profiles can be created for a Mirage metric on both the
# FULL_DURATION and the SECOND_ORDER_RESOLUTION_SECONDS data sets, however
# only one should be needed.
# A features profile should always be created at the highest resolution
# possible, FULL_DURATION data, wherever possible.
try:
full_duration_hours = str(int(settings.FULL_DURATION / 3600))
redis_anomaly_json = '%s/%s.mirage.redis.%sh.json' % (metric_training_data_dir, base_name, full_duration_hours)
if os.path.isfile(redis_anomaly_json):
logger.info('training data Redis full duration ts json available - %s' % (redis_anomaly_json))
else:
logger.info('no training data Redis full duration json was not found - %s' % (redis_anomaly_json))
except:
logger.error(traceback.format_exc())
logger.error('error :: training data Redis full duration json was not found - %s' % (redis_anomaly_json))
# @added 20161209 - Branch #922: ionosphere
# Task #1658: Patterning Skyline Ionosphere
# Use SQLAlchemy, mysql.connector is still upstairs ^^ but starting the
# move to SQLAlchemy now that all the webapp Ionosphere SQLAlchemy
# patterns work and the database layout is defined we can begin on the
# data side. Ionosphere was put together backwards, like tsfresh was
# learnt. It was the people input first here in many ways, which is
# exactly how it was supposed to be.
# This is now the Ionosphere meat.
# Get a MySQL engine only if not training_metric
if not training_metric:
if not metrics_id:
logger.error('error :: metric id not known')
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return False
# @added 20170101 - Feature #1836: ionosphere - local features profiles disk cache
# Cache fp ids for 300 seconds?
# @added 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the acquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
logger.info('getting MySQL engine')
try:
engine, log_msg, trace = get_an_engine()
logger.info(log_msg)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine to get fp_ids')
if not engine:
logger.error('error :: engine not obtained to get fp_ids')
try:
ionosphere_table, log_msg, trace = ionosphere_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('ionosphere_table OK')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere_table meta for %s' % base_name)
# Determine the fp_ids that exist for the metric
fp_ids = []
fp_ids_found = False
# @added 20170108 - Feature #1842: Ionosphere - Graphite now graphs
# Added all_fp_ids so that we can handle multiple durations and not
# error and reminds me of the needed metrics by FULL_DURATION
# ionosphere.analyzer.unique_metrics (at FULL_DURATION)
# ionosphere.mirage.unique_metrics (NOT at FULL_DURATION)
all_fp_ids = []
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# Create the fp_ids_db_object so it is available to determine all
# the details of all features profiles for the metric, this has all
# the generations values available in it.
# Here we go! Learn!
# @modified 20170308 - Feature #1960: ionosphere_layers
# Not currently used - fp_ids_db_object
# fp_ids_db_object = None
# @added 20170306 - Feature #1960: ionosphere_layers
# Here we go, let us TEACH you properly.
# Set result to None here to fix a interpolation error below
result = None
fp_layers_ids = []
fp_layers_present = False
# @added 20190326 - Feature #2484: FULL_DURATION feature profiles
# After the features profile evaluations this fps_db_object will
# be used to determine what settings.FULL_DURATION features
# profiles need to be created for ionosphere_echo
fps_db_object = None
# @added 20190919 - Feature #2484: FULL_DURATION feature profiles
# Set both fp_count_with_echo and fp_count to 0 initially so that
# if the are echo fps, then the database can be updated with the
# fp_count_with_echo value for fp_count in the ionosphere_matched
# table
fp_count = 0
fp_count_with_echo = 0
try:
connection = engine.connect()
# @modified 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
# Order by the latest features profile, this also results in the
# layers ids being ordered by latest too.
# stmt = select([ionosphere_table]).where(ionosphere_table.c.metric_id == metrics_id)
# @modified 20210429 - Feature #4014: Ionosphere - inference
# Task #2446: Optimize Ionosphere
# For efficiency order by the last fp matched, if there are
# multiple features profiles and one matches, chances are
# that the metric may be sent through multiple times over
# a period. When a features profile matches, chances are it
# will match again multiple times for that incident period.
# stmt = select([ionosphere_table]).where(ionosphere_table.c.metric_id == metrics_id).order_by(desc(ionosphere_table.c.id))
stmt = select([ionosphere_table]).where(ionosphere_table.c.metric_id == metrics_id).order_by(desc(ionosphere_table.c.last_matched))
result = connection.execute(stmt)
# @added 20190326 - Feature #2484: FULL_DURATION feature profiles
# To be used for ionosphere_echo
fps_db_object = [{column: value for column, value in rowproxy.items()} for rowproxy in result]
# for row in result:
for row in fps_db_object:
if row['enabled'] != 1:
continue
if row['deleted'] == 1:
continue
fp_id = row['id']
# @added 20170306 - Feature #1960: ionosphere_layers
# Here we go, let us TEACH you properly
fp_layers_id = int(row['layers_id'])
if fp_layers_id > 0:
fp_layers_present = True
# @modified 20181013 - Feature #2430: Ionosphere validate learnt features profiles page
# Only add the fp_layers_id if > 0
# fp_layers_ids.append(fp_layers_id)
if fp_layers_id > 0:
if fp_layers_id not in fp_layers_ids:
fp_layers_ids.append(fp_layers_id)
# @added 20170108 - Feature #1842: Ionosphere - Graphite now graphs
# Added all_fp_ids
all_fp_ids.append(int(fp_id))
if int(row['full_duration']) == int(full_duration):
# @modified 20170116 - Feature #1854: Ionosphere learn - generations
# Handle ionosphere_learn
if added_by != 'ionosphere_learn':
fp_ids.append(int(fp_id))
logger.info('using fp id %s matched full_duration %s - %s' % (str(fp_id), str(full_duration), base_name))
else:
# @added 20170116 - Feature #1854: Ionosphere learn - generations
# If this is added_by ionosphere_learn the id is only
# added if the use_full_duration_days features profile
# is less than max_generations as if it is at the max
# then a new features profile cannot be created from it
# even if it were to match. Ionosphere learn is
# limited here on generation.
# Set the default as max e.g. not allowed
current_fp_generation = int(metric_max_generations)
try:
current_fp_generation = row['generation']
if int(current_fp_generation) < int(metric_max_generations):
fp_ids.append(int(fp_id))
logger.info(
'valid ionosphere_learn generation %s - fp id %s matched full_duration %s - %s' % (
str(current_fp_generation), str(fp_id),
str(full_duration), base_name))
else:
logger.info(
'ionosphere_learn cannot check due to max_generations of %s would be exceeded, current generation %s - fp id %s matched full_duration %s - %s' % (
str(metric_max_generations), str(current_fp_generation), str(fp_id),
str(full_duration), base_name))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: ionosphere_learn check could not determine the fp generation of fp id %s from the row object for %s' % (
str(fp_id), base_name))
else:
# @modified 20200717 - Bug #3382: Prevent ionosphere.learn loop edge cases
# Added the fp full_duration for clarity sake
# logger.info('not using fp id %s not matched full_duration %s - %s' % (str(fp_id), str(full_duration), base_name))
logger.info('not using fp id %s of full_duration %s as does not match full_duration %s - %s' % (
str(fp_id), str(row['full_duration']), str(full_duration), base_name))
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# Create the fp_ids_db_object so it is available throughout
# Here we go! Learn!
# @modified 20170308 - Feature #1960: ionosphere_layers
# Not currently used - fp_ids_db_object
# fp_ids_db_object = row
connection.close()
fp_count = len(fp_ids)
logger.info('determined %s fp ids for %s' % (str(fp_count), base_name))
# @added 20170309 - Feature #1960: ionosphere_layers
fp_layers_count = len(fp_layers_ids)
logger.info('determined %s layers ids for %s' % (str(fp_layers_count), base_name))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine fp ids from DB for %s' % base_name)
fp_count = 0
# @added 20170309 - Feature #1960: ionosphere_layers
fp_layers_count = 0
# @added 20170306 - Feature #1960: ionosphere_layers
# Corrected the interpolation of the fp_ids_db_object above where it
# was set to the last row only, however it was not used anyway.
# Here we go, let us TEACH you properly. We only evaluate
# @modified 20170308 - Feature #1960: ionosphere_layers
# Not currently used - fp_ids_db_object
# if result:
# fp_ids_db_object = result
if len(fp_ids) == 0:
logger.info('there are no fp ids that match full duration for %s' % base_name)
# @added 20200908 - Feature #3734: waterfall alerts
# If any layers are found but any fps for analysis have been
# discarded because of echo rate limiting or they do not match
# the full duration, still check any enabled layers
if fp_layers_count:
logger.info('there are %s fp layers for %s' % (str(fp_layers_count), base_name))
fp_ids_found = True
else:
fp_ids_found = True
if not fp_ids_found:
logger.info('no fp ids were found for %s at %s' % (base_name, str(full_duration)))
# @modified 20170108 - Feature #1842: Ionosphere - Graphite now graphs
# Use all_fp_ids so that we can handle multiple durations
# fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if len(all_fp_ids) == 0:
logger.error('error :: Ionosphere is enabled on %s but has no feature_profiles' % (base_name))
# @added 20200516 - Bug #3546: Change ionosphere_enabled if all features profiles are disabled
# If there are no features profiles enabled for the metric
# send it back to the source to alert and update the DB with
# ionosphere_enabled=0, it has been willy nillied, all its
# fps have been disabled. This has the ramification that
# any layers the metric has will be disabled as well
if added_by != 'ionosphere_learn':
logger.info('%s has been willy nillied, all its features profiles have been disabled, but it is still flagged as ionosphere_enabled' % (base_name))
logger.info('sending %s back to %s to alert' % (base_name, added_by))
# @modified 20200908 - Feature #3734: waterfall alerts
# Use common return_to_sender_to_alert function
# cache_key = 'ionosphere.%s.alert.%s.%s' % (added_by, metric_timestamp, base_name)
# cache_key_value = [float(anomalous_value), base_name, int(metric_timestamp), triggered_algorithms, full_duration]
# try:
# self.redis_conn.setex(cache_key, 300, str(cache_key_value))
# logger.info('added Redis alert key - %s - %s' % (
# cache_key, str(cache_key_value)))
# except:
# logger.error(traceback.format_exc())
# logger.error(
# 'error :: failed to add Redis key - %s - [%s, \'%s\', %s, %s, %s]' %
# (cache_key, str(anomalous_value), base_name, str(int(metric_timestamp)),
# str(triggered_algorithms), str(full_duration)))
remove_waterfall_alert(added_by, metric_timestamp, base_name)
# @modified 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
return_to_sender_to_alert(added_by, metric_timestamp, base_name, anomalous_value, triggered_algorithms, full_duration, algorithms_run)
# Update DB as to the fact that the metric is an ionosphere
# metric, all its fps have been disabled, it has been willy
# nillied
try:
metrics_table, log_msg, trace = metrics_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('ionosphere_table OK')
connection = engine.connect()
connection.execute(
metrics_table.update(
metrics_table.c.id == metrics_id).
values(ionosphere_enabled=0))
connection.close()
logger.info('updated %s to ionosphere_enabled=0' % (
base_name))
logger.info('%s has been unwilly nillied' % (base_name))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not update matched_count and last_matched for %s ' % str(fp_id))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
else:
self.remove_metric_check_file(str(metric_check_file))
# @added 20200908 - Feature #3734: waterfall alerts
# Return to sender to alert
if added_by != 'ionosphere_learn':
remove_waterfall_alert(added_by, metric_timestamp, base_name)
logger.info('sending %s back to %s to alert' % (base_name, added_by))
# @added 20200930 - Feature #3734: waterfall alerts
# Send to Panorama as Mirage and Analyzer will not.
# @modified 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
return_to_sender_to_alert(added_by, metric_timestamp, base_name, anomalous_value, triggered_algorithms, full_duration, algorithms_run)
if engine:
engine_disposal(engine)
return
# @added 20161221 - TODO: why not calculate the features of every
# anomaly so that the user does not have to do it and wait for the
# features to be calculated.
# Check the features were calculated by the webapp
calculated_feature_file = '%s/%s.tsfresh.input.csv.features.transposed.csv' % (metric_training_data_dir, base_name)
calculated_feature_file_found = False
if os.path.isfile(calculated_feature_file):
logger.info('calculated features available - %s' % (calculated_feature_file))
calculated_feature_file_found = True
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# ionosphere_learn should always provide the features profile csv
# Ionosphere does not create features profiles for learn, it only
# checks them.
# Here we go! Learn!
if added_by == 'ionosphere_learn':
if not calculated_feature_file_found:
logger.error('error :: no ionosphere_learn calculated_feature_file file found - %s' % calculated_feature_file)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
if not calculated_feature_file_found:
if training_metric:
# Allow Graphite resources to be created if they are not an alert
# was not sent therefore features do not need to be calculated
check_time = int(time())
check_age = check_time - int(added_at)
if check_age < 5:
sleep(5)
graphite_file_count = len([f for f in os.listdir(metric_training_data_dir)
if f.endswith('.png') and os.path.isfile(os.path.join(metric_training_data_dir, f))])
if graphite_file_count == 0:
logger.info('not calculating features no anomaly Graphite alert resources created in %s' % (metric_training_data_dir))
self.remove_metric_check_file(str(metric_check_file))
# @added 20200908 - Feature #3734: waterfall alerts
# Return to sender to alert
if added_by != 'ionosphere_learn':
remove_waterfall_alert(added_by, metric_timestamp, base_name)
logger.info('sending %s back to %s to alert' % (base_name, added_by))
# @modified 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
return_to_sender_to_alert(added_by, metric_timestamp, base_name, anomalous_value, triggered_algorithms, full_duration, algorithms_run)
if engine:
engine_disposal(engine)
return
else:
logger.info('anomaly Graphite alert resources found in %s' % (metric_training_data_dir))
# @added 20210412 - Feature #4014: Ionosphere - inference
# Branch #3590: inference
matched_motifs = {}
fps_checked_for_motifs = []
# @modified 20210426 - Feature #4014: Ionosphere - inference
# Do not run inference on ionosphere_learn jobs
if not training_metric and not added_by == 'ionosphere_learn':
if IONOSPHERE_INFERENCE_MOTIFS_ENABLED and fp_ids:
try:
logger.info('calling inference to find matching similar motif')
start_inference = timer()
matched_motifs, fps_checked_for_motifs = ionosphere_motif_inference(base_name, metric_timestamp)
end_inference = timer()
logger.info('inference found %s matching similar motifs, checked %s fps in %6f seconds' % (
str(len(matched_motifs)), str(len(fps_checked_for_motifs)),
(end_inference - start_inference)))
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: failed calling ionosphere_motif_inference - %s' % e)
matched_motifs = {}
fps_checked_for_motifs = []
# @added 20210412 - Feature #4014: Ionosphere - inference
# Branch #3590: inference
# Update the motif related columns of all the ionosphere fps
# that were checked
if len(fps_checked_for_motifs) > 0:
motif_checked_timestamp = int(time())
motif_checks_updated_count = 0
for fp_checked_for_motifs in fps_checked_for_motifs:
try:
connection = engine.connect()
connection.execute(
ionosphere_table.update(
ionosphere_table.c.id == fp_checked_for_motifs).
values(motif_checked_count=ionosphere_table.c.motif_checked_count + 1,
motif_last_checked=motif_checked_timestamp))
connection.close()
motif_checks_updated_count += 1
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: could not update motif_checked_count and motif_last_checked for %s - %s' % (str(fp_checked_for_motifs), e))
logger.info('updated the motif_checked_count column and the motif_last_checked column to %s in ionosphere for %s fps' % (
str(motif_checked_timestamp), str(motif_checks_updated_count)))
if matched_motifs:
# Here we should update DB, clean up and return before incurring any
# features profiles calculations (unless in testing mode)
ordered_matched_motifs = []
matching_motif = []
for motif_id in list(matched_motifs.keys()):
try:
motif_metric_id = matched_motifs[motif_id]['metric_id']
motif_fp_id = matched_motifs[motif_id]['fp_id']
motif_fp_index = matched_motifs[motif_id]['index']
motif_dist = matched_motifs[motif_id]['distance']
motif_size = matched_motifs[motif_id]['size']
motif_matched_timestamp = matched_motifs[motif_id]['timestamp']
match_type_id = matched_motifs[motif_id]['type_id']
match_type = matched_motifs[motif_id]['type']
motif_sequence = matched_motifs[motif_id]['motif_sequence']
# @added 20210423 - Feature #4014: Ionosphere - inference
# Compute the area using the composite trapezoidal rule.
try:
motif_area = matched_motifs[motif_id]['motif_area']
except Exception as e:
dev_null = e
motif_area = 0
try:
fp_motif_area = matched_motifs[motif_id]['fp_motif_area']
except Exception as e:
dev_null = e
fp_motif_area = 0
# @added 20210427 - Feature #4014: Ionosphere - inference
# Compute the area using the composite trapezoidal rule.
try:
area_percent_diff = matched_motifs[motif_id]['area_percent_diff']
except Exception as e:
dev_null = e
area_percent_diff = 0
# @added 20210428 - Feature #4014: Ionosphere - inference
# Add time taken and fps checked
try:
fps_checked = matched_motifs[motif_id]['fps_checked']
except Exception as e:
dev_null = e
fps_checked = 0
try:
runtime = matched_motifs[motif_id]['runtime']
except Exception as e:
dev_null = e
runtime = 0
ordered_matched_motifs.append([motif_metric_id, motif_fp_id, motif_fp_index, motif_dist, motif_size, motif_matched_timestamp, match_type_id, match_type, motif_sequence, motif_area, fp_motif_area, area_percent_diff, fps_checked, runtime])
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to determine ordered_matched_motifs item')
# Sort by the best dist
if ordered_matched_motifs:
sorted_matched_motifs = sorted(ordered_matched_motifs, key=lambda x: x[3])
matching_motif = sorted_matched_motifs[0]
if matching_motif:
if not IONOSPHERE_INFERENCE_MOTIFS_TEST_ONLY:
redis_set = 'ionosphere.not_anomalous'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
redis_set = 'ionosphere.features_profiles_checked'
data = str(matching_motif[1])
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
if not engine:
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine to update matched details in db for %s' % (str(fp_id)))
if not engine:
logger.error('error :: engine not obtained to update matched details in db for %s' % (str(fp_id)))
try:
ionosphere_matched_table, log_msg, trace = ionosphere_matched_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('motifs_matched_table OK')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere_matched_table meta for %s' % base_name)
# Add all motif_matches to the DB
try:
motifs_matched_table, log_msg, trace = motifs_matched_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('motifs_matched_table OK')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get motifs_matched_table meta for %s' % base_name)
# @added 20210414 - Feature #4014: Ionosphere - inference
# Branch #3590: inference
# Store the not anomalous motifs
try:
not_anomalous_motifs_table, log_msg, trace = not_anomalous_motifs_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('not_anomalous_motifs_table OK')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get not_anomalous_motifs_table meta for %s' % base_name)
new_motifs_matched_ids = []
for matched_motif in ordered_matched_motifs:
primary_match = 0
if matching_motif == matched_motif:
primary_match = 1
# Only a single ionosphere_matched record is created for
# the most similar motif (primary_match=1) HOWEVER
# DO NOTE that EVERY motif match that is surfaced
# in a run is recorded in the motifs_matched table.
# ordered_matched_motifs.append([motif_metric_id, motif_fp_id, motif_fp_index, motif_dist, motif_size, motif_matched_timestamp, match_type_id, match_type, motif_sequence, motif_area, fp_motif_area, area_percent_diff])
try:
connection = engine.connect()
ins = motifs_matched_table.insert().values(
metric_id=int(matched_motif[0]),
fp_id=int(matched_motif[1]),
metric_timestamp=int(matched_motif[5]),
primary_match=primary_match,
index=int(matched_motif[2]),
size=int(matched_motif[4]),
distance=float(matched_motif[3]),
type_id=int(matched_motif[6]),
# @added 20210427 - Feature #4014: Ionosphere - inference
# Compute the area using the composite trapezoidal rule.
motif_area=float(matched_motif[9]),
fp_motif_area=float(matched_motif[10]),
area_percent_diff=float(matched_motif[11]),
# @added 20210428 - Feature #4014: Ionosphere - inference
# Add time taken and fps checked
fps_checked=int(matched_motif[12]),
runtime=float(matched_motif[13]))
result = connection.execute(ins)
connection.close()
new_motif_matched_id = result.inserted_primary_key[0]
new_motifs_matched_ids.append(new_motif_matched_id)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not insert motifs_matched record into DB: %s' % str(matched_motif))
# @added 20210414 - Feature #4014: Ionosphere - inference
# Branch #3590: inference
# Store the not anomalous motifs
# @modified 20210419 - Feature #4014: Ionosphere - inference
# Only store motif data in the database if specifically
# enabled, inference.matched_motifs.dict file is always
# saved to the training_data dir
if new_motif_matched_id and IONOSPHERE_INFERENCE_STORE_MATCHED_MOTIFS:
new_motif_sequence_ids = []
try:
connection = engine.connect()
for motif_sequence_timestamp, motif_sequence_value in matched_motif[8]:
try:
ins = not_anomalous_motifs_table.insert().values(
motif_id=int(new_motif_matched_id),
timestamp=int(motif_sequence_timestamp),
value=motif_sequence_value)
result = connection.execute(ins)
new_motif_sequence_id = result.inserted_primary_key[0]
new_motif_sequence_ids.append(new_motif_sequence_id)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not insert %s, %s into not_anomalous_motifs for matched_motif_id: %s' % (
str(motif_sequence_timestamp),
str(motif_sequence_value),
str(new_motif_matched_id)))
connection.close()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not insert timestamps and values for into not_anomalous_motifs table: %s' % (
str(new_motif_matched_id)))
logger.info('inserted %s new motif sequence records into the not_anomalous_motifs table for matched_motif_id: %s' % (
str(len(new_motif_sequence_ids)), str(new_motif_matched_id)))
# If in testing mode no ionosphere tables are updated
if not IONOSPHERE_INFERENCE_MOTIFS_TEST_ONLY:
if matching_motif == matched_motif:
# Only a single ionosphere_matched record is created for
# the most similar motif (primary_match=1) HOWEVER
# DO NOTE that EVERY motif match that is surfaced
# in a run is recorded in the motifs_matched table.
new_matched_id = 0
try:
connection = engine.connect()
ins = ionosphere_matched_table.insert().values(
fp_id=int(matching_motif[1]),
metric_timestamp=int(matching_motif[5]),
motifs_matched_id=int(new_motif_matched_id))
result = connection.execute(ins)
connection.close()
new_matched_id = result.inserted_primary_key[0]
logger.info('new ionosphere_matched id: %s (for matched motif with matched_motif_id: %s' % (
str(new_matched_id), str(new_motif_matched_id)))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: could not create ionosphere_matched record for fp id %s and motif match with id %s for matching_motif: %s' % (
str(fp_id), str(new_motif_matched_id),
str(matching_motif)))
# ONLY fp of the most similar motif match gets as having
# been checked and matched
if new_matched_id:
# Update motif_matched_count in ionosphere_table
motif_matched_timestamp = int(time())
try:
connection = engine.connect()
connection.execute(
ionosphere_table.update(
ionosphere_table.c.id == matching_motif[1]).
values(motif_matched_count=ionosphere_table.c.motif_matched_count + 1,
motif_last_matched=motif_matched_timestamp))
connection.close()
logger.info('updated motif_matched_count and motif_last_matched for fp_id %s for dur to matched_motif_id: %s' % (
str(matching_motif[1]), str(new_matched_id)))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not update motif_matched_count and motif_last_matched for fp_id %s for dur to matched_motif_id: %s' % (
str(matching_motif[1]), str(new_matched_id)))
# @added 20210429 - Feature #3994: Panorama - mirage not anomalous
# A hash is added to the ionosphere.panorama.not_anomalous_metrics for
# every metric that is found to be not anomalous.
try:
add_not_anomalous_to_redis_hash(base_name, metric_timestamp, anomalous_value, full_duration)
except Exception as e:
logger.error('error :: failed calling add_not_anomalous_to_redis_hash - %s' % e)
if not IONOSPHERE_INFERENCE_MOTIFS_TEST_ONLY:
profile_id_matched_file = '%s/%s.profile_id_matched.fp_id' % (
metric_training_data_dir, base_name)
if not os.path.isfile(profile_id_matched_file):
try:
write_data_to_file(skyline_app, profile_id_matched_file, 'w', str(matching_motif[1]))
logger.info('added matched fp_id %s - %s' % (
str(matching_motif[1]), profile_id_matched_file))
except:
logger.info(traceback.format_exc())
logger.error('error :: adding motif matched fp_id %s - %s' % (
str(matching_motif[1]), profile_id_matched_file))
remove_waterfall_alert(added_by, metric_timestamp, base_name)
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
# Continue with normal features profile matching if no motifs were matched
context = skyline_app
f_calc = None
if not calculated_feature_file_found:
try:
fp_csv, successful, fp_exists, fp_id, log_msg, traceback_format_exc, f_calc = calculate_features_profile(skyline_app, metric_timestamp, base_name, context)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to calculate features')
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
if os.path.isfile(calculated_feature_file):
logger.info('calculated features available - %s' % (calculated_feature_file))
calculated_feature_file_found = True
if f_calc:
send_metric_name = '%s.features_calculation_time' % skyline_app_graphite_namespace
f_calc_time = '%.2f' % float(f_calc)
try:
send_graphite_metric(skyline_app, send_metric_name, f_calc_time)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to send calculate features')
if training_metric:
logger.info('training metric done')
# @added 20200908 -
remove_waterfall_alert(added_by, metric_timestamp, base_name)
self.remove_metric_check_file(str(metric_check_file))
# TODO: make ionosphere more useful, compare any other
# available training_metric profiles here and match, not in the
# db context, in the training context.
if engine:
engine_disposal(engine)
return
if not calculated_feature_file_found:
logger.error('error :: calculated features file not available - %s' % (calculated_feature_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @modified 20161213 - Branch #1790: test_tsfresh
# TODO: Match the test_tsfresh method
# Create an array of the calculated features
calculated_features = []
if calculated_feature_file_found:
calculated_features = get_calculated_features(calculated_feature_file)
if len(calculated_features) == 0:
logger.error('error :: no calculated features were determined from - %s' % (calculated_feature_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
else:
logger.info('%s calculated features determined' % (str(len(calculated_feature_file))))
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
fp_checked = 0
# @modified 20181014 - Feature #2430: Ionosphere validate learnt features profiles page
# layers_checked = 0
layers_checked_count = 0
# @added 20190314 - Feature #2484: FULL_DURATION feature profiles
# Here we add the bifurcation to also create a features
# profile at FULL_DURATION for all Mirage metrics. With a
# view to increase the number of matches trained metric
# achieve by also allowing for the creation and comparing of
# the FULL_DURATION features profiles as well.
echo_check = False
echo_calculated_feature_file = False
echo_calculated_feature_file_found = False
echo_calculated_features = []
echo_fp_ids = []
echo_anomalous_timeseries = None
if added_by == 'mirage':
try:
echo_enabled = settings.IONOSPHERE_ECHO_ENABLED
except:
echo_enabled = False
if echo_enabled:
echo_check = True
# @added 20200714 - Bug #3644: Do not apply ionosphere_busy to batch processing metrics
# Feature #3480: batch_processing
# Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# In the batch processing context do not apply the alternation between
# normal Ionosphere Mirage features profile checks and Ionosphere echo
# features profile checks when ionosphere_busy is set to True as it
# results in false positives on batch processing metrics where one check
# matches and the next does not, then the next does.
batch_metric = False
if echo_check and BATCH_PROCESSING:
# Batch processing metric
try:
batch_metric = is_batch_metric(skyline_app, base_name)
except:
batch_metric = False
if batch_metric and ionosphere_busy:
ionosphere_busy = False
logger.info('batch processing metric, ionosphere_busy has been changed from True to False to prevent switching between Mirage and echo fps')
# @added 20190403 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# If there are more than 4 metric check files, alternate between normal
# Ionosphere Mirage features profile checks and Ionosphere echo features
# profile checks.
if echo_check:
if ionosphere_busy:
# Check the ionosphere_echo metric Redis keys to see which check
# to run, ionosphere or ionosphere_echo. If Ionosphere is busy,
# Ionosphere will alternate between normal Ionosphere features
# profiles (Mirage duration) and Ionosphere echo features
# profiles (FULL_DURATION) comparison.
echo_ionosphere_check_cache_key = 'ionosphere_echo.ionosphere.check.%s' % base_name
echo_ionosphere_check_key = False
try:
echo_ionosphere_check_key = self.redis_conn.get(echo_ionosphere_check_cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
echo_ionosphere_echo_check_cache_key = 'ionosphere_echo.echo.check.%s' % base_name
echo_ionosphere_echo_check_key = False
try:
echo_ionosphere_echo_check_key = self.redis_conn.get(echo_ionosphere_echo_check_cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
create_ionosphere_echo_check_key = False
remove_ionosphere_echo_check_key = False
# If neither the ionosphere or the ionosphere_echo key exist do
# only check ionosphere
if not echo_ionosphere_check_key:
if not echo_ionosphere_echo_check_key:
echo_check = False
logger.info('ionosphere_busy - only running normal Mirage feature profiles checks, skipping ionosphere_echo checks')
create_ionosphere_echo_check_key = echo_ionosphere_check_cache_key
# If the ionosphere_echo key exists only check ionosphere
if echo_ionosphere_echo_check_key:
echo_check = False
logger.info('ionosphere_busy - only running normal Mirage feature profiles checks, skipping ionosphere_echo checks')
create_ionosphere_echo_check_key = echo_ionosphere_check_cache_key
remove_ionosphere_echo_check_key = echo_ionosphere_echo_check_cache_key
# If ionosphere_echo key exists only check ionosphere
if echo_ionosphere_check_key:
echo_check = True
logger.info('ionosphere_busy - skipping the normal Mirage feature profiles checks as run last time and running ionosphere_echo checks this time')
# Remove the Mirage features profiles from the
fp_ids = []
logger.info('ionosphere_busy - removed %s Mirage feature profile ids from fp_ids' % str(fp_count))
create_ionosphere_echo_check_key = echo_ionosphere_echo_check_cache_key
remove_ionosphere_echo_check_key = echo_ionosphere_check_cache_key
if remove_ionosphere_echo_check_key:
try:
self.redis_conn.delete(remove_ionosphere_echo_check_key)
logger.info(
'deleted Redis check key - %s' % (remove_ionosphere_echo_check_key))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed to delete Redis check key - %s' % (remove_ionosphere_echo_check_key))
if create_ionosphere_echo_check_key:
try:
key_created_at = int(time())
self.redis_conn.setex(
# @modified 20190412 - Task #2824: Test redis-py upgrade
# Task #2926: Update dependencies
# create_ionosphere_echo_check_key, 300, [key_created_at])
create_ionosphere_echo_check_key, 300, key_created_at)
logger.info(
'created Redis check key - %s' % (create_ionosphere_echo_check_key))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed to create Redis check key - %s' % (create_ionosphere_echo_check_key))
if echo_check:
try:
if fps_db_object:
for row in fps_db_object:
# @added 20201009 - Bug #3782: Exclude disabled echo features profile
if row['enabled'] != 1:
continue
if row['deleted'] == 1:
continue
if int(row['full_duration']) == int(settings.FULL_DURATION):
fp_ids.append(int(row['id']))
echo_fp_ids.append(int(row['id']))
logger.info('appending ionosphere_echo fp id %s matched full_duration of %s - %s' % (str(row['id']), str(settings.FULL_DURATION), base_name))
fp_count_with_echo = len(fp_ids)
echo_fp_count = len(echo_fp_ids)
if echo_fp_count == 0:
echo_check = False
if echo_fp_count > 0:
logger.info('added an additional %s echo fp ids for %s' % (str(echo_fp_count), base_name))
logger.info('determined a total of %s fp ids (incl. echo) for %s' % (str(fp_count_with_echo), base_name))
echo_calculated_feature_file = '%s/%s.echo.tsfresh.input.csv.features.transposed.csv' % (metric_training_data_dir, base_name)
if os.path.isfile(echo_calculated_feature_file):
logger.info('echo calculated features available - %s' % (echo_calculated_feature_file))
echo_calculated_feature_file_found = True
else:
use_context = 'ionosphere_echo_check'
f_calc = None
try:
fp_csv, successful, fp_exists, fp_id, log_msg, traceback_format_exc, f_calc = calculate_features_profile(skyline_app, metric_timestamp, base_name, use_context)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to calculate features')
if os.path.isfile(echo_calculated_feature_file):
logger.info('echo calculated features available - %s' % (echo_calculated_feature_file))
echo_calculated_feature_file_found = True
echo_calculated_features = []
if echo_calculated_feature_file_found:
try:
echo_calculated_features = get_calculated_features(echo_calculated_feature_file)
except:
# 20190412 - just for debug
logger.error(traceback.format_exc())
logger.error('error :: ionosphere_echo_check no echo_calculated_features were determined')
echo_calculated_features = False
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to process echo')
# Compare calculated features to feature values for each fp id
not_anomalous = False
if calculated_feature_file_found:
for fp_id in fp_ids:
if not metrics_id:
logger.error('error :: metric id not known')
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return False
# @added 20190404 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# If the Ionosphere features profile checks are approaching the
# ionosphere_max_runtime, skip the remaining checks.
time_now_check = int(time())
# Allow 5 seconds for layers checks to be done
max_runtime_tolereance = ionosphere_max_runtime - 5
running_for = time_now_check - check_process_start
if running_for >= max_runtime_tolereance:
logger.info('features profile checks have been running for %s seconds, the ionosphere_max_runtime is about to be breached, skipping remaining features profile checks' % str(running_for))
break
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
check_type = 'ionosphere'
if echo_check:
for echo_fp_id in echo_fp_ids:
if fp_id == echo_fp_id:
check_type = 'ionosphere_echo_check'
if check_type == 'ionosphere_echo_check':
if not echo_calculated_features:
continue
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
fp_checked += 1
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis set instead of Manager().list to reduce memory
# self.features_profiles_checked.append(fp_id)
redis_set = 'ionosphere.features_profiles_checked'
data = str(fp_id)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
features_count = None
fp_features = []
# Get features for fp_id from z_fp_<metric_id> table where the
# features profile is the same full_duration
metric_fp_table = 'z_fp_%s' % str(metrics_id)
# @added 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the accquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine for feature_id and values from %s' % metric_fp_table)
if not engine:
logger.error('error :: engine not obtained for feature_id and values from %s' % metric_fp_table)
# @added 20170809 - Task #2132: Optimise Ionosphere DB usage
# First check to determine if the fp_id has data in memcache
# before querying the database
fp_id_feature_values = None
if settings.MEMCACHE_ENABLED:
    fp_id_feature_values_key = 'fp.id.%s.feature.values' % str(fp_id)
    raw_feature_values = None
    try:
        # @modified 20191029 - Task #3304: py3 - handle pymemcache bytes not str
        # pymemcache returns None on a cache miss rather than raising, so
        # fetch once and only decode when data was actually returned,
        # instead of relying on None.decode('utf-8') raising AttributeError
        # to reach the except branch
        raw_feature_values = self.memcache_client.get(fp_id_feature_values_key)
    except:
        # A genuine client/transport error - treat the same as a miss
        raw_feature_values = None
    if raw_feature_values is None:
        # @modified 20200501 - Branch #3262: py3
        # This is not an error, the key is expected not to exist in
        # memcache if it has not been used in a while
        logger.info('did not get %s from memcache, will query DB' % fp_id_feature_values_key)
        try:
            self.memcache_client.close()
        except:
            # @modified 20170913 - Task #2160: Test skyline with bandit
            # pass
            logger.error('error :: failed to close memcache_client')
    elif python_version == 2:
        fp_id_feature_values = raw_feature_values
    else:
        fp_id_feature_values = raw_feature_values.decode('utf-8')
if fp_id_feature_values:
    fp_features = literal_eval(fp_id_feature_values)
    logger.info('using memcache %s key data' % fp_id_feature_values_key)
if not fp_features:
try:
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
stmt = 'SELECT feature_id, value FROM %s WHERE fp_id=%s' % (metric_fp_table, str(fp_id)) # nosec
connection = engine.connect()
for row in engine.execute(stmt):
fp_feature_id = int(row['feature_id'])
fp_value = float(row['value'])
fp_features.append([fp_feature_id, fp_value])
connection.close()
features_count = len(fp_features)
logger.info('determined %s features for fp_id %s' % (str(features_count), str(fp_id)))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine feature_id, value from %s' % metric_fp_table)
if fp_features and settings.MEMCACHE_ENABLED:
fp_id_feature_values_key = 'fp.id.%s.feature.values' % str(fp_id)
try:
self.memcache_client.set(fp_id_feature_values_key, fp_features)
logger.info('populated memcache %s key' % fp_id_feature_values_key)
except:
logger.error('error :: failed to set %s in memcache' % fp_id_feature_values_key)
# @added 20170809 - Task #2132: Optimise Ionosphere DB usage
if settings.MEMCACHE_ENABLED:
try:
self.memcache_client.close()
except:
# @modified 20170913 - Task #2160: Test skyline with bandit
# pass
logger.error('error :: failed to close memcache_client')
# @added 20170107 - Feature #1852: Ionosphere - features_profile matched graphite graphs
# Feature #1844: ionosphere_matched DB table
# Added the calculated features sum for verification purposes
all_calc_features_sum_list = []
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
if check_type == 'ionosphere':
use_calculated_features = calculated_features
if check_type == 'ionosphere_echo_check':
use_calculated_features = echo_calculated_features
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
# for feature_name, calc_value in calculated_features:
for feature_name, calc_value in use_calculated_features:
all_calc_features_sum_list.append(float(calc_value))
all_calc_features_sum = sum(all_calc_features_sum_list)
# Convert feature names in calculated_features to their id
logger.info('converting tsfresh feature names to Skyline feature ids')
calc_features_by_id = []
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
# for feature_name, calc_value in calculated_features:
for feature_name, calc_value in use_calculated_features:
for skyline_feature_id, name in TSFRESH_FEATURES:
if feature_name == name:
calc_features_by_id.append([skyline_feature_id, float(calc_value)])
# Determine what features each data has, extract only values for
# common features.
logger.info('determining common features')
relevant_fp_feature_values = []
relevant_calc_feature_values = []
for skyline_feature_id, calc_value in calc_features_by_id:
for fp_feature_id, fp_value in fp_features:
if skyline_feature_id == fp_feature_id:
relevant_fp_feature_values.append(fp_value)
relevant_calc_feature_values.append(calc_value)
# Determine the sum of each set
relevant_fp_feature_values_count = len(relevant_fp_feature_values)
relevant_calc_feature_values_count = len(relevant_calc_feature_values)
if relevant_fp_feature_values_count != relevant_calc_feature_values_count:
logger.error('error :: mismatch in number of common features')
logger.error('error :: relevant_fp_feature_values_count - %s' % str(relevant_fp_feature_values_count))
logger.error('error :: relevant_calc_feature_values_count - %s' % str(relevant_calc_feature_values_count))
continue
else:
logger.info('comparing on %s common features' % str(relevant_fp_feature_values_count))
if relevant_fp_feature_values_count == 0:
logger.error('error :: relevant_fp_feature_values_count is zero')
continue
# Determine the sum of each set
sum_fp_values = sum(relevant_fp_feature_values)
sum_calc_values = sum(relevant_calc_feature_values)
logger.info(
'sum of the values of the %s common features in features profile - %s' % (
str(relevant_fp_feature_values_count), str(sum_fp_values)))
logger.info(
'sum of the values of the %s common features in the calculated features - %s' % (
str(relevant_calc_feature_values_count), str(sum_calc_values)))
# Determine whether each set is positive or negative
# # if the same carry on
# # if both negative, make then both positive
# Sum fp values, Sum calculated - handle negatives like features_sum :: -3389570699080000.0000000000
# Determine whether each set is positive or negative
# # if the same carry on
# # if both negative, make then both positive postive_sums
fp_sum_array = [sum_fp_values]
calc_sum_array = [sum_calc_values]
percent_different = 100
# @modified 20210425 - Task #4030: refactoring
# Feature #4014: Ionosphere - inference
# Use the common function added
# sums_array = np.array([sum_fp_values, sum_calc_values], dtype=float)
# try:
# calc_percent_different = np.diff(sums_array) / sums_array[:-1] * 100.
# percent_different = calc_percent_different[0]
# logger.info('percent_different between common features sums - %s' % str(percent_different))
# except:
# logger.error(traceback.format_exc())
# logger.error('error :: failed to calculate percent_different')
# continue
try:
percent_different = get_percent_different(sum_fp_values, sum_calc_values, True)
logger.info('percent_different between common features sums - %s' % str(percent_different))
except Exception as e:
logger.error('error :: failed to calculate percent_different - %s' % e)
continue
almost_equal = None
try:
np.testing.assert_array_almost_equal(fp_sum_array, calc_sum_array)
almost_equal = True
except:
almost_equal = False
if almost_equal:
not_anomalous = True
# @modified 20170118 - Bug #1860: Debug learn not matched in ionosphere
# This broke it, no variable was interpolated
# logger.info('common features sums are almost equal, not anomalous' % str(relevant_fp_feature_values_count))
logger.info('common features sums are almost equal, not anomalous')
# @added 20161229 - Feature #1830: Ionosphere alerts
# Update the features profile checked count and time
logger.info('updating checked details in db for %s' % (str(fp_id)))
# update matched_count in ionosphere_table
checked_timestamp = int(time())
# @added 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the accquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine to update checked details in db for %s' % (str(fp_id)))
if not engine:
logger.error('error :: engine not obtained to update checked details in db for %s' % (str(fp_id)))
try:
connection = engine.connect()
connection.execute(
ionosphere_table.update(
ionosphere_table.c.id == fp_id).
values(checked_count=ionosphere_table.c.checked_count + 1,
last_checked=checked_timestamp))
connection.close()
logger.info('updated checked_count for %s' % str(fp_id))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not update checked_count and last_checked for %s ' % str(fp_id))
# if diff_in_sums <= 1%:
if percent_different < 0:
new_pdiff = percent_different * -1
percent_different = new_pdiff
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
if check_type == 'ionosphere':
use_percent_similar = float(settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR)
if check_type == 'ionosphere_echo_check':
try:
use_percent_similar = float(settings.IONOSPHERE_ECHO_FEATURES_PERCENT_SIMILAR)
except:
use_percent_similar = 2.0
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
# if percent_different < settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR:
if percent_different < use_percent_similar:
not_anomalous = True
# log
logger.info('not anomalous - features profile match - %s' % base_name)
logger.info(
'calculated features sum are within %s percent of fp_id %s with %s, not anomalous' %
(str(use_percent_similar),
str(fp_id), str(percent_different)))
if check_type == 'ionosphere_echo_check':
logger.info('ionosphere_echo_check - not anomalous with fp id %s for %s' % (str(fp_id), base_name))
# @added 20180617 - Feature #2404: Ionosphere - fluid approximation
# Now if not matched use Min-Max scaling as per
# http://sebastianraschka.com/Articles/2014_about_feature_scaling.html#numpy
# Min-Max scale the fp time series z_ts_<metric_id> SELECT WHERE fp_id
# or from memcache to create minmax_fp_ts
# Min-Max scale the current time series to create minmax_anomalous_ts
# Create features profiles for minmax_fp_ts
# Create features profiles for minmax_anomalous_ts
try:
minmax_scaling_enabled = settings.IONOSPHERE_MINMAX_SCALING_ENABLED
except:
minmax_scaling_enabled = False
minmax_not_anomalous = False
minmax_check = False
minmax = 0
if not not_anomalous:
if minmax_scaling_enabled:
minmax_check = True
if added_by == 'ionosphere_learn' and minmax_check:
minmax_check = False
logger.info('ionosphere_learn job not minmax scaling')
if minmax_check:
logger.info('running minmax scaling')
# First check to determine if the z_ts_<mertic_id> for the fp
# has data in memcache before querying the database
metric_fp_ts_table = 'z_ts_%s' % str(metrics_id)
fp_id_metric_ts = []
if settings.MEMCACHE_ENABLED:
# @added 20200421 - Task #3304: py3 - handle pymemcache bytes not str
# Explicitly set the fp_id_metric_ts_object so it
# always exists to be evaluated
fp_id_metric_ts_object = None
fp_id_metric_ts_key = 'fp.%s.%s.ts' % (str(fp_id), str(metrics_id))
try:
# @modified 20191029 - Task #3304: py3 - handle pymemcache bytes not str
# fp_id_metric_ts_object = self.memcache_client.get(fp_id_metric_ts_key)
if python_version == 2:
fp_id_metric_ts_object = self.memcache_client.get(fp_id_metric_ts_key)
else:
fp_id_metric_ts_object = self.memcache_client.get(fp_id_metric_ts_key).decode('utf-8')
# if memcache does not have the key the response to the
# client is None, it does not except
except:
# @modified 20200501 - Branch #3262: py3
# This is not an error if the data does not exist in
# memcache, it can be expected not to exists in
# memcache if it has not be used in a while.
# logger.error('error :: failed to get %s from memcache' % fp_id_metric_ts_key)
logger.info('did not get %s from memcache, will query DB' % fp_id_metric_ts_key)
try:
self.memcache_client.close()
except:
logger.error('error :: failed to close memcache_client')
if fp_id_metric_ts_object:
# @modified 20200421 - Task #3304: py3 - handle pymemcache bytes not str
# Wrapped in try and except
try:
fp_id_metric_ts = literal_eval(fp_id_metric_ts_object)
logger.info('used memcache %s key data to populate fp_id_metric_ts with %s data points' % (fp_id_metric_ts_key, str(len(fp_id_metric_ts))))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to literal_eval the fp_id_metric_ts_object in minmax_check')
fp_id_metric_ts = []
else:
logger.info('no memcache %s key data, will use database' % fp_id_metric_ts_key)
if not fp_id_metric_ts:
if LOCAL_DEBUG:
logger.debug('debug :: getting data from %s database table for fp id %s to populate the fp_id_metric_ts list' % (metric_fp_ts_table, str(fp_id)))
try:
stmt = 'SELECT timestamp, value FROM %s WHERE fp_id=%s' % (metric_fp_ts_table, str(fp_id)) # nosec
connection = engine.connect()
for row in engine.execute(stmt):
fp_id_ts_timestamp = int(row['timestamp'])
fp_id_ts_value = float(row['value'])
fp_id_metric_ts.append([fp_id_ts_timestamp, fp_id_ts_value])
connection.close()
values_count = len(fp_id_metric_ts)
logger.info('determined %s values for the fp_id time series %s for %s' % (str(values_count), str(fp_id), str(base_name)))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine timestamps and values from %s' % metric_fp_ts_table)
if fp_id_metric_ts and settings.MEMCACHE_ENABLED:
fp_id_metric_ts_key = 'fp.%s.%s.ts' % (str(fp_id), str(metrics_id))
try:
self.memcache_client.set(fp_id_metric_ts_key, fp_id_metric_ts)
logger.info('populated memcache %s key' % fp_id_metric_ts_key)
except:
logger.error('error :: failed to set %s in memcache' % fp_id_metric_ts_key)
try:
self.memcache_client.close()
except:
logger.error('error :: failed to close memcache_client')
# Get anomalous time series
anomalous_ts_values_count = 0
if fp_id_metric_ts:
anomalous_timeseries_not_defined = True
try:
test_anomalous_timeseries = anomalous_timeseries
if len(test_anomalous_timeseries) > 0:
anomalous_timeseries_not_defined = False
except:
logger.info('anomalous_timeseries is not defined loading from anomaly json')
timeseries_dir = base_name.replace('.', '/')
metric_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_DATA_FOLDER, metric_timestamp,
timeseries_dir)
anomaly_json = '%s/%s.json' % (metric_data_dir, base_name)
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
if check_type == 'ionosphere_echo_check':
anomaly_json = redis_anomaly_json
if not echo_anomalous_timeseries:
try:
with open((redis_anomaly_json), 'r') as f:
raw_timeseries = f.read()
timeseries_array_str = str(raw_timeseries).replace('(', '[').replace(')', ']')
del raw_timeseries
echo_anomalous_timeseries = literal_eval(timeseries_array_str)
del timeseries_array_str
if len(echo_anomalous_timeseries) > 0:
logger.info('echo_anomalous_timeseries was populated from anomaly json %s with %s data points from for creating the minmax_anomalous_ts' % (redis_anomaly_json, str(len(echo_anomalous_timeseries))))
else:
logger.error('error :: echo_anomalous_timeseries for minmax_anomalous_ts is not populated from anomaly json - %s' % redis_anomaly_json)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not create echo_anomalous_timeseries from anomaly json %s' % redis_anomaly_json)
else:
logger.info('echo_anomalous_timeseries has %s data points from for creating the minmax_anomalous_ts' % (str(len(echo_anomalous_timeseries))))
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# if anomalous_timeseries_not_defined:
if anomalous_timeseries_not_defined and check_type == 'ionosphere':
try:
with open((anomaly_json), 'r') as f:
raw_timeseries = f.read()
timeseries_array_str = str(raw_timeseries).replace('(', '[').replace(')', ']')
del raw_timeseries
anomalous_timeseries = literal_eval(timeseries_array_str)
del timeseries_array_str
if len(anomalous_timeseries) > 0:
logger.info('anomalous_timeseries was populated from anomaly json %s with %s data points from for creating the minmax_anomalous_ts' % (anomaly_json, str(len(anomalous_timeseries))))
else:
logger.error('error :: anomalous_timeseries for minmax_anomalous_ts is not populated from anomaly json - %s' % anomaly_json)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not create anomalous_timeseries from anomaly json %s' % anomaly_json)
else:
if check_type == 'ionosphere':
logger.info('anomalous_timeseries has %s data points from for creating the minmax_anomalous_ts' % (str(len(anomalous_timeseries))))
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
if check_type == 'ionosphere':
use_anomalous_timeseries = anomalous_timeseries
if check_type == 'ionosphere_echo_check':
use_anomalous_timeseries = echo_anomalous_timeseries
anomalous_ts_values_count = len(use_anomalous_timeseries)
# @added 20180621 - Feature #2404: Ionosphere - fluid approximation
# Check ranges and only Min-Max scale if the 2 time series
# are similar in range
# @added 20180819 - Bug #2534: Ionosphere - fluid approximation - IONOSPHERE_MINMAX_SCALING_RANGE_TOLERANCE on low ranges
# TODO
try:
range_tolerance = settings.IONOSPHERE_MINMAX_SCALING_RANGE_TOLERANCE
except:
range_tolerance = 0.15
range_tolerance_percentage = range_tolerance * 100
check_range = False
range_similar = False
if fp_id_metric_ts:
if anomalous_ts_values_count > 0:
check_range = True
lower_range_similar = False
upper_range_similar = False
if check_range:
try:
minmax_fp_values = [x[1] for x in fp_id_metric_ts]
min_fp_value = min(minmax_fp_values)
max_fp_value = max(minmax_fp_values)
except:
min_fp_value = False
max_fp_value = False
try:
minmax_anomalous_values = [x2[1] for x2 in use_anomalous_timeseries]
min_anomalous_value = min(minmax_anomalous_values)
max_anomalous_value = max(minmax_anomalous_values)
except:
min_anomalous_value = False
max_anomalous_value = False
lower_range_not_same = True
try:
try:
if int(min_fp_value) == int(min_anomalous_value):
lower_range_not_same = False
lower_range_similar = True
logger.info('min value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are the same' % (
str(min_fp_value), str(min_anomalous_value)))
except:
lower_range_not_same = True
if min_fp_value and min_anomalous_value and lower_range_not_same:
if int(min_fp_value) == int(min_anomalous_value):
lower_range_similar = True
logger.info('min value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are the same' % (
str(min_fp_value), str(min_anomalous_value)))
else:
lower_min_fp_value = int(min_fp_value - (min_fp_value * range_tolerance))
upper_min_fp_value = int(min_fp_value + (min_fp_value * range_tolerance))
if int(min_anomalous_value) in range(lower_min_fp_value, upper_min_fp_value):
lower_range_similar = True
logger.info('min value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are similar within %s percent of each other' % (
str(min_fp_value),
str(min_anomalous_value),
str(range_tolerance_percentage)))
if not lower_range_similar:
logger.info('lower range of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are not similar' % (
str(min_fp_value), str(min_anomalous_value)))
upper_range_not_same = True
try:
if int(max_fp_value) == int(max_anomalous_value):
upper_range_not_same = False
upper_range_similar = True
logger.info('max value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are the same' % (
str(max_fp_value), str(max_anomalous_value)))
except:
upper_range_not_same = True
if max_fp_value and max_anomalous_value and lower_range_similar and upper_range_not_same:
# @added 20180717 - Task #2446: Optimize Ionosphere
# Feature #2404: Ionosphere - fluid approximation
# On low values such as 1 and 2, the range_tolerance
# should be adjusted to account for the very small
# range. TODO
lower_max_fp_value = int(max_fp_value - (max_fp_value * range_tolerance))
upper_max_fp_value = int(max_fp_value + (max_fp_value * range_tolerance))
if int(max_anomalous_value) in range(lower_max_fp_value, upper_max_fp_value):
upper_range_similar = True
logger.info('max value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are similar within %s percent of each other' % (
str(max_fp_value), str(max_anomalous_value),
str(range_tolerance_percentage)))
else:
logger.info('max value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are not similar' % (
str(max_fp_value), str(max_anomalous_value)))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not calculate range similarity with the current anomalous_timeseries and the fp id %s time series' % (str(fp_id)))
if lower_range_similar and upper_range_similar:
range_similar = True
else:
logger.info('the ranges of fp_id_metric_ts and anomalous_timeseries differ significantly Min-Max scaling will be skipped')
minmax_fp_ts = []
# if fp_id_metric_ts:
if range_similar:
if LOCAL_DEBUG:
logger.debug('debug :: creating minmax_fp_ts from minmax scaled fp_id_metric_ts')
try:
minmax_fp_values = [x[1] for x in fp_id_metric_ts]
x_np = np.asarray(minmax_fp_values)
# Min-Max scaling
np_minmax = (x_np - x_np.min()) / (x_np.max() - x_np.min())
for (ts, v) in zip(fp_id_metric_ts, np_minmax):
minmax_fp_ts.append([ts[0], v])
logger.info('minmax_fp_ts list populated with the minmax scaled time series with %s data points' % str(len(minmax_fp_ts)))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not minmax scale fp id %s time series for %s' % (str(fp_id), str(base_name)))
if not minmax_fp_ts:
logger.error('error :: minmax_fp_ts list not populated')
minmax_anomalous_ts = []
if minmax_fp_ts:
# Only process if they are approximately the same length
minmax_fp_ts_values_count = len(minmax_fp_ts)
if minmax_fp_ts_values_count - anomalous_ts_values_count in range(-14, 14):
try:
minmax_anomalous_values = [x2[1] for x2 in use_anomalous_timeseries]
x_np = np.asarray(minmax_anomalous_values)
# Min-Max scaling
np_minmax = (x_np - x_np.min()) / (x_np.max() - x_np.min())
for (ts, v) in zip(fp_id_metric_ts, np_minmax):
minmax_anomalous_ts.append([ts[0], v])
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine np_minmax with current time series anomalous_timeseries and fp id %s time series' % (str(fp_id)))
if len(minmax_anomalous_ts) > 0:
logger.info('minmax_anomalous_ts is populated with %s data points' % str(len(minmax_anomalous_ts)))
else:
logger.error('error :: minmax_anomalous_ts is not populated')
else:
logger.info('minmax scaled check will be skipped - anomalous_ts_values_count is %s and minmax_fp_ts is %s' % (str(anomalous_ts_values_count), str(minmax_fp_ts_values_count)))
minmax_fp_ts_csv = '%s/fpid.%s.%s.minmax_fp_ts.tsfresh.input.std.csv' % (
settings.SKYLINE_TMP_DIR, str(fp_id), base_name)
minmax_fp_fname_out = minmax_fp_ts_csv + '.transposed.csv'
anomalous_ts_csv = '%s/%s.%s.minmax_anomalous_ts.tsfresh.std.csv' % (
settings.SKYLINE_TMP_DIR, metric_timestamp, base_name)
anomalous_fp_fname_out = anomalous_ts_csv + '.transposed.csv'
# @modified 20210101 - Task #3928: Update Skyline to use new tsfresh feature extraction method
# tsf_settings = ReasonableFeatureExtractionSettings()
# tsf_settings.disable_progressbar = True
minmax_fp_features_sum = None
minmax_anomalous_features_sum = None
if minmax_anomalous_ts and minmax_fp_ts:
if LOCAL_DEBUG:
logger.debug('debug :: analyzing minmax_fp_ts and minmax_anomalous_ts')
if not os.path.isfile(minmax_fp_ts_csv):
if LOCAL_DEBUG:
logger.debug('debug :: creating %s from minmax_fp_ts' % minmax_fp_ts_csv)
datapoints = minmax_fp_ts
converted = []
for datapoint in datapoints:
try:
new_datapoint = [float(datapoint[0]), float(datapoint[1])]
converted.append(new_datapoint)
# @added 20210425 - Task #4030: refactoring
except TypeError:
# This allows for the handling when the
# entry has a value of None
continue
# @modified 20210425 - Task #4030: refactoring
# except: # nosec
except Exception as e:
logger.error('error :: could not create converted timeseries from minmax_fp_ts - %s' % e)
continue
del datapoints
if LOCAL_DEBUG:
if len(converted) > 0:
logger.debug('debug :: converted is populated')
else:
logger.debug('debug :: error :: converted is not populated')
for ts, value in converted:
try:
utc_ts_line = '%s,%s,%s\n' % (base_name, str(int(ts)), str(value))
with open(minmax_fp_ts_csv, 'a') as fh:
fh.write(utc_ts_line)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not write to file %s' % (str(minmax_fp_ts_csv)))
del converted
else:
logger.info('file found %s, using for data' % minmax_fp_ts_csv)
if not os.path.isfile(minmax_fp_ts_csv):
logger.error('error :: file not found %s' % minmax_fp_ts_csv)
else:
logger.info('file exists to create the minmax_fp_ts data frame from - %s' % minmax_fp_ts_csv)
try:
df = pd.read_csv(minmax_fp_ts_csv, delimiter=',', header=None, names=['metric', 'timestamp', 'value'])
df.columns = ['metric', 'timestamp', 'value']
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to created data frame from %s' % (str(minmax_fp_ts_csv)))
try:
df_features = extract_features(
# @modified 20210101 - Task #3928: Update Skyline to use new tsfresh feature extraction method
# df, column_id='metric', column_sort='timestamp', column_kind=None,
# column_value=None, feature_extraction_settings=tsf_settings)
df, default_fc_parameters=EfficientFCParameters(),
column_id='metric', column_sort='timestamp', column_kind=None,
column_value=None, disable_progressbar=True)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to created df_features from %s' % (str(minmax_fp_ts_csv)))
del df
# Create transposed features csv
if not os.path.isfile(minmax_fp_fname_out):
# Transpose
df_t = df_features.transpose()
df_t.to_csv(minmax_fp_fname_out)
del df_t
else:
if LOCAL_DEBUG:
logger.debug('debug :: file exists - %s' % minmax_fp_fname_out)
try:
# Calculate the count and sum of the features values
df_sum = pd.read_csv(
minmax_fp_fname_out, delimiter=',', header=0,
names=['feature_name', 'value'])
df_sum.columns = ['feature_name', 'value']
df_sum['feature_name'] = df_sum['feature_name'].astype(str)
df_sum['value'] = df_sum['value'].astype(float)
minmax_fp_features_count = len(df_sum['value'])
minmax_fp_features_sum = df_sum['value'].sum()
logger.info('minmax_fp_ts - features_count: %s, features_sum: %s' % (str(minmax_fp_features_count), str(minmax_fp_features_sum)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to created df_sum from %s' % (str(minmax_fp_fname_out)))
if minmax_fp_features_count > 0:
if LOCAL_DEBUG:
logger.debug('debug :: minmax_fp_features_count of the minmax_fp_ts is %s' % str(minmax_fp_features_count))
else:
logger.error('error :: minmax_fp_features_count is %s' % str(minmax_fp_features_count))
if not os.path.isfile(anomalous_ts_csv):
datapoints = minmax_anomalous_ts
converted = []
for datapoint in datapoints:
try:
new_datapoint = [float(datapoint[0]), float(datapoint[1])]
converted.append(new_datapoint)
# @added 20210425 - Task #4030: refactoring
except TypeError:
# This allows for the handling when the
# entry has a value of None
continue
# @modified 20210425 - Task #4030: refactoring
# except: # nosec
except Exception as e:
logger.error('error :: could not create converted timeseries from minmax_anomalous_ts - %s' % e)
continue
del datapoints
for ts, value in converted:
utc_ts_line = '%s,%s,%s\n' % (base_name, str(int(ts)), str(value))
with open(anomalous_ts_csv, 'a') as fh:
fh.write(utc_ts_line)
del converted
df = pd.read_csv(anomalous_ts_csv, delimiter=',', header=None, names=['metric', 'timestamp', 'value'])
df.columns = ['metric', 'timestamp', 'value']
df_features_current = extract_features(
# @modified 20210101 - Task #3928: Update Skyline to use new tsfresh feature extraction method
# df, column_id='metric', column_sort='timestamp', column_kind=None,
# column_value=None, feature_extraction_settings=tsf_settings)
df, default_fc_parameters=EfficientFCParameters(),
column_id='metric', column_sort='timestamp', column_kind=None,
column_value=None, disable_progressbar=True)
del df
# Create transposed features csv
if not os.path.isfile(anomalous_fp_fname_out):
# Transpose
df_t = df_features_current.transpose()
df_t.to_csv(anomalous_fp_fname_out)
del df_t
# Calculate the count and sum of the features values
df_sum_2 = pd.read_csv(
anomalous_fp_fname_out, delimiter=',', header=0,
names=['feature_name', 'value'])
df_sum_2.columns = ['feature_name', 'value']
df_sum_2['feature_name'] = df_sum_2['feature_name'].astype(str)
df_sum_2['value'] = df_sum_2['value'].astype(float)
minmax_anomalous_features_count = len(df_sum_2['value'])
minmax_anomalous_features_sum = df_sum_2['value'].sum()
logger.info('minmax_anomalous_ts - minmax_anomalous_features_count: %s, minmax_anomalous_features_sum: %s' % (
str(minmax_anomalous_features_count),
str(minmax_anomalous_features_sum)))
if minmax_fp_features_sum and minmax_anomalous_features_sum:
percent_different = None
# @modified 20210425 - Task #4030: refactoring
# Feature #4014: Ionosphere - inference
# Use the common function added
# try:
# fp_sum_array = [minmax_fp_features_sum]
# calc_sum_array = [minmax_anomalous_features_sum]
# percent_different = 100
# sums_array = np.array([minmax_fp_features_sum, minmax_anomalous_features_sum], dtype=float)
# calc_percent_different = np.diff(sums_array) / sums_array[:-1] * 100.
# percent_different = calc_percent_different[0]
# logger.info('percent_different between minmax scaled features sums - %s' % str(percent_different))
# except Exception as e:
# logger.error(traceback.format_exc())
# logger.error('error :: failed to calculate percent_different from minmax scaled features sums - %s' % e)
percent_different = 100
try:
percent_different = get_percent_different(minmax_fp_features_sum, minmax_anomalous_features_sum, True)
logger.info('percent_different between minmax scaled features sums - %s' % str(percent_different))
except Exception as e:
logger.error('error :: failed to calculate percent_different between minmax scaled features sums- %s' % e)
if percent_different:
almost_equal = None
try:
np.testing.assert_array_almost_equal(fp_sum_array, calc_sum_array)
almost_equal = True
except:
almost_equal = False
if almost_equal:
minmax_not_anomalous = True
logger.info('minmax scaled common features sums are almost equal, not anomalous')
# if diff_in_sums <= 1%:
if percent_different < 0:
new_pdiff = percent_different * -1
percent_different = new_pdiff
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
if check_type == 'ionosphere':
mm_use_percent_similar = float(settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR)
if check_type == 'ionosphere_echo_check':
try:
mm_use_percent_similar = float(settings.IONOSPHERE_ECHO_MINMAX_SCALING_FEATURES_PERCENT_SIMILAR)
except:
mm_use_percent_similar = 3.5
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# if percent_different < settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR:
if percent_different < mm_use_percent_similar:
minmax_not_anomalous = True
# log
logger.info('not anomalous - minmax scaled features profile match - %s - %s' % (base_name, str(minmax_not_anomalous)))
logger.info(
'minmax scaled calculated features sum are within %s percent of fp_id %s with %s, not anomalous' %
(str(mm_use_percent_similar),
str(fp_id), str(percent_different)))
if check_type == 'ionosphere_echo_check':
logger.info('ionosphere_echo_check :: not anomalous - minmax scaled features profile match - %s' % (base_name))
if minmax_not_anomalous:
not_anomalous = True
minmax = 1
# Created time series resources for graphing in
# the matched page
try:
if os.path.isfile(minmax_fp_ts_csv):
self.remove_metric_check_file(str(minmax_fp_ts_csv))
except:
pass
try:
if os.path.isfile(minmax_fp_fname_out):
self.remove_metric_check_file(str(minmax_fp_fname_out))
except:
pass
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
# Clean up echo files
if echo_check:
echo_calculated_feature_file = '%s/%s.echo.tsfresh.input.csv.features.transposed.csv' % (metric_training_data_dir, base_name)
try:
if os.path.isfile(echo_calculated_feature_file):
self.remove_metric_check_file(str(echo_calculated_feature_file))
except:
pass
echo_features_file = '%s/%s.%s.echo.fp.details.txt' % (metric_training_data_dir, str(metric_timestamp), base_name)
try:
if os.path.isfile(echo_features_file):
self.remove_metric_check_file(str(echo_features_file))
except:
pass
# Clean up
if minmax_check:
try:
clean_file = anomalous_ts_csv
if os.path.isfile(anomalous_ts_csv):
self.remove_metric_check_file(str(anomalous_ts_csv))
logger.info('cleaned up - %s' % clean_file)
except:
logger.info('no anomalous_ts_csv file to clean up')
try:
clean_file = anomalous_fp_fname_out
if os.path.isfile(anomalous_fp_fname_out):
self.remove_metric_check_file(str(anomalous_fp_fname_out))
logger.info('cleaned up - %s' % clean_file)
except:
logger.info('no anomalous_fp_fname_out file to clean up')
# END - Feature #2404: Ionosphere - fluid approximation
if not_anomalous:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.not_anomalous.append(base_name)
redis_set = 'ionosphere.not_anomalous'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# update matched_count in ionosphere_table
matched_timestamp = int(time())
# @added 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the accquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine to update matched details in db for %s' % (str(fp_id)))
if not engine:
logger.error('error :: engine not obtained to update matched details in db for %s' % (str(fp_id)))
try:
connection = engine.connect()
connection.execute(
ionosphere_table.update(
ionosphere_table.c.id == fp_id).
values(matched_count=ionosphere_table.c.matched_count + 1,
last_matched=matched_timestamp))
connection.close()
logger.info('updated matched_count for %s' % str(fp_id))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not update matched_count and last_matched for %s ' % str(fp_id))
# @added 20170107 - Feature #1844: ionosphere_matched DB table
# Added ionosphere_matched update
# @modified 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the accquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine to update ionosphere_matched for %s' % (str(fp_id)))
if not engine:
logger.error('error :: engine not obtained to update ionosphere_matched for %s' % (str(fp_id)))
try:
ionosphere_matched_table, log_msg, trace = ionosphere_matched_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('ionosphere_matched_table OK')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere_matched_table meta for %s' % base_name)
# @added 20180620 - Feature #2404: Ionosphere - fluid approximation
# Added minmax scaling values
if minmax_not_anomalous == 1:
minmax_fp_features_sum = float(minmax_fp_features_sum)
minmax_fp_features_count = int(minmax_fp_features_count)
minmax_anomalous_features_sum = float(minmax_anomalous_features_sum)
minmax_anomalous_features_count = int(minmax_anomalous_features_count)
else:
minmax_fp_features_sum = 0
minmax_fp_features_count = 0
minmax_anomalous_features_sum = 0
minmax_anomalous_features_count = 0
# @added 20190919 - Feature #2484: FULL_DURATION feature profiles
# If there are additional echo fps then the database can be
# updated with the fp_count_with_echo value for fp_count in
# the ionosphere_matched table
if fp_count_with_echo > fp_count:
fp_count = fp_count_with_echo
try:
connection = engine.connect()
# @modified 20170107 - Feature #1852: Ionosphere - features_profile matched graphite graphs
# Feature #1844: ionosphere_matched DB table
# Added all_calc_features_sum, all_calc_features_count,
# sum_calc_values, common_features_count, tsfresh_version
ins = ionosphere_matched_table.insert().values(
fp_id=int(fp_id),
metric_timestamp=int(metric_timestamp),
all_calc_features_sum=float(all_calc_features_sum),
all_calc_features_count=len(all_calc_features_sum_list),
sum_common_values=float(sum_calc_values),
common_features_count=int(relevant_calc_feature_values_count),
tsfresh_version=str(tsfresh_version),
# @added 20180620 - Feature #2404: Ionosphere - fluid approximation
# Added minmax scaling values
minmax=minmax,
minmax_fp_features_sum=minmax_fp_features_sum,
minmax_fp_features_count=minmax_fp_features_count,
minmax_anomalous_features_sum=minmax_anomalous_features_sum,
minmax_anomalous_features_count=minmax_anomalous_features_count,
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
fp_count=fp_count, fp_checked=fp_checked)
result = connection.execute(ins)
connection.close()
new_matched_id = result.inserted_primary_key[0]
# @modified 20180620 - Feature #2404: Ionosphere - fluid approximation
# Added minmax
if minmax == 0:
logger.info('new ionosphere_matched id: %s' % str(new_matched_id))
else:
logger.info('new minmax scaled ionosphere_matched id: %s' % str(new_matched_id))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: could not update ionosphere_matched for %s with with timestamp %s' % (
str(fp_id), str(metric_timestamp)))
# @added 20170331 - Task #1988: Review - Ionosphere layers - always show layers
# Feature #1960: ionosphere_layers
# Added mirror functionality of the layers_id_matched_file
# for feature profile matches too as it has proved useful
# in the frontend with regards to training data sets being
# matched by layers and can do the same for in the frontend
# training data for feature profile matches too.
if not_anomalous:
profile_id_matched_file = '%s/%s.profile_id_matched.fp_id' % (
metric_training_data_dir, base_name)
if not os.path.isfile(profile_id_matched_file):
try:
write_data_to_file(skyline_app, profile_id_matched_file, 'w', str(fp_id))
logger.info('added matched fp_id %s - %s' % (
str(fp_id), profile_id_matched_file))
except:
logger.info(traceback.format_exc())
logger.error('error :: added matched fp_id %s - %s' % (
str(fp_id), profile_id_matched_file))
# @added 20210429 - Feature #3994: Panorama - mirage not anomalous
# A hash is added to the ionosphere.panorama.not_anomalous_metrics for
# every metric that is found to be not anomalous.
try:
add_not_anomalous_to_redis_hash(base_name, metric_timestamp, anomalous_value, full_duration)
except Exception as e:
logger.error('error :: failed calling add_not_anomalous_to_redis_hash - %s' % e)
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# Stop on the first match
break
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.testing.assert_almost_equal.html
# @added 20161214 - Add a between timeframe option, e.g. if
# fp match, only see this as not anomalous if hour (and or min)
# is between x and y - handle rollovers, cron log archives, etc.
logger.info('debug :: %s is a features profile for %s' % (str(fp_id), base_name))
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# If this is an ionosphere_learn check them we handle it before
# the others and exit and ionosphere_learn uses the Redis work
# queue. Here we go! Learn!
if added_by == 'ionosphere_learn':
if not_anomalous:
logger.info('an ionosphere_learn metric has been found to be not anomalous before')
# @added 20170607 - Feature #2010: Ionosphere learn - rate limiting profile learning
learning_rate_limited = False
now = int(time())
rate_limit_timestamp = now - 3600
rate_limit_datetime = datetime.fromtimestamp(rate_limit_timestamp)
f = '%Y-%m-%d %H:%M:%S'
after_datetime = rate_limit_datetime.strftime(f)
try:
connection = engine.connect()
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
result = connection.execute(
'SELECT * FROM ionosphere WHERE metric_id=%s AND created_timestamp > \'%s\' AND generation > 1' % (str(metrics_id), str(after_datetime))) # nosec
for row in result:
last_full_duration = row['full_duration']
if int(full_duration) <= int(last_full_duration):
learning_rate_limited = True
break
except:
logger.error(traceback.format_exc())
logger.error('error :: determining whether learning should be rate limited')
if learning_rate_limited:
logger.info('learning currently dynamically rate limited on %s' % str(base_name))
# Exit the ionosphere_learn check
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
else:
logger.info('learning is not currently rate limited on %s' % str(base_name))
# @added 20170605 - Bug #2038: Ionosphere learn parent generation incorrect
# Determine generation of the matched fp not the last in the
# list
try:
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
stmt = 'SELECT generation FROM ionosphere WHERE id=%s' % str(fp_id) # nosec
connection = engine.connect()
for row in engine.execute(stmt):
matched_fp_generation = int(row['generation'])
connection.close()
logger.info(
'determined matched fp_id %s is a generation %s profile' % (
str(fp_id), str(matched_fp_generation)))
current_fp_generation = matched_fp_generation
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine generation from ionosphere table for fp id %s' % str(fp_id))
logger.info(
'ionosphere_learn metric matches the generation %s features profile id %s - %s' % (
str(current_fp_generation), str(fp_id), base_name))
# Added Redis to work_set, learn will then go off and create
# the features profile with the parent training data if
# less than max_generations, although ionosphere_learn
# should not should Ionosphere any work if the result would
# be greater than max_generations
logger.info('adding work item to Redis set ionosphere.learn.work')
ionosphere_job = 'learn_fp_learnt'
work_deadline = 'Soft'
try:
logger.info(
'LEARNT :: adding work to Redis ionosphere.learn.work set - [\'%s\', \'%s\', %s, \'%s\', %s, %s] to create a learnt features profile' % (
work_deadline, str(ionosphere_job), str(metric_timestamp), base_name,
str(fp_id), str(current_fp_generation)))
# modified 20190412 - Task #2824: Test redis-py upgrade
# Task #2926: Update dependencies
# self.redis_conn.sadd('ionosphere.learn.work', ['Soft', str(ionosphere_job), int(metric_timestamp), base_name, int(fp_id), int(current_fp_generation)])
self.redis_conn.sadd('ionosphere.learn.work', str(['Soft', str(ionosphere_job), int(metric_timestamp), base_name, int(fp_id), int(current_fp_generation)]))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed adding work to Redis ionosphere.learn.work set - [\'%s\', \'%s\', %s, \'%s\', %s, %s] to make a learn features profile later' % (
work_deadline, str(ionosphere_job), str(metric_timestamp), base_name,
str(fp_id), str(current_fp_generation)))
# Exit the ionosphere_learn check
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @added 20170306 - Feature #1960: ionosphere_layers
# Here we go, let us TEACH you properly. We only evaluate
# the Ionosphere layer algorithms after Skyline has had an
# an opportunity to match the original and learnt features
# profiles. This enables the original, evolutionary,
# generations based learning to be continually evaluated.
# This needs to happen for any future implemenation of
# Feature #1888: Ionosphere learn - evolutionary maturity forget
logger.info('layers algorithms check')
check_layers_algorithms = False
if not not_anomalous:
check_layers_algorithms = True
if added_by == 'ionosphere_learn':
check_layers_algorithms = False
logger.info('ionosphere_learn - layers algorithms check - False')
else:
logger.info('layers algorithms check - True, %s layers to be checked' % str(fp_layers_count))
else:
logger.info('a features profile matched as not_anomalous - layers algorithms check - False')
if check_layers_algorithms and fp_layers_present:
full_duration_in_hours = int(settings.FULL_DURATION) / 3600
mirage_full_duration_json_file = '%s/%s.mirage.redis.%sh.json' % (
metric_training_data_dir, base_name,
str(int(full_duration_in_hours)))
if os.path.isfile(mirage_full_duration_json_file):
full_duration_json_file = mirage_full_duration_json_file
else:
full_duration_json_file = '%s/%s.json' % (metric_training_data_dir, base_name)
anomalous_timeseries = None
if os.path.isfile(full_duration_json_file):
logger.info('full duration ts json available for layers check - %s' % (full_duration_json_file))
try:
# Read the timeseries json file
with open((full_duration_json_file), 'r') as f:
raw_timeseries = f.read()
timeseries_array_str = str(raw_timeseries).replace('(', '[').replace(')', ']')
anomalous_timeseries = literal_eval(timeseries_array_str)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not load json for layers check - %s' % (base_name))
logger.info('data points surfaced for layers check - %s' % (len(anomalous_timeseries)))
else:
logger.error('error :: full duration ts json for layers was not found - %s' % (full_duration_json_file))
matched_layers_id = None
for layers_id in fp_layers_ids:
if not_anomalous:
logger.info('checking layers_id %s - %s layers profiles of %s possible layers' % (
str(layers_id), str(layers_checked_count), str(fp_layers_count)))
if not_anomalous:
logger.info('skipping checking layers_id %s - %s layers profiles of %s possible layers as layer id %s already matched' % (
str(layers_id), str(layers_checked_count), str(fp_layers_count), str(matched_layers_id)))
continue
if int(layers_id) != 0:
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
# @modified 20181014 - Feature #2430: Ionosphere validate learnt features profiles page
# layers_checked += 1
layers_checked_count += 1
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Added to Redis set here and commented out the
# self.layers_checked.append in the try below this
redis_set = 'ionosphere.layers_checked'
data = layers_id
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# Get the layers algorithms and run then on the timeseries
# @modified 20170307 - Feature #1960: ionosphere_layers
# Use except on everything, remember how fast Skyline can iterate
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Added to the ionosphere.layers_checked Redis set
# above
# self.layers_checked.append(layers_id)
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
# not_anomalous = run_layer_algorithms(base_name, layers_id, anomalous_timeseries)
# @modified 20181013 - Feature #2430: Ionosphere validate learnt features profiles page
# not_anomalous = run_layer_algorithms(base_name, layers_id, anomalous_timeseries, fp_layers_count, layers_checked)
not_anomalous = run_layer_algorithms(base_name, layers_id, anomalous_timeseries, fp_layers_count, layers_checked_count)
if not_anomalous:
matched_layers_id = layers_id
except:
logger.error(traceback.format_exc())
logger.error('error :: run_layer_algorithms failed for layers_id - %s' % (str(layers_id)))
if not_anomalous:
logger.info('not_anomalous :: layers_id %s was matched after checking %s layers profiles of %s possible layers' % (
str(layers_id), str(layers_checked_count), str(fp_layers_count)))
else:
logger.info('still anomalous :: layers_id %s was NOT matched after checking %s layers profiles of %s possible layers' % (
str(layers_id), str(layers_checked_count), str(fp_layers_count)))
if not not_anomalous:
logger.info('anomalous - no features profiles layers were matched - %s' % base_name)
# @added 20170308 - Feature #1960: ionosphere_layers
# Feature #1854: Ionosphere learn
# A create a layer_id matched txt file in the training_data dir
# to advise the operator if a training_data set has been matched
# by a layer. Further below if app is not ionosphere_learn a
# 'learn_fp_generation' ionosphere_job is added so ionosphere_learn
# can still try and learning from the existing features profiles
# that exist even if a layer matched as not_anomalous.
if not_anomalous:
layers_id_matched_file = '%s/%s.layers_id_matched.layers_id' % (
metric_training_data_dir, base_name)
if not os.path.isfile(layers_id_matched_file):
try:
write_data_to_file(skyline_app, layers_id_matched_file, 'w', str(matched_layers_id))
logger.info('added matched layers_id %s - %s' % (
str(matched_layers_id), layers_id_matched_file))
except:
logger.info(traceback.format_exc())
logger.error('error :: added matched layers_id %s - %s' % (
str(matched_layers_id), layers_id_matched_file))
# @added 20210429 - Feature #3994: Panorama - mirage not anomalous
# A hash is added to the ionosphere.panorama.not_anomalous_metrics for
# every metric that is found to be not anomalous.
try:
add_not_anomalous_to_redis_hash(base_name, metric_timestamp, anomalous_value, full_duration)
except Exception as e:
logger.error('error :: failed calling add_not_anomalous_to_redis_hash - %s' % e)
else:
logger.info('no layers algorithm check required')
# Ionosphere layers DONE
# @added 20200904 - Feature #3734: waterfall alerts
# Remove the metric from the waterfall_alerts Redis set
# [metric, timestamp, value, added_to_waterfall_timestamp]
# waterfall_data = [metric[1], metric[2], metric[0], added_to_waterfall_timestamp]
remove_waterfall_alert(added_by, metric_timestamp, base_name)
if not not_anomalous:
logger.info('anomalous - no feature profiles were matched - %s' % base_name)
# @added 20170116 - Feature #1854: Ionosphere learn
# If this is an ionosphere_learn check an Ionosphere alert will
# not be sent back to Analyzer, Mirage or the ionosphere.learn.work
# Redis set. We exit, work is done.
if added_by == 'ionosphere_learn':
logger.info('ionosphere_learn check complete - %s' % base_name)
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.anomalous_metrics.append(base_name)
redis_set = 'ionosphere.anomalous_metrics'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# Send to panorama as Analyzer and Mirage will only alert on the
# anomaly, they will not push it to Panorama
if settings.PANORAMA_ENABLED:
if not os.path.exists(settings.PANORAMA_CHECK_PATH):
mkdir_p(settings.PANORAMA_CHECK_PATH)
# Note:
# The values are enclosed is single quoted intentionally
# as the imp.load_source used results in a shift in the
# decimal position when double quoted, e.g.
# value = "5622.0" gets imported as
# 2016-03-02 12:53:26 :: 28569 :: metric variable - value - 562.2
# single quoting results in the desired,
# 2016-03-02 13:16:17 :: 1515 :: metric variable - value - 5622.0
added_at = str(int(time()))
source = 'graphite'
panorama_anomaly_data = 'metric = \'%s\'\n' \
'value = \'%s\'\n' \
'from_timestamp = \'%s\'\n' \
'metric_timestamp = \'%s\'\n' \
'algorithms = %s\n' \
'triggered_algorithms = %s\n' \
'app = \'%s\'\n' \
'source = \'%s\'\n' \
'added_by = \'%s\'\n' \
'added_at = \'%s\'\n' \
% (base_name, str(anomalous_value), str(int(from_timestamp)),
str(int(metric_timestamp)), str(settings.ALGORITHMS),
str(triggered_algorithms), skyline_app, source,
this_host, added_at)
# Create an anomaly file with details about the anomaly
panorama_anomaly_file = '%s/%s.%s.txt' % (
settings.PANORAMA_CHECK_PATH, added_at,
base_name)
try:
write_data_to_file(
skyline_app, panorama_anomaly_file, 'w',
panorama_anomaly_data)
logger.info('added panorama anomaly file :: %s' % (panorama_anomaly_file))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Moved to the Redis set function below
# self.sent_to_panorama.append(base_name)
except:
logger.error('error :: failed to add panorama anomaly file :: %s' % (panorama_anomaly_file))
logger.info(traceback.format_exc())
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
redis_set = 'ionosphere.sent_to_panorama'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# alert ... hmmm the harder part, maybe not all the resources
# are already created, so just determining ALERTS and firing a
# trigger_alert (pull in alerter.py and mirage_alerters.py?)
# OR send back to app via Redis
# @modified 20170116 - Feature #1854: Ionosphere learn
# Only do the cache_key if not ionosphere_learn
if added_by != 'ionosphere_learn':
# @added 20200908 - Feature #3734: waterfall alerts
# Remove any waterfall_alert items
remove_waterfall_alert(added_by, metric_timestamp, base_name)
# @modified 20200908 - Feature #3734: waterfall alerts
# Use common return_to_sender_to_alert function
# cache_key = 'ionosphere.%s.alert.%s.%s' % (added_by, metric_timestamp, base_name)
# added 20190412 - Task #2824: Test redis-py upgrade
# Task #2926: Update dependencies
# Added cache_key_value
# cache_key_value = [float(anomalous_value), base_name, int(metric_timestamp), triggered_algorithms, full_duration]
# try:
# self.redis_conn.setex(
# cache_key, 300,
# # modified 20190412 - Task #2824: Test redis-py upgrade
# # Task #2926: Update dependencies
# # [float(anomalous_value), base_name, int(metric_timestamp), triggered_algorithms, full_duration])
# str(cache_key_value))
# logger.info(
# 'add Redis alert key - %s - %s' %
# (cache_key, str(cache_key_value)))
# except:
# logger.error(traceback.format_exc())
# logger.error(
# 'error :: failed to add Redis key - %s - [%s, \'%s\', %s, %s, %s]' %
# (cache_key, str(anomalous_value), base_name, str(int(metric_timestamp)),
# str(triggered_algorithms), str(full_duration)))
# @modified 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
return_to_sender_to_alert(added_by, metric_timestamp, base_name, anomalous_value, triggered_algorithms, full_duration, algorithms_run)
# @added 20170116 - Feature #1854: Ionosphere learn
# Added an ionosphere_learn job for the timeseries that did not
# match any profiles. Here we go! Learn!
if added_by != 'ionosphere_learn':
ionosphere_job = 'learn_fp_generation'
logger.info(
'adding an ionosphere_learn %s job for the timeseries that did not match any profiles - %s' % (
ionosphere_job, base_name))
try:
logger.info(
'adding work to Redis ionosphere.learn.work set - [\'Soft\', \'%s\', %s, \'%s\', None, None] to make a learn features profile later' % (
str(ionosphere_job), str(int(metric_timestamp)),
base_name))
# modified 20190412 - Task #2824: Test redis-py upgrade
# Task #2926: Update dependencies
# self.redis_conn.sadd('ionosphere.learn.work', ['Soft', str(ionosphere_job), int(metric_timestamp), base_name, None, None])
self.redis_conn.sadd('ionosphere.learn.work', str(['Soft', str(ionosphere_job), int(metric_timestamp), base_name, None, None]))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed adding work to Redis ionosphere.learn.work set - [\'Soft\', \'%s\', %s, \'%s\', None, None] to make a learn features profile later' % (
str(ionosphere_job), str(int(metric_timestamp)),
base_name))
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
self.remove_metric_check_file(str(metric_check_file))
if dev_null:
del dev_null
if engine:
engine_disposal(engine)
return
def run(self):
"""
Called when the process intializes.
"""
# Log management to prevent overwriting
# Allow the bin/<skyline_app>.d to manage the log
if os.path.isfile(skyline_app_logwait):
try:
logger.info('removing %s' % skyline_app_logwait)
os.remove(skyline_app_logwait)
except OSError:
logger.error('error :: failed to remove %s, continuing' % skyline_app_logwait)
pass
now = time()
log_wait_for = now + 5
while now < log_wait_for:
if os.path.isfile(skyline_app_loglock):
sleep(.1)
now = time()
else:
now = log_wait_for + 1
logger.info('starting %s run' % skyline_app)
if os.path.isfile(skyline_app_loglock):
logger.error('error :: bin/%s.d log management seems to have failed, continuing' % skyline_app)
try:
os.remove(skyline_app_loglock)
logger.info('log lock file removed')
except OSError:
logger.error('error :: failed to remove %s, continuing' % skyline_app_loglock)
pass
else:
logger.info('bin/%s.d log management done' % skyline_app)
# @added 20190524 - Bug #3050: Ionosphere - Skyline and Graphite feedback
logger.info('SKYLINE_FEEDBACK_NAMESPACES is set to %s' % str(SKYLINE_FEEDBACK_NAMESPACES))
while True:
now = time()
# Make sure Redis is up
try:
self.redis_conn.ping()
if ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: connected to Redis')
except:
logger.error('error :: cannot connect to redis at socket path %s' % (
settings.REDIS_SOCKET_PATH))
sleep(30)
# @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow
# @added 20191115 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# if settings.REDIS_PASSWORD:
# self.redis_conn = StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH)
# else:
# self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
self.redis_conn = get_redis_conn(skyline_app)
self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
continue
# Report app up
try:
# @modified 20210524 - Branch #1444: thunder
# Report app AND Redis as up
# self.redis_conn.setex(skyline_app, 120, now)
# logger.info('updated Redis key for %s up' % skyline_app)
redis_is_up = self.redis_conn.setex(skyline_app, 120, now)
if redis_is_up:
logger.info('updated Redis key for %s up' % skyline_app)
try:
self.redis_conn.setex('redis', 120, now)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: could not update the Redis redis key - %s' % (
e))
except Exception as e:
logger.error('error :: failed to update Redis key for %s up - %s' % (skyline_app, e))
# @modified 20200330 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
# Wrapped purging up in a conditional to allow the user to offload
# purging to a script and cron if they so desire for any reason.
if IONOSPHERE_MANAGE_PURGE:
# purge_old_data_dirs after every check file run, this takes less
# than a second and keeps the purging somewhat consistent with
# input rate.
# @added 20200723 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
# Do not purge every run
try:
last_purge_timestamp = self.redis_conn.get(last_purge_key)
except:
logger.error('error :: failed to get Redis key %s' % last_purge_key)
last_purge_timestamp = 0
if not last_purge_timestamp:
try:
logger.info('purging any old training data')
self.purge_old_data_dirs(
settings.IONOSPHERE_DATA_FOLDER,
settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR)
except:
logger.error('error :: purge_old_data_dirs - %s' % traceback.print_exc())
if ENABLE_IONOSPHERE_DEBUG:
logger.info(
'debug :: self.purge_old_data_dirs(%s, %s)' %
settings.IONOSPHERE_DATA_FOLDER,
settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR)
# @added 20170110 - Feature #1854: Ionosphere learn
# purge_old_data_dirs learn data
if settings.IONOSPHERE_LEARN:
try:
logger.info('purging any old learning data')
self.purge_old_data_dirs(
settings.IONOSPHERE_LEARN_FOLDER,
settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR)
except:
logger.error('error :: purge_old_data_dirs learn - %s' % traceback.print_exc())
if ENABLE_IONOSPHERE_DEBUG:
logger.info(
'debug :: self.purge_old_data_dirs(%s, %s)' %
settings.IONOSPHERE_LEARN_FOLDER,
settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR)
else:
logger.info('purge is not managed by Ionosphere - IONOSPHERE_MANAGE_PURGE = %s' % str(IONOSPHERE_MANAGE_PURGE))
# @added 20200731 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
# Still manage training data
try:
last_purge_timestamp = self.redis_conn.get(last_purge_key)
except:
logger.error('error :: failed to get Redis key %s' % last_purge_key)
last_purge_timestamp = 0
if not last_purge_timestamp:
try:
logger.info('running purge_old_data_dirs only to manage ionosphere.training_data')
self.purge_old_data_dirs(
settings.IONOSPHERE_DATA_FOLDER,
settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR)
except:
logger.error('error :: purge_old_data_dirs - %s' % traceback.print_exc())
# @added 20170916 - Feature #1996: Ionosphere - matches page
# Create the ionosphere_summary_memcache_object
# @modified 20180103 - Feature #1996: Ionosphere - matches page
# The ionosphere_summary_list memcache object is not managed in
# ionosphere.py and was an artefact of some dev work that may
# resume at some point
# if settings.MEMCACHE_ENABLED:
# try:
# logger.info('updating the ionosphere_summary_memcache_object')
# self.update_ionosphere_summary_memcache_object
# except:
# logger.error('error :: update_ionosphere_summary_memcache_object - %s' % traceback.print_exc())
# self.populate the database metatdata tables
# What is my host id in the Skyline panorama DB?
host_id = False
# @added 20170825 - Task #2132: Optimise Ionosphere DB usage
# Check memcached before MySQL
if settings.MEMCACHE_ENABLED:
hosts_id_key = 'hosts.id.%s' % this_host
try:
# @modified 20191029 - Task #3304: py3 - handle pymemcache bytes not str
# host_id = self.memcache_client.get(hosts_id_key)
if python_version == 2:
host_id = self.memcache_client.get(hosts_id_key)
else:
host_id = self.memcache_client.get(hosts_id_key).decode('utf-8')
# if memcache does not have the key the response to the
# client is None, it does not except
except:
logger.error('error :: failed to get %s from memcache' % hosts_id_key)
try:
self.memcache_client.close()
except:
# @modified 20170913 - Task #2160: Test skyline with bandit
# pass
logger.error('error :: failed to close memcache_client')
if host_id:
logger.info('using memcache %s key data' % hosts_id_key)
logger.info('host_id: %s' % str(host_id))
if not host_id:
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
query = 'select id FROM hosts WHERE host=\'%s\'' % this_host # nosec
results = mysql_select(skyline_app, query)
if results:
host_id = results[0][0]
logger.info('host_id: %s' % str(host_id))
else:
logger.info('failed to determine host id of %s' % this_host)
if host_id and settings.MEMCACHE_ENABLED:
try:
self.memcache_client.set(hosts_id_key, int(host_id))
logger.info('populated memcache %s key' % hosts_id_key)
except:
logger.error('error :: failed to set %s in memcache' % hosts_id_key)
try:
self.memcache_client.close()
except:
# @modified 20170913 - Task #2160: Test skyline with bandit
# pass
logger.error('error :: failed to close memcache_client')
# if not known - INSERT hostname INTO host
if not host_id:
logger.info('inserting %s into hosts table' % this_host)
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
query = 'insert into hosts (host) VALUES (\'%s\')' % this_host # nosec
host_id = self.mysql_insert(query)
if host_id:
logger.info('new host_id: %s' % str(host_id))
if not host_id:
logger.error(
'error :: failed to determine populate %s into the hosts table' %
this_host)
sleep(30)
continue
"""
Determine if any metric has been added to add
"""
# while True:
while 1:
metric_var_files = False
try:
metric_var_files = [f for f in listdir(settings.IONOSPHERE_CHECK_PATH) if isfile(join(settings.IONOSPHERE_CHECK_PATH, f))]
except:
logger.error('error :: failed to list files in check dir')
logger.info(traceback.format_exc())
# @added 20200109 - Feature #3380: Create echo features profile when a Mirage features profile is created
# Process the ionosphere.echo.work queue as echo features
# profiles cannot be easily shoehorned into the
# ionosphere.learn.work pipeline
try:
ionosphere_echo_enabled = settings.IONOSPHERE_ECHO_ENABLED
except:
ionosphere_echo_enabled = False
echo_job = False
if not metric_var_files and ionosphere_echo_enabled:
ionosphere_echo_work = None
echo_job = False
try:
ionosphere_echo_work = self.redis_conn_decoded.smembers('ionosphere.echo.work')
except Exception as e:
logger.error('error :: could not query Redis for ionosphere.echo.work - %s' % e)
if ionosphere_echo_work:
echo_work_queue_items = len(ionosphere_echo_work)
if echo_work_queue_items > 0:
echo_job = True
logger.info('processing a ionosphere.echo.work item')
if echo_job:
for index, ionosphere_echo_work in enumerate(ionosphere_echo_work):
try:
echo_metric_list = literal_eval(ionosphere_echo_work)
echo_metric_timestamp = int(echo_metric_list[2])
echo_base_name = str(echo_metric_list[3])
echo_full_duration = int(echo_metric_list[6])
break
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine details from ionosphere_echo_work item')
continue
if not echo_base_name:
echo_job = False
if echo_job:
# When an item is in the ionosphere.echo.work set it needs
# metric_echo_check_file created to pass to process_ionosphere_echo
echo_metric_check_file = '%s/%s.%s.echo.txt' % (
settings.SKYLINE_TMP_DIR, str(echo_metric_timestamp),
echo_base_name)
echo_create_fp_metric_key = 'ionosphere.%s.%s.echo_create_check' % (
str(echo_metric_timestamp), echo_base_name)
echo_create_fp_metric_count = 1
try:
echo_create_fp_metric_count = self.redis_conn.get(echo_create_fp_metric_key)
except Exception as e:
logger.error('error :: could not query Redis for %s: %s' % (echo_metric_check_file, e))
if not echo_create_fp_metric_count:
echo_create_fp_metric_count = 1
else:
echo_create_fp_metric_count += 1
if os.path.isfile(str(echo_metric_check_file)):
logger.error('error :: echo_metric_check_file - %s already exists, removing' % (
echo_metric_check_file))
self.remove_metric_check_file(echo_metric_check_file)
if echo_create_fp_metric_count >= 3:
logger.error('error :: echo_create_fp_metric_count is %s, no further attempts will be made to create an echo fp for %s' % (
str(echo_create_fp_metric_count), str(echo_metric_list)))
logger.info('removing ionosphere.echo.work item %s' % (
str(echo_metric_list)))
work_set = 'ionosphere.echo.work'
try:
self.redis_conn.srem(work_set, str(echo_metric_list))
logger.info('removed work item - %s - from Redis set - %s' % (str(echo_metric_list), work_set))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove work item list from Redis set - %s' % work_set)
echo_job = False
if echo_job:
check_data = 'metric = \'%s\'\n' \
'metric_timestamp = \'%s\'\n' \
'added_by = \'%s\'\n' \
'full_duration = \'%s\'\n' \
% (str(echo_base_name), str(echo_metric_timestamp),
'webapp', str(echo_full_duration))
echo_metric_check_file_created = False
try:
write_data_to_file(skyline_app, echo_metric_check_file, 'w', check_data)
logger.info('added ionosphere.echo.work item check file for process_ionosphere_echo - %s' % (
echo_metric_check_file))
echo_metric_check_file_created = True
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add ionosphere.echo.work item check file for process_ionosphere_echo - %s' % (
echo_metric_check_file))
if echo_metric_check_file_created:
# Set a Redis key so that if the echo fp creation fails
# a continous loop to try to create it does not occur
try:
self.redis_conn.setex(echo_create_fp_metric_key, 3600, echo_create_fp_metric_count)
logger.info('updated Redis key %s' % echo_create_fp_metric_key)
except:
logger.error('error :: failed to update Redis key %s' % echo_create_fp_metric_key)
# Spawn a single process_ionosphere_echo process
function_name = 'process_ionosphere_echo'
pids = []
spawned_pids = []
pid_count = 0
now = time()
for i in range(1, IONOSPHERE_PROCESSES + 1):
try:
p = Process(target=self.process_ionosphere_echo, args=(i, echo_metric_check_file))
pids.append(p)
pid_count += 1
logger.info(
'starting %s of %s %s for ionosphere.echo.work item' % (
str(pid_count), str(IONOSPHERE_PROCESSES),
function_name))
p.start()
spawned_pids.append(p.pid)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to start %s for ionosphere.echo.work item' % function_name)
continue
# Self monitor the process and terminate if the
# process_ionosphere_echo has run for too long
try:
ionosphere_echo_max_fp_create_time = settings.IONOSPHERE_ECHO_MAX_FP_CREATE_TIME
except:
ionosphere_echo_max_fp_create_time = 55
p_starts = time()
while time() - p_starts <= ionosphere_echo_max_fp_create_time:
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info(
'%s %s completed in %.2f seconds for ionosphere.echo.work item' % (
str(IONOSPHERE_PROCESSES),
function_name, time_to_run))
work_set = 'ionosphere.echo.work'
try:
self.redis_conn.srem(work_set, str(echo_metric_list))
logger.info('removed work item - %s - from Redis set - %s' % (str(echo_metric_list), work_set))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove work item list from Redis set - %s' % work_set)
self.remove_metric_check_file(echo_metric_check_file)
break
else:
# We only enter this if we didn't 'break' above.
logger.info('timed out, killing all %s processes for ionosphere.echo.work item' % (function_name))
for p in pids:
try:
p.terminate()
# p.join()
logger.info('killed %s process for ionosphere.echo.work item' % (function_name))
except:
logger.error(traceback.format_exc())
logger.error('error :: killing all %s processes for ionosphere.echo.work item' % function_name)
if not metric_var_files:
logger.info('sleeping 20 no metric check files')
sleep(20)
up_now = time()
# Report app up
try:
self.redis_conn.setex(skyline_app, 120, up_now)
logger.info('updated Redis key for %s up' % skyline_app)
except:
logger.error('error :: failed to update Redis key for %s up' % skyline_app)
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# Branch #922: Ionosphere
# Bringing Ionosphere online - do alert on Ionosphere metrics
# Manage the ionosphere.unique_metrics Redis set which is queried
# by Analyzer and Mirage, yes and we use multiprocessing
last_update = None
try:
last_update = self.redis_conn.get('ionosphere.manage_ionosphere_unique_metrics')
except Exception as e:
logger.error('error :: could not query Redis for ionosphere.manage_ionosphere_unique_metrics: %s' % e)
if not last_update:
pids = []
now = time()
try:
logger.info('starting manage_ionosphere_unique_metrics process')
p = Process(target=self.manage_ionosphere_unique_metrics)
pids.append(p)
p.start()
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to start manage_ionosphere_unique_metrics')
# Self monitor process and terminate if run for too long
p_starts = time()
# @modified 20200507 - increase the allowed time
# while time() - p_starts <= 5:
while time() - p_starts <= 20:
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info(
'manage_ionosphere_unique_metrics completed in %.2f seconds' % (
time_to_run))
break
else:
# We only enter this if we didn't 'break' above.
logger.info('%s :: timed out, killing manage_ionosphere_unique_metrics process' % (skyline_app))
for p in pids:
try:
p.terminate()
# p.join()
logger.info('%s :: killed manage_ionosphere_unique_metrics process' % (skyline_app))
except:
logger.error(traceback.format_exc())
logger.error('error :: killing all manage_ionosphere_unique_metrics processes')
# Discover metric anomalies to insert
metric_var_files = False
try:
metric_var_files = [f for f in listdir(settings.IONOSPHERE_CHECK_PATH) if isfile(join(settings.IONOSPHERE_CHECK_PATH, f))]
logger.info('metric check files found - %s' % str(len(metric_var_files)))
except:
logger.error('error :: failed to list files in check dir')
logger.info(traceback.format_exc())
# @added 20170104 - Feature #1842: Ionosphere - Graphite now graphs
# Task #1658: Patterning Skyline Ionosphere
# Send Ionosphere metrics to Graphite every minute now that
# Ionosphere is better tuned and Reset lists
cache_key = '%s.sent_graphite_metrics' % skyline_app
redis_sent_graphite_metrics = False
try:
redis_sent_graphite_metrics = self.redis_conn.get(cache_key)
except Exception as e:
logger.error('error :: could not query Redis for key %s: %s' % (cache_key, e))
# Flush metrics to Graphite
if not redis_sent_graphite_metrics:
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# not_anomalous = str(len(self.not_anomalous))
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# not_anomalous = str(len(list(self.redis_conn.smembers('ionosphere.not_anomalous'))))
not_anomalous = str(len(list(self.redis_conn_decoded.smembers('ionosphere.not_anomalous'))))
except:
not_anomalous = '0'
logger.info('not_anomalous :: %s' % not_anomalous)
send_metric_name = '%s.not_anomalous' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, not_anomalous)
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis set instead of Manager() list
# total_anomalies = str(len(self.anomalous_metrics))
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# total_anomalies = str(len(list(self.redis_conn.smembers('ionosphere.anomalous_metrics'))))
total_anomalies = str(len(list(self.redis_conn_decoded.smembers('ionosphere.anomalous_metrics'))))
except:
total_anomalies = '0'
logger.info('total_anomalies :: %s' % total_anomalies)
send_metric_name = '%s.total_anomalies' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, total_anomalies)
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# training_metrics = str(len(self.training_metrics))
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# training_metrics = str(len(list(self.redis_conn.smembers('ionosphere.training_metrics'))))
training_metrics = str(len(list(self.redis_conn_decoded.smembers('ionosphere.training_metrics'))))
except:
training_metrics = '0'
logger.info('training metrics :: %s' % training_metrics)
send_metric_name = '%s.training_metrics' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, training_metrics)
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# features_profiles_checked = str(len(self.features_profiles_checked))
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# features_profiles_checked = str(len(list(self.redis_conn.smembers('ionosphere.features_profiles_checked'))))
features_profiles_checked = str(len(list(self.redis_conn_decoded.smembers('ionosphere.features_profiles_checked'))))
except:
features_profiles_checked = '0'
logger.info('fps checked count :: %s' % features_profiles_checked)
send_metric_name = '%s.fps_checked' % skyline_app_graphite_namespace
# @modified 20170306 - Feature #1960: ionosphere_layers
# Corrected namespace
# send_graphite_metric(skyline_app, send_metric_name, not_anomalous)
send_graphite_metric(skyline_app, send_metric_name, features_profiles_checked)
# @added 20170306 - Feature #1960: ionosphere_layers
try:
# @modified 20181014 - Feature #2430: Ionosphere validate learnt features profiles page
# layers_checked = str(len(self.layers_checked))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# str_layers_checked = str(len(self.layers_checked))
str_layers_checked = str(len(list(self.redis_conn.smembers('ionosphere.layers_checked'))))
except:
str_layers_checked = '0'
logger.info('layers checked count :: %s' % str_layers_checked)
send_metric_name = '%s.layers_checked' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, str_layers_checked)
if settings.PANORAMA_ENABLED:
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# sent_to_panorama = str(len(self.sent_to_panorama))
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# sent_to_panorama = str(len(list(self.redis_conn.smembers('ionosphere.sent_to_panorama'))))
sent_to_panorama = str(len(list(self.redis_conn_decoded.smembers('ionosphere.sent_to_panorama'))))
except:
sent_to_panorama = '0'
logger.info('sent_to_panorama :: %s' % sent_to_panorama)
send_metric_name = '%s.sent_to_panorama' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, sent_to_panorama)
sent_graphite_metrics_now = int(time())
try:
self.redis_conn.setex(cache_key, 59, sent_graphite_metrics_now)
logger.info('updated Redis key - %s' % cache_key)
except:
logger.error('error :: failed to update Redis key - %s up' % cache_key)
# Reset lists
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets instead of Manager().list()
# self.anomalous_metrics[:] = []
# self.not_anomalous[:] = []
# self.features_profiles_checked[:] = []
# self.training_metrics[:] = []
# self.sent_to_panorama[:] = []
# @added 20170306 - Feature #1960: ionosphere_layers
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets instead of Manager().list()
# self.layers_checked[:] = []
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets instead of Manager().list()
delete_redis_sets = [
'ionosphere.anomalous_metrics',
'ionosphere.not_anomalous',
'ionosphere.features_profiles_checked',
'ionosphere.training_metrics',
'ionosphere.sent_to_panorama',
'ionosphere.layers_checked',
]
for i_redis_set in delete_redis_sets:
redis_set_to_delete = i_redis_set
try:
self.redis_conn.delete(redis_set_to_delete)
logger.info('deleted Redis set - %s' % redis_set_to_delete)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to delete Redis set - %s' % redis_set_to_delete)
ionosphere_job = False
learn_job = False
# @added 20190524 - Bug #3050: Ionosphere - Skyline and Graphite feedback
# Do not run checks if the namespace is a declared SKYLINE_FEEDBACK_NAMESPACES
# namespace that has been checked in the last 10 minutes if
# there are multiple checks to do.
rate_limit_feedback_metrics = False
if metric_var_files:
metric_var_files_sorted = sorted(metric_var_files)
metric_check_file = '%s/%s' % (settings.IONOSPHERE_CHECK_PATH, str(metric_var_files_sorted[0]))
metric_var_files_count = len(metric_var_files_sorted)
if metric_var_files_count > 2:
rate_limit_feedback_metrics = True
logger.info('rate_limit_feedback_metrics set to %s' % (str(rate_limit_feedback_metrics)))
if rate_limit_feedback_metrics:
for i_metric_check_file in metric_var_files_sorted:
feedback_metric = False
check_metric_file_list = i_metric_check_file.split('.', -1)[1:]
last_name_element = len(check_metric_file_list) - 1
base_name = '.'.join(check_metric_file_list[0:last_name_element])
metric_namespace_elements = base_name.split('.')
for to_skip in SKYLINE_FEEDBACK_NAMESPACES:
if to_skip in base_name:
feedback_metric = True
logger.info('SKYLINE_FEEDBACK_NAMESPACES matched on to_skip %s in base_name %s' % (to_skip, base_name))
break
to_skip_namespace_elements = to_skip.split('.')
elements_matched = set(metric_namespace_elements) & set(to_skip_namespace_elements)
if len(elements_matched) == len(to_skip_namespace_elements):
feedback_metric = True
logger.info('SKYLINE_FEEDBACK_NAMESPACES matched elements in %s' % base_name)
break
if feedback_metric:
remove_feedback_metric_check = False
if metric_var_files_count > 4:
logger.info('rate limiting feedback metric, removing check for %s as Ionosphere has %s pending checks, not checking feedback metric' % (
base_name, str(metric_var_files_count)))
remove_feedback_metric_check = True
cache_key = 'ionosphere.feedback_metric.checked.%s' % (base_name)
check_done = False
try:
check_done = self.redis_conn.get(cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
if not check_done:
logger.info('not removing feedback metric as no check has been done in last 600 seconds on %s' % base_name)
remove_feedback_metric_check = False
else:
logger.info('rate limiting feedback metric, removing check as %s has been checked in the last 600 seconds' % (
base_name))
remove_feedback_metric_check = True
if remove_feedback_metric_check:
metric_check_file = '%s/%s' % (settings.IONOSPHERE_CHECK_PATH, i_metric_check_file)
# @added 20200907 - Feature #3734: waterfall alerts
# Remove the metric from the waterfall_alerts Redis set
# [metric, timestamp, value, added_to_waterfall_timestamp]
# waterfall_data = [metric[1], metric[2], metric[0], added_to_waterfall_timestamp]
redis_set = 'analyzer.waterfall_alerts.sent_to_ionosphere'
metric_check_file_timestamp = i_metric_check_file.split('.', -1)[0]
literal_waterfall_alerts = []
try:
literal_waterfall_alerts = list(self.redis_conn_decoded.smembers(redis_set))
except:
literal_waterfall_alerts = []
waterfall_alerts = []
for literal_waterfall_alert in literal_waterfall_alerts:
waterfall_alert = literal_eval(literal_waterfall_alert)
waterfall_alerts.append(waterfall_alert)
for waterfall_alert in waterfall_alerts:
if waterfall_alert[0] == base_name:
if int(waterfall_alert[1]) == int(metric_check_file_timestamp):
try:
self.redis_conn.srem(redis_set, str(waterfall_alert))
logger.info('removed feedback metric waterfall alert item from Redis set %s - %s' % (
redis_set, str(waterfall_alert)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove feedback metric waterfall alert item for %s from Redis set %s' % (
base_name, redis_set))
redis_set = 'mirage.waterfall_alerts.sent_to_ionosphere'
literal_waterfall_alerts = []
try:
literal_waterfall_alerts = list(self.redis_conn_decoded.smembers(redis_set))
except:
literal_waterfall_alerts = []
waterfall_alerts = []
for literal_waterfall_alert in literal_waterfall_alerts:
waterfall_alert = literal_eval(literal_waterfall_alert)
waterfall_alerts.append(waterfall_alert)
for waterfall_alert in waterfall_alerts:
if waterfall_alert[0] == base_name:
if int(waterfall_alert[1]) == int(metric_check_file_timestamp):
try:
self.redis_conn.srem(redis_set, str(waterfall_alert))
logger.info('removed feedback metric waterfall alert item from Redis set %s - %s' % (
redis_set, str(waterfall_alert)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove feedback metric waterfall alert item for %s from Redis set %s' % (
base_name, redis_set))
self.remove_metric_check_file(str(metric_check_file))
# Determine metric_var_files after possible feedback metric removals
metric_var_files = False
try:
metric_var_files = [f for f in listdir(settings.IONOSPHERE_CHECK_PATH) if isfile(join(settings.IONOSPHERE_CHECK_PATH, f))]
except:
logger.error('error :: failed to list files in check dir')
logger.info(traceback.format_exc())
# @added 20200414 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Prioritise realtime metric checks over analyzer_batch checks
# as if a lot of anomalies are submitted from analyzer_batch
# and they are processed first then real time metrics waiting to
# be processed could the max_age_seconds time limit. Batch
# anomalies are not submitted to max_age_seconds check,
# therefore they will get done in due course.
prioritise_realtime_checks = True
remove_batch_anomalies_check_files = []
realtime_metric_var_files_count = 0
batch_metric_var_files_count = 0
# If there are realtime metric anomalies and batch metric
# anomalies prioritise the realtime checks by removing the
# batch anomaly checks from the metric_var_files
if metric_var_files and prioritise_realtime_checks and BATCH_PROCESSING:
if rate_limit_feedback_metrics:
prioritise_realtime_checks = False
logger.info('prioritise_realtime_checks set to %s' % (str(prioritise_realtime_checks)))
try:
metric_var_files_sorted = []
if metric_var_files:
metric_var_files_sorted = sorted(metric_var_files)
# logger.info('prioritise_realtime_checks checking %s metrics for batch anomalies' % (str(len(metric_var_files_sorted))))
for i_metric_check_file in metric_var_files_sorted:
analyzer_batch_anomaly = None
check_file_anomaly_timestamp = None
try:
check_metric_file_list = i_metric_check_file.split('.', -1)[1:]
last_name_element = len(check_metric_file_list) - 1
base_name = '.'.join(check_metric_file_list[0:last_name_element])
i_metric_check_filename = i_metric_check_file.replace(settings.IONOSPHERE_CHECK_PATH + '/', '')
check_file_anomaly_timestamp = i_metric_check_filename.split('.', 1)[0]
except Exception as e:
logger.error('error :: could not determine anomaly_timestamp from filename %s - %s' % (
i_metric_check_file, str(e)))
check_file_anomaly_timestamp = None
# Is this a analyzer_batch related anomaly
if check_file_anomaly_timestamp:
analyzer_batch_metric_anomaly_key = 'analyzer_batch.anomaly.%s.%s' % (
str(check_file_anomaly_timestamp), base_name)
try:
analyzer_batch_anomaly = self.redis_conn.get(analyzer_batch_metric_anomaly_key)
except Exception as e:
logger.error(
'error :: could not query cache_key - %s - %s' % (
analyzer_batch_metric_anomaly_key, e))
analyzer_batch_anomaly = None
if analyzer_batch_anomaly:
logger.info('batch processing - identified as an analyzer_batch triggered anomaly from the presence of the Redis key %s' % analyzer_batch_metric_anomaly_key)
remove_batch_anomalies_check_files.append(i_metric_check_file)
batch_metric_var_files_count += 1
else:
realtime_metric_var_files_count += 1
# logger.info('batch processing - no batch anomaly Redis key found - %s' % analyzer_batch_metric_anomaly_key)
# @added 20200414 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# If there are realtime metric anomalies and batch metric
# anomalies prioritise the realtime checks by removing the
# batch anomaly checks from the metric_var_files
realtime_metric_var_files = []
if realtime_metric_var_files_count > 0:
if remove_batch_anomalies_check_files:
for metric_var_file in metric_var_files_sorted:
if metric_var_file in remove_batch_anomalies_check_files:
logger.info('removing batch anomaly check file to prioritise realtime metric checks - %s' % str(metric_var_file))
else:
realtime_metric_var_files.append(metric_var_file)
if realtime_metric_var_files:
realtime_metric_var_files_count = len(realtime_metric_var_files)
metric_var_files = realtime_metric_var_files
logger.info('removed %s batch anomaly check files from metric_var_files list to prioritise the %s realtime metric checks' % (
str(batch_metric_var_files_count),
str(realtime_metric_var_files_count)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to determine batch anomalies')
if metric_var_files:
ionosphere_job = True
logger.info('%s metric check files, so set to ionosphere_job = True' % (str(len(metric_var_files))))
break
# @added 20170113 - Feature #1854: Ionosphere learn
# Added the learn variable to spawn a spawn_learn_process when
# required.
work_queue_items = 0
if settings.IONOSPHERE_LEARN:
learn_work = None
try:
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# learn_work = self.redis_conn.smembers('ionosphere.learn.work')
learn_work = self.redis_conn_decoded.smembers('ionosphere.learn.work')
except Exception as e:
logger.error('error :: could not query Redis for ionosphere.learn.work - %s' % e)
if learn_work:
work_queue_items = len(learn_work)
if work_queue_items > 0:
learn_job = True
if learn_job:
break
# @added 20190404 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# Do not run an Ionosphere and echo checks on a metrics when a lot of
# checks are being done. Manage the Ionosphere load and increased
# runtime in general that Ionosphere echo has introduced, especially
# when Ionosphere is issued lots of checks, if lots of metrics suddenly
# become anomalous.
metric_var_files_count = 0
ionosphere_busy = False
if ionosphere_job:
metric_var_files_sorted = sorted(metric_var_files)
metric_check_file = '%s/%s' % (settings.IONOSPHERE_CHECK_PATH, str(metric_var_files_sorted[0]))
# @added 20190403 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# Added a count of the number of checks to be done
metric_var_files_count = len(metric_var_files)
# If there are more than 4 metric check files set Ionosphere to
# busy so that Ionosphere alternates between checking the normal
# Ionosphere Mirage features profiles and the Ionosphere echo
# features profiles on subsequent checks of a metric so that
# when Ionosphere is busy it is not checking both sets of
# features profiles on every run.
if metric_var_files_count > 4:
ionosphere_busy = True
# @added 20170108 - Feature #1830: Ionosphere alerts
# Adding lists of smtp_alerter_metrics and ionosphere_non_smtp_alerter_metrics
# Timed this takes 0.013319 seconds on 689 unique_metrics
unique_metrics = []
try:
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# unique_metrics = list(self.redis_conn.smembers(settings.FULL_NAMESPACE + 'unique_metrics'))
unique_metrics = list(self.redis_conn_decoded.smembers(settings.FULL_NAMESPACE + 'unique_metrics'))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get the unique_metrics list from Redis')
unique_metrics = []
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# The Redis analyzer.smtp_alerter_metrics list is created here to
# replace the self.ionosphere_smtp_alerter_metrics Manager.list in the below
# section
ionosphere_smtp_alerter_metrics = []
try:
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# ionosphere_smtp_alerter_metrics = list(self.redis_conn.smembers('ionosphere.ionosphere_smtp_alerter_metrics'))
ionosphere_smtp_alerter_metrics = list(self.redis_conn_decoded.smembers('ionosphere.ionosphere_smtp_alerter_metrics'))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to generate a list from the ionosphere_smtp_alerter_metrics Redis set')
ionosphere_smtp_alerter_metrics = []
redis_sets_to_rename = [
'ionosphere.ionosphere_smtp_alerter_metrics',
'ionosphere.ionosphere_non_smtp_alerter_metrics'
]
for current_redis_set in redis_sets_to_rename:
new_redis_set = '%s.old' % current_redis_set
try:
self.redis_conn.rename(current_redis_set, new_redis_set)
except Exception as e:
if str(e) == 'no such key':
logger.info('could not rename Redis set %s to %s: %s' % (
current_redis_set, new_redis_set, str(e)))
else:
logger.error('error :: could not rename Redis set %s to %s: %s' % (
current_redis_set, new_redis_set, str(e)))
for metric_name in unique_metrics:
# @modified 20200728 - Bug #3652: Handle multiple metrics in base_name conversion
# base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
if metric_name.startswith(settings.FULL_NAMESPACE):
base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
else:
base_name = metric_name
for alert in settings.ALERTS:
pattern_match = False
if str(alert[1]) == 'smtp':
ALERT_MATCH_PATTERN = alert[0]
METRIC_PATTERN = base_name
pattern_match = False
try:
# Match by regex
alert_match_pattern = re.compile(ALERT_MATCH_PATTERN)
pattern_match = alert_match_pattern.match(METRIC_PATTERN)
if pattern_match:
pattern_match = True
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# if base_name not in self.ionosphere_smtp_alerter_metrics:
if base_name not in ionosphere_smtp_alerter_metrics:
# self.ionosphere_smtp_alerter_metrics.append(base_name)
redis_set = 'ionosphere.ionosphere_smtp_alerter_metrics'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
except:
pattern_match = False
if not pattern_match:
# Match by substring
if alert[0] in base_name:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# if base_name not in self.ionosphere_smtp_alerter_metrics:
# self.ionosphere_smtp_alerter_metrics.append(base_name)
if base_name not in ionosphere_smtp_alerter_metrics:
redis_set = 'ionosphere.ionosphere_smtp_alerter_metrics'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# if base_name not in self.ionosphere_smtp_alerter_metrics:
# if base_name not in self.ionosphere_smtp_alerter_metrics:
# self.ionosphere_non_smtp_alerter_metrics.append(base_name)
if base_name not in ionosphere_smtp_alerter_metrics:
redis_set = 'ionosphere.ionosphere_non_smtp_alerter_metrics'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# The Redis lists are used here to replace the self.ionosphere_
# Manager().list()
ionosphere_smtp_alerter_metrics = []
try:
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# ionosphere_smtp_alerter_metrics = list(self.redis_conn.smembers('ionosphere.ionosphere_smtp_alerter_metrics'))
ionosphere_smtp_alerter_metrics = list(self.redis_conn_decoded.smembers('ionosphere.ionosphere_smtp_alerter_metrics'))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to generate a list from the ionosphere_smtp_alerter_metrics Redis set')
ionosphere_smtp_alerter_metrics = []
ionosphere_non_smtp_alerter_metrics = []
try:
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# ionosphere_non_smtp_alerter_metrics = list(self.redis_conn.smembers('ionosphere.ionosphere_non_smtp_alerter_metrics'))
ionosphere_non_smtp_alerter_metrics = list(self.redis_conn_decoded.smembers('ionosphere.ionosphere_non_smtp_alerter_metrics'))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to generate a list from the ionosphere_non_smtp_alerter_metrics Redis set')
ionosphere_non_smtp_alerter_metrics = []
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# logger.info('smtp_alerter_metrics :: %s' % str(len(self.ionosphere_smtp_alerter_metrics)))
# logger.info('ionosphere_non_smtp_alerter_metrics :: %s' % str(len(self.ionosphere_non_smtp_alerter_metrics)))
logger.info('smtp_alerter_metrics :: %s' % str(len(ionosphere_smtp_alerter_metrics)))
logger.info('ionosphere_non_smtp_alerter_metrics :: %s' % str(len(ionosphere_non_smtp_alerter_metrics)))
if ionosphere_job:
# @added 20190326 - Feature #2484
# First process ionosphere_echo to create any missing
try:
ionosphere_echo_enabled = settings.IONOSPHERE_ECHO_ENABLED
except:
ionosphere_echo_enabled = False
# @added 20190403 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# If there are more than 2 metric check files, do not run
# process_ionosphere_echo to create echo features profiles
run_process_ionosphere_echo = True
if metric_var_files_count > 2:
run_process_ionosphere_echo = False
logger.info(
'not running process_ionosphere_echo as there are %s metric check files to be checked' % (
str(metric_var_files_count)))
# @added 20190527 - Feature #2484: FULL_DURATION feature profiles
# Branch #3002: docker
# Only process if there is a ionosphere.unique_metrics Redis set
if run_process_ionosphere_echo:
ionosphere_unique_metrics = []
try:
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# ionosphere_unique_metrics = self.redis_conn.smembers('ionosphere.unique_metrics')
ionosphere_unique_metrics = self.redis_conn_decoded.smembers('ionosphere.unique_metrics')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get Redis smembers ionosphere.unique_metrics')
ionosphere_unique_metrics = []
# @added 20190527 - Feature #2484: FULL_DURATION feature profiles
if not ionosphere_unique_metrics:
logger.info('there are metrics in the Redis ionosphere.unique_metrics set, skipping process_ionosphere_echo')
# If there are more than 4 metric check files set Ionosphere to
# busy so that Ionosphere alternates between checking the normal
# Ionosphere Mirage features profiles and the Ionosphere echo
# features profiles on subsequent checks of a metric so that
# when Ionosphere is busy it is not checking both sets of
# features profiles on every run.
if metric_var_files_count > 4:
ionosphere_busy = True
if ionosphere_echo_enabled and run_process_ionosphere_echo:
# Spawn a single process_ionosphere_echo process
function_name = 'process_ionosphere_echo'
pids = []
spawned_pids = []
pid_count = 0
now = time()
for i in range(1, IONOSPHERE_PROCESSES + 1):
try:
p = Process(target=self.process_ionosphere_echo, args=(i, metric_check_file))
pids.append(p)
pid_count += 1
logger.info(
'starting %s of %s %s' % (
str(pid_count), str(IONOSPHERE_PROCESSES),
function_name))
p.start()
spawned_pids.append(p.pid)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to start %s' % function_name)
continue
# Self monitor the process and terminate if the
# process_ionosphere_echo has run for too long
try:
ionosphere_echo_max_fp_create_time = settings.IONOSPHERE_ECHO_MAX_FP_CREATE_TIME
except:
ionosphere_echo_max_fp_create_time = 55
p_starts = time()
while time() - p_starts <= ionosphere_echo_max_fp_create_time:
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info(
'%s %s completed in %.2f seconds' % (
str(IONOSPHERE_PROCESSES),
function_name, time_to_run))
break
else:
# We only enter this if we didn't 'break' above.
logger.info('timed out, killing all %s processes' % (function_name))
for p in pids:
try:
p.terminate()
# p.join()
logger.info('killed %s process' % (function_name))
except:
logger.error(traceback.format_exc())
logger.error('error :: killing all %s processes' % function_name)
logger.info('processing - %s' % str(metric_var_files_sorted[0]))
function_name = 'spin_process'
# @added 20170109 - Feature #1854: Ionosphere learn
# Added the learn variable to spawn a spawn_learn_process when
# required.
# @added 20170112 - Feature #1854: Ionosphere learn - Redis ionosphere.learn.work namespace
# Ionosphere learn needs Redis works sets
# When a features profile is created there needs to be work added to a Redis
# set
# When a human makes a features profile, we want Ionosphere to make a
# use_full_duration_days features profile valid_learning_duration (e.g.
# 3361) later.
if learn_job:
logger.info('processing - learn work queue - %s' % str(work_queue_items))
function_name = 'spawn_learn_process'
# Spawn processes
pids = []
spawned_pids = []
pid_count = 0
now = time()
# @modified 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# for i in range(1, settings.IONOSPHERE_PROCESSES + 1):
for i in range(1, IONOSPHERE_PROCESSES + 1):
if ionosphere_job:
try:
# @modified 20190404 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# Added ionosphere_busy if there are queued checks
# to ensure that Ionosphere echo is rate limited if a
# lot of metrics become anomalous and that Ionosphere
# alternates between normal Mirage features profiles
# comparisons and Ionosphere echo features profiles
# during busy times.
# p = Process(target=self.spin_process, args=(i, metric_check_file))
p = Process(target=self.spin_process, args=(i, metric_check_file, ionosphere_busy))
pids.append(p)
pid_count += 1
logger.info(
'starting %s of %s %s' % (
str(pid_count),
# @modified 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# str(settings.IONOSPHERE_PROCESSES),
str(IONOSPHERE_PROCESSES),
function_name))
p.start()
spawned_pids.append(p.pid)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to start %s' % function_name)
continue
# @added 20170113 - Feature #1854: Ionosphere learn - Redis ionosphere.learn.work namespace
if learn_job:
try:
p = Process(target=self.spawn_learn_process, args=(i, int(now)))
pids.append(p)
pid_count += 1
logger.info(
'starting %s of %s %s' % (
str(pid_count),
# @modified 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# str(settings.IONOSPHERE_PROCESSES),
str(IONOSPHERE_PROCESSES),
function_name))
p.start()
spawned_pids.append(p.pid)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to start %s' % function_name)
continue
# Self monitor processes and terminate if any spin_process has run
# for to long
p_starts = time()
# @modified 20180621 - Feature #2404: Ionosphere - fluid approximation
# Increase run time to 55 seconds to allow for Min-Max scaling
# while time() - p_starts <= 20:
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# Added ionosphere_echo which takes more time
# while time() - p_starts <= 55:
try:
ionosphere_max_runtime = settings.IONOSPHERE_MAX_RUNTIME
except:
ionosphere_max_runtime = 120
while time() - p_starts <= ionosphere_max_runtime:
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info(
'%s %s completed in %.2f seconds' % (
# @modified 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# str(settings.IONOSPHERE_PROCESSES),
str(IONOSPHERE_PROCESSES),
function_name, time_to_run))
break
else:
# We only enter this if we didn't 'break' above.
logger.info('timed out, killing all %s processes' % (function_name))
for p in pids:
try:
p.terminate()
# p.join()
logger.info('killed %s process' % (function_name))
except:
logger.error(traceback.format_exc())
logger.error('error :: killing all %s processes' % function_name)
if ionosphere_job:
check_file_name = os.path.basename(str(metric_check_file))
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_name - %s' % check_file_name)
check_file_timestamp = check_file_name.split('.', 1)[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_timestamp - %s' % str(check_file_timestamp))
check_file_metricname_txt = check_file_name.split('.', 1)[1]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname_txt - %s' % check_file_metricname_txt)
check_file_metricname = check_file_metricname_txt.replace('.txt', '')
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname - %s' % check_file_metricname)
check_file_metricname_dir = check_file_metricname.replace('.', '/')
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname_dir - %s' % check_file_metricname_dir)
metric_failed_check_dir = '%s/%s/%s' % (failed_checks_dir, check_file_metricname_dir, check_file_timestamp)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
for p in pids:
if p.is_alive():
# @modified 20191031 - Bug #3296: Ionosphere spawn_learn_process hanging on docker
# Branch #3002 - docker
# Use terminate not join for docker
# logger.info('stopping %s - %s' % (function_name, str(p.is_alive())))
# p.join()
logger.info('killing %s - %s' % (function_name, str(p.is_alive())))
p.terminate()
# @added 20170108 - Feature #1830: Ionosphere alerts
# Reset added lists of ionospehere_smtp_alerter_metrics and
# ionosphere_non_smtp_alerter_metrics
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.ionosphere_smtp_alerter_metrics[:] = []
# self.ionosphere_non_smtp_alerter_metrics[:] = []
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets instead of Manager().list()
# delete_redis_sets = [
# 'ionosphere.ionosphere_smtp_alerter_metrics',
# 'ionosphere.ionosphere_non_smtp_alerter_metrics',
# ]
delete_redis_sets = [
'ionosphere.ionosphere_smtp_alerter_metrics.old',
'ionosphere.ionosphere_non_smtp_alerter_metrics.old',
]
for i_redis_set in delete_redis_sets:
redis_set_to_delete = i_redis_set
try:
self.redis_conn.delete(redis_set_to_delete)
logger.info('deleted Redis set - %s' % redis_set_to_delete)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to delete Redis set - %s' % redis_set_to_delete)
|
asyncorereactor.py | # Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
from collections import deque
from functools import partial
import logging
import os
import socket
import sys
from threading import Lock, Thread
import time
import weakref
from six.moves import range
try:
from weakref import WeakSet
except ImportError:
from cassandra.util import WeakSet # noqa
import asyncore
try:
import ssl
except ImportError:
ssl = None # NOQA
from cassandra.connection import Connection, ConnectionShutdown, NONBLOCKING, Timer, TimerManager
log = logging.getLogger(__name__)
_dispatcher_map = {}
def _cleanup(loop_weakref):
try:
loop = loop_weakref()
except ReferenceError:
return
loop._cleanup()
class _PipeWrapper(object):
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
def close(self):
os.close(self.fd)
def getsockopt(self, level, optname, buflen=None):
# act like an unerrored socket for the asyncore error handling
if level == socket.SOL_SOCKET and optname == socket.SO_ERROR and not buflen:
return 0
raise NotImplementedError()
class _AsyncoreDispatcher(asyncore.dispatcher):
    # Base class for loop wake-up dispatchers. Subclasses provide
    # notify_loop() (send a wake-up) and handle_read() (drain it) over
    # some selectable fd registered in the shared _dispatcher_map.

    def __init__(self, socket):
        asyncore.dispatcher.__init__(self, map=_dispatcher_map)
        # inject after to avoid base class validation
        self.set_socket(socket)
        # True while a wake-up is in flight and not yet drained.
        self._notified = False

    def writable(self):
        # The wake-up channel is read-only; never select for write.
        return False

    def validate(self):
        """Sanity-check that a notify/loop cycle round-trips:
        notify sets the flag, one loop pass drains it again."""
        assert not self._notified
        self.notify_loop()
        assert self._notified
        self.loop(0.1)
        assert not self._notified

    def loop(self, timeout):
        # Run at most one iteration of the shared asyncore loop.
        asyncore.loop(timeout=timeout, use_poll=True, map=_dispatcher_map, count=1)
class _AsyncorePipeDispatcher(_AsyncoreDispatcher):
    # Self-pipe wake-up: writing a byte to write_fd makes read_fd
    # selectable, which wakes the asyncore loop.

    def __init__(self):
        self.read_fd, self.write_fd = os.pipe()
        _AsyncoreDispatcher.__init__(self, _PipeWrapper(self.read_fd))

    def writable(self):
        return False

    def handle_read(self):
        # Drain the pipe: a full 4096-byte read means more may be
        # pending, so keep reading until a short read.
        while len(os.read(self.read_fd, 4096)) == 4096:
            pass
        self._notified = False

    def notify_loop(self):
        # Write at most one wake-up byte per notification cycle; the
        # flag is cleared again by handle_read() on the loop thread.
        if not self._notified:
            self._notified = True
            os.write(self.write_fd, b'x')
class _AsyncoreUDPDispatcher(_AsyncoreDispatcher):
    """
    Experimental alternate dispatcher for avoiding busy wait in the asyncore loop. It is not used by default because
    it relies on local port binding.
    Port scanning is not implemented, so multiple clients on one host will collide. This address would need to be set per
    instance, or this could be specialized to scan until an address is found.
    To use::
        from cassandra.io.asyncorereactor import _AsyncoreUDPDispatcher, AsyncoreLoop
        AsyncoreLoop._loop_dispatch_class = _AsyncoreUDPDispatcher
    """
    # Fixed local endpoint -- see the collision caveat in the docstring.
    bind_address = ('localhost', 10000)

    def __init__(self):
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self._socket.bind(self.bind_address)
        # Non-blocking so draining in handle_read never stalls the loop.
        self._socket.setblocking(0)
        _AsyncoreDispatcher.__init__(self, self._socket)

    def handle_read(self):
        # Drain all pending wake-up datagrams, then clear the flag.
        try:
            d = self._socket.recvfrom(1)
            while d and d[1]:
                d = self._socket.recvfrom(1)
        except socket.error as e:
            pass
        self._notified = False

    def notify_loop(self):
        if not self._notified:
            self._notified = True
            # A zero-length datagram to ourselves is enough to make the
            # socket readable and wake select().
            self._socket.sendto(b'', self.bind_address)

    def loop(self, timeout):
        asyncore.loop(timeout=timeout, use_poll=False, map=_dispatcher_map, count=1)
class _BusyWaitDispatcher(object):
max_write_latency = 0.001
"""
Timeout pushed down to asyncore select/poll. Dictates the amount of time it will sleep before coming back to check
if anything is writable.
"""
def notify_loop(self):
pass
def loop(self, timeout):
if not _dispatcher_map:
time.sleep(0.005)
count = timeout // self.max_write_latency
asyncore.loop(timeout=self.max_write_latency, use_poll=True, map=_dispatcher_map, count=count)
def validate(self):
pass
def close(self):
pass
class AsyncoreLoop(object):
    """Background daemon thread that drives the shared asyncore event
    loop and services connection timers for every AsyncoreConnection
    in this process."""

    timer_resolution = 0.1  # used as the max interval to be in the io loop before returning to service timeouts

    # Windows has no pipe fd that select() can watch, so fall back to
    # the busy-wait strategy there.
    _loop_dispatch_class = _AsyncorePipeDispatcher if os.name != 'nt' else _BusyWaitDispatcher

    def __init__(self):
        # Remember the creating pid so a fork can be detected later
        # (see AsyncoreConnection.initialize_reactor).
        self._pid = os.getpid()
        self._loop_lock = Lock()
        self._started = False
        self._shutdown = False
        self._thread = None
        self._timers = TimerManager()
        # Prefer the platform dispatcher, but prove it works before
        # trusting it; otherwise degrade to busy-wait polling.
        try:
            dispatcher = self._loop_dispatch_class()
            dispatcher.validate()
            log.debug("Validated loop dispatch with %s", self._loop_dispatch_class)
        except Exception:
            log.exception("Failed validating loop dispatch with %s. Using busy wait execution instead.", self._loop_dispatch_class)
            dispatcher.close()
            dispatcher = _BusyWaitDispatcher()
        self._loop_dispatcher = dispatcher
        # Register cleanup against a weakref so the atexit hook does not
        # keep this loop (and its thread) alive.
        atexit.register(partial(_cleanup, weakref.ref(self)))

    def maybe_start(self):
        """Start the loop thread exactly once; safe to call repeatedly
        from multiple threads, never blocks."""
        should_start = False
        did_acquire = False
        try:
            # Non-blocking acquire: if another thread holds the lock the
            # loop is already starting/running, so do nothing here.
            did_acquire = self._loop_lock.acquire(False)
            if did_acquire and not self._started:
                self._started = True
                should_start = True
        finally:
            if did_acquire:
                self._loop_lock.release()
        if should_start:
            self._thread = Thread(target=self._run_loop, name="cassandra_driver_event_loop")
            self._thread.daemon = True
            self._thread.start()

    def wake_loop(self):
        # Nudge the dispatcher so newly queued writes are serviced promptly.
        self._loop_dispatcher.notify_loop()

    def _run_loop(self):
        """Thread body: alternate between one asyncore IO pass and
        servicing expired timers, until _cleanup() sets _shutdown."""
        log.debug("Starting asyncore event loop")
        # Holding _loop_lock for the thread's lifetime makes the
        # non-blocking acquire in maybe_start() fail while running.
        with self._loop_lock:
            while not self._shutdown:
                try:
                    self._loop_dispatcher.loop(self.timer_resolution)
                    self._timers.service_timeouts()
                except Exception:
                    log.debug("Asyncore event loop stopped unexepectedly", exc_info=True)
                    break
            self._started = False
        log.debug("Asyncore event loop ended")

    def add_timer(self, timer):
        self._timers.add_timer(timer)

    def _cleanup(self):
        """Signal shutdown and wait briefly for the loop thread to exit;
        called from the atexit hook (see module-level _cleanup)."""
        self._shutdown = True
        if not self._thread:
            return
        log.debug("Waiting for event loop thread to join...")
        self._thread.join(timeout=1.0)
        if self._thread.is_alive():
            log.warning(
                "Event loop thread could not be joined, so shutdown may not be clean. "
                "Please call Cluster.shutdown() to avoid this.")
        log.debug("Event loop thread was joined")
class AsyncoreConnection(Connection, asyncore.dispatcher):
"""
An implementation of :class:`.Connection` that uses the ``asyncore``
module in the Python standard library for its event loop.
"""
_loop = None
_writable = False
_readable = False
@classmethod
def initialize_reactor(cls):
if not cls._loop:
cls._loop = AsyncoreLoop()
else:
current_pid = os.getpid()
if cls._loop._pid != current_pid:
log.debug("Detected fork, clearing and reinitializing reactor state")
cls.handle_fork()
cls._loop = AsyncoreLoop()
@classmethod
def handle_fork(cls):
global _dispatcher_map
_dispatcher_map = {}
if cls._loop:
cls._loop._cleanup()
cls._loop = None
@classmethod
def create_timer(cls, timeout, callback):
timer = Timer(timeout, callback)
cls._loop.add_timer(timer)
return timer
def __init__(self, *args, **kwargs):
Connection.__init__(self, *args, **kwargs)
self.deque = deque()
self.deque_lock = Lock()
self._connect_socket()
asyncore.dispatcher.__init__(self, self._socket, _dispatcher_map)
self._writable = True
self._readable = True
self._send_options_message()
# start the event loop if needed
self._loop.maybe_start()
def close(self):
with self.lock:
if self.is_closed:
return
self.is_closed = True
log.debug("Closing connection (%s) to %s", id(self), self.host)
self._writable = False
self._readable = False
asyncore.dispatcher.close(self)
log.debug("Closed socket to %s", self.host)
if not self.is_defunct:
self.error_all_requests(
ConnectionShutdown("Connection to %s was closed" % self.host))
# don't leave in-progress operations hanging
self.connected_event.set()
def handle_error(self):
self.defunct(sys.exc_info()[1])
def handle_close(self):
log.debug("Connection %s closed by server", self)
self.close()
def handle_write(self):
while True:
with self.deque_lock:
try:
next_msg = self.deque.popleft()
except IndexError:
self._writable = False
return
try:
sent = self.send(next_msg)
self._readable = True
except socket.error as err:
if (err.args[0] in NONBLOCKING):
with self.deque_lock:
self.deque.appendleft(next_msg)
else:
self.defunct(err)
return
else:
if sent < len(next_msg):
with self.deque_lock:
self.deque.appendleft(next_msg[sent:])
if sent == 0:
return
def handle_read(self):
try:
while True:
buf = self.recv(self.in_buffer_size)
self._iobuf.write(buf)
if len(buf) < self.in_buffer_size:
break
except socket.error as err:
if ssl and isinstance(err, ssl.SSLError):
if err.args[0] not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
self.defunct(err)
return
elif err.args[0] not in NONBLOCKING:
self.defunct(err)
return
if self._iobuf.tell():
self.process_io_buffer()
if not self._requests and not self.is_control_connection:
self._readable = False
def push(self, data):
sabs = self.out_buffer_size
if len(data) > sabs:
chunks = []
for i in range(0, len(data), sabs):
chunks.append(data[i:i + sabs])
else:
chunks = [data]
with self.deque_lock:
self.deque.extend(chunks)
self._writable = True
self._loop.wake_loop()
def writable(self):
return self._writable
def readable(self):
    """asyncore hook: report read interest.

    A connection reads when it expects responses; a live control
    connection always keeps read interest so server-pushed events are
    not missed.
    """
    if self._readable:
        return True
    if not self.is_control_connection:
        return False
    return not (self.is_defunct or self.is_closed)
|
run_nvmf.py | #!/usr/bin/env python3
import os
import re
import sys
import json
import paramiko
import zipfile
import threading
import subprocess
import itertools
import time
import uuid
import rpc
import rpc.client
import pandas as pd
from collections import OrderedDict
from common import *
class Server:
    """Common base for every machine participating in the test.

    Holds credentials and NIC addressing, normalizes the transport name
    to lower case, and rejects non-alphanumeric names (the name is
    embedded in result file names and log prefixes).
    """

    def __init__(self, name, username, password, mode, nic_ips, transport):
        self.name = name
        self.mode = mode
        self.username = username
        self.password = password
        self.nic_ips = nic_ips
        self.transport = transport.lower()

        if not re.fullmatch(r"[A-Za-z0-9]*", name):
            self.log_print("Please use a name which contains only letters or numbers")
            sys.exit(1)

    def log_print(self, msg):
        """Print *msg* prefixed with this server's name, flushed immediately."""
        print(f"[{self.name}] {msg}", flush=True)

    def get_uncommented_lines(self, lines):
        """Return *lines* with blank lines and '#'-comment lines removed."""
        return [line for line in lines if line and not line.startswith("#")]
class Target(Server):
    """NVMe-oF target host (the machine exporting the drives).

    Provides the measurement tooling shared by the kernel and SPDK
    target flavours (SAR, Intel PCM, bwm-ng bandwidth, DPDK memory
    stats) plus parsing/aggregation of fio JSON results into CSVs.
    """

    def __init__(self, name, username, password, mode, nic_ips, transport="rdma",
                 null_block_devices=0, sar_settings=None, pcm_settings=None,
                 bandwidth_settings=None, dpdk_settings=None, zcopy_settings=None,
                 scheduler_settings="static"):
        super(Target, self).__init__(name, username, password, mode, nic_ips, transport)
        self.null_block = null_block_devices
        # Measurement features default to off; each optional *_settings
        # argument both enables the feature and carries its parameters.
        self.enable_sar = False
        self.enable_pcm = False
        self.enable_bandwidth = False
        self.enable_dpdk_memory = False
        self.enable_zcopy = False
        self.scheduler_name = scheduler_settings

        if sar_settings:
            self.enable_sar, self.sar_delay, self.sar_interval, self.sar_count = sar_settings

        if pcm_settings:
            self.pcm_dir, self.pcm_delay, self.pcm_interval, self.pcm_count = pcm_settings
            self.enable_pcm = True

        if bandwidth_settings:
            self.enable_bandwidth, self.bandwidth_count = bandwidth_settings

        if dpdk_settings:
            self.enable_dpdk_memory, self.dpdk_wait_time = dpdk_settings

        if zcopy_settings:
            self.enable_zcopy = zcopy_settings

        # The script is assumed to live three levels below the SPDK root
        # (scripts/perf/nvmf/).
        self.script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
        self.spdk_dir = os.path.abspath(os.path.join(self.script_dir, "../../../"))
        self.sys_config()

    def zip_spdk_sources(self, spdk_dir, dest_file):
        """Zip the SPDK tree at *spdk_dir* into *dest_file*.

        Fix: the archive is now opened via a context manager so it is
        closed (and its central directory flushed) even when os.walk or
        ZipFile.write raises.
        """
        self.log_print("Zipping SPDK source directory")
        with zipfile.ZipFile(dest_file, "w", zipfile.ZIP_DEFLATED) as fh:
            for root, directories, files in os.walk(spdk_dir, followlinks=True):
                for file in files:
                    # Entries are stored relative to the current working directory.
                    fh.write(os.path.relpath(os.path.join(root, file)))
        self.log_print("Done zipping")

    def read_json_stats(self, file):
        """Parse one fio JSON result *file*.

        Returns a flat 18-element list: read iops, bw, avg/min/max
        latency, p99/p99.9/p99.99/p99.999 latency, then the same nine
        values for writes.  Latencies are normalized to microseconds
        regardless of fio's reporting unit.
        """
        with open(file, "r") as json_data:
            data = json.load(json_data)
            job_pos = 0  # job_post = 0 because using aggregated results

            # Check if latency is in nano or microseconds to choose correct dict key
            def get_lat_unit(key_prefix, dict_section):
                # key prefix - lat, clat or slat.
                # dict section - portion of json containing latency bucket in question
                # Return dict key to access the bucket and unit as string
                for k, v in dict_section.items():
                    if k.startswith(key_prefix):
                        return k, k.split("_")[1]

            def get_clat_percentiles(clat_dict_leaf):
                if "percentile" in clat_dict_leaf:
                    p99_lat = float(clat_dict_leaf["percentile"]["99.000000"])
                    p99_9_lat = float(clat_dict_leaf["percentile"]["99.900000"])
                    p99_99_lat = float(clat_dict_leaf["percentile"]["99.990000"])
                    p99_999_lat = float(clat_dict_leaf["percentile"]["99.999000"])
                    return [p99_lat, p99_9_lat, p99_99_lat, p99_999_lat]
                else:
                    # Latest fio versions do not provide "percentile" results if no
                    # measurements were done, so just return zeroes
                    return [0, 0, 0, 0]

            read_iops = float(data["jobs"][job_pos]["read"]["iops"])
            read_bw = float(data["jobs"][job_pos]["read"]["bw"])
            lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["read"])
            read_avg_lat = float(data["jobs"][job_pos]["read"][lat_key]["mean"])
            read_min_lat = float(data["jobs"][job_pos]["read"][lat_key]["min"])
            read_max_lat = float(data["jobs"][job_pos]["read"][lat_key]["max"])
            clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["read"])
            read_p99_lat, read_p99_9_lat, read_p99_99_lat, read_p99_999_lat = get_clat_percentiles(
                data["jobs"][job_pos]["read"][clat_key])

            # Normalize nanosecond buckets to microseconds.
            if "ns" in lat_unit:
                read_avg_lat, read_min_lat, read_max_lat = [x / 1000 for x in [read_avg_lat, read_min_lat, read_max_lat]]
            if "ns" in clat_unit:
                read_p99_lat = read_p99_lat / 1000
                read_p99_9_lat = read_p99_9_lat / 1000
                read_p99_99_lat = read_p99_99_lat / 1000
                read_p99_999_lat = read_p99_999_lat / 1000

            write_iops = float(data["jobs"][job_pos]["write"]["iops"])
            write_bw = float(data["jobs"][job_pos]["write"]["bw"])
            lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["write"])
            write_avg_lat = float(data["jobs"][job_pos]["write"][lat_key]["mean"])
            write_min_lat = float(data["jobs"][job_pos]["write"][lat_key]["min"])
            write_max_lat = float(data["jobs"][job_pos]["write"][lat_key]["max"])
            clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["write"])
            write_p99_lat, write_p99_9_lat, write_p99_99_lat, write_p99_999_lat = get_clat_percentiles(
                data["jobs"][job_pos]["write"][clat_key])

            if "ns" in lat_unit:
                write_avg_lat, write_min_lat, write_max_lat = [x / 1000 for x in [write_avg_lat, write_min_lat, write_max_lat]]
            if "ns" in clat_unit:
                write_p99_lat = write_p99_lat / 1000
                write_p99_9_lat = write_p99_9_lat / 1000
                write_p99_99_lat = write_p99_99_lat / 1000
                write_p99_999_lat = write_p99_999_lat / 1000

        return [read_iops, read_bw, read_avg_lat, read_min_lat, read_max_lat,
                read_p99_lat, read_p99_9_lat, read_p99_99_lat, read_p99_999_lat,
                write_iops, write_bw, write_avg_lat, write_min_lat, write_max_lat,
                write_p99_lat, write_p99_9_lat, write_p99_99_lat, write_p99_999_lat]

    def parse_results(self, results_dir, initiator_count=None, run_num=None):
        """Aggregate all fio JSON results found in *results_dir*.

        Produces one per-initiator CSV per job plus a summary
        nvmf_results.csv where read/write stats are merged, with
        latencies weighted by the job's rwmixread ratio.
        """
        files = os.listdir(results_dir)
        fio_files = filter(lambda x: ".fio" in x, files)
        json_files = [x for x in files if ".json" in x]

        headers = ["read_iops", "read_bw", "read_avg_lat_us", "read_min_lat_us", "read_max_lat_us",
                   "read_p99_lat_us", "read_p99.9_lat_us", "read_p99.99_lat_us", "read_p99.999_lat_us",
                   "write_iops", "write_bw", "write_avg_lat_us", "write_min_lat_us", "write_max_lat_us",
                   "write_p99_lat_us", "write_p99.9_lat_us", "write_p99.99_lat_us", "write_p99.999_lat_us"]

        aggr_headers = ["iops", "bw", "avg_lat_us", "min_lat_us", "max_lat_us",
                        "p99_lat_us", "p99.9_lat_us", "p99.99_lat_us", "p99.999_lat_us"]

        header_line = ",".join(["Name", *headers])
        aggr_header_line = ",".join(["Name", *aggr_headers])

        # Create empty results file
        csv_file = "nvmf_results.csv"
        with open(os.path.join(results_dir, csv_file), "w") as fh:
            fh.write(aggr_header_line + "\n")
        rows = set()

        for fio_config in fio_files:
            self.log_print("Getting FIO stats for %s" % fio_config)
            job_name, _ = os.path.splitext(fio_config)

            # Look in the filename for rwmixread value. Function arguments do
            # not have that information.
            # TODO: Improve this function by directly using workload params instead
            # of regexing through filenames.
            if "read" in job_name:
                rw_mixread = 1
            elif "write" in job_name:
                rw_mixread = 0
            else:
                rw_mixread = float(re.search(r"m_(\d+)", job_name).group(1)) / 100

            # If "_CPU" exists in name - ignore it
            # Initiators for the same job could have diffrent num_cores parameter
            job_name = re.sub(r"_\d+CPU", "", job_name)
            job_result_files = [x for x in json_files if job_name in x]
            self.log_print("Matching result files for current fio config:")
            for j in job_result_files:
                self.log_print("\t %s" % j)

            # There may have been more than 1 initiator used in test, need to check that
            # Result files are created so that string after last "_" separator is server name
            inits_names = set([os.path.splitext(x)[0].split("_")[-1] for x in job_result_files])
            inits_avg_results = []
            for i in inits_names:
                self.log_print("\tGetting stats for initiator %s" % i)
                # There may have been more than 1 test run for this job, calculate average results for initiator
                i_results = [x for x in job_result_files if i in x]
                i_results_filename = re.sub(r"run_\d+_", "", i_results[0].replace("json", "csv"))

                separate_stats = []
                for r in i_results:
                    stats = self.read_json_stats(os.path.join(results_dir, r))
                    separate_stats.append(stats)
                    self.log_print(stats)

                init_results = [sum(x) for x in zip(*separate_stats)]
                init_results = [x / len(separate_stats) for x in init_results]
                inits_avg_results.append(init_results)

                self.log_print("\tAverage results for initiator %s" % i)
                self.log_print(init_results)
                with open(os.path.join(results_dir, i_results_filename), "w") as fh:
                    fh.write(header_line + "\n")
                    fh.write(",".join([job_name, *["{0:.3f}".format(x) for x in init_results]]) + "\n")

            # Sum results of all initiators running this FIO job.
            # Latency results are an average of latencies from accros all initiators.
            inits_avg_results = [sum(x) for x in zip(*inits_avg_results)]
            inits_avg_results = OrderedDict(zip(headers, inits_avg_results))
            for key in inits_avg_results:
                if "lat" in key:
                    inits_avg_results[key] /= len(inits_names)

            # Aggregate separate read/write values into common labels
            # Take rw_mixread into consideration for mixed read/write workloads.
            aggregate_results = OrderedDict()
            for h in aggr_headers:
                read_stat, write_stat = [float(value) for key, value in inits_avg_results.items() if h in key]
                if "lat" in h:
                    _ = rw_mixread * read_stat + (1 - rw_mixread) * write_stat
                else:
                    _ = read_stat + write_stat
                aggregate_results[h] = "{0:.3f}".format(_)

            rows.add(",".join([job_name, *aggregate_results.values()]))

        # Save results to file
        for row in rows:
            with open(os.path.join(results_dir, csv_file), "a") as fh:
                fh.write(row + "\n")
        self.log_print("You can find the test results in the file %s" % os.path.join(results_dir, csv_file))

    def measure_sar(self, results_dir, sar_file_name):
        """Run sar for the configured interval/count and save its output."""
        self.log_print("Waiting %d delay before measuring SAR stats" % self.sar_delay)
        time.sleep(self.sar_delay)
        cmd = ["sar", "-P", "ALL", "%s" % self.sar_interval, "%s" % self.sar_count]
        out = subprocess.check_output(cmd).decode(encoding="utf-8")
        with open(os.path.join(results_dir, sar_file_name), "w") as fh:
            for line in out.split("\n"):
                if "Average" in line and "CPU" in line:
                    self.log_print("Summary CPU utilization from SAR:")
                    self.log_print(line)
                if "Average" in line and "all" in line:
                    self.log_print(line)
            fh.write(out)

    def measure_pcm_memory(self, results_dir, pcm_file_name):
        """Run pcm-memory.x for ~pcm_count seconds, writing CSV to results_dir."""
        time.sleep(self.pcm_delay)
        cmd = ["%s/pcm-memory.x" % self.pcm_dir, "%s" % self.pcm_interval, "-csv=%s/%s" % (results_dir, pcm_file_name)]
        pcm_memory = subprocess.Popen(cmd)
        time.sleep(self.pcm_count)
        pcm_memory.terminate()

    def measure_pcm(self, results_dir, pcm_file_name):
        """Run pcm.x for pcm_count iterations and extract socket (UPI) columns
        into a separate skt_* CSV."""
        time.sleep(self.pcm_delay)
        cmd = ["%s/pcm.x" % self.pcm_dir, "%s" % self.pcm_interval, "-i=%s" % self.pcm_count, "-csv=%s/%s" % (results_dir, pcm_file_name)]
        subprocess.run(cmd)
        df = pd.read_csv(os.path.join(results_dir, pcm_file_name), header=[0, 1])
        df = df.rename(columns=lambda x: re.sub(r'Unnamed:[\w\s]*$', '', x))
        skt = df.loc[:, df.columns.get_level_values(1).isin({'UPI0', 'UPI1', 'UPI2'})]
        skt_pcm_file_name = "_".join(["skt", pcm_file_name])
        skt.to_csv(os.path.join(results_dir, skt_pcm_file_name), index=False)

    def measure_pcm_power(self, results_dir, pcm_power_file_name):
        """Run pcm-power.x and save its raw output."""
        time.sleep(self.pcm_delay)
        cmd = ["%s/pcm-power.x" % self.pcm_dir, "%s" % self.pcm_interval, "-i=%s" % self.pcm_count]
        out = subprocess.check_output(cmd).decode(encoding="utf-8")
        with open(os.path.join(results_dir, pcm_power_file_name), "w") as fh:
            fh.write(out)

    def measure_bandwidth(self, results_dir, bandwidth_file_name):
        """Sample NIC bandwidth with bwm-ng into a CSV file.

        NOTE(review): each option and its value are passed as a single
        argv entry containing a space (e.g. "-o csv"); bwm-ng appears to
        tolerate this, but separate entries would be the safer form —
        verify before changing.
        """
        cmd = ["bwm-ng", "-o csv", "-F %s/%s" % (results_dir, bandwidth_file_name), "-a 1", "-t 1000", "-c %s" % self.bandwidth_count]
        bwm = subprocess.run(cmd)

    def measure_dpdk_memory(self, results_dir):
        """Dump SPDK/DPDK memory statistics and move them into *results_dir*.

        Requires ``self.client`` (the JSON-RPC client created by
        SPDKTarget.tgt_start).  The RPC writes /tmp/spdk_mem_dump.txt
        on the target, which is then renamed into the results directory.
        """
        self.log_print("INFO: waiting to generate DPDK memory usage")
        time.sleep(self.dpdk_wait_time)
        self.log_print("INFO: generating DPDK memory usage")
        # Bug fix: the original line evaluated the attribute without calling
        # it, so no RPC was ever issued and the dump file was never created.
        rpc.env.env_dpdk_get_mem_stats(self.client)
        os.rename("/tmp/spdk_mem_dump.txt", "%s/spdk_mem_dump.txt" % (results_dir))

    def sys_config(self):
        """Log kernel release, cmdline, sysctl.conf, cpupower info and the
        zcopy/scheduler settings for later inspection of the test setup."""
        self.log_print("====Kernel release:====")
        self.log_print(os.uname().release)
        self.log_print("====Kernel command line:====")
        with open('/proc/cmdline') as f:
            cmdline = f.readlines()
            self.log_print('\n'.join(self.get_uncommented_lines(cmdline)))
        self.log_print("====sysctl conf:====")
        with open('/etc/sysctl.conf') as f:
            sysctl = f.readlines()
            self.log_print('\n'.join(self.get_uncommented_lines(sysctl)))
        self.log_print("====Cpu power info:====")
        subprocess.run(["cpupower", "frequency-info"])
        self.log_print("====zcopy settings:====")
        self.log_print("zcopy enabled: %s" % (self.enable_zcopy))
        self.log_print("====Scheduler settings:====")
        self.log_print("SPDK scheduler: %s" % (self.scheduler_name))
class Initiator(Server):
    """Traffic-generator host driven remotely over SSH (paramiko).

    The constructor opens the SSH session and prepares the remote
    workspace; all commands run on the remote machine.  Subclassed by
    KernelInitiator (nvme-cli) and SPDKInitiator (SPDK fio bdev plugin).
    """

    def __init__(self, name, username, password, mode, nic_ips, ip, transport="rdma", cpu_frequency=None,
                 nvmecli_bin="nvme", workspace="/tmp/spdk", cpus_allowed=None,
                 cpus_allowed_policy="shared", fio_bin="/usr/src/fio/fio"):
        super(Initiator, self).__init__(name, username, password, mode, nic_ips, transport)
        self.ip = ip
        self.spdk_dir = workspace
        # SPDK_WORKSPACE environment variable overrides the workspace argument.
        if os.getenv('SPDK_WORKSPACE'):
            self.spdk_dir = os.getenv('SPDK_WORKSPACE')
        self.fio_bin = fio_bin
        self.cpus_allowed = cpus_allowed
        self.cpus_allowed_policy = cpus_allowed_policy
        self.cpu_frequency = cpu_frequency
        self.nvmecli_bin = nvmecli_bin
        self.ssh_connection = paramiko.SSHClient()
        self.ssh_connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.ssh_connection.connect(self.ip, username=self.username, password=self.password)
        # Start from a clean remote workspace.
        self.remote_call("sudo rm -rf %s/nvmf_perf" % self.spdk_dir)
        self.remote_call("mkdir -p %s" % self.spdk_dir)
        self.set_cpu_frequency()
        self.sys_config()

    def __del__(self):
        self.ssh_connection.close()

    def put_file(self, local, remote_dest):
        """Upload a local file to the initiator over SFTP."""
        ftp = self.ssh_connection.open_sftp()
        ftp.put(local, remote_dest)
        ftp.close()

    def get_file(self, remote, local_dest):
        """Download a file from the initiator over SFTP."""
        ftp = self.ssh_connection.open_sftp()
        ftp.get(remote, local_dest)
        ftp.close()

    def remote_call(self, cmd):
        """Run *cmd* on the initiator; return (stdout, stderr) as UTF-8 strings.

        Blocks until both output streams are fully read.
        """
        stdin, stdout, stderr = self.ssh_connection.exec_command(cmd)
        out = stdout.read().decode(encoding="utf-8")
        err = stderr.read().decode(encoding="utf-8")
        return out, err

    def copy_result_files(self, dest_dir):
        """Copy every file from the remote nvmf_perf directory into *dest_dir*."""
        self.log_print("Copying results")

        if not os.path.exists(dest_dir):
            os.mkdir(dest_dir)

        # Get list of result files from initiator and copy them back to target
        stdout, stderr = self.remote_call("ls %s/nvmf_perf" % self.spdk_dir)
        file_list = stdout.strip().split("\n")

        for file in file_list:
            self.get_file(os.path.join(self.spdk_dir, "nvmf_perf", file),
                          os.path.join(dest_dir, file))
        self.log_print("Done copying results")

    def discover_subsystems(self, address_list, subsys_no):
        """Run nvme discover against ports 4420..4420+subsys_no-1 on each
        address and return a sorted, de-duplicated list of
        (trsvcid, subnqn, traddr) tuples matching *address_list*."""
        num_nvmes = range(0, subsys_no)
        nvme_discover_output = ""
        for ip, subsys_no in itertools.product(address_list, num_nvmes):
            self.log_print("Trying to discover: %s:%s" % (ip, 4420 + subsys_no))
            nvme_discover_cmd = ["sudo",
                                 "%s" % self.nvmecli_bin,
                                 "discover", "-t %s" % self.transport,
                                 "-s %s" % (4420 + subsys_no),
                                 "-a %s" % ip]
            nvme_discover_cmd = " ".join(nvme_discover_cmd)

            stdout, stderr = self.remote_call(nvme_discover_cmd)
            if stdout:
                nvme_discover_output = nvme_discover_output + stdout

        subsystems = re.findall(r'trsvcid:\s(\d+)\s+'  # get svcid number
                                r'subnqn:\s+([a-zA-Z0-9\.\-\:]+)\s+'  # get NQN id
                                r'traddr:\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',  # get IP address
                                nvme_discover_output)  # from nvme discovery output
        subsystems = filter(lambda x: x[-1] in address_list, subsystems)
        subsystems = list(set(subsystems))
        subsystems.sort(key=lambda x: x[1])
        self.log_print("Found matching subsystems on target side:")
        for s in subsystems:
            self.log_print(s)

        return subsystems

    def gen_fio_filename_conf(self, *args, **kwargs):
        # Logic implemented in SPDKInitiator and KernelInitiator classes
        pass

    def gen_fio_config(self, rw, rwmixread, block_size, io_depth, subsys_no, num_jobs=None, ramp_time=0, run_time=10):
        """Render a fio job file for the given workload, upload it to the
        initiator's nvmf_perf directory and return its remote path."""
        fio_conf_template = """
[global]
ioengine={ioengine}
{spdk_conf}
thread=1
group_reporting=1
direct=1
percentile_list=50:90:99:99.5:99.9:99.99:99.999

norandommap=1
rw={rw}
rwmixread={rwmixread}
bs={block_size}
time_based=1
ramp_time={ramp_time}
runtime={run_time}
"""
        if "spdk" in self.mode:
            subsystems = self.discover_subsystems(self.nic_ips, subsys_no)
            bdev_conf = self.gen_spdk_bdev_conf(subsystems)
            self.remote_call("echo '%s' > %s/bdev.conf" % (bdev_conf, self.spdk_dir))
            ioengine = "%s/build/fio/spdk_bdev" % self.spdk_dir
            spdk_conf = "spdk_json_conf=%s/bdev.conf" % self.spdk_dir
        else:
            ioengine = "libaio"
            spdk_conf = ""
            out, err = self.remote_call("sudo nvme list | grep -E 'SPDK|Linux' | awk '{print $1}'")
            subsystems = [x for x in out.split("\n") if "nvme" in x]

        if self.cpus_allowed is not None:
            self.log_print("Limiting FIO workload execution on specific cores %s" % self.cpus_allowed)
            cpus_num = 0
            cpus = self.cpus_allowed.split(",")
            for cpu in cpus:
                if "-" in cpu:
                    a, b = cpu.split("-")
                    a = int(a)
                    b = int(b)
                    # NOTE(review): len(range(a, b)) == b - a, which excludes
                    # the range's end CPU; "0-3" counts as 3 CPUs here while
                    # fio treats it as 4 — confirm whether b - a + 1 was meant.
                    cpus_num += len(range(a, b))
                else:
                    cpus_num += 1
            threads = range(0, cpus_num)
        elif hasattr(self, 'num_cores'):
            self.log_print("Limiting FIO workload execution to %s cores" % self.num_cores)
            threads = range(0, int(self.num_cores))
        else:
            threads = range(0, len(subsystems))

        if "spdk" in self.mode:
            filename_section = self.gen_fio_filename_conf(subsystems, threads, io_depth, num_jobs)
        else:
            filename_section = self.gen_fio_filename_conf(threads, io_depth, num_jobs)

        fio_config = fio_conf_template.format(ioengine=ioengine, spdk_conf=spdk_conf,
                                              rw=rw, rwmixread=rwmixread, block_size=block_size,
                                              ramp_time=ramp_time, run_time=run_time)
        if num_jobs:
            fio_config = fio_config + "numjobs=%s \n" % num_jobs
        if self.cpus_allowed is not None:
            fio_config = fio_config + "cpus_allowed=%s \n" % self.cpus_allowed
            fio_config = fio_config + "cpus_allowed_policy=%s \n" % self.cpus_allowed_policy
        fio_config = fio_config + filename_section

        fio_config_filename = "%s_%s_%s_m_%s" % (block_size, io_depth, rw, rwmixread)
        if hasattr(self, "num_cores"):
            fio_config_filename += "_%sCPU" % self.num_cores
        fio_config_filename += ".fio"

        self.remote_call("mkdir -p %s/nvmf_perf" % self.spdk_dir)
        self.remote_call("echo '%s' > %s/nvmf_perf/%s" % (fio_config, self.spdk_dir, fio_config_filename))
        self.log_print("Created FIO Config:")
        self.log_print(fio_config)

        return os.path.join(self.spdk_dir, "nvmf_perf", fio_config_filename)

    def set_cpu_frequency(self):
        """Pin the remote CPUs to a fixed frequency via cpupower, if requested.

        Exits the script if the governor cannot be set (e.g. intel_pstate
        is active on the remote host).
        """
        if self.cpu_frequency is not None:
            try:
                self.remote_call('sudo cpupower frequency-set -g userspace')
                self.remote_call('sudo cpupower frequency-set -f %s' % self.cpu_frequency)
                cmd = "sudo cpupower frequency-info"
                output, error = self.remote_call(cmd)
                self.log_print(output)
                self.log_print(error)
            except Exception:
                self.log_print("ERROR: cpu_frequency will not work when intel_pstate is enabled!")
                sys.exit()
        else:
            self.log_print("WARNING: you have disabled intel_pstate and using default cpu governance.")

    def run_fio(self, fio_config_file, run_num=None):
        """Execute fio remotely with JSON output.

        With *run_num*, the job is repeated that many times, each run
        writing <job>_run_<i>_<name>.json; otherwise a single
        <job>_<name>.json is produced.
        """
        job_name, _ = os.path.splitext(fio_config_file)
        self.log_print("Starting FIO run for job: %s" % job_name)
        self.log_print("Using FIO: %s" % self.fio_bin)

        if run_num:
            for i in range(1, run_num + 1):
                output_filename = job_name + "_run_" + str(i) + "_" + self.name + ".json"
                cmd = "sudo %s %s --output-format=json --output=%s" % (self.fio_bin, fio_config_file, output_filename)
                output, error = self.remote_call(cmd)
                self.log_print(output)
                self.log_print(error)
        else:
            output_filename = job_name + "_" + self.name + ".json"
            cmd = "sudo %s %s --output-format=json --output=%s" % (self.fio_bin, fio_config_file, output_filename)
            output, error = self.remote_call(cmd)
            self.log_print(output)
            self.log_print(error)
        self.log_print("FIO run finished. Results in: %s" % output_filename)

    def sys_config(self):
        """Log the remote host's kernel, cmdline, sysctl and cpupower info."""
        self.log_print("====Kernel release:====")
        self.log_print(self.remote_call('uname -r')[0])
        self.log_print("====Kernel command line:====")
        cmdline, error = self.remote_call('cat /proc/cmdline')
        self.log_print('\n'.join(self.get_uncommented_lines(cmdline.splitlines())))
        self.log_print("====sysctl conf:====")
        sysctl, error = self.remote_call('cat /etc/sysctl.conf')
        self.log_print('\n'.join(self.get_uncommented_lines(sysctl.splitlines())))
        self.log_print("====Cpu power info:====")
        self.remote_call("cpupower frequency-info")
class KernelTarget(Target):
    """NVMe-oF target backed by the Linux kernel nvmet driver,
    configured through nvmetcli from a generated kernel.conf JSON."""

    def __init__(self, name, username, password, mode, nic_ips, transport="rdma",
                 null_block_devices=0, sar_settings=None, pcm_settings=None,
                 bandwidth_settings=None, dpdk_settings=None, nvmet_bin="nvmetcli", **kwargs):
        super(KernelTarget, self).__init__(name, username, password, mode, nic_ips, transport,
                                           null_block_devices, sar_settings, pcm_settings, bandwidth_settings,
                                           dpdk_settings)
        self.nvmet_bin = nvmet_bin

    def __del__(self):
        # Tear down the kernel target configuration when this object dies.
        nvmet_command(self.nvmet_bin, "clear")

    def kernel_tgt_gen_subsystem_conf(self, nvme_list, address_list):
        """Write kernel.conf for nvmetcli: one subsystem + one port per
        disk, with disks split evenly between the NIC addresses."""
        nvmet_cfg = {
            "ports": [],
            "hosts": [],
            "subsystems": [],
        }

        # Split disks between NIC IP's
        disks_per_ip = int(len(nvme_list) / len(address_list))
        # Slice [i*d : (i+1)*d] — one equal-sized chunk per address.
        disk_chunks = [nvme_list[i * disks_per_ip:disks_per_ip + disks_per_ip * i] for i in range(0, len(address_list))]

        subsys_no = 1
        port_no = 0
        for ip, chunk in zip(address_list, disk_chunks):
            for disk in chunk:
                nvmet_cfg["subsystems"].append({
                    "allowed_hosts": [],
                    "attr": {
                        "allow_any_host": "1",
                        "serial": "SPDK00%s" % subsys_no,
                        "version": "1.3"
                    },
                    "namespaces": [
                        {
                            "device": {
                                "path": disk,
                                "uuid": "%s" % uuid.uuid4()
                            },
                            "enable": 1,
                            "nsid": subsys_no
                        }
                    ],
                    "nqn": "nqn.2018-09.io.spdk:cnode%s" % subsys_no
                })

                # Each subsystem gets its own port at 4420 + port_no.
                nvmet_cfg["ports"].append({
                    "addr": {
                        "adrfam": "ipv4",
                        "traddr": ip,
                        "trsvcid": "%s" % (4420 + port_no),
                        "trtype": "%s" % self.transport
                    },
                    "portid": subsys_no,
                    "referrals": [],
                    "subsystems": ["nqn.2018-09.io.spdk:cnode%s" % subsys_no]
                })
                subsys_no += 1
                port_no += 1

        with open("kernel.conf", "w") as fh:
            fh.write(json.dumps(nvmet_cfg, indent=2))
        pass

    def tgt_start(self):
        """Generate the nvmet configuration (null-block or real NVMe
        drives) and load it with nvmetcli."""
        self.log_print("Configuring kernel NVMeOF Target")

        if self.null_block:
            print("Configuring with null block device.")
            null_blk_list = ["/dev/nullb{}".format(x) for x in range(self.null_block)]
            self.kernel_tgt_gen_subsystem_conf(null_blk_list, self.nic_ips)
            self.subsys_no = len(null_blk_list)
        else:
            print("Configuring with NVMe drives.")
            nvme_list = get_nvme_devices()
            self.kernel_tgt_gen_subsystem_conf(nvme_list, self.nic_ips)
            self.subsys_no = len(nvme_list)

        nvmet_command(self.nvmet_bin, "clear")
        nvmet_command(self.nvmet_bin, "restore kernel.conf")
        self.log_print("Done configuring kernel NVMeOF Target")
class SPDKTarget(Target):
    """NVMe-oF target running the SPDK nvmf_tgt application, configured
    entirely over the JSON-RPC socket (/var/tmp/spdk.sock)."""

    def __init__(self, name, username, password, mode, nic_ips, transport="rdma",
                 null_block_devices=0, null_block_dif_type=0, sar_settings=None, pcm_settings=None,
                 bandwidth_settings=None, dpdk_settings=None, zcopy_settings=None,
                 scheduler_settings="static", num_shared_buffers=4096, num_cores=1,
                 dif_insert_strip=False, **kwargs):
        super(SPDKTarget, self).__init__(name, username, password, mode, nic_ips, transport,
                                         null_block_devices, sar_settings, pcm_settings, bandwidth_settings,
                                         dpdk_settings, zcopy_settings, scheduler_settings)
        self.num_cores = num_cores
        self.num_shared_buffers = num_shared_buffers
        self.null_block_dif_type = null_block_dif_type
        self.dif_insert_strip = dif_insert_strip

    def spdk_tgt_configure(self):
        """Create the transport, the bdevs and the subsystems via RPC."""
        self.log_print("Configuring SPDK NVMeOF target via RPC")
        numa_list = get_used_numa_nodes()

        # Create RDMA transport layer
        rpc.nvmf.nvmf_create_transport(self.client, trtype=self.transport,
                                       num_shared_buffers=self.num_shared_buffers,
                                       dif_insert_or_strip=self.dif_insert_strip)
        self.log_print("SPDK NVMeOF transport layer:")
        rpc.client.print_dict(rpc.nvmf.nvmf_get_transports(self.client))

        # NOTE(review): the return values below are assigned but unused —
        # the helpers configure the target via RPC as a side effect.
        if self.null_block:
            nvme_section = self.spdk_tgt_add_nullblock(self.null_block)
            subsystems_section = self.spdk_tgt_add_subsystem_conf(self.nic_ips, self.null_block)
        else:
            nvme_section = self.spdk_tgt_add_nvme_conf()
            subsystems_section = self.spdk_tgt_add_subsystem_conf(self.nic_ips)
        self.log_print("Done configuring SPDK NVMeOF Target")

    def spdk_tgt_add_nullblock(self, null_block_count):
        """Create *null_block_count* null bdevs (with DIF metadata when
        null_block_dif_type != 0), named Nvme<i>n1 to match real drives."""
        md_size = 0
        block_size = 4096
        if self.null_block_dif_type != 0:
            md_size = 128

        self.log_print("Adding null block bdevices to config via RPC")
        for i in range(null_block_count):
            self.log_print("Setting bdev protection to :%s" % self.null_block_dif_type)
            rpc.bdev.bdev_null_create(self.client, 102400, block_size + md_size, "Nvme{}n1".format(i),
                                      dif_type=self.null_block_dif_type, md_size=md_size)
        self.log_print("SPDK Bdevs configuration:")
        rpc.client.print_dict(rpc.bdev.bdev_get_bdevs(self.client))

    def spdk_tgt_add_nvme_conf(self, req_num_disks=None):
        """Attach local PCIe NVMe drives as bdevs; optionally limit the
        count to *req_num_disks* (exits if more are requested than exist)."""
        self.log_print("Adding NVMe bdevs to config via RPC")

        bdfs = get_nvme_devices_bdf()
        bdfs = [b.replace(":", ".") for b in bdfs]

        if req_num_disks:
            if req_num_disks > len(bdfs):
                self.log_print("ERROR: Requested number of disks is more than available %s" % len(bdfs))
                sys.exit(1)
            else:
                bdfs = bdfs[0:req_num_disks]

        for i, bdf in enumerate(bdfs):
            rpc.bdev.bdev_nvme_attach_controller(self.client, name="Nvme%s" % i, trtype="PCIe", traddr=bdf)

        self.log_print("SPDK Bdevs configuration:")
        rpc.client.print_dict(rpc.bdev.bdev_get_bdevs(self.client))

    def spdk_tgt_add_subsystem_conf(self, ips=None, req_num_disks=None):
        """Create one subsystem + listener (port 4420) per bdev, splitting
        the bdevs evenly between the provided NIC addresses."""
        self.log_print("Adding subsystems to config")
        if not req_num_disks:
            req_num_disks = get_nvme_devices_count()

        # Distribute bdevs between provided NICs
        num_disks = range(0, req_num_disks)
        if len(num_disks) == 1:
            disks_per_ip = 1
        else:
            disks_per_ip = int(len(num_disks) / len(ips))
        disk_chunks = [num_disks[i * disks_per_ip:disks_per_ip + disks_per_ip * i] for i in range(0, len(ips))]

        # Create subsystems, add bdevs to namespaces, add listeners
        for ip, chunk in zip(ips, disk_chunks):
            for c in chunk:
                nqn = "nqn.2018-09.io.spdk:cnode%s" % c
                serial = "SPDK00%s" % c
                bdev_name = "Nvme%sn1" % c
                rpc.nvmf.nvmf_create_subsystem(self.client, nqn, serial,
                                               allow_any_host=True, max_namespaces=8)
                rpc.nvmf.nvmf_subsystem_add_ns(self.client, nqn, bdev_name)

                rpc.nvmf.nvmf_subsystem_add_listener(self.client, nqn,
                                                     trtype=self.transport,
                                                     traddr=ip,
                                                     trsvcid="4420",
                                                     adrfam="ipv4")

        self.log_print("SPDK NVMeOF subsystem configuration:")
        rpc.client.print_dict(rpc.nvmf.nvmf_get_subsystems(self.client))

    def tgt_start(self):
        """Launch nvmf_tgt, wait for its RPC socket, apply socket/scheduler
        options, then finish initialization and configure the target."""
        if self.null_block:
            self.subsys_no = 1
        else:
            self.subsys_no = get_nvme_devices_count()
        self.log_print("Starting SPDK NVMeOF Target process")
        nvmf_app_path = os.path.join(self.spdk_dir, "build/bin/nvmf_tgt")
        # NOTE(review): subprocess argv entries must be strings; num_cores
        # is presumably the core-mask string from the config — confirm it
        # is never passed as an int (the default here is 1).
        proc = subprocess.Popen([nvmf_app_path, "--wait-for-rpc", "-m", self.num_cores])
        self.pid = os.path.join(self.spdk_dir, "nvmf.pid")

        with open(self.pid, "w") as fh:
            fh.write(str(proc.pid))
        self.nvmf_proc = proc
        self.log_print("SPDK NVMeOF Target PID=%s" % self.pid)
        self.log_print("Waiting for spdk to initilize...")
        while True:
            if os.path.exists("/var/tmp/spdk.sock"):
                break
            time.sleep(1)
        self.client = rpc.client.JSONRPCClient("/var/tmp/spdk.sock")

        if self.enable_zcopy:
            rpc.sock.sock_impl_set_options(self.client, impl_name="posix",
                                           enable_zerocopy_send=True)
            self.log_print("Target socket options:")
            rpc.client.print_dict(rpc.sock.sock_impl_get_options(self.client, impl_name="posix"))

        rpc.app.framework_set_scheduler(self.client, name=self.scheduler_name)

        rpc.framework_start_init(self.client)
        self.spdk_tgt_configure()

    def __del__(self):
        # Best-effort shutdown of the nvmf_tgt process; escalate to kill
        # if terminate/wait fails.
        if hasattr(self, "nvmf_proc"):
            try:
                self.nvmf_proc.terminate()
                self.nvmf_proc.wait()
            except Exception as e:
                self.log_print(e)
                self.nvmf_proc.kill()
                self.nvmf_proc.communicate()
class KernelInitiator(Initiator):
    """Initiator that connects with nvme-cli against the kernel NVMe-oF
    host driver and runs fio with the libaio engine on the resulting
    /dev/nvmeXnY devices."""

    def __init__(self, name, username, password, mode, nic_ips, ip, transport,
                 cpus_allowed=None, cpus_allowed_policy="shared",
                 cpu_frequency=None, fio_bin="/usr/src/fio/fio", **kwargs):
        super(KernelInitiator, self).__init__(name, username, password, mode, nic_ips, ip, transport,
                                              cpus_allowed=cpus_allowed, cpus_allowed_policy=cpus_allowed_policy,
                                              cpu_frequency=cpu_frequency, fio_bin=fio_bin)

        # Extra flags appended to "nvme connect". Bug fix: use .get() so a
        # config section without "extra_params" no longer raises KeyError
        # (the original indexed kwargs["extra_params"] unconditionally).
        self.extra_params = kwargs.get("extra_params") or ""

    def __del__(self):
        self.ssh_connection.close()

    def kernel_init_connect(self, address_list, subsys_no):
        """Discover matching subsystems and nvme-connect to each of them."""
        subsystems = self.discover_subsystems(address_list, subsys_no)
        self.log_print("Below connection attempts may result in error messages, this is expected!")
        for subsystem in subsystems:
            self.log_print("Trying to connect %s %s %s" % subsystem)
            # subsystem is (trsvcid, subnqn, traddr), matching -s/-n/-a order.
            self.remote_call("sudo %s connect -t %s -s %s -n %s -a %s %s" % (self.nvmecli_bin,
                                                                             self.transport,
                                                                             *subsystem,
                                                                             self.extra_params))
            time.sleep(2)

    def kernel_init_disconnect(self, address_list, subsys_no):
        """Disconnect every previously discovered subsystem by NQN."""
        subsystems = self.discover_subsystems(address_list, subsys_no)
        for subsystem in subsystems:
            self.remote_call("sudo %s disconnect -n %s" % (self.nvmecli_bin, subsystem[1]))
            time.sleep(1)

    def gen_fio_filename_conf(self, threads, io_depth, num_jobs=1):
        """Build fio [filenameN] sections distributing the connected kernel
        NVMe devices round-robin across *threads* jobs; per-section iodepth
        is scaled so total queue depth stays io_depth * num_devices."""
        out, err = self.remote_call("sudo nvme list | grep -E 'SPDK|Linux' | awk '{print $1}'")
        nvme_list = [x for x in out.split("\n") if "nvme" in x]

        filename_section = ""
        nvme_per_split = int(len(nvme_list) / len(threads))
        remainder = len(nvme_list) % len(threads)
        iterator = iter(nvme_list)
        result = []
        for i in range(len(threads)):
            result.append([])
            for j in range(nvme_per_split):
                result[i].append(next(iterator))
            if remainder:
                result[i].append(next(iterator))
                remainder -= 1
        for i, r in enumerate(result):
            header = "[filename%s]" % i
            disks = "\n".join(["filename=%s" % x for x in r])
            job_section_qd = round((io_depth * len(r)) / num_jobs)
            if job_section_qd == 0:
                job_section_qd = 1
            iodepth = "iodepth=%s" % job_section_qd
            filename_section = "\n".join([filename_section, header, disks, iodepth])

        return filename_section
class SPDKInitiator(Initiator):
    """Initiator that builds SPDK remotely and runs fio through the SPDK
    bdev fio plugin against NVMe-oF subsystems described in a generated
    JSON bdev config."""

    def __init__(self, name, username, password, mode, nic_ips, ip, transport="rdma",
                 num_cores=1, cpus_allowed=None, cpus_allowed_policy="shared",
                 cpu_frequency=None, fio_bin="/usr/src/fio/fio", **kwargs):
        super(SPDKInitiator, self).__init__(name, username, password, mode, nic_ips, ip, transport,
                                            cpus_allowed=cpus_allowed, cpus_allowed_policy=cpus_allowed_policy,
                                            cpu_frequency=cpu_frequency, fio_bin=fio_bin)

        # Number of fio jobs/cores; also appended to result file names.
        self.num_cores = num_cores

    def install_spdk(self, local_spdk_zip):
        """Upload the zipped SPDK sources, build them with the fio plugin
        enabled and run scripts/setup.sh on the initiator."""
        self.put_file(local_spdk_zip, "/tmp/spdk_drop.zip")
        self.log_print("Copied sources zip from target")
        self.remote_call("unzip -qo /tmp/spdk_drop.zip -d %s" % self.spdk_dir)

        self.log_print("Sources unpacked")
        self.log_print("Using fio binary %s" % self.fio_bin)
        self.remote_call("cd %s; git submodule update --init; make clean; ./configure --with-rdma --with-fio=%s;"
                         "make -j$(($(nproc)*2))" % (self.spdk_dir, os.path.dirname(self.fio_bin)))

        self.log_print("SPDK built")
        self.remote_call("sudo %s/scripts/setup.sh" % self.spdk_dir)

    def gen_spdk_bdev_conf(self, remote_subsystem_list):
        """Return a JSON bdev-subsystem config string with one
        bdev_nvme_attach_controller entry per remote subsystem tuple
        (trsvcid, subnqn, traddr)."""
        bdev_cfg_section = {
            "subsystems": [
                {
                    "subsystem": "bdev",
                    "config": []
                }
            ]
        }

        for i, subsys in enumerate(remote_subsystem_list):
            sub_port, sub_nqn, sub_addr = map(lambda x: str(x), subsys)
            nvme_ctrl = {
                "method": "bdev_nvme_attach_controller",
                "params": {
                    "name": "Nvme{}".format(i),
                    "trtype": self.transport,
                    "traddr": sub_addr,
                    "trsvcid": sub_port,
                    "subnqn": sub_nqn,
                    "adrfam": "IPv4"
                }
            }
            bdev_cfg_section["subsystems"][0]["config"].append(nvme_ctrl)

        return json.dumps(bdev_cfg_section, indent=2)

    def gen_fio_filename_conf(self, subsystems, threads, io_depth, num_jobs=1):
        """Build fio [filenameN] sections mapping Nvme<i>n1 bdevs
        round-robin onto *threads* jobs (capped at one thread per
        subsystem); per-section iodepth is scaled like the kernel variant."""
        filename_section = ""
        if len(threads) >= len(subsystems):
            threads = range(0, len(subsystems))
        filenames = ["Nvme%sn1" % x for x in range(0, len(subsystems))]
        nvme_per_split = int(len(subsystems) / len(threads))
        remainder = len(subsystems) % len(threads)
        iterator = iter(filenames)
        result = []
        for i in range(len(threads)):
            result.append([])
            for j in range(nvme_per_split):
                result[i].append(next(iterator))
            if remainder:
                result[i].append(next(iterator))
                remainder -= 1
        for i, r in enumerate(result):
            header = "[filename%s]" % i
            disks = "\n".join(["filename=%s" % x for x in r])
            job_section_qd = round((io_depth * len(r)) / num_jobs)
            if job_section_qd == 0:
                job_section_qd = 1
            iodepth = "iodepth=%s" % job_section_qd
            filename_section = "\n".join([filename_section, header, disks, iodepth])

        return filename_section
if __name__ == "__main__":
    spdk_zip_path = "/tmp/spdk.zip"
    target_results_dir = "/tmp/results"

    # Config file: first CLI argument, else config.json next to this script.
    if len(sys.argv) > 1:
        config_file_path = sys.argv[1]
    else:
        script_full_dir = os.path.dirname(os.path.realpath(__file__))
        config_file_path = os.path.join(script_full_dir, "config.json")

    print("Using config file: %s" % config_file_path)
    with open(config_file_path, "r") as config:
        data = json.load(config)

    initiators = []
    fio_cases = []

    # Instantiate target / initiator / fio-workload objects from config sections.
    # Section names are matched by substring ("target", "initiator1", "fio", ...);
    # "general" settings are merged into every object's kwargs.
    for k, v in data.items():
        if "target" in k:
            if data[k]["mode"] == "spdk":
                target_obj = SPDKTarget(name=k, **data["general"], **v)
            elif data[k]["mode"] == "kernel":
                target_obj = KernelTarget(name=k, **data["general"], **v)
        elif "initiator" in k:
            if data[k]["mode"] == "spdk":
                init_obj = SPDKInitiator(name=k, **data["general"], **v)
            elif data[k]["mode"] == "kernel":
                init_obj = KernelInitiator(name=k, **data["general"], **v)
            initiators.append(init_obj)
        elif "fio" in k:
            # Cartesian product: one fio run per (block size, queue depth, rw mix).
            fio_workloads = itertools.product(data[k]["bs"],
                                              data[k]["qd"],
                                              data[k]["rw"])

            fio_run_time = data[k]["run_time"]
            fio_ramp_time = data[k]["ramp_time"]
            fio_rw_mix_read = data[k]["rwmixread"]
            fio_run_num = data[k]["run_num"] if "run_num" in data[k].keys() else None
            fio_num_jobs = data[k]["num_jobs"] if "num_jobs" in data[k].keys() else None
        else:
            continue

    # Copy and install SPDK on remote initiators
    if "skip_spdk_install" not in data["general"]:
        target_obj.zip_spdk_sources(target_obj.spdk_dir, spdk_zip_path)
        threads = []
        for i in initiators:
            if i.mode == "spdk":
                t = threading.Thread(target=i.install_spdk, args=(spdk_zip_path,))
                threads.append(t)
                t.start()
        for t in threads:
            t.join()

    target_obj.tgt_start()

    try:
        os.mkdir(target_results_dir)
    except FileExistsError:
        pass

    # Poor mans threading
    # Run FIO tests
    for block_size, io_depth, rw in fio_workloads:
        threads = []
        configs = []
        # Generate per-initiator fio configs first; kernel initiators must
        # connect before config generation can see the attached devices.
        for i in initiators:
            if i.mode == "kernel":
                i.kernel_init_connect(i.nic_ips, target_obj.subsys_no)

            cfg = i.gen_fio_config(rw, fio_rw_mix_read, block_size, io_depth, target_obj.subsys_no,
                                   fio_num_jobs, fio_ramp_time, fio_run_time)
            configs.append(cfg)

        for i, cfg in zip(initiators, configs):
            t = threading.Thread(target=i.run_fio, args=(cfg, fio_run_num))
            threads.append(t)
        # Optional target-side measurements run concurrently with fio.
        if target_obj.enable_sar:
            sar_file_name = "_".join([str(block_size), str(rw), str(io_depth), "sar"])
            sar_file_name = ".".join([sar_file_name, "txt"])
            t = threading.Thread(target=target_obj.measure_sar, args=(target_results_dir, sar_file_name))
            threads.append(t)

        if target_obj.enable_pcm:
            pcm_fnames = ["%s_%s_%s_%s.csv" % (block_size, rw, io_depth, x) for x in ["pcm_cpu", "pcm_memory", "pcm_power"]]

            pcm_cpu_t = threading.Thread(target=target_obj.measure_pcm, args=(target_results_dir, pcm_fnames[0],))
            pcm_mem_t = threading.Thread(target=target_obj.measure_pcm_memory, args=(target_results_dir, pcm_fnames[1],))
            pcm_pow_t = threading.Thread(target=target_obj.measure_pcm_power, args=(target_results_dir, pcm_fnames[2],))

            threads.append(pcm_cpu_t)
            threads.append(pcm_mem_t)
            threads.append(pcm_pow_t)

        if target_obj.enable_bandwidth:
            bandwidth_file_name = "_".join(["bandwidth", str(block_size), str(rw), str(io_depth)])
            bandwidth_file_name = ".".join([bandwidth_file_name, "csv"])
            t = threading.Thread(target=target_obj.measure_bandwidth, args=(target_results_dir, bandwidth_file_name,))
            threads.append(t)

        if target_obj.enable_dpdk_memory:
            # BUGFIX: args must be a tuple — "(x)" is just x, and Thread would
            # unpack the path string into one positional arg per character.
            t = threading.Thread(target=target_obj.measure_dpdk_memory, args=(target_results_dir,))
            threads.append(t)

        for t in threads:
            t.start()
        for t in threads:
            t.join()

        for i in initiators:
            if i.mode == "kernel":
                i.kernel_init_disconnect(i.nic_ips, target_obj.subsys_no)
            i.copy_result_files(target_results_dir)

    target_obj.parse_results(target_results_dir)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.