| source | python |
|---|---|
money.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from multiprocessing import Process, Queue
import os, time, random

# Code executed by the writer process:
def write(q):
    print('Process to write: %s' % os.getpid())
    for value in ['A', 'B', 'C']:
        print('Put %s to queue...' % value)
        q.put(value)
        time.sleep(random.random())

# Code executed by the reader process:
def read(q):
    print('Process to read: %s' % os.getpid())
    while True:
        value = q.get(True)
        print('Get %s from queue.' % value)

if __name__ == '__main__':
    # The parent process creates the Queue and passes it to both child processes:
    q = Queue()
    pw = Process(target=write, args=(q,))
    pr = Process(target=read, args=(q,))
    # Start the writer child process pw:
    pw.start()
    # Start the reader child process pr:
    pr.start()
    # Wait for pw to finish:
    pw.join()
    # pr runs an infinite loop and can never finish on its own, so terminate it forcibly:
    pr.terminate()
|
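A minimal sketch (not part of money.py) of the same writer/reader pattern with a None sentinel instead of terminate(): the writer puts None when it is done, the reader breaks out of its loop on None, and both child processes can then be joined cleanly.

from multiprocessing import Process, Queue
import os, time, random

def write(q):
    print('Process to write: %s' % os.getpid())
    for value in ['A', 'B', 'C']:
        print('Put %s to queue...' % value)
        q.put(value)
        time.sleep(random.random())
    q.put(None)  # sentinel: nothing more will be written

def read(q):
    print('Process to read: %s' % os.getpid())
    while True:
        value = q.get(True)
        if value is None:  # sentinel received, stop reading
            break
        print('Get %s from queue.' % value)

if __name__ == '__main__':
    q = Queue()
    pw = Process(target=write, args=(q,))
    pr = Process(target=read, args=(q,))
    pw.start()
    pr.start()
    pw.join()
    pr.join()  # now possible, because read() returns after the sentinel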
sampler.py
|
import time
import numpy as np
from multiprocessing import Process, Queue, cpu_count
import dataset.utils as utils
class ParallelSampler():
def __init__(self, data, args, sampled_classes, source_classes, num_episodes=None):
self.data = data
self.args = args
self.num_episodes = num_episodes
self.sampled_classes = sampled_classes
self.source_classes = source_classes
self.all_classes = np.unique(self.data['label'])
self.num_classes = len(self.all_classes)
if self.num_classes < self.args.way:
raise ValueError("Total number of classes is less than #way.")
self.idx_list = []
for y in self.all_classes:
self.idx_list.append(
np.squeeze(np.argwhere(self.data['label'] == y)))
self.count = 0
self.done_queue = Queue()
self.num_cores = cpu_count() if args.n_workers == 0 else args.n_workers
self.p_list = []
for i in range(self.num_cores):
self.p_list.append(
Process(target=self.worker, args=(self.done_queue, self.sampled_classes, self.source_classes)))
for i in range(self.num_cores):
self.p_list[i].start()
def get_epoch(self):
for _ in range(self.num_episodes):
# wait until self.thread finishes
support, query, source = self.done_queue.get()
# convert to torch.tensor
support = utils.to_tensor(support, self.args.cuda, ['raw'])
query = utils.to_tensor(query, self.args.cuda, ['raw'])
source = utils.to_tensor(source, self.args.cuda, ['raw'])
support['is_support'] = True
query['is_support'] = False
source['is_support'] = False
yield support, query, source
def worker(self, done_queue, sampled_classes, source_classes):
'''
Generate one task (support and query).
Store into self.support[self.cur] and self.query[self.cur]
'''
while True:
if done_queue.qsize() > 100:
time.sleep(1)
continue
# sample examples
support_idx, query_idx, source_idx = [], [], []
for y in sampled_classes:
tmp = np.random.permutation(len(self.idx_list[y]))
support_idx.append(
self.idx_list[y][tmp[:self.args.shot]])
query_idx.append(
self.idx_list[y][
tmp[self.args.shot:self.args.shot + self.args.query]])
for z in source_classes:
tmp = np.random.permutation(len(self.idx_list[z]))
source_idx.append(
self.idx_list[z][tmp[:self.args.query]]
)
support_idx = np.concatenate(support_idx)
query_idx = np.concatenate(query_idx)
source_idx = np.concatenate(source_idx)
# aggregate examples
max_support_len = np.max(self.data['text_len'][support_idx])
max_query_len = np.max(self.data['text_len'][query_idx])
max_source_len = np.max(self.data['text_len'][source_idx])
support = utils.select_subset(self.data, {}, ['text', 'text_len', 'label'],
support_idx, max_support_len)
query = utils.select_subset(self.data, {}, ['text', 'text_len', 'label'],
query_idx, max_query_len)
source = utils.select_subset(self.data, {}, ['text', 'text_len', 'label'],
source_idx, max_source_len)
done_queue.put((support, query, source))
def __del__(self):
'''
Need to terminate the processes when deleting the object
'''
for i in range(self.num_cores):
self.p_list[i].terminate()
del self.done_queue
class ParallelSampler_Test():
def __init__(self, data, args, num_episodes=None):
self.data = data
self.args = args
self.num_episodes = num_episodes
self.all_classes = np.unique(self.data['label'])
self.num_classes = len(self.all_classes)
if self.num_classes < self.args.way:
raise ValueError("Total number of classes is less than #way.")
self.idx_list = []
for y in self.all_classes:
self.idx_list.append(
np.squeeze(np.argwhere(self.data['label'] == y)))
self.count = 0
self.done_queue = Queue()
self.num_cores = cpu_count() if args.n_workers == 0 else args.n_workers
self.p_list = []
for i in range(self.num_cores):
self.p_list.append(
Process(target=self.worker, args=(self.done_queue,)))
for i in range(self.num_cores):
self.p_list[i].start()
def get_epoch(self):
for _ in range(self.num_episodes):
# wait until self.thread finishes
support, query = self.done_queue.get()
# convert to torch.tensor
support = utils.to_tensor(support, self.args.cuda, ['raw'])
query = utils.to_tensor(query, self.args.cuda, ['raw'])
support['is_support'] = True
query['is_support'] = False
yield support, query
def worker(self, done_queue):
'''
Generate one task (support and query).
Store into self.support[self.cur] and self.query[self.cur]
'''
while True:
if done_queue.qsize() > 100:
time.sleep(1)
continue
# sample ways
sampled_classes = np.random.permutation(
self.num_classes)[:self.args.way]
source_classes = []
for j in range(self.num_classes):
if j not in sampled_classes:
source_classes.append(self.all_classes[j])
source_classes = sorted(source_classes)
# sample examples
support_idx, query_idx = [], []
for y in sampled_classes:
tmp = np.random.permutation(len(self.idx_list[y]))
support_idx.append(
self.idx_list[y][tmp[:self.args.shot]])
query_idx.append(
self.idx_list[y][
tmp[self.args.shot:self.args.shot + self.args.query]])
support_idx = np.concatenate(support_idx)
query_idx = np.concatenate(query_idx)
if self.args.mode == 'finetune' and len(query_idx) == 0:
query_idx = support_idx
# aggregate examples
max_support_len = np.max(self.data['text_len'][support_idx])
max_query_len = np.max(self.data['text_len'][query_idx])
support = utils.select_subset(self.data, {}, ['text', 'text_len', 'label'],
support_idx, max_support_len)
query = utils.select_subset(self.data, {}, ['text', 'text_len', 'label'],
query_idx, max_query_len)
done_queue.put((support, query))
def __del__(self):
'''
Need to terminate the processes when deleting the object
'''
for i in range(self.num_cores):
self.p_list[i].terminate()
del self.done_queue
def task_sampler(data, args):
all_classes = np.unique(data['label'])
num_classes = len(all_classes)
# sample classes
temp = np.random.permutation(num_classes)
sampled_classes = temp[:args.way]
source_classes = temp[args.way:args.way + args.way]
return sampled_classes, source_classes
|
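The workers above build standard N-way K-shot episodes before handing them to dataset.utils.select_subset. A self-contained sketch of just that sampling step, using numpy only and hypothetical toy labels and episode sizes (way, shot, query), so it runs without the dataset.utils module or an args namespace:

import numpy as np

# Sketch of the episode sampling done inside worker(), on hypothetical toy data.
way, shot, query = 3, 2, 2                       # hypothetical episode sizes
labels = np.random.randint(0, 10, size=200)      # hypothetical dataset labels
all_classes = np.unique(labels)
idx_list = [np.squeeze(np.argwhere(labels == y)) for y in all_classes]

sampled_classes = np.random.permutation(len(all_classes))[:way]
support_idx, query_idx = [], []
for y in sampled_classes:
    perm = np.random.permutation(len(idx_list[y]))
    support_idx.append(idx_list[y][perm[:shot]])            # K support examples per class
    query_idx.append(idx_list[y][perm[shot:shot + query]])  # Q query examples per class

support_idx = np.concatenate(support_idx)
query_idx = np.concatenate(query_idx)
print(support_idx.shape, query_idx.shape)  # (way*shot,), (way*query,)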
lab5old.py
|
import random, time
from threading import BoundedSemaphore, Thread
max_itens = 5
count=0
itens=[]
#container = 0
#container = BoundedSemaphore(max_itens)
def producer(qtd):
global count
global itens
for i in range(qtd):
item = random.randrange(1, 10)
if count<max_itens:
itens.append(item)
time.sleep(1)
count+=1
print("[Producer]",time.ctime(),item,itens,count)
else:
print("[Producer] - FULL")
# try:
# container.release()
# print("Produced an item.")
# except ValueError:
# print("Full, skipping.")
def consumer(qtd):
global count
global itens
for i in range(qtd):
if count>0:
item = itens.pop(count-1)
time.sleep(1)
count-=1
print("[Consumer]",time.ctime(),item,itens,count)
else:
print("[Consumer] - EMPTY")
# print("[Consumer]",time.ctime(),container)
# if container.acquire(False):
# print("Consumed an item.")
# else:
# print("Empty, skipping.")
if __name__=="__main__":
start = time.time()
qtd = random.randrange(3, 10)
print(qtd)
thread1 = Thread(target=producer,args=(qtd,))
thread1.start()
thread2 = Thread(target=consumer,args=(qtd,))
thread2.start()
thread1.join()
thread2.join()
print(itens)
print(count)
end = time.time()
print('Time taken in seconds -', end - start)
|
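lab5old.py mutates the shared itens list and count counter from two threads with no synchronization. A sketch of one thread-safe alternative that replaces the global list and counter with a bounded queue.Queue (an illustration only, not the semaphore-based approach hinted at in the commented-out code):

import random, time
from queue import Queue, Full, Empty
from threading import Thread

max_itens = 5
itens = Queue(maxsize=max_itens)   # the queue does the locking for us

def producer(qtd):
    for _ in range(qtd):
        item = random.randrange(1, 10)
        try:
            itens.put(item, timeout=1)   # waits briefly if the buffer is full
            print("[Producer]", time.ctime(), item, itens.qsize())
        except Full:
            print("[Producer] - FULL")
        time.sleep(1)

def consumer(qtd):
    for _ in range(qtd):
        try:
            item = itens.get(timeout=1)  # waits briefly if the buffer is empty
            print("[Consumer]", time.ctime(), item, itens.qsize())
        except Empty:
            print("[Consumer] - EMPTY")
        time.sleep(1)

if __name__ == "__main__":
    qtd = random.randrange(3, 10)
    t1 = Thread(target=producer, args=(qtd,))
    t2 = Thread(target=consumer, args=(qtd,))
    t1.start(); t2.start()
    t1.join(); t2.join()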
cmdserver.py
|
"""
*azcam.tools.cmdserver* contains the CommandServer class for azcam's socket command interface.
"""
import json
import os
import socket
import socketserver
import threading
import time
import azcam
class CommandServer(socketserver.ThreadingTCPServer):
"""
Main class for cmdserver tool.
CommandServer class to receive and execute client commands over the socket interface.
This is a socket server which receives command strings, executes them, and returns a reply string.
The server normally runs in a thread so as to not block the command line. Each client which
connects runs in its own thread (through the ThreadingTCPServer class) and so operates
concurrently. There is no global locking, so this process can be dangerous, but it does allow
multiple clients to interact simultaneously with azcam, which is important for operations
like camera control, telescope movement, temperature readback, instrument control, etc.
"""
def __init__(self, port=2402):
self.welcome_message = None
self.port = port # listen port
self.is_running = 0 # True when server is running
self.server = 0 # server instance
self.verbose = 0
self.log_connections = 1 # log open and close connections
self.monitorinterface = 0
self.logcommands = 0
self.socketnames = {}
self.use_clientname = 1 # log client name with command
self.default_tool = None # object not name
self.currentclient = 0
azcam.db.cmdserver = self
def begin(self, port=-1):
"""
Start command server.
"""
if port == -1:
port = self.port
else:
self.port = port
server_address = ("", port) # '' better than localhost when no network
try:
self.server = ThreadedTCPServer(server_address, MyBaseRequestHandler)
self.server.RequestHandlerClass.cmdserver = self
self.is_running = 1
self.server.serve_forever() # waits here forever
except Exception as message:
self.is_running = 0
azcam.log(f"ERROR in cmdserver:{repr(message)} Is it already running? Exiting...")
time.sleep(2)
os._exit(1)
# Exits here when server is aborted
return
def start(self, port=-1):
"""
Starts command server in a thread.
"""
cmdthread = threading.Thread(target=self.begin, name="cmdserver")
cmdthread.daemon = True # terminates when main process exits
cmdthread.start()
return
def stop(self):
"""
Stops command server running in thread.
"""
self.server.shutdown()
self.is_running = 0
return
def command(self, command: str):
"""
Parse and execute a socket command string.
Returns the reply string, always starting with OK or ERROR.
"""
objid, args, kwargs = self.parser(command)
reply = self.execute(objid, *args, **kwargs)
return reply
def parser(self, command: str) -> tuple((object, list, dict)):
"""
Parse a socket command string.
If command does not start with a dotted object.method token, then
assume it is the method of the default_tool.
Returns (objid, args, kwargs)
objid is a bound method of a class
args is a list of strings
kwargs is a dict of strings
"""
# parse command string
tokens = azcam.utils.parse(command, 0)
cmd = tokens[0]
arglist = tokens[1:]
args = []
kwargs = {}
if len(arglist) == 0:
pass
else:
for token in arglist:
if "=" in token:
keyname, value = token.split("=")
kwargs[keyname] = value
else:
args.append(token)
if "." not in cmd:
# process default_tool commands
if self.default_tool is None:
s = f"command not recognized: {cmd} "
raise azcam.AzcamError(s)
else:
objid = getattr(self.default_tool, cmd)
else:
# primary object must be in db.remote_tools
objects = cmd.split(".")
if objects[0] not in azcam.db.remote_tools:
raise azcam.AzcamError(f"remote call not allowed: {objects[0]}", 4)
if len(objects) == 1:
objid = azcam.db.get(objects[0])
elif len(objects) == 2:
objid = getattr(azcam.db.get(objects[0]), objects[1])
elif len(objects) == 3:
objid = getattr(getattr(azcam.db.get(objects[0]), objects[1]), objects[2])
elif len(objects) == 4:
objid = getattr(
getattr(getattr(azcam.db.get(objects[0]), objects[1]), objects[2]),
objects[3],
)
else:
objid = None # too complicated for now
kwargs = {}
l1 = len(tokens)
if l1 > 1:
args = tokens[1:]
if "=" in args[0]:
# assume all keywords for now
kwargs = {}
for argtoken in args:
keyword, value = argtoken.split("=")
kwargs[keyword] = value
args = []
else:
args = []
return objid, args, kwargs
def execute(self, objid, *args, **kwargs) -> str:
"""
Executes a command which has been parsed into object method and arguments.
Args:
objid ([type]): [description]
args ([type]): [description]
kwargs ([type]): [description]
Returns:
str: reply from command executed. Always starts with OK or ERROR.
"""
if kwargs == {}:
    if args == []:
        reply = objid()
    else:
        reply = objid(*args)
else:
    reply = objid(*args, **kwargs)
reply = self.reply(reply)
return reply
def reply(self, reply: str):
"""
Create a reply string for a socket command.
Args:
reply (str): command reply
Returns:
[type]: formatted reply string
"""
if reply is None or reply == "":
s = ""
elif type(reply) == str:
s = reply
elif type(reply) == list:
s = ""
for x in reply:
if type(x) == str and " " in x: # check if space in the string
s = s + " " + "'" + str(x) + "'"
else:
s = s + " " + str(x)
s = s.strip()
elif type(reply) == dict:
s = json.dumps(reply)
else:
s = repr(reply)
if s != '""':
s = s.strip()
# add OK status if needed
if not (s.startswith("OK") or s.startswith("ERROR") or s.startswith("WARNING")):
s = "OK " + s
s = s.strip()
return s
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
# allow_reuse_address = True
allow_reuse_address = False
class MyBaseRequestHandler(socketserver.BaseRequestHandler):
def __init__(self, request, client_address, server):
azcam.db.cmdserver.currentclient += 1
self.currentclient = azcam.db.cmdserver.currentclient
socketserver.BaseRequestHandler.__init__(self, request, client_address, server)
def handle(self):
"""
Called when a connection is made from a client.
Starts an infinite loop waiting for new commands.
Commands are executed sequentially.
"""
if azcam.db.cmdserver.welcome_message is not None:
self.request.send(str.encode(azcam.db.cmdserver.welcome_message + "\r\n"))
while True:
try:
prefix_in = f"Rcv{self.currentclient:01}> "
prefix_out = f"Out{self.currentclient:01}> " # extra space for indent
# ************************************************************************
# receive command from the network socket
# ************************************************************************
try:
command_string = self.receive_command(self.currentclient).strip()
except ConnectionResetError:
azcam.log(
f"Client {azcam.db.cmdserver.socketnames[self.currentclient]} disconnected",
prefix=prefix_in,
)
break
except Exception as e:
azcam.log(f"ERROR in handle: {e}", prefix="Err-> ")
break
# ************************************************************************
# disconnect on empty string - important
# ************************************************************************
if command_string.strip() == "":
try:
self.request.send(str.encode("OK\r\n"))
except OSError:
pass
except Exception as e:
azcam.log(f"Null command send error for client {self.currentclient}: {e}")
# azcam.log(f"closing connection to client {self.currentclient}")
break
# ************************************************************************
# log received command
# ************************************************************************
try:
self.cmdserver.socketnames[self.currentclient]
except Exception:
azcam.db.cmdserver.socketnames[
self.currentclient
] = f"unknown_{self.currentclient}"
if azcam.db.cmdserver.logcommands:
azcam.log(command_string.strip(), prefix=prefix_in)
# ************************************************************************
# check special cases which do not leave cmdserver
# ************************************************************************
# close socket connection to client
if command_string.lower().startswith("closeconnection"):
azcam.log(
f"closing connection to {azcam.db.cmdserver.socketnames[self.currentclient]}",
prefix=prefix_in,
)
self.request.send(str.encode("OK\r\n"))
self.request.close()
break
# register - register a client name, example: register console
elif command_string.lower().startswith("register"):
x = command_string.split(" ")
azcam.db.cmdserver.socketnames[
self.currentclient
] = f"{x[1]}_{int(self.currentclient)}"
self.request.send(str.encode("OK\r\n"))
azcam.log(f"OK client {self.currentclient}", prefix=prefix_out) # log reply
command_string = ""
# echo - for polling as "echo hello" or just "echo"
elif command_string.lower().startswith("echo"):
s = command_string.split(" ")
if len(s) == 1:
reply = "OK"
elif len(s) == 2:
reply = "OK %s" % s[1]
else:
reply = "OK %s" % " ".join(s[1:])
self.request.send(str.encode(reply + "\r\n"))
if azcam.db.cmdserver.logcommands:
azcam.log("%s" % reply, prefix=prefix_out)
command_string = ""
# update - azcammonitor
elif command_string.lower().startswith("update"):
if azcam.db.cmdserver.monitorinterface == 0:
azcam.log("ERROR could not update azcammonitor", prefix=prefix_out)
reply = "ERROR Could not update azcammonitor"
else:
azcam.db.cmdserver.monitorinterface.Register()
azcam.log("%s" % "OK", prefix=prefix_out)
reply = "OK"
self.request.send(str.encode(reply + "\r\n"))
command_string = ""
# exit - send reply for handshake before closing socket and shutting down
elif command_string.lower().startswith("exit"):
self.request.send(str.encode("OK\r\n"))
azcam.log("%s" % "OK", prefix=prefix_out) # log reply
self.request.close()
os._exit(0) # kill python
# ************************************************************************
# process all other command_strings
# ************************************************************************
if command_string != "":
# execute command
try:
reply = azcam.db.cmdserver.command(command_string)
except Exception as e:
reply = f"ERROR {repr(e)}"
# log reply
if azcam.db.cmdserver.logcommands:
azcam.log(reply, prefix=prefix_out)
# send reply to socket
self.request.send(str.encode(reply + "\r\n"))
else:
time.sleep(0.10) # for telnet
except Exception as message: # catch everything so cmdserver never crashes
azcam.log(f"ERROR in cmdserver: {command_string}: {message}")
# try to reply but this may not work
try:
self.request.send(str.encode(f"ERROR {repr(message)}\r\n"))
except Exception as e:
print(e)
pass # OK to do nothing
return
def setup(self):
"""
Called when new connection made.
"""
if azcam.db.cmdserver.log_connections and azcam.db.cmdserver.verbose:
azcam.log(
f"Client connection made from {str(self.client_address)}",
prefix="cmd> ",
)
return socketserver.BaseRequestHandler.setup(self)
def finish(self):
"""
Called when existing connection is closed.
"""
if azcam.db.cmdserver.log_connections and azcam.db.cmdserver.verbose:
azcam.log(f"Connection closed to {str(self.client_address)}")
return socketserver.BaseRequestHandler.finish(self)
def receive_command(self, currentclient):
"""
Receive a string from socket until terminator is found.
Returns a string.
Returns empty string on error.
:param currentclient: client ID for socket ID
"""
terminator = "\n" # likely ends with \r\n
# read socket until terminator found
msg = ""
msg1 = ""
while True:
try:
msg1 = self.request.recv(1024).decode()
if msg1 == "":
return ""
if msg1[-1] == terminator: # found terminator
msg += msg1
break
msg += msg1
except socket.error as e:
if e.errno == 10054: # connection closed
pass
else:
azcam.log(f"receive_command: {e}", prefix="Err-> ")
break
reply = msg[:-1] # \n
if len(reply) == 0:
return ""
if reply[-1] == "\r":
reply = reply[:-1] # strip the trailing \r as well
if reply is None:
reply = ""
return reply
|
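For reference, a rough client sketch for the line-oriented protocol implemented above: commands are newline-terminated strings and replies are single lines beginning with OK, ERROR, or WARNING. It assumes a CommandServer is already listening on localhost at the default port 2402 and that no welcome_message is configured:

import socket

def send_command(sock, command):
    """Send one newline-terminated command and return the reply line."""
    sock.sendall((command + "\n").encode())
    reply = b""
    while not reply.endswith(b"\n"):      # server replies end with \r\n
        chunk = sock.recv(1024)
        if not chunk:
            break
        reply += chunk
    return reply.decode().strip()         # e.g. "OK ..." or "ERROR ..."

if __name__ == "__main__":
    with socket.create_connection(("localhost", 2402), timeout=5) as s:
        print(send_command(s, "register console"))  # name this client connection
        print(send_command(s, "echo hello"))        # expect "OK hello"
        print(send_command(s, "closeconnection"))   # ask the server to close the socket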
test_threading_local.py
|
import sys
import unittest
from doctest import DocTestSuite
from test import support
import weakref
import gc
# Modules under test
import _thread
import threading
import _threading_local
class Weak(object):
pass
def target(local, weaklist):
weak = Weak()
local.weak = weak
weaklist.append(weakref.ref(weak))
class BaseLocalTest:
def test_local_refs(self):
self._local_refs(20)
self._local_refs(50)
self._local_refs(100)
def _local_refs(self, n):
local = self._local()
weaklist = []
for i in range(n):
t = threading.Thread(target=target, args=(local, weaklist))
t.start()
t.join()
del t
support.gc_collect() # For PyPy or other GCs.
self.assertEqual(len(weaklist), n)
# XXX _threading_local keeps the local of the last stopped thread alive.
deadlist = [weak for weak in weaklist if weak() is None]
self.assertIn(len(deadlist), (n-1, n))
# Assignment to the same thread local frees it sometimes (!)
local.someothervar = None
support.gc_collect() # For PyPy or other GCs.
deadlist = [weak for weak in weaklist if weak() is None]
self.assertIn(len(deadlist), (n-1, n), (n, len(deadlist)))
def test_derived(self):
# Issue 3088: if there is a threads switch inside the __init__
# of a threading.local derived class, the per-thread dictionary
# is created but not correctly set on the object.
# The first member set may be bogus.
import time
class Local(self._local):
def __init__(self):
time.sleep(0.01)
local = Local()
def f(i):
local.x = i
# Simply check that the variable is correctly set
self.assertEqual(local.x, i)
with support.start_threads(threading.Thread(target=f, args=(i,))
for i in range(10)):
pass
def test_derived_cycle_dealloc(self):
# http://bugs.python.org/issue6990
class Local(self._local):
pass
locals = None
passed = False
e1 = threading.Event()
e2 = threading.Event()
def f():
nonlocal passed
# 1) Involve Local in a cycle
cycle = [Local()]
cycle.append(cycle)
cycle[0].foo = 'bar'
# 2) GC the cycle (triggers threadmodule.c::local_clear
# before local_dealloc)
del cycle
support.gc_collect() # For PyPy or other GCs.
e1.set()
e2.wait()
# 4) New Locals should be empty
passed = all(not hasattr(local, 'foo') for local in locals)
t = threading.Thread(target=f)
t.start()
e1.wait()
# 3) New Locals should recycle the original's address. Creating
# them in the thread overwrites the thread state and avoids the
# bug
locals = [Local() for i in range(10)]
e2.set()
t.join()
self.assertTrue(passed)
def test_arguments(self):
# Issue 1522237
class MyLocal(self._local):
def __init__(self, *args, **kwargs):
pass
MyLocal(a=1)
MyLocal(1)
self.assertRaises(TypeError, self._local, a=1)
self.assertRaises(TypeError, self._local, 1)
def _test_one_class(self, c):
self._failed = "No error message set or cleared."
obj = c()
e1 = threading.Event()
e2 = threading.Event()
def f1():
obj.x = 'foo'
obj.y = 'bar'
del obj.y
e1.set()
e2.wait()
def f2():
try:
foo = obj.x
except AttributeError:
# This is expected -- we haven't set obj.x in this thread yet!
self._failed = "" # passed
else:
self._failed = ('Incorrectly got value %r from class %r\n' %
(foo, c))
sys.stderr.write(self._failed)
t1 = threading.Thread(target=f1)
t1.start()
e1.wait()
t2 = threading.Thread(target=f2)
t2.start()
t2.join()
# The test is done; just let t1 know it can exit, and wait for it.
e2.set()
t1.join()
self.assertFalse(self._failed, self._failed)
def test_threading_local(self):
self._test_one_class(self._local)
def test_threading_local_subclass(self):
class LocalSubclass(self._local):
"""To test that subclasses behave properly."""
self._test_one_class(LocalSubclass)
def _test_dict_attribute(self, cls):
obj = cls()
obj.x = 5
self.assertEqual(obj.__dict__, {'x': 5})
with self.assertRaises(AttributeError):
obj.__dict__ = {}
with self.assertRaises(AttributeError):
del obj.__dict__
def test_dict_attribute(self):
self._test_dict_attribute(self._local)
def test_dict_attribute_subclass(self):
class LocalSubclass(self._local):
"""To test that subclasses behave properly."""
self._test_dict_attribute(LocalSubclass)
def test_cycle_collection(self):
class X:
pass
x = X()
x.local = self._local()
x.local.x = x
wr = weakref.ref(x)
del x
support.gc_collect() # For PyPy or other GCs.
self.assertIsNone(wr())
class ThreadLocalTest(unittest.TestCase, BaseLocalTest):
_local = _thread._local
class PyThreadingLocalTest(unittest.TestCase, BaseLocalTest):
_local = _threading_local.local
def test_main():
suite = unittest.TestSuite()
suite.addTest(DocTestSuite('_threading_local'))
suite.addTest(unittest.makeSuite(ThreadLocalTest))
suite.addTest(unittest.makeSuite(PyThreadingLocalTest))
local_orig = _threading_local.local
def setUp(test):
_threading_local.local = _thread._local
def tearDown(test):
_threading_local.local = local_orig
suite.addTest(DocTestSuite('_threading_local',
setUp=setUp, tearDown=tearDown)
)
support.run_unittest(suite)
if __name__ == '__main__':
test_main()
|
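As a reminder of the behavior these tests exercise, a minimal sketch showing that attributes set on a threading.local instance are visible only in the thread that set them:

import threading

local = threading.local()

def worker(n):
    local.x = n                    # set in this thread's private namespace
    print(threading.current_thread().name, "sees x =", local.x)

threads = [threading.Thread(target=worker, args=(i,)) for i in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()

print("main thread has x:", hasattr(local, "x"))   # False: never set in the main thread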
pyshell.py
|
#! /usr/bin/env python3
import sys
try:
from tkinter import *
except ImportError:
print("** IDLE can't import Tkinter.\n"
"Your Python may not be configured for Tk. **", file=sys.__stderr__)
raise SystemExit(1)
# Valid arguments for the ...Awareness call below are defined in the following.
# https://msdn.microsoft.com/en-us/library/windows/desktop/dn280512(v=vs.85).aspx
if sys.platform == 'win32':
import ctypes
PROCESS_SYSTEM_DPI_AWARE = 1
try:
ctypes.OleDLL('shcore').SetProcessDpiAwareness(PROCESS_SYSTEM_DPI_AWARE)
except (AttributeError, OSError):
pass
import tkinter.messagebox as tkMessageBox
if TkVersion < 8.5:
root = Tk() # otherwise create root in main
root.withdraw()
from idlelib.run import fix_scaling
fix_scaling(root)
tkMessageBox.showerror("Idle Cannot Start",
"Idle requires tcl/tk 8.5+, not %s." % TkVersion,
parent=root)
raise SystemExit(1)
from code import InteractiveInterpreter
import linecache
import os
import os.path
from platform import python_version
import re
import socket
import subprocess
import threading
import time
import tokenize
import warnings
from idlelib.colorizer import ColorDelegator
from idlelib.config import idleConf
from idlelib import debugger
from idlelib import debugger_r
from idlelib.editor import EditorWindow, fixwordbreaks
from idlelib.filelist import FileList
from idlelib.outwin import OutputWindow
from idlelib import rpc
from idlelib.run import idle_formatwarning, PseudoInputFile, PseudoOutputFile
from idlelib.undo import UndoDelegator
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0 # someday pass in host, port for remote debug capability
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
warning_stream = sys.__stderr__ # None, at least on Windows, if no console.
def idle_showwarning(
message, category, filename, lineno, file=None, line=None):
"""Show Idle-format warning (after replacing warnings.showwarning).
The differences are the formatter called, the file=None replacement,
which can be None, the capture of the consequence AttributeError,
and the output of a hard-coded prompt.
"""
if file is None:
file = warning_stream
try:
file.write(idle_formatwarning(
message, category, filename, lineno, line=line))
file.write(">>> ")
except (AttributeError, OSError):
pass # if file (probably __stderr__) is invalid, skip warning.
_warnings_showwarning = None
def capture_warnings(capture):
"Replace warning.showwarning with idle_showwarning, or reverse."
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = idle_showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
capture_warnings(True)
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(skipping them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for key in list(cache):
if key[:1] + key[-1:] == '<>':
save[key] = cache.pop(key)
orig_checkcache(filename)
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(
idleConf.userdir, 'breakpoints.lst')
# whenever a file is changed, restore breakpoints
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
if self.io.filename:
self.restore_file_breaks()
self.color_breakpoint_text()
rmenu_specs = [
("Cut", "<<cut>>", "rmenu_check_cut"),
("Copy", "<<copy>>", "rmenu_check_copy"),
("Paste", "<<paste>>", "rmenu_check_paste"),
(None, None, None),
("Set Breakpoint", "<<set-breakpoint-here>>", None),
("Clear Breakpoint", "<<clear-breakpoint-here>>", None)
]
def color_breakpoint_text(self, color=True):
"Turn colorizing of breakpoint text on or off"
if self.io is None:
# possible due to update in restore_file_breaks
return
if color:
theme = idleConf.CurrentTheme()
cfg = idleConf.GetHighlight(theme, "break")
else:
cfg = {'foreground': '', 'background': ''}
self.text.tag_config('BREAK', cfg)
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text.
# Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
with open(self.breakpointPath, "r") as fp:
lines = fp.readlines()
except OSError:
lines = []
try:
with open(self.breakpointPath, "w") as new_file:
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
except OSError as err:
if not getattr(self.root, "breakpoint_error_displayed", False):
self.root.breakpoint_error_displayed = True
tkMessageBox.showerror(title='IDLE Error',
message='Unable to update breakpoint list:\n%s'
% str(err),
parent=self.text)
def restore_file_breaks(self):
self.text.update() # this enables setting "BREAK" tags to be visible
if self.io is None:
# can happen if IDLE closes due to the .update() call
return
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
with open(self.breakpointPath, "r") as fp:
lines = fp.readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index].string))
end = int(float(ranges[index+1].string))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
def _close(self):
"Extend base method - clear breaks when module is closed"
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"Extend base class: IDLE supports a shell and breakpoints"
# override FileList's class variable, instances return PyShellEditorWindow
# instead of EditorWindow when new edit windows are created.
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"Extend base class: colorizer for the shell window itself"
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove("TODO", "1.0", "iomark")
self.tag_add("SYNC", "1.0", "iomark")
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.CurrentTheme()
self.tagdefs.update({
"stdin": {'background':None,'foreground':None},
"stdout": idleConf.GetHighlight(theme, "stdout"),
"stderr": idleConf.GetHighlight(theme, "stderr"),
"console": idleConf.GetHighlight(theme, "console"),
})
def removecolors(self):
# Don't remove shell color tags before "iomark"
for tag in self.tagdefs:
self.tag_remove(tag, "iomark", "end")
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"Override the base class - just re-raise EOFError"
raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.save_warnings_filters = None
self.restarting = False
self.subprocess_arglist = None
self.port = PORT
self.original_compiler_flags = self.compile.compiler.flags
_afterid = None
rpcclt = None
rpcsubproc = None
def spawn_subprocess(self):
if self.subprocess_arglist is None:
self.subprocess_arglist = self.build_subprocess_arglist()
self.rpcsubproc = subprocess.Popen(self.subprocess_arglist)
def build_subprocess_arglist(self):
assert (self.port!=0), (
"Socket should have been assigned a port number.")
w = ['-W' + s for s in sys.warnoptions]
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
if __name__ == 'idlelib.pyshell':
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
else:
command = "__import__('run').main(%r)" % (del_exitf,)
return [sys.executable] + w + ["-c", command, str(self.port)]
def start_subprocess(self):
addr = (HOST, self.port)
# GUI makes several attempts to acquire socket, listens for connection
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except OSError:
pass
else:
self.display_port_binding_error()
return None
# if PORT was 0, system will assign an 'ephemeral' port. Find it out:
self.port = self.rpcclt.listening_sock.getsockname()[1]
# if PORT was not 0, probably working with a remote execution server
if PORT != 0:
# To allow reconnection within the 2MSL wait (cf. Stevens TCP
# V1, 18.6), set SO_REUSEADDR. Note that this can be problematic
# on Windows since the implementation allows two active sockets on
# the same address!
self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.spawn_subprocess()
#time.sleep(20) # test to simulate GUI not accepting connection
# Accept the connection from the Python execution server
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout:
self.display_no_subprocess_error()
return None
self.rpcclt.register("console", self.tkconsole)
self.rpcclt.register("stdin", self.tkconsole.stdin)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path(with_cwd=True)
self.poll_subprocess()
return self.rpcclt
def restart_subprocess(self, with_cwd=False, filename=''):
if self.restarting:
return self.rpcclt
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
debugger_r.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.terminate_subprocess()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout:
self.display_no_subprocess_error()
return None
self.transfer_path(with_cwd=with_cwd)
console.stop_readline()
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
tag = 'RESTART: ' + (filename if filename else 'Shell')
halfbar = ((int(console.width) -len(tag) - 4) // 2) * '='
console.write("\n{0} {1} {0}".format(halfbar, tag))
console.text.mark_set("restart", "end-1c")
console.text.mark_gravity("restart", "left")
if not filename:
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
debugger_r.restart_subprocess_debugger(self.rpcclt)
# reload remote debugger breakpoints for all PyShellEditWindows
debug.load_breakpoints()
self.compile.compiler.flags = self.original_compiler_flags
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
if self._afterid is not None:
self.tkconsole.text.after_cancel(self._afterid)
try:
self.rpcclt.listening_sock.close()
except AttributeError: # no socket
pass
try:
self.rpcclt.close()
except AttributeError: # no socket
pass
self.terminate_subprocess()
self.tkconsole.executing = False
self.rpcclt = None
def terminate_subprocess(self):
"Make sure subprocess is terminated"
try:
self.rpcsubproc.kill()
except OSError:
# process already terminated
return
else:
try:
self.rpcsubproc.wait()
except OSError:
return
def transfer_path(self, with_cwd=False):
if with_cwd: # Issue 13506
path = [''] # include Current Working Directory
path.extend(sys.path)
else:
path = sys.path
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (path,))
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, OSError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print(repr(what), file=console)
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "pyshell.ModifiedInterpreter: Subprocess ERROR:\n"
print(errmsg, what, file=sys.__stderr__)
print(errmsg, what, file=console)
# we received a response to the currently active seq number:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
# Reschedule myself
if not self.tkconsole.closing:
self._afterid = self.tkconsole.text.after(
self.tkconsole.pollinterval, self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
from idlelib import debugobj_r
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = debugobj_r.StubObjectTreeItem(self.rpcclt, oid)
from idlelib.tree import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.CurrentTheme()
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
if source is None:
with tokenize.open(filename) as fp:
source = fp.read()
if use_subprocess:
source = (f"__file__ = r'''{os.path.abspath(filename)}'''\n"
+ source + "\ndel __file__")
try:
code = compile(source, filename, "exec")
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
print('*** Error in script or command!\n'
'Traceback (most recent call last):',
file=self.tkconsole.stderr)
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action="error", category=SyntaxWarning)
# at the moment, InteractiveInterpreter expects str
assert isinstance(source, str)
#if isinstance(source, str):
# from idlelib import iomenu
# try:
# source = source.encode(iomenu.encoding)
# except UnicodeError:
# self.tkconsole.resetoutput()
# self.write("Unsupported characters in input\n")
# return
try:
# InteractiveInterpreter.runsource() calls its runcode() method,
# which is overridden (see below)
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
"""Override Interactive Interpreter method: Use Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
tkconsole = self.tkconsole
text = tkconsole.text
text.tag_remove("ERROR", "1.0", "end")
type, value, tb = sys.exc_info()
msg = getattr(value, 'msg', '') or value or "<no detail available>"
lineno = getattr(value, 'lineno', '') or 1
offset = getattr(value, 'offset', '') or 0
if offset == 0:
lineno += 1 #mark end of offending line
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
tkconsole.colorize_syntax_error(text, pos)
tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % msg)
tkconsole.showprompt()
def showtraceback(self):
"Extend base class method to reset output properly"
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in list(c.keys()):
if key[:1] + key[-1:] != "<>":
del c[key]
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec(code, self.locals)
return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
self.interp.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec(code, self.locals)
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
parent=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print("IDLE internal error in runcode()",
file=self.tkconsole.stderr)
self.showtraceback()
self.tkconsole.endexecuting()
else:
if self.tkconsole.canceled:
self.tkconsole.canceled = False
print("KeyboardInterrupt", file=self.tkconsole.stderr)
else:
self.showtraceback()
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
def write(self, s):
"Override base class method"
return self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
tkMessageBox.showerror(
"Port Binding Error",
"IDLE can't bind to a TCP/IP port, which is necessary to "
"communicate with its Python execution server. This might be "
"because no networking is installed on this computer. "
"Run IDLE with the -n command line switch to start without a "
"subprocess and refer to Help/IDLE Help 'Running without a "
"subprocess' for further details.",
parent=self.tkconsole.text)
def display_no_subprocess_error(self):
tkMessageBox.showerror(
"Subprocess Startup Error",
"IDLE's subprocess didn't make connection. Either IDLE can't "
"start a subprocess or personal firewall software is blocking "
"the connection.",
parent=self.tkconsole.text)
def display_executing_dialog(self):
tkMessageBox.showerror(
"Already executing",
"The Python Shell window is already executing a command; "
"please wait until it is finished.",
parent=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "Python " + python_version() + " Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("window", "_Window"),
("help", "_Help"),
]
# New classes
from idlelib.history import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
OutputWindow.__init__(self, flist, None, None)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.sys_ps1 = sys.ps1 if hasattr(sys, 'ps1') else '>>> '
self.prompt_last_line = self.sys_ps1.split('\n')[-1]
self.prompt = self.sys_ps1 # Changes when debug active
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
from idlelib import iomenu
self.stdin = PseudoInputFile(self, "stdin", iomenu.encoding)
self.stdout = PseudoOutputFile(self, "stdout", iomenu.encoding)
self.stderr = PseudoOutputFile(self, "stderr", iomenu.encoding)
self.console = PseudoOutputFile(self, "console", iomenu.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self.stdin
try:
# page help() text to shell.
import pydoc # import must be done here to capture i/o rebinding.
# XXX KBK 27Dec07 use text viewer someday, but must work w/o subproc
pydoc.pager = pydoc.plainpager
except:
sys.stderr = sys.__stderr__
raise
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
_stop_readline_flag = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
parent=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
debugger_r.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
self.prompt = self.sys_ps1
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = debugger_r.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
self.prompt = "[DEBUG ON]\n" + self.sys_ps1
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"Your program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response is False:
return "cancel"
self.stop_readline()
self.canceled = True
self.closing = True
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "help", "copyright", "credits" or "license()" for more information.'
def begin(self):
self.text.mark_set("iomark", "insert")
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = ("==== No Subprocess ====\n\n" +
"WARNING: Running IDLE without a Subprocess is deprecated\n" +
"and will be removed in a later version. See Help/IDLE Help\n" +
"for details.\n\n")
sys.displayhook = rpc.displayhook
self.write("Python %s on %s\n%s\n%s" %
(sys.version, sys.platform, self.COPYRIGHT, nosub))
self.text.focus_force()
self.showprompt()
import tkinter
tkinter._default_root = None # 03Jan04 KBK What's this?
return True
def stop_readline(self):
if not self.reading: # no nested mainloop to exit.
return
self._stop_readline_flag = True
self.top.quit()
def readline(self):
save = self.reading
try:
self.reading = 1
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
if self._stop_readline_flag:
self._stop_readline_flag = False
return ""
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
self.resetoutput()
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
line = ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = 0
self.canceled = 1
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
# (but only if this is before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel, event)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return "break"
# No stdin mark -- just get the current line, less any prompt
indices = self.text.tag_nextrange("console", "insert linestart")
if indices and \
self.text.compare(indices[0], "<=", "insert linestart"):
self.recall(self.text.get(indices[1], "insert lineend"), event)
else:
self.recall(self.text.get("insert linestart", "insert lineend"), event)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop()
else:
self.runit()
return "break"
def recall(self, s, event):
# remove leading and trailing empty or whitespace lines
s = re.sub(r'^\s*\n', '' , s)
s = re.sub(r'\n\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "end-1c")
prefix = self.text.get("insert linestart", "insert")
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get("insert linestart", "insert")
self.text.insert("insert", lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
# replace orig base indentation with new indentation
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n'+line.rstrip())
finally:
self.text.see("insert")
self.text.undo_block_stop()
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
parent=self.text)
return
from idlelib.stackviewer import StackBrowser
StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
"Callback for Run/Restart Shell Cntl-F6"
self.interp.restart_subprocess(with_cwd=True)
def showprompt(self):
self.resetoutput()
self.console.write(self.prompt)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def resetoutput(self):
source = self.text.get("iomark", "end-1c")
if self.history:
self.history.store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
self.set_line_and_column()
def write(self, s, tags=()):
if isinstance(s, str) and len(s) and max(s) > '\uffff':
# Tk doesn't support outputting non-BMP characters
            # Let's assume the printed string is not very long,
# find first non-BMP character and construct informative
# UnicodeEncodeError exception.
for start, char in enumerate(s):
if char > '\uffff':
break
raise UnicodeEncodeError("UCS-2", char, start, start+1,
'Non-BMP character not supported in Tk')
try:
self.text.mark_gravity("iomark", "right")
count = OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
raise ###pass # ### 11Aug07 KBK if we are expecting exceptions
# let's find out what they are and be specific.
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
return count
def rmenu_check_cut(self):
try:
if self.text.compare('sel.first', '<', 'iomark'):
return 'disabled'
except TclError: # no selection, so the index 'sel.first' doesn't exist
return 'disabled'
return super().rmenu_check_cut()
def rmenu_check_paste(self):
if self.text.compare('insert','<','iomark'):
return 'disabled'
return super().rmenu_check_paste()
def fix_x11_paste(root):
"Make paste replace selection on x11. See issue #5124."
if root._windowingsystem == 'x11':
for cls in 'Text', 'Entry', 'Spinbox':
root.bind_class(
cls,
'<<Paste>>',
'catch {%W delete sel.first sel.last}\n' +
root.bind_class(cls, '<<Paste>>'))
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (DEPRECATED,
see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print(sys.argv)" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print(sys.argv)" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
import getopt
from platform import system
from idlelib import testing # bool value
from idlelib import macosx
global flist, root, use_subprocess
capture_warnings(True)
use_subprocess = True
enable_shell = False
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error as msg:
print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
print(" Warning: running IDLE without a subprocess is deprecated.",
file=sys.stderr)
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print("No script file: ", script)
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if not dir in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if dir not in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
enable_shell = enable_shell or not enable_edit
# Setup root. Don't break user code run in IDLE process.
# Don't change environment when testing.
if use_subprocess and not testing:
NoDefaultRoot()
root = Tk(className="Idle")
root.withdraw()
from idlelib.run import fix_scaling
fix_scaling(root)
# set application icon
icondir = os.path.join(os.path.dirname(__file__), 'Icons')
if system() == 'Windows':
iconfile = os.path.join(icondir, 'idle.ico')
root.wm_iconbitmap(default=iconfile)
else:
ext = '.png' if TkVersion >= 8.6 else '.gif'
iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext))
for size in (16, 32, 48)]
icons = [PhotoImage(master=root, file=iconfile)
for iconfile in iconfiles]
root.wm_iconphoto(True, *icons)
# start editor and/or shell windows:
fixwordbreaks(root)
fix_x11_paste(root)
flist = PyShellFileList(root)
macosx.setupApp(root, flist)
if enable_edit:
if not (cmd or script):
for filename in args[:]:
if flist.open(filename) is None:
                    # filename is actually a directory; ignore it
args.remove(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
if macosx.isAquaTk() and flist.dict:
# On OSX: when the user has double-clicked on a file that causes
# IDLE to be launched the shell window will open just in front of
# the file she wants to see. Lower the interpreter window when
# there are open files.
shell.top.lower()
else:
shell = flist.pyshell
# Handle remaining options. If any of these are set, enable_shell
# was set also, so shell must be true to reach here.
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
elif shell:
# If there is a shell window and no cmd or script in progress,
# check for problematic OS X Tk versions and print a warning
# message in the IDLE shell window; this is less intrusive
# than always opening a separate window.
tkversionwarning = macosx.tkVersionWarning(root)
if tkversionwarning:
shell.interp.runcommand("print('%s')" % tkversionwarning)
while flist.inversedict: # keep IDLE running while files are open.
root.mainloop()
root.destroy()
capture_warnings(False)
if __name__ == "__main__":
sys.modules['pyshell'] = sys.modules['__main__']
main()
capture_warnings(False) # Make sure turned off; see issue 18081
|
__init__.py
|
"""
https://github.com/tarantool/test-run/issues/265
Scenario:
    The function '_target_function' no longer works in Python 3.9 and later.
Code:
@wraps(target)
@ParallelStrategy.save_return_value
def _target_function(*_args, **_kwargs):
result_value = target(*_args, **_kwargs)
return result_value
return Process(target=_target_function, args=args, kwargs=kwargs)
Note:
    In Python 3.9 and later, the 'multiprocessing' package only accepts
    target functions that are picklable. In other words, you cannot apply
    decorators like 'classmethod' or 'staticmethod' to any function that
    is meant to run as a Parallel target.
Solution:
    Configure 'set_start_method' to 'fork' to work around this.
"""
from multiprocessing import set_start_method as set_multiprocessing_start_method, current_process
from multirunnable import PYTHON_MAJOR_VERSION, PYTHON_MINOR_VERSION
from platform import system as runtime_os
import logging
import re
if (PYTHON_MAJOR_VERSION, PYTHON_MINOR_VERSION) >= (3, 9):
    logging.info("Forcing 'multiprocessing' to use 'fork' ('spawn' on Windows).")
if re.search(re.escape(runtime_os()), "Windows", re.IGNORECASE) is not None:
set_multiprocessing_start_method('spawn', force=True)
else:
set_multiprocessing_start_method('fork')
from multirunnable.parallel.features import ProcessQueueType, ProcessLock, ProcessCommunication
from multirunnable.parallel.strategy import ParallelStrategy, ProcessStrategy, ProcessPoolStrategy
from multirunnable.parallel.result import ParallelResult
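# A minimal, self-contained sketch (not part of multirunnable) illustrating the
# note above: locally defined wrapper functions such as '_target_function'
# cannot be pickled, so the 'spawn' start method fails to launch them while
# 'fork' works. 'fork' is only available on POSIX systems.
if __name__ == "__main__":
    import multiprocessing as _mp

    def _make_target():
        def _local_target():
            print("running in a child process")
        return _local_target

    if runtime_os() != "Windows":
        _fork_ctx = _mp.get_context("fork")
        _p = _fork_ctx.Process(target=_make_target())
        _p.start()
        _p.join()  # works: the child inherits the function object, no pickling needed

    _spawn_ctx = _mp.get_context("spawn")
    try:
        _p = _spawn_ctx.Process(target=_make_target())
        _p.start()  # typically raises "Can't pickle local object ..."
        _p.join()
    except Exception as _exc:
        print("spawn failed as expected:", _exc)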
|
client.py
|
import xmlrpc.client
import threading
from http import client as http_client
import functools
from sys import hexversion
OPTIONS = {'CONFIGURED': False, 'TIMEOUT': 20}
def configure(opts):
if not OPTIONS['CONFIGURED']:
try: # support for django
import django.conf
OPTIONS.update(django.conf.settings.PYAPNS_CONFIG)
OPTIONS['CONFIGURED'] = True
except:
pass
if not OPTIONS['CONFIGURED']:
        try: # support for programmatic configuration
OPTIONS.update(opts)
OPTIONS['CONFIGURED'] = True
except:
pass
if not OPTIONS['CONFIGURED']:
try: # pylons support
import pylons.config
OPTIONS.update({'HOST': pylons.config.get('pyapns_host')})
try:
OPTIONS.update({'TIMEOUT': int(pylons.config.get('pyapns_timeout'))})
except:
pass # ignore, an optional value
OPTIONS['CONFIGURED'] = True
except:
pass
# provision initial app_ids
if 'INITIAL' in OPTIONS:
for args in OPTIONS['INITIAL']:
provision(*args)
return OPTIONS['CONFIGURED']
class UnknownAppID(Exception): pass
class APNSNotConfigured(Exception): pass
def reprovision_and_retry(func):
"""
Wraps the `errback` callback of the API functions, automatically trying to
re-provision if the app ID can not be found during the operation. If that's
unsuccessful, it will raise the UnknownAppID error.
"""
@functools.wraps(func)
def wrapper(*a, **kw):
errback = kw.get('errback', None)
if errback is None:
def errback(e):
raise e
def errback_wrapper(e):
if isinstance(e, UnknownAppID) and 'INITIAL' in OPTIONS:
try:
for initial in OPTIONS['INITIAL']:
provision(*initial) # retry provisioning the initial setup
func(*a, **kw) # and try the function once more
                except Exception as new_exc:
                    errback(new_exc)  # pass the new exception to the errback
else:
errback(e) # not an instance of UnknownAppID - nothing we can do here
kw['errback'] = errback_wrapper
return func(*a, **kw)
return wrapper
def default_callback(func):
@functools.wraps(func)
def wrapper(*a, **kw):
if 'callback' not in kw:
kw['callback'] = lambda c: c
return func(*a, **kw)
return wrapper
@default_callback
@reprovision_and_retry
def provision(app_id, path_to_cert, environment, timeout=15, async_=False,
              callback=None, errback=None):
args = [app_id, path_to_cert, environment, timeout]
f_args = ['provision', args, callback, errback]
    if not async_:
return _xmlrpc_thread(*f_args)
t = threading.Thread(target=_xmlrpc_thread, args=f_args)
t.daemon = True
t.start()
@default_callback
@reprovision_and_retry
def notify(app_id, tokens, notifications, async_=False, callback=None,
           errback=None):
args = [app_id, tokens, notifications]
f_args = ['notify', args, callback, errback]
    if not async_:
return _xmlrpc_thread(*f_args)
t = threading.Thread(target=_xmlrpc_thread, args=f_args)
t.daemon = True
t.start()
@default_callback
@reprovision_and_retry
def feedback(app_id, async_=False, callback=None, errback=None):
args = [app_id]
f_args = ['feedback', args, callback, errback]
    if not async_:
return _xmlrpc_thread(*f_args)
t = threading.Thread(target=_xmlrpc_thread, args=f_args)
t.daemon = True
t.start()
def _xmlrpc_thread(method, args, callback, errback=None):
if not configure({}):
raise APNSNotConfigured('APNS Has not been configured.')
proxy = ServerProxy(OPTIONS['HOST'], allow_none=True, use_datetime=True,
timeout=OPTIONS['TIMEOUT'])
try:
parts = method.strip().split('.')
for part in parts:
proxy = getattr(proxy, part)
return callback(proxy(*args))
except xmlrpc.client.Fault as e:
if e.faultCode == 404:
e = UnknownAppID()
if errback is not None:
errback(e)
else:
raise e
## --------------------------------------------------------------
## Thank you Volodymyr Orlenko:
## http://blog.bjola.ca/2007/08/using-timeout-with-xmlrpclib.html
## --------------------------------------------------------------
def ServerProxy(url, *args, **kwargs):
t = TimeoutTransport()
t.timeout = kwargs.pop('timeout', 20)
kwargs['transport'] = t
return xmlrpc.client.ServerProxy(url, *args, **kwargs)
class TimeoutTransport(xmlrpc.client.Transport):
def make_connection(self, host):
conn = TimeoutHTTPConnection(host)
conn.timeout = self.timeout
return conn
class TimeoutHTTPConnection(http_client.HTTPConnection):
def connect(self):
http_client.HTTPConnection.connect(self)
self.sock.settimeout(self.timeout)
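# Illustrative usage sketch only (the host URL, app id, certificate path and
# device token below are assumptions, not part of this module, and a pyapns
# server is assumed to be listening locally): configure the client, provision
# an app, then push a notification without blocking the caller.
if __name__ == "__main__":
    configure({'HOST': 'http://localhost:7077/'})
    provision('example_app', '/path/to/cert.pem', 'sandbox')
    notify('example_app', '00' * 32, {'aps': {'alert': 'Hello'}}, async_=True,
           callback=lambda result: print('notified:', result),
           errback=lambda exc: print('failed:', exc))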
|
graph.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import hashlib
import json
import logging
import threading
from abc import ABCMeta
from abc import abstractmethod
from copy import deepcopy
from typing import List
from typing import Mapping
from typing import Union
try:
import vineyard
except ImportError:
vineyard = None
from graphscope.config import GSConfig as gs_config
from graphscope.framework import dag_utils
from graphscope.framework import utils
from graphscope.framework.dag import DAGNode
from graphscope.framework.errors import check_argument
from graphscope.framework.graph_schema import GraphSchema
from graphscope.framework.graph_utils import EdgeLabel
from graphscope.framework.graph_utils import EdgeSubLabel
from graphscope.framework.graph_utils import VertexLabel
from graphscope.framework.operation import Operation
from graphscope.framework.utils import data_type_to_cpp
from graphscope.proto import attr_value_pb2
from graphscope.proto import graph_def_pb2
from graphscope.proto import types_pb2
logger = logging.getLogger("graphscope")
class GraphInterface(metaclass=ABCMeta):
"""Base Class to derive GraphDAGNode and Graph"""
def __init__(self):
self._session = None
@abstractmethod
def add_column(self, results, selector):
raise NotImplementedError
@abstractmethod
def add_vertices(self, vertices, label="_", properties=None, vid_field=0):
raise NotImplementedError
@abstractmethod
def add_edges(
self,
edges,
label="_",
properties=None,
src_label=None,
dst_label=None,
src_field=0,
dst_field=1,
):
raise NotImplementedError
@abstractmethod
def unload(self):
raise NotImplementedError
def to_numpy(self, selector, vertex_range=None):
raise NotImplementedError
def to_dataframe(self, selector, vertex_range=None):
raise NotImplementedError
def save_to(self, path, **kwargs):
raise NotImplementedError
def load_from(cls, path, sess, **kwargs):
raise NotImplementedError
@abstractmethod
def project(self, vertices, edges):
raise NotImplementedError
def _from_nx_graph(self, g):
"""Create a gs graph from a nx graph.
Args:
g (:class:`graphscope.nx.graph`): A nx graph that contains graph data.
Raises:
RuntimeError: NX graph and gs graph not in the same session.
TypeError: Convert a graph view of nx graph to gs graph.
Returns: :class:`graphscope.framework.operation.Operation`
that will be used to construct a :class:`graphscope.Graph`
Examples:
.. code:: python
>>> import graphscope as gs
>>> nx_g = gs.nx.path_graph(10)
>>> gs_g = gs.Graph(nx_g)
"""
if self.session_id != g.session_id:
raise RuntimeError(
"networkx graph and graphscope graph not in the same session."
)
if hasattr(g, "_graph"):
            raise TypeError("graph view cannot be converted to a gs graph")
return dag_utils.dynamic_to_arrow(g)
    def _from_vineyard(self, vineyard_object):
        """Load a graph from an already existing vineyard graph.
Args:
vineyard_object (:class:`vineyard.Object`, :class:`vineyard.ObjectID`
or :class:`vineyard.ObjectName`): vineyard object,
which represents a graph.
Returns:
:class:`graphscope.framework.operation.Operation`
"""
if isinstance(vineyard_object, vineyard.Object):
return self._construct_op_from_vineyard_id(vineyard_object.id)
if isinstance(vineyard_object, vineyard.ObjectID):
return self._construct_op_from_vineyard_id(vineyard_object)
if isinstance(vineyard_object, vineyard.ObjectName):
return self._construct_op_from_vineyard_name(vineyard_object)
def _construct_op_from_vineyard_id(self, vineyard_id):
assert self._session is not None
config = {}
config[types_pb2.IS_FROM_VINEYARD_ID] = utils.b_to_attr(True)
config[types_pb2.VINEYARD_ID] = utils.i_to_attr(int(vineyard_id))
# FIXME(hetao) hardcode oid/vid type for codegen, when loading from vineyard
#
        # the metadata should be retrieved from vineyard
config[types_pb2.OID_TYPE] = utils.s_to_attr("int64_t")
config[types_pb2.VID_TYPE] = utils.s_to_attr("uint64_t")
return dag_utils.create_graph(
self.session_id, graph_def_pb2.ARROW_PROPERTY, attrs=config
)
def _construct_op_from_vineyard_name(self, vineyard_name):
assert self._session is not None
config = {}
config[types_pb2.IS_FROM_VINEYARD_ID] = utils.b_to_attr(True)
config[types_pb2.VINEYARD_NAME] = utils.s_to_attr(str(vineyard_name))
# FIXME(hetao) hardcode oid/vid type for codegen, when loading from vineyard
#
        # the metadata should be retrieved from vineyard
config[types_pb2.OID_TYPE] = utils.s_to_attr("int64_t")
config[types_pb2.VID_TYPE] = utils.s_to_attr("uint64_t")
return dag_utils.create_graph(
self.session_id, graph_def_pb2.ARROW_PROPERTY, attrs=config
)
def _construct_op_of_empty_graph(self):
config = {}
config[types_pb2.ARROW_PROPERTY_DEFINITION] = attr_value_pb2.AttrValue()
config[types_pb2.DIRECTED] = utils.b_to_attr(self._directed)
config[types_pb2.GENERATE_EID] = utils.b_to_attr(self._generate_eid)
config[types_pb2.OID_TYPE] = utils.s_to_attr(self._oid_type)
config[types_pb2.VID_TYPE] = utils.s_to_attr("uint64_t")
config[types_pb2.IS_FROM_VINEYARD_ID] = utils.b_to_attr(False)
return dag_utils.create_graph(
self.session_id, graph_def_pb2.ARROW_PROPERTY, inputs=None, attrs=config
)
class GraphDAGNode(DAGNode, GraphInterface):
    """A class that represents a graph node in a DAG.
    In GraphScope, all operations that generate a new graph will return
    an instance of :class:`GraphDAGNode`, which will be automatically
executed by :method:`sess.run` in `eager` mode.
The following example demonstrates its usage:
.. code:: python
>>> # lazy mode
>>> import graphscope as gs
>>> sess = gs.session(mode="lazy")
>>> g = sess.g()
>>> g1 = g.add_vertices("person.csv","person")
>>> print(g1) # <graphscope.framework.graph.GraphDAGNode object>
>>> g2 = sess.run(g1)
>>> print(g2) # <graphscope.framework.graph.Graph object>
>>> # eager mode
>>> import graphscope as gs
>>> sess = gs.session(mode="eager")
>>> g = sess.g()
>>> g1 = g.add_vertices("person.csv","person")
>>> print(g1) # <graphscope.framework.graph.Graph object>
>>> g1.unload()
"""
def __init__(
self,
session,
incoming_data=None,
oid_type="int64",
directed=True,
generate_eid=True,
):
"""Construct a :class:`GraphDAGNode` object.
Args:
session (:class:`Session`): A graphscope session instance.
            incoming_data: Graph can be initialized through various types of sources,
which can be one of:
- :class:`graphscope.framework.operation.Operation`
- :class:`graphscope.nx.Graph`
- :class:`graphscope.Graph`
- :class:`vineyard.Object`, :class:`vineyard.ObjectId` or :class:`vineyard.ObjectName`
oid_type: (str, optional): Type of vertex original id. Defaults to "int64".
directed: (bool, optional): Directed graph or not. Defaults to True.
            generate_eid: (bool, optional): Generate an id for each edge when set to True. Defaults to True.
"""
super().__init__()
self._session = session
oid_type = utils.normalize_data_type_str(oid_type)
if oid_type not in ("int64_t", "std::string"):
raise ValueError("oid_type can only be int64_t or string.")
self._oid_type = oid_type
self._directed = directed
self._generate_eid = generate_eid
self._graph_type = graph_def_pb2.ARROW_PROPERTY
# list of pair <parent_op_key, VertexLabel/EdgeLabel>
self._unsealed_vertices_and_edges = list()
# check for newly added vertices and edges.
self._v_labels = list()
self._e_labels = list()
self._e_relationships = list()
self._base_graph = None
# add op to dag
self._resolve_op(incoming_data)
self._session.dag.add_op(self._op)
@property
def v_labels(self):
return self._v_labels
@v_labels.setter
def v_labels(self, value):
self._v_labels = value
@property
def e_labels(self):
return self._e_labels
@e_labels.setter
def e_labels(self, value):
self._e_labels = value
@property
def e_relationships(self):
return self._e_relationships
@e_relationships.setter
def e_relationships(self, value):
self._e_relationships = value
@property
def graph_type(self):
"""The type of the graph object.
Returns:
type (`types_pb2.GraphType`): the type of the graph.
"""
return self._graph_type
def _project_to_simple(self):
check_argument(self.graph_type == graph_def_pb2.ARROW_PROPERTY)
op = dag_utils.project_arrow_property_graph_to_simple(self)
# construct dag node
graph_dag_node = GraphDAGNode(self._session, op)
graph_dag_node._base_graph = self
return graph_dag_node
def _resolve_op(self, incoming_data):
# Don't import the :code:`NXGraph` in top-level statements to improve the
# performance of :code:`import graphscope`.
from graphscope import nx
if incoming_data is None:
# create dag node of empty graph
self._op = self._construct_op_of_empty_graph()
elif isinstance(incoming_data, Operation):
self._op = incoming_data
if self._op.type == types_pb2.PROJECT_TO_SIMPLE:
self._graph_type = graph_def_pb2.ARROW_PROJECTED
elif isinstance(incoming_data, nx.classes.graph._GraphBase):
self._op = self._from_nx_graph(incoming_data)
elif isinstance(incoming_data, Graph):
self._op = dag_utils.copy_graph(incoming_data)
self._graph_type = incoming_data.graph_type
elif isinstance(incoming_data, GraphDAGNode):
if incoming_data.session_id != self.session_id:
                raise RuntimeError("{0} not in the same session.".format(incoming_data))
raise NotImplementedError
elif vineyard is not None and isinstance(
incoming_data, (vineyard.Object, vineyard.ObjectID, vineyard.ObjectName)
):
self._op = self._from_vineyard(incoming_data)
else:
raise RuntimeError("Not supported incoming data.")
def to_numpy(self, selector, vertex_range=None):
"""Select some elements of the graph and output to numpy.
Args:
selector (str): Select a portion of graph as a numpy.ndarray.
vertex_range(dict, optional): Slice vertices. Defaults to None.
Returns:
:class:`graphscope.framework.context.ResultDAGNode`:
A result holds the `numpy.ndarray`, evaluated in eager mode.
"""
# avoid circular import
from graphscope.framework.context import ResultDAGNode
check_argument(self.graph_type == graph_def_pb2.ARROW_PROPERTY)
vertex_range = utils.transform_vertex_range(vertex_range)
op = dag_utils.graph_to_numpy(self, selector, vertex_range)
return ResultDAGNode(self, op)
def to_dataframe(self, selector, vertex_range=None):
"""Select some elements of the graph and output as a pandas.DataFrame
Args:
selector (dict): Select some portions of graph.
vertex_range (dict, optional): Slice vertices. Defaults to None.
Returns:
:class:`graphscope.framework.context.ResultDAGNode`:
A result holds the `pandas.DataFrame`, evaluated in eager mode.
"""
# avoid circular import
from graphscope.framework.context import ResultDAGNode
check_argument(self.graph_type == graph_def_pb2.ARROW_PROPERTY)
check_argument(
isinstance(selector, Mapping),
"selector of to dataframe must be a dict",
)
selector = json.dumps(selector)
vertex_range = utils.transform_vertex_range(vertex_range)
op = dag_utils.graph_to_dataframe(self, selector, vertex_range)
return ResultDAGNode(self, op)
def to_directed(self):
op = dag_utils.to_directed(self)
graph_dag_node = GraphDAGNode(self._session, op)
return graph_dag_node
def to_undirected(self):
op = dag_utils.to_undirected(self)
graph_dag_node = GraphDAGNode(self._session, op)
return graph_dag_node
def add_vertices(self, vertices, label="_", properties=None, vid_field=0):
"""Add vertices to the graph, and return a new graph.
Args:
vertices (Union[str, Loader]): Vertex data source.
label (str, optional): Vertex label name. Defaults to "_".
properties (list[str], optional): List of column names loaded as properties. Defaults to None.
vid_field (int or str, optional): Column index or property name used as id field. Defaults to 0.
Raises:
ValueError: If the given value is invalid or conflict with current graph.
Returns:
:class:`graphscope.framework.graph.GraphDAGNode`:
A new graph with vertex added, evaluated in eager mode.
"""
if label in self._v_labels:
raise ValueError(f"Label {label} already existed in graph.")
if not self._v_labels and self._e_labels:
raise ValueError("Cannot manually add vertices after inferred vertices.")
unsealed_vertices_and_edges = deepcopy(self._unsealed_vertices_and_edges)
vertex_label = VertexLabel(
label=label,
loader=vertices,
properties=properties,
vid_field=vid_field,
id_type=self._oid_type,
session_id=self._session.session_id,
)
unsealed_vertices_and_edges.append((self.op.key, vertex_label))
v_labels = deepcopy(self._v_labels)
v_labels.append(label)
# generate and add a loader op to dag
loader_op = dag_utils.create_loader(vertex_label)
self._session.dag.add_op(loader_op)
# construct add label op
op = dag_utils.add_labels_to_graph(self, loader_op)
# construct dag node
graph_dag_node = GraphDAGNode(
self._session, op, self._oid_type, self._directed, self._generate_eid
)
graph_dag_node._v_labels = v_labels
graph_dag_node._e_labels = self._e_labels
graph_dag_node._e_relationships = self._e_relationships
graph_dag_node._unsealed_vertices_and_edges = unsealed_vertices_and_edges
graph_dag_node._base_graph = self
return graph_dag_node
def add_edges(
self,
edges,
label="_e",
properties=None,
src_label=None,
dst_label=None,
src_field=0,
dst_field=1,
):
"""Add edges to the graph, and return a new graph.
Here the src_label and dst_label must be both specified or both unspecified,
        i. src_label and dst_label both unspecified and the current graph has no vertex label.
           We deduce the vertex label from the edge table, and set the vertex label name to '_'.
        ii. src_label and dst_label both unspecified and the current graph has one vertex label.
            We set src_label and dst_label to this single vertex label.
        iii. src_label and dst_label both specified and both exist in the current graph's vertex labels.
        iv. src_label and dst_label both specified and some do not exist in the current graph's vertex labels.
            We deduce the missing vertex labels from the edge tables.
Args:
edges (Union[str, Loader]): Edge data source.
label (str, optional): Edge label name. Defaults to "_e".
properties (list[str], optional): List of column names loaded as properties. Defaults to None.
src_label (str, optional): Source vertex label. Defaults to None.
dst_label (str, optional): Destination vertex label. Defaults to None.
src_field (int, optional): Column index or name used as src field. Defaults to 0.
dst_field (int, optional): Column index or name used as dst field. Defaults to 1.
Raises:
ValueError: If the given value is invalid or conflict with current graph.
Returns:
:class:`graphscope.framework.graph.GraphDAGNode`:
A new graph with edge added, evaluated in eager mode.
"""
if src_label is None and dst_label is None:
check_argument(
len(self._v_labels) <= 1,
"Ambiguous vertex label, please specify the src_label and dst_label.",
)
if len(self._v_labels) == 1:
src_label = dst_label = self._v_labels[0]
else:
src_label = dst_label = "_"
if src_label is None or dst_label is None:
            raise ValueError(
                "src_label and dst_label must be both specified or both unspecified."
)
check_argument(
src_field != dst_field, "src and dst field cannot refer to the same field"
)
if self.evaluated:
if label in self._e_labels:
raise ValueError(f"Label {label} already existed in graph")
unsealed_vertices = list()
unsealed_edges = list()
v_labels = deepcopy(self._v_labels)
e_labels = deepcopy(self._e_labels)
relations = deepcopy(self._e_relationships)
if src_label not in self._v_labels:
logger.warning("Deducing vertex labels %s", src_label)
v_labels.append(src_label)
if src_label != dst_label and dst_label not in self._v_labels:
logger.warning("Deducing vertex labels %s", dst_label)
v_labels.append(dst_label)
parent = self
if label in self.e_labels:
# aggregate op with the same edge label
fork = False
unsealed_vertices_and_edges = list()
for parent_op_key, unsealed_v_or_e in self._unsealed_vertices_and_edges:
if (
isinstance(unsealed_v_or_e, EdgeLabel)
and unsealed_v_or_e.label == label
):
parent = self._backtrack_graph_dag_node_by_op_key(parent_op_key)
cur_label = unsealed_v_or_e
cur_label.add_sub_label(
EdgeSubLabel(
edges,
properties,
src_label,
dst_label,
src_field,
dst_field,
id_type=self._oid_type,
)
)
fork = True
else:
unsealed_vertices_and_edges.append((parent_op_key, unsealed_v_or_e))
if fork:
if isinstance(unsealed_v_or_e, VertexLabel):
unsealed_vertices.append(unsealed_v_or_e)
else:
unsealed_edges.append(unsealed_v_or_e)
unsealed_edges.append(cur_label)
unsealed_vertices_and_edges.append((parent.op.key, cur_label))
else:
unsealed_vertices_and_edges = deepcopy(self._unsealed_vertices_and_edges)
e_labels.append(label)
relations.append([(src_label, dst_label)])
cur_label = EdgeLabel(label, self._oid_type, self._session.session_id)
cur_label.add_sub_label(
EdgeSubLabel(
edges,
properties,
src_label,
dst_label,
src_field,
dst_field,
id_type=self._oid_type,
)
)
unsealed_edges.append(cur_label)
unsealed_vertices_and_edges.append((parent.op.key, cur_label))
# generate and add a loader op to dag
loader_op = dag_utils.create_loader(unsealed_vertices + unsealed_edges)
self._session.dag.add_op(loader_op)
# construct add label op
op = dag_utils.add_labels_to_graph(parent, loader_op)
# construct dag node
graph_dag_node = GraphDAGNode(
self._session, op, self._oid_type, self._directed, self._generate_eid
)
graph_dag_node._v_labels = v_labels
graph_dag_node._e_labels = e_labels
graph_dag_node._e_relationships = relations
graph_dag_node._unsealed_vertices_and_edges = unsealed_vertices_and_edges
graph_dag_node._base_graph = parent
return graph_dag_node
def _backtrack_graph_dag_node_by_op_key(self, key):
if self.op.key == key:
return self
graph_dag_node = self._base_graph
while graph_dag_node is not None:
if graph_dag_node.op.key == key:
return graph_dag_node
graph_dag_node = graph_dag_node._base_graph
def add_column(self, results, selector):
"""Add the results as a column to the graph. Modification rules are given by the selector.
Args:
            results: An instance of a concrete class derived from (:class:`graphscope.framework.context.BaseContextDAGNode`):
A context that created by doing an app query on a graph, and holds the corresponding results.
selector (dict): Select results to add as column.
Format is similar to selectors in :class:`graphscope.framework.context.Context`
Returns:
:class:`graphscope.framework.graph.GraphDAGNode`:
A new graph with new columns, evaluated in eager mode.
"""
check_argument(
isinstance(selector, Mapping), "selector of add column must be a dict"
)
for key, value in selector.items():
results._check_selector(value)
selector = json.dumps(selector)
op = dag_utils.add_column(self, results, selector)
graph_dag_node = GraphDAGNode(self._session, op)
graph_dag_node._base_graph = self
return graph_dag_node
def unload(self):
"""Unload this graph from graphscope engine.
Returns:
:class:`graphscope.framework.graph.UnloadedGraph`: Evaluated in eager mode.
"""
op = dag_utils.unload_graph(self)
return UnloadedGraph(self._session, op)
def project(
self,
vertices: Mapping[str, Union[List[str], None]],
edges: Mapping[str, Union[List[str], None]],
):
"""Project a subgraph from the property graph, and return a new graph.
        A graph produced by project behaves just like a normal property graph, and can be projected further.
Args:
vertices (dict):
key is the vertex label name, the value is a list of str, which represents the
name of properties. Specifically, it will select all properties if value is None.
Note that, the label of the vertex in all edges you want to project should be included.
edges (dict):
key is the edge label name, the value is a list of str, which represents the
name of properties. Specifically, it will select all properties if value is None.
Returns:
:class:`graphscope.framework.graph.GraphDAGNode`:
A new graph projected from the property graph, evaluated in eager mode.
"""
check_argument(self.graph_type == graph_def_pb2.ARROW_PROPERTY)
op = dag_utils.project_arrow_property_graph(
self, json.dumps(vertices), json.dumps(edges)
)
# construct dag node
graph_dag_node = GraphDAGNode(self._session, op)
graph_dag_node._base_graph = self
return graph_dag_node
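    # For example (illustrative values): keep only the 'name' property of
    # 'person' vertices and every property of 'knows' edges:
    #   sub = g.project(vertices={"person": ["name"]}, edges={"knows": None})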
class Graph(GraphInterface):
"""A class for representing metadata of a graph in the GraphScope.
    A :class:`Graph` object holds the metadata of a graph, such as the key, the schema, and whether the graph is directed.
It is worth noticing that the graph is stored by the backend such as Analytical Engine, Vineyard.
In other words, the graph object holds nothing but metadata.
The following example demonstrates its usage:
.. code:: python
>>> import graphscope as gs
>>> sess = gs.session()
>>> graph = sess.g()
>>> graph = graph.add_vertices("person.csv","person")
>>> graph = graph.add_vertices("software.csv", "software")
>>> graph = graph.add_edges("knows.csv", "knows", src_label="person", dst_label="person")
>>> graph = graph.add_edges("created.csv", "created", src_label="person", dst_label="software")
>>> print(graph)
>>> print(graph.schema)
"""
def __init__(
self,
graph_node,
):
"""Construct a :class:`Graph` object."""
self._graph_node = graph_node
self._session = self._graph_node.session
# copy and set op evaluated
self._graph_node.op = deepcopy(self._graph_node.op)
self._graph_node.evaluated = True
self._session.dag.add_op(self._graph_node.op)
self._key = None
self._vineyard_id = 0
self._schema = GraphSchema()
self._detached = False
self._interactive_instance_launching_thread = None
self._interactive_instance_list = []
self._learning_instance_list = []
def __del__(self):
        # Cleanly ignore all exceptions, since the session may already be closed / destroyed.
try:
self.unload()
except Exception: # pylint: disable=broad-except
pass
def _close_interactive_instances(self):
        # Close related interactive instances when the graph is unloaded.
        # Since the graph is gone, querying via the interactive client is meaningless.
for instance in self._interactive_instance_list:
instance.close()
self._interactive_instance_list.clear()
def _close_learning_instances(self):
for instance in self._learning_instance_list:
instance.close()
self._learning_instance_list.clear()
def _launch_interactive_instance_impl(self):
try:
self._session.gremlin(self)
except: # noqa: E722
            # Record the error msg in `InteractiveQuery` when launching fails.
            # Unexpected; suppress all exceptions here.
pass
def update_from_graph_def(self, graph_def):
check_argument(
self._graph_node.graph_type == graph_def.graph_type,
"Graph type doesn't match {} versus {}".format(
self._graph_node.graph_type, graph_def.graph_type
),
)
self._key = graph_def.key
self._directed = graph_def.directed
self._is_multigraph = graph_def.is_multigraph
vy_info = graph_def_pb2.VineyardInfoPb()
graph_def.extension.Unpack(vy_info)
self._vineyard_id = vy_info.vineyard_id
self._oid_type = data_type_to_cpp(vy_info.oid_type)
self._generate_eid = vy_info.generate_eid
self._schema_path = vy_info.schema_path
self._schema.from_graph_def(graph_def)
self._v_labels = self._schema.vertex_labels
self._e_labels = self._schema.edge_labels
self._e_relationships = self._schema.edge_relationships
# init saved_signature (must be after init schema)
self._saved_signature = self.signature
# create gremlin server pod asynchronously
if self._session.eager() and gs_config.initializing_interactive_engine:
self._interactive_instance_launching_thread = threading.Thread(
target=self._launch_interactive_instance_impl, args=()
)
self._interactive_instance_launching_thread.start()
def __getattr__(self, name):
if hasattr(self._graph_node, name):
return getattr(self._graph_node, name)
raise AttributeError("{0} not found.".format(name))
@property
def key(self):
"""The key of the corresponding graph in engine."""
return self._key
@property
def schema(self):
"""Schema of the graph.
Returns:
:class:`GraphSchema`: the schema of the graph
"""
return self._schema
@property
    def schema_path(self):
        """Path that the coordinator will write the interactive schema to.
        Returns:
            str: The path that contains the schema, for the interactive engine.
"""
return self._schema_path
@property
def signature(self):
return hashlib.sha256(
"{}.{}".format(self._schema.signature(), self._key).encode("utf-8")
).hexdigest()
@property
def op(self):
return self._graph_node.op
@property
def template_str(self):
# transform str/string to std::string
oid_type = utils.normalize_data_type_str(self._oid_type)
vid_type = utils.data_type_to_cpp(self._schema._vid_type)
vdata_type = utils.data_type_to_cpp(self._schema.vdata_type)
edata_type = utils.data_type_to_cpp(self._schema.edata_type)
if self._graph_type == graph_def_pb2.ARROW_PROPERTY:
template = f"vineyard::ArrowFragment<{oid_type},{vid_type}>"
elif self._graph_type == graph_def_pb2.ARROW_PROJECTED:
template = f"gs::ArrowProjectedFragment<{oid_type},{vid_type},{vdata_type},{edata_type}>"
elif self._graph_type == graph_def_pb2.DYNAMIC_PROJECTED:
template = f"gs::DynamicProjectedFragment<{vdata_type},{edata_type}>"
else:
raise ValueError(f"Unsupported graph type: {self._graph_type}")
return template
@property
def vineyard_id(self):
"""Get the vineyard object_id of this graph.
Returns:
str: return vineyard id of this graph
"""
return self._vineyard_id
@property
    def session_id(self):
        """Get the current session_id.
Returns:
str: Return session id that the graph belongs to.
"""
return self._session.session_id
    def detach(self):
        """Detaching a graph leaves it in vineyard even when the variable for
this :class:`Graph` object leaves the lexical scope.
The graph can be accessed using the graph's :code:`ObjectID` or its name later.
"""
self._detached = True
def loaded(self):
return self._key is not None
def __str__(self):
v_str = "\n".join([f"VERTEX: {label}" for label in self._v_labels])
relations = []
for i in range(len(self._e_labels)):
relations.extend(
[(self._e_labels[i], src, dst) for src, dst in self._e_relationships[i]]
)
e_str = "\n".join(
[f"EDGE: {label}\tsrc: {src}\tdst: {dst}" for label, src, dst in relations]
)
return f"graphscope.Graph\n{graph_def_pb2.GraphTypePb.Name(self._graph_type)}\n{v_str}\n{e_str}"
def __repr__(self):
return self.__str__()
def unload(self):
"""Unload this graph from graphscope engine."""
if self._session is None:
raise RuntimeError("The graph is not loaded")
if self._key is None:
self._session = None
return
# close interactive instances first
try:
if (
self._interactive_instance_launching_thread is not None
and self._interactive_instance_launching_thread.is_alive()
):
# join raises a RuntimeError if an attempt is made to join the current thread.
                # This exception occurs when an object collected by the gc mechanism contains a running thread.
if (
threading.current_thread()
!= self._interactive_instance_launching_thread
):
self._interactive_instance_launching_thread.join()
self._close_interactive_instances()
except Exception as e:
logger.error("Failed to close interactive instances: %s" % e)
try:
self._close_learning_instances()
except Exception as e:
logger.error("Failed to close learning instances: %s" % e)
rlt = None
if not self._detached:
rlt = self._session._wrapper(self._graph_node.unload())
self._key = None
self._session = None
return rlt
def _project_to_simple(self):
return self._session._wrapper(self._graph_node._project_to_simple())
def add_column(self, results, selector):
return self._session._wrapper(self._graph_node.add_column(results, selector))
def to_numpy(self, selector, vertex_range=None):
"""Select some elements of the graph and output to numpy.
Args:
selector (str): Select a portion of graph as a numpy.ndarray.
vertex_range(dict, optional): Slice vertices. Defaults to None.
Returns:
`numpy.ndarray`
"""
self._check_unmodified()
return self._session._wrapper(self._graph_node.to_numpy(selector, vertex_range))
def to_dataframe(self, selector, vertex_range=None):
"""Select some elements of the graph and output as a pandas.DataFrame
Args:
selector (dict): Select some portions of graph.
vertex_range (dict, optional): Slice vertices. Defaults to None.
Returns:
`pandas.DataFrame`
"""
self._check_unmodified()
return self._session._wrapper(
self._graph_node.to_dataframe(selector, vertex_range)
)
def to_directed(self):
"""Returns a directed representation of the graph.
Returns:
:class:`Graph`: A directed graph with the same name, same nodes, and
with each edge (u, v, data) replaced by two directed edges (u, v, data) and (v, u, data).
"""
if self._directed:
return self
return self._session._wrapper(self._graph_node.to_directed())
def to_undirected(self):
"""Returns an undirected representation of the digraph.
Returns:
:class:`Graph`: An undirected graph with the same name and nodes and
with edge (u, v, data) if either (u, v, data) or (v, u, data) is in the digraph.
If both edges exist in digraph, they will both be preserved.
You must check and correct for this manually if desired.
"""
if not self._directed:
return self
return self._session._wrapper(self._graph_node.to_undirected())
def is_directed(self):
return self._directed
def is_multigraph(self):
return self._is_multigraph
def _check_unmodified(self):
check_argument(
self.signature == self._saved_signature, "Graph has been modified!"
)
def _attach_interactive_instance(self, instance):
"""Store the instance when a new interactive instance is started.
Args:
instance: interactive instance
"""
self._interactive_instance_list.append(instance)
def _attach_learning_instance(self, instance):
"""Store the instance when a new learning instance is created.
Args:
instance: learning instance
"""
self._learning_instance_list.append(instance)
def save_to(self, path, **kwargs):
"""Serialize graph to a location.
        The meta and data of the graph are dumped to the specified location,
        and can be restored by `Graph.load_from` in other sessions.
Each worker will write a `path_{worker_id}.meta` file and
a `path_{worker_id}` file to storage.
Args:
path (str): supported storages are local, hdfs, oss, s3
"""
try:
import vineyard
import vineyard.io
except ImportError:
            raise RuntimeError(
                "Saving a graph requires 'vineyard' and 'vineyard-io', "
                "please install these two dependencies via "
"\n"
"\n"
" pip3 install vineyard vineyard-io"
"\n"
"\n"
)
sess = self._session
deployment = "kubernetes" if sess.info["type"] == "k8s" else "ssh"
conf = sess.info["engine_config"]
vineyard_endpoint = conf["vineyard_rpc_endpoint"]
vineyard_ipc_socket = conf["vineyard_socket"]
if sess.info["type"] == "k8s":
hosts = [
"{}:{}".format(sess.info["namespace"], s)
for s in sess.info["engine_hosts"].split(",")
]
else: # type == "hosts"
hosts = sess.info["engine_hosts"].split(",")
vineyard.io.serialize(
path,
vineyard.ObjectID(self._vineyard_id),
type="global",
vineyard_ipc_socket=vineyard_ipc_socket,
vineyard_endpoint=vineyard_endpoint,
storage_options=kwargs,
deployment=deployment,
hosts=hosts,
)
@classmethod
    def load_from(cls, path, sess, **kwargs):
        """Construct a `Graph` by deserializing from `path`.
        It will read all serialization files, which were dumped by
        `Graph.save_to`.
        If any serialization file doesn't exist or is broken, this will error out.
Args:
path (str): Path contains the serialization files.
sess (`graphscope.Session`): The target session
that the graph will be construct in
Returns:
            `Graph`: A new graph object. Schema and data are supposed to be
                identical to those of the graph that called the serialization method.
"""
try:
import vineyard
import vineyard.io
except ImportError:
            raise RuntimeError(
                "Loading a graph requires 'vineyard' and 'vineyard-io', "
                "please install these two dependencies via "
"\n"
"\n"
" pip3 install vineyard vineyard-io"
"\n"
"\n"
)
deployment = "kubernetes" if sess.info["type"] == "k8s" else "ssh"
conf = sess.info["engine_config"]
vineyard_endpoint = conf["vineyard_rpc_endpoint"]
vineyard_ipc_socket = conf["vineyard_socket"]
if sess.info["type"] == "k8s":
hosts = [
"{}:{}".format(sess.info["namespace"], s)
for s in sess.info["engine_hosts"].split(",")
]
else: # type == "hosts"
hosts = sess.info["engine_hosts"].split(",")
graph_id = vineyard.io.deserialize(
path,
type="global",
vineyard_ipc_socket=vineyard_ipc_socket,
vineyard_endpoint=vineyard_endpoint,
storage_options=kwargs,
deployment=deployment,
hosts=hosts,
)
return sess._wrapper(GraphDAGNode(sess, vineyard.ObjectID(graph_id)))
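    # Illustrative round trip under an assumed path and an existing session
    # `sess` (not part of the documented API examples):
    #   g.save_to("/tmp/graph_dump")
    #   g2 = graphscope.Graph.load_from("/tmp/graph_dump", sess)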
def add_vertices(self, vertices, label="_", properties=None, vid_field=0):
if not self.loaded():
raise RuntimeError("The graph is not loaded")
return self._session._wrapper(
self._graph_node.add_vertices(vertices, label, properties, vid_field)
)
def add_edges(
self,
edges,
label="_",
properties=None,
src_label=None,
dst_label=None,
src_field=0,
dst_field=1,
):
if not self.loaded():
raise RuntimeError("The graph is not loaded")
return self._session._wrapper(
self._graph_node.add_edges(
edges, label, properties, src_label, dst_label, src_field, dst_field
)
)
def project(
self,
vertices: Mapping[str, Union[List[str], None]],
edges: Mapping[str, Union[List[str], None]],
):
if not self.loaded():
raise RuntimeError("The graph is not loaded")
return self._session._wrapper(self._graph_node.project(vertices, edges))
class UnloadedGraph(DAGNode):
"""Unloaded graph node in a DAG."""
def __init__(self, session, op):
self._session = session
self._op = op
# add op to dag
self._session.dag.add_op(self._op)
|
framework_helpers.py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Helper functions and classes used throughout Monorail."""
import logging
import random
import string
import textwrap
import threading
import time
import traceback
import urllib
import urlparse
from google.appengine.api import app_identity
from third_party import ezt
import settings
from framework import actionlimit
from framework import framework_constants
from framework import template_helpers
from framework import timestr
from framework import urls
from services import client_config_svc
# For random key generation
RANDOM_KEY_LENGTH = 128
RANDOM_KEY_CHARACTERS = string.ascii_letters + string.digits
# params recognized by FormatURL, in the order they will appear in the url
RECOGNIZED_PARAMS = ['can', 'start', 'num', 'q', 'colspec', 'groupby', 'sort',
'show', 'format', 'me', 'table_title', 'projects',
'hotlist_id']
def retry(tries, delay=1, backoff=2):
"""A retry decorator with exponential backoff.
Functions are retried when Exceptions occur.
Args:
tries: int Number of times to retry, set to 0 to disable retry.
delay: float Initial sleep time in seconds.
    backoff: float Must be greater than 1; each further failure sleeps
      backoff times longer than the previous one.
"""
if backoff <= 1:
raise ValueError("backoff must be greater than 1")
if tries < 0:
raise ValueError("tries must be 0 or greater")
if delay <= 0:
raise ValueError("delay must be greater than 0")
def decorator(func):
def wrapper(*args, **kwargs):
_tries, _delay = tries, delay
_tries += 1 # Ensure we call func at least once.
while _tries > 0:
try:
ret = func(*args, **kwargs)
return ret
except Exception:
_tries -= 1
if _tries == 0:
logging.error('Exceeded maximum number of retries for %s.',
func.__name__)
raise
trace_str = traceback.format_exc()
logging.warning('Retrying %s due to Exception: %s',
func.__name__, trace_str)
time.sleep(_delay)
_delay *= backoff # Wait longer the next time we fail.
return wrapper
return decorator
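# Illustrative use of retry() (the function below is a made-up example, not
# used by Monorail): the first two calls raise, the third succeeds, with
# sleeps of 1s and then 2s between the attempts.
@retry(3, delay=1, backoff=2)
def _ExampleFlakyCall(_attempts=[0]):
  """Fails twice with IOError, then returns a success message."""
  _attempts[0] += 1
  if _attempts[0] < 3:
    raise IOError('transient failure #%d' % _attempts[0])
  return 'succeeded on attempt %d' % _attempts[0]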
class PromiseCallback(object):
"""Executes the work of a Promise and then dereferences everything."""
def __init__(self, promise, callback, *args, **kwargs):
self.promise = promise
self.callback = callback
self.args = args
self.kwargs = kwargs
def __call__(self):
try:
self.promise._WorkOnPromise(self.callback, *self.args, **self.kwargs)
finally:
# Make sure we no longer hold onto references to anything.
self.promise = self.callback = self.args = self.kwargs = None
class Promise(object):
"""Class for promises to deliver a value in the future.
  A thread is started to run callback(args); that thread
  should return the value that it generates, or raise an exception.
p.WaitAndGetValue() will block until a value is available.
If an exception was raised, p.WaitAndGetValue() will re-raise the
same exception.
"""
def __init__(self, callback, *args, **kwargs):
"""Initialize the promise and immediately call the supplied function.
Args:
callback: Function that takes the args and returns the promise value.
*args: Any arguments to the target function.
**kwargs: Any keyword args for the target function.
"""
self.has_value = False
self.value = None
self.event = threading.Event()
self.exception = None
promise_callback = PromiseCallback(self, callback, *args, **kwargs)
# Execute the callback in another thread.
promise_thread = threading.Thread(target=promise_callback)
promise_thread.start()
def _WorkOnPromise(self, callback, *args, **kwargs):
"""Run callback to compute the promised value. Save any exceptions."""
try:
self.value = callback(*args, **kwargs)
except Exception as e:
trace_str = traceback.format_exc()
logging.info('Exception while working on promise: %s\n', trace_str)
# Add the stack trace at this point to the exception. That way, in the
# logs, we can see what happened further up in the call stack
# than WaitAndGetValue(), which re-raises exceptions.
e.pre_promise_trace = trace_str
self.exception = e
finally:
self.has_value = True
self.event.set()
def WaitAndGetValue(self):
"""Block until my value is available, then return it or raise exception."""
self.event.wait()
if self.exception:
raise self.exception # pylint: disable=raising-bad-type
return self.value
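# A minimal sketch (illustrative only, not used elsewhere in Monorail) showing
# how Promise is typically consumed: construct it with a callback and its args,
# do other work, then block on WaitAndGetValue().
def _ExamplePromiseUsage():
  p = Promise(lambda x, y: x + y, 2, 3)
  # ... other work could happen here while the worker thread runs ...
  return p.WaitAndGetValue()  # returns 5, or re-raises the worker's exception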
def FormatAbsoluteURLForDomain(
host, project_name, servlet_name, scheme='https', **kwargs):
"""A variant of FormatAbsoluteURL for when request objects are not available.
Args:
host: string with hostname and optional port, e.g. 'localhost:8080'.
project_name: the destination project name, if any.
    servlet_name: site or project-local url fragment of dest page.
scheme: url scheme, e.g., 'http' or 'https'.
**kwargs: additional query string parameters may be specified as named
arguments to this function.
Returns:
A full url beginning with 'http[s]://'.
"""
path_and_args = FormatURL(None, servlet_name, **kwargs)
if host:
domain_port = host.split(':')
domain_port[0] = GetPreferredDomain(domain_port[0])
host = ':'.join(domain_port)
absolute_domain_url = '%s://%s' % (scheme, host)
if project_name:
return '%s/p/%s%s' % (absolute_domain_url, project_name, path_and_args)
return absolute_domain_url + path_and_args
def FormatAbsoluteURL(
mr, servlet_name, include_project=True, project_name=None,
scheme=None, copy_params=True, **kwargs):
"""Return an absolute URL to a servlet with old and new params.
Args:
mr: info parsed from the current request.
    servlet_name: site or project-local url fragment of dest page.
include_project: if True, include the project home url as part of the
destination URL (as long as it is specified either in mr
or as the project_name param.)
project_name: the destination project name, to override
mr.project_name if include_project is True.
scheme: either 'http' or 'https', to override mr.request.scheme.
copy_params: if True, copy well-known parameters from the existing request.
**kwargs: additional query string parameters may be specified as named
arguments to this function.
Returns:
A full url beginning with 'http[s]://'. The destination URL will be in
the same domain as the current request.
"""
path_and_args = FormatURL(
mr if copy_params else None, servlet_name, **kwargs)
scheme = scheme or mr.request.scheme
project_base = ''
if include_project:
project_base = '/p/%s' % (project_name or mr.project_name)
return '%s://%s%s%s' % (scheme, mr.request.host, project_base, path_and_args)
def FormatCanonicalURL(mr, retain_query):
"""Return an absolute canonical URL based on a request.
Args:
mr: info parsed from the current request.
retain_query: iterable of querystring keys to retain.
Returns:
A full url beginning with 'http[s]://'. The URL will be in the configured
canonical domain with the same path as for the current request.
"""
host = GetPreferredDomain(mr.request.host)
qs = ''
if mr.request.query_string:
    # We only retain a single value for each key in retain_query; multi-valued
    # parameters are not currently handled.
qs_params = [(k, mr.request.params.get(k))
for k in mr.request.GET.keys() if k in retain_query]
qs = _FormatQueryString('', qs_params)
return '%s://%s%s%s' % (mr.request.scheme, host, mr.request.path, qs)
def FormatMovedProjectURL(mr, moved_to):
"""Return a transformation of the given url into the given project.
Args:
mr: common information parsed from the HTTP request.
moved_to: A string from a project's moved_to field that matches
framework_bizobj.RE_PROJECT_NAME.
Returns:
The url transposed into the given destination project.
"""
project_name = moved_to
_, _, path, parameters, query, fragment_identifier = urlparse.urlparse(
mr.current_page_url)
# Strip off leading "/p/<moved from project>"
path = '/' + path.split('/', 3)[3]
rest_of_url = urlparse.urlunparse(
('', '', path, parameters, query, fragment_identifier))
return '/p/%s%s' % (project_name, rest_of_url)
def FormatURL(mr, servlet_path, **kwargs):
"""Return a project relative URL to a servlet with old and new params."""
# Standard params not overridden in **kwargs come first, followed by kwargs.
  # The exception is the 'id' param: if present, it always comes first. See
  # bugs.chromium.org/p/monorail/issues/detail?id=374
all_params = []
if kwargs.get('id'):
all_params.append(('id', kwargs['id']))
if mr:
all_params.extend(
(name, mr.GetParam(name)) for name in RECOGNIZED_PARAMS
if name not in kwargs)
all_params.extend(
# Ignore the 'id' param since we already added it above.
sorted([kwarg for kwarg in kwargs.items() if kwarg[0] != 'id']))
return _FormatQueryString(servlet_path, all_params)
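# Illustrative ordering (hypothetical parameter values): because 'id' is always
# emitted first and the remaining kwargs are sorted, a call like
#   FormatURL(None, '/issues/detail', q='owner:me', id=42)
# would produce '/issues/detail?id=42&q=owner%3Ame'.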
def _FormatQueryString(url, params):
"""URLencode a list of parameters and attach them to the end of a URL."""
param_string = '&'.join(
'%s=%s' % (name, urllib.quote(unicode(value).encode('utf-8')))
for name, value in params if value is not None)
if not param_string:
qs_start_char = ''
elif '?' in url:
qs_start_char = '&'
else:
qs_start_char = '?'
return '%s%s%s' % (url, qs_start_char, param_string)
def WordWrapSuperLongLines(s, max_cols=100):
"""Reformat input that was not word-wrapped by the browser.
Args:
s: the string to be word-wrapped, it may have embedded newlines.
max_cols: int maximum line length.
Returns:
Wrapped text string.
Rather than wrap the whole thing, we only wrap super-long lines and keep
  all the reasonable lines formatted as-is.
"""
lines = [textwrap.fill(line, max_cols) for line in s.splitlines()]
wrapped_text = '\n'.join(lines)
# The split/join logic above can lose one final blank line.
if s.endswith('\n') or s.endswith('\r'):
wrapped_text += '\n'
return wrapped_text
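# Illustrative behavior (hypothetical input): only lines longer than max_cols
# are wrapped, short lines pass through untouched. For example,
#   WordWrapSuperLongLines('short line\n' + 'x' * 250, max_cols=100)
# returns 'short line' unchanged, followed by the 250 x's split across three lines.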
def StaticCacheHeaders():
"""Returns HTTP headers for static content, based on the current time."""
year_from_now = int(time.time()) + framework_constants.SECS_PER_YEAR
headers = [
('Cache-Control',
'max-age=%d, private' % framework_constants.SECS_PER_YEAR),
('Last-Modified', timestr.TimeForHTMLHeader()),
('Expires', timestr.TimeForHTMLHeader(when=year_from_now)),
]
logging.info('static headers are %r', headers)
return headers
def ComputeListDeltas(old_list, new_list):
"""Given an old and new list, return the items added and removed.
Args:
old_list: old list of values for comparison.
new_list: new list of values for comparison.
Returns:
    Two lists: one with all the values added (in new_list but not in
    old_list), and one with all the values removed (in old_list but not
    in new_list).
"""
if old_list == new_list:
return [], [] # A common case: nothing was added or removed.
added = set(new_list)
added.difference_update(old_list)
removed = set(old_list)
removed.difference_update(new_list)
return list(added), list(removed)
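# Illustrative example (hypothetical values); note that ordering is not
# preserved because the computation goes through sets:
#   ComputeListDeltas([1, 2, 3], [2, 3, 4]) returns ([4], [1])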
def GetRoleName(effective_ids, project):
"""Determines the name of the role a member has for a given project.
Args:
effective_ids: set of user IDs to get the role name for.
    project: Project PB containing the different member lists.
Returns:
The name of the role.
"""
if not effective_ids.isdisjoint(project.owner_ids):
return 'Owner'
if not effective_ids.isdisjoint(project.committer_ids):
return 'Committer'
if not effective_ids.isdisjoint(project.contributor_ids):
return 'Contributor'
return None
def GetHotlistRoleName(effective_ids, hotlist):
"""Determines the name of the role a member has for a given hotlist."""
if not effective_ids.isdisjoint(hotlist.owner_ids):
return 'Owner'
if not effective_ids.isdisjoint(hotlist.editor_ids):
return 'Editor'
if not effective_ids.isdisjoint(hotlist.follower_ids):
return 'Follower'
return None
class UserSettings(object):
"""Abstract class providing static methods for user settings forms."""
@classmethod
def GatherUnifiedSettingsPageData(
cls, logged_in_user_id, settings_user_view, settings_user):
"""Gather EZT variables needed for the unified user settings form.
Args:
logged_in_user_id: The user ID of the acting user.
settings_user_view: The UserView of the target user.
settings_user: The User PB of the target user.
Returns:
A dictionary giving the names and values of all the variables to
be exported to EZT to support the unified user settings form template.
"""
def ActionLastReset(action_limit):
"""Return a formatted time string for the last action limit reset."""
if action_limit:
return time.asctime(time.localtime(action_limit.reset_timestamp))
return 'Never'
    def DefaultLifetimeLimit(action_type):
      """Return the default lifetime limit for the given type of action."""
      return actionlimit.ACTION_LIMITS[action_type][3]
    def DefaultPeriodSoftLimit(action_type):
      """Return the default period soft limit for the given type of action."""
      return actionlimit.ACTION_LIMITS[action_type][1]
    def DefaultPeriodHardLimit(action_type):
      """Return the default period hard limit for the given type of action."""
      return actionlimit.ACTION_LIMITS[action_type][2]
project_creation_lifetime_limit = (
(settings_user.project_creation_limit and
settings_user.project_creation_limit.lifetime_limit) or
DefaultLifetimeLimit(actionlimit.PROJECT_CREATION))
project_creation_soft_limit = (
(settings_user.project_creation_limit and
settings_user.project_creation_limit.period_soft_limit) or
DefaultPeriodSoftLimit(actionlimit.PROJECT_CREATION))
project_creation_hard_limit = (
(settings_user.project_creation_limit and
settings_user.project_creation_limit.period_hard_limit) or
DefaultPeriodHardLimit(actionlimit.PROJECT_CREATION))
issue_comment_lifetime_limit = (
(settings_user.issue_comment_limit and
settings_user.issue_comment_limit.lifetime_limit) or
DefaultLifetimeLimit(actionlimit.ISSUE_COMMENT))
issue_comment_soft_limit = (
(settings_user.issue_comment_limit and
settings_user.issue_comment_limit.period_soft_limit) or
DefaultPeriodSoftLimit(actionlimit.ISSUE_COMMENT))
issue_comment_hard_limit = (
(settings_user.issue_comment_limit and
settings_user.issue_comment_limit.period_hard_limit) or
        DefaultPeriodHardLimit(actionlimit.ISSUE_COMMENT))
issue_attachment_lifetime_limit = (
(settings_user.issue_attachment_limit and
settings_user.issue_attachment_limit.lifetime_limit) or
DefaultLifetimeLimit(actionlimit.ISSUE_ATTACHMENT))
issue_attachment_soft_limit = (
(settings_user.issue_attachment_limit and
settings_user.issue_attachment_limit.period_soft_limit) or
DefaultPeriodSoftLimit(actionlimit.ISSUE_ATTACHMENT))
issue_attachment_hard_limit = (
(settings_user.issue_attachment_limit and
settings_user.issue_attachment_limit.period_hard_limit) or
DefaultPeriodHardLimit(actionlimit.ISSUE_ATTACHMENT))
issue_bulk_edit_lifetime_limit = (
(settings_user.issue_bulk_edit_limit and
settings_user.issue_bulk_edit_limit.lifetime_limit) or
DefaultLifetimeLimit(actionlimit.ISSUE_BULK_EDIT))
issue_bulk_edit_soft_limit = (
(settings_user.issue_bulk_edit_limit and
settings_user.issue_bulk_edit_limit.period_soft_limit) or
DefaultPeriodSoftLimit(actionlimit.ISSUE_BULK_EDIT))
issue_bulk_edit_hard_limit = (
(settings_user.issue_bulk_edit_limit and
settings_user.issue_bulk_edit_limit.period_hard_limit) or
DefaultPeriodHardLimit(actionlimit.ISSUE_BULK_EDIT))
api_request_lifetime_limit = (
(settings_user.api_request_limit and
settings_user.api_request_limit.lifetime_limit) or
DefaultLifetimeLimit(actionlimit.API_REQUEST))
api_request_soft_limit = (
(settings_user.api_request_limit and
settings_user.api_request_limit.period_soft_limit) or
DefaultPeriodSoftLimit(actionlimit.API_REQUEST))
api_request_hard_limit = (
(settings_user.api_request_limit and
settings_user.api_request_limit.period_hard_limit) or
DefaultPeriodHardLimit(actionlimit.API_REQUEST))
return {
'settings_user': settings_user_view,
'settings_user_pb': template_helpers.PBProxy(settings_user),
'settings_user_is_banned': ezt.boolean(settings_user.banned),
'settings_user_ignore_action_limits': (
ezt.boolean(settings_user.ignore_action_limits)),
'self': ezt.boolean(logged_in_user_id == settings_user_view.user_id),
'project_creation_reset': (
ActionLastReset(settings_user.project_creation_limit)),
'issue_comment_reset': (
ActionLastReset(settings_user.issue_comment_limit)),
'issue_attachment_reset': (
ActionLastReset(settings_user.issue_attachment_limit)),
'issue_bulk_edit_reset': (
ActionLastReset(settings_user.issue_bulk_edit_limit)),
'api_request_reset': (
ActionLastReset(settings_user.api_request_limit)),
'project_creation_lifetime_limit': project_creation_lifetime_limit,
'project_creation_soft_limit': project_creation_soft_limit,
'project_creation_hard_limit': project_creation_hard_limit,
'issue_comment_lifetime_limit': issue_comment_lifetime_limit,
'issue_comment_soft_limit': issue_comment_soft_limit,
'issue_comment_hard_limit': issue_comment_hard_limit,
'issue_attachment_lifetime_limit': issue_attachment_lifetime_limit,
'issue_attachment_soft_limit': issue_attachment_soft_limit,
'issue_attachment_hard_limit': issue_attachment_hard_limit,
'issue_bulk_edit_lifetime_limit': issue_bulk_edit_lifetime_limit,
'issue_bulk_edit_soft_limit': issue_bulk_edit_soft_limit,
'issue_bulk_edit_hard_limit': issue_bulk_edit_hard_limit,
'api_request_lifetime_limit': api_request_lifetime_limit,
'api_request_soft_limit': api_request_soft_limit,
'api_request_hard_limit': api_request_hard_limit,
'profile_url_fragment': (
settings_user_view.profile_url[len('/u/'):]),
'preview_on_hover': ezt.boolean(settings_user.preview_on_hover),
}
@classmethod
def ProcessBanForm(
cls, cnxn, user_service, post_data, user_id, user):
"""Process the posted form data from the ban user form.
Args:
cnxn: connection to the SQL database.
user_service: An instance of UserService for saving changes.
post_data: The parsed post data from the form submission request.
user_id: The user id of the target user.
user: The user PB of the target user.
"""
user_service.UpdateUserBan(
cnxn, user_id, user, is_banned='banned' in post_data,
banned_reason=post_data.get('banned_reason', ''))
@classmethod
def ProcessSettingsForm(
cls, cnxn, user_service, post_data, user_id, user, admin=False):
"""Process the posted form data from the unified user settings form.
Args:
cnxn: connection to the SQL database.
user_service: An instance of UserService for saving changes.
post_data: The parsed post data from the form submission request.
user_id: The user id of the target user.
user: The user PB of the target user.
admin: Whether settings reserved for admins are supported.
"""
obscure_email = 'obscure_email' in post_data
kwargs = {}
if admin:
kwargs.update(is_site_admin='site_admin' in post_data,
ignore_action_limits='ignore_action_limits' in post_data)
kwargs.update(is_banned='banned' in post_data,
banned_reason=post_data.get('banned_reason', ''))
# action limits
action_limit_updates = {}
for action_name in actionlimit.ACTION_TYPE_NAMES.iterkeys():
reset_input = 'reset_' + action_name
lifetime_input = action_name + '_lifetime_limit'
soft_input = action_name + '_soft_limit'
hard_input = action_name + '_hard_limit'
pb_getter = action_name + '_limit'
old_lifetime_limit = getattr(user, pb_getter).lifetime_limit
old_soft_limit = getattr(user, pb_getter).period_soft_limit
old_hard_limit = getattr(user, pb_getter).period_hard_limit
# Try and get the new limit from post data.
# If the user doesn't use an integer, act as if no change requested.
def _GetLimit(post_data, limit_input, old_limit):
try:
new_limit = int(post_data[limit_input])
except (KeyError, ValueError):
new_limit = old_limit
return new_limit
new_lifetime_limit = _GetLimit(post_data, lifetime_input,
old_lifetime_limit)
new_soft_limit = _GetLimit(post_data, soft_input,
old_soft_limit)
new_hard_limit = _GetLimit(post_data, hard_input,
old_hard_limit)
if ((new_lifetime_limit >= 0 and
new_lifetime_limit != old_lifetime_limit) or
(new_soft_limit >= 0 and new_soft_limit != old_soft_limit) or
(new_hard_limit >= 0 and new_hard_limit != old_hard_limit)):
action_limit_updates[action_name] = (
new_soft_limit, new_hard_limit, new_lifetime_limit)
elif reset_input in post_data:
action_limit_updates[action_name] = None
kwargs.update(action_limit_updates=action_limit_updates)
user_service.UpdateUserSettings(
cnxn, user_id, user, notify='notify' in post_data,
notify_starred='notify_starred' in post_data,
email_compact_subject='email_compact_subject' in post_data,
email_view_widget='email_view_widget' in post_data,
notify_starred_ping='notify_starred_ping' in post_data,
preview_on_hover='preview_on_hover' in post_data,
obscure_email=obscure_email,
vacation_message=post_data.get('vacation_message', ''),
**kwargs)
def GetHostPort():
"""Get string domain name and port number."""
app_id = app_identity.get_application_id()
if ':' in app_id:
domain, app_id = app_id.split(':')
else:
domain = ''
if domain.startswith('google'):
hostport = '%s.googleplex.com' % app_id
else:
hostport = '%s.appspot.com' % app_id
return GetPreferredDomain(hostport)
def IssueCommentURL(hostport, project, local_id, seq_num=None):
"""Return a URL pointing directly to the specified comment."""
detail_url = FormatAbsoluteURLForDomain(
hostport, project.project_name, urls.ISSUE_DETAIL, id=local_id)
if seq_num:
detail_url += '#c%d' % seq_num
return detail_url
def MurmurHash3_x86_32(key, seed=0x0):
"""Implements the x86/32-bit version of Murmur Hash 3.0.
MurmurHash3 is written by Austin Appleby, and is placed in the public
domain. See https://code.google.com/p/smhasher/ for details.
This pure python implementation of the x86/32 bit version of MurmurHash3 is
written by Fredrik Kihlander and also placed in the public domain.
See https://github.com/wc-duck/pymmh3 for details.
The MurmurHash3 algorithm is chosen for these reasons:
* It is fast, even when implemented in pure python.
* It is remarkably well distributed, and unlikely to cause collisions.
* It is stable and unchanging (any improvements will be in MurmurHash4).
* It is well-tested, and easily usable in other contexts (such as bulk
data imports).
Args:
key (string): the data that you want hashed
seed (int): An offset, treated as essentially part of the key.
Returns:
A 32-bit integer (can be interpreted as either signed or unsigned).
"""
key = bytearray(key.encode('utf-8'))
def fmix(h):
h ^= h >> 16
h = (h * 0x85ebca6b) & 0xFFFFFFFF
h ^= h >> 13
h = (h * 0xc2b2ae35) & 0xFFFFFFFF
h ^= h >> 16
    return h
length = len(key)
nblocks = int(length / 4)
  h1 = seed
c1 = 0xcc9e2d51
c2 = 0x1b873593
# body
for block_start in xrange(0, nblocks * 4, 4):
k1 = key[ block_start + 3 ] << 24 | \
key[ block_start + 2 ] << 16 | \
key[ block_start + 1 ] << 8 | \
key[ block_start + 0 ]
k1 = c1 * k1 & 0xFFFFFFFF
k1 = (k1 << 15 | k1 >> 17) & 0xFFFFFFFF
    k1 = (c2 * k1) & 0xFFFFFFFF
h1 ^= k1
h1 = ( h1 << 13 | h1 >> 19 ) & 0xFFFFFFFF
h1 = ( h1 * 5 + 0xe6546b64 ) & 0xFFFFFFFF
# tail
tail_index = nblocks * 4
k1 = 0
tail_size = length & 3
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size != 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF
k1 = ( k1 * c2 ) & 0xFFFFFFFF
h1 ^= k1
return fmix( h1 ^ length )
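# A small illustrative check of the hash above (no specific hash value is
# asserted; the key and alternate seed are made up): equal inputs always map to
# equal 32-bit values, and a different seed gives an independent hash.
def _ExampleMurmurHashUsage():
  h1 = MurmurHash3_x86_32(u'project-name')
  h2 = MurmurHash3_x86_32(u'project-name')
  h3 = MurmurHash3_x86_32(u'project-name', seed=0x9747b28c)
  assert h1 == h2  # deterministic for the same key and seed
  assert 0 <= h1 <= 0xFFFFFFFF  # always fits in 32 bits
  return h1, h3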
def MakeRandomKey(length=RANDOM_KEY_LENGTH, chars=RANDOM_KEY_CHARACTERS):
"""Return a string with lots of random characters."""
chars = [random.choice(chars) for _ in range(length)]
return ''.join(chars)
def IsServiceAccount(email):
"""Return a boolean value whether this email is a service account."""
if email.endswith('gserviceaccount.com'):
return True
_, client_emails = (
client_config_svc.GetClientConfigSvc().GetClientIDEmails())
return email in client_emails
def GetPreferredDomain(domain):
"""Get preferred domain to display.
  The preferred domain replaces the app_id for the default versions of
  monorail-prod and monorail-staging.
"""
return settings.preferred_domains.get(domain, domain)
|
main.py
|
import cv2
import imutils
import time
import threading
import serial
import RPi.GPIO as GPIO
from bluetooth import *
from serial.serialutil import SerialException
# ------------------- Variable declarations -------------------
port = "/dev/ttyACM0"
reset_timer_seconds = -1
angles = [150, 120, 130]
arduino = serial.Serial(port, 115200, timeout=1)
haarcascade_file = '/home/pi/ArduinoRobotArm_MDP/RaspberryPi/haarcascade/haarcascade_frontalface_alt2.xml'
GPIO.setmode(GPIO.BCM)
GPIO.setup(2, GPIO.OUT)
GPIO.setup(3, GPIO.OUT)
server_socket = BluetoothSocket(RFCOMM)
# ------------------- Timer thread -------------------
def reset_timer():
global reset_timer_seconds, angles
while True:
if reset_timer_seconds > 0:
reset_timer_seconds -= 1
time.sleep(1)
if reset_timer_seconds == 0:
angles = [150, 120, 130]
print("자리 초기화")
reset_timer_seconds = -1
# ------------------- Bluetooth functions -------------------
def get_bluetooth():
global angles, reset_timer_seconds
server_socket.bind(("", 1))
server_socket.listen(1)
client_socket, address = server_socket.accept()
print("Accepted connection from ", address)
client_socket.send("bluetooth connected!")
while True:
data = client_socket.recv(1024).decode('utf-8')
print(data)
X, Y, Z = data.split(",")
print(f"X: {X}, Y: {Y}, Z: {Z}")
angles = list(map(int, [X, Y, Z]))
reset_timer_seconds = -1
# ------------------- Motor control functions -------------------
"""
send_serial and read_serial run in separate threads so serial data is exchanged continuously.
"""
def send_serial(arduino):
global angles
while True:
        c = str(int(angles[0])) + "," + str(int(angles[1])) + "," + str(int(angles[2]))  # sent as "angle1,angle2,angle3"
c = c.encode('utf-8')
try:
arduino.write(c)
            time.sleep(0.25)  # keep at least 0.25 s between serial messages
except SerialException:
print("예외 발생")
def read_serial(arduino):
while True:
if arduino.readable():
val = arduino.readline()
val = val.decode()[:len(val) - 1]
if val != '':
pass
# print(val)
# ------------------- OpenCV functions -------------------
faceCascade = cv2.CascadeClassifier(haarcascade_file)  # load the trained face cascade
# eyeCascade = cv2.CascadeClassifier('./haarcascade/haarcascade_eye.xml')
def detect(gray, frame):
global reset_timer_seconds
faces = faceCascade.detectMultiScale(gray, scaleFactor=1.03, minNeighbors=5, minSize=(
100, 100), flags=cv2.CASCADE_SCALE_IMAGE)
face_count = len(faces)
if face_count == 0:
        GPIO.output(2, True)  # red LED on, green LED off
GPIO.output(3, False)
elif face_count == 1:
        GPIO.output(2, False)  # red LED off, green LED on
GPIO.output(3, True)
for (x, y, w, h) in faces:
reset_timer_seconds = 10
            center_x = int(x + w / 2)  # compute the face center
            center_y = int(y + h / 2)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)  # draw the face box
            cv2.line(frame, (center_x, center_y), (center_x, center_y), (0, 255, 0), 5)  # mark the face center
# face_gray = gray[y:y + h, x:x + w]
# face_color = frame[y:y + h, x:x + w]
            if center_x < 110:
                print("Face is off to the left")
                if angles[0] > 10:
                    angles[0] -= 0.5
            elif center_x > 210:
                print("Face is off to the right")
                if angles[0] < 170:
                    angles[0] += 0.5
            if center_y < 60:
                print("Face is too high")
                if angles[1] < 170:
                    angles[1] += 0.5
                if angles[2] < 170:
                    angles[2] += 0.5
            elif center_y > 120:
                print("Face is too low")
                if angles[1] > 10:
                    angles[1] -= 1
                if angles[2] > 10:
                    angles[2] -= 0.5
else:
        GPIO.output(2, True)  # red LED on, green LED off
GPIO.output(3, False)
    print(f'{face_count} faces detected')
return frame
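# A minimal sketch (not called by this script) of the dead-zone logic applied
# to the x axis in detect() above: the face center is compared against the
# 110-210 pixel band and the servo angle is nudged half a degree toward the
# face, clamped to the 10-170 degree range. The y axis works the same way with
# the 60-120 band.
def _nudge_angle_x(angle, center_x, low=110, high=210, step=0.5, min_angle=10, max_angle=170):
    if center_x < low and angle > min_angle:
        return angle - step  # face is left of the dead zone; step the servo toward it
    if center_x > high and angle < max_angle:
        return angle + step  # face is right of the dead zone; step the servo toward it
    return angle  # face is inside the dead zone; hold position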
video_capture = cv2.VideoCapture(0)
prevTime = 0
# ------------------- Initialization -------------------
read_thread = threading.Thread(target=read_serial, args=(arduino,))
read_thread.start()
send_thread = threading.Thread(target=send_serial, args=(arduino,))
send_thread.start()
timer_thread = threading.Thread(target=reset_timer)
timer_thread.start()
bluetooth_thread = threading.Thread(target=get_bluetooth)
bluetooth_thread.start()
# ------------------- Main loop -------------------
while True:
_, frame = video_capture.read()
curTime = time.time()
sec = curTime - prevTime
prevTime = curTime
fps = 1 / sec
fps = "FPS : %0.1f" % fps
    frame = imutils.resize(cv2.flip(frame, 1), width=320, height=240)  # downscale because of limited Raspberry Pi processing power
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
canvas = detect(gray, frame)
cv2.rectangle(frame, (110, 120), (210, 60), (0, 0, 255), 2)
cv2.putText(canvas, fps, (0, 13),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0))
cv2.imshow('canvas', canvas)
    if cv2.waitKey(30) == 27:  # exit when ESC is pressed
break
video_capture.release()
cv2.destroyAllWindows()
|
trader.py
|
# 0.00257886 BTC @ 210817
from __future__ import print_function
from time import time
from time import sleep
import logging
from operator import itemgetter
from pymongo import MongoClient
import pandas as pd
import numpy as np
import json, requests, re, multiprocessing, subprocess
from decimal import *
global buystr
global sellstr
logger = logging.getLogger(__name__)
# Please don't use this version... THANKS... it does NOT work.
def rsi(df, window, targetcol='weightedAverage', colname='rsi'):
""" Calculates the Relative Strength Index (RSI) from a pandas dataframe
http://stackoverflow.com/a/32346692/3389859
"""
series = df[targetcol]
delta = series.diff().dropna()
u = delta * 0
d = u.copy()
u[delta > 0] = delta[delta > 0]
d[delta < 0] = -delta[delta < 0]
    # first value is the average of the gains over the first window
u[u.index[window - 1]] = np.mean(u[:window])
u = u.drop(u.index[:(window - 1)])
    # first value is the average of the losses over the first window
d[d.index[window - 1]] = np.mean(d[:window])
d = d.drop(d.index[:(window - 1)])
rs = u.ewm(com=window - 1,
ignore_na=False,
min_periods=0,
adjust=False).mean() / d.ewm(com=window - 1,
ignore_na=False,
min_periods=0,
adjust=False).mean()
df[colname] = 100 - 100 / (1 + rs)
return df
def sma(df, window, targetcol='weightedAverage', colname='sma'):
""" Calculates Simple Moving Average on a 'targetcol' in a pandas dataframe
"""
df[colname] = df[targetcol].rolling(window=window, center=False).mean()
return df
def ema(df, window, targetcol='weightedAverage', colname='ema', **kwargs):
""" Calculates Expodential Moving Average on a 'targetcol' in a pandas
dataframe """
df[colname] = df[targetcol].ewm(
span=window,
min_periods=kwargs.get('min_periods', 1),
adjust=kwargs.get('adjust', True),
ignore_na=kwargs.get('ignore_na', False)
).mean()
return df
def macd(df, fastcol='emafast', slowcol='emaslow', colname='macd'):
""" Calculates the differance between 'fastcol' and 'slowcol' in a pandas
dataframe """
df[colname] = df[fastcol] - df[slowcol]
return df
def bbands(df, window, targetcol='weightedAverage', stddev=2.0):
""" Calculates Bollinger Bands for 'targetcol' of a pandas dataframe """
    if 'sma' not in df:
df = sma(df, window, targetcol)
df['bbtop'] = df['sma'] + stddev * df[targetcol].rolling(
min_periods=window,
window=window,
center=False).std()
df['bbbottom'] = df['sma'] - stddev * df[targetcol].rolling(
min_periods=window,
window=window,
center=False).std()
df['bbrange'] = df['bbtop'] - df['bbbottom']
df['bbpercent'] = ((df[targetcol] - df['bbbottom']) / df['bbrange']) - 0.5
return df
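# A small illustrative run of the indicator helpers above on synthetic data
# (the price series here is made up, not fetched from Poloniex):
def _example_indicators():
    prices = pd.DataFrame(
        {'weightedAverage': 100 + np.sin(np.linspace(0, 12, 120))})
    prices = sma(prices, 20)
    prices = ema(prices, 10, colname='emafast')
    prices = ema(prices, 20, colname='emaslow')
    prices = macd(prices)
    prices = rsi(prices, 14)
    prices = bbands(prices, 20)
    return prices[['sma', 'emafast', 'emaslow', 'macd', 'rsi', 'bbpercent']].tail()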
class Chart(object):
""" Saves and retrieves chart data to/from mongodb. It saves the chart
    based on candle size, and when called, it will automatically update chart
data if needed using the timestamp of the newest candle to determine how
much data needs to be updated """
def __init__(self, api, pair, **kwargs):
"""
api = poloniex api object
pair = market pair
period = time period of candles (default: 5 Min)
"""
self.pair = pair
self.api = api
self.period = kwargs.get('period', self.api.MINUTE * 5)
self.db = MongoClient()['poloniex']['%s_%s_chart' %
(self.pair, str(self.period))]
def __call__(self, size=0):
""" Returns raw data from the db, updates the db if needed """
# get old data from db
old = sorted(list(self.db.find()), key=itemgetter('_id'))
try:
# get last candle
last = old[-1]
        except IndexError:
# no candle found, db collection is empty
last = False
        # no entries found, get last year of data to fill the db
if not last:
logger.warning('%s collection is empty!',
'%s_%s_chart' % (self.pair, str(self.period)))
new = self.api.returnChartData(self.pair,
period=self.period,
start=time() - self.api.YEAR)
# we have data in db already
else:
new = self.api.returnChartData(self.pair,
period=self.period,
start=int(last['_id']))
# add new candles
updateSize = len(new)
        logger.info('Updating %s with %s new entries!',
self.pair + '-' + str(self.period), str(updateSize))
# show progress
for i in range(updateSize):
print("\r%s/%s" % (str(i + 1), str(updateSize)), end=" complete ")
date = new[i]['date']
del new[i]['date']
self.db.update_one({'_id': date}, {"$set": new[i]}, upsert=True)
print('')
logger.debug('Getting chart data from db')
# return data from db
return sorted(list(self.db.find()), key=itemgetter('_id'))[-size:]
def dataFrame(self, size=0, window=120):
# get data from db
data = self.__call__(size)
# make dataframe
df = pd.DataFrame(data)
# format dates
df['date'] = [pd.to_datetime(c['_id'], unit='s') for c in data]
# del '_id'
del df['_id']
# set 'date' col as index
df.set_index('date', inplace=True)
# calculate/add sma and bbands
df = bbands(df, window)
# add slow ema
df = ema(df, window // 2, colname='emaslow')
# add fast ema
df = ema(df, window // 4, colname='emafast')
# add macd
df = macd(df)
# add rsi
df = rsi(df, window // 5)
# add candle body and shadow size
df['bodysize'] = df['open'] - df['close']
df['shadowsize'] = df['high'] - df['low']
# add percent change
df['percentChange'] = df['close'].pct_change()
return df
def run():
while True:
global buystr
global sellstr
        # Below is the coin list; please follow its format. I chose coins with daily volume above 1000.
word_list = ["BTC_ETH", "BTC_BCH", "BTC_XMR", "BTC_XRP", "BTC_DASH", "BTC_LTC", "BTC_LSK", "BTC_DGB", "BTC_XEM", "BTC_ZRX", "BTC_STR", "BTC_ETC", "BTC_FCT", "BTC_BTS", "BTC_ZEC", "BTC_SC"]
# Let's just use 5 for now... keeps things going quicker.
for word in word_list:
# initiate the data calculations
df = Chart(api, word).dataFrame()
            df.dropna(inplace=True)
data = (df.tail(2)[['percentChange']])
#Turn Data into a string
txt=str(data)
print(data)
# search for floats in the returned data
re1='.*?' # Non-greedy match on filler
re2='([+-]?\\d*\\.\\d+)(?![-+0-9\\.])' # Float 1
re3='.*?' # Non-greedy match on filler
re4='([+-]?\\d*\\.\\d+)(?![-+0-9\\.])' # Float 2
rg = re.compile(re1+re2+re3+re4,re.IGNORECASE|re.DOTALL)
m = rg.search(txt)
# Search for floats that are too small to trade decision on
re1='.*?' # Non-greedy match on filler
re2='([+-]?\\d*\\.\\d+)(?![-+0-9\\.])' # Float 1
re3='((?:[a-z][a-z0-9_]*))' # Variable Name 1
re4='([-+]\\d+)' # Integer Number 1
re5='.*?' # Non-greedy match on filler
re6='([+-]?\\d*\\.\\d+)(?![-+0-9\\.])' # Float 2
re7='((?:[a-z][a-z0-9_]*))' # Variable Name 2
re8='([-+]\\d+)' # Integer Number 2
rg = re.compile(re1+re2+re3+re4+re5+re6+re7+re8,re.IGNORECASE|re.DOTALL)
deny = rg.search(txt)
# Two if statements to decide what will happen... buy/sell/deny trade on limited data
if m:
if deny:
print(word + ' -- Percent changed too small to care')
else:
# Set the floats from the data that are real numbers
float1=m.group(1)
float2=m.group(2)
float3 = float(float1)
float4 = float(float2)
# Calculate the difference in the two numbers
diff = Decimal(float(float4 - float3))
diffstr = str(diff)
if (Decimal(float3) == 0):
print(word + ' -- Not Enough Data On This Measurement')
elif (Decimal(float4) == 0):
print(word + ' -- Not Enough Data On This Measurement')
                    # If the older percent change is positive, check the buy triggers
elif (0 < Decimal(float3)):
print('Trigger 1 reached' + word)
print(word, Decimal(float3), Decimal(float4))
print('Current diff is: ' + diffstr)
if (0 < Decimal(float4)):
print('Trigger 2 reached')
print(word, Decimal(float3), Decimal(float4))
print('Current diff is: ' + diffstr)
if (Decimal(diff) > 0.0002):
print('Trigger 3 reached')
print(word, Decimal(float3), Decimal(float4))
print('Current diff is: ' + diffstr)
if (Decimal(float4) > Decimal(float3)):
print('Final trigger reached')
print(word, Decimal(float3), Decimal(float4))
print('Current diff is: ' + diffstr)
ke1=word.replace('BTC_', '')
ke3='-BTC'
ke8=ke1+ke3
buystr=ke8
m = buy()
m.start()
elif (0 > Decimal(float3)):
print(word, Decimal(float3), Decimal(float4))
print('Current diff is: ' + diffstr)
ke1=word.replace('BTC_', '')
ke3='-BTC'
ke10=ke1+ke3
sellstr=ke10
m = sell()
m.start()
elif (Decimal(diff) < -0.0001):
print(word, Decimal(float3), Decimal(float4))
print('Current diff is: ' + diffstr)
ke1=word.replace('BTC_', '')
ke3='-BTC'
ke10=ke1+ke3
sellstr=ke10
m = sell()
m.start()
else:
print('Waiting...')
print(word, Decimal(float3), Decimal(float4))
print('Current diff is: ' + diffstr)
def buy():
return multiprocessing.Process(target = buybuy , args = ())
def buybuy():
global buystr
variable=str(buystr)
variablestr=str(variable)
print('Starting BUY Of: ' + variablestr + ' -- Please always sell 100% and buy with low percentage.')
process1='./zenbot.sh buy --order_adjust_time=10000 --markup_pct=0 --debug poloniex.' + variablestr
subprocess.Popen(process1,shell=True)
def sell():
return multiprocessing.Process(target = sellsell , args = ())
def sellsell():
global sellstr
variable=str(sellstr)
variablestr=str(variable)
print('Starting SELL Of: ' + variablestr + ' -- Please always sell 100% and buy with low percentage.')
process1='./zenbot.sh sell --order_adjust_time=10000 --markup_pct=0 --debug poloniex.' + variablestr
subprocess.Popen(process1,shell=True)
if __name__ == '__main__':
from poloniex import Poloniex
#logging.basicConfig(level=logging.DEBUG)
#logging.getLogger("poloniex").setLevel(logging.INFO)
#logging.getLogger('requests').setLevel(logging.ERROR)
api = Poloniex(jsonNums=float)
run()
|
app.py
|
# encoding: utf-8
'''
A REST API for Salt
===================
.. versionadded:: 2014.7.0
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
:depends:
- CherryPy Python module. Version 3.2.3 is currently recommended when
SSL is enabled, since this version worked the best with SSL in
internal testing. Versions 3.2.3 - 4.x can be used if SSL is not enabled.
Be aware that there is a known
`SSL error <https://bitbucket.org/cherrypy/cherrypy/issue/1298/ssl-not-working>`_
introduced in version 3.2.5. The issue was reportedly resolved with
CherryPy milestone 3.3, but the patch was committed for version 3.6.1.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/salt-netapi-client
- Python: https://github.com/saltstack/pepper
:setup:
All steps below are performed on the machine running the Salt Master
daemon. Configuration goes into the Master configuration file.
1. Install ``salt-api``. (This step varies between OS and Linux distros.
Some package systems have a split package, others include salt-api in
the main Salt package. Ensure the ``salt-api --version`` output matches
the ``salt --version`` output.)
2. Install CherryPy. (Read the version caveat in the section above.)
3. Optional: generate self-signed SSL certificates.
Using a secure HTTPS connection is strongly recommended since Salt
eauth authentication credentials will be sent over the wire.
1. Install the PyOpenSSL package.
2. Generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution
function.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
4. Edit the master config to create at least one external auth user or
group following the :ref:`full external auth instructions <acl-eauth>`.
5. Edit the master config with the following production-ready example to
enable the ``rest_cherrypy`` module. (Adjust cert paths as needed, or
disable SSL (not recommended!).)
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
6. Restart the ``salt-master`` daemon.
7. Start the ``salt-api`` daemon.
:configuration:
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
log_access_file
Path to a file to write HTTP access logs.
        .. versionadded:: Carbon
log_error_file
Path to a file to write HTTP error logs.
        .. versionadded:: Carbon
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
ssl_chain
(Optional when using PyOpenSSL) the certificate chain to pass to
``Context.load_verify_locations``.
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
app
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways:
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
Another example using the :program:`requests` library in Python:
.. code-block:: python
>>> import requests
>>> session = requests.Session()
>>> session.post('http://localhost:8000/login', json={
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'auto',
})
<Response [200]>
>>> resp = session.post('http://localhost:8000', json=[{
'client': 'local',
'tgt': '*',
'fun': 'test.arg',
'arg': ['foo', 'bar'],
'kwarg': {'baz': 'Baz!'},
}])
>>> resp.json()
{u'return': [{
...snip...
}]}
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
Commands are sent to a running Salt master via this module by sending HTTP
requests to the URLs detailed below.
.. admonition:: Content negotiation
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, x-www-form-urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
Data sent in :http:method:`post` and :http:method:`put` requests must be in
the format of a list of lowstate dictionaries. This allows multiple commands to
be executed in a single HTTP request. The order of commands in the request
corresponds to the return for each command in the response.
Lowstate, broadly, is a dictionary of values that are mapped to a function
call. This pattern is used pervasively throughout Salt. The functions called
from netapi modules are described in :ref:`Client Interfaces <netapi-clients>`.
The following example (in JSON format) causes Salt to execute two commands, a
command sent to minions as well as a runner function on the master::
[{
"client": "local",
"tgt": "*",
"fun": "test.fib",
"arg": ["10"]
},
{
"client": "runner",
"fun": "jobs.lookup_jid",
"jid": "20130603122505459265"
}]
.. admonition:: x-www-form-urlencoded
Sending JSON or YAML in the request body is simple and most flexible,
however sending data in urlencoded format is also supported with the
caveats below. It is the default format for HTML forms, many JavaScript
libraries, and the :command:`curl` command.
For example, the equivalent to running ``salt '*' test.ping`` is sending
``fun=test.ping&arg&client=local&tgt=*`` in the HTTP request body.
Caveats:
* Only a single command may be sent per HTTP request.
    * Repeating the ``arg`` parameter multiple times will cause those
      parameters to be combined into a single list (see the example below).
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
parameters. E.g., ``arg=one``, ``arg=two`` will be sent as ``arg[]=one``,
``arg[]=two``. This is not supported; send JSON or YAML instead.
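    For example (an illustrative request, not from the original documentation),
    sending ``arg`` twice results in ``arg=['one', 'two']`` on the Salt side:
    .. code-block:: bash
        curl -sSk https://localhost:8000 \\
            -H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079' \\
            -d client=local \\
            -d tgt='*' \\
            -d fun='test.arg' \\
            -d arg=one \\
            -d arg=two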
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |400| replace:: bad or malformed request
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
A Note About Curl
=================
When sending passwords and data that might need to be urlencoded, you must set
the ``-d`` flag to indicate the content type, and the ``--data-urlencode`` flag
to urlencode the input.
.. code-block:: bash
curl -ksi http://localhost:8000/login \\
-H "Accept: application/json" \\
-d username='myapiuser' \\
--data-urlencode password='1234+' \\
-d eauth='pam'
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
# Import Python libs
from __future__ import absolute_import
import collections
import itertools
import functools
import logging
import json
import tarfile
import time
from multiprocessing import Process, Pipe
# Import third-party libs
# pylint: disable=import-error
import cherrypy
import yaml
import salt.ext.six as six
# pylint: enable=import-error
# Import Salt libs
import salt
import salt.auth
import salt.utils.event
# Import salt-api libs
import salt.netapi
logger = logging.getLogger(__name__)
# Imports related to websocket
try:
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type('websockets', (object,), {
'SynchronizingWebsocket': None,
})
HAS_WEBSOCKETS = False
def html_override_tool():
'''
Bypass the normal handler and serve HTML for all URLs
The ``app_path`` setting must be non-empty and the request must ask for
``text/html`` in the ``Accept`` header.
'''
apiopts = cherrypy.config['apiopts']
request = cherrypy.request
url_blacklist = (
apiopts.get('app_path', '/app'),
apiopts.get('static_path', '/static'),
)
if 'app' not in cherrypy.config['apiopts']:
return
if request.path_info.startswith(url_blacklist):
return
if request.headers.get('Accept') == '*/*':
return
try:
wants_html = cherrypy.lib.cptools.accept('text/html')
except cherrypy.HTTPError:
return
else:
if wants_html != 'text/html':
return
raise cherrypy.InternalRedirect(apiopts.get('app_path', '/app'))
def salt_token_tool():
'''
If the custom authentication header is supplied, put it in the cookie dict
so the rest of the session-based auth works as intended
'''
x_auth = cherrypy.request.headers.get('X-Auth-Token', None)
# X-Auth-Token header trumps session cookie
if x_auth:
cherrypy.request.cookie['session_id'] = x_auth
def salt_api_acl_tool(username, request):
'''
    .. versionadded:: 2016.3.0
    Verifies user requests against the API whitelist (user/IP pairs), in
    order to provide IP-based whitelisting for the API similar to the
    master, but at the API layer.
    .. code-block:: yaml
rest_cherrypy:
api_acl:
users:
'*':
- 1.1.1.1
- 1.1.1.2
foo:
- 8.8.4.4
bar:
- '*'
:param username: Username to check against the API.
:type username: str
:param request: Cherrypy request to check against the API.
:type request: cherrypy.request
'''
failure_str = ("[api_acl] Authentication failed for "
"user {0} from IP {1}")
success_str = ("[api_acl] Authentication sucessful for "
"user {0} from IP {1}")
pass_str = ("[api_acl] Authentication not checked for "
"user {0} from IP {1}")
acl = None
# Salt Configuration
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
# Cherrypy Config.
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
# ACL Config.
acl = cherrypy_conf.get('api_acl', None)
ip = request.remote.ip
if acl:
users = acl.get('users', {})
if users:
if username in users:
if ip in users[username] or '*' in users[username]:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
elif username not in users and '*' in users:
if ip in users['*'] or '*' in users['*']:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(pass_str.format(username, ip))
return True
def salt_ip_verify_tool():
'''
If there is a list of restricted IPs, verify current
client is coming from one of those IPs.
'''
# This is overly cumbersome and crude,
# But, it's also safe... ish...
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get('authorized_ips', None)
if auth_ip_list:
logger.debug("Found IP list: {0}".format(auth_ip_list))
rem_ip = cherrypy.request.headers.get('Remote-Addr', None)
logger.debug("Request from IP: {0}".format(rem_ip))
if rem_ip not in auth_ip_list:
logger.error("Blocked IP: {0}".format(rem_ip))
cherrypy.response.status = 403
return {
'status': cherrypy.response.status,
'return': "Bad IP",
}
def salt_auth_tool():
'''
Redirect all unauthenticated requests to the login page
'''
# Redirect to the login page if the session hasn't been authed
if 'token' not in cherrypy.session: # pylint: disable=W8601
raise cherrypy.HTTPError(401)
# Session is authenticated; inform caches
cherrypy.response.headers['Cache-Control'] = 'private'
def cors_handler(*args, **kwargs):
'''
Check a CORS preflight request and return a valid response
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
ac_method = req_head.get('Access-Control-Request-Method', None)
allowed_methods = ['GET', 'POST']
allowed_headers = ['X-Auth-Token', 'Content-Type']
if ac_method and ac_method in allowed_methods:
resp_head['Access-Control-Allow-Methods'] = ', '.join(allowed_methods)
resp_head['Access-Control-Allow-Headers'] = ', '.join(allowed_headers)
resp_head['Connection'] = 'keep-alive'
resp_head['Access-Control-Max-Age'] = '1400'
return {}
def cors_tool():
'''
Handle both simple and complex CORS requests
Add CORS headers to each response. If the request is a CORS preflight
request swap out the default handler with a simple, single-purpose handler
that verifies the request and provides a valid CORS response.
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
# Always set response headers necessary for 'simple' CORS.
resp_head['Access-Control-Allow-Origin'] = req_head.get('Origin', '*')
resp_head['Access-Control-Expose-Headers'] = 'GET, POST'
resp_head['Access-Control-Allow-Credentials'] = 'true'
# If this is a non-simple CORS preflight request swap out the handler.
if cherrypy.request.method == 'OPTIONS':
cherrypy.serving.request.handler = cors_handler
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
('application/json', json.dumps),
('application/x-yaml', functools.partial(
yaml.safe_dump, default_flow_style=False)),
)
def hypermedia_handler(*args, **kwargs):
'''
Determine the best output format based on the Accept header, execute the
regular handler, and transform the output to the request content type (even
if it's an error).
:param args: Pass args through to the main handler
:param kwargs: Pass kwargs through to the main handler
'''
# Execute the real handler. Handle or pass-through any errors we know how
# to handle (auth & HTTP errors). Reformat any errors we don't know how to
# handle as a data structure.
try:
cherrypy.response.processors = dict(ct_out_map)
ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
except (salt.exceptions.EauthAuthenticationError,
salt.exceptions.TokenAuthenticationError):
raise cherrypy.HTTPError(401)
except salt.exceptions.SaltInvocationError:
raise cherrypy.HTTPError(400)
except (salt.exceptions.SaltDaemonNotRunning,
salt.exceptions.SaltReqTimeoutError) as exc:
raise cherrypy.HTTPError(503, exc.strerror)
except cherrypy.CherryPyException:
raise
except Exception as exc:
import traceback
logger.debug("Error while processing request for: %s",
cherrypy.request.path_info,
exc_info=True)
cherrypy.response.status = 500
ret = {
'status': cherrypy.response.status,
            'return': '{0}'.format(traceback.format_exc())
if cherrypy.config['debug']
else "An unexpected error occurred"}
# Raises 406 if requested content-type is not supported
best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
# Transform the output from the handler into the requested output format
cherrypy.response.headers['Content-Type'] = best
out = cherrypy.response.processors[best]
try:
return out(ret)
except Exception:
msg = 'Could not serialize the return data from Salt.'
logger.debug(msg, exc_info=True)
raise cherrypy.HTTPError(500, msg)
def hypermedia_out():
'''
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
'''
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
request.handler = hypermedia_handler
def process_request_body(fn):
    '''
    A decorator to skip a processor function if process_request_body is False
    '''
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):  # pylint: disable=C0111
        if cherrypy.request.process_request_body is not False:
            fn(*args, **kwargs)
    return wrapped
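# Illustrative effect of the decorator above (descriptive note, not original
# documentation): a processor such as json_processor below becomes a no-op for
# requests where CherryPy's process_request_body flag is False (e.g. bodiless
# POSTs, see hypermedia_in() further down) and runs normally otherwise.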
def urlencoded_processor(entity):
'''
Accept x-www-form-urlencoded data (run through CherryPy's formatter)
and reformat it into a Low State data structure.
Since we can't easily represent complicated data structures with
key-value pairs, any more complicated requirements (e.g. compound
commands) must instead be delivered via JSON or YAML.
For example::
.. code-block:: bash
curl -si localhost:8000 -d client=local -d tgt='*' \\
-d fun='test.kwarg' -d arg='one=1' -d arg='two=2'
:param entity: raw POST data
'''
# First call out to CherryPy's default processor
cherrypy._cpreqbody.process_urlencoded(entity)
cherrypy.serving.request.unserialized_data = entity.params
cherrypy.serving.request.raw_body = ''
@process_request_body
def json_processor(entity):
'''
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
'''
Unserialize raw POST data in YAML format to a Python data structure.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = yaml.safe_load(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid YAML document')
cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
'''
Attempt to unserialize plain text as JSON
Some large services still send JSON with a text/plain Content-Type. Those
services are bad and should feel bad.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
cherrypy.serving.request.unserialized_data = body
cherrypy.serving.request.raw_body = body
def hypermedia_in():
'''
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
'''
# Be liberal in what you accept
ct_in_map = {
'application/x-www-form-urlencoded': urlencoded_processor,
'application/json': json_processor,
'application/x-yaml': yaml_processor,
'text/yaml': yaml_processor,
'text/plain': text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (cherrypy.request.method.upper() == 'POST'
and cherrypy.request.headers.get('Content-Length', '0') == '0'):
cherrypy.request.process_request_body = False
cherrypy.request.unserialized_data = None
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, 'Content type not supported')
cherrypy.request.body.processors = ct_in_map
def lowdata_fmt():
'''
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
'''
if cherrypy.request.method.upper() != 'POST':
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
if data and not isinstance(data, list):
# Make the 'arg' param a list if not already
if 'arg' in data and not isinstance(data['arg'], list):
data['arg'] = [data['arg']]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data
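# Illustrative effect of lowdata_fmt() above (hypothetical values): a single
# urlencoded command such as
#   {'client': 'local', 'tgt': '*', 'fun': 'test.arg', 'arg': 'foo'}
# is normalized into the list-of-lowstate-chunks form
#   [{'client': 'local', 'tgt': '*', 'fun': 'test.arg', 'arg': ['foo']}]
# while JSON/YAML bodies that already arrive as a list are passed through as-is.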
cherrypy.tools.html_override = cherrypy.Tool('on_start_resource',
html_override_tool, priority=53)
cherrypy.tools.salt_token = cherrypy.Tool('on_start_resource',
salt_token_tool, priority=55)
cherrypy.tools.salt_auth = cherrypy.Tool('before_request_body',
salt_auth_tool, priority=60)
cherrypy.tools.hypermedia_in = cherrypy.Tool('before_request_body',
hypermedia_in)
cherrypy.tools.cors_tool = cherrypy.Tool('before_request_body',
cors_tool, priority=30)
cherrypy.tools.lowdata_fmt = cherrypy.Tool('before_handler',
lowdata_fmt, priority=40)
cherrypy.tools.hypermedia_out = cherrypy.Tool('before_handler',
hypermedia_out)
cherrypy.tools.salt_ip_verify = cherrypy.Tool('before_handler',
salt_ip_verify_tool)
###############################################################################
class LowDataAdapter(object):
'''
The primary entry point to Salt's REST API
'''
exposed = True
_cp_config = {
'tools.sessions.on': True,
'tools.sessions.timeout': 60 * 10, # 10 hours
# 'tools.autovary.on': True,
'tools.hypermedia_out.on': True,
'tools.hypermedia_in.on': True,
'tools.lowdata_fmt.on': True,
'tools.salt_ip_verify.on': True,
}
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
'''
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
'''
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session.release_lock()
        # If the loaded lowstate isn't a list, let's notify the client
if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, 'Lowstates must be a list')
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk['token'] = token
if cherrypy.session.get('user'):
chunk['__current_eauth_user'] = cherrypy.session.get('user')
if cherrypy.session.get('groups'):
chunk['__current_eauth_groups'] = cherrypy.session.get('groups')
if client:
chunk['client'] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if 'arg' in chunk and not isinstance(chunk['arg'], list):
chunk['arg'] = [chunk['arg']]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, collections.Iterator):
for i in ret:
yield i
else:
yield ret
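    # Illustrative transformation performed by exec_lowstate() above
    # (hypothetical values): an incoming chunk such as
    #   {'client': 'local', 'tgt': '*', 'fun': 'test.ping'}
    # is executed as
    #   {'client': 'local', 'tgt': '*', 'fun': 'test.ping',
    #    'token': '<session token>'}
    # after the session token (and, for URLs like /minions, a forced client)
    # is injected.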
def GET(self):
'''
An explanation of the API with links of where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: http
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
'''
import inspect
return {
'return': "Welcome",
'clients': salt.netapi.CLIENTS,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
'''
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request:**
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-H "Accept: application/x-yaml" \\
-H "X-Auth-Token: d40d1e1e<...snip...>" \\
-d client=local \\
-d tgt='*' \\
                -d fun='test.ping'
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Length: 36
Content-Type: application/x-www-form-urlencoded
fun=test.ping&client=local&tgt=*
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
**Other examples**:
.. code-block:: bash
# Sending multiple positional args with urlencoded:
curl -sSik https://localhost:8000 \\
-d client=local \\
-d tgt='*' \\
-d fun='cmd.run' \\
-d arg='du -sh .' \\
-d arg='/path/to/dir'
# Sending positional args and Keyword args with JSON:
echo '[
{
"client": "local",
"tgt": "*",
"fun": "cmd.run",
"arg": [
"du -sh .",
"/path/to/dir"
],
"kwarg": {
"shell": "/bin/sh",
"template": "jinja"
}
}
]' | curl -sSik https://localhost:8000 \\
-H 'Content-type: application/json' \\
-d@-
# Calling runner functions:
curl -sSik https://localhost:8000 \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682' \\
-d outputter=highstate
# Calling wheel functions:
curl -sSik https://localhost:8000 \\
-d client=wheel \\
-d fun='key.gen_accept' \\
-d id_=dave \\
-d keysize=4096
'''
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token')))
}
class Minions(LowDataAdapter):
'''
Convenience URLs for working with minions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, mid=None):
'''
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/minions/ms-3
.. code-block:: http
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
'''
cherrypy.request.lowstate = [{
'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
}]
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token'))),
}
def POST(self, **kwargs):
'''
Start an execution command and immediately return the job id
.. http:post:: /minions
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body. The ``client`` option will be set to
:py:meth:`~salt.client.LocalClient.local_async`.
**Example request:**
.. code-block:: bash
curl -sSi localhost:8000/minions \\
-H "Accept: application/x-yaml" \\
-d tgt='*' \\
-d fun='status.diskusage'
.. code-block:: http
POST /minions HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 26
Content-Type: application/x-www-form-urlencoded
tgt=*&fun=status.diskusage
**Example response:**
.. code-block:: http
HTTP/1.1 202 Accepted
Content-Length: 86
Content-Type: application/x-yaml
return:
- jid: '20130603122505459265'
minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
_links:
jobs:
- href: /jobs/20130603122505459265
'''
job_data = list(self.exec_lowstate(client='local_async',
token=cherrypy.session.get('token')))
cherrypy.response.status = 202
return {
'return': job_data,
'_links': {
'jobs': [{'href': '/jobs/{0}'.format(i['jid'])}
for i in job_data if i],
},
}
class Jobs(LowDataAdapter):
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, jid=None, timeout=''):
'''
A convenience URL for getting lists of previously run jobs or getting
the return from a single job
.. http:get:: /jobs/(jid)
List jobs or show a single job from the job cache.
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs
.. code-block:: http
GET /jobs HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
- '20121130104633606931':
Arguments:
- '3'
Function: test.fib
Start Time: 2012, Nov 30 10:46:33.606931
Target: jerry
Target-type: glob
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs/20121130104633606931
.. code-block:: http
GET /jobs/20121130104633606931 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
info:
- Arguments:
- '3'
Function: test.fib
Minions:
- jerry
Start Time: 2012, Nov 30 10:46:33.606931
Target: '*'
Target-type: glob
User: saltdev
jid: '20121130104633606931'
return:
- jerry:
- - 0
- 1
- 1
- 2
- 6.9141387939453125e-06
'''
lowstate = [{
'client': 'runner',
'fun': 'jobs.lookup_jid' if jid else 'jobs.list_jobs',
'jid': jid,
}]
if jid:
lowstate.append({
'client': 'runner',
'fun': 'jobs.list_job',
'jid': jid,
})
cherrypy.request.lowstate = lowstate
job_ret_info = list(self.exec_lowstate(
token=cherrypy.session.get('token')))
ret = {}
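# When a jid was given, two runner calls were queued above (jobs.lookup_jid
# and jobs.list_job), so the result list holds the job return followed by
# the job metadata.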
if jid:
job_ret, job_info = job_ret_info
ret['info'] = [job_info]
else:
job_ret = job_ret_info[0]
ret['return'] = [job_ret]
return ret
class Keys(LowDataAdapter):
'''
Convenience URLs for working with minion keys
.. versionadded:: 2014.7.0
These URLs wrap the functionality provided by the functions in the
:py:mod:`key wheel module <salt.wheel.key>`.
'''
@cherrypy.config(**{'tools.salt_token.on': True})
def GET(self, mid=None):
'''
Show the list of minion keys or detail on a specific key
.. versionadded:: 2014.7.0
.. http:get:: /keys/(mid)
List all keys or show a specific key
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys
.. code-block:: http
GET /keys HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
local:
- master.pem
- master.pub
minions:
- jerry
minions_pre: []
minions_rejected: []
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys/jerry
.. code-block:: http
GET /keys/jerry HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
minions:
jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
'''
if mid:
lowstate = [{
'client': 'wheel',
'fun': 'key.finger',
'match': mid,
}]
else:
lowstate = [{
'client': 'wheel',
'fun': 'key.list_all',
}]
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate(token=cherrypy.session.get('token'))
return {'return': next(result, {}).get('data', {}).get('return', {})}
@cherrypy.config(**{'tools.hypermedia_out.on': False, 'tools.sessions.on': False})
def POST(self, **kwargs):
r'''
Easily generate keys for a minion and auto-accept the new key
Accepts all the same parameters as the :py:func:`key.gen_accept
<salt.wheel.key.gen_accept>` function.
Example partial kickstart script to bootstrap a new minion:
.. code-block:: text
%post
mkdir -p /etc/salt/pki/minion
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
| tar -C /etc/salt/pki/minion -xf -
mkdir -p /etc/salt/minion.d
printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
%end
.. http:post:: /keys
Generate a public and private key and return both as a tarball
Authentication credentials must be passed in the request.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
-o jerry-salt-keys.tar
.. code-block:: http
POST /keys HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 10240
Content-Disposition: attachment; filename="saltkeys-jerry.tar"
Content-Type: application/x-tar
jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000
'''
lowstate = cherrypy.request.lowstate
lowstate[0].update({
'client': 'wheel',
'fun': 'key.gen_accept',
})
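# The key.gen_accept wheel function expects the minion ID as 'id_';
# accept the friendlier 'mid' form parameter and translate it.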
if 'mid' in lowstate[0]:
lowstate[0]['id_'] = lowstate[0].pop('mid')
result = self.exec_lowstate()
ret = next(result, {}).get('data', {}).get('return', {})
pub_key = ret.get('pub', '')
pub_key_file = tarfile.TarInfo('minion.pub')
pub_key_file.size = len(pub_key)
priv_key = ret.get('priv', '')
priv_key_file = tarfile.TarInfo('minion.pem')
priv_key_file.size = len(priv_key)
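# Package both keys into an in-memory tarball so they can be returned
# as a single downloadable attachment.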
fileobj = six.moves.StringIO()
tarball = tarfile.open(fileobj=fileobj, mode='w')
tarball.addfile(pub_key_file, six.moves.StringIO(pub_key))
tarball.addfile(priv_key_file, six.moves.StringIO(priv_key))
tarball.close()
headers = cherrypy.response.headers
headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(lowstate[0]['id_'])
headers['Content-Type'] = 'application/x-tar'
headers['Content-Length'] = fileobj.len
headers['Cache-Control'] = 'no-cache'
fileobj.seek(0)
return fileobj
class Login(LowDataAdapter):
'''
Log in to receive a session token
:ref:`Authentication information <rest_cherrypy-auth>`.
'''
def __init__(self, *args, **kwargs):
super(Login, self).__init__(*args, **kwargs)
self.auth = salt.auth.Resolver(self.opts)
def GET(self):
'''
Present the login interface
.. http:get:: /login
An explanation of how to log in.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: http
GET /login HTTP/1.1
Host: localhost:8000
Accept: text/html
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: text/html
'''
cherrypy.response.headers['WWW-Authenticate'] = 'Session'
return {
'status': cherrypy.response.status,
'return': "Please log in",
}
def POST(self, **kwargs):
'''
:ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-H "Accept: application/json" \\
-d username='saltuser' \\
-d password='saltpass' \\
-d eauth='pam'
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/x-www-form-urlencoded
Accept: application/json
username=saltuser&password=saltpass&eauth=pam
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
'''
if not self.api._is_master_running():
raise salt.exceptions.SaltDaemonNotRunning(
'Salt Master is not available.')
# the urlencoded_processor will wrap this in a list
if isinstance(cherrypy.serving.request.lowstate, list):
creds = cherrypy.serving.request.lowstate[0]
else:
creds = cherrypy.serving.request.lowstate
username = creds.get('username', None)
# Validate against the whitelist.
if not salt_api_acl_tool(username, cherrypy.request):
raise cherrypy.HTTPError(401)
# Mint token.
token = self.auth.mk_token(creds)
if 'token' not in token:
raise cherrypy.HTTPError(401,
'Could not authenticate using provided credentials')
cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
cherrypy.session['token'] = token['token']
cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60
cherrypy.session['user'] = token['name']
if 'groups' in token:
cherrypy.session['groups'] = token['groups']
# Grab eauth config for the current backend for the current user
try:
eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})
# Get sum of '*' perms, user-specific perms, and group-specific perms
perms = eauth.get(token['name'], [])
perms.extend(eauth.get('*', []))
if 'groups' in token and token['groups']:
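# Group entries in the external_auth config are suffixed with '%';
# strip the marker so they can be intersected with the user's groups.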
user_groups = set(token['groups'])
eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])
for group in user_groups & eauth_groups:
perms.extend(eauth['{0}%'.format(group)])
if not perms:
logger.debug("Eauth permission list not found.")
except Exception:
logger.debug("Configuration for external_auth malformed for "
"eauth '{0}', and user '{1}'."
.format(token.get('eauth'), token.get('name')), exc_info=True)
perms = None
return {'return': [{
'token': cherrypy.session.id,
'expire': token['expire'],
'start': token['start'],
'user': token['name'],
'eauth': token['eauth'],
'perms': perms or {},
}]}
class Logout(LowDataAdapter):
'''
Class to remove or invalidate sessions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
'tools.lowdata_fmt.on': False,
})
def POST(self):
'''
Destroy the currently active session and expire the session cookie
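**Example request** (an illustrative sketch using the third-party
``requests`` library; the token value is hypothetical and must come from
a prior call to ``/login``):
.. code-block:: python
import requests
token = 'd40d1e1e'  # hypothetical session token obtained from /login
headers = {'X-Auth-Token': token, 'Accept': 'application/json'}
resp = requests.post('http://localhost:8000/logout', headers=headers)
print(resp.json())  # expected: {'return': 'Your token has been cleared'}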
'''
cherrypy.lib.sessions.expire() # set client-side to expire
cherrypy.session.regenerate() # replace server-side with new
return {'return': "Your token has been cleared"}
class Run(LowDataAdapter):
'''
Class to run commands without normal session handling
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.sessions.on': False,
})
def POST(self, **kwargs):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`
.. http:post:: /run
This entry point is primarily for "one-off" commands. Each request
must pass full Salt authentication credentials. Otherwise this URL
is identical to the :py:meth:`root URL (/) <LowDataAdapter.POST>`.
:term:`lowstate` data describing Salt commands must be sent in the
request body.
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='local' \\
-d tgt='*' \\
-d fun='test.ping' \\
-d username='saltdev' \\
-d password='saltdev' \\
-d eauth='pam'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=local&tgt=*&fun=test.ping&username=saltdev&password=saltdev&eauth=pam
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
The /run endpoint can also be used to issue commands using the salt-ssh
subsystem. When using salt-ssh, eauth credentials should not be supplied.
Instead, authentication should be handled by the SSH layer itself. The use
of the salt-ssh client does not require a salt master to be running; only
a roster file must be present in the salt configuration directory.
All SSH client requests are synchronous.
**Example SSH client request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='ssh' \\
-d tgt='*' \\
-d fun='test.ping'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=ssh&tgt=*&fun=test.ping
**Example SSH response:**
.. code-block:: http
return:
- silver:
fun: test.ping
fun_args: []
id: silver
jid: '20141203103525666185'
retcode: 0
return: true
success: true
'''
return {
'return': list(self.exec_lowstate()),
}
class Events(object):
'''
Expose the Salt event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.resolver = salt.auth.Resolver(self.opts)
def _is_valid_token(self, auth_token):
'''
Check if this is a valid salt-api token or valid Salt token
salt-api tokens are regular session tokens that tie back to a real Salt
token. Salt tokens are tokens generated by Salt's eauth system.
:return bool: True if valid, False if not valid.
'''
if auth_token is None:
return False
# First check if the given token is in our session table; if so it's a
# salt-api token and we need to get the Salt token from there.
orig_session, _ = cherrypy.session.cache.get(auth_token, ({}, None))
# If it's not in the session table, assume it's a regular Salt token.
salt_token = orig_session.get('token', auth_token)
# The eauth system does not currently support perms for the event
# stream, so we're just checking whether the token exists, not whether
# it allows access.
if salt_token and self.resolver.get_token(salt_token):
return True
return False
def GET(self, token=None, salt_token=None):
r'''
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
:query token: **optional** parameter containing the token
ordinarily supplied via the X-Auth-Token header in order to
allow cross-domain requests in browsers that do not include
CORS support in the EventSource API. E.g.,
``curl -NsS localhost:8000/events?token=308650d``
:query salt_token: **optional** parameter containing a raw Salt
*eauth token* (not to be confused with the token returned from
the /login URL). E.g.,
``curl -NsS localhost:8000/events?salt_token=30742765``
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: http
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
Note: the ``tag`` field is not part of the spec. SSE-compliant clients
should ignore unknown fields. This addition allows non-compliant
clients to only watch for certain tags without having to deserialize the
JSON object each time.
.. code-block:: http
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
tag: salt/job/20130802115730568475/new
data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
tag: salt/job/20130802115730568475/ret/jerry
data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
var source = new EventSource('/events');
source.onopen = function() { console.info('Listening ...') };
source.onerror = function(err) { console.error(err) };
source.onmessage = function(message) {
var saltEvent = JSON.parse(message.data);
console.info(saltEvent.tag)
console.debug(saltEvent.data)
};
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
deserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
'''
cookies = cherrypy.request.cookie
auth_token = token or salt_token or (
cookies['session_id'].value if 'session_id' in cookies else None)
if not self._is_valid_token(auth_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers['Content-Type'] = 'text/event-stream'
cherrypy.response.headers['Cache-Control'] = 'no-cache'
cherrypy.response.headers['Connection'] = 'keep-alive'
def listen():
'''
An iterator to yield Salt events
'''
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True)
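# Emit Server-Sent Events: a retry hint first, then one tag:/data: pair
# per Salt event; the blank line after each data field terminates the
# SSE record.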
yield u'retry: {0}\n'.format(400)
while True:
data = next(stream)
yield u'tag: {0}\n'.format(data.get('tag', ''))
yield u'data: {0}\n\n'.format(json.dumps(data))
return listen()
class WebsocketEndpoint(object):
'''
Open a WebSocket connection to Salt's event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
'tools.websocket.on': True,
'tools.websocket.handler_cls': websockets.SynchronizingWebsocket,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None, **kwargs):
'''
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request:** ::
curl -NsSk \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: https://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: http
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: https://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: http
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
var source = new Websocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using, for example, the `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ module.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
print ws.recv()
ws.close()
The above examples show how to establish a websocket connection to Salt
and activate real-time updates from Salt's event stream by signaling
``websocket client ready``.
'''
# Pulling the session token from a URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
salt_token = orig_session.get('token')
else:
salt_token = cherrypy.session.get('token')
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
'''
An iterator to return Salt events (and optionally format them)
'''
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True)
SaltInfo = event_processor.SaltInfo(handler)
while True:
data = next(stream)
if data:
try: # work around try to decode catch unicode errors
if 'format_events' in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send('data: {0}\n\n'.format(
json.dumps(data)), False)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n{0}"
.format(data))
time.sleep(0.1)
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle async push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
class Webhook(object):
'''
A generic web hook entry point that fires an event on Salt's event bus
External services can POST data to this URL to trigger an event in Salt.
For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
.. note:: Be mindful of security
Salt's Reactor can run any code. A Reactor SLS that responds to a hook
event is responsible for validating that the event came from a trusted
source and contains valid data.
**This is a generic interface and securing it is up to you!**
This URL requires authentication; however, not all external services can
be configured to authenticate. For this reason, authentication can be
selectively disabled for this URL. Follow best practices: always use
SSL, pass a secret key, and configure the firewall to only allow traffic
from a known source.
The event data is taken from the request body. The
:mailheader:`Content-Type` header is respected for the payload.
The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
appended to the end. For example, a ``POST`` request sent to
``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
``salt/netapi/hook/mycompany/myapp/mydata``.
The following is an example ``.travis.yml`` file to send notifications to
Salt of successful test runs:
.. code-block:: yaml
language: python
script: python -m unittest tests
after_success:
- |
curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \
-d branch="${TRAVIS_BRANCH}" \
-d commit="${TRAVIS_COMMIT}"
.. seealso:: :ref:`events`, :ref:`reactor`
'''
exposed = True
tag_base = ['salt', 'netapi', 'hook']
_cp_config = dict(LowDataAdapter._cp_config, **{
# Don't do any lowdata processing on the POST data
'tools.lowdata_fmt.on': True,
# Auth can be overridden in __init__().
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=False)
if cherrypy.config['apiopts'].get('webhook_disable_auth'):
self._cp_config['tools.salt_token.on'] = False
self._cp_config['tools.salt_auth.on'] = False
def POST(self, *args, **kwargs):
'''
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/hook -d foo='Foo!' -d bar='Bar!'
.. code-block:: http
POST /hook HTTP/1.1
Host: localhost:8000
Content-Length: 16
Content-Type: application/x-www-form-urlencoded
foo=Foo&bar=Bar!
**Example response**:
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 14
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``https://localhost:8000/hook/mycompany/build/success`` containing, as
JSON, the result of the build and the SHA of the version that was
built. That would then produce the following event in Salt, which could
be used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: yaml
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
pillar:
revision: {{ build.revision }}
{% endif %}
'''
tag = '/'.join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
if not data:
data = {}
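# Include the raw request body and the headers alongside the parsed data
# so a Reactor SLS can validate where the event came from.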
raw_body = getattr(cherrypy.serving.request, 'raw_body', '')
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event({
'body': raw_body,
'post': data,
'headers': headers,
}, tag)
return {'success': ret}
class Stats(object):
'''
Expose statistics on the running CherryPy server
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self):
'''
Return a dump of statistics collected from the CherryPy server
.. http:get:: /stats
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
'''
if hasattr(logging, 'statistics'):
# Late import
try:
from cherrypy.lib import cpstats
except ImportError:
logger.error('Import of cherrypy.cpstats failed. Possible '
'upstream bug here: https://github.com/cherrypy/cherrypy/issues/1444')
return {}
return cpstats.extrapolate_statistics(logging.statistics)
return {}
class App(object):
'''
Class to serve HTML5 apps
'''
exposed = True
def GET(self, *args):
'''
Serve a single static file ignoring the remaining path
This is useful in combination with a browser-based app using the HTML5
history API.
.. http:get:: /app
:reqheader X-Auth-Token: |req_token|
:status 200: |200|
:status 401: |401|
'''
apiopts = cherrypy.config['apiopts']
return cherrypy.lib.static.serve_file(apiopts['app'])
class API(object):
'''
Collect configuration and URL map for building the CherryPy app
'''
url_map = {
'index': LowDataAdapter,
'login': Login,
'logout': Logout,
'minions': Minions,
'run': Run,
'jobs': Jobs,
'keys': Keys,
'events': Events,
'stats': Stats,
}
def _setattr_url_map(self):
'''
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
'''
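# For example, setattr(self, 'login', Login()) makes the Login handler
# reachable at the /login URL.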
for url, cls in six.iteritems(self.url_map):
setattr(self, url, cls())
def _update_url_map(self):
'''
Assemble any dynamic or configurable URLs
'''
if HAS_WEBSOCKETS:
self.url_map.update({
'ws': WebsocketEndpoint,
})
# Allow the Webhook URL to be overridden from the conf.
self.url_map.update({
self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook,
})
# Enable the single-page JS app URL.
if 'app' in self.apiopts:
self.url_map.update({
self.apiopts.get('app_path', 'app').lstrip('/'): App,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self._update_url_map()
self._setattr_url_map()
def get_conf(self):
'''
Combine the CherryPy configuration with the rest_cherrypy config values
pulled from the master config and return the CherryPy configuration
'''
conf = {
'global': {
'server.socket_host': self.apiopts.get('host', '0.0.0.0'),
'server.socket_port': self.apiopts.get('port', 8000),
'server.thread_pool': self.apiopts.get('thread_pool', 100),
'server.socket_queue_size': self.apiopts.get('queue_size', 30),
'engine.timeout_monitor.on': self.apiopts.get(
'expire_responses', True),
'max_request_body_size': self.apiopts.get(
'max_request_body_size', 1048576),
'debug': self.apiopts.get('debug', False),
'log.access_file': self.apiopts.get('log_access_file', ''),
'log.error_file': self.apiopts.get('log_error_file', ''),
},
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.trailing_slash.on': True,
'tools.gzip.on': True,
'tools.cpstats.on': self.apiopts.get('collect_stats', False),
'tools.html_override.on': True,
'tools.cors_tool.on': True,
},
}
if 'favicon' in self.apiopts:
conf['/favicon.ico'] = {
'tools.staticfile.on': True,
'tools.staticfile.filename': self.apiopts['favicon'],
}
if self.apiopts.get('debug', False) is False:
conf['global']['environment'] = 'production'
# Serve static media if the directory has been set in the configuration
if 'static' in self.apiopts:
conf[self.apiopts.get('static_path', '/static')] = {
'tools.staticdir.on': True,
'tools.staticdir.dir': self.apiopts['static'],
}
# Add to global config
cherrypy.config.update(conf['global'])
return conf
def get_app(opts):
'''
Returns a WSGI app and a configuration dictionary
'''
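# Illustrative usage sketch (the real start-up code lives elsewhere in
# salt-api); 'master_opts' stands in for the loaded master config:
#   root, apiopts, cpyopts = get_app(master_opts)
#   cherrypy.quickstart(root, '/', cpyopts)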
apiopts = opts.get(__name__.rsplit('.', 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config['saltopts'] = opts
cherrypy.config['apiopts'] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts
|
rpc_test.py
|
import concurrent.futures
import contextlib
import json
import os
import sys
import threading
import time
from collections import namedtuple
from functools import partial
from threading import Event
from threading import Lock
from unittest import mock
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.distributed.rpc as rpc
import torch.distributed.autograd as dist_autograd
from torch.distributed.rpc import RRef, _get_debug_info, _rref_context_get_debug_info
from torch.distributed.rpc.api import _delete_all_user_and_unforked_owner_rrefs, _use_rpc_pickler, _thread_local_var, _wait_all
from torch.distributed.rpc.internal import (
PythonUDF,
RPCExecMode,
_internal_rpc_pickler,
_build_rpc_profiling_key,
)
from torch.futures import Future
from torch.testing._internal.common_distributed import (
skip_if_lt_x_gpu,
captured_output,
)
from torch.testing._internal.common_utils import IS_MACOS, load_tests, sandcastle_skip_if
from torch.testing._internal.dist_utils import (
dist_init,
get_function_event,
initialize_pg,
wait_until_node_failure,
wait_until_pending_futures_and_users_flushed,
wait_until_owners_and_forks_on_rank,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_utils import TemporaryFileName
def foo_add():
return torch.add(torch.ones(1), torch.ones(1))
def udf_with_torch_ops(device=-1, use_record_function=False):
device_ctx = contextlib.suppress() if device == -1 else torch.cuda.device(device)
record_function_ctx = (
torch.autograd.profiler.record_function("##forward##")
if use_record_function
else contextlib.suppress()
)
with device_ctx, record_function_ctx:
t1, t2 = torch.ones(1), torch.ones(1)
t = torch.add(t1, t2)
t = torch.mul(t, t)
t = t.relu()
t = t.sigmoid()
# Events (operator invocations) that are expected to be run as part of the
# above function.
EXPECTED_REMOTE_EVENTS = [
"aten::ones",
"aten::ones",
"aten::add",
"aten::mul",
"aten::relu",
"aten::clamp_min",
"aten::sigmoid",
]
# Remote operations are prefixed with the following string for RPC profiling.
REMOTE_OP_STR = "#remote_op: "
VALUE_FUTURE = concurrent.futures.Future()
DONE_FUTURE = concurrent.futures.Future()
FIFTY_MIL_CYCLES = 50000000
_rpc_barrier_count = 0
def _increment_count():
global _rpc_barrier_count
_rpc_barrier_count += 1
def _reset_count():
global _rpc_barrier_count
_rpc_barrier_count = 0
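# Minimal stub agent and backend-registration handlers used to exercise
# the backend registry without starting a real RPC agent.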
class StubRpcAgent:
def __init__(self, world_size):
self.world_size = world_size
def get_worker_infos(self):
return {
rpc.WorkerInfo(name=worker_name(rank), id=rank)
for rank in range(self.world_size)
}
def _stub_construct_rpc_backend_options_handler(**kwargs):
return mock.Mock() # RpcBackendOptions.
def _stub_init_rpc_backend_handler(store, name, rank, world_size, rpc_backend_options):
return StubRpcAgent(world_size=world_size)
def set_value(value):
VALUE_FUTURE.set_result(value)
def wait_for_value_future():
return VALUE_FUTURE.result()
def set_and_check_done(value):
VALUE_FUTURE.set_result(value)
return DONE_FUTURE.result()
# The classes and functions below are used to test Python user-defined
# functions, classes, and methods over RPC.
TensorClass = namedtuple("TensorClass", ["tensors"])
class MyPickleClass:
def __init__(self):
self.t = None
def __getstate__(self):
(pickled_python_udf, tensors) = _internal_rpc_pickler.serialize(
PythonUDF(my_tensor_function, (torch.ones(2, 2), torch.ones(2, 2)), None)
)
return (pickled_python_udf, tensors)
def __setstate__(self, obj):
python_udf = _internal_rpc_pickler.deserialize(obj[0], obj[1])
result = python_udf.func(python_udf.args[0], python_udf.args[1])
self.t = result
def set(self, val):
self.t = val
class SlowPickleClass:
def __init__(self, t):
self.t = t
def __getstate__(self):
time.sleep(self.t)
return (self.t, )
def __setstate__(self, obj):
self.t = obj[0]
time.sleep(self.t)
class MyClass:
def __init__(self, a, delay=False):
self.a = a
# delay initialization to simulate errors if specified
if delay:
time.sleep(2)
def my_instance_method(self, b):
return self.a + b
@classmethod
def my_class_method(cls, d, e):
return d + e
@staticmethod
def my_static_method(f):
return f > 10
def increment_value(self, increment):
self.a += increment
def get_value(self):
return self.a
def my_slow_method(self, my_tensor_arg):
time.sleep(5)
return torch.add(self.a, my_tensor_arg)
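# Helper to call an instance method on the object an RRef owns; it must run
# on the owner, since it uses rref.local_value().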
def _call_method_on_rref(method, rref, *args, **kwargs):
return method(rref.local_value(), *args, **kwargs)
def get_rref_list(values):
return [RRef(MyClass(a)) for a in values]
def add_rref_to_value(rref, value):
return rref.to_here() + value
def run_nested_pickle(pickle_cls_instance, tensor):
return pickle_cls_instance.t + tensor
def build_sparse_tensor():
i = [[0, 1, 1], [2, 0, 2]]
v = [3, 4, 5]
return torch.sparse_coo_tensor(i, v, (2, 3))
def build_complex_tensors():
a = torch.ones(3, 3)
b = [a, a]
c = [b, b]
d = [a, b]
e = {a: d}
return [a, b, c, d, e]
def non_cont_test(t_view, t_cont):
if t_view.is_contiguous():
raise Exception('t_view is contiguous!')
if not t_cont.is_contiguous():
raise Exception('t_cont is not contiguous!')
if not torch.equal(t_view, t_cont):
raise Exception('t_view is not equal to t_cont!')
return t_view
def my_function(a, b, c):
return a + b + c
def my_tensor_function(a, b):
return a + b
def my_sleep_func(seconds=1):
time.sleep(seconds)
return torch.mul(torch.tensor(1), torch.tensor(1))
def my_complex_tensor_function(list_input, tensor_class_input, dict_input):
res = list_input[0]
for t in list_input:
res += t
for k, v in dict_input.items():
res += v
complex_tensors = tensor_class_input.tensors
return (res, complex_tensors[0], complex_tensors[1], complex_tensors[2])
def my_rref_function(rref_a, rref_b):
return rref_a.to_here() + rref_b.to_here()
def delayed_add(a, b, seconds=0.05):
time.sleep(seconds)
return a + b
def no_result():
print("do nothing")
def raise_or_inc(value):
if value.numel() == 2:
raise ValueError("Expected error")
return value + 1
def nested_rpc(dst):
return rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
def multi_layer_nested_async_rpc(dst, world_size, ttl):
# this method returns immediately without blocking the callee, but will
# generate additional requests.
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
rpc.rpc_async(
current_dst,
multi_layer_nested_async_rpc,
args=(next_dst, world_size, ttl - 1),
)
return 0
def nested_rref(dst):
return (
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)),
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 2)),
)
def nested_remote(dst):
rref = rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 3))
return rref.to_here()
def rref_forward_chain(dst, world_size, rref, ttl):
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
ret_rref = rpc.remote(
current_dst, rref_forward_chain, args=(next_dst, world_size, rref, ttl - 1)
)
return [ret_rref]
else:
return rref.to_here()
def rpc_return_rref(dst):
return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1))
def light_rpc():
return 0
def heavy_rpc(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
@torch.jit.script
def heavy_rpc_torchscript(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
@torch.jit.script
def my_script_func(tensor):
return torch.add(tensor, tensor)
expected_err = "Expected error"
def raise_func():
raise ValueError(expected_err)
@torch.jit.script
def raise_func_script(expected_err: str) -> torch.Tensor:
raise ValueError(expected_err)
expected_err_escape = "\nFirst line of error \n next line of error \n last line of error"
def raise_func_escape():
raise ValueError(expected_err_escape)
global_rref = None
def set_global_rref(rref):
global global_rref
global_rref = rref
def clear_global_rref():
global global_rref
global_rref = None
def check_rref_confirmed(rref):
return rref.confirmed_by_owner()
def get_rref_debug_info():
return _rref_context_get_debug_info()
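# Chain a callback onto an RPC future and surface its result through a
# concurrent.futures.Future.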
def add_use_future_cb(to, x, y, z):
out = concurrent.futures.Future()
def callback(fut):
out.set_result(fut.wait() + z)
fut = rpc.rpc_async(to, torch.add, args=(x, y))
fut.then(callback)
return out.result()
def get_events_from_profile(profile_rref):
return profile_rref.local_value().process_global_function_events
def add_use_future_set_result(to, x, y, z):
out = torch.futures.Future()
fut = rpc.rpc_async(to, torch.add, args=(x, y))
fut.then(lambda fut : out.set_result(fut.wait() + z))
return out.wait()
def add_use_future_nested_cb(to, x, y, z):
out = torch.futures.Future()
def callback(fut1):
fut2 = rpc.rpc_async(to, torch.add, args=(fut1.wait(), z))
fut2.then(lambda fut2 : out.set_result(fut2.wait()))
fut1 = rpc.rpc_async(to, torch.add, args=(x, y))
fut1.then(callback)
return out.wait()
def fail_on_fut(fut):
pass
@rpc.functions.async_execution
def async_raise_func():
raise RuntimeError("Expected error")
@rpc.functions.async_execution
def async_wrong_type():
return torch.zeros(2, 2)
@rpc.functions.async_execution
def async_add(to, x, y):
return rpc.rpc_async(to, torch.add, args=(x, y))
def slow_add(x, y, device="cpu"):
time.sleep(1)
x = x.to(device)
y = y.to(device)
return torch.add(x, y).cpu()
@rpc.functions.async_execution
def slow_async_add(to, x, y, device="cpu"):
return rpc.rpc_async(to, slow_add, args=(x, y, device))
@rpc.functions.async_execution
def async_add_with_future_ctor(to, x, y, z):
fut = torch.futures.Future()
rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut1: fut.set_result(fut1.wait() + z)
)
return fut
@rpc.functions.async_execution
def async_add_chained(to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
@rpc.functions.async_execution
def async_add_chained_multi(to, x, num, step):
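# Chain num then() callbacks; each one adds step to the previous result.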
fut = rpc.rpc_async(to, torch.add, args=(x, 0))
for _ in range(num):
fut = fut.then(lambda fut: fut.wait() + step)
return fut
@rpc.functions.async_execution
def async_add_nested(to, x, y, z):
return rpc.rpc_async(to, async_add, args=(to, x, y)).then(
lambda fut: fut.wait() + z
)
@rpc.functions.async_execution
def async_add_multi_fanout(to, x, num, step):
futs = []
for i in range(num):
if i == 0:
futs.append(rpc.rpc_async(to, torch.add, args=(x, step)))
else:
futs.append(rpc.rpc_async(to, torch.add, args=(0, step)))
# TODO: use torch.futures.collect_all
lock = Lock()
state = {"cnt": 0, "ret": torch.zeros_like(x)}
ret_future = torch.futures.Future()
def inc_and_set(fut):
with lock:
state["cnt"] += 1
state["ret"] += fut.wait()
if state["cnt"] >= len(futs):
ret_future.set_result(state["ret"])
for fut in futs:
fut.then(inc_and_set)
return ret_future
@rpc.functions.async_execution
def async_cuda_sleep_and_set_to_one(t):
device = t.device
original_stream = torch.cuda.current_stream(device)
new_stream = torch.cuda.Stream(device)
new_stream.wait_stream(original_stream)
with torch.cuda.stream(new_stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
t.fill_(1)
fut = Future(devices=[device])
fut.set_result(t)
return fut
@rpc.functions.async_execution
def async_cuda_nested_add(to, x, y, z):
def cb(fut):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
return fut.value() + z
return rpc.rpc_async(to, torch.add, args=(x, y)).then(cb)
# A custom Python class that contains a tensor, needed to see if we correctly
# use the Python pickler to extract tensors from non-IValue-convertible types.
class TensorWrapper:
__slots__ = ("tensor", "lock", "event")
def __init__(self, t):
self.tensor = t
# Add one non-picklable field, to ensure it's ignored/skipped.
self.lock = Lock()
self.event = torch.cuda.Event(enable_timing=True)
def increase(self, v):
with self.lock:
self.tensor += v
def sum(self):
with self.lock:
self.event.record()
return self.tensor.sum()
# Copied from test/test_cuda.py.
_cycles_per_ms = None
def get_cycles_per_ms():
"""Approximate number of cycles per millisecond for torch.cuda._sleep"""
global _cycles_per_ms
if _cycles_per_ms is None:
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
torch.cuda._sleep(1000000)
end.record()
end.synchronize()
_cycles_per_ms = 1000000 / start.elapsed_time(end)
return _cycles_per_ms
class AsyncExecutionClass:
@staticmethod
@rpc.functions.async_execution
def static_async_add(to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
@classmethod
@rpc.functions.async_execution
def class_async_add(cls, to, x, y, z):
ret_fut = torch.futures.Future()
rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: ret_fut.set_result(fut.wait() + z)
)
return ret_fut
@rpc.functions.async_execution
def bound_async_add(self, to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
def return_future():
return torch.futures.Future()
class FooBackendOptions(rpc.RpcBackendOptions):
def __init__(self, init_method):
# Must call the __init__ of the superclass (and do so directly,
# without using super()) because... pybind.
rpc.RpcBackendOptions.__init__(self)
self.init_method = init_method
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
class RpcTest(RpcAgentTestFixture):
@dist_init
def test_worker_id(self):
n = self.rank + 1
peer_rank = n % self.world_size
self_worker_info = rpc.get_worker_info()
peer_worker_info = rpc.get_worker_info(worker_name(peer_rank))
self.assertEqual(self_worker_info.name, worker_name(self.rank))
self.assertEqual(peer_worker_info.name, worker_name(peer_rank))
with self.assertRaisesRegex(RuntimeError, "Unknown destination worker"):
unknown_worker_id = rpc.get_worker_info("WorkerUnknown")
@dist_init
def test_get_worker_infos(self):
worker_infos = rpc.api._get_current_rpc_agent().get_worker_infos()
worker_names = {worker_info.name for worker_info in worker_infos}
expected_worker_names = {
worker_name(rank) for rank in range(self.world_size)
}
self.assertEqual(worker_names, expected_worker_names)
worker_ids = {worker_info.id for worker_info in worker_infos}
expected_worker_ids = set(range(self.world_size))
self.assertEqual(worker_ids, expected_worker_ids)
@dist_init
def test_self_add(self):
self_worker_info = rpc.get_worker_info()
self_worker_name = worker_name(self.rank)
fut = rpc.rpc_async(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
ret = rpc.rpc_sync(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertEqual(ret, torch.ones(2, 2) + 1)
@dist_init
def test_send_to_rank(self):
dst_rank = (self.rank + 1) % self.world_size
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test invalid ranks
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(self.world_size + 1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(-1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank + 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank - 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
@dist_init
def test_self_py_udf_remote(self):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1 + 3)
def _test_self_remote_rref_as_rpc_arg(self, dst):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
fut = rpc.rpc_async(dst, add_rref_to_value, args=(rref, torch.ones(2, 2)))
ret = rpc.rpc_sync(dst, add_rref_to_value, args=(rref, torch.ones(2, 2) + 1))
self.assertEqual(ret, torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2) + 1)
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2))
@dist_init
def test_self_remote_rref_as_rpc_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_self_remote_rref_as_rpc_arg(dst)
@dist_init
def test_self_remote_rref_as_self_rpc_arg(self):
self._test_self_remote_rref_as_rpc_arg(rpc.get_worker_info())
def _test_self_remote_rref_as_remote_arg(self, dst):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
ret_rref = rpc.remote(dst, add_rref_to_value, args=(rref, torch.ones(2, 2)))
self.assertEqual(
ret_rref.to_here(), torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2)
)
@dist_init
def test_self_remote_rref_as_remote_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_self_remote_rref_as_remote_arg(dst)
@dist_init
def test_rref_proxy_non_exist(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
msg = "has no attribute \'non_exist\'"
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_sync().non_exist()
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_async().non_exist()
with self.assertRaisesRegex(AttributeError, msg):
rref.remote().non_exist()
def _test_rref_proxy_tensor(self, dst):
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
expected = torch.ones(2, 2) + 1 + 3
self.assertEqual(expected.size(), rref.rpc_sync().size())
self.assertEqual(expected + 1, rref.rpc_async().add(1).wait())
self.assertEqual(expected.view(1, 4), rref.remote().view(1, 4).to_here())
@dist_init
def test_rref_proxy_tensor(self):
self._test_rref_proxy_tensor(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_tensor_self(self):
self._test_rref_proxy_tensor(rpc.get_worker_info())
@dist_init
def test_rref_proxy_reuse(self):
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
my_function,
args=(torch.ones(2, 2), 1, 3)
)
expected = torch.ones(2, 2) + 1 + 3
proxy_rpc_sync = rref.rpc_sync()
proxy_rpc_async = rref.rpc_async()
proxy_remote = rref.remote()
self.assertEqual(expected.size(), proxy_rpc_sync.size())
self.assertEqual(expected + 1, proxy_rpc_sync.add(1))
self.assertEqual(expected.view(1, 4), proxy_rpc_sync.view(1, 4))
self.assertEqual(expected.size(), proxy_rpc_async.size().wait())
self.assertEqual(expected + 3, proxy_rpc_async.add(3).wait())
self.assertEqual(expected.view(4, 1), proxy_rpc_async.view(4, 1).wait())
self.assertEqual(expected.size(), proxy_remote.size().to_here())
self.assertEqual(expected + 5, proxy_remote.add(5).to_here())
self.assertEqual(expected.view(-1), proxy_remote.view(-1).to_here())
def _test_rref_proxy_class(self, dst):
rref = rpc.remote(dst, MyClass, args=(7,))
expected = MyClass(7)
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
expected.increment_value(3)
self.assertEqual(None, rref.rpc_sync().increment_value(1))
self.assertEqual(None, rref.rpc_async().increment_value(1).wait())
self.assertEqual(None, rref.remote().increment_value(1).to_here())
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
self.assertEqual(
expected.my_instance_method(2),
rref.rpc_sync().my_instance_method(2)
)
self.assertEqual(
expected.my_instance_method(3),
rref.rpc_async().my_instance_method(3).wait()
)
self.assertEqual(
expected.my_instance_method(4),
rref.remote().my_instance_method(4).to_here()
)
self.assertEqual(
expected.my_static_method(9),
rref.rpc_sync().my_static_method(9)
)
self.assertEqual(
expected.my_static_method(10),
rref.rpc_async().my_static_method(10).wait()
)
self.assertEqual(
expected.my_static_method(11),
rref.remote().my_static_method(11).to_here()
)
self.assertEqual(
expected.my_class_method(2, torch.zeros(2, 2)),
rref.rpc_sync().my_class_method(2, torch.zeros(2, 2))
)
self.assertEqual(
expected.my_class_method(2, torch.ones(3, 3)),
rref.rpc_async().my_class_method(2, torch.ones(3, 3)).wait()
)
self.assertEqual(
expected.my_class_method(2, torch.ones(4, 4)),
rref.remote().my_class_method(2, torch.ones(4, 4)).to_here()
)
@dist_init
def test_rref_proxy_class(self):
self._test_rref_proxy_class(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_class_self(self):
self._test_rref_proxy_class(rpc.get_worker_info())
@dist_init
def test_self_remote_rref_as_self_remote_arg(self):
self._test_self_remote_rref_as_remote_arg(rpc.get_worker_info())
@mock.patch.object(torch.distributed.autograd, "_init")
@mock.patch.object(torch.distributed.rpc.api, "_set_and_start_rpc_agent")
@dist_init(setup_rpc=False)
def test_register_rpc_backend_and_set_and_start_rpc_backend(
self, mock_rpc_agent, mock_dist_autograd_init
):
backend_name = "stub_backend"
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
with self.assertRaisesRegex(
RuntimeError, "^RPC backend .+: already registered$"
):
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
rpc.init_rpc(
name="worker1",
backend=backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_duplicate_name(self):
with self.assertRaisesRegex(RuntimeError, "is not unique"):
store, _, _ = next(
torch.distributed.rendezvous(
self.init_method, rank=self.rank, world_size=self.world_size
)
)
rpc._init_rpc_backend(
backend=self.rpc_backend,
store=store,
name="duplicate_name",
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_duplicate_name_2(self):
with self.assertRaisesRegex(RuntimeError, "is not unique"):
rpc.init_rpc(
name=worker_name(self.rank % (self.world_size - 1)),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_reinit(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Wait for all init to complete.
dist.barrier()
# TODO: with TCP init, rank 0 raises Address already in use because
# rank 0 is the start daemon and the store is created before checking if
# RPC is already initialized in init_rpc.
if os.environ.get("RPC_INIT_WITH_TCP", None) == "1" and self.rank == 0:
expected_reinit_err = "Address already in use"
else:
expected_reinit_err = "is already initialized"
with self.assertRaisesRegex(RuntimeError, expected_reinit_err):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
def test_world_size_one(self):
if self.rank == 0:
rpc.init_rpc(
name="me",
backend=self.rpc_backend,
rank=0,
world_size=1,
rpc_backend_options=self.rpc_backend_options,
)
expect = torch.ones(2, 2) * 2
result = rpc.rpc_sync(
"me",
my_tensor_function,
args=(torch.ones(2, 2), torch.ones(2, 2))
)
self.assertEqual(expect, result)
expect = torch.ones(3, 3) * 2
result = rpc.rpc_async(
"me",
my_tensor_function,
args=(torch.ones(3, 3), torch.ones(3, 3))
).wait()
self.assertEqual(expect, result)
expect = torch.ones(4, 4) * 2
result = rpc.remote(
"me",
my_tensor_function,
args=(torch.ones(4, 4), torch.ones(4, 4))
).to_here()
self.assertEqual(expect, result)
rpc.shutdown()
@dist_init(setup_rpc=False)
def test_invalid_names(self):
from torch.distributed.rpc import WorkerInfo
worker_id = 0
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo("abc*", worker_id)
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo(" ", worker_id)
with self.assertRaisesRegex(RuntimeError, "must be non-empty"):
info = WorkerInfo("", worker_id)
# If the number in the message does not match, it is likely that the
# value of MAX_NAME_LEN in RPC WorkerInfo has changed.
with self.assertRaisesRegex(RuntimeError, "shorter than 128"):
info = WorkerInfo("".join(["a" for i in range(500)]), worker_id)
@dist_init
def test_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
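# Returns the id of the worker executing the call; used by test_int_callee
# below to check that a plain int rank can be used as the RPC destination.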
@staticmethod
def return_callee_id():
return rpc.get_worker_info().id
@dist_init
def test_int_callee(self):
dst_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(dst_rank, RpcTest.return_callee_id)
self.assertEqual(ret, dst_rank)
@dist_init
def test_add_with_id(self):
n = self.rank + 1
dst_rank = n % self.world_size
worker_info = rpc.get_worker_info(worker_name(dst_rank))
ret = rpc.rpc_sync(
worker_info, torch.add, args=(torch.ones(n, n), torch.ones(n, n))
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@dist_init
def test_scalar_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), n)
)
self.assertEqual(ret, (torch.ones(n, n) + n))
@dist_init
def test_async_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_nonzero(self):
n = self.rank + 1
dst_rank = n % self.world_size
x = torch.ones(self.world_size, self.world_size)
x[self.rank][self.rank] = 0
ret = rpc.rpc_sync(worker_name(dst_rank), torch.nonzero, args=(x,))
self.assertEqual(ret, x.nonzero())
@dist_init
def test_multi_rpc(self):
dst_rank = (self.rank + 1) % self.world_size
for i in range(20):
n = i + self.rank + 1
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
def _run_uneven_workload(self, num_repeat=30):
# worker0 drives and waits for worker1 and worker2
# throughout the test.
if self.rank == 0:
self.assertTrue(self.world_size >= 3)
# Phase 1: Only worker1 has workload.
dst = "worker1"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, heavy_rpc, args=(torch.ones(100, 100),))
futs.append(fut)
for fut in torch.futures.collect_all(futs).wait():
self.assertEqual(fut.wait(), 0)
# Phase 2: Only worker2 has workload.
# If join is not correctly implemented,
# worker2 should be closed by now.
dst = "worker2"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, heavy_rpc, args=(torch.ones(100, 100),))
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
def test_wait_all_workers(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload()
# worker0 calls this at the end after waiting for RPC responses.
# worker1/2 call this immediately and still have work after it.
# worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
# Wait before proceeding to shutdown to ensure worker0 RPCs make
# it through to other workers.
dist.barrier()
rpc.shutdown(graceful=False)
def test_wait_all_workers_twice(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload()
# worker0 calls this at the end after waiting for RPC responses.
# worker1/2 call this immediately and still have work after it.
# worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
rpc.api._wait_all_workers()
# Wait before proceeding to shutdown to ensure worker0 RPCs make
# it through to other workers.
dist.barrier()
rpc.shutdown(graceful=False)
@dist_init
def test_all_gather(self):
info = rpc.get_worker_info()
results = rpc.api._all_gather(info.id)
expected = {}
for info in rpc._get_current_rpc_agent().get_worker_infos():
expected[info.name] = info.id
self.assertEqual(expected, results)
@dist_init
def test_all_gather_timeout(self):
rpc._set_rpc_timeout(0.1)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError,
"timed out in _all_gather after 0\\.10 seconds"
):
rpc.api._all_gather(SlowPickleClass(0.5))
else:
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.api._all_gather(SlowPickleClass(0.5))
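# Helper for the rpc barrier tests below: resets a counter on the leader,
# has each participant increment it between two barriers, and, on the
# leader, verifies that the final count equals the number of participants.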
def _test_barrier_helper(self, info, names, multi_threaded=False):
names = sorted(names)
leader = names[0]
rpc.rpc_sync(leader, _reset_count)
if not multi_threaded and info.name == leader:
self.assertEqual(_rpc_barrier_count, 0)
rpc.api._barrier(names)
rpc.rpc_sync(leader, _increment_count)
rpc.api._barrier(names)
if not multi_threaded and info.name == leader:
self.assertEqual(_rpc_barrier_count, len(names))
@dist_init
def test_rpc_barrier_all(self):
# Test rpc barrier when called with full list of workers
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
names = [worker.name for worker in all_worker_info]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_subset(self):
# Test rpc barrier when processes are called with different subsets of the full list
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
if info.id % 2:
names = [worker.name for worker in all_worker_info if worker.id % 2]
else:
names = [worker.name for worker in all_worker_info if not worker.id % 2]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_partial_subset(self):
# Test rpc barrier when some processes are not involved in the barrier
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
if info.id % 2:
names = [worker.name for worker in all_worker_info if worker.id % 2]
else:
names = [f"worker{info.id}"]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_multithreaded(self):
# This test validates the implementation of barrier when multiple threads call into it
# We only need to check that it does not hang in this case
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
names = [worker.name for worker in all_worker_info]
threads = []
for _ in range(3):
th = threading.Thread(target=self._test_barrier_helper, args=(info, names, True))
threads.append(th)
th.start()
for th in threads:
th.join()
@dist_init
def test_graceful_shutdown_with_uneven_workload(self):
"""Test graceful termination."""
self._run_uneven_workload()
@dist_init(setup_rpc=False)
def test_shutdown_followed_by_rpc(self):
# Initialize RPC.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
rpc.shutdown()
with self.assertRaisesRegex(RuntimeError, "^RPC has not been initialized"):
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
@dist_init
def test_expected_src(self):
dst_rank = (self.rank + 1) % self.world_size
expected_src_rank = (self.rank - 1) % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), set_value, args=(self.rank,))
value = VALUE_FUTURE.result()
self.assertEqual(value, expected_src_rank)
@dist_init
def test_py_built_in(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), min, args=(n, n + 1, n + 2))
self.assertEqual(ret, min(n, n + 1, n + 2))
@dist_init
def test_py_user_defined(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(ret, my_function(n, n + 1, n + 2))
def test_build_rpc_profiling_key(self):
# Tests that the name that shows up as an Event in profiling RPCs has all
# the necessary information.
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
rpc_profiling_key = _build_rpc_profiling_key(
exec_mode, "foo", "worker0", "worker1"
)
self.assertIn(exec_mode.value, rpc_profiling_key)
self.assertIn("foo", rpc_profiling_key)
self.assertIn("worker0", rpc_profiling_key)
self.assertIn("worker1", rpc_profiling_key)
def check_profiling_info(self, self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode):
self.assertTrue(self_worker_name in rpc_event.name)
self.assertTrue(dst_worker_name in rpc_event.name)
if isinstance(func, torch.jit.ScriptFunction):
self.assertTrue(torch._jit_internal._qualified_name(func) in rpc_event.name)
else:
self.assertTrue(func.__name__ in rpc_event.name)
self.assertTrue(rpc_exec_mode.value in rpc_event.name)
self.assertEqual(rpc_event.count, 1)
@dist_init
def test_profiler_rpc_record_shapes(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
t1, t2 = torch.ones(100), torch.ones(100)
with torch.autograd.profiler.profile(record_shapes=True) as prof:
rpc.rpc_sync(dst_worker, torch.add, args=(t1, t2))
function_events = prof.function_events
remote_events = [event for event in function_events if event.is_remote]
remote_add_event = [
event for event in remote_events if "aten::add" in event.name
][0]
remote_add_input_shapes = remote_add_event.input_shapes
# Run profiler on equivalent local op and validate shapes are the same.
with torch.autograd.profiler.profile(record_shapes=True) as prof:
torch.add(t1, t2)
local_function_events = prof.function_events
local_add_event = [
event for event in local_function_events if "aten::add" in event.name
][0]
local_add_input_shapes = local_add_event.input_shapes
self.assertEqual(remote_add_input_shapes, local_add_input_shapes)
@dist_init
def test_profiler_rpc_memory(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with torch.autograd.profiler.profile(profile_memory=True) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
# If cpu_memory_usage was not propagated over the wire, this set would
# only contain 0 (indicating that no memory was profiled).
self.assertNotEqual({0}, event_cpu_mem_usages)
# No memory profiled if profile_memory=False
with torch.autograd.profiler.profile(profile_memory=False) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
self.assertEqual({0}, event_cpu_mem_usages)
@dist_init
def test_profiler_export_trace(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with torch.autograd.profiler.profile() as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
events = p.function_events
with TemporaryFileName() as fname:
path = fname
p.export_chrome_trace(path)
with open(path) as f:
trace = json.load(f)
event_names = [event['name'] for event in trace]
for expected_event_name in EXPECTED_REMOTE_EVENTS + [RPCExecMode.ASYNC.value]:
event_exists = any([expected_event_name in event_name for event_name in event_names])
self.assertTrue(event_exists)
@dist_init
def test_profiler_rpc_key_names(self):
# tests that remote events are properly prefixed with the RPC profiling key.
if self.rank != 1:
return
# Spawn multiple threads that send RPCs to ensure keys are correctly
# prefixed when there are multiple RPCs being created/in flight at the
# same time.
dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
def rpc_with_profiling(dst_worker):
with torch.autograd.profiler.profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
fut.wait()
events = prof.function_events
remote_event_names = {
event.name: event for event in events if event.is_remote
}
rpc_profiling_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
udf_with_torch_ops.__qualname__,
worker_name(self.rank),
dst_worker,
)
remote_event_name_set = set(EXPECTED_REMOTE_EVENTS)
for name, event in remote_event_names.items():
# Ensure that we have the expected key as part of the remote
# event.
self.assertTrue(name.startswith(rpc_profiling_key))
self.assertTrue(event.is_remote)
self.assertTrue(event.node_id == rpc.get_worker_info(dst_worker).id)
# Ensure that the remote event name also contains the operator.
operator_name_substr = name[len(rpc_profiling_key) :]
# Note: we don't assert that every remote event needs to be
# in the above set, the set is just a representative set of
# what we expect to see. The profiler can change and add more
# events, but we should always expect to see this representative
# set.
matching_event = {
remote_event_name
for remote_event_name in remote_event_name_set
if remote_event_name in operator_name_substr
}
remote_event_name_set -= matching_event
# The set should be empty, otherwise its contained elements did
# not show up in the remote profiler output.
self.assertTrue(
remote_event_name_set == set(),
f"Expected {remote_event_name_set} to be included in remote profiler output.",
)
for dst in dst_ranks:
dst_worker = worker_name(dst)
num_parallel_rpcs = 2
with concurrent.futures.ThreadPoolExecutor(
max_workers=num_parallel_rpcs
) as executor:
futs = [
executor.submit(rpc_with_profiling, dst_worker)
for _ in range(num_parallel_rpcs)
]
# Wait for workers to finish test
for fut in futs:
fut.result()
def _run_test_profiler_remote_events_profiled(self):
# Tests that we can successfully invoke the profiler on a remote node,
# and collect the remote events back in the local profiler.
if self.rank != 1:
return
dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
for dst in dst_ranks:
dst_worker = worker_name(dst)
with torch.autograd.profiler.profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
ret = fut.wait()
events = prof.function_events
rpc_event = get_function_event(events, RPCExecMode.ASYNC.value)
self.check_profiling_info(
worker_name(self.rank),
dst_worker,
udf_with_torch_ops,
rpc_event,
RPCExecMode.ASYNC,
)
remote_events = {event.name: event for event in events if event.is_remote}
rpc_profiling_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
udf_with_torch_ops.__qualname__,
worker_name(self.rank),
worker_name(dst),
)
for expected_remote_event_name in EXPECTED_REMOTE_EVENTS:
expected_key = rpc_profiling_key + REMOTE_OP_STR + expected_remote_event_name
self.assertTrue(expected_key in remote_events)
remote_event = remote_events[expected_key]
# Remote event should have a node ID corresponding to the worker
# it ran on.
self.assertEqual(remote_event.node_id, dst)
# Validate that the expected remote events show up in the profiling output.
def convert_remote_to_local(event_name):
remote_op_key = rpc_profiling_key + REMOTE_OP_STR
return event_name[
event_name.find(remote_op_key)
+ len(remote_op_key) :
]
remote_events_list = [
convert_remote_to_local(event.name)
for event in events
if convert_remote_to_local(event.name) in EXPECTED_REMOTE_EVENTS
]
self.assertEqual(
set(remote_events_list),
set(EXPECTED_REMOTE_EVENTS),
f"Mismatch between profiled events: {set(remote_events_list)} and expected events: {set(EXPECTED_REMOTE_EVENTS)}",
)
@dist_init
def test_profiler_remote_events_profiled(self):
self._run_test_profiler_remote_events_profiled()
@dist_init
def test_profiler_remote_events_profiled_single_threaded(self):
self._run_test_profiler_remote_events_profiled()
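# Issues a single async torch.mul RPC to dst and waits for the result;
# used by the profiler-with-autograd-context tests below.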
def run_profiling_workload(self, dst):
fut = rpc.rpc_async(
worker_name(dst),
torch.mul,
args=(
torch.tensor(1.0, requires_grad=True),
torch.tensor(1.0, requires_grad=True),
),
)
fut.wait()
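# Profiles a nested async RPC chain (this rank -> dst1 -> dst2) and checks
# that both the nested RPC and the remote add on dst2 are recorded with the
# correct node ids.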
def _run_rpc_profiling_async_function(self, device="cpu"):
if self.rank != 1:
return
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
x = torch.ones(2)
y = torch.ones(2)
with torch.autograd.profiler.profile() as prof:
ret = rpc.rpc_async(
dst1, slow_async_add, args=(dst2, x, y, device), timeout=20
)
out = ret.wait()
function_events = prof.function_events
# slow_async_add resulted in an RPC from dst1 -> dst2, so this should be
# recorded.
key_prefix = _build_rpc_profiling_key(
RPCExecMode.ASYNC, slow_async_add.__qualname__, worker_name(self.rank), dst1
)
nested_rpc_key_prefix = _build_rpc_profiling_key(
RPCExecMode.ASYNC, slow_add.__qualname__, dst1, dst2
)
expected_key = key_prefix + REMOTE_OP_STR + nested_rpc_key_prefix
remote_events = [event for event in function_events if event.is_remote]
rpc_remote_event = [
event for event in remote_events if event.name == expected_key
]
self.assertEqual(1, len(rpc_remote_event))
rpc_remote_event = rpc_remote_event[0]
self.assertEqual(rpc_remote_event.node_id, (self.rank + 1) % self.world_size)
# slow_async_add's RPC does an add on dst2, which should be reflected as well.
remote_add_key = (
expected_key + REMOTE_OP_STR + torch.jit._builtins._find_builtin(torch.add)
)
remote_add_event = [
event for event in remote_events if event.name == remote_add_key
]
self.assertEqual(1, len(remote_add_event))
remote_add_event = remote_add_event[0]
# Validate that node_id is dst2.
self.assertEqual(remote_add_event.node_id, (self.rank + 2) % self.world_size)
@dist_init
def test_rpc_profiling_async_function(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
self._run_rpc_profiling_async_function()
if torch.cuda.is_available():
dist.barrier()
self._run_rpc_profiling_async_function(device="cuda:0")
@dist_init
def test_rpc_profiling_async_function_single_threaded(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
self._run_rpc_profiling_async_function()
if torch.cuda.is_available():
dist.barrier()
self._run_rpc_profiling_async_function(device="cuda:0")
@dist_init
def test_rpc_profiling_remote_record_function(self):
# test that functions run over RPC with record_function show the expected
# profiled block.
if self.rank != 1:
return
dst_ranks = [i for i in range(self.world_size) if i != self.rank]
for dst_rank in dst_ranks:
dst_worker = worker_name(dst_rank)
with torch.autograd.profiler.profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=(-1, True))
fut.wait()
function_events = prof.function_events
record_function_remote_event = [
evt for evt in function_events if "##forward##" in evt.name
]
self.assertEqual(1, len(record_function_remote_event))
record_function_remote_event = record_function_remote_event[0]
self.assertEqual(record_function_remote_event.node_id, dst_rank)
# cpu_children only returns direct children, so here we get all
# children recursively.
def get_cpu_children(event):
if not event.cpu_children:
return []
# Copy the list so we neither mutate the profiler's event tree nor
# iterate over the entries appended in the loop below.
cpu_children = list(event.cpu_children)
for e in event.cpu_children:
cpu_children.extend(get_cpu_children(e))
return cpu_children
remote_children = get_cpu_children(record_function_remote_event)
# Get local children and verify parity.
with torch.autograd.profiler.profile() as prof:
udf_with_torch_ops(-1, True)
local_function_events = prof.function_events
local_record_function_event = [
evt for evt in local_function_events if "##forward##" in evt.name
][0]
local_children = get_cpu_children(local_record_function_event)
local_children_names = [
evt.name for evt in local_children
]
REMOTE_OP_STR = "#remote_op: "
def convert_remote_to_local(event_name):
remote_op_key = REMOTE_OP_STR
return event_name[
event_name.find(remote_op_key) + len(remote_op_key) :
]
for evt in remote_children:
local_name = convert_remote_to_local(evt.name)
self.assertTrue(local_name in local_children_names)
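# Verifies that the torch.mul issued by run_profiling_workload shows up as
# a remote event attributed to dst in the given profiler results.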
def validate_profiling_workload(self, dst, prof):
def convert_remote_to_local(event_name):
return event_name[event_name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR) :]
events = prof.function_events
remote_events = {
convert_remote_to_local(event.name): event
for event in events
if event.is_remote
}
self.assertTrue("aten::mul" in remote_events)
remote_mul_event = remote_events["aten::mul"]
self.assertEqual(remote_mul_event.node_id, dst)
self.check_profiling_info(
worker_name(self.rank),
worker_name(dst),
torch.mul,
remote_mul_event,
RPCExecMode.ASYNC,
)
def _run_test_profiler_with_autograd_context(self):
dst = (self.rank + 1) % self.world_size
if self.rank == 1:
# Cases where we can double wrap messages with profiling information and autograd info.
with dist_autograd.context() as context_id:
with torch.autograd.profiler.profile() as prof:
self.run_profiling_workload(dst)
self.validate_profiling_workload(dst, prof)
# Ensure that flipped order of ctx managers results in events being
# recorded as expected.
with torch.autograd.profiler.profile() as prof:
with dist_autograd.context() as context_id:
self.run_profiling_workload(dst)
self.validate_profiling_workload(dst, prof)
@dist_init
def test_profiler_with_autograd_context_single_threaded(self):
self._run_test_profiler_with_autograd_context()
@dist_init
def test_profiler_with_autograd_context(self):
self._run_test_profiler_with_autograd_context()
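# Shared driver for the profiler tests: runs func over RPC in the given
# execution mode (sync/async/remote), optionally inside a
# record_function("foo") scope, and checks that the RPC event and its
# remote events are recorded with the expected node ids and ordering.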
def _profiler_test_with_rpc(self, rpc_exec_mode, func, args, use_record_function=False, dst=None):
dst = dst if dst is not None else (self.rank + 1) % self.world_size
# only run profiler on rank 1.
if self.rank == 1:
with torch.autograd.profiler.profile() as prof:
record_function_ctx_mgr = (
contextlib.suppress()
if not use_record_function
else torch.autograd.profiler.record_function(
"foo"
)
)
with record_function_ctx_mgr as rf:
if rpc_exec_mode == RPCExecMode.SYNC:
rpc.rpc_sync(worker_name(dst), func, args=args)
elif rpc_exec_mode == RPCExecMode.ASYNC:
fut = rpc.rpc_async(worker_name(dst), func, args=args)
fut.wait()
else:
self.assertTrue(rpc_exec_mode == RPCExecMode.REMOTE)
rref = rpc.remote(worker_name(dst), func, args=args)
rref.to_here()
# To avoid flakiness, wait for the RRef to be profiled. This
# means that we received the acknowledgement of successful
# creation on the owner and ran the callbacks responsible
# for recording the profiling event.
rref._get_profiling_future().wait()
events = prof.function_events
rpc_event = get_function_event(events, rpc_exec_mode.value)
# verify Node ID for this rpc event.
self.assertEqual(rpc_event.node_id, self.rank)
# Ensure recording of remote events.
remote_events = {event for event in events if event.node_id == dst} - {rpc_event}
self.assertGreaterEqual(len(remote_events), 1)
for remote_event in remote_events:
self.assertEqual(remote_event.node_id, dst)
if use_record_function:
scope_event = get_function_event(events, "foo")
# Since RPC call is within the scope, its CPU interval should be
# contained within foo's interval.
self.assertLessEqual(scope_event.time_range.start, rpc_event.time_range.start)
self.assertGreaterEqual(scope_event.time_range.end, rpc_event.time_range.end)
# the sender, dest worker, function run, and type of RPC should all
# be recorded.
self_worker_name = worker_name(self.rank)
dst_worker_name = worker_name(dst)
self.check_profiling_info(self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode)
if use_record_function:
# verify order by ensuring that the outer context comes
# before the rpc event.
foo_event_idx = next(i for i, event in enumerate(events) if "foo" in event.name)
rpc_event_idx = next(i for i, event in enumerate(events) if rpc_exec_mode.value in event.name)
self.assertLess(foo_event_idx, rpc_event_idx)
def _run_test_profiler_with_sync_rpc_udf(self):
self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,),
use_record_function=True)
@dist_init
def test_profiler_with_sync_rpc_udf(self):
self._run_test_profiler_with_sync_rpc_udf()
@dist_init
def test_profiler_with_sync_rpc_udf_single_threaded(self):
self._run_test_profiler_with_sync_rpc_udf()
def _run_test_profiler_with_sync_rpc_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
@dist_init
def test_profiler_with_sync_rpc_builtin(self):
self._run_test_profiler_with_sync_rpc_builtin()
@dist_init
def test_profiler_with_sync_rpc_builtin_single_threaded(self):
self._run_test_profiler_with_sync_rpc_builtin()
def _run_test_profiler_with_async_rpc_udf(self):
self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,),
use_record_function=True)
@dist_init
def test_profiler_with_async_rpc_udf(self):
self._run_test_profiler_with_async_rpc_udf()
@dist_init
def test_profiler_with_async_rpc_udf_single_threaded(self):
self._run_test_profiler_with_async_rpc_udf()
def _run_test_profiler_with_async_rpc_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
@dist_init
def test_profiler_with_async_rpc_builtin(self):
self._run_test_profiler_with_async_rpc_builtin()
@dist_init
def test_profiler_with_async_rpc_builtin_single_threaded(self):
self._run_test_profiler_with_async_rpc_builtin()
def _run_test_profiler_with_remote_udf(self):
self._profiler_test_with_rpc(RPCExecMode.REMOTE, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_sleep_func, args=(1,), use_record_function=True
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_sleep_func, args=(1,), dst=self.rank
)
@dist_init
def test_profiler_with_remote_udf(self):
self._run_test_profiler_with_remote_udf()
@dist_init
def test_profiler_with_remote_udf_single_threaded(self):
self._run_test_profiler_with_remote_udf()
def _run_test_profiler_with_remote_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE,
torch.mul,
args=(torch.ones(1), torch.ones(1)),
dst=self.rank,
)
@dist_init
def test_profiler_with_remote_builtin(self):
self._run_test_profiler_with_remote_builtin()
@dist_init
def test_profiler_with_remote_builtin_single_threaded(self):
self._run_test_profiler_with_remote_builtin()
def _run_test_profiler_with_script_async_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.ASYNC,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
@dist_init
def test_profiler_with_script_async_rpc(self):
self._run_test_profiler_with_script_async_rpc()
@dist_init
def test_profiler_with_script_async_rpc_single_threaded(self):
self._run_test_profiler_with_script_async_rpc()
def _run_test_profiler_with_script_sync_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.SYNC, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.SYNC,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
@dist_init
def test_profiler_with_script_sync_rpc(self):
self._run_test_profiler_with_script_sync_rpc()
@dist_init
def test_profiler_with_script_sync_rpc_single_threaded(self):
self._run_test_profiler_with_script_sync_rpc()
def _run_test_profiler_with_script_remote_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.REMOTE,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),), dst=self.rank
)
@dist_init
def test_profiler_with_script_remote_rpc(self):
self._run_test_profiler_with_script_remote_rpc()
@dist_init
def test_profiler_with_script_remote_rpc_single_threaded(self):
self._run_test_profiler_with_script_remote_rpc()
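# An event counts as top-level if it starts after every previously seen
# event on its thread has ended; nested child events are skipped.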
def _assert_top_level_events(self, process_global_events, expected_top_level_event_names):
top_level_event_names = []
for thread_local_events in process_global_events:
# Get top-level events from all events that happened on a thread.
last_end_time = 0
for event in thread_local_events:
event_name = event.name
time_range = event.time_range
if time_range.start > last_end_time:
top_level_event_names.append(event_name)
last_end_time = time_range.end
top_level_event_names = sorted(top_level_event_names)
expected_top_level_event_names = sorted(expected_top_level_event_names)
self.assertEqual(
top_level_event_names,
expected_top_level_event_names,
f"Expected events {expected_top_level_event_names}, but got {top_level_event_names}",
)
@dist_init
def test_server_process_global_profiler(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker_name = worker_name(dst_rank)
x = torch.tensor(1)
y = torch.tensor(2)
outer_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
outer_profile_rref.rpc_sync().__enter__()
rpc.rpc_sync(dst_worker_name, torch.add, (x, y))
inner_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
inner_profile_rref.rpc_sync().__enter__()
rpc.rpc_sync(dst_worker_name, torch.sub, (x, y))
inner_profile_rref.rpc_sync().__exit__(None, None, None)
outer_profile_rref.rpc_sync().__exit__(None, None, None)
inner_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (inner_profile_rref,))
expected_inner_events = ['aten::sub']
expected_outer_events = expected_inner_events + ['aten::add']
self._assert_top_level_events(inner_events, expected_inner_events)
outer_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (outer_profile_rref,))
self._assert_top_level_events(outer_events, expected_outer_events)
inner_profile_rref.rpc_sync().key_averages()
outer_profile_rref.rpc_sync().key_averages()
@dist_init
def test_async_record_function_double_end_callbacks(self):
num_sleep_seconds = 1
if self.rank == 1:
# Validate that calling the function twice results in an error.
with torch.autograd.profiler.profile() as pf:
with torch.autograd.profiler.record_function("foo") as rf:
fut = rpc.rpc_async(
worker_name(0), my_sleep_func, args=(num_sleep_seconds,)
)
rf._call_end_callbacks_on_future(fut)
with self.assertRaisesRegex(
RuntimeError, "can only be called once."
):
rf._call_end_callbacks_on_future(fut)
fut.wait()
@dist_init
def test_async_record_function_cbs_jit_call(self):
if self.rank == 1:
with torch.autograd.profiler.profile() as pf:
key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
torch._jit_internal._qualified_name(my_script_func),
"worker1",
"worker0",
)
with torch.autograd.profiler.record_function(key) as rf:
fut = rpc.rpc_async(
worker_name(0), my_script_func, args=(torch.tensor(1),)
)
# Intentionally calling record_function internals
fut = torch.ops.profiler._call_end_callbacks_on_jit_fut(rf.handle, fut)
result = fut.wait()
# Validate that the profiling future returns the same value as the RPC
# future.
expected = torch.add(torch.tensor(1), torch.tensor(1))
self.assertEqual(result, expected)
events = pf.function_events
rpc_event = get_function_event(
events, torch._jit_internal._qualified_name(my_script_func)
)
self.assertTrue(torch._jit_internal._qualified_name(my_script_func) in rpc_event.name)
@dist_init
def test_py_class_constructor(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), MyClass, args=(n,))
self.assertEqual(ret.a, n)
@dist_init
def test_py_class_instance_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass(2).my_instance_method, args=(n,)
)
self.assertEqual(ret, MyClass(2).my_instance_method(n))
@dist_init
def test_py_class_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_class_method, args=(n, n + 1)
)
self.assertEqual(ret, MyClass.my_class_method(n, n + 1))
@dist_init
def test_py_class_static_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_static_method, args=(n + 10,)
)
self.assertEqual(ret, MyClass.my_static_method(n + 10))
@dist_init
def test_py_multi_async_call(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker_info = rpc.get_worker_info(worker_name(dst_rank))
fut1 = rpc.rpc_async(dst_worker_info, MyClass.my_static_method, args=(n + 10,))
fut2 = rpc.rpc_async(dst_worker_info, min, args=(n, n + 1, n + 2))
self.assertEqual(fut1.wait(), MyClass.my_static_method(n + 10))
self.assertEqual(fut2.wait(), min(n, n + 1, n + 2))
@dist_init
def test_py_no_return_result(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), no_result)
self.assertEqual(ret, no_result())
@dist_init
def test_py_tensors(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, my_tensor_function(torch.ones(n, n), torch.ones(n, n)))
@dist_init
def test_py_tensors_multi_async_call(self):
futs = []
n = self.rank + 1
dst_rank = n % self.world_size
for i in range(100):
fut = rpc.rpc_async(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(i, i), torch.ones(i, i)),
)
futs.append(fut)
j = 0
for val in torch.futures.wait_all(futs):
self.assertEqual(
val, my_tensor_function(torch.ones(j, j), torch.ones(j, j))
)
j += 1
@dist_init
def test_py_tensors_in_container(self):
n = self.rank + 1
dst_rank = n % self.world_size
a = [torch.ones(n, n), torch.ones(n, n)]
b = TensorClass(build_complex_tensors())
c = {"foo": torch.ones(n, n), "bar": torch.ones(n, n)}
ret = rpc.rpc_sync(
worker_name(dst_rank), my_complex_tensor_function, args=(a, b, c)
)
self.assertEqual(ret, my_complex_tensor_function(a, b, c))
@dist_init
def test_py_nested_pickle(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
run_nested_pickle,
args=(MyPickleClass(), torch.ones(2, 2)),
)
m = MyPickleClass()
m.set(my_tensor_function(torch.ones(2, 2), torch.ones(2, 2)))
self.assertEqual(ret, run_nested_pickle(m, torch.ones(2, 2)))
@dist_init
def test_py_function_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
with self.assertRaises(TypeError):
ret = rpc.rpc_sync(worker_name(dst_rank), no_result, args=(10,))
@dist_init
def test_py_raise_in_user_func(self):
with captured_output() as (_, err):
# This barrier prevents a race condition where the main thread has
# not entered the context manager when the remote function runs.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func)
with self.assertRaisesRegex(ValueError, expected_err):
fut.wait()
# This barrier prevents a race condition where the main thread exits the
# context manager before the remote function has run.
dist.barrier()
# Validate that trainers log errors when running functions.
stderr_lines = err.getvalue()
self.assertTrue(expected_err in stderr_lines)
@dist_init
def test_py_raise_in_user_func_escaped_str(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func_escape)
try:
fut.wait()
except ValueError as e:
msg = str(e)
# Ensure newlines are unescaped to provide a better repr of error.
self.assertEqual(msg, msg.encode("utf-8").decode("unicode_escape"))
else:
self.assertTrue(False, "expected raise_func_escape to raise ValueError.")
@dist_init
def test_nested_rpc(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
nested_rpc,
args=(worker_name(self.rank),),
)
self.assertEqual(ret, torch.ones(2, 2) + 1)
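# Fires `repeat` async RPCs at the next rank, checks that every call
# returns 0, and prints the elapsed wall-clock time.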
def _stress_test_rpc(self, f, repeat=1000, args=()):
n = self.rank + 1
dst_rank = n % self.world_size
futs = []
tik = time.time()
for _ in range(repeat):
fut = rpc.rpc_async(worker_name(dst_rank), f, args=args)
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
tok = time.time()
print(
"Rank {} finished testing {} times in {} seconds.".format(
self.rank, repeat, tok - tik
)
)
@dist_init
def test_stress_light_rpc(self):
self._stress_test_rpc(light_rpc)
@dist_init
def test_stress_heavy_rpc(self):
self._stress_test_rpc(heavy_rpc, repeat=20, args=(torch.ones(100, 100),))
@dist_init
def test_stress_heavy_rpc_torchscript(self):
self._stress_test_rpc(heavy_rpc_torchscript, repeat=20, args=(torch.ones(100, 100),))
@dist_init
def test_builtin_remote_ret(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(rref.to_here(), torch.ones(n, n) * 2)
@dist_init
def test_builtin_remote_self(self):
rref = rpc.remote(
worker_name(self.rank),
torch.add,
args=(torch.ones(2, 2), torch.ones(2, 2)),
)
self.assertEqual(rref.local_value(), torch.ones(2, 2) * 2)
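# Issues m rpc.remote calls with arguments produced by args_fn/kwargs_fn
# and compares each RRef result against a local invocation of fn.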
def _test_multi_remote_call(self, fn, args_fn=lambda x: (), kwargs_fn=lambda x: {}):
m = 10
n = self.rank + 1
dst_rank = n % self.world_size
rrefs = []
expected = []
for i in range(m):
n = n + i
rrefs.append(
rpc.remote(
worker_name(dst_rank),
fn,
args=args_fn(n),
kwargs=kwargs_fn(n),
)
)
expected.append(fn(*args_fn(n), **kwargs_fn(n)))
for i in range(m):
self.assertEqual(rrefs[i].to_here(), expected[i])
@dist_init
def test_multi_builtin_remote_ret(self):
def args_fn(n):
return (torch.ones(n, n), torch.ones(n, n))
self._test_multi_remote_call(torch.add, args_fn=args_fn)
@dist_init
def test_py_udf_remote(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(rref.to_here(), my_function(n, n + 1, n + 2))
@dist_init
def test_multi_py_udf_remote(self):
def kwargs_fn(n):
return {"a": torch.ones(n, n), "b": torch.ones(n, n), "c": torch.ones(n, n)}
self._test_multi_remote_call(my_function, kwargs_fn=kwargs_fn)
@dist_init
def test_py_rref_args(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init
def test_py_rref_args_user_share(self):
n = self.rank + 1
owner_rank = n % self.world_size
user_rank = (n + 1) % self.world_size
rref_a = rpc.remote(
worker_name(owner_rank), my_function, args=(torch.ones(n, n), 2, 0)
)
rref_b = rpc.remote(
worker_name(owner_rank), my_function, args=(torch.ones(n, n), 1, 0)
)
rref_c = rpc.remote(
worker_name(user_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init
def test_py_rpc_rref_args(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), my_function, args=(torch.ones(n, n), 2, 0)
)
rref_b = rpc.remote(
worker_name(dst_rank), my_function, args=(torch.ones(n, n), 1, 0)
)
c = rpc.rpc_sync(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(c, torch.ones(n, n) + 4)
@dist_init
def test_nested_remote(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.remote(
worker_name(dst_rank1),
nested_remote,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 3)
@dist_init
def test_nested_rref(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref_of_rrefs = rpc.remote(
worker_name(dst_rank1),
nested_rref,
args=(worker_name(dst_rank2),),
)
# Say C has 2 OwnerRRefs.
# B has 2 UserRRefs to those 2 OwnerRRefs, respectively.
# This call is effectively A asking B to share its 2 UserRRefs.
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), torch.ones(2, 2) + 1)
self.assertEqual(rrefs[1].to_here(), torch.ones(2, 2) + 2)
@dist_init
def test_nested_rref_stress(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
all_rrefs = []
for _ in range(20):
all_rrefs.append(
rpc.remote(
worker_name(dst_rank1),
nested_rref,
args=(worker_name(dst_rank2),),
)
)
for i in range(20):
rref_of_rrefs = all_rrefs[i]
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), torch.ones(2, 2) + 1)
self.assertEqual(rrefs[1].to_here(), torch.ones(2, 2) + 2)
@dist_init
def test_multi_layer_nested_async_rpc(self):
# This test will exit right away, but there will be a chain of async
# RPCs. The termination algorithm should detect those messages properly.
# Otherwise, some peer could exit early, leaving others to timeout
# errors or connection closed errors.
ttl = 20
n = self.rank + 1
dst_rank = n % self.world_size
multi_layer_nested_async_rpc(dst_rank, self.world_size, ttl)
@dist_init
def test_remote_with_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
# check ref to other workers
rref = rpc.remote(worker_name(dst_rank), raise_func)
with self.assertRaises(ValueError):
rref.to_here()
# check ref to itself
rref = rpc.remote(worker_name(self.rank), no_result, args=(10,))
with self.assertRaises(TypeError):
rref.to_here()
@dist_init
def test_rpc_return_rref(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.rpc_sync(
worker_name(dst_rank1),
rpc_return_rref,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1)
@dist_init
def test_rref_forward_chain(self):
ttl = 8
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
ret_rref = rref_forward_chain(dst_rank, self.world_size, rref, ttl)
for i in range(ttl):
self.assertEqual(len(ret_rref), 1)
ret_rref = ret_rref[0].to_here()
ret = ret_rref
self.assertEqual(ret, torch.add(torch.ones(n, n), 1))
@dist_init
def test_local_rref_no_fork(self):
local_rref = RRef(35)
self.assertEqual(local_rref.local_value(), 35)
@dist_init
def test_local_value_not_on_owner(self):
# ensure that an error message is thrown if a user tries to call
# local_value() on a non-owning node.
next_rank = (self.rank + 1) % self.world_size
rref = rpc.remote(
worker_name(next_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
with self.assertRaisesRegex(
RuntimeError, (
fr"For UserRRef\(rref_id=GloballyUniqueId\(created_on={self.rank}, local_id=0\), "
fr"fork_id=GloballyUniqueId\(created_on={self.rank}, local_id=1\)\), "
r"can't call localValue\(\) on user "
fr"WorkerInfo\(id={self.rank}, name={worker_name(self.rank)}\). "
fr"Call it on owner WorkerInfo\(id={next_rank}, name={worker_name(next_rank)}\)"
)
):
rref.local_value()
@dist_init
def test_return_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_list = rpc.rpc_sync(
worker_name(dst_rank), get_rref_list, args=([1, 2, 3],)
)
for rref in rref_list:
rpc.rpc_sync(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, 10),
)
rets = [
rpc.rpc_sync(
rref.owner(), _call_method_on_rref, args=(MyClass.get_value, rref)
)
for rref in rref_list
]
self.assertEqual(rets, [11, 12, 13])
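# Exercises RRef._get_type(): the first call launches an RPC to the owner,
# repeated calls are served from the cache without launching new RPCs, and
# with blocking=False the same cached Future is returned each time.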
@dist_init
def _test_rref_type(self, blocking):
def launched_rpc(events):
expected_name = f"rpc_{RPCExecMode.ASYNC.value}#_rref_typeof_on_owner"
return any([e.name.startswith(expected_name) for e in events])
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, torch.add, args=(torch.ones(2), 1))
with torch.autograd.profiler.profile() as p:
t = rref._get_type(blocking=blocking)
if not blocking:
t = t.wait()
self.assertTrue(launched_rpc(p.function_events))
expected_type = type(torch.ones(2))
self.assertEqual(t, expected_type)
futs = []
def verify(fut):
self.assertEqual(fut.value(), expected_type)
with torch.autograd.profiler.profile() as p:
for _ in range(10):
t = rref._get_type(blocking=blocking)
if not blocking:
futs.append(t)
t.add_done_callback(verify)
t = t.wait()
self.assertEqual(t, expected_type)
if not blocking:
# Note that cached calls with blocking=False all return the same
# cached original future.
first_fut = futs[0]
for f in futs[1:]:
self.assertTrue(f is first_fut)
# Ensure we never launch another RPC, other than for the very
# first call.
self.assertFalse(launched_rpc(p.function_events))
self.assertEqual(t, type(torch.ones(2)))
rref = rpc.remote(dst, MyClass, args=(0,))
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, MyClass)
def test_rref_type_blocking(self):
self._test_rref_type(blocking=True)
def test_rref_type_non_blocking(self):
self._test_rref_type(blocking=False)
@dist_init
def _test_rref_type_with_error(self, blocking):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, raise_func)
# Blocking: error raised inline
if blocking:
with self.assertRaisesRegex(ValueError, "Expected error"):
rref._get_type(blocking=blocking)
else:
# Non-blocking: Immediately return future, block on wait
fut = rref._get_type(blocking=blocking)
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
def test_rref_type_with_error_blocking(self):
self._test_rref_type_with_error(blocking=True)
def test_rref_type_with_error_non_blocking(self):
self._test_rref_type_with_error(blocking=False)
@dist_init
def _test_rref_type_owner(self, blocking):
rref = RRef(torch.ones(2) + 1)
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, type(torch.ones(2)))
rref = RRef(MyClass(0))
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, MyClass)
def test_rref_type_owner_blocking(self):
self._test_rref_type_owner(blocking=True)
def test_rref_type_owner_non_blocking(self):
self._test_rref_type_owner(blocking=False)
@staticmethod
def _slow_add(x, y):
time.sleep(1)
return x + y
@dist_init
def test_rref_type_slow_init(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, RpcTest._slow_add, args=(torch.ones(2), 1))
self.assertEqual(rref._get_type(), type(torch.ones(2)))
@dist_init
def test_owner_equality(self):
a = RRef(40)
b = RRef(50)
other_rank = (self.rank + 1) % self.world_size
other_a = rpc.remote(
worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
)
other_b = rpc.remote(
worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
)
other_a.to_here() # to ensure clean termination
other_b.to_here()
self.assertNotEqual(a.owner(), 23)
self.assertEqual(other_a.owner(), other_b.owner())
self.assertNotEqual(a.owner(), other_a.owner())
self.assertEqual(other_a.owner(), other_a.owner())
self.assertEqual(other_a.owner(), other_b.owner())
self.assertEqual(a.owner(), a.owner())
self.assertEqual(a.owner(), b.owner())
self.assertEqual(a.owner(), rpc.get_worker_info())
x = dict()
x[a.owner()] = a
x[other_a.owner()] = other_a
self.assertEqual(x[a.owner()], a)
self.assertEqual(x[b.owner()], a)
self.assertEqual(x[other_a.owner()], other_a)
self.assertEqual(x[other_b.owner()], other_a)
self.assertEqual(len(x), 2)
@dist_init
def test_pass_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker = worker_name(dst_rank)
rref = RRef(40)
self.assertEqual(
rpc.rpc_sync(dst_worker, add_rref_to_value, args=(rref, 50)), 90
)
self.assertEqual(
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 50)).wait(), 90
)
self.assertEqual(
rpc.remote(dst_worker, add_rref_to_value, args=(rref, 50)).to_here(), 90
)
@dist_init
def test_remote_same_worker(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init(setup_rpc=True)
def test_call_method_on_rref(self):
"""
Tests that it is possible to call an instance method on a remote object
by using rref.owner() as the destination of the call.
"""
vals = [10, 2, 5, 7]
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
# creates a remote object
rref = rpc.remote(dst_worker, MyClass, args=(vals[0],))
# modifies state of the remote object
rpc.rpc_sync(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[1]),
)
rpc.rpc_async(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[2]),
).wait()
rpc.remote(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[3]),
).to_here()
# queries state of the remote object
result = rpc.rpc_sync(
dst_worker, _call_method_on_rref, args=(MyClass.get_value, rref)
)
self.assertEqual(result, sum(vals))
# Notice `rpc.api.shutdown()` accesses
# `_delete_all_user_and_unforked_owner_rrefs` through
# `torch.distributed.rpc.api`, so patching
# `torch.distributed.rpc._delete_all_user_and_unforked_owner_rrefs` will
# not help.
@mock.patch.object(torch.distributed.rpc.api, "_delete_all_user_and_unforked_owner_rrefs")
def _test_rref_leak(self, _mock_delete_all_user_and_unforked_owner_rrefs, ignore_leak):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Wait for all init to complete.
dist.barrier()
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.ones(2, 2), 1),
)
import torch.distributed.rpc.api as api
if ignore_leak:
api._ignore_rref_leak = True
rpc.shutdown(graceful=True)
else:
api._ignore_rref_leak = False
with self.assertRaisesRegex(RuntimeError, "Leaking RRef"):
rpc.shutdown(graceful=True)
@dist_init(setup_rpc=False)
def test_rref_leak(self):
self._test_rref_leak(ignore_leak=False)
@dist_init(setup_rpc=False)
def test_ignore_rref_leak(self):
self._test_rref_leak(ignore_leak=True)
@dist_init
def test_rref_str(self):
rref1 = RRef(self.rank)
id_class = "GloballyUniqueId"
self.assertEqual(
"OwnerRRef({}(created_on={}, local_id=0))".format(id_class, self.rank), rref1.__str__()
)
dst_rank = (self.rank + 1) % self.world_size
rref2 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
self.assertEqual(
rref2.__str__(),
"UserRRef(RRefId = {0}(created_on={1}, local_id=1), ForkId = {0}(created_on={1}, local_id=2))".format(
id_class, self.rank
),
)
@dist_init
def test_rref_get_future(self):
# Tests that we can obtain the future corresponding to the creation of
# the RRef on the remote end.
if self.rank == 0:
# Builtin
rref = rpc.remote(worker_name(1), torch.add, args=(1, 1))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# UDF
rref = rpc.remote(worker_name(1), foo_add, args=())
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# Script
rref = rpc.remote(worker_name(1), my_script_func, args=(torch.tensor(1), ))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
@dist_init
def test_rref_context_debug_info(self):
# This test checks local states that are modified by remote workers.
# This means that we need a barrier before and after every check.
# The barrier before the check makes sure that all previous states are
# cleared globally; the barrier after ensures that no subsequent state
# changes leak into the current check.
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Check 1: local RRef does not update owners_ map or add a pending user.
#################################################
rref1 = RRef(self.rank)
# don't need a barrier here as local RRef is handled by this thread
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertIn("num_pending_users", info)
# An RRef on a local value is not added to the context until it is shared over RPC.
self.assertEqual(0, int(info["num_owner_rrefs"]))
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after the check 1
dist.barrier()
# Check 2: Sharing RRef as an arg should update owners_ map
###########################################################
dst_rank = (self.rank + 1) % self.world_size
rpc.rpc_sync(worker_name(dst_rank), set_global_rref, args=(rref1,))
# barrier before check 2
wait_until_pending_futures_and_users_flushed()
dist.barrier()
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertEqual(1, int(info["num_owner_rrefs"]))
# no pending users since the fork is finished
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after check 2
dist.barrier()
# clear states for check 2
rpc.rpc_sync(worker_name(dst_rank), clear_global_rref)
# Wait for owner rref to be cleared.
while int(info["num_owner_rrefs"]) != 0:
info = _rref_context_get_debug_info()
time.sleep(0.1)
dist.barrier()
# Check 3: rpc.remote call should update owners_ map
####################################################
rref2 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
rref3 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
rref2.to_here()
rref3.to_here()
# barrier before check 3
wait_until_pending_futures_and_users_flushed()
dist.barrier()
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertEqual(2, int(info["num_owner_rrefs"]))
# no pending users since the fork is finished
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after check 3
dist.barrier()
@dist_init
def test_disable_gil_profiling(self):
# Test that rpc.enable_gil_profiling(False) results in
# GIL wait time not being recorded.
# GIL profiling should be disabled by default.
dst_rank = (self.rank + 1) % self.world_size
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertRaises(KeyError, lambda: info["agent.gil_average_wait_time_us"])
rpc.enable_gil_profiling(True)
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertIn("agent.gil_average_wait_time_us", info)
@dist_init(setup_rpc=False)
def test_local_shutdown(self):
        # Test that we can start RPC and then immediately shut down locally
        # without sending any messages.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# pass in graceful=False to ensure that we don't wait for other workers.
rpc.shutdown(graceful=False)
@dist_init
def test_debug_info(self):
        # Only test keys in this test case; values should be covered by
        # individual module debug info tests.
import torch.distributed.autograd as dist_autograd
info = _get_debug_info()
rref_info = _rref_context_get_debug_info()
agent_info = rpc.api._get_current_rpc_agent().get_debug_info()
autograd_info = dist_autograd._get_debug_info()
common_keys = rref_info.keys() & agent_info.keys() & autograd_info.keys()
self.assertEqual(0, len(common_keys))
expected = {}
expected.update(rref_info)
expected.update(agent_info)
expected.update(autograd_info)
# NB: Key ordering is only preserved in python 3.6+. So here, we
# manually check keys are equal.
for key in expected.keys():
self.assertIn(key, info.keys())
for key in info.keys():
self.assertIn(key, expected.keys())
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_handle_send_exceptions(self):
# test that if a callee node has gone down, we raise an appropriate
# exception instead of just crashing.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc._set_rpc_timeout(10)
# This barrier is needed to ensure that some workers do not exit before
        # others have been brought up, for non-ProcessGroupAgent backends.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
if self.rank == 1:
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
# allow destination worker to exit without joining
error_str = self.get_shutdown_error_regex()
wait_until_node_failure(dst_rank, error_str)
fut = rpc.rpc_async(dst_worker, torch.add, args=(torch.ones(1), 3))
# Shutdown sequence is not very well defined and as a result
# we can see any of the error messages defined in get_shutdown_error_regex.
with self.assertRaisesRegex(RuntimeError, error_str):
fut.wait()
# exit all workers non-gracefully.
rpc.shutdown(graceful=False)
@dist_init
def test_deadlock(self):
# this test is copied from https://github.com/pytorch/pytorch/issues/45089
if self.rank == 1:
dst1 = worker_name((self.rank + 1) % self.world_size)
x = torch.ones(2)
y = torch.ones(2)
rpc.rpc_async(dst1, RpcTest._slow_add, args=(x, y), timeout=15).wait()
dist_initialized = dist.is_initialized()
if not dist_initialized:
dist.init_process_group(
backend="gloo",
init_method=self.file_init_method,
rank=self.rank,
world_size=self.world_size,
)
@dist_init(setup_rpc=False)
def test_local_shutdown_with_rpc(self):
# test that we can start RPC, send RPCs, and then run local shutdown.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
# A barrier is needed to ensure that all RPCs are processed.
# Otherwise, some RPCs can timeout since the receiving end
# has terminated.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
# pass in graceful=False to ensure that we don't wait for other workers.
rpc.shutdown(graceful=False)
@dist_init(setup_rpc=False)
def test_set_and_get_default_rpc_timeout(self):
timeout = 0.5
# A new `RpcBackendOptions` is constructed
# when accessing `self.rpc_backend_options`.
rpc_backend_options = self.rpc_backend_options
rpc_backend_options.rpc_timeout = timeout
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
set_timeout = rpc.get_rpc_timeout()
self.assertEqual(timeout, set_timeout)
rpc.shutdown()
@dist_init
def test_default_timeout_used(self):
"""
Tests that if no timeout is passed into rpc_async and rpc_sync, then the
default timeout is used.
"""
dst_rank = (self.rank + 1) % self.world_size
rpc._set_rpc_timeout(0.001) # 1 ms
# futures should time out and be marked with an exception indicating it as such.
futs = [
rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=())
for _ in range(10)
]
expected_error = self.get_timeout_error_regex()
for fut in futs:
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# ensure that if a new timeout is set old futures don't time out but new ones do.
rpc._set_rpc_timeout(200) # 200 seconds
# create a longstanding RPC.
fut1 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
# now, set a short timeout.
rpc._set_rpc_timeout(0.001)
# fut2 should time out, fut1 should not.
fut2 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
with self.assertRaisesRegex(RuntimeError, expected_error):
fut2.wait()
fut1.wait()
# Zero timeout means infinity, so future should run to completion.
rpc._set_rpc_timeout(0)
rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=()).wait()
# reset to default timeout so shutdown messages can process cleanly.
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init
def test_rpc_timeouts(self):
# TODO: enable timeouts for rpc.remote/RRef (https://github.com/pytorch/pytorch/issues/33803)
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
timeout = 0.1 # 100 ms
expected_error = self.get_timeout_error_regex()
# Test async UDF
fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if there is no timeout and we use the default
# RPC timeout.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,)).wait()
# Test sync UDF
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
# Ensure run to completion if there is no timeout and we use the default
# RPC timeout.
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
# If we set a default timeout for RPCs, it should be respected, though
# still overridden if we pass in a different timeout to the APIs.
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,))
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
# The RPCs should run to completion since we override the timeout.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=5).wait()
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=5)
# Passing in a zero timeout should ensure that the RPC won't time out.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=0).wait()
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=0)
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
def test_dist_init_decorator(self):
@dist_init(setup_rpc=False)
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
@dist_init
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
def test_use_rpc_pickler(self):
class TestPickler:
pass
test_pickler = TestPickler()
with _use_rpc_pickler(test_pickler):
self.assertTrue(torch.distributed.rpc.api._default_pickler is test_pickler)
self.assertTrue(
torch.distributed.rpc.api._default_pickler is _internal_rpc_pickler
)
@dist_init
def test_wait_all(self):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
self.assertTrue(len(_thread_local_var.future_list) == 1)
self.assertTrue(isinstance(_thread_local_var.future_list[0], torch._C.Future))
self.assertTrue(fut.done())
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_multiple_call(self):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
for i in range(20):
fut = rpc.rpc_async(dst, torch.add, (torch.ones(i, i), 1))
res = rpc.rpc_sync(dst, torch.add, (torch.ones(i, i), 1))
self.assertEqual(res, torch.ones(i, i) + 1)
self.assertEqual(fut.wait(), torch.ones(i, i) + 1)
self.assertTrue(len(_thread_local_var.future_list) == 20)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_timeout(self):
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
timeout = 0.1 # 100 ms
fut = rpc.rpc_async(dst, my_sleep_func, args=(1,), timeout=timeout)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_user_func(self):
with self.assertRaises(ValueError):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, raise_func)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_body(self):
with self.assertRaises(ValueError):
with _wait_all():
raise_func()
self.assertFalse(hasattr(_thread_local_var, "future_list"))
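    # Class-level event shared with the remote side by the wait_all_exit_early
    # tests below: timed_out_rpc() blocks on it until the caller sets the event,
    # which lets those tests observe errors from other futures while one RPC is
    # still outstanding.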
timed_out_rpc_event = None
@staticmethod
def timed_out_rpc():
RpcTest.timed_out_rpc_event.wait()
@dist_init
def test_wait_all_exit_early_python(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, raise_func)
fut3 = rpc.rpc_async(dst, raise_func)
# We should receive the error from fut2
with self.assertRaisesRegex(ValueError, expected_err):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_wait_all_exit_early_builtin(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
fut3 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
# We should receive the error from fut2
with self.assertRaisesRegex(RuntimeError, "size of tensor"):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_wait_all_exit_early_script_function(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
fut3 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
# We should receive the error from fut2
with self.assertRaisesRegex(RuntimeError, expected_err):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_function_not_on_callee(self):
        # Test that if a function does not exist on a callee, we don't crash;
        # instead we get an AttributeError indicating that the func does not exist.
this_module = sys.modules[__name__]
caller_worker = "worker0"
callee_worker = "worker1"
if self.rank == 1:
            # Use delattr to remove the binding of a func on this node
delattr(this_module, "foo_add")
# notify remote end that we have removed it.
rpc.rpc_sync(caller_worker, set_value, args=(self.rank,))
if self.rank == 0:
# func exists on caller, but not callee.
# wait for remote end to remove the binding of foo_add func.
wait_for_value_future()
            # Ensure that we have the attribute on this module. Otherwise, the
            # test could fail due to a caller-side pickling error.
self.assertTrue(hasattr(this_module, "foo_add"))
with self.assertRaisesRegex(
AttributeError, "RPC pickler does not serialize"
):
rpc.rpc_sync(callee_worker, foo_add, args=())
@dist_init
def test_non_garbage_collected_user_rref_due_to_local_circular_dependency(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
a = MyClass(1)
b = MyClass(2)
# This is to make Python not garbage collect a and b.
a.other = b
b.other = a
n = self.rank
a.rref = rpc.remote(
dst_worker_name,
torch.add,
args=(torch.ones(n, n), 2)
)
@dist_init(setup_rpc=False)
def test_use_rref_after_shutdown(self):
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
# pass in graceful=True to ensure that local UserRRefs are deleted.
rpc.shutdown(graceful=True)
with self.assertRaisesRegex(
RuntimeError, "Cannot call to_here\\(\\) on it after deletion."
):
rref.to_here()
with self.assertRaisesRegex(
RuntimeError, "Cannot call fork an UserRRef after deletion."
):
import torch.distributed.rpc.internal as internal
internal.serialize(rref)
@staticmethod
def _return_gpu_tensor():
return torch.rand(3, 3).cuda(0)
@staticmethod
def _return_gpu_tensor_list():
return [torch.rand(3, 3).cuda(0), torch.rand(3, 3).cuda(1)]
@staticmethod
def _gpu_tensor_list_arg(tensor_list):
return torch.rand(3, 3)
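    # Helper that creates a UserRRef owned by a third worker (rank + 2), so the
    # confirmation tests below can pass it on to yet another worker (rank + 1).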
def _create_rref(self):
owner_rank = (self.rank + 2) % self.world_size
return rpc.remote(
worker_name(owner_rank),
torch.add,
args=(torch.zeros(2, 2), 1)
)
@dist_init
def test_user_rrefs_confirmed(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret = rpc.rpc_sync(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret, True)
@dist_init
def test_user_rrefs_confirmed_remote(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret_rref = rpc.remote(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret_rref.to_here(), True)
@dist_init
def test_rref_py_pickle_not_supported(self):
local_rref = RRef(35)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, "Can not pickle rref in python pickler"):
torch.save(local_rref, fname)
@dist_init
def test_remote_throw(self):
rref = rpc.remote(worker_name((self.rank + 1) % self.world_size),
raise_or_inc,
args=(torch.ones(2),))
with self.assertRaisesRegex(Exception, ".*Expected error.*"):
rref.to_here()
@dist_init
def test_non_cont_tensors(self):
if self.rank == 0:
# Create a non-contiguous tensor.
t = torch.rand(5, 5)
t_view = t.narrow(1, 2, 2)
self.assertFalse(t_view.is_contiguous())
t_cont = t_view.contiguous()
self.assertTrue(t_cont.is_contiguous())
self.assertEqual(t_view, t_cont)
# Send non-cont tensor over RPC.
next_rank = (self.rank + 1) % self.world_size
t_ret = rpc.rpc_sync(worker_name(next_rank), non_cont_test, args=(t_view, t_cont))
# Verify the returned tensor.
self.assertEqual(t_view, t_ret)
self.assertFalse(t_ret.is_contiguous())
@dist_init
def test_callback_simple(self):
set_by_cb = concurrent.futures.Future()
n = self.rank + 1
def callback(fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
set_by_cb.set_result(ret.clone() + 1)
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
self.assertEqual(set_by_cb.result(), torch.ones(n, n) * 2 + 1)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_wrong_arg_num(self):
set_by_cb = concurrent.futures.Future()
n = self.rank + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
cb_fut = fut.then(my_function)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
with self.assertRaisesRegex(
RuntimeError,
"my\\_function\\(\\) missing 2 required positional arguments"
):
cb_fut.wait()
@dist_init
def test_callback_wrong_arg_type(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut0 = rpc.rpc_async(dst, torch.add, args=(torch.ones(2, 2), 1))
fut1 = fut0.then(lambda x: x + 1)
with self.assertRaisesRegex(
RuntimeError,
"unsupported operand type\\(s\\) for \\+"
):
fut1.wait()
@dist_init
def test_callback_multi(self):
num_cbs = 10
n = self.rank + 1
def callback(idx, fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
return ret + idx
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
cb_futs = []
for idx in range(num_cbs):
cb_futs.append(fut.then(partial(callback, idx)))
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
for idx in range(num_cbs):
self.assertEqual(
cb_futs[idx].wait(),
torch.ones(n, n) * 2 + idx
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_chain(self):
n = self.rank + 1
dst = worker_name(n % self.world_size)
def callback(fut):
return fut.wait() + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), 1)
)
num_cbs = 20
for _ in range(num_cbs):
fut = fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs)
@dist_init
def test_callback_in_rpc(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
add_use_future_cb,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_callback_with_ret(self):
dst = worker_name((self.rank + 1) % self.world_size)
def callback(fut0):
fut2 = rpc.rpc_async(
dst,
torch.add,
args=(fut0.wait(), 1)
).then(lambda fut1: fut1.wait() + 1)
return fut2.wait()
fut3 = rpc.rpc_async(
dst,
torch.add,
args=(torch.ones(2, 2), 1)
).then(callback)
self.assertEqual(fut3.wait(), torch.ones(2, 2) + 3)
@dist_init
def test_callback_with_error(self):
dst = worker_name((self.rank + 1) % self.world_size)
def callback(fut0):
with self.assertRaisesRegex(ValueError, "Expected error"):
fut0.wait()
raise RuntimeError("Another expected error")
fut1 = rpc.rpc_async(dst, raise_func).then(callback)
with self.assertRaisesRegex(RuntimeError, "Another expected error"):
fut1.wait()
@dist_init
def test_callback_none(self):
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
TypeError,
"incompatible function arguments."
):
rpc.rpc_async(dst, raise_func).then(None)
@dist_init
def test_add_done_callback(self):
set_by_cb = False
n = self.rank + 1
def callback(fut):
nonlocal set_by_cb
fut.wait()
set_by_cb = True
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
fut.add_done_callback(callback)
fut_then = fut.then(lambda _: True)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
        # We have no guarantee that the add_done_callback fn will execute before
        # the test finishes, so add a 'then' callback that runs afterwards and
        # wait on it to guarantee the first callback has run.
fut_then.wait()
self.assertTrue(set_by_cb)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_mark_future_twice(self):
fut = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.zeros(2, 2), 1)
)
self.assertEqual(fut.wait(), torch.zeros(2, 2) + 1)
with self.assertRaisesRegex(
RuntimeError,
"Future can only be marked completed once"
):
fut.set_result(1)
@dist_init
def test_pickle_future(self):
fut = torch.futures.Future()
errMsg = "Can not pickle torch.futures.Future"
dst = worker_name((self.rank + 1) % self.world_size)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_sync(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_async(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.remote(dst, fail_on_fut, args=(fut,))
@dist_init
def test_future_done(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, args=(torch.zeros(2), 1))
fut.wait()
self.assertTrue(fut.done())
@dist_init
def test_future_done_exception(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, raise_func)
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
self.assertTrue(fut.done())
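    # Shared helper for the future-callback tests: dst1 runs `func`, which is
    # expected to issue a nested RPC to dst2 and combine the intermediate results
    # through a Future callback before returning the final value checked here.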
def _test_future_cb(self, func):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
func,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_future_in_rpc(self):
self._test_future_cb(add_use_future_set_result)
@dist_init
def test_future_nested_callback(self):
self._test_future_cb(add_use_future_nested_cb)
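    # Dispatch helper used by the tests below: runs `fn` on worker `to` via
    # rpc_sync, rpc_async, or rpc.remote depending on `mode`, and always returns
    # the resolved value.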
def _run_func_in_mode(self, to, fn, mode, args=None, kwargs=None):
if mode == RPCExecMode.SYNC:
return rpc.rpc_sync(to, fn, args=args, kwargs=kwargs)
elif mode == RPCExecMode.ASYNC:
return rpc.rpc_async(to, fn, args=args, kwargs=kwargs).wait()
elif mode == RPCExecMode.REMOTE:
return rpc.remote(to, fn, args=args, kwargs=kwargs).to_here()
def _test_async_function_raise(self, mode):
with self.assertRaisesRegex(RuntimeError, "Expected error"):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_raise_func,
mode
)
@dist_init
def test_async_function_raise(self):
self._test_async_function_raise(RPCExecMode.SYNC)
@dist_init
def test_async_function_raise_async(self):
self._test_async_function_raise(RPCExecMode.ASYNC)
@dist_init
def test_async_function_raise_remote(self):
self._test_async_function_raise(RPCExecMode.REMOTE)
def _test_async_function_wrong_return_type(self, mode):
errMsg = (
"Functions decorated with @rpc\\.async_function must return a "
"torch\\.futures\\.Future object,"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_wrong_type,
mode
)
@dist_init
def test_async_function_wrong_return_type(self):
self._test_async_function_wrong_return_type(RPCExecMode.SYNC)
@dist_init
def test_async_function_wrong_return_type_async(self):
self._test_async_function_wrong_return_type(RPCExecMode.ASYNC)
@dist_init
def test_async_function_wrong_return_type_remote(self):
self._test_async_function_wrong_return_type(RPCExecMode.REMOTE)
@dist_init
def test_async_function_simple(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(dst1, async_add, args=(dst2, torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
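    # Shared driver for the async-execution tests: dst1 runs `fn`, which chains
    # additional work on dst2, and the final value should equal ones(2, 2) + 3.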
def _test_async_function(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
args = (dst2, torch.ones(2, 2), 1, 2)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + 3)
@dist_init
def test_async_function_with_future_ctor(self):
self._test_async_function(async_add_with_future_ctor)
@dist_init
def test_async_function_with_future_ctor_remote(self):
self._test_async_function(
async_add_with_future_ctor,
RPCExecMode.REMOTE
)
@dist_init
def test_async_function_chained(self):
self._test_async_function(async_add_chained)
@dist_init
def test_async_function_chained_remote(self):
self._test_async_function(async_add_chained, RPCExecMode.REMOTE)
@dist_init
def test_async_function_nested(self):
self._test_async_function(async_add_nested)
@dist_init
def test_async_function_nested_remote(self):
self._test_async_function(async_add_nested, RPCExecMode.REMOTE)
@dist_init
def test_async_static_method(self):
self._test_async_function(AsyncExecutionClass.static_async_add)
@dist_init
def test_async_static_method_remote(self):
self._test_async_function(
AsyncExecutionClass.static_async_add,
RPCExecMode.REMOTE
)
@dist_init
def test_async_class_method(self):
self._test_async_function(AsyncExecutionClass.class_async_add)
@dist_init
def test_async_class_method_remote(self):
self._test_async_function(
AsyncExecutionClass.class_async_add,
RPCExecMode.REMOTE
)
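    # Exercises the RRef proxy APIs (rref.rpc_sync() / rref.rpc_async() /
    # rref.remote()) against the static, class, and bound async methods of
    # AsyncExecutionClass; the three results are summed before being checked.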
    def _test_async_class_rref_proxy(self, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
rref = rpc.remote(dst1, AsyncExecutionClass)
x = torch.ones(2, 2)
y = torch.ones(2, 2) + 1
if mode == RPCExecMode.SYNC:
ret = rref.rpc_sync().static_async_add(dst2, x, x, y)
ret += rref.rpc_sync().class_async_add(dst2, x, x, y)
ret += rref.rpc_sync().bound_async_add(dst2, x, x, y)
elif mode == RPCExecMode.ASYNC:
ret = rref.rpc_async().static_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().class_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().bound_async_add(dst2, x, x, y).wait()
elif mode == RPCExecMode.REMOTE:
ret = rref.remote().static_async_add(dst2, x, x, y).to_here()
ret += rref.remote().class_async_add(dst2, x, x, y).to_here()
ret += rref.remote().bound_async_add(dst2, x, x, y).to_here()
self.assertEqual(ret, 3 * 4 * x)
@dist_init
def test_async_class_rref_proxy(self):
        self._test_async_class_rref_proxy()
@dist_init
def test_async_class_rref_proxy_async(self):
        self._test_async_class_rref_proxy(mode=RPCExecMode.ASYNC)
@dist_init
def test_async_class_rref_proxy_remote(self):
        self._test_async_class_rref_proxy(mode=RPCExecMode.REMOTE)
def _test_async_function_multi(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
num = 20
step = 3
args = (dst2, torch.ones(2, 2), num, step)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + num * step)
@dist_init
def test_async_function_multi_chained(self):
self._test_async_function_multi(async_add_chained_multi)
@dist_init
def test_async_function_multi_chained_async(self):
self._test_async_function_multi(
async_add_chained_multi,
RPCExecMode.ASYNC
)
@dist_init
def test_async_function_multi_chained_remote(self):
self._test_async_function_multi(
async_add_chained_multi,
RPCExecMode.REMOTE
)
@dist_init
def test_async_function_multi_fanout(self):
self._test_async_function_multi(async_add_multi_fanout)
@dist_init
def test_async_function_multi_fanout_async(self):
self._test_async_function_multi(
async_add_multi_fanout,
RPCExecMode.ASYNC
)
@dist_init
def test_async_function_multi_fanout_remote(self):
self._test_async_function_multi(
async_add_multi_fanout,
RPCExecMode.REMOTE
)
def _test_return_future(self, mode):
with self.assertRaisesRegex(
RuntimeError,
"Can not pickle torch.futures.Future"
):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
return_future,
mode
)
@dist_init
def test_return_future(self):
self._test_return_future(RPCExecMode.SYNC)
@dist_init
def test_return_future_async(self):
self._test_return_future(RPCExecMode.ASYNC)
@dist_init
def test_return_future_remote(self):
self._test_return_future(RPCExecMode.REMOTE)
@dist_init
def test_rref_timeout(self):
# This test is similar to ones in FaultyProcessGroupTest, but is meant to be
# run with other backends besides ProcessGroup.
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# 10 ms timeout
rref = rpc.remote(dst_worker, my_sleep_func, args=(2, ), timeout=0.01)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
wait_until_owners_and_forks_on_rank(1, 1, rank=1)
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_pg_then_rpc does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_pg_then_rpc(self):
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_rpc_then_pg does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_rpc_then_pg(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init
def test_wait_all_with_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
@dist_init
def test_wait_all_with_partial_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, torch.add, args=(torch.ones(2), 1)))
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"Test does not work with TCP init, see https://github.com/pytorch/pytorch/issues/46491",
)
def test_init_rpc_twice(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
# Wait for all init to complete.
dist.barrier()
# Ensure rpc initialization works again.
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# Verify RPCs work after re-init.
dst = worker_name((self.rank + 1) % self.world_size)
rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
rpc.rpc_sync(dst, foo_add, args=())
rpc.shutdown()
def test_wrong_types(self):
with self.assertRaisesRegex(
TypeError,
"Argument backend must be a member of BackendType",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend="TENSORPIPE",
)
with self.assertRaisesRegex(
TypeError,
"Argument rpc_backend_options must be an instance of RpcBackendOptions",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=self.rpc_backend,
rpc_backend_options={"init_method": self.init_method}
)
def test_cannot_infer_backend_from_options(self):
# An exception should be raised if the backend isn't specified but
# options are given which are not an instance of any of the known
# agents' option classes.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(TypeError, "Could not infer backend for options"):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
@dist_init
def test_owner_rref_backward(self):
dst = worker_name((self.rank + 1) % self.world_size)
t1 = torch.rand(10, 10, requires_grad=True)
rref = rpc.RRef(t1.sum() + t1.sum())
rref.backward()
expected_grad = torch.ones_like(t1) * 2
self.assertEqual(expected_grad, t1.grad)
with dist_autograd.context() as context_id:
t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
rref = rpc.RRef(t2.sum())
rref.backward(context_id)
self.assertEqual(expected_grad, dist_autograd.get_gradients(context_id)[t1])
# Double backward.
with dist_autograd.context() as context_id:
t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
rref = rpc.RRef(t2.sum())
rref.backward(context_id, retain_graph=True)
rref.backward(context_id)
self.assertEqual(expected_grad * 2, dist_autograd.get_gradients(context_id)[t1])
# Test errors.
with self.assertRaisesRegex(RuntimeError, "tensors does not require grad and does not have a grad_fn"):
rpc.RRef(torch.rand(10)).backward()
with self.assertRaisesRegex(RuntimeError, "grad can be implicitly created only for scalar outputs"):
rpc.RRef(torch.rand(10, requires_grad=True)).backward()
with self.assertRaisesRegex(RuntimeError, "Could not find autograd context with id: 100"):
rpc.RRef(torch.rand(10, requires_grad=True).sum()).backward(100)
with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
rpc.RRef("foo").backward()
@staticmethod
def _sum(x):
return x.sum()
@staticmethod
def _identity(x):
return x
@dist_init
def test_user_rref_backward(self):
dst = worker_name((self.rank + 1) % self.world_size)
t = torch.rand(10, requires_grad=True)
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._sum, args=(t,))
rref.backward(context_id, retain_graph=True)
rref.backward(context_id)
self.assertEqual(torch.ones_like(t) * 2, dist_autograd.get_gradients(context_id)[t])
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._identity, args=("foo",))
with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
rref.backward(context_id)
with self.assertRaisesRegex(RuntimeError, "User RRefs require 'dist_autograd_ctx_id' to be specified"):
rref.backward()
@dist_init(setup_rpc=False)
def test_shutdown_errors(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
if self.rank != 0:
og_func = rpc.api._broadcast_to_followers
og_rref_func = rpc.api._delete_all_user_and_unforked_owner_rrefs
# Monkey-patch _broadcast_to_followers to fail, which would ensure
# _all_gather on leader raises an exception.
def raise_error(sequence_id, objects_map):
og_func(sequence_id, objects_map)
raise RuntimeError('simulation')
# Monkey-patch _delete_all_user_and_unforked_owner_rrefs to fail,
# which would ensure barrier is not called on followers.
def rref_error():
raise RuntimeError('simulation rref')
try:
rpc.api._broadcast_to_followers = raise_error
rpc.api._delete_all_user_and_unforked_owner_rrefs = rref_error
with self.assertRaisesRegex(RuntimeError, 'simulation rref'):
rpc.shutdown()
finally:
rpc.api._broadcast_to_followers = og_func
rpc.api._delete_all_user_and_unforked_owner_rrefs = og_rref_func
else:
with self.assertRaisesRegex(RuntimeError, 'timed out in _all_gather'):
rpc.shutdown()
dist.barrier()
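# RPC tests that additionally require CUDA devices on the participating workers.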
class CudaRpcTest(RpcAgentTestFixture):
@skip_if_lt_x_gpu(2)
@dist_init
def test_profiler_remote_cuda(self):
if self.rank != 1:
return
dst_cuda_0 = (self.rank + 1) % self.world_size
dst_cuda_1 = (self.rank + 2) % self.world_size
dst_worker_cuda_0 = worker_name(dst_cuda_0)
dst_worker_cuda_1 = worker_name(dst_cuda_1)
with torch.autograd.profiler.profile(use_cuda=True) as p:
fut1 = rpc.rpc_async(dst_worker_cuda_0, udf_with_torch_ops, args=(0, ))
fut2 = rpc.rpc_async(dst_worker_cuda_1, udf_with_torch_ops, args=(1, ))
fut1.wait()
fut2.wait()
def get_name(event):
return event.name[event.name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR):]
function_events = p.function_events
for event in function_events:
if event.is_async:
self.assertEqual(0, event.cuda_time_total)
self.assertEqual([], event.kernels)
self.assertEqual(0, event.cuda_time)
else:
if event.node_id == 1:
continue
self.assertTrue(event.node_id in [dst_cuda_0, dst_cuda_1])
if get_name(event) in EXPECTED_REMOTE_EVENTS:
self.assertGreater(event.cuda_time_total, 0)
self.assertEqual(1, len(event.kernels))
kernel = event.kernels[0]
if event.node_id == dst_cuda_0:
self.assertEqual(kernel.device, 0)
if event.node_id == dst_cuda_1:
self.assertEqual(kernel.device, 1)
self.assertGreater(event.cuda_time, 0)
# Validate that EXPECTED_REMOTE_EVENTS is a subset of remotely profiled
# events.
remote_events = [event for event in function_events if event.is_remote]
remote_event_names = [get_name(event) for event in remote_events if get_name(event) in EXPECTED_REMOTE_EVENTS]
self.assertEqual(set(remote_event_names), set(EXPECTED_REMOTE_EVENTS))
class FaultyAgentRpcTest(RpcAgentTestFixture):
# no faulty_messages defined so this fails all retryable messages - see
# faulty_rpc_agent_test_fixture.py for the list of retryable messages.
@dist_init(messages_to_delay={})
def test_check_failed_messages(self):
if self.rank == 0:
dst_worker_b = worker_name((self.rank + 1) % self.world_size)
dst_worker_c = worker_name((self.rank + 2) % self.world_size)
# Worker0 sends RPC to Worker1 and creates an RRef there
rref = rpc.remote(dst_worker_b, torch.add, args=(torch.ones(2, 2), torch.ones(2, 2)))
# Worker0 sends an RPC to Worker2 with the RRef as an arg
rpc.remote(dst_worker_c, add_rref_to_value, args=(rref, torch.ones(2, 2)))
# check if the output is as expected
self.assertEqual(rref.to_here(), torch.add(torch.ones(2, 2), torch.ones(2, 2)))
# explicitly delete all User RRefs
_delete_all_user_and_unforked_owner_rrefs()
@dist_init
def test_verify_backend_options(self):
self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE)
self.assertEqual(self.rpc_backend_options.num_worker_threads, 8)
self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
self.assertEqual(len(self.rpc_backend_options.messages_to_delay), 2)
self.assertEqual(self.rpc_backend_options.rpc_timeout, rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"])
def test_custom_faulty_messages(self):
self.assertEqual(
set(["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"]),
set(self.rpc_backend_options.messages_to_fail),
)
@dist_init(faulty_messages=[])
def test_no_faulty_messages(self):
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 0)
@dist_init(messages_to_delay={"SCRIPT_CALL": 1.5})
def test_custom_messages_to_delay(self):
self.assertEqual(self.rpc_backend_options.messages_to_delay, {"SCRIPT_CALL": 1.5})
def _test_remote_message_dropped_pickle(self, dst=None):
if self.rank != 0:
return
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# Since we fail python_remote_call messages synchronously, the future
# corresponding to this remote call will be marked with an error when
# this function returns.
rref = rpc.remote(dst_worker, my_sleep_func, args=(1,))
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
# Attempt to fork the RRef should raise an error indicating the rpc.remote timeout.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref._serialize()
# Test that using RRef as arg over RPC (which forks) results in the same
# error
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 1))
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle(self):
self._test_remote_message_dropped_pickle()
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle_to_self(self):
self._test_remote_message_dropped_pickle(self.rank)
def _test_remote_message_dropped_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# test the case where rpc.remote() message creation is completely dropped.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
        # Since the remote call message (SCRIPT_REMOTE_CALL or PYTHON_REMOTE_CALL,
        # depending on the test) is failed synchronously, the future corresponding
        # to this remote call will be marked with an error when this function
        # returns.
rref = rpc.remote(dst_worker, func, args=args)
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
        # Note: during shutdown, logs will indicate "Could not find OwnerRRef..."
        # on the owning node; this is expected because the OwnerRRef was never
        # successfully created. Therefore, delAllUsers will work as expected.
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args, dst=0)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout_to_self(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args, dst=0)
def _test_remote_message_delay_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# Test the case where remote message is eventually processed on the owner,
# but the future on the creator times out before the response comes back.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
        # 1 ms timeout
rref = rpc.remote(dst_worker, func, args=args, timeout=0.001)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
# to_here() should now pick up that rpc.remote() creation has failed.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
# Test the case where rpc.remote() times out, but to_here() has already
# started blocking before.
# NOTE: we only test this when not sending to self, as to_here() calls
        # localValue(), which does not send an RPC and thus does not have
# a timeout. This can be supported by allowing future.wait() to
# take in an optional timeout (https://github.com/pytorch/pytorch/issues/39280)
if dst_rank != self.rank:
slow_rref = rpc.remote(dst_worker, func, args=args, timeout=2)
with self.assertRaisesRegex(RuntimeError, expected_error):
# to_here() should raise timeout error, since it does not know about the
# status of rpc.remote().
slow_rref.to_here(0.001)
# Note: If we proceed with shutdown, UserRRef will send out a RRefUserDelete
# but this can be a noop since it may not exist on the owner yet. Later,
# the owner can process the RRef creation and wait for the delete message,
# thus leading to a timeout.
# Therefore, we wait until we get notification that pending owners have
# been confirmed before sending out RRefUserDeletes.
if dst_rank != self.rank:
wait_until_owners_and_forks_on_rank(2, 2, rank=dst_rank)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_delay_timeout(func, args)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout_to_self(self):
func = my_sleep_func
args = (1,)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout_to_self(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1})
def test_rref_to_here_timeout(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref.to_here(0.01)
rref.to_here()
@dist_init(faulty_messages=[])
def test_rpc_builtin_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
        # Builtin calls such as torch.add are sent as SCRIPT_CALL messages, which
        # by default get a delay (see faulty_rpc_agent_test_fixture), so the
        # explicit 1 second timeout below should be exceeded.
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(
dst_worker,
torch.add,
args=(torch.tensor(1), torch.tensor(1)),
timeout=1,
)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=1
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
self.assertEqual(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC, rpc.get_rpc_timeout())
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if we specify timeout of 0
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
def test_rpc_script_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
fut = rpc.rpc_async(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
self.assertEqual(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC, rpc.get_rpc_timeout())
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
        # Ensure run to completion if we specify a timeout of 0; the explicit
        # timeout overrides the short default set below.
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
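# Tests that are specific to the TensorPipe agent and its backend options.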
class TensorPipeAgentRpcTest(RpcAgentTestFixture):
def test_mismatched_type_for_options(self):
# An exception should be raised if the options are not an instance of
# TensorPipeRpcBackendOptions.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(
TypeError, "`rpc_backend_options` must be a `TensorPipeRpcBackendOptions`"
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=rpc.BackendType.TENSORPIPE,
rpc_backend_options=rpc_backend_options,
)
def test_infer_backend_from_options(self):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.init_method
)
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
self.assertIsInstance(rpc.api._get_current_rpc_agent(), rpc.TensorPipeAgent)
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_set_and_get_num_worker_threads(self):
NUM_THREADS = 27
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=NUM_THREADS
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREADS)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_set_default_timeout(self):
timeout = 0.5
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
default_timeout = rpc.get_rpc_timeout()
self.assertEqual(default_timeout, timeout)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_options_throw_on_timedelta_timeout(self):
from datetime import timedelta
timeout = timedelta()
# Ensure that constructing TensorPipeRpcBackendOptions with timedelta fails
with self.assertRaisesRegex(TypeError, "incompatible constructor arguments"):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout,
)
@dist_init
def _test_rref_get_type_timeout(self, blocking):
        # Test where we try to get the type of an RRef from an owner, but RRef
# creation is slower than timeout passed into _get_type.
dst_rank = (self.rank + 1) % self.world_size
dst = worker_name(dst_rank)
slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
timeout = 0.5
expected_err = self.get_timeout_error_regex()
# Blocking: blocks on inline call
if blocking:
with self.assertRaisesRegex(RuntimeError, expected_err):
slow_rref._get_type(timeout=timeout, blocking=blocking)
# Non-blocking: blocks on wait
else:
fut = slow_rref._get_type(timeout=timeout, blocking=blocking)
with self.assertRaisesRegex(RuntimeError, expected_err):
fut.wait()
        # FIXME We wait until the remote has completed creating the OwnerRRef
        # because there's currently a race if we shut down RPC before that.
slow_rref.to_here()
def test_rref_get_type_timeout_blocking(self):
self._test_rref_get_type_timeout(blocking=True)
def test_rref_get_type_timeout_non_blocking(self):
self._test_rref_get_type_timeout(blocking=False)
@dist_init
def test_op_with_invalid_args(self):
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Overloaded torch operator invoked from Python failed to many any schema"
):
rpc.rpc_sync(dst, torch.add, args=())
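    # Checks that timeouts passed through the RRef proxy APIs (rpc_sync, rpc_async,
    # and remote) are honored, both for a slow remote method on an existing RRef
    # and for an RRef whose owner-side creation is itself slow.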
def _test_rref_proxy_timeout(self, rref_proxy_api):
dst_rank = (self.rank + 1) % self.world_size
dst = worker_name(dst_rank)
rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), ))
# Ensure RRef is created on remote node.
rref.to_here()
rref_api = getattr(rref, rref_proxy_api)
self.assertTrue(rref_api is not None, f"Failed to get RRef proxy api: {rref_proxy_api}")
expected_error = self.get_timeout_error_regex()
timeout = 2
with self.assertRaisesRegex(RuntimeError, expected_error):
result = rref_api(timeout=timeout).my_slow_method(torch.ones(2, 2))
if rref_api == rref.rpc_async:
result.wait()
elif rref_api == rref.remote:
result._get_future().wait()
# Case where rpc.remote() is stuck and exceeds timeout
slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
timeout = 0.01
rref_api = getattr(slow_rref, rref_proxy_api)
        # Note that even when we call rref.rpc_async() in this case, we
        # time out in future creation, not while waiting on the future. This is
        # because the rref proxy function calls rref._get_type before returning
        # the future, which blocks on the RRef being created on the owner node,
        # up to the specified timeout.
with self.assertRaisesRegex(RuntimeError, expected_error):
rref_api(timeout=timeout).my_instance_method(torch.ones(2, 2))
        # FIXME We wait until the remote has completed creating the OwnerRRef
        # because there's currently a race if we shut down RPC before that.
slow_rref.to_here()
@dist_init
def test_rref_proxy_timeout(self):
for rpc_api in ["rpc_sync", "rpc_async", "remote"]:
self._test_rref_proxy_timeout(rpc_api)
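# Small MNIST-style CNN used by the CUDA RPC tests; __getstate__ returns an empty
# dict so instances can be sent over RPC without serializing their parameters.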
class MyConvNetForMNIST(nn.Module):
def __init__(self, device):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(1, 16, 3, 1),
nn.ReLU(),
nn.Conv2d(16, 32, 3, 1),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Flatten(1),
nn.Linear(4608, 128),
nn.ReLU(),
nn.Linear(128, 10),
).to(device)
self.device = device
def forward(self, x, is_rref=False):
x = x.to_here() if is_rref else x
with torch.cuda.stream(torch.cuda.current_stream(self.device)):
# intentionally adding delay to current CUDA stream
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
return self.net(x)
def __getstate__(self):
# return an empty dict to avoid inspecting the model contents on the
# owner
return {}
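# TensorPipe-specific CUDA tests, mostly covering device-map validation and GPU
# tensor transfer between workers.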
class TensorPipeAgentCudaRpcTest(RpcAgentTestFixture):
def _test_device_maps(self, options, errMsg):
with self.assertRaisesRegex(ValueError, errMsg):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
self.assertFalse(rpc.api._is_current_rpc_agent_set())
@skip_if_lt_x_gpu(2)
def test_device_maps_wrong_worker_name(self):
options = self.rpc_backend_options
options.set_device_map("none_exist", {0: 1})
self._test_device_maps(
options,
errMsg="Node worker0 has invalid target node names in its device maps"
)
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_local_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {torch.cuda.device_count(): 0})
self._test_device_maps(
options,
errMsg="Node worker0 has source devices with invalid indices in its device map for worker1"
)
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_remote_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: torch.cuda.device_count()})
self._test_device_maps(
options,
errMsg="Node worker0 has target devices with invalid indices in its device map for worker1"
)
@skip_if_lt_x_gpu(2)
def test_device_maps_many_to_one(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
options.set_device_map(dst, {0: 0})
self._test_device_maps(
options,
errMsg="Node worker0 has duplicated target devices in its device map for worker1"
)
@skip_if_lt_x_gpu(2)
def test_device_maps_one_to_many(self):
if self.rank == 0:
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1})
with self.assertRaisesRegex(
ValueError, "`set_device_map` only supports 1-to-1 mapping"
):
options.set_device_map(dst, {0: 0})
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_min_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {-1: 0})
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {0: -1})
@staticmethod
def _gpu_add(x, y):
if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 1]):
return (x + y).to(0)
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(2)
def test_device_maps_gpu(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1, 1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add,
args=(torch.zeros(2).to(0), torch.ones(2).to(0))
)
self.assertEqual(ret.device, torch.device(1))
self.assertEqual(ret, (torch.zeros(2) + torch.ones(2)).to(1))
rpc.shutdown()
@staticmethod
def _gpu_add_given_devices(x, y, x_to, y_to, z_to):
x_device = "cpu" if x.device.type == "cpu" else x.device.index
y_device = "cpu" if y.device.type == "cpu" else y.device.index
if x_device == x_to and y_device == y_to:
return x.to(z_to) + y.to(z_to)
else:
raise ValueError("Wrong device affinity")
def _test_device_maps_gpu(self, x_from, y_from, z_to, device_map, dst=None, fn=None):
fn = TensorPipeAgentCudaRpcTest._gpu_add_given_devices if fn is None else fn
x_to = device_map[x_from]
y_to = device_map[y_from]
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size) if dst is None else dst
options.set_device_map(dst, device_map)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(x_from)
y = torch.ones(2).to(y_from)
ret = rpc.rpc_sync(dst, fn, args=(x, y, x_to, y_to, z_to))
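        # The response path applies the inverse of the caller's device map: a
        # result produced on device z_to at the callee arrives locally on
        # z_from = reverse_device_map[z_to], which the assertions below verify.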
reverse_device_map = {device_map[k] : k for k in device_map}
z_from = reverse_device_map[z_to]
ret_device = "cpu" if ret.device.type == "cpu" else ret.device.index
self.assertEqual(ret_device, z_from)
self.assertEqual(ret, torch.ones(2).to(z_from))
rpc.shutdown()
def test_device_map_cpu(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to="cpu",
device_map={"cpu" : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(1)
def test_device_map_cpu_to_gpu_default(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to=0,
device_map={"cpu" : 0},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_cpu_to_gpu_non_default(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to=1,
device_map={"cpu" : 1},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(1)
def test_device_map_gpu_to_cpu_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to="cpu",
device_map={0 : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_to_cpu_non_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to="cpu",
device_map={1 : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to=0,
device_map={0 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_non_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to=1,
device_map={1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_default_to_non_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to=1,
device_map={0 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_non_default_to_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to=0,
device_map={1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_1(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_2(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_3(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_4(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_5(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_6(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_7(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_8(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_1(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_2(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_3(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_4(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_5(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_6(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_7(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_8(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@staticmethod
def _gpu_add_multi_gpu(x, y):
if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 0]):
return x.to(0) + y, x - y.to(1)
else:
raise ValueError("Wrong device affinity")
def _test_device_maps_multi_gpu(self, dst):
options = self.rpc_backend_options
options.set_device_map(dst, {0: 1})
options.set_device_map(dst, {1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(0)
y = torch.ones(2).to(1)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
args=(x, y)
)
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_multi_gpu(dst)
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu_self(self):
dst = worker_name(self.rank)
self._test_device_maps_multi_gpu(dst)
@staticmethod
def _gpu_add_return_to_gpu(x, y):
if x.device.type == 'cpu' and y.device.type == 'cpu':
return (x + y).to(0), (x - y).to(1), (x * y).to(2), (x / y).to(3)
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(2)
def test_device_maps_in_options(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc.TensorPipeRpcBackendOptions(
init_method=options.init_method,
num_worker_threads=options.num_worker_threads,
device_maps={dst: {0: 1, 1: 0}}
)
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
args=(torch.zeros(2).to(0), torch.ones(2).to(1))
)
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
def _test_device_maps_return_to_gpu(self, dst):
options = self.rpc_backend_options
options.set_device_map(dst, {0: 1})
options.set_device_map(dst, {1: 2})
options.set_device_map(dst, {2: 3})
options.set_device_map(dst, {3: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_return_to_gpu,
args=(torch.zeros(2), torch.ones(2))
)
for i in range(len(rets)):
self.assertEqual(rets[i].device, torch.device((3 + i) % 4))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(3))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
self.assertEqual(rets[2], (torch.zeros(2) * torch.ones(2)).to(1))
self.assertEqual(rets[3], (torch.zeros(2) / torch.ones(2)).to(2))
rpc.shutdown()
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_return_to_gpu(dst)
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu_self(self):
dst = worker_name(self.rank)
self._test_device_maps_return_to_gpu(dst)
@staticmethod
def _add_to_gpu(x, y):
return (x + y).to(0)
def _test_device_maps_missing_config(self, mode):
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = (
"TensorPipe RPC backend only supports CPU tensors by default.*"
"`set_device_map` on `TensorPipeRpcBackendOptions`"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(dst, torch.add, args=(torch.zeros(2).to(0), 1))
elif mode == RPCExecMode.REMOTE:
rpc.remote(dst, torch.add, args=(torch.zeros(2).to(0), 1)).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
def _test_device_maps_missing_config_response(self, mode):
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = "Response device mapping is not available"
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
elif mode == RPCExecMode.REMOTE:
rpc.remote(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config(self):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
def test_device_maps_missing_config_not_timeout(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options
)
timeout = rpc.get_rpc_timeout()
tik = time.time()
self._test_device_maps_missing_config(RPCExecMode.SYNC)
rpc.shutdown()
tok = time.time()
self.assertTrue(tok - tik < timeout)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_loop(self):
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response_loop(self):
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote(self):
self._test_device_maps_missing_config(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(2)
def test_device_maps_remote(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rref = rpc.remote(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
self.assertEqual(rref.to_here().device.index, 1)
self.assertEqual(rref.to_here(), torch.ones(2).to(1))
rpc.shutdown()
@staticmethod
def _slow_add_on_user_stream(x, y):
s0 = torch.cuda.current_stream(x.device)
s1 = torch.cuda.Stream(device=x.device)
s1.wait_stream(s0)
x.record_stream(s1)
y.record_stream(s1)
with torch.cuda.stream(s1):
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
z = x + y
s0.wait_stream(s1)
z.record_stream(s0)
return z
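    # Stream discipline used above: the side stream s1 first waits on the
    # current stream s0 so that x and y are ready, record_stream marks the
    # tensors as used on the other stream for the caching allocator, and s0
    # finally waits on s1 so that later work on the current stream observes
    # the completed sum despite the injected delay.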
def _test_custom_stream(self, fn, device_map):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, device_map)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
fn(dst)
rpc.shutdown()
def _test_stream_sync(self, dst):
x = torch.ones(2, 2).to(0)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, x)
)
self.assertEqual(ret, 2 * x)
@skip_if_lt_x_gpu(2)
def test_custom_stream(self):
self._test_custom_stream(self._test_stream_sync, {"cuda:0": "cuda:1"})
def _test_stream_multi_async(self, dst):
futs = []
for i in range(20):
x = torch.ones(2, 2).to(0) * i
futs.append(
rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, x)
)
)
for i in range(20):
self.assertEqual(futs[i].wait(), 2 * torch.ones(2, 2).to(0) * i)
@skip_if_lt_x_gpu(2)
def test_custom_stream_multi(self):
self._test_custom_stream(
self._test_stream_multi_async,
{"cuda:0": "cuda:1"}
)
@staticmethod
def _nested_slow_add_on_user_stream(dst, x, y, z):
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, y)
)
return TensorPipeAgentCudaRpcTest._slow_add_on_user_stream(ret, z)
def _test_stream_nested_sync(self, dst):
x = torch.ones(2, 2).to(0)
y = torch.ones(2, 2).to(0) * 2
z = torch.ones(2, 2).to(0) * 3
nested_dst = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
args=(nested_dst, x, y, z)
)
self.assertEqual(ret, 6 * x)
@skip_if_lt_x_gpu(2)
def test_custom_stream_nested(self):
self._test_custom_stream(
self._test_stream_nested_sync,
{"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
)
def _test_stream_nested_multi_async(self, dst):
if self.rank == 0:
futs = []
n = 5
xs, ys, zs = [], [], []
for i in range(n):
x = torch.ones(2, 2).to(0) * (i - 1)
y = torch.ones(2, 2).to(0) * i
z = torch.ones(2, 2).to(0) * (i + 1)
xs.append(x)
ys.append(y)
zs.append(z)
nested_dst = worker_name((self.rank + 2) % self.world_size)
futs.append(
rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
args=(nested_dst, x, y, z)
)
)
for i in range(n):
self.assertEqual(futs[i].wait(), xs[i] + ys[i] + zs[i])
@skip_if_lt_x_gpu(2)
def test_custom_stream_nested_multi(self):
self._test_custom_stream(
self._test_stream_nested_multi_async,
{"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
)
@staticmethod
def _gpu_add_wrong_gpus(x, y):
if x.is_cuda and y.is_cuda:
return x.cpu() + y.cuda()
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(1)
def test_device_mismatch(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(0)
y = torch.ones(2).to(0)
with self.assertRaisesRegex(
RuntimeError,
"Expected all tensors to be on the same device, but found at least two devices"
):
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_wrong_gpus,
args=(x, y)
)
rpc.shutdown()
def _test_rref_synchronization(self, local_device, remote_device):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {local_device : remote_device})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 1:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
# training of a CNN of MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
x = torch.randn(200, 1, 28, 28).to(local_device)
actual = rref.remote().forward(x).to_here()
expected = rref.rpc_sync().forward(x)
self.assertEqual(actual, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_to_here_synchronization1(self):
self._test_rref_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization2(self):
self._test_rref_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization3(self):
self._test_rref_synchronization("cuda:1", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization4(self):
self._test_rref_synchronization("cuda:0", "cuda:1")
def _test_rref_as_arg_synchronization(
self,
local_device,
remote_device,
devicesOptions=None
):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {local_device: remote_device})
input_src = worker_name((self.rank - 1 + self.world_size) % self.world_size)
options.set_device_map(input_src, {remote_device: local_device})
if devicesOptions is not None:
options.set_devices(devicesOptions[self.rank])
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 1:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
# training of a CNN of MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
rref_x = RRef(torch.randn(200, 1, 28, 28).to(local_device))
actual = rref.remote().forward(rref_x, True).to_here()
expected = rref.rpc_sync().forward(rref_x, True)
self.assertEqual(actual, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_as_arg_synchronization1(self):
self._test_rref_as_arg_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization2(self):
self._test_rref_as_arg_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization3(self):
self._test_rref_as_arg_synchronization("cuda:1", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization4(self):
self._test_rref_as_arg_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(1)
def test_rref_as_arg_synchronization5(self):
self._test_rref_as_arg_synchronization(
"cuda:0",
"cuda:0",
[["cuda:0"] for _ in range(4)], # devicesOptions
)
@staticmethod
def _rref_relay(rref):
return rref.to_here()
def _test_rref_forward_synchronization(self, local_device, remote_device):
options = self.rpc_backend_options
input_src = worker_name(0)
model_dst = worker_name(1)
out_relay = worker_name(2)
if self.rank == 0:
# for 1) model construction 2) forward execution
options.set_device_map(model_dst, {local_device: remote_device})
# Forward output will be first copied to the relay node before
# returning to the worker. This is intentional, to test RRef
# forward CUDA stream synchronizations.
options.set_device_map(out_relay, {local_device: local_device})
elif self.rank == 1:
            # worker1 hosts the model and runs forward. The forward function
            # calls RRef.to_here(), hence worker1 needs to configure the device map
options.set_device_map(input_src, {remote_device: local_device})
elif self.rank == 2:
            # worker2 will get the out RRef and call to_here(), and hence needs
            # to configure the device map.
options.set_device_map(model_dst, {local_device: remote_device})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 0:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
# training of a CNN of MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(model_dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
rref_input = RRef(torch.randn(200, 1, 28, 28).to(local_device))
rref_out = rref.remote().forward(rref_input, True)
out = rpc.remote(
out_relay,
TensorPipeAgentCudaRpcTest._rref_relay,
args=(rref_out,)
).to_here()
expected = rref.rpc_sync().forward(rref_input, True)
self.assertEqual(out, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_forward_synchronization1(self):
self._test_rref_forward_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization2(self):
self._test_rref_forward_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization3(self):
self._test_rref_forward_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization4(self):
self._test_rref_forward_synchronization("cuda:1", "cuda:1")
def _test_owner_rref_forward_synchronization(self, local_device, remote_device):
if self.rank == 0:
options = self.rpc_backend_options
options.set_device_map("w0", {local_device: remote_device})
rpc.init_rpc(
"w0",
rank=0,
world_size=1,
rpc_backend_options=options
)
model = rpc.remote(
"w0", torch.nn.Linear, (2048, 20000)
).remote().to(remote_device)
for _ in range(30):
data = torch.rand(2048, 2048).to(local_device)
output = model.rpc_sync().forward(data)
# to_here() internally calls localValue as the caller is
# the owner of the RRef.
v0 = rpc.RRef(output).remote().sum().to_here().item()
v1 = output.sum().item()
self.assertEqual(v0, v1)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_owner_rref_forward_synchronization1(self):
self._test_owner_rref_forward_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization2(self):
self._test_owner_rref_forward_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization3(self):
self._test_owner_rref_forward_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization4(self):
self._test_owner_rref_forward_synchronization("cuda:1", "cuda:1")
@staticmethod
def _return_tensor_view(i):
x = torch.ones(1000, 200).cuda(0) * i
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
# serialization of the return value will create a new tensor from the
# view, which is done outside of the user function.
return x.split(100)[0]
@skip_if_lt_x_gpu(1)
def test_tensor_view_as_return_value(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0 : 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
futs = []
for i in range(5):
futs.append(rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._return_tensor_view,
args=(i,)
))
for i in range(5):
self.assertEqual(torch.ones(100, 200) * i, futs[i].wait())
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_devices_option_mismatch(self):
with self.assertRaisesRegex(
ValueError,
"Node worker0 has unexpected source devices in its device map for worker1"
):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0 : 0})
options.set_devices([1])
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_devices_option_mismatch_reverse(self):
with self.assertRaisesRegex(
ValueError,
"Node worker0 has unexpected target devices in its device map for worker1"
):
dst = worker_name((self.rank + 1) % self.world_size)
options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
device_maps={dst: {0 : 1}},
devices=[0]
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_int(self):
fut = Future(devices=[0])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_str(self):
fut = Future(devices=["cuda:0"])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_device(self):
fut = Future(devices=[torch.device("cuda", 0)])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_not_cuda(self):
with self.assertRaisesRegex(
ValueError, "Expected devices to have indices, got cpu"
):
fut = Future(devices=["cpu"])
def _test_cuda_future_extraction(self, wrapper, unwrapper, sparse_tensor):
# We check proper CUDA stream synchronization by adding to the tensor
# in one stream to get the expected value, and reading it from another stream.
future = Future(devices=["cuda:0"])
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
another_stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
if sparse_tensor:
tensor = build_sparse_tensor().to("cuda:0")
add_tensor = build_sparse_tensor().to("cuda:0")
expected_tensor = (tensor + add_tensor).coalesce()
else:
tensor = torch.zeros((100,), device="cuda:0")
add_tensor = torch.ones((100,), device="cuda:0")
expected_tensor = tensor + add_tensor
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor += add_tensor
if sparse_tensor:
tensor = tensor.coalesce()
future.set_result(wrapper(tensor))
with torch.cuda.stream(another_stream):
tensor = unwrapper(future.wait())
if sparse_tensor:
self.assertTrue(torch.eq(tensor.indices(), expected_tensor.indices()).all().item())
self.assertTrue(torch.eq(tensor.values(), expected_tensor.values()).all().item())
self.assertEqual(tensor.size(), expected_tensor.size())
else:
self.assertTrue(torch.eq(tensor, expected_tensor).all().item())
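    # In the helper above, `wrapper` embeds the CUDA tensor in a container
    # (identity, list, or a custom class) before set_result, and `unwrapper`
    # extracts it again after wait(); the assertions check that a consumer on
    # a different stream still sees the value written after the injected
    # sleep, i.e. that the Future synchronized the streams.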
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_custom_class_with_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: TensorWrapper(t), unwrapper=lambda v: v.tensor, sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=True
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=True
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_custom_class_with_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: TensorWrapper(t), unwrapper=lambda v: v.tensor, sparse_tensor=True
)
@skip_if_lt_x_gpu(2)
def test_cuda_future_callback_changes_devices(self):
# We check proper CUDA stream synchronization by filling the tensor with
# the expected value in one stream, and reading it from another stream.
tensor0 = torch.zeros((100,), device="cuda:0")
tensor1 = torch.zeros((100,), device="cuda:1")
parent_future = Future(devices=["cuda:0", "cuda:1"])
def cb(fut):
t0 = fut.value()
tensor1.copy_(t0, non_blocking=True)
return tensor1
child_future = parent_future.then(cb)
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor0.fill_(1)
parent_future.set_result(tensor0)
with torch.cuda.device("cuda:1"):
another_stream = torch.cuda.Stream()
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
@skip_if_lt_x_gpu(2)
def test_cuda_future_value_on_bad_device(self):
tensor0 = torch.zeros((100,), device="cuda:0")
tensor1 = torch.zeros((100,), device="cuda:1")
parent_future = Future(devices=["cuda:1"])
# As a plus, we test that futures still invoke callbacks even in case of
# error, and that the child futures are successful if those callbacks
# don't access the parent future.
def cb(fut):
with torch.cuda.device("cuda:1"):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor1.fill_(1)
return tensor1
child_future = parent_future.then(cb)
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor0.fill_(1)
parent_future.set_result(tensor0)
with self.assertRaisesRegex(
ValueError,
r"The result contained tensors residing on device\(s\) cuda:0 "
r"which are not among the expected device\(s\) cuda:1",
):
parent_future.wait()
with torch.cuda.device("cuda:1"):
another_stream = torch.cuda.Stream()
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
@skip_if_lt_x_gpu(1)
def test_async_execution_with_cuda_future(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
t = torch.zeros((100,), device="cuda:0")
fut = rpc.rpc_async(dst, async_cuda_sleep_and_set_to_one, args=(t,))
another_stream = torch.cuda.Stream("cuda:0")
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(fut.wait(), 1).all().item())
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_async_execution_nested_with_cuda_future(self):
dst = worker_name((self.rank + 1) % self.world_size)
nested_dst = worker_name((self.rank + 2) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
a = torch.ones((100,), device="cuda:0")
b = torch.ones((100,), device="cuda:0")
c = torch.ones((100,), device="cuda:0")
fut = rpc.rpc_async(dst, async_cuda_nested_add, args=(nested_dst, a, b, c))
another_stream = torch.cuda.Stream("cuda:0")
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(fut.wait(), 3).all().item())
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_modify_tensor_inplace(self):
tensor = torch.zeros((100,), device="cuda:0")
future = Future(devices=["cuda:0"])
future.set_result(tensor)
# It's weird to modify the value of a future once it's complete, but
# technically possible. Currently this is considered undefined behavior
# (in practice the future will ignore the modification and still
# synchronize with the original value). We could one day add logic to
# detect and warn or throw in such cases, but for now we just check that
# this doesn't crash.
tensor.fill_(1)
future.wait()
@skip_if_lt_x_gpu(1)
def test_cuda_future_replace_tensor(self):
tensor_list = [torch.zeros((100,), device="cuda:0")]
future = Future(devices=["cuda:0"])
future.set_result(tensor_list)
# It's weird to modify the value of a future once it's complete, but
# technically possible. Currently this is considered undefined behavior
# (in practice the future will ignore the modification and still
# synchronize with the original value). We could one day add logic to
# detect and warn or throw in such cases, but for now we just check that
# this doesn't crash.
# We set things up so that the original tensor contained in the list
# gets deleted once we replace it with the other one. This will
# invalidate any cached information held by the future.
tensor_list[0] = torch.ones((100,), device="cuda:0")
future.wait()
@skip_if_lt_x_gpu(1)
def test_rref_with_unpickleable_attributes(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rref = rpc.remote(dst, TensorWrapper, args=(torch.zeros(42, device="cuda:0"),))
rref.rpc_sync().increase(1)
ret = rref.rpc_sync().sum()
self.assertEqual(ret, 42)
rpc.shutdown()
|
yahoo_weather.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .. import bar
import base
import urllib
import urllib2
from xml.dom import minidom
import gobject
import threading
try:
import json
except ImportError:
import simplejson as json
QUERY_URL = 'http://query.yahooapis.com/v1/public/yql?'
WEATHER_URL = 'http://weather.yahooapis.com/forecastrss?'
WEATHER_NS = 'http://xml.weather.yahoo.com/ns/rss/1.0'
class YahooWeather(base._TextBox):
''' A weather widget, data provided by the Yahoo! Weather API
Format options:
astronomy_sunrise, astronomy_sunset
atmosphere_humidity, atmosphere_visibility,
atmosphere_pressure, atmosphere_rising
condition_text, condition_code, condition_temp, condition_date
        location_city, location_region, location_country
units_temperature, units_distance, units_pressure, units_speed
wind_chill, wind_direction, wind_speed
'''
defaults = [
## One of (location, woeid) must be set.
(
'location',
None,
'Location to fetch weather for. Ignored if woeid is set.'
),
(
'woeid',
None,
'Where On Earth ID. Auto-calculated if location is set.'
),
(
'format',
'{location_city}: {condition_temp} °{units_temperature}',
'Display format'
),
('metric', True, 'True to use metric/C, False to use imperial/F'),
('update_interval', 600, 'Update interval in seconds'),
('up', '^', 'symbol for rising atmospheric pressure'),
('down', 'v', 'symbol for falling atmospheric pressure'),
('steady', 's', 'symbol for steady atmospheric pressure'),
]
def __init__(self, **config):
base._TextBox.__init__(self, 'N/A', width=bar.CALCULATED, **config)
def _configure(self, qtile, bar):
base._TextBox._configure(self, qtile, bar)
self.add_defaults(YahooWeather.defaults)
self.timeout_add(self.update_interval, self.wx_updater)
def button_press(self, x, y, button):
self.update(self.fetch_weather())
def wx_updater(self):
self.log.info('adding WX widget timer')
def worker():
data = self.fetch_weather()
gobject.idle_add(self.update, data)
threading.Thread(target=worker).start()
return True
def update(self, data):
if data:
self.text = self.format.format(**data)
else:
self.text = 'N/A'
self.bar.draw()
return False
def fetch_woeid(self, location):
url = QUERY_URL + urllib.urlencode({
'q': 'select woeid from geo.places where text="%s"' % location,
'format': 'json'
})
try:
response = urllib2.urlopen(url)
data = json.loads(response.read())
if data['query']['count'] > 1:
return data['query']['results']['place'][0]['woeid']
return data['query']['results']['place']['woeid']
except Exception:
## HTTPError? JSON Error? KeyError? Doesn't matter, return None
return None
def fetch_weather(self):
if not self.woeid:
if self.location:
self.woeid = self.fetch_woeid(self.location)
if not self.woeid:
return None
format = 'c' if self.metric else 'f'
url = WEATHER_URL + urllib.urlencode({'w': self.woeid, 'u': format})
try:
response = urllib2.urlopen(url).read()
dom = minidom.parseString(response)
except Exception:
## Invalid response or couldn't parse XML.
return None
structure = (
('location', ('city', 'region', 'country')),
('units', ('temperature', 'distance', 'pressure', 'speed')),
('wind', ('chill', 'direction', 'speed')),
('atmosphere', ('humidity', 'visibility', 'pressure', 'rising')),
('astronomy', ('sunrise', 'sunset')),
('condition', ('text', 'code', 'temp', 'date'))
)
data = {}
for tag, attrs in structure:
element = dom.getElementsByTagNameNS(WEATHER_NS, tag)[0]
for attr in attrs:
data['%s_%s' % (tag, attr)] = element.getAttribute(attr)
if data['atmosphere_rising'] == '0':
data['atmosphere_rising'] = self.steady
elif data['atmosphere_rising'] == '1':
data['atmosphere_rising'] = self.up
elif data['atmosphere_rising'] == '2':
data['atmosphere_rising'] = self.down
return data
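# Illustrative configuration for this widget (values are placeholders, assuming
# the module is loaded as a qtile widget):
#
#     YahooWeather(
#         location='Sydney, AU',  # resolved to a WOEID automatically
#         format='{location_city}: {condition_text} {condition_temp} °{units_temperature}',
#         metric=True,
#         update_interval=600,
#     )
#
# Any key listed in the class docstring can be used inside `format`.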
|
daemon_thread.py
|
import threading
import time
def standard_thread():
print("Starting my Standard Thread")
time.sleep(20)
print("Ending my standard thread")
def daemon_thread():
while True:
print("Sending Out Heartbeat Signal")
time.sleep(2)
if __name__ == '__main__':
standardThread = threading.Thread(target=standard_thread)
daemonThread = threading.Thread(target=daemon_thread)
    daemonThread.daemon = True  # setDaemon() is deprecated; assigning the attribute is equivalent
daemonThread.start()
standardThread.start()
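    # Expected behaviour: the daemon thread prints a heartbeat every 2 seconds
    # while the standard thread sleeps; once standard_thread returns (~20s) and
    # the main thread exits, the daemon thread is killed automatically instead
    # of keeping the process alive.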
|
app.py
|
# -*- coding: utf-8 -*-
import json
import logging
import os
import signal
import subprocess
import threading
import traceback
from typing import Dict
from flask import Flask, jsonify, make_response, request
import redis
from .RLPopThread import RLPopThread
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)-15s %(name)s %(levelname)s %(message)s')
logger = logging.getLogger('app')
class Handler:
app = Flask(__name__)
listeners: Dict[str, RLPopThread] = {}
r = redis.StrictRedis(
host=os.getenv('REDIS_HOST', 'localhost'),
port=int(os.getenv('REDIS_PORT', '6379')),
password=os.getenv('REDIS_PASSWORD', None),
db=os.getenv('REDIS_DB', None),
)
command_methods = {
'del': 'delete',
'rpop': 'pop_generic',
'lpop': 'pop_generic',
'brpop': 'pop_generic',
'blpop': 'pop_generic',
'rpush': 'push_generic',
'lpush': 'push_generic'
}
def execute(self, command):
req = request.get_json()
method = self.command_methods.get(command, command)
return getattr(self, method)(command, req)
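    # Example requests against this generic dispatcher (hypothetical values;
    # the service listens on port 8000, see the bottom of this file):
    #
    #   POST /set   {"key": "greeting", "value": "hello"}   -> null
    #   POST /get   {"key": "greeting"}                     -> "hello"
    #   POST /lpush {"key": "jobs", "value": "job-1"}       -> null
    #   POST /rpop  {"key": "jobs"}                         -> "job-1"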
def decode(self, result):
if result is None:
return None
if isinstance(result, list):
return [self.decode(x) for x in result]
if isinstance(result, bytes):
return result.decode('utf-8')
return result
def ok(self, result=None):
result = self.decode(result)
resp = make_response(json.dumps(result))
resp.headers['Content-Type'] = 'application/json; charset=utf-8'
return resp
def set(self, command, json_req):
self.r.set(json_req['key'], json_req['value'])
return self.ok()
def setnx(self, command, json_req):
val = self.r.setnx(json_req['key'], json_req['value'])
return self.ok(result=val > 0)
def mset(self, command, json_req):
self.r.mset(json_req['pairs'])
return self.ok()
def msetnx(self, command, json_req):
val = self.r.msetnx(json_req['pairs'])
return self.ok(result=val > 0)
def get(self, command, json_req):
val = self.r.get(json_req['key'])
return self.ok(result=val)
def mget(self, command, json_req):
val = self.r.mget(json_req['keys'])
return self.ok(result=val)
def incr(self, command, json_req):
by = json_req.get('by', 1)
val = self.r.incr(json_req['key'], by)
return self.ok(result=val)
def decr(self, command, json_req):
by = json_req.get('by', 1)
val = self.r.decr(json_req['key'], by)
return self.ok(result=val)
def append(self, command, json_req):
val = self.r.append(json_req['key'], json_req['value'])
return self.ok(result=val)
def getset(self, command, json_req):
val = self.r.getset(json_req['key'], json_req['value'])
return self.ok(result=val)
def push_generic(self, command, json_req):
"""
Handles LPUSH, RPUSH.
"""
c = getattr(self.r, command)
c(json_req['key'], json_req['value'])
return self.ok()
def pop_generic(self, command, json_req):
"""
Handles LPOP, RPOP, BLPOP, BRPOP.
"""
c = getattr(self.r, command)
val = c(json_req['key'])
if val:
if isinstance(val, tuple): # True if blocking pop.
return self.ok(val[1])
else:
return self.ok(val)
else:
return self.ok()
def delete(self, command, json_req):
"""
        Friendly name for the actual Redis command DEL ("del" is a Python keyword, so this handler is named delete).
"""
self.r.delete(json_req['key'])
return self.ok()
def expire(self, command, json_req):
self.r.expire(json_req['key'], json_req['seconds'])
return self.ok()
def listener(self, action):
req = request.get_json()
sub_id = req['id']
if action == 'remove':
old_thread = self.listeners.get(sub_id)
if old_thread is not None:
old_thread.shutdown = True
return 'ok\n'
return 'already_inactive\n'
assert action == 'add'
# We only support r/lpop for now.
assert req['event'] == 'rpop' or req['event'] == 'lpop'
key = req['data']['key']
old_thread = self.listeners.get(sub_id)
if old_thread is not None:
if old_thread.is_alive():
return 'already_active\n'
t = RLPopThread(sub_id, req['event'], self.r, key, req['endpoint'])
t.start()
self.listeners[sub_id] = t
return 'ok\n'
def health(self):
return 'OK'
class RedisOnDemand:
def __init__(self, redis_proc: subprocess.Popen):
self.redis_proc = redis_proc
def wait(self):
self.redis_proc.wait()
logger.error('Redis has exited!')
# Print stdout for debug.
while True:
line = self.redis_proc.stdout.readline()
if line != b'':
logger.info(line.rstrip())
else:
break
# Exit as soon as the redis server crashes.
# Note: sys.exit() will not work here.
os.kill(os.getpid(), signal.SIGINT)
def app_error(e):
    logger.warning(traceback.format_exc())
return jsonify({'message': repr(e)}), 400
if __name__ == '__main__':
# Do we have creds to connect to? If not, let's spawn a redis server
# at this point. Why do we spawn it here rather than outside? Because if
# redis dies, then we can exit this script so that the entire service dies.
if os.getenv('REDIS_HOST') is None:
logger.warning('Starting self hosted redis server...')
p = subprocess.Popen('/usr/bin/redis-server /app/redis.conf',
stdout=subprocess.PIPE, shell=True)
server = RedisOnDemand(p)
t = threading.Thread(target=server.wait, daemon=True)
t.start()
handler = Handler()
handler.app.add_url_rule('/listener/<string:action>',
# action=add/remove.
'listener', handler.listener, methods=['post'])
handler.app.add_url_rule('/<string:command>', 'execute', handler.execute,
methods=['post'])
handler.app.add_url_rule('/health', 'health', handler.health,
methods=['get'])
handler.app.register_error_handler(Exception, app_error)
handler.app.run(host='0.0.0.0', port=8000)
|
Binance Detect Moonings.py
|
"""
Olorin Sledge Fork
Version: 1.18
Disclaimer
All investment strategies and investments involve risk of loss.
Nothing contained in this program, scripts, code or repository should be
construed as investment advice. Any reference to an investment's past or
potential performance is not, and should not be construed as, a recommendation
or as a guarantee of any specific outcome or profit.
By using this program you accept all liability and agree that no claims can be
made against the developers or others connected with the program.
See requirements.txt for versions of modules needed
Notes:
- Requires Python version 3.9.x to run
Functionality:
- Changed way profit % is calculated to be based on ROI
- More details provided on screen about the state of the bot (e.g. unrealised session profit, session profit, all-time profit, whether buying is paused, etc.)
- Totally reworked external signals. NOTE: you CANNOT use the default signals anymore with my bot unless you modify them to work with it
- Sell all coins on stopping bot functionality
- Stop bot on session profit / session stop loss trigger
- Discord support
- Better reporting in trades.txt
- A history.txt that records the state of the bot every minute (useful for later analysis / charting)
- Better error trapping on certain exceptions
- BNB is no longer used as the reference for TIME_DIFFERENCE, so it no longer needs to be in your tickers.txt list.
- Tickers list can now auto reload (if set in the config.yml file)
- Held coins displayed in a Table format
"""
# use for environment variables
import os
# use if needed to pass args to external modules
import sys
# used for math functions
import math
# used to create threads & dynamic loading of modules
import threading
import multiprocessing
import importlib
# used for directory handling
import glob
# discord notifications require the requests module
import requests
# Needed for colorful console output Install with: python3 -m pip install colorama (Mac/Linux) or pip install colorama (PC)
from colorama import init
init()
# needed for the binance API / websockets / Exception handling
from binance.client import Client
from binance.exceptions import BinanceAPIException
from binance.helpers import round_step_size
from requests.exceptions import ReadTimeout, ConnectionError
# used for dates
from datetime import date, datetime, timedelta
import time
# used to repeatedly execute the code
from itertools import count
# used to store trades and sell assets
import json
# used to display holding coins in an ascii table
from prettytable import PrettyTable
# Load helper modules
from helpers.parameters import (
parse_args, load_config
)
# Load creds modules
from helpers.handle_creds import (
load_correct_creds, test_api_key,
load_discord_creds
)
# for colourful logging to the console
class txcolors:
BUY = '\033[92m'
WARNING = '\033[93m'
SELL_LOSS = '\033[91m'
SELL_PROFIT = '\033[32m'
DIM = '\033[2m\033[35m'
DEFAULT = '\033[39m'
# tracks profit/loss each session
global session_profit_incfees_perc, session_profit_incfees_total, session_tpsl_override_msg, is_bot_running
session_profit_incfees_perc = 0
session_profit_incfees_total = 0
session_tpsl_override_msg = ""
is_bot_running = True
global historic_profit_incfees_perc, historic_profit_incfees_total, trade_wins, trade_losses
global sell_all_coins, bot_started_datetime
try:
historic_profit_incfees_perc
except NameError:
historic_profit_incfees_perc = 0 # or some other default value.
try:
historic_profit_incfees_total
except NameError:
historic_profit_incfees_total = 0 # or some other default value.
try:
trade_wins
except NameError:
trade_wins = 0 # or some other default value.
try:
trade_losses
except NameError:
trade_losses = 0 # or some other default value.
bot_started_datetime = ""
# print with timestamps
old_out = sys.stdout
class St_ampe_dOut:
"""Stamped stdout."""
nl = True
def write(self, x):
"""Write function overloaded."""
if x == '\n':
old_out.write(x)
self.nl = True
elif self.nl:
old_out.write(f'{txcolors.DIM}[{str(datetime.now().replace(microsecond=0))}]{txcolors.DEFAULT} {x}')
self.nl = False
else:
old_out.write(x)
def flush(self):
pass
sys.stdout = St_ampe_dOut()
def is_fiat():
# check if we are using a fiat as a base currency
global hsp_head
PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
#list below is in the order that Binance displays them, apologies for not using ASC order
fiats = ['USDT', 'BUSD', 'AUD', 'BRL', 'EUR', 'GBP', 'RUB', 'TRY', 'TUSD', 'USDC', 'PAX', 'BIDR', 'DAI', 'IDRT', 'UAH', 'NGN', 'VAI', 'BVND']
if PAIR_WITH in fiats:
return True
else:
return False
def decimals():
# set number of decimals for reporting fractions
if is_fiat():
return 4
else:
return 8
def print_table(table):
global old_out
print('')
sys.stdout = old_out
print(table)
sys.stdout = St_ampe_dOut()
def get_price(add_to_historical=True):
'''Return the current price for all coins on binance'''
global historical_prices, hsp_head
initial_price = {}
prices = client.get_all_tickers()
for coin in prices:
if CUSTOM_LIST:
if any(item + PAIR_WITH == coin['symbol'] for item in tickers) and all(item not in coin['symbol'] for item in FIATS):
initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()}
else:
if PAIR_WITH in coin['symbol'] and all(item not in coin['symbol'] for item in FIATS):
initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()}
if add_to_historical:
hsp_head += 1
if hsp_head == RECHECK_INTERVAL:
hsp_head = 0
historical_prices[hsp_head] = initial_price
return initial_price
def wait_for_price():
    '''Waits until the required amount of time has passed since the last
    price snapshot before reading the current prices again'''
global historical_prices, hsp_head, volatility_cooloff
volatile_coins = {}
externals = {}
coins_up = 0
coins_down = 0
coins_unchanged = 0
pause_bot()
# get first element from the dictionary
firstcoin = next(iter(historical_prices[hsp_head]))
#BBif historical_prices[hsp_head]['BNB' + PAIR_WITH]['time'] > datetime.now() - timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)):
if historical_prices[hsp_head][firstcoin]['time'] > datetime.now() - timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)):
# sleep for exactly the amount of time required
#BBtime.sleep((timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)) - (datetime.now() - historical_prices[hsp_head]['BNB' + PAIR_WITH]['time'])).total_seconds())
time.sleep((timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)) - (datetime.now() - historical_prices[hsp_head][firstcoin]['time'])).total_seconds())
# retrieve latest prices
#last_price = get_price()
last_price = wrap_get_price()
# calculate the difference in prices
for coin in historical_prices[hsp_head]:
# minimum and maximum prices over time period
try:
min_price = min(historical_prices, key = lambda x: float("inf") if x is None else float(x[coin]['price']))
max_price = max(historical_prices, key = lambda x: -1 if x is None else float(x[coin]['price']))
threshold_check = (-1.0 if min_price[coin]['time'] > max_price[coin]['time'] else 1.0) * (float(max_price[coin]['price']) - float(min_price[coin]['price'])) / float(min_price[coin]['price']) * 100
#if coin == "BTCUSDT" or coin == "ETHUSDT":
#print(f"coin: {coin} min_price: {min_price[coin]['price']} max_price: {max_price[coin]['price']}")
except KeyError:
if DEBUG:
print(f"wait_for_price(): Got a KeyError for {coin}. If this coin was just added to your tickers file, no need to worry about this KeyError.")
pass
# FOR NEGATIVE PRICE CHECKING
#if threshold_check>0 and CHANGE_IN_PRICE<0: threshold_check=0
        # each coin that gained more than CHANGE_IN_PRICE is added to the volatile_coins dict, as long as the TRADE_SLOTS limit has not been reached.
# FOR NEGATIVE PRICE CHECKING
#if abs(threshold_check) > abs(CHANGE_IN_PRICE):
if threshold_check > CHANGE_IN_PRICE:
coins_up +=1
if coin not in volatility_cooloff:
volatility_cooloff[coin] = datetime.now() - timedelta(minutes=TIME_DIFFERENCE)
# volatility_cooloff[coin] = datetime.now() - timedelta(minutes=COOLOFF_PERIOD)
# only include coin as volatile if it hasn't been picked up in the last TIME_DIFFERENCE minutes already
if datetime.now() >= volatility_cooloff[coin] + timedelta(minutes=TIME_DIFFERENCE):
#if datetime.now() >= volatility_cooloff[coin] + timedelta(minutes=COOLOFF_PERIOD):
volatility_cooloff[coin] = datetime.now()
if len(coins_bought) + len(volatile_coins) < TRADE_SLOTS or TRADE_SLOTS == 0:
volatile_coins[coin] = round(threshold_check, 3)
print(f'{coin} has gained {volatile_coins[coin]}% within the last {TIME_DIFFERENCE} minutes, purchasing ${TRADE_TOTAL} {PAIR_WITH} of {coin}!')
else:
print(f'{txcolors.WARNING}{coin} has gained {round(threshold_check, 3)}% within the last {TIME_DIFFERENCE} minutes, but you are using all available trade slots!{txcolors.DEFAULT}')
#else:
#if len(coins_bought) == TRADE_SLOTS:
# print(f'{txcolors.WARNING}{coin} has gained {round(threshold_check, 3)}% within the last {TIME_DIFFERENCE} minutes, but you are using all available trade slots!{txcolors.DEFAULT}')
#else:
# print(f'{txcolors.WARNING}{coin} has gained {round(threshold_check, 3)}% within the last {TIME_DIFFERENCE} minutes, but failed cool off period of {COOLOFF_PERIOD} minutes! Curr COP is {volatility_cooloff[coin] + timedelta(minutes=COOLOFF_PERIOD)}{txcolors.DEFAULT}')
elif threshold_check < CHANGE_IN_PRICE:
coins_down +=1
else:
coins_unchanged +=1
# Disabled until fix
#print(f'Up: {coins_up} Down: {coins_down} Unchanged: {coins_unchanged}')
# Here goes new code for external signalling
externals = buy_external_signals()
exnumber = 0
for excoin in externals:
if excoin not in volatile_coins and excoin not in coins_bought and \
(len(coins_bought) + len(volatile_coins)) < TRADE_SLOTS:
#(len(coins_bought) + exnumber + len(volatile_coins)) < TRADE_SLOTS:
volatile_coins[excoin] = 1
exnumber +=1
print(f"External signal received on {excoin}, purchasing ${TRADE_TOTAL} {PAIR_WITH} value of {excoin}!")
balance_report(last_price)
return volatile_coins, len(volatile_coins), historical_prices[hsp_head]
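# Worked example of threshold_check above (hypothetical numbers): if a coin's
# minimum over the window was 100 and its maximum 105, and the maximum was
# recorded after the minimum, threshold_check = (105 - 100) / 100 * 100 = 5,
# i.e. a 5% rise; if the maximum came first (price falling), the leading sign
# flips and threshold_check = -5. Only coins whose value exceeds
# CHANGE_IN_PRICE are treated as volatile, subject to free trade slots and the
# per-coin cool-off.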
def buy_external_signals():
external_list = {}
signals = {}
# check directory and load pairs from files into external_list
signals = glob.glob("signals/*.buy")
for filename in signals:
for line in open(filename):
symbol = line.strip()
external_list[symbol] = symbol
try:
os.remove(filename)
except:
if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file{txcolors.DEFAULT}')
return external_list
def sell_external_signals():
external_list = {}
signals = {}
# check directory and load pairs from files into external_list
signals = glob.glob("signals/*.sell")
for filename in signals:
for line in open(filename):
symbol = line.strip()
external_list[symbol] = symbol
if DEBUG: print(f'{symbol} added to sell_external_signals() list')
try:
os.remove(filename)
except:
if DEBUG: print(f'{txcolors.WARNING}Could not remove external SELL signalling file{txcolors.DEFAULT}')
return external_list
def balance_report(last_price):
global trade_wins, trade_losses, session_profit_incfees_perc, session_profit_incfees_total
unrealised_session_profit_incfees_perc = 0
unrealised_session_profit_incfees_total = 0
BUDGET = TRADE_SLOTS * TRADE_TOTAL
exposure_calcuated = 0
for coin in list(coins_bought):
LastPrice = float(last_price[coin]['price'])
sellFee = (LastPrice * (TRADING_FEE/100))
BuyPrice = float(coins_bought[coin]['bought_at'])
buyFee = (BuyPrice * (TRADING_FEE/100))
exposure_calcuated = exposure_calcuated + round(float(coins_bought[coin]['bought_at']) * float(coins_bought[coin]['volume']),0)
#PriceChangeIncFees_Total = float(((LastPrice+sellFee) - (BuyPrice+buyFee)) * coins_bought[coin]['volume'])
PriceChangeIncFees_Total = float(((LastPrice-sellFee) - (BuyPrice+buyFee)) * coins_bought[coin]['volume'])
# unrealised_session_profit_incfees_perc = float(unrealised_session_profit_incfees_perc + PriceChangeIncFees_Perc)
unrealised_session_profit_incfees_total = float(unrealised_session_profit_incfees_total + PriceChangeIncFees_Total)
unrealised_session_profit_incfees_perc = (unrealised_session_profit_incfees_total / BUDGET) * 100
DECIMALS = int(decimals())
# CURRENT_EXPOSURE = round((TRADE_TOTAL * len(coins_bought)), DECIMALS)
CURRENT_EXPOSURE = round(exposure_calcuated, 0)
INVESTMENT_TOTAL = round((TRADE_TOTAL * TRADE_SLOTS), DECIMALS)
# truncating some of the above values to the correct decimal places before printing
WIN_LOSS_PERCENT = 0
if (trade_wins > 0) and (trade_losses > 0):
WIN_LOSS_PERCENT = round((trade_wins / (trade_wins+trade_losses)) * 100, 2)
if (trade_wins > 0) and (trade_losses == 0):
WIN_LOSS_PERCENT = 100
print(f'')
print(f'--------')
print(f"STARTED : {str(bot_started_datetime).split('.')[0]} | Running for: {str(datetime.now() - bot_started_datetime).split('.')[0]}")
print(f'CURRENT HOLDS : {len(coins_bought)}/{TRADE_SLOTS} ({float(CURRENT_EXPOSURE):g}/{float(INVESTMENT_TOTAL):g} {PAIR_WITH})')
print(f'Buying Paused : {bot_paused}')
print(f'')
print(f'SESSION PROFIT (Inc Fees)')
print(f'Realised : {txcolors.SELL_PROFIT if session_profit_incfees_perc > 0. else txcolors.SELL_LOSS}{session_profit_incfees_perc:.4f}% Est:${session_profit_incfees_total:.4f} {PAIR_WITH}{txcolors.DEFAULT}')
print(f'Unrealised : {txcolors.SELL_PROFIT if unrealised_session_profit_incfees_perc > 0. else txcolors.SELL_LOSS}{unrealised_session_profit_incfees_perc:.4f}% Est:${unrealised_session_profit_incfees_total:.4f} {PAIR_WITH}{txcolors.DEFAULT}')
print(f' Total : {txcolors.SELL_PROFIT if (session_profit_incfees_perc + unrealised_session_profit_incfees_perc) > 0. else txcolors.SELL_LOSS}{session_profit_incfees_perc + unrealised_session_profit_incfees_perc:.4f}% Est:${session_profit_incfees_total+unrealised_session_profit_incfees_total:.4f} {PAIR_WITH}{txcolors.DEFAULT}')
print(f'')
print(f'ALL TIME DATA :')
print(f'Profit : {txcolors.SELL_PROFIT if historic_profit_incfees_perc > 0. else txcolors.SELL_LOSS}{historic_profit_incfees_perc:.4f}% Est:${historic_profit_incfees_total:.4f} {PAIR_WITH}{txcolors.DEFAULT}')
print(f'Completed Trades: {trade_wins+trade_losses} (Wins:{trade_wins} Losses:{trade_losses})')
print(f'Win Ratio : {float(WIN_LOSS_PERCENT):g}%')
print(f'--------')
print(f'')
#msg1 = str(bot_started_datetime) + " | " + str(datetime.now() - bot_started_datetime)
msg1 = str(datetime.now()).split('.')[0]
msg2 = " | " + str(len(coins_bought)) + "/" + str(TRADE_SLOTS) + " | PBOT: " + str(bot_paused)
msg2 = msg2 + ' SPR%: ' + str(round(session_profit_incfees_perc,2)) + ' SPR$: ' + str(round(session_profit_incfees_total,4))
msg2 = msg2 + ' SPU%: ' + str(round(unrealised_session_profit_incfees_perc,2)) + ' SPU$: ' + str(round(unrealised_session_profit_incfees_total,4))
msg2 = msg2 + ' SPT%: ' + str(round(session_profit_incfees_perc + unrealised_session_profit_incfees_perc,2)) + ' SPT$: ' + str(round(session_profit_incfees_total+unrealised_session_profit_incfees_total,4))
msg2 = msg2 + ' ATP%: ' + str(round(historic_profit_incfees_perc,2)) + ' ATP$: ' + str(round(historic_profit_incfees_total,4))
msg2 = msg2 + ' CTT: ' + str(trade_wins+trade_losses) + ' CTW: ' + str(trade_wins) + ' CTL: ' + str(trade_losses) + ' CTWR%: ' + str(round(WIN_LOSS_PERCENT,2))
msg_discord_balance(msg1, msg2)
history_log(session_profit_incfees_perc, session_profit_incfees_total, unrealised_session_profit_incfees_perc, unrealised_session_profit_incfees_total, session_profit_incfees_perc + unrealised_session_profit_incfees_perc, session_profit_incfees_total+unrealised_session_profit_incfees_total, historic_profit_incfees_perc, historic_profit_incfees_total, trade_wins+trade_losses, trade_wins, trade_losses, WIN_LOSS_PERCENT)
return msg1 + msg2
def history_log(sess_profit_perc, sess_profit, sess_profit_perc_unreal, sess_profit_unreal, sess_profit_perc_total, sess_profit_total, alltime_profit_perc, alltime_profit, total_trades, won_trades, lost_trades, winloss_ratio):
global last_history_log_date
time_between_insertion = datetime.now() - last_history_log_date
# only log balance to log file once every 60 seconds
if time_between_insertion.seconds > 60:
last_history_log_date = datetime.now()
timestamp = datetime.now().strftime("%y-%m-%d %H:%M:%S")
if not os.path.exists(HISTORY_LOG_FILE):
with open(HISTORY_LOG_FILE,'a+') as f:
f.write('Datetime\tCoins Holding\tTrade Slots\tPausebot Active\tSession Profit %\tSession Profit $\tSession Profit Unrealised %\tSession Profit Unrealised $\tSession Profit Total %\tSession Profit Total $\tAll Time Profit %\tAll Time Profit $\tTotal Trades\tWon Trades\tLost Trades\tWin Loss Ratio\n')
with open(HISTORY_LOG_FILE,'a+') as f:
f.write(f'{timestamp}\t{len(coins_bought)}\t{TRADE_SLOTS}\t{str(bot_paused)}\t{str(round(sess_profit_perc,2))}\t{str(round(sess_profit,4))}\t{str(round(sess_profit_perc_unreal,2))}\t{str(round(sess_profit_unreal,4))}\t{str(round(sess_profit_perc_total,2))}\t{str(round(sess_profit_total,4))}\t{str(round(alltime_profit_perc,2))}\t{str(round(alltime_profit,4))}\t{str(total_trades)}\t{str(won_trades)}\t{str(lost_trades)}\t{str(winloss_ratio)}\n')
def msg_discord_balance(msg1, msg2):
global last_msg_discord_balance_date, discord_msg_balance_data
time_between_insertion = datetime.now() - last_msg_discord_balance_date
    # only put the balance message to discord once every 60 seconds, and only if the balance information has changed since last time
if time_between_insertion.seconds > 60:
if msg2 != discord_msg_balance_data:
msg_discord(msg1 + msg2)
discord_msg_balance_data = msg2
else:
# ping msg to know the bot is still running
msg_discord(".")
def msg_discord(msg):
message = msg + '\n\n'
if MSG_DISCORD:
        #Webhook of my channel. Click on Edit Channel --> Webhooks --> Create Webhook
mUrl = "https://discordapp.com/api/webhooks/"+DISCORD_WEBHOOK
data = {"content": message}
response = requests.post(mUrl, json=data)
#BB
# print(response.content)
def pause_bot():
'''Pause the script when external indicators detect a bearish trend in the market'''
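    # The pause mechanism is file based: an external module creates signals/pausebot.pause to pause
    # buying and deletes it to resume. A minimal sketch of what such a module might do (hypothetical):
    #   open('signals/pausebot.pause', 'a').close()   # market looks bearish -> pause buying
    #   os.remove('signals/pausebot.pause')           # conditions improve   -> resume buying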
global bot_paused, session_profit_incfees_perc, hsp_head, session_profit_incfees_total
# start counting for how long the bot has been paused
start_time = time.perf_counter()
while os.path.exists("signals/pausebot.pause"):
# do NOT accept any external signals to buy while in pausebot mode
remove_external_signals('buy')
if bot_paused == False:
print(f'{txcolors.WARNING}Buying paused due to negative market conditions, stop loss and take profit will continue to work...{txcolors.DEFAULT}')
msg = str(datetime.now()) + ' | PAUSEBOT. Buying paused due to negative market conditions, stop loss and take profit will continue to work.'
msg_discord(msg)
bot_paused = True
# Sell function needs to work even while paused
coins_sold = sell_coins()
remove_from_portfolio(coins_sold)
last_price = get_price(True)
# pausing here
if hsp_head == 1:
# print(f'Paused...Session profit: {session_profit_incfees_perc:.2f}% Est: ${session_profit_incfees_total:.{decimals()}f} {PAIR_WITH}')
balance_report(last_price)
time.sleep((TIME_DIFFERENCE * 60) / RECHECK_INTERVAL)
else:
# stop counting the pause time
stop_time = time.perf_counter()
time_elapsed = timedelta(seconds=int(stop_time-start_time))
        # resume the bot and set bot_paused to False
if bot_paused == True:
print(f'{txcolors.WARNING}Resuming buying due to positive market conditions, total sleep time: {time_elapsed}{txcolors.DEFAULT}')
msg = str(datetime.now()) + ' | PAUSEBOT. Resuming buying due to positive market conditions, total sleep time: ' + str(time_elapsed)
msg_discord(msg)
bot_paused = False
return
def convert_volume():
    '''Converts the volume given in TRADE_TOTAL from PAIR_WITH (e.g. USDT) to each coin's volume'''
volatile_coins, number_of_coins, last_price = wait_for_price()
lot_size = {}
volume = {}
for coin in volatile_coins:
# Find the correct step size for each coin
# max accuracy for BTC for example is 6 decimal points
# while XRP is only 1
try:
info = client.get_symbol_info(coin)
step_size = info['filters'][2]['stepSize']
lot_size[coin] = step_size.index('1') - 1
if lot_size[coin] < 0:
lot_size[coin] = 0
except:
pass
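        # Illustrative (hypothetical exchange values): a stepSize string of "0.00100000" gives
        # step_size.index('1') - 1 == 3, so the volume is kept to 3 decimal places; "1.00000000"
        # gives -1, which is clamped to 0 above (whole units only).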
# calculate the volume in coin from TRADE_TOTAL in PAIR_WITH (default)
volume[coin] = float(TRADE_TOTAL / float(last_price[coin]['price']))
# define the volume with the correct step size
if coin not in lot_size:
# original code: volume[coin] = float('{:.1f}'.format(volume[coin]))
volume[coin] = int(volume[coin])
else:
# if lot size has 0 decimal points, make the volume an integer
if lot_size[coin] == 0:
volume[coin] = int(volume[coin])
else:
#volume[coin] = float('{:.{}f}'.format(volume[coin], lot_size[coin]))
volume[coin] = truncate(volume[coin], lot_size[coin])
return volume, last_price
def buy():
'''Place Buy market orders for each volatile coin found'''
volume, last_price = convert_volume()
orders = {}
for coin in volume:
if coin not in coins_bought:
print(f"{txcolors.BUY}Preparing to buy {volume[coin]} of {coin} @ ${last_price[coin]['price']}{txcolors.DEFAULT}")
msg1 = str(datetime.now()) + ' | BUY: ' + coin + '. V:' + str(volume[coin]) + ' P$:' + str(last_price[coin]['price'])
msg_discord(msg1)
if TEST_MODE:
orders[coin] = [{
'symbol': coin,
'orderId': 0,
'time': datetime.now().timestamp()
}]
# Log trade
#if LOG_TRADES:
write_log(f"\tBuy\t{coin}\t{volume[coin]}\t{last_price[coin]['price']}\t{PAIR_WITH}")
write_signallsell(coin.removesuffix(PAIR_WITH))
continue
# try to create a real order if the test orders did not raise an exception
try:
order_details = client.create_order(
symbol = coin,
side = 'BUY',
type = 'MARKET',
quantity = volume[coin]
)
# error handling here in case position cannot be placed
except Exception as e:
print(f'buy() exception: {e}')
# run the else block if the position has been placed and return order info
else:
orders[coin] = client.get_all_orders(symbol=coin, limit=1)
# binance sometimes returns an empty list, the code will wait here until binance returns the order
while orders[coin] == []:
print('Binance is being slow in returning the order, calling the API again...')
orders[coin] = client.get_all_orders(symbol=coin, limit=1)
time.sleep(1)
else:
print('Order returned, saving order to file')
if not TEST_MODE:
orders[coin] = extract_order_data(order_details)
write_log(f"\tBuy\t{coin}\t{orders[coin]['volume']}\t{orders[coin]['avgPrice']}\t{PAIR_WITH}")
else:
write_log(f"\tBuy\t{coin}\t{volume[coin]}\t{last_price[coin]['price']}\t{PAIR_WITH}")
write_signallsell(coin)
else:
print(f'Signal detected, but there is already an active trade on {coin}')
return orders, last_price, volume
def sell_coins(tpsl_override = False):
'''sell coins that have reached the STOP LOSS or TAKE PROFIT threshold'''
global hsp_head, session_profit_incfees_perc, session_profit_incfees_total, coin_order_id, trade_wins, trade_losses, historic_profit_incfees_perc, historic_profit_incfees_total, sell_all_coins
externals = sell_external_signals()
last_price = get_price(False) # don't populate rolling window
#last_price = get_price(add_to_historical=True) # don't populate rolling window
coins_sold = {}
BUDGET = TRADE_TOTAL * TRADE_SLOTS
# table stuff
my_table = PrettyTable()
my_table.field_names = ["Symbol", "Volume", "Bought At", "Now At", "TP %", "SL %", "Change %", "Profit $", "Time Held"]
my_table.align["Symbol"] = "l"
my_table.align["Volume"] = "r"
my_table.align["Bought At"] = "r"
my_table.align["Now At"] = "r"
my_table.align["TP %"] = "r"
my_table.align["SL %"] = "r"
my_table.align["Change %"] = "r"
my_table.align["Profit $"] = "r"
my_table.align["Time Held"] = "l"
for coin in list(coins_bought):
time_held = timedelta(seconds=datetime.now().timestamp()-coins_bought[coin]['timestamp'])
#if HODLMODE_ENABLED and (time_held >= HODLMODE_TIME_THRESHOLD):
# move_coin_to_hodl(coin)
# continue
LastPrice = float(last_price[coin]['price'])
sellFee = (LastPrice * (TRADING_FEE/100))
sellFeeTotal = (coins_bought[coin]['volume'] * LastPrice) * (TRADING_FEE/100)
LastPriceLessFees = LastPrice - sellFee
BuyPrice = float(coins_bought[coin]['bought_at'])
buyFee = (BuyPrice * (TRADING_FEE/100))
buyFeeTotal = (coins_bought[coin]['volume'] * BuyPrice) * (TRADING_FEE/100)
BuyPricePlusFees = BuyPrice + buyFee
ProfitAfterFees = LastPriceLessFees - BuyPricePlusFees
PriceChange_Perc = float((LastPrice - BuyPrice) / BuyPrice * 100)
#PriceChangeIncFees_Perc = float(((LastPrice+sellFee) - (BuyPrice+buyFee)) / (BuyPrice+buyFee) * 100)
PriceChangeIncFees_Perc = float(((LastPrice-sellFee) - (BuyPrice+buyFee)) / (BuyPrice+buyFee) * 100)
#PriceChangeIncFees_Unit = float((LastPrice+sellFee) - (BuyPrice+buyFee))
PriceChangeIncFees_Unit = float((LastPrice-sellFee) - (BuyPrice+buyFee))
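        # Worked example with hypothetical numbers: TRADING_FEE = 0.075, BuyPrice = 100, LastPrice = 102
        # -> buyFee = 0.075, sellFee = 0.0765, and
        #    PriceChangeIncFees_Perc = ((102 - 0.0765) - (100 + 0.075)) / (100 + 0.075) * 100 ≈ 1.85%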
# define stop loss and take profit
TP = float(coins_bought[coin]['bought_at']) + ((float(coins_bought[coin]['bought_at']) * (coins_bought[coin]['take_profit']) / 100))
SL = float(coins_bought[coin]['bought_at']) + ((float(coins_bought[coin]['bought_at']) * (coins_bought[coin]['stop_loss']) / 100))
        # check that the price is above the take profit and readjust SL and TP accordingly if trailing stop loss is used
#if LastPrice > TP and USE_TRAILING_STOP_LOSS and not sell_all_coins and not tpsl_override:
if LastPriceLessFees > TP and USE_TRAILING_STOP_LOSS and not sell_all_coins and not tpsl_override:
# increasing TP by TRAILING_TAKE_PROFIT (essentially next time to readjust SL)
#if PriceChange_Perc >= 0.8:
if PriceChangeIncFees_Perc >= 0.8:
# price has changed by 0.8% or greater, a big change. Make the STOP LOSS trail closely to the TAKE PROFIT
# so you don't lose this increase in price if it falls back
#coins_bought[coin]['take_profit'] = PriceChange_Perc + TRAILING_TAKE_PROFIT
coins_bought[coin]['take_profit'] = PriceChangeIncFees_Perc + TRAILING_TAKE_PROFIT
coins_bought[coin]['stop_loss'] = coins_bought[coin]['take_profit'] - TRAILING_STOP_LOSS
else:
# price has changed by less than 0.8%, a small change. Make the STOP LOSS trail loosely to the TAKE PROFIT
# so you don't get stopped out of the trade prematurely
coins_bought[coin]['stop_loss'] = coins_bought[coin]['take_profit'] - TRAILING_STOP_LOSS
#coins_bought[coin]['take_profit'] = PriceChange_Perc + TRAILING_TAKE_PROFIT
coins_bought[coin]['take_profit'] = PriceChangeIncFees_Perc + TRAILING_TAKE_PROFIT
# we've got a negative stop loss - not good, we don't want this.
if coins_bought[coin]['stop_loss'] <= 0:
coins_bought[coin]['stop_loss'] = coins_bought[coin]['take_profit'] * .25
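            # Worked example with hypothetical settings (TRAILING_TAKE_PROFIT = 0.1, TRAILING_STOP_LOSS = 0.4):
            # a coin up 1.2% (>= 0.8) gets take_profit = 1.3 and stop_loss = 0.9, so a pull-back of about
            # 0.3 percentage points from here triggers a sell while most of the gain is kept.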
#if DEBUG: print(f"{coin} TP reached, adjusting TP {coins_bought[coin]['take_profit']:.{decimals()}f} and SL {coins_bought[coin]['stop_loss']:.{decimals()}f} accordingly to lock-in profit")
my_table.add_row([f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{coin + ' TP up!'}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{coins_bought[coin]['volume']:.6f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{BuyPrice:.6f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{LastPrice:.6f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{coins_bought[coin]['take_profit']:.4f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{coins_bought[coin]['stop_loss']:.4f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{PriceChangeIncFees_Perc:.4f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{((float(coins_bought[coin]['volume'])*float(coins_bought[coin]['bought_at']))*PriceChangeIncFees_Perc)/100:.6f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{str(time_held).split('.')[0]}{txcolors.DEFAULT}"])
continue
# check that the price is below the stop loss or above take profit (if trailing stop loss not used) and sell if this is the case
sellCoin = False
sell_reason = ""
if SELL_ON_SIGNAL_ONLY:
# only sell if told to by external signal
if coin in externals:
sellCoin = True
sell_reason = 'External Sell Signal'
else:
#if LastPrice < SL:
if LastPriceLessFees < SL:
sellCoin = True
if USE_TRAILING_STOP_LOSS:
#if PriceChange_Perc >= 0:PriceChangeIncFees_Perc
if PriceChangeIncFees_Perc >= 0:
sell_reason = "TTP " + str(SL) + " reached"
else:
sell_reason = "TSL " + str(SL) + " reached"
else:
sell_reason = "SL " + str(SL) + " reached"
#if LastPrice > TP:
if LastPriceLessFees > TP:
sellCoin = True
sell_reason = "TP " + str(TP) + " reached"
if coin in externals:
sellCoin = True
sell_reason = 'External Sell Signal'
if sell_all_coins:
sellCoin = True
sell_reason = 'Sell All Coins'
if tpsl_override:
sellCoin = True
sell_reason = 'Session TPSL Override reached'
if sellCoin:
print(f"{txcolors.SELL_PROFIT if PriceChangeIncFees_Perc >= 0. else txcolors.SELL_LOSS}Sell: {coins_bought[coin]['volume']} of {coin} | {sell_reason} | ${float(LastPrice):g} - ${float(BuyPrice):g} | Profit: {PriceChangeIncFees_Perc:.2f}% Est: {((float(coins_bought[coin]['volume'])*float(coins_bought[coin]['bought_at']))*PriceChangeIncFees_Perc)/100:.{decimals()}f} {PAIR_WITH} (Inc Fees){txcolors.DEFAULT}")
msg1 = str(datetime.now()) + '| SELL: ' + coin + '. R:' + sell_reason + ' P%:' + str(round(PriceChangeIncFees_Perc,2)) + ' P$:' + str(round(((float(coins_bought[coin]['volume'])*float(coins_bought[coin]['bought_at']))*PriceChangeIncFees_Perc)/100,4))
msg_discord(msg1)
# try to create a real order
try:
if not TEST_MODE:
#lot_size = coins_bought[coin]['step_size']
#if lot_size == 0:
# lot_size = 1
#lot_size = lot_size.index('1') - 1
#if lot_size < 0:
# lot_size = 0
order_details = client.create_order(
symbol = coin,
side = 'SELL',
type = 'MARKET',
quantity = coins_bought[coin]['volume']
)
# error handling here in case position cannot be placed
except Exception as e:
#if repr(e).upper() == "APIERROR(CODE=-1111): PRECISION IS OVER THE MAXIMUM DEFINED FOR THIS ASSET.":
print(f"sell_coins() Exception occured on selling the coin! Coin: {coin}\nSell Volume coins_bought: {coins_bought[coin]['volume']}\nPrice:{LastPrice}\nException: {e}")
# run the else block if coin has been sold and create a dict for each coin sold
else:
if not TEST_MODE:
coins_sold[coin] = extract_order_data(order_details)
LastPrice = coins_sold[coin]['avgPrice']
sellFee = coins_sold[coin]['tradeFeeUnit']
coins_sold[coin]['orderid'] = coins_bought[coin]['orderid']
priceChange = float((LastPrice - BuyPrice) / BuyPrice * 100)
# update this from the actual Binance sale information
#PriceChangeIncFees_Unit = float((LastPrice+sellFee) - (BuyPrice+buyFee))
PriceChangeIncFees_Unit = float((LastPrice-sellFee) - (BuyPrice+buyFee))
else:
coins_sold[coin] = coins_bought[coin]
# prevent system from buying this coin for the next TIME_DIFFERENCE minutes
volatility_cooloff[coin] = datetime.now()
if DEBUG:
print(f"sell_coins() | Coin: {coin} | Sell Volume: {coins_bought[coin]['volume']} | Price:{LastPrice}")
# Log trade
#BB profit = ((LastPrice - BuyPrice) * coins_sold[coin]['volume']) * (1-(buyFee + sellFeeTotal))
profit_incfees_total = coins_sold[coin]['volume'] * PriceChangeIncFees_Unit
#write_log(f"Sell: {coins_sold[coin]['volume']} {coin} - {BuyPrice} - {LastPrice} Profit: {profit_incfees_total:.{decimals()}f} {PAIR_WITH} ({PriceChange_Perc:.2f}%)")
#write_log(f"\tSell\t{coin}\t{coins_sold[coin]['volume']}\t{BuyPrice}\t{PAIR_WITH}\t{LastPrice}\t{profit_incfees_total:.{decimals()}f}\t{PriceChange_Perc:.2f}\t{sell_reason}")
write_log(f"\tSell\t{coin}\t{coins_sold[coin]['volume']}\t{BuyPrice}\t{PAIR_WITH}\t{LastPrice}\t{profit_incfees_total:.{decimals()}f}\t{PriceChangeIncFees_Perc:.2f}\t{sell_reason}")
#this is good
session_profit_incfees_total = session_profit_incfees_total + profit_incfees_total
session_profit_incfees_perc = session_profit_incfees_perc + ((profit_incfees_total/BUDGET) * 100)
historic_profit_incfees_total = historic_profit_incfees_total + profit_incfees_total
historic_profit_incfees_perc = historic_profit_incfees_perc + ((profit_incfees_total/BUDGET) * 100)
#TRADE_TOTAL*PriceChangeIncFees_Perc)/100
#if (LastPrice+sellFee) >= (BuyPrice+buyFee):
if (LastPrice-sellFee) >= (BuyPrice+buyFee):
trade_wins += 1
else:
trade_losses += 1
update_bot_stats()
if not sell_all_coins:
# within sell_all_coins, it will print display to screen
balance_report(last_price)
# sometimes get "rate limited" errors from Binance if we try to sell too many coins at once
# so wait 1 second in between sells
time.sleep(1)
continue
# no action; print once every TIME_DIFFERENCE
if hsp_head == 1:
if len(coins_bought) > 0:
#print(f"Holding: {coins_bought[coin]['volume']} of {coin} | {LastPrice} - {BuyPrice} | Profit: {txcolors.SELL_PROFIT if PriceChangeIncFees_Perc >= 0. else txcolors.SELL_LOSS}{PriceChangeIncFees_Perc:.4f}% Est: ({((float(coins_bought[coin]['volume'])*float(coins_bought[coin]['bought_at']))*PriceChangeIncFees_Perc)/100:.{decimals()}f} {PAIR_WITH}){txcolors.DEFAULT}")
my_table.add_row([f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{coin}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{coins_bought[coin]['volume']:.6f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{BuyPrice:.6f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{LastPrice:.6f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{coins_bought[coin]['take_profit']:.4f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{coins_bought[coin]['stop_loss']:.4f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{PriceChangeIncFees_Perc:.4f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{((float(coins_bought[coin]['volume'])*float(coins_bought[coin]['bought_at']))*PriceChangeIncFees_Perc)/100:.6f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{str(time_held).split('.')[0]}{txcolors.DEFAULT}"])
my_table.sortby = 'Change %'
#my_table.reversesort = True
if len(coins_bought) == 0:
if hsp_head == 1:
print(f"No trade slots are currently in use")
else:
if len(my_table._rows) > 0: print_table(my_table)
# if tpsl_override: is_bot_running = False
return coins_sold
def extract_order_data(order_details):
global TRADING_FEE, STOP_LOSS, TAKE_PROFIT
transactionInfo = {}
# This code is from GoranJovic - thank you!
#
# adding order fill extractions here
#
# just to explain what I am doing here:
# Market orders are not always filled at one price, we need to find the averages of all 'parts' (fills) of this order.
#
# reset other variables to 0 before use
FILLS_TOTAL = 0
FILLS_QTY = 0
FILLS_FEE = 0
BNB_WARNING = 0
# loop through each 'fill':
for fills in order_details['fills']:
FILL_PRICE = float(fills['price'])
FILL_QTY = float(fills['qty'])
FILLS_FEE += float(fills['commission'])
# check if the fee was in BNB. If not, log a nice warning:
if (fills['commissionAsset'] != 'BNB') and (TRADING_FEE == 0.075) and (BNB_WARNING == 0):
print(f"WARNING: BNB not used for trading fee, please enable it in Binance!")
BNB_WARNING += 1
# quantity of fills * price
FILLS_TOTAL += (FILL_PRICE * FILL_QTY)
# add to running total of fills quantity
FILLS_QTY += FILL_QTY
# increase fills array index by 1
# calculate average fill price:
FILL_AVG = (FILLS_TOTAL / FILLS_QTY)
#tradeFeeApprox = (float(FILLS_QTY) * float(FILL_AVG)) * (TRADING_FEE/100)
# Olorin Sledge: I only want fee at the unit level, not the total level
tradeFeeApprox = float(FILL_AVG) * (TRADING_FEE/100)
# the volume size is sometimes outside of precision, correct it
try:
info = client.get_symbol_info(order_details['symbol'])
step_size = info['filters'][2]['stepSize']
lot_size = step_size.index('1') - 1
if lot_size <= 0:
FILLS_QTY = int(FILLS_QTY)
else:
FILLS_QTY = truncate(FILLS_QTY, lot_size)
except Exception as e:
print(f"extract_order_data(): Exception getting coin {order_details['symbol']} step size! Exception: {e}")
# create object with received data from Binance
transactionInfo = {
'symbol': order_details['symbol'],
'orderId': order_details['orderId'],
'timestamp': order_details['transactTime'],
'avgPrice': float(FILL_AVG),
'volume': float(FILLS_QTY),
'tradeFeeBNB': float(FILLS_FEE),
'tradeFeeUnit': tradeFeeApprox,
}
return transactionInfo
def check_total_session_profit(coins_bought, last_price):
global is_bot_running, session_tpsl_override_msg
unrealised_session_profit_incfees_total = 0
BUDGET = TRADE_SLOTS * TRADE_TOTAL
for coin in list(coins_bought):
LastPrice = float(last_price[coin]['price'])
sellFee = (LastPrice * (TRADING_FEE/100))
BuyPrice = float(coins_bought[coin]['bought_at'])
buyFee = (BuyPrice * (TRADING_FEE/100))
#PriceChangeIncFees_Total = float(((LastPrice+sellFee) - (BuyPrice+buyFee)) * coins_bought[coin]['volume'])
PriceChangeIncFees_Total = float(((LastPrice-sellFee) - (BuyPrice+buyFee)) * coins_bought[coin]['volume'])
unrealised_session_profit_incfees_total = float(unrealised_session_profit_incfees_total + PriceChangeIncFees_Total)
allsession_profits_perc = session_profit_incfees_perc + ((unrealised_session_profit_incfees_total / BUDGET) * 100)
if DEBUG: print(f'Session Override SL Feature: ASPP={allsession_profits_perc} STP {SESSION_TAKE_PROFIT} SSL {SESSION_STOP_LOSS}')
if allsession_profits_perc >= float(SESSION_TAKE_PROFIT):
session_tpsl_override_msg = "Session TP Override target of " + str(SESSION_TAKE_PROFIT) + "% met. Sell all coins now!"
is_bot_running = False
if allsession_profits_perc <= float(SESSION_STOP_LOSS):
session_tpsl_override_msg = "Session SL Override target of " + str(SESSION_STOP_LOSS) + "% met. Sell all coins now!"
is_bot_running = False
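    # Illustrative example with hypothetical numbers: TRADE_SLOTS = 5, TRADE_TOTAL = 100 (BUDGET = 500),
    # realised session profit of +6 and unrealised +4 give allsession_profits_perc = 2.0, so with
    # SESSION_TAKE_PROFIT = 2 the override fires and the main loop sells all coins.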
def update_portfolio(orders, last_price, volume):
'''add every coin bought to our portfolio for tracking/selling later'''
# print(orders)
for coin in orders:
try:
coin_step_size = float(next(
filter(lambda f: f['filterType'] == 'LOT_SIZE', client.get_symbol_info(orders[coin][0]['symbol'])['filters'])
)['stepSize'])
except Exception as ExStepSize:
coin_step_size = .1
if not TEST_MODE:
coins_bought[coin] = {
'symbol': orders[coin]['symbol'],
'orderid': orders[coin]['orderId'],
'timestamp': orders[coin]['timestamp'],
'bought_at': orders[coin]['avgPrice'],
'volume': orders[coin]['volume'],
'volume_debug': volume[coin],
'buyFeeBNB': orders[coin]['tradeFeeBNB'],
'buyFee': orders[coin]['tradeFeeUnit'] * orders[coin]['volume'],
'stop_loss': -STOP_LOSS,
'take_profit': TAKE_PROFIT,
'step_size': float(coin_step_size),
}
print(f'Order for {orders[coin]["symbol"]} with ID {orders[coin]["orderId"]} placed and saved to file.')
else:
coins_bought[coin] = {
'symbol': orders[coin][0]['symbol'],
'orderid': orders[coin][0]['orderId'],
'timestamp': orders[coin][0]['time'],
'bought_at': last_price[coin]['price'],
'volume': volume[coin],
'stop_loss': -STOP_LOSS,
'take_profit': TAKE_PROFIT,
'step_size': float(coin_step_size),
}
print(f'Order for {orders[coin][0]["symbol"]} with ID {orders[coin][0]["orderId"]} placed and saved to file.')
# save the coins in a json file in the same directory
with open(coins_bought_file_path, 'w') as file:
json.dump(coins_bought, file, indent=4)
def update_bot_stats():
global trade_wins, trade_losses, historic_profit_incfees_perc, historic_profit_incfees_total
bot_stats = {
'total_capital' : str(TRADE_SLOTS * TRADE_TOTAL),
'botstart_datetime' : str(bot_started_datetime),
'historicProfitIncFees_Percent': historic_profit_incfees_perc,
'historicProfitIncFees_Total': historic_profit_incfees_total,
'tradeWins': trade_wins,
'tradeLosses': trade_losses,
}
#save session info for through session portability
with open(bot_stats_file_path, 'w') as file:
json.dump(bot_stats, file, indent=4)
def remove_from_portfolio(coins_sold):
'''Remove coins sold due to SL or TP from portfolio'''
for coin in coins_sold:
# code below created by getsec <3
coins_bought.pop(coin)
with open(coins_bought_file_path, 'w') as file:
json.dump(coins_bought, file, indent=4)
if os.path.exists('signalsell_tickers.txt'):
os.remove('signalsell_tickers.txt')
for coin in coins_bought:
write_signallsell(coin.removesuffix(PAIR_WITH))
def write_log(logline):
timestamp = datetime.now().strftime("%y-%m-%d %H:%M:%S")
if not os.path.exists(LOG_FILE):
with open(LOG_FILE,'a+') as f:
f.write('Datetime\tType\tCoin\tVolume\tBuy Price\tCurrency\tSell Price\tProfit $\tProfit %\tSell Reason\n')
with open(LOG_FILE,'a+') as f:
f.write(timestamp + ' ' + logline + '\n')
def write_signallsell(symbol):
with open('signalsell_tickers.txt','a+') as f:
f.write(f'{symbol}\n')
def remove_external_signals(fileext):
    signals = glob.glob(f'signals/*.{fileext}')
for filename in signals:
for line in open(filename):
try:
os.remove(filename)
except:
if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file {filename}{txcolors.DEFAULT}')
def sell_all(msgreason, session_tspl_ovr = False):
global sell_all_coins
msg_discord(f'{str(datetime.now())} | SELL ALL COINS: {msgreason}')
# stop external signals so no buying/selling/pausing etc can occur
stop_signal_threads()
# sell all coins NOW!
sell_all_coins = True
coins_sold = sell_coins(session_tspl_ovr)
remove_from_portfolio(coins_sold)
# display final info to screen
#last_price = get_price()
last_price = wrap_get_price()
discordmsg = balance_report(last_price)
msg_discord(discordmsg)
def stop_signal_threads():
try:
for signalthread in signalthreads:
print(f'Terminating thread {str(signalthread.name)}')
signalthread.terminate()
except:
pass
def truncate(number, decimals=0):
"""
Returns a value truncated to a specific number of decimal places.
Better than rounding
"""
if not isinstance(decimals, int):
raise TypeError("decimal places must be an integer.")
elif decimals < 0:
raise ValueError("decimal places has to be 0 or more.")
elif decimals == 0:
return math.trunc(number)
factor = 10.0 ** decimals
return math.trunc(number * factor) / factor
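# Illustrative usage (hypothetical values): truncate(1.23987, 2) returns 1.23, whereas round() would give
# 1.24; truncation never rounds a volume up, which keeps order sizes within the exchange's step size.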
def wrap_get_price():
# Use CUSTOM_LIST symbols if CUSTOM_LIST is set to True
global tickers
if CUSTOM_LIST:
if CUSTOM_LIST_AUTORELOAD:
while True:
if not os.path.exists(TICKERS_LIST):
print(f"Autoreload tickers cannot find {TICKERS_LIST} file. Will retry in 1 second.")
time.sleep(1)
else:
break
prevcoincount = len(tickers)
tickers=[line.strip() for line in open(TICKERS_LIST)]
if DEBUG:
print(f"Reloaded tickers from {TICKERS_LIST} file. Prev coin count: {prevcoincount} | New coin count: {len(tickers)}")
return get_price()
if __name__ == '__main__':
req_version = (3,9)
if sys.version_info[:2] < req_version:
print(f'This bot requires Python version 3.9 or higher/newer. You are running version {sys.version_info[:2]} - please upgrade your Python version!!')
sys.exit()
# Load arguments then parse settings
args = parse_args()
mymodule = {}
discord_msg_balance_data = ""
last_msg_discord_balance_date = datetime.now()
last_history_log_date = datetime.now()
# set to false at Start
global bot_paused
bot_paused = False
DEFAULT_CONFIG_FILE = 'config.yml'
DEFAULT_CREDS_FILE = 'creds.yml'
config_file = args.config if args.config else DEFAULT_CONFIG_FILE
creds_file = args.creds if args.creds else DEFAULT_CREDS_FILE
parsed_config = load_config(config_file)
parsed_creds = load_config(creds_file)
# Default no debugging
DEBUG = False
# Load system vars
TEST_MODE = parsed_config['script_options']['TEST_MODE']
# LOG_TRADES = parsed_config['script_options'].get('LOG_TRADES')
LOG_FILE = parsed_config['script_options'].get('LOG_FILE')
HISTORY_LOG_FILE = "history.txt"
DEBUG_SETTING = parsed_config['script_options'].get('DEBUG')
AMERICAN_USER = parsed_config['script_options'].get('AMERICAN_USER')
# Load trading vars
PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
TRADE_TOTAL = parsed_config['trading_options']['TRADE_TOTAL']
TRADE_SLOTS = parsed_config['trading_options']['TRADE_SLOTS']
FIATS = parsed_config['trading_options']['FIATS']
TIME_DIFFERENCE = parsed_config['trading_options']['TIME_DIFFERENCE']
RECHECK_INTERVAL = parsed_config['trading_options']['RECHECK_INTERVAL']
CHANGE_IN_PRICE = parsed_config['trading_options']['CHANGE_IN_PRICE']
STOP_LOSS = parsed_config['trading_options']['STOP_LOSS']
TAKE_PROFIT = parsed_config['trading_options']['TAKE_PROFIT']
#COOLOFF_PERIOD = parsed_config['trading_options']['COOLOFF_PERIOD']
CUSTOM_LIST = parsed_config['trading_options']['CUSTOM_LIST']
CUSTOM_LIST_AUTORELOAD = parsed_config['trading_options']['CUSTOM_LIST_AUTORELOAD']
TICKERS_LIST = parsed_config['trading_options']['TICKERS_LIST']
USE_TRAILING_STOP_LOSS = parsed_config['trading_options']['USE_TRAILING_STOP_LOSS']
TRAILING_STOP_LOSS = parsed_config['trading_options']['TRAILING_STOP_LOSS']
TRAILING_TAKE_PROFIT = parsed_config['trading_options']['TRAILING_TAKE_PROFIT']
# Code modified from DJCommie fork
# Load Session OVERRIDE values - used to STOP the bot when current session meets a certain STP or SSL value
SESSION_TPSL_OVERRIDE = parsed_config['trading_options']['SESSION_TPSL_OVERRIDE']
SESSION_TAKE_PROFIT = parsed_config['trading_options']['SESSION_TAKE_PROFIT']
SESSION_STOP_LOSS = parsed_config['trading_options']['SESSION_STOP_LOSS']
# Borrowed from DJCommie fork
# If TRUE, coin will only sell based on an external SELL signal
SELL_ON_SIGNAL_ONLY = parsed_config['trading_options']['SELL_ON_SIGNAL_ONLY']
# Discord integration
# Used to push alerts, messages etc to a discord channel
MSG_DISCORD = parsed_config['trading_options']['MSG_DISCORD']
# Trashcan settings
#HODLMODE_ENABLED = parsed_config['trading_options']['HODLMODE_ENABLED']
#HODLMODE_TIME_THRESHOLD = parsed_config['trading_options']['HODLMODE_TIME_THRESHOLD']
TRADING_FEE = parsed_config['trading_options']['TRADING_FEE']
SIGNALLING_MODULES = parsed_config['trading_options']['SIGNALLING_MODULES']
if DEBUG_SETTING or args.debug:
DEBUG = True
# Load creds for correct environment
access_key, secret_key = load_correct_creds(parsed_creds)
if DEBUG:
print(f'Loaded config below\n{json.dumps(parsed_config, indent=4)}')
print(f'Your credentials have been loaded from {creds_file}')
if MSG_DISCORD:
DISCORD_WEBHOOK = load_discord_creds(parsed_creds)
sell_all_coins = False
# Authenticate with the client, Ensure API key is good before continuing
if AMERICAN_USER:
client = Client(access_key, secret_key, tld='us')
else:
client = Client(access_key, secret_key)
    # If the user has a bad / incorrect API key,
    # this will stop the script from starting and display a helpful error.
api_ready, msg = test_api_key(client, BinanceAPIException)
if api_ready is not True:
exit(f'{txcolors.SELL_LOSS}{msg}{txcolors.DEFAULT}')
# Use CUSTOM_LIST symbols if CUSTOM_LIST is set to True
if CUSTOM_LIST: tickers=[line.strip() for line in open(TICKERS_LIST)]
# try to load all the coins bought by the bot if the file exists and is not empty
coins_bought = {}
if TEST_MODE:
file_prefix = 'test_'
else:
file_prefix = 'live_'
# path to the saved coins_bought file
coins_bought_file_path = file_prefix + 'coins_bought.json'
    # The below mod was stolen and altered from GoGo's fork, a nice addition for keeping a history of profit across multiple bot sessions.
# path to the saved bot_stats file
bot_stats_file_path = file_prefix + 'bot_stats.json'
# use separate files for testing and live trading
LOG_FILE = file_prefix + LOG_FILE
HISTORY_LOG_FILE = file_prefix + HISTORY_LOG_FILE
bot_started_datetime = datetime.now()
total_capital_config = TRADE_SLOTS * TRADE_TOTAL
    if os.path.isfile(bot_stats_file_path) and os.stat(bot_stats_file_path).st_size != 0:
with open(bot_stats_file_path) as file:
bot_stats = json.load(file)
# load bot stats:
try:
bot_started_datetime = datetime.strptime(bot_stats['botstart_datetime'], '%Y-%m-%d %H:%M:%S.%f')
except Exception as e:
print (f'Exception on reading botstart_datetime from {bot_stats_file_path}. Exception: {e}')
bot_started_datetime = datetime.now()
try:
total_capital = bot_stats['total_capital']
except Exception as e:
print (f'Exception on reading total_capital from {bot_stats_file_path}. Exception: {e}')
total_capital = TRADE_SLOTS * TRADE_TOTAL
historic_profit_incfees_perc = bot_stats['historicProfitIncFees_Percent']
historic_profit_incfees_total = bot_stats['historicProfitIncFees_Total']
trade_wins = bot_stats['tradeWins']
trade_losses = bot_stats['tradeLosses']
if total_capital != total_capital_config:
historic_profit_incfees_perc = (historic_profit_incfees_total / total_capital_config) * 100
# rolling window of prices; cyclical queue
historical_prices = [None] * (TIME_DIFFERENCE * RECHECK_INTERVAL)
hsp_head = -1
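    # Illustrative sizing (hypothetical config): TIME_DIFFERENCE = 5 and RECHECK_INTERVAL = 10 give a
    # window of 50 price snapshots taken every (5 * 60) / 10 = 30 seconds, with hsp_head indexing the
    # most recent snapshot in this cyclical buffer.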
# prevent including a coin in volatile_coins if it has already appeared there less than TIME_DIFFERENCE minutes ago
volatility_cooloff = {}
# if saved coins_bought json file exists and it's not empty then load it
    if os.path.isfile(coins_bought_file_path) and os.stat(coins_bought_file_path).st_size != 0:
with open(coins_bought_file_path) as file:
coins_bought = json.load(file)
print('Press Ctrl-C to stop the script')
if not TEST_MODE:
if not args.notimeout: # if notimeout skip this (fast for dev tests)
print('WARNING: Test mode is disabled in the configuration, you are using _LIVE_ funds.')
print('WARNING: Waiting 10 seconds before live trading as a security measure!')
time.sleep(10)
remove_external_signals('buy')
remove_external_signals('sell')
remove_external_signals('pause')
# load signalling modules
signalthreads = []
try:
if len(SIGNALLING_MODULES) > 0:
for module in SIGNALLING_MODULES:
print(f'Starting {module}')
mymodule[module] = importlib.import_module(module)
# t = threading.Thread(target=mymodule[module].do_work, args=())
t = multiprocessing.Process(target=mymodule[module].do_work, args=())
t.name = module
t.daemon = True
t.start()
# add process to a list. This is so the thread can be terminated at a later time
signalthreads.append(t)
time.sleep(2)
else:
print(f'No modules to load {SIGNALLING_MODULES}')
except Exception as e:
if str(e) == "object of type 'NoneType' has no len()":
print(f'No external signal modules running')
else:
print(f'Loading external signals exception: {e}')
# seed initial prices
#get_price()
wrap_get_price()
TIMEOUT_COUNT=0
READ_CONNECTERR_COUNT=0
BINANCE_API_EXCEPTION=0
while is_bot_running:
try:
orders, last_price, volume = buy()
update_portfolio(orders, last_price, volume)
if SESSION_TPSL_OVERRIDE:
check_total_session_profit(coins_bought, last_price)
coins_sold = sell_coins()
remove_from_portfolio(coins_sold)
update_bot_stats()
except ReadTimeout as rt:
TIMEOUT_COUNT += 1
print(f'We got a timeout error from Binance. Re-loop. Connection Timeouts so far: {TIMEOUT_COUNT}')
except ConnectionError as ce:
READ_CONNECTERR_COUNT += 1
print(f'We got a connection error from Binance. Re-loop. Connection Errors so far: {READ_CONNECTERR_COUNT}')
except BinanceAPIException as bapie:
BINANCE_API_EXCEPTION += 1
print(f'We got an API error from Binance. Re-loop. API Errors so far: {BINANCE_API_EXCEPTION}.\nException:\n{bapie}')
except KeyboardInterrupt as ki:
# stop external signal threads
stop_signal_threads()
# ask user if they want to sell all coins
print(f'\n\n\n')
sellall = input(f'{txcolors.WARNING}Program execution ended by user!\n\nDo you want to sell all coins (y/N)?{txcolors.DEFAULT}')
if sellall.upper() == "Y":
# sell all coins
sell_all('Program execution ended by user!')
sys.exit(0)
if not is_bot_running:
if SESSION_TPSL_OVERRIDE:
print(f'')
print(f'')
print(f'{txcolors.WARNING}{session_tpsl_override_msg}{txcolors.DEFAULT}')
sell_all(session_tpsl_override_msg, True)
sys.exit(0)
else:
print(f'')
print(f'')
print(f'Bot terminated for some reason.')
|
ea_players_game.py
|
import argparse
import sys
from multiprocessing import JoinableQueue, Process, Value
import numpy as np
from ai.EAPlayer import EAPlayer
from game.Game import game_process, Game
from gui.Gui import gui_process
def parse_args():
parser = argparse.ArgumentParser()
    parser.add_argument('players', nargs='?', help="number of players [2, 6]", type=int, default=4)
    parser.add_argument('human_players', nargs='?', help="number of human players [0, 2]", type=int, default=2)
parser.add_argument('-c', '--config-path', default="./config.json")
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
    if args.players < 2 or args.players > 6:
raise Exception("Number of players must be in [2, 6]")
    if args.human_players < 0 or args.human_players > 2:
raise Exception("Number of human players must be in [0, 2]")
terminate_flag = Value('i', 0)
data_queue = JoinableQueue()
action_queue = JoinableQueue()
mp_utils = [terminate_flag, data_queue, action_queue]
scores = np.zeros(args.players)
action_space = ["left", "straight", "right"]
game = Game(args.players, 0, config_path=args.config_path)
game.init_round(args.players, 0)
game_state = game.get_game_state()
try:
weights = np.genfromtxt(f"output/EA_multi_pretrain_4/best_0.txt")
except Exception as e:
print(e)
sys.exit(-1)
players = [EAPlayer(str(i), weights) for i in range(args.players)]
    game_proc = Process(target=game_process, args=(players, args.human_players, args.config_path, mp_utils))
    game_proc.daemon = True
    gui_proc = Process(target=gui_process, args=(args.config_path, mp_utils))
    gui_proc.daemon = True
    game_proc.start()
    gui_proc.start()
    gui_proc.join()
    action_queue.put([])
    game_proc.join()
|
Crawler.py
|
import requests
# import scrapy
import re
import time
import datetime
import pickle
import threading
from threading import Timer
from tornado import ioloop, httpclient
import os.path
from bs4 import BeautifulSoup
# import _thread
# from parser import Parser
"""
This file consists of the Crawler class, which is mainly responsible for crawling
webpages.
"""
PARAMS_DIR = "params.pickle"
ENTITY_LIST_DIR = "entityList.csv"
IDs_DIR = "IDs.pickle"
class Crawler():
name="kijiji_standard"
def __init__(self):
# self.current_URL=input("Please enter the URL to start crawling from : ")
# self.current_URL='http://www.kijiji.ca/b-buy-sell/edmonton/c10l1700203'
        # self.restartInterval = 60 # in minutes
self.Parser=Parser(parser_type=1)
self.pagesCrawled=0
self.pagesCrawlMax=100
self.eachPageSize=25
self.linksCrawlMax = self.pagesCrawlMax * self.eachPageSize
        self.linksCrawled=0
self.entityListFileName=ENTITY_LIST_DIR
self.allIds = set()
self.data = {}
self.metaData = {}
self.paramsFileName = PARAMS_DIR
self.IDsFileName = IDs_DIR
self.lastSaveTime = time.time()
self.lastCrawlTime = time.time()
self.runsSoFar = 0
self.crawling = False
self.crawled = 0
self.toExit = False
self.no_linksToFetch = 0
self.page_signatures = []
self.http_client = httpclient.AsyncHTTPClient()
self.loadALL()
# print("self.toCrawlSize: ", self.toCrawlSize)
def runLoop(self):
# print("right here!!!!!!! 4: ", self.toCrawlSize," ", self.crawled)
if self.crawled < self.toCrawlSize :
# print("right here!!!!!!! 4.1")
if not self.crawling:
# self.loadParams()
self.restartRun()
self.isToSave()
self.lastCrawlTime = time.time()
self.crawling = True
self.current_URL = next(self.toCrawl)
self.crawled += 1
self.http_client.fetch(self.current_URL.strip(), self.handle_request, method='GET')
# print("right here!!!!!!! 5")
ioloop.IOLoop.instance().start()
self.no_linksToFetch = 0 # In case timeout occurs and linksRequested != 0
self.pagesCrawled = 0
# print("right here!!!!!!! 6")
threading.Thread(target=self.runLoop).start()
else:
self.runsSoFar += 1
print("self.runsSoFar: ", self.runsSoFar, " . RUN DURATION: ", time.time() - self.runDuration)
self.isToSave()
if self.runsSoFar < self.numberOfRuns : #Need to check whether the number of run sessions exceeded the maximum preset number of runs
self.crawling = False
self.crawled = 0
toSleep = self.lastCrawlTime + self.crawlPeriod - time.time()
self.thread=Timer(toSleep, self.runLoop)
self.thread.start()
# def crawl_parse(self, response):
# print("crawled URL: ", response.effective_url)
# print("self.lastSaveTime: ", self.lastSaveTime)
# print("self.lastCrawlTime: ", self.lastCrawlTime)
# # print("self.runsSoFar: ", self.runsSoFar)
# print("self.crawling : ", self.crawling)
# print("self.crawled : ", self.crawled)
# ioloop.IOLoop.instance().stop()
def createSignatures(self):
if self.page_signatures == []:
self.page_signatures = ["page-%d"%(i) for i in range(2, self.pagesCrawlMax+1)]
self.toCrawlSignatures = [url.split('/')[-2] for url in self.toCrawl]
def restartRun(self):
self.runDuration = time.time()
self.loadALL()
self.pagesCrawled = 0
self.linksCrawled = 0
# print(type(self.toCrawl[0].split('/')))
self.createSignatures()
self.toCrawl = iter(self.toCrawl)
def handle_request(self, response):
self.crawl_parse(response)
if (self.pagesCrawled >= self.pagesCrawlMax and self.no_linksToFetch == 0) :
ioloop.IOLoop.instance().stop()
def isListPage(self, response):
# for part in response.request.url.split('/')[-2:]:
# if part in self.toCrawlSignatures:
# return True
splitted = response.request.url.split('/')
if ( (splitted[-2] in self.toCrawlSignatures) or (splitted[-2] in self.page_signatures) ):
if "page" not in splitted[-2]:
print("to crawl: ",splitted[-2])
return True
return False
def crawl_parse(self, response):
# allOld=False
# new=0
if self.isListPage(response) :
# In case if response's url is the url for the ads list page
if self.pagesCrawled >= self.pagesCrawlMax:
self.printStats()
return
parsed=self.Parser.parse(response, type=1)
parsed_iter = {}
for k,v in parsed.items():
parsed_iter[k]=iter(v)
# for link, date in zip(parsed['links'],parsed['dates']):
for link in parsed['links']:
id=self.extractID_fromLink(link)
if id in self.allIds:
for k in parsed.keys():
next(parsed_iter[k])
continue
else:
self.data[id]={}
for k in parsed.keys():
self.data[id][k]=next(parsed_iter[k])
self.allIds.add(id)
url="http://www.kijiji.ca"+link
self.no_linksToFetch += 1
self.http_client.fetch(url.strip(), self.handle_request, method='GET')
self.pagesCrawled += 1
crawlNext=self.nextPage()
if crawlNext != None and (not self.pagesCrawled >= self.pagesCrawlMax):
self.http_client.fetch(crawlNext, self.handle_request, method='GET')
else:
# In case if response's url is the url for an individual ad's page
parsed=self.Parser.parse(response, type=2)
self.linksCrawled+=1
self.no_linksToFetch -= 1
id = self.extractID_fromLink(response.request.url)
# print(id)
for k,v in parsed.items():
self.data[id][k] = v
# if self.linksCrawled >= self.linksCrawlMax:
# self.printStats()
# return
# for key in parsed.keys():
# print("For key = '%s', the following are extracted: "%(key))
# print(parsed[key])
def nextPage(self):
if self.pagesCrawled >= 1:
next_page_str="page-"+str(self.pagesCrawled+1)
url_splitted=self.current_URL.split('/')
nextPage_link = '/'.join(url_splitted[0:-1] + [next_page_str] + [url_splitted[-1]])
return nextPage_link
else:
return None
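        # Illustrative: with current_URL "http://www.kijiji.ca/b-buy-sell/edmonton/c10l1700203" and
        # pagesCrawled == 1, the next URL becomes
        # "http://www.kijiji.ca/b-buy-sell/edmonton/page-2/c10l1700203".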
def printStats(self):
print("Links crawled so far: ", self.linksCrawled)
# def nextURL(self):
# """
# Decides what the next URL to crawl shall be and returns it.
# """
# pass
# def scrape(self):
# """
# Responsible for extracting information from webpage.
# It depends on
# Returns a list containing the data extracted
# eg, [[item_name1, description], [item_name2, description] ,... ]
# """
# pass
# def inferFromData(self):
# pass
def save(self):
# Saving Data
fileName =(self.saveDataDirectory
+ datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d-%H-%M')
+ ".pickle"
)
with open(fileName, 'wb') as f:
pickle.dump([self.data, self.metaData], f)
self.data = {}
self.metaData = {}
self.lastSaveTime = time.time()
print("Saved at : %s"%(datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')))
self.saveIDs()
def loadALL(self):
self.loadIDs()
self.loadParams()
def loadIDs(self):
if os.path.isfile(self.IDsFileName):
with open(self.IDsFileName, 'rb') as f:
self.allIds = pickle.load(f)
def saveIDs(self):
with open(self.IDsFileName, 'wb') as f:
pickle.dump(self.allIds, f)
def loadParams(self):
# Loading params
try:
with open(self.paramsFileName, 'rb') as f:
allParams=pickle.load(f)
self.toCrawlSize = len(allParams['toCrawl'])
self.toCrawl=allParams['toCrawl']
self.savingPeriod=allParams['savingPeriod']
self.crawlPeriod=allParams['crawlPeriod'] * 3600
self.numberOfRuns=allParams['numberOfRuns']
self.saveDataDirectory=allParams['saveDataDirectory']
except Exception as e:
print("ERROR! ", e)
exit()
def extractID_fromLink(self, link):
splitted = link.split("/")
id_str = splitted[-1].split("?")[0]
if id_str.isdigit():
return int(id_str)
else:
return id_str
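        # Illustrative: a link such as ".../v-couch/1234567890?src=list" (hypothetical) yields the
        # integer id 1234567890, while a link without a numeric tail is returned unchanged as a string.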
def loadEntityNames(self):
f=open(self.entityListFileName, 'r')
entities=f.read()
self.entityList=entities.split(',')
f.close()
def isToSave(self):
# Checks whether it is time to save
if (self.runsSoFar % self.savingPeriod == 0) and (self.data != {}):
self.save()
class Parser():
def __init__(self, parser_type=1):
if parser_type==1:
self.parse=self.parse_Normal
self.pattern_dateListed='<span class="date-posted">.*</span>'
pass
def parse_Normal(self, response, type):
"""
response is an object of tornado.httpclient.HTTPResponse class
"""
soup = BeautifulSoup(response.body, 'html.parser')
parsed={}
# For parsing the advertisement list page( eg the list that has 20 Ads)
if type == 1:
titleLinks_response=soup.select('div.clearfix div.info div.info-container div.title a')
parsed['titles']=[title.get_text().strip() for title in titleLinks_response]
parsed['links']=[link.get('href').strip() for link in titleLinks_response]
# print("number of links : ", len(parsed['links']))
# for link in parsed['links']:
# print(link)
dates=[]
for item in soup.select("div.clearfix div.info div.info-container"):
date_parsed = self.parse_date(str(item))
if date_parsed == None:
dates=dates+[None]
else:
dates=dates+date_parsed
parsed['dates']=dates
# parsed['titleLinks']=zip(titles,links)
#Extracting the price of each item
parsed['prices'] = [price.get_text().strip() for price in soup.select("div.clearfix div.info div.info-container div.price")];
# For parsing and extracting the descriptions from within a specific ad.
elif type == 2:
# desc = soup.select('div[id="UserContent"] span[itemprop="description"]')[0]
# parsed['description'] = desc.get_text().strip()
desc = soup.select('div[id="UserContent"] span[itemprop="description"]')
if len(desc) == 0:
# print("FOR TYPE 2, the URL : ", response.request.url)
parsed['description'] = ""
else:
parsed['description'] = desc[0].get_text().strip()
return parsed
def parse_date(self, response_text):
"""
        Note that html.parser is needed for BeautifulSoup, otherwise date fields beginning with '<' won't be recognized.
"""
dates=[]
for found in re.findall(self.pattern_dateListed, response_text):
datePosted = re.match( r'<span class="date-posted">(.*)</span', found, re.M|re.I).group(1).strip()
            if datePosted[0:5] == '&lt; ':
                datePosted=datePosted[5:]
splitted=datePosted.split(" ")
time_=0
if ("hours" or "hour") in splitted:
time_ += int(splitted[0].strip()) * 60
if ("minutes" or "minute") in splitted:
time_ += int(splitted[0].strip())
time_ *= 60 # Converting to seconds
time_ = time.time() - time_
dates.append(time_)
else :
splitted=datePosted.split("/")
if len(splitted) == 3:
day = int(splitted[0])
month = int(splitted[1])
year = int(splitted[2])
dates.append( time.mktime(datetime.datetime(year, month, day).timetuple()) )
else:
dates.append(datePosted)
if len(dates) == 0:
return None
else:
return dates
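        # Illustrative inputs for the two date formats handled above (hypothetical values):
        #   "&lt; 5 hours ago" -> time.time() - 5*3600 (an approximate posting timestamp)
        #   "23/07/2016"       -> time.mktime(datetime.datetime(2016, 7, 23).timetuple())
        #   anything else      -> appended unchanged as a string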
def param_assertion(self, method, params):
if method==self.__init__:
            possible_parser_types=[1]
            assert(params["parser_type"] in possible_parser_types), "Constructor 'input param': 'parser_type' not correct"
if __name__=="__main__":
myCrawler=Crawler()
myCrawler.runLoop()
|
gorun.py
|
#!/usr/bin/env python
#
# Wrapper on pyinotify for running commands
# (c) 2009 Peter Bengtsson, peter@fry-it.com
#
# TODO: Ok, now it does not start a command while another is running
# But! then what if you actually wanted to test a modification you
# saved while running another test
# Yes, we could stop the running command and replace it by the new test
# But! django tests will complain that a test db is already here
import os
from subprocess import Popen
from threading import Lock, Thread
__version__='1.6'
class SettingsClass(object):
VERBOSE = False
settings = SettingsClass()
try:
from pyinotify import WatchManager, Notifier, ThreadedNotifier, ProcessEvent, EventsCodes
except ImportError:
print "pyinotify not installed. Try: easy_install pyinotify"
raise
def _find_command(path):
# path is a file
assert os.path.isfile(path)
# in dictionary lookup have keys as files and directories.
# if this path exists in there, it's a simple match
try:
return lookup[path]
except KeyError:
pass
# is the parent directory in there?
while path != '/':
path = os.path.dirname(path)
try:
return lookup[path]
except KeyError:
pass
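    # Illustrative (hypothetical settings): with lookup = {'/home/me/project': 'python manage.py test'},
    # a save of /home/me/project/app/models.py walks up the parent directories until
    # '/home/me/project' matches and that command is returned.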
def _ignore_file(path):
if path.endswith('.pyc'):
return True
if path.endswith('~'):
return True
basename = os.path.basename(path)
if basename.startswith('.#'):
return True
if basename.startswith('#') and basename.endswith('#'):
return True
if '.' in os.path.basename(path) and \
basename.split('.')[-1] in settings.IGNORE_EXTENSIONS:
return True
if os.path.split(os.path.dirname(path))[-1] in settings.IGNORE_DIRECTORIES:
return True
if not os.path.isfile(path):
return True
class PTmp(ProcessEvent):
def __init__(self):
super(PTmp, self).__init__()
self.lock = Lock()
def process_IN_CREATE(self, event):
if os.path.basename(event.pathname).startswith('.#'):
# backup file
return
print "Creating:", event.pathname
command = _find_command(event.pathname)
#def process_IN_DELETE(self, event):
# print "Removing:", event.pathname
# command = _find_command(event.pathname)
def process_IN_MODIFY(self, event):
if _ignore_file(event.pathname):
return
def execute_command(event, lock):
# By default trying to acquire a lock is blocking
# In this case it will create a queue of commands to run
#
# If you try to acquire the lock in the locked state non-blocking
            # style, it will immediately return False and you know that a
# command is already running, and in this case we don't want to run
# this command at all.
block = settings.RUN_ON_EVERY_EVENT
if not lock.acquire(block):
# in this case we just want to not execute the command
return
print "Modifying:", event.pathname
command = _find_command(event.pathname)
if command:
if settings.VERBOSE:
print "Command: ",
print command
p = Popen(command, shell=True)
sts = os.waitpid(p.pid, 0)
lock.release()
command_thread = Thread(target=execute_command, args=[event, self.lock])
command_thread.start()
def start(actual_directories):
wm = WatchManager()
flags = EventsCodes.ALL_FLAGS
mask = flags['IN_MODIFY'] #| flags['IN_CREATE']
p = PTmp()
notifier = Notifier(wm, p)
for actual_directory in actual_directories:
print "DIRECTORY", actual_directory
wdd = wm.add_watch(actual_directory, mask, rec=True)
# notifier = Notifier(wm, p, timeout=10)
try:
print "Waiting for stuff to happen..."
notifier.loop()
except KeyboardInterrupt:
pass
return 0
lookup = {}
def configure_more(directories):
actual_directories = set()
#print "directories", directories
# Tune the configured directories a bit
for i, (path, cmd) in enumerate(directories):
if isinstance(path, (list, tuple)):
actual_directories.update(configure_more(
[(x, cmd) for x in path]))
continue
if not path.startswith('/'):
path = os.path.join(os.path.abspath(os.path.dirname('.')), path)
if not (os.path.isfile(path) or os.path.isdir(path)):
raise OSError, "%s neither a file or a directory" % path
path = os.path.normpath(path)
if os.path.isdir(path):
if path.endswith('/'):
# tidy things up
path = path[:-1]
if path == '.':
path = ''
actual_directories.add(path)
else:
# because we can't tell pyinotify to monitor files,
            # when a file is configured, add its directory
actual_directories.add(os.path.dirname(path))
lookup[path] = cmd
return actual_directories
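# Illustrative gorun settings file (hypothetical paths and commands):
#   DIRECTORIES = (
#       ('apps/', 'python manage.py test'),
#       ('settings.py', 'python manage.py test'),
#   )
#   IGNORE_EXTENSIONS = ('log',)
#   RUN_ON_EVERY_EVENT = False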
if __name__=='__main__':
import sys
import imp
args = sys.argv[1:]
if not args and os.path.isfile('gorun_settings.py'):
print >>sys.stderr, "Guessing you want to use gorun_settings.py"
args = ['gorun_settings.py']
if not args and os.path.isfile('gorunsettings.py'):
print >>sys.stderr, "Guessing you want to use gorunsettings.py"
args = ['gorunsettings.py']
if not args:
print >>sys.stderr, "USAGE: %s importable_py_settings_file" %\
__file__
sys.exit(1)
settings_file = args[-1]
sys.path.append(os.path.abspath(os.curdir))
x = imp.load_source('gorun_settings', settings_file)
settings.DIRECTORIES = x.DIRECTORIES
settings.VERBOSE = getattr(x, 'VERBOSE', settings.VERBOSE)
settings.IGNORE_EXTENSIONS = getattr(x, 'IGNORE_EXTENSIONS', tuple())
settings.IGNORE_DIRECTORIES = getattr(x, 'IGNORE_DIRECTORIES', tuple())
settings.RUN_ON_EVERY_EVENT = getattr(x, 'RUN_ON_EVERY_EVENT', False)
actual_directories = configure_more(settings.DIRECTORIES)
sys.exit(start(actual_directories))
|
test_remote_account.py
|
# Copyright 2015 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.cluster.cluster_spec import ClusterSpec
from ducktape.services.service import Service
from ducktape.tests.test import Test
from ducktape.errors import TimeoutError
from ducktape.mark.resource import cluster
import os
import pytest
import random
import shutil
from six import iteritems
import tempfile
from threading import Thread
import time
import logging
def generate_tempdir_name():
"""Use this ad-hoc function instead of the tempfile module since we're creating and removing
this directory with ssh commands.
"""
return "/tmp/" + "t" + str(int(time.time()))
class RemoteAccountTestService(Service):
"""Simple service that allocates one node for performing tests of RemoteAccount functionality"""
def __init__(self, context):
super(RemoteAccountTestService, self).__init__(context, num_nodes=1)
self.temp_dir = generate_tempdir_name()
self.logs = {
"my_log": {
"path": self.log_file,
"collect_default": True
},
"non_existent_log": {
"path": os.path.join(self.temp_dir, "absent.log"),
"collect_default": True
}
}
@property
def log_file(self):
return os.path.join(self.temp_dir, "test.log")
def start_node(self, node):
node.account.ssh("mkdir -p " + self.temp_dir)
node.account.ssh("touch " + self.log_file)
def stop_node(self, node):
pass
def clean_node(self, node):
node.account.ssh("rm -rf " + self.temp_dir)
def write_to_log(self, msg):
self.nodes[0].account.ssh("echo -e -n " + repr(msg) + " >> " + self.log_file)
class GenericService(Service):
"""Service which doesn't do anything - just a group of nodes, each of which has a scratch directory."""
def __init__(self, context, num_nodes):
super(GenericService, self).__init__(context, num_nodes)
self.worker_scratch_dir = "scratch"
for node in self.nodes:
node.account.mkdirs(self.worker_scratch_dir)
def stop_node(self, node):
# noop
pass
def clean_node(self, node):
node.account.remove(self.worker_scratch_dir, allow_fail=True)
class FileSystemTest(Test):
"""
Note that in an attempt to isolate the file system methods, validation should be done with ssh/shell commands.
"""
def setup(self):
self.service = GenericService(self.test_context, 1)
self.node = self.service.nodes[0]
self.scratch_dir = self.service.worker_scratch_dir
@cluster(num_nodes=1)
def create_file_test(self):
expected_contents = "hello world"
fname = "myfile.txt"
fpath = "%s/%s" % (self.scratch_dir, fname)
self.node.account.create_file(fpath, expected_contents)
# validate existence and contents
self.node.account.ssh("test -f %s" % fpath)
contents = "\n".join([l for l in self.node.account.ssh_capture("cat %s" % fpath)])
assert contents == expected_contents
# TODO also check absolute path
@cluster(num_nodes=1)
def mkdir_test(self):
dirname = "%s/mydir" % self.scratch_dir
self.node.account.mkdir(dirname)
# TODO - important!! check mode
self.node.account.ssh("test -d %s" % dirname, allow_fail=False)
# mkdir should not succeed if the base directories do not already exist
dirname = "%s/a/b/c/d" % self.scratch_dir
with pytest.raises(IOError):
self.node.account.mkdir(dirname)
# TODO also check absolute path
@cluster(num_nodes=1)
def mkdirs_nested_test(self):
dirname = "%s/a/b/c/d" % self.scratch_dir
# TODO important!! check mode
self.node.account.mkdirs(dirname)
self.node.account.ssh("test -d %s" % dirname, allow_fail=False)
# TODO also check absolute path
@cluster(num_nodes=1)
def open_test(self):
"""Try opening, writing, reading a file."""
fname = "%s/myfile.txt" % self.scratch_dir
expected_contents = b"hello world\nhooray!"
with self.node.account.open(fname, "w") as f:
f.write(expected_contents)
with self.node.account.open(fname, "r") as f:
contents = f.read()
assert contents == expected_contents
# Now try opening in append mode
append = b"hithere"
expected_contents = expected_contents + append
with self.node.account.open(fname, "a") as f:
f.write(append)
with self.node.account.open(fname, "r") as f:
contents = f.read()
assert contents == expected_contents
@cluster(num_nodes=1)
def exists_file_test(self):
"""
Create various kinds of files and symlinks, verifying that exists works as expected.
"""
# create file, test existence with relative and absolute path
self.node.account.ssh("touch %s/hi" % self.scratch_dir)
assert self.node.account.exists("%s/hi" % self.scratch_dir)
# TODO abspath
# create symlink, test existence with relative and absolute path
self.node.account.ssh("ln -s %s/hi %s/hi-link" % (self.scratch_dir, self.scratch_dir))
assert self.node.account.exists("%s/hi-link" % self.scratch_dir)
# TODO abspath
def exists_dir_test(self):
# check bad path doesn't exist
assert not self.node.account.exists("a/b/c/d")
# create dir, test existence with relative and absolute path
dpath = "%s/mydir" % self.scratch_dir
self.node.account.ssh("mkdir %s" % dpath)
assert self.node.account.exists(dpath)
# TODO abspath
# create symlink, test existence with relative and absolute path
self.node.account.ssh("ln -s %s %s/mydir-link" % (dpath, self.scratch_dir))
assert self.node.account.exists("%s/mydir-link" % self.scratch_dir)
# # TODO abspath
def remove_test(self):
"""Test functionality of remove method"""
# remove a non-empty directory
dpath = "%s/mydir" % self.scratch_dir
self.node.account.ssh("mkdir %s" % dpath)
self.node.account.ssh("touch %s/hi.txt" % dpath)
self.node.account.ssh("test -d %s" % dpath)
self.node.account.remove(dpath)
self.node.account.ssh("test ! -d %s" % dpath)
# remove a file
fpath = "%s/hello.txt" % self.scratch_dir
self.node.account.ssh("echo 'hello world' > %s" % fpath)
self.node.account.remove(fpath)
# remove non-existent path
with pytest.raises(RuntimeError):
self.node.account.remove("a/b/c/d")
# remove non-existent path with allow_fail = True should be ok
self.node.account.remove("a/b/c/d", allow_fail=True)
# Representation of a somewhat arbitrary directory structure for testing copy functionality
# A key which has a string as its value represents a file
# A key which has a dict as its value represents a subdirectory
DIR_STRUCTURE = {
"d00": {
"another_file": b"1\n2\n3\n4\ncats and dogs",
"d10": {
"fasdf": b"lasdf;asfd\nahoppoqnbasnb"
},
"d11": {
"f65": b"afasdfsafdsadf"
}
},
"a_file": b"hello world!"
}
def make_dir_structure(base_dir, dir_structure, node=None):
"""Make a file tree starting at base_dir with structure specified by dir_structure.
if node is None, make the structure locally, else make it on the given node
"""
for k, v in iteritems(dir_structure):
if isinstance(v, dict):
# it's a subdirectory
subdir_name = k
subdir_path = os.path.join(base_dir, subdir_name)
subdir_structure = v
if node:
node.account.mkdir(subdir_path)
else:
os.mkdir(subdir_path)
make_dir_structure(subdir_path, subdir_structure, node)
else:
# it's a file
file_name = k
file_path = os.path.join(base_dir, file_name)
file_contents = v
if node:
with node.account.open(file_path, "wb") as f:
f.write(file_contents)
else:
with open(file_path, "wb") as f:
f.write(file_contents)
def verify_dir_structure(base_dir, dir_structure, node=None):
"""Verify locally or on the given node whether the file subtree at base_dir matches dir_structure."""
for k, v in iteritems(dir_structure):
if isinstance(v, dict):
# it's a subdirectory
subdir_name = k
subdir_path = os.path.join(base_dir, subdir_name)
subdir_structure = v
if node:
assert node.account.isdir(subdir_path)
else:
assert os.path.isdir(subdir_path)
verify_dir_structure(subdir_path, subdir_structure, node)
else:
# it's a file
file_name = k
file_path = os.path.join(base_dir, file_name)
expected_file_contents = v
if node:
with node.account.open(file_path, "r") as f:
contents = f.read()
else:
with open(file_path, "rb") as f:
contents = f.read()
assert expected_file_contents == contents, contents
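# Minimal usage sketch (hypothetical `node` object, paths are placeholders):
# the two helpers above are intended as a pair -- materialize the tree
# described by DIR_STRUCTURE somewhere, then assert the same tree exists at a
# (possibly different) location; pass node=None to work locally, or a
# ducktape node to work on that node.
#
#   import tempfile
#   local_dir = tempfile.mkdtemp()
#   make_dir_structure(local_dir, DIR_STRUCTURE)        # build locally
#   verify_dir_structure(local_dir, DIR_STRUCTURE)      # check locally
#   # make_dir_structure("scratch", DIR_STRUCTURE, node=node)   # or remotely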
class CopyToAndFroTest(Test):
"""These tests check copy_to, and copy_from functionality."""
def setup(self):
self.service = GenericService(self.test_context, 1)
self.node = self.service.nodes[0]
self.remote_scratch_dir = self.service.worker_scratch_dir
self.local_temp_dir = tempfile.mkdtemp()
self.logger.info("local_temp_dir: %s" % self.local_temp_dir)
self.logger.info("node: %s" % str(self.node.account))
@cluster(num_nodes=1)
def test_copy_to_dir_with_rename(self):
# make dir structure locally
make_dir_structure(self.local_temp_dir, DIR_STRUCTURE)
dest = os.path.join(self.remote_scratch_dir, "renamed")
self.node.account.copy_to(self.local_temp_dir, dest)
# now validate the directory structure on the remote machine
verify_dir_structure(dest, DIR_STRUCTURE, node=self.node)
@cluster(num_nodes=1)
def test_copy_to_dir_as_subtree(self):
# copy directory "into" a directory; this should preserve the original directory name
make_dir_structure(self.local_temp_dir, DIR_STRUCTURE)
self.node.account.copy_to(self.local_temp_dir, self.remote_scratch_dir)
local_temp_dir_name = self.local_temp_dir
if local_temp_dir_name.endswith(os.path.sep):
local_temp_dir_name = local_temp_dir_name[:-len(os.path.sep)]
# copy_to preserves only the last path component of the source, so
# verify against the basename, and do so on the remote node
local_temp_dir_name = os.path.basename(local_temp_dir_name)
verify_dir_structure(os.path.join(self.remote_scratch_dir, local_temp_dir_name), DIR_STRUCTURE, node=self.node)
@cluster(num_nodes=1)
def test_copy_from_dir_with_rename(self):
# make dir structure remotely
make_dir_structure(self.remote_scratch_dir, DIR_STRUCTURE, node=self.node)
dest = os.path.join(self.local_temp_dir, "renamed")
self.node.account.copy_from(self.remote_scratch_dir, dest)
# now validate the directory structure locally
verify_dir_structure(dest, DIR_STRUCTURE)
@cluster(num_nodes=1)
def test_copy_from_dir_as_subtree(self):
# copy directory "into" a directory; this should preserve the original directory name
make_dir_structure(self.remote_scratch_dir, DIR_STRUCTURE, node=self.node)
self.node.account.copy_from(self.remote_scratch_dir, self.local_temp_dir)
verify_dir_structure(os.path.join(self.local_temp_dir, "scratch"), DIR_STRUCTURE)
def teardown(self):
# allow_fail in case scratch dir was not successfully created
if os.path.exists(self.local_temp_dir):
shutil.rmtree(self.local_temp_dir)
class CopyDirectTest(Test):
def setup(self):
self.service = GenericService(self.test_context, 2)
self.src_node, self.dest_node = self.service.nodes
self.remote_scratch_dir = self.service.worker_scratch_dir
self.logger.info("src_node: %s" % str(self.src_node.account))
self.logger.info("dest_node: %s" % str(self.dest_node.account))
@cluster(num_nodes=2)
def test_copy_file(self):
"""Verify that a file can be correctly copied directly between nodes.
This should work with or without the recursive flag.
"""
file_path = os.path.join(self.remote_scratch_dir, "myfile.txt")
expected_contents = b"123"
self.src_node.account.create_file(file_path, expected_contents)
self.src_node.account.copy_between(file_path, file_path, self.dest_node)
assert self.dest_node.account.isfile(file_path)
with self.dest_node.account.open(file_path, "r") as f:
contents = f.read()
assert expected_contents == contents
@cluster(num_nodes=2)
def test_copy_directory(self):
"""Verify that a directory can be correctly copied directly between nodes.
"""
make_dir_structure(self.remote_scratch_dir, DIR_STRUCTURE, node=self.src_node)
self.src_node.account.copy_between(self.remote_scratch_dir, self.remote_scratch_dir, self.dest_node)
verify_dir_structure(os.path.join(self.remote_scratch_dir, "scratch"), DIR_STRUCTURE, node=self.dest_node)
class TestClusterSpec(Test):
@cluster(cluster_spec=ClusterSpec.simple_linux(2))
def test_create_two_node_service(self):
self.service = GenericService(self.test_context, 2)
for node in self.service.nodes:
node.account.ssh("echo hi")
class RemoteAccountTest(Test):
def __init__(self, test_context):
super(RemoteAccountTest, self).__init__(test_context)
self.account_service = RemoteAccountTestService(test_context)
def setup(self):
self.account_service.start()
@cluster(num_nodes=1)
def test_ssh_capture_combine_stderr(self):
"""Test that ssh_capture correctly captures stderr and stdout from remote process.
"""
node = self.account_service.nodes[0]
# swap stdout and stderr in the echo process
cmd = "for i in $(seq 1 5); do echo $i 3>&1 1>&2 2>&3; done"
ssh_output = node.account.ssh_capture(cmd, combine_stderr=True)
bad_ssh_output = node.account.ssh_capture(cmd, combine_stderr=False) # Same command, but without combining stderr into stdout
lines = [int(l.strip()) for l in ssh_output]
assert lines == [i for i in range(1, 6)]
bad_lines = [int(l.strip()) for l in bad_ssh_output]
assert bad_lines == []
@cluster(num_nodes=1)
def test_ssh_output_combine_stderr(self):
"""Test that ssh_output correctly captures stderr and stdout from remote process.
"""
node = self.account_service.nodes[0]
# swap stdout and stderr in the echo process
cmd = "for i in $(seq 1 5); do echo $i 3>&1 1>&2 2>&3; done"
ssh_output = node.account.ssh_output(cmd, combine_stderr=True)
bad_ssh_output = node.account.ssh_output(cmd, combine_stderr=False) # Same command, but without combining stderr into stdout
assert ssh_output == b"\n".join([str(i).encode('utf-8') for i in range(1, 6)]) + b"\n", ssh_output
assert bad_ssh_output == b"", bad_ssh_output
@cluster(num_nodes=1)
def test_ssh_capture(self):
"""Test that ssh_capture correctly captures output from ssh subprocess.
"""
node = self.account_service.nodes[0]
cmd = "for i in $(seq 1 5); do echo $i; done"
ssh_output = node.account.ssh_capture(cmd, combine_stderr=False)
lines = [int(l.strip()) for l in ssh_output]
assert lines == [i for i in range(1, 6)]
@cluster(num_nodes=1)
def test_ssh_output(self):
"""Test that ssh_output correctly captures output from ssh subprocess.
"""
node = self.account_service.nodes[0]
cmd = "for i in $(seq 1 5); do echo $i; done"
ssh_output = node.account.ssh_output(cmd, combine_stderr=False)
assert ssh_output == b"\n".join([str(i).encode('utf-8') for i in range(1, 6)]) + b"\n", ssh_output
@cluster(num_nodes=1)
def test_monitor_log(self):
"""Tests log monitoring by writing to a log in the background thread"""
node = self.account_service.nodes[0]
# Make sure we start the log with some data, including the value we're going to grep for
self.account_service.write_to_log("foo\nbar\nbaz")
# Background thread that simulates a process writing to the log
self.wrote_log_line = False
def background_logging_thread():
# This needs to be large enough that we can verify we've actually
# waited some time for the data to be written, but not too long that
# the test takes a long time
time.sleep(3)
self.wrote_log_line = True
self.account_service.write_to_log("foo\nbar\nbaz")
with node.account.monitor_log(self.account_service.log_file) as monitor:
logging_thread = Thread(target=background_logging_thread)
logging_thread.start()
monitor.wait_until('foo', timeout_sec=10, err_msg="Never saw expected log")
assert self.wrote_log_line
logging_thread.join(5.0)
if logging_thread.is_alive():
raise Exception("Timed out waiting for background thread.")
@cluster(num_nodes=1)
def test_monitor_log_exception(self):
"""Tests log monitoring correctly throws an exception when the regex was not found"""
node = self.account_service.nodes[0]
# Make sure we start the log with some data, including the value we're going to grep for
self.account_service.write_to_log("foo\nbar\nbaz")
timeout = 3
try:
with node.account.monitor_log(self.account_service.log_file) as monitor:
start = time.time()
monitor.wait_until('foo', timeout_sec=timeout, err_msg="Never saw expected log")
assert False, "Log monitoring should have timed out and thrown an exception"
except TimeoutError:
# expected
end = time.time()
assert end - start > timeout, "Should have waited full timeout period while monitoring the log"
class TestIterWrapper(Test):
def setup(self):
self.line_num = 6
self.eps = 0.01
self.service = GenericService(self.test_context, num_nodes=1)
self.node = self.service.nodes[0]
self.temp_file = "ducktape-test-" + str(random.randint(0, 100000))
contents = ""
for i in range(self.line_num):
contents += "%d\n" % i
self.node.account.create_file(self.temp_file, contents)
def test_iter_wrapper(self):
"""Test has_next functionality on the returned iterable item."""
output = self.node.account.ssh_capture("cat " + self.temp_file)
for i in range(self.line_num):
assert output.has_next() # with timeout in case of hang
assert output.next().strip() == str(i)
start = time.time()
assert output.has_next() is False
stop = time.time()
assert stop - start < self.eps, "has_next() should return immediately"
def test_iter_wrapper_timeout(self):
"""Test has_next with timeout"""
output = self.node.account.ssh_capture("tail -F " + self.temp_file)
# allow command to be executed before we check output with timeout_sec = 0
time.sleep(.5)
for i in range(self.line_num):
assert output.has_next(timeout_sec=0)
assert output.next().strip() == str(i)
timeout = .25
start = time.time()
# This check will last for the duration of the timeout because the remote tail -F process
# remains running, and the output stream is not closed.
assert output.has_next(timeout_sec=timeout) is False
stop = time.time()
assert (stop - start >= timeout) and (stop - start) < timeout + self.eps, \
"has_next() should return right after %s second" % str(timeout)
def teardown(self):
# tail -F call above will leave stray processes, so clean up
cmd = "for p in $(ps ax | grep -v grep | grep \"%s\" | awk '{print $1}'); do kill $p; done" % self.temp_file
self.node.account.ssh(cmd, allow_fail=True)
self.node.account.ssh("rm -f " + self.temp_file, allow_fail=True)
class RemoteAccountCompressedTest(Test):
def __init__(self, test_context):
super(RemoteAccountCompressedTest, self).__init__(test_context)
self.account_service = RemoteAccountTestService(test_context)
self.test_context.session_context.compress = True
self.tar_msg = False
self.tar_error = False
def setup(self):
self.account_service.start()
@cluster(num_nodes=1)
def test_log_compression_with_non_existent_files(self):
"""Test that log compression with tar works even when a specific log file has not been generated
(e.g. heap dump)
"""
self.test_context.logger.addFilter(CompressionErrorFilter(self))
self.copy_service_logs(None)
if not self.tar_msg:
raise Exception("Never saw attempt to compress log")
if self.tar_error:
raise Exception("Failure when compressing logs")
class CompressionErrorFilter(logging.Filter):
def __init__(self, test):
super(CompressionErrorFilter, self).__init__()
self.test = test
def filter(self, record):
if 'tar czf' in record.msg:
self.test.tar_msg = True
if 'Error' in record.msg:
self.test.tar_error = True
return True
|
fxcmpy.py
|
#
# fxcmpy -- A Python Wrapper Class for the
# RESTful API as provided by FXCM Forex Capital Markets Ltd.
#
# The codes contained herein come without warranties or representations,
# to the extent permitted by applicable law.
#
# Read the RISK DISCLAIMER carefully.
#
# (c) FXCM Forex Capital Markets Ltd.
#
import requests
import socketio
from threading import Thread
import json
import pandas as pd
import sys
import time
import datetime as dt
import configparser
import logging
import fxcmpy.fxcmpy_instruments as fxcmpy_instruments
from fxcmpy.fxcmpy_closed_position import fxcmpy_closed_position
from fxcmpy.fxcmpy_open_position import fxcmpy_open_position
from fxcmpy.fxcmpy_oco_order import fxcmpy_oco_order
from fxcmpy.fxcmpy_order import fxcmpy_order
from urllib.parse import unquote
socketIO = socketio.Client()
socket = socketIO
class ServerError(Exception):
pass
class fxcmpy(object):
""" A wrapper class for the FXCM API. """
# Class attributes
# auth_url = 'https://www-beta2.fxcorporate.com'
# trading_url = 'https://api-demo.fxcm.com'
# port = 443
models = ['Offer', 'Account', 'Order', 'OpenPosition', 'ClosedPosition',
'Summary', 'Properties', 'LeverageProfile']
PERIODS = ['m1', 'm5', 'm15', 'm30', 'H1', 'H2', 'H3', 'H4', 'H6', 'H8',
'D1', 'W1', 'M1']
CANDLES_COLUMNS = ['date', 'bidopen', 'bidclose', 'bidhigh', 'bidlow',
'askopen', 'askclose', 'askhigh', 'asklow', 'tickqty']
CANDLES_COLUMNS_ASK = ['date', 'askopen', 'askclose', 'askhigh', 'asklow']
CANDLES_COLUMNS_BID = ['date', 'bidopen', 'bidclose', 'bidhigh', 'bidlow']
LOG_LEVELS = {'error': 40, 'warn': 30, 'info': 20, 'debug': 10}
LOG_FORMAT = '|%(levelname)s|%(asctime)s|%(message)s'
PROXY_TYPES = ['http', 'socks4', 'socks5']
SERVERS = {
'demo': 'https://api-demo.fxcm.com',
'real': 'https://api.fxcm.com',
'demo2': 'https://api-demo.fuhuisupport.com',
'real2': 'https://api.fuhuisupport.com'
}
port = 443
debug = False
def __init__(self, access_token='', config_file='',
log_file=None, log_level='', server='demo',
proxy_url=None, proxy_port=None, proxy_type=None):
""" Constructor.
Arguments:
access_token: string (default: ''),
an access token for your FXCM account. To create an access token
visit https://tradingstation.fxcm.com/
config_file: string (default: ''),
path of an optional configuration file; fxcmpy tries to read all
other parameters that are not given from that file. The file must
be readable by configparser.
log_file: string (default: None),
path of an optional log file. If not given (and not found in the
optional configuration file), log messages are printed to stdout.
log_level: string (default: 'warn'),
the log level. Must be one of 'error', 'warn', 'info' or 'debug'.
If not given (and not found in the optional configuration file),
'warn' is used.
server: one of 'demo' or 'real' (default: 'demo'),
whether to use the fxcm demo or real trading server.
proxy_url, string (default: None):
if given (or found in the optional configuration file), the url is
used for the proxy.
proxy_port, integer (default: None):
if proxy_url is given (or found in the optional configuration file),
this is the port of the proxy server.
proxy_type, one of 'http', 'socks4', 'socks5' or None (default: 'http'),
if proxy_url is given (or found in the optional configuration file),
this is the type of the proxy server.
"""
config = configparser.ConfigParser()
found_config_file = config.read(config_file)
if config_file:
if len(found_config_file) == 0:
raise IOError("Can not open config file: {0}"
.format(config_file))
if 'FXCM' not in config.sections():
raise ValueError("Can not find section [FXCM] in {0}"
.format(config_file))
else:
config.add_section('FXCM')
if not log_level:
log_level = config['FXCM'].get('log_level','warn')
log_level = log_level.strip('"').strip("'")
if log_level in self.LOG_LEVELS:
log_level = self.LOG_LEVELS[log_level]
else:
raise ValueError("log_level must be one of {0}"
.format(self.LOG_LEVELS.keys()))
if not log_file:
log_file = config['FXCM'].get('log_file')
if log_file:
log_file = log_file.strip('"').strip("'")
if config_file == log_file:
raise Exception("config_file and log_file must be different")
logging.basicConfig(filename=log_file, level=log_level,
format=self.LOG_FORMAT)
else:
logging.basicConfig(level=log_level, format=self.LOG_FORMAT)
self.logger = logging.getLogger('FXCM')
if not server:
server = config['FXCM'].get('server','demo')
server = server.strip('"').strip("'")
if server in self.SERVERS:
self.trading_url = self.SERVERS[server]
else:
raise ValueError("server must be one of {0}"
.format(self.SERVERS.keys()))
self.access_token = access_token
if not self.access_token:
self.access_token = config['FXCM'].get('access_token')
if not self.access_token:
raise ValueError("access_token not provided")
self.access_token = self.access_token.strip('"').strip("'")
if len(self.access_token) != 40:
raise ValueError("access_token must have a length of 40 characters")
if not proxy_url:
proxy_url = config['FXCM'].get('proxy_url')
if proxy_url:
proxy_url = proxy_url.strip('"').strip("'")
if not proxy_port:
proxy_port = config['FXCM'].get('proxy_port')
if not proxy_port:
raise ValueError("proxy_port not provided")
try:
proxy_port = int(proxy_port)
except ValueError:
pass
if not isinstance(proxy_port,int):
raise ValueError("proxy_port must be an integer")
if not 1 <= proxy_port <= 65535:
raise ValueError("proxy_port must be between 1 and 65535")
if not proxy_type:
proxy_type = config['FXCM'].get('proxy_type','http')
proxy_type = proxy_type.strip('"').strip("'")
if proxy_type not in self.PROXY_TYPES:
raise ValueError("proxy_type must be one of {0}"
.format(", ".join(str(t) for t in self.PROXY_TYPES)))
sec_proxy_type = 'https' if proxy_type == 'http' else proxy_type
self.proxies = {
'https': "{0}://{1}:{2}"
.format(sec_proxy_type, proxy_url, proxy_port),
'http': "{0}://{1}:{2}"
.format(proxy_type, proxy_url, proxy_port)
}
else:
self.proxies = {}
self.socket = None
self.socket_thread = None
self.orders_set = False
self.oco_orders_set = False
self.offers_set = False
self.positions_set = False
self.request_header = None
self.default_account = None
self.instruments = None
self.number_update_requests = 0
self.prices = dict()
self.account_ids = set()
self.orders = dict()
self.old_orders = dict()
self.offers = dict()
self.open_pos = dict()
self.closed_pos = dict()
self.oco_orders = dict()
self.add_callbacks = dict()
self.connection_status = 'unset'
self.max_prices = 10000
self.connect()
count = 0
while ((self.connection_status == 'pending' or
self.connection_status == 'unset') and count < 50):
count += 1
time.sleep(1)
if self.connection_status == 'pending' and count == 50:
raise ServerError('Can not find FXCM Server.')
elif self.connection_status == 'aborted':
raise ServerError('Can not connect to FXCM Server.')
self.__collect_account_ids__()
self.default_account = list(self.account_ids)[0]
msg = 'Default account set to %s, to change use set_default_account().'
self.logger.warning(msg % self.default_account)
self.__collect_orders__()
self.__collect_oco_orders__()
self.__collect_offers__()
self.__collect_positions__()
self.instruments = self.get_instruments()
self.subscribe_data_model('Order')
self.subscribe_data_model('OpenPosition')
self.subscribe_data_model('ClosedPosition')
self.__disconnected__ = False
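# Minimal usage sketch for the constructor above (all values are placeholders,
# not real credentials). The connection can be configured either directly via
# keyword arguments or through a config file with an [FXCM] section, which is
# read with configparser as shown in __init__:
#
#   [FXCM]
#   log_level = warn
#   log_file = /tmp/fxcmpy.log
#   server = demo
#   access_token = <40-character token from tradingstation.fxcm.com>
#
#   con = fxcmpy(config_file='fxcm.cfg')
#   # or: con = fxcmpy(access_token='<40-character token>', server='demo')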
def close(self):
self.__disconnected__ = True
if self.is_connected():
self.socket.disconnect()
self.socket_thread.join()
time.sleep(2)
self.socket = None
self.request_header = None
self.default_account = None
self.instruments = None
self.prices = dict()
self.account_ids = set()
self.orders = dict()
self.old_orders = dict()
self.offers = dict()
self.open_pos = dict()
self.closed_pos = dict()
self.oco_orders = dict()
self.add_callbacks = dict()
self.connection_status = 'unset'
def connect(self):
""" Connect to the FXCM server."""
self.connection_status = 'pending'
self.logger.info('Connecting FXCM Server')
self.socket_thread = Thread(target=self.__connect__)
self.socket_thread.start()
def is_connected(self):
""" Return True if the socket connection is established and False otherwise."""
if (self.socket is not None and self.socket.connected and
self.socket_thread.is_alive()):
return True
else:
return False
def get_default_account(self):
""" Return the default account id."""
return self.default_account
def set_default_account(self, account_id):
""" Set the default account id to account_id."""
if account_id not in self.account_ids:
raise ValueError("Unknown account id")
else:
self.default_account = account_id
def get_max_prices(self):
""" Return the max length of the market price tables."""
return self.max_prices
def set_max_prices(self, max_prices):
""" Set the max length of the market price tables.
Arguments:
max_prices, int or None (Default 10000): The max length of the price
tables, if set to None, the price tables are unlimited.
"""
if max_prices is not None:
try:
max_prices = int(max_prices)
except Exception:
raise TypeError('max_prices must be an integer')
if max_prices < 1:
raise ValueError('max_prices must be positive')
self.max_prices = max_prices
def get_instruments(self):
""" Return the tradeable instruments of FXCM as a list."""
self.logger.debug('Fetching available instruments')
data = self.__get_instruments_table__()
if 'data' in data and 'instrument' in data['data']:
instruments = [ins['symbol'] for ins in data['data']['instrument']]
else:
instruments = list()
return instruments
def get_model(self, models=None, summary=False):
""" Return a snapshot of the specified model(s).
Arguments:
models: list,
list of the required models, entries must be out of
['Offer', 'Account', 'Order', 'OpenPosition', 'ClosedPosition',
'Summary', 'Properties', 'LeverageProfile'].
summary: boolean (default False),
if True, return only the summary ('isTotal') rows of the
requested model(s) instead of the full data.
Returns:
The current data of the specified model(s) in a json like manner.
"""
if models is None:
models = list()
if len(models) == 0:
raise ValueError('Please specify one or more models')
for model in models:
if model not in self.models:
raise ValueError('Models have to be of %s' % self.models)
data = self.__handle_request__(method='trading/get_model',
params={'models': list(models)})
total = dict()
for table in data:
total[table] = list()
# iterate over a copy, since matching rows are removed from data[table]
for dataset in list(data[table]):
if 'isTotal' in dataset and dataset['isTotal'] is True:
total[table].append(dataset)
data[table].remove(dataset)
if summary:
return total
else:
return data
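# Usage sketch (illustrative, assuming an established connection `con`):
# request one or more model snapshots in a single call; pass summary=True to
# get only the aggregate rows. The result keys mirror the convenience
# wrappers below (e.g. 'accounts', 'orders').
#
#   snapshot = con.get_model(['Account', 'Order'])
#   accounts = snapshot['accounts']
#   totals = con.get_model(['Account'], summary=True)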
def get_open_positions(self, kind='dataframe'):
""" Return a snapshot of the 'Open Position' model.
Arguments:
kind: one of 'dataframe' (default) or 'list',
how to return the data, either as list or as a pandas DataFrame.
Returns:
The current data of the 'Open Position' model.
"""
data = self.get_model(('OpenPosition',))
open_pos = data['open_positions']
if kind == 'list':
return open_pos
else:
return pd.DataFrame(open_pos)
def get_closed_positions(self, kind='dataframe'):
""" Return a snapshot of the 'Closed Position' model.
Arguments:
kind: one of 'dataframe' (default) or 'list',
how to return the data, either as list or as a pandas DataFrame.
Returns:
The current data of the 'Closed Position' model.
"""
data = self.get_model(('ClosedPosition',))
closed_pos = data['closed_positions']
if kind == 'list':
return closed_pos
else:
return pd.DataFrame(closed_pos)
def get_offers(self, kind='dataframe'):
""" Return a snapshot of the 'Offer' model.
Arguments:
kind: one of 'dataframe' (default) or 'list',
how to return the data, either as list or as a pandas DataFrame.
Returns:
The current data of the 'Offer' model.
"""
data = self.get_model(('Offer',))
offers = data['offers']
if kind == 'list':
return offers
else:
return pd.DataFrame(offers)
def get_orders(self, kind='dataframe'):
""" Return a snapshot of the 'Order' model.
Arguments:
kind: one of 'dataframe' (default) or 'list',
how to return the data, either as list or as a pandas DataFrame.
Returns:
The current data of the 'Order' model.
"""
data = self.get_model(('Order',))
orders = data['orders']
if kind == 'list':
return orders
else:
return pd.DataFrame(orders)
def get_summary(self, kind='dataframe'):
""" Return a snapshot of the 'Summary' model.
Arguments:
kind: one of 'dataframe' (default) or 'list',
how to return the data, either as list or as a pandas DataFrame.
Returns:
The current data of the 'Summary' model.
"""
data = self.get_model(('Summary',))
summary = data['summary']
if kind == 'list':
return summary
else:
return pd.DataFrame(summary)
def get_open_positions_summary(self, kind='dataframe'):
""" Return a summary of the 'Open Position' model.
Arguments:
kind: one of 'dataframe' (default) or 'list',
how to return the data, either as list or as a pandas DataFrame.
Returns:
The summary of the current data of the 'Open Position' model.
"""
data = self.get_model(('OpenPosition',), summary=True)
open_pos = data['open_positions']
if kind == 'list':
return open_pos
else:
return pd.DataFrame(open_pos)
def get_closed_positions_summary(self, kind='dataframe'):
""" Return a summary of the 'Closed Position' model.
Arguments:
kind: one of 'dataframe' (default) or 'list',
how to return the data, either as list or as a pandas DataFrame.
Returns:
The summary of the current data of the 'Closed Position' model.
"""
data = self.get_model(('ClosedPosition',), summary=True)
closed_pos = data['closed_positions']
if kind == 'list':
return closed_pos
else:
return pd.DataFrame(closed_pos)
def get_accounts(self, kind='dataframe'):
""" Return a snapshot of the 'Account' model.
Arguments:
kind: one of 'dataframe' (default) or 'list',
how to return the data, either as list or as a pandas DataFrame.
Returns:
The current data of the 'Account' model.
"""
data = self.get_model(('Account',))
accounts = data['accounts']
if kind == 'list':
return accounts
else:
return pd.DataFrame(accounts)
def get_accounts_summary(self, kind='dataframe'):
""" Return a summary of the 'Account' model.
Arguments:
kind: one of 'dataframe' (default) or 'list',
how to return the data, either as list or as a pandas DataFrame.
Returns:
The summary of the current data of the 'Account' model.
"""
data = self.get_model(('Account',), summary=True)
accounts = data['accounts']
if kind == 'list':
return accounts
else:
return pd.DataFrame(accounts)
def get_account_ids(self):
""" Return a list of available account ids."""
return self.account_ids
def get_order_ids(self):
""" Return a list of available order ids."""
return list(self.orders.keys())
def get_open_trade_ids(self):
""" Return a list of all available trade ids of open positions."""
return list(self.open_pos.keys())
def get_closed_trade_ids(self):
""" Return a list of all available trade ids of closed positions."""
return list(self.closed_pos.keys())
def get_all_trade_ids(self):
"""Returns a list of all available trade ids."""
ids = set(self.get_open_trade_ids())
ids = ids.union(set(self.get_closed_trade_ids()))
ids = list(ids)
ids.sort()
return ids
def get_open_position(self, position_id):
""" Return the open position with given id.
Arguments:
position_id: (integer),
the id of the position.
Returns:
The fxcmpy_open_position object.
"""
try:
position_id = int(position_id)
except:
self.logger.error('position id must be an integer.')
raise TypeError('position id must be an integer.')
if position_id in self.open_pos:
return self.open_pos[position_id]
else:
self.logger.warning('No open position with id %s.' % position_id)
raise ValueError('No open position with id %s.' % position_id)
def get_closed_position(self, position_id):
""" Return the closed position with given id.
Arguments:
position_id: (integer),
the id of the position.
Returns:
The fxcmpy_closed_position object.
"""
try:
position_id = int(position_id)
except:
self.logger.error('position id must be an integer.')
raise TypeError('position id must be an integer.')
if position_id in self.closed_pos:
return self.closed_pos[position_id]
else:
self.logger.warn('No closed position with given id %s.'
% position_id)
raise ValueError('No closed position with given id %s.'
% position_id)
def get_order(self, order_id):
""" Returns the order object for a given order id.
Arguments:
order_id: (integer),
the id of the order.
Returns:
The fxcmpy_order object.
"""
if order_id in self.orders:
return self.orders[order_id]
elif order_id in self.old_orders:
return self.old_orders[order_id]
else:
raise ValueError('No order with id %s' % order_id)
def get_oco_order_ids(self):
""" Return a list of the available oco order ids."""
return list(self.oco_orders.keys())
def get_oco_order(self, order_id):
""" Returns the oco order object for a given order id.
Arguments:
order_id: (integer),
the id of the oco order.
Returns:
The fxcmpy_oco_order_object.
"""
if order_id not in self.oco_orders:
raise ValueError('No oco order with id %s' % order_id)
else:
return self.oco_orders[order_id]
def get_prices(self, symbol):
""" Return the prices of a given subscribed instrument.
Arguments:
symbol: string,
the symbol of the instrument as given by get_instruments().
"""
if symbol in self.prices:
return self.prices[symbol]
else:
return pd.DataFrame(columns=['Bid', 'Ask', 'High', 'Low'])
def get_last_price(self, symbol):
""" Return the last prices of a given subscribed instrument.
Arguments:
symbol: string,
the symbol of the instrument as given by get_instruments().
"""
if symbol in self.prices:
return self.prices[symbol].iloc[-1]
else:
raise ValueError('Symbol %s is not subscribed' % symbol)
def get_subscribed_symbols(self):
""" Returns a list of symbols for the subscribed instruments."""
return list(self.prices.keys())
def is_subscribed(self, symbol):
""" Returns True if the instrument is subscribed and False otherwise.
Arguments:
symbol: string,
the symbol of the instrument in question as given by
get_instruments().
"""
return symbol in self.prices
def subscribe_market_data(self, symbol='', add_callbacks=()):
""" Stream the prices of an instrument.
Arguments:
symbol: string,
the symbol of the instrument in question as given by
get_instruments().
add_callbacks: list of callables,
all methods in that list will be called for every incoming dataset
of the instrument. Such a method has to accept two positional
arguments, data and dataframe, say. The first should be a json like
object with the new price data received by the stream and the
second should be a Pandas DataFrame with the collected price data
as given by get_prices().
"""
if symbol == '':
raise ValueError('No symbol given.')
self.logger.info('Try to subscribe for %s.' % symbol)
for func in add_callbacks:
if not callable(func):
self.logger.error('Callback method is not callable.')
raise ValueError('Content of add_callbacks is not callable.')
else:
if symbol not in self.add_callbacks:
self.add_callbacks[symbol] = dict()
self.logger.info('Adding callback method %s for symbol %s.'
% (func.__name__, symbol))
self.add_callbacks[symbol][func.__name__] = func
params = {'pairs': symbol}
data = self.__handle_request__(method='subscribe', params=params,
protocol='post')
if symbol not in self.prices:
data = data['pairs'][0]
date = pd.to_datetime(int(data['Updated']), unit='ms')
self.prices[symbol] = pd.DataFrame([data['Rates'][0:4]],
columns=['Bid', 'Ask', 'High', 'Low'],
index=[date])
self.socket.on(symbol, self.__on_price_update__)
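# Usage sketch (illustrative; the symbol is only an example): a callback must
# accept two positional arguments -- the raw update and the accumulated price
# DataFrame -- as described in the docstring above.
#
#   def print_tick(data, dataframe):
#       # data is the raw json-like update, dataframe the collected prices
#       print(dataframe.iloc[-1])
#
#   con.subscribe_market_data('EUR/USD', add_callbacks=(print_tick,))
#   # ... later: con.unsubscribe_market_data('EUR/USD')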
def subscribe_data_model(self, model='', add_callbacks=()):
""" Stream data of a model.
Arguments:
model: string,
the model, must be one of ['Offer', 'Account', 'Order',
'OpenPosition', 'ClosedPosition', 'Summary', 'Properties',
'LeverageProfile'].
add_callbacks: list of callables,
all methods in that list will be called for every incoming dataset
of the model. Such a method has to accept two positional
arguments, data and dataframe, say. The first should be a json like
object with the new data received by the stream and the second
should be a Pandas DataFrame with the collected data.
"""
if model == '':
raise ValueError('No model given.')
if model not in ['Offer', 'Account', 'Order', 'OpenPosition',
'ClosedPosition', 'Summary']:
msg = "model must be one of 'Offer', 'Account', 'Order', "
msg += "'OpenPosition', 'ClosedPosition' or 'Summary'."
raise ValueError(msg)
self.logger.info('Try to subscribe for %s.' % model)
for func in add_callbacks:
if not callable(func):
self.logger.error('Callback method is not callable.')
raise ValueError('Content of add_callbacks must be callable.')
else:
if model not in self.add_callbacks:
self.add_callbacks[model] = dict()
self.logger.info('Adding callback method %s for model %s.'
% (func.__name__, model))
self.add_callbacks[model][func.__name__] = func
params = {'models': model}
self.__handle_request__(method='trading/subscribe',
params=params, protocol='post')
if model == 'Order':
self.socket.on('Order', self.__on_order_update__)
elif model == 'OpenPosition':
self.socket.on('OpenPosition', self.__on_open_pos_update__)
elif model == 'ClosedPosition':
self.socket.on('ClosedPosition', self.__on_closed_pos_update__)
else:
self.socket.on(model, self.__on_model_update__)
def subscribe_instrument(self, symbol):
""" Subscribe an instrument so that it appears in the offers table.
Arguments:
symbol, string:
the symbol of the instrument to activate.
Returns:
True on success and False otherwise.
"""
ret = self.__update__instrument_subscription__(symbol, True)
return ret
def unsubscribe_instrument(self, symbol):
""" Unsubscribe an instrument so that it does not appear in the
offers table.
Arguments:
symbol, string:
the symbol of the instrument to deactivate.
Returns:
True on success and False otherwise.
"""
ret = self.__update__instrument_subscription__(symbol, False)
return ret
def unsubscribe_market_data(self, symbol=''):
""" Unsubscribe for instrument prices of the given symbol."""
if symbol == '':
raise ValueError('No symbol given.')
self.logger.info('Try to unsubscribe for %s.' % symbol)
params = {'pairs': symbol}
self.__handle_request__(method='unsubscribe', params=params,
protocol='post')
if symbol in self.prices:
del self.prices[symbol]
if symbol in self.add_callbacks:
del self.add_callbacks[symbol]
def unsubscribe_data_model(self, model=''):
""" Unsubscribe for the given model.
Arguments:
model: string,
the model, must be one of ['Offer', 'Account', 'Order',
'OpenPosition', 'ClosedPosition', 'Summary', 'Properties',
'LeverageProfile'].
"""
if model == '':
raise ValueError('No symbol given.')
self.logger.info('Try to unsubscribe for %s.' % model)
if model not in ['Order', 'OpenPosition', 'ClosedPosition']:
params = {'models': model}
self.__handle_request__(method='trading/unsubscribe',
params=params, protocol='post')
else:
msg = 'Model %s is used by internal routines; cancelling unsubscription '
msg += 'and only removing custom callbacks.'
self.logger.warn(msg % model)
if model in self.add_callbacks:
del self.add_callbacks[model]
def close_all_for_symbol(self, symbol, order_type='AtMarket',
time_in_force='GTC', account_id=None):
""" Close all positions for a given symbol.
Arguments:
account_id: string,
the order's account id.
symbol: string,
the trades symbol as given by get_instruments.
order_type: string (default: 'AtMarket'),
the type of order execution, one of 'AtMarket' or 'MarketRange'.
time_in_force: string (default: 'GTC'),
the time in force of the order execution, must be one of
'IOC', 'GTC', 'FOK' or 'DAY'.
account_id: integer (Default None),
the order's account id. If not given, the default account is used.
"""
if account_id is None:
account_id = self.default_account
else:
try:
account_id = int(account_id)
except:
raise TypeError('account_id must be an integer.')
if account_id not in self.account_ids:
raise ValueError('Unknown account id %s.' % account_id)
if order_type not in ['AtMarket', 'MarketRange']:
msg = "order_type must be 'AtMarket' or 'MarketRange'."
raise ValueError(msg)
if time_in_force not in ['IOC', 'GTC', 'FOK', 'DAY']:
msg = "time_in_force must be in 'IOC', 'GTC', 'FOK', 'DAY'."
raise ValueError(msg)
params = {
'account_id': account_id,
'forSymbol': 'true',
'symbol': symbol,
'order_type': order_type,
'time_in_force': time_in_force
}
self.__handle_request__(method='trading/close_all_for_symbol',
params=params, protocol='post')
def close_all(self, order_type='AtMarket', time_in_force='GTC',
account_id=None):
""" Close all positions.
Arguments:
account_id: string,
the order's account id.
order_type: string (default: 'AtMarket'),
the type of order execution, one of 'AtMarket' or 'MarketRange'.
time_in_force: string (default: 'GTC'),
the time in force of the order execution, must be one of
'IOC', 'GTC', 'FOK' or 'DAY'.
account_id: integer (Default None),
the order's account id. If not given, the default account is used.
"""
if account_id is None:
account_id = self.default_account
else:
try:
account_id = int(account_id)
except:
raise TypeError('account_id must be an integer.')
if account_id not in self.account_ids:
raise ValueError('Unknown account id %s.' % account_id)
if order_type not in ['AtMarket', 'MarketRange']:
msg = "order_type must be 'AtMarket' or 'MarketRange'."
raise ValueError(msg)
if time_in_force not in ['IOC', 'GTC', 'FOK', 'DAY']:
msg = "time_in_force must be in 'IOC', 'GTC', 'FOK', 'DAY'."
raise ValueError(msg)
params = {
'account_id': account_id,
'forSymbol': 'false',
'symbol': '',
'order_type': order_type,
'time_in_force': time_in_force
}
self.__handle_request__(method='trading/close_all_for_symbol',
params=params, protocol='post')
def open_trade(self, symbol, is_buy,
amount, time_in_force, order_type, rate=0,
is_in_pips=True, limit=None, at_market=0, stop=None,
trailing_step=None, account_id=None):
""" Opens a trade for a given instrument.
Arguments:
symbol: string,
the symbol of the instrument to trade as given by
get_instruments().
is_buy: boolean,
True if the trade is a buy, False else.
amount: integer,
the trades amount in lots.
order_type: string,
the order type, must be 'AtMarket' or 'MarketRange'.
time_in_force: string,
the time in force of the order execution, must be one of
'IOC', 'GTC', 'FOK' or 'DAY'.
rate: float (default 0),
the trades rate.
is_in_pips: boolean (default True),
whether the trades stop/limit rates are in pips.
limit: float (default 0),
the trades limit rate.
at_market: float (default 0),
the markets range.
stop: float or None (default None),
the trades stop rate.
trailing_step: int or None (default None),
the trailing step for the stop rate.
account_id: integer (Default None),
the trade's account id. If not given, the default account is used.
"""
if account_id is None:
account_id = self.default_account
else:
try:
account_id = int(account_id)
except:
raise TypeError('account_id must be an integer.')
if account_id not in self.account_ids:
raise ValueError('Unknown account id %s.' % account_id)
try:
amount = int(amount)
except:
raise TypeError('amount must be an integer.')
try:
rate = float(rate)
except:
raise TypeError('rate must be a number.')
if limit is not None:
try:
limit = float(limit)
except:
raise TypeError('limit must be a number.')
try:
at_market = float(at_market)
except:
raise TypeError('at_market must be a number.')
if order_type not in ['AtMarket', 'MarketRange']:
msg = "order_type must be 'AtMarket' or 'MarketRange'."
raise ValueError(msg)
if time_in_force not in ['IOC', 'GTC', 'FOK', 'DAY']:
msg = "time_in_force must be in 'IOC', 'GTC', 'FOK', 'DAY'."
raise ValueError(msg)
if is_in_pips is True:
is_in_pips = 'true'
elif is_in_pips is False:
is_in_pips = 'false'
else:
raise ValueError('is_in_pips must be True or False.')
if is_buy is True:
is_buy = 'true'
elif is_buy is False:
is_buy = 'false'
else:
raise ValueError('is_buy must be True or False.')
if stop is not None:
try:
stop = float(stop)
except:
raise TypeError('stop must be a number.')
if trailing_step is not None:
try:
trailing_step = int(trailing_step) # DH change from float to int
except:
raise ValueError('trailing step must be a number.')
params = {
'account_id': account_id,
'symbol': symbol,
'is_buy': is_buy,
'rate': rate,
'amount': amount,
'at_market': at_market,
'order_type': order_type,
'time_in_force': time_in_force,
'limit': limit,
'is_in_pips': is_in_pips
}
if limit is not None:
params['limit'] = limit
if stop is not None:
params['stop'] = stop
if trailing_step is not None:
params['trailing_step'] = trailing_step
data = self.__handle_request__(method='trading/open_trade',
params=params, protocol='post')
if 'data' in data and 'orderId' in data['data']:
order_id = int(data['data']['orderId'])
# return data
else:
self.logger.warn('Missing orderId in servers answer.')
return 0
count = 0
order = None
while count < 10:
try:
order = self.get_order(order_id)
break
except:
time.sleep(1)
count += 1
# order = self.get_order(order_id)
if order is None:
self.logger.warn('Can not find Order object, returning None.')
return order
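# Usage sketch (illustrative values only, assuming an established connection
# `con` and a tradeable symbol): open a small market trade with stop and
# limit given in pips, matching the parameters documented above.
#
#   order = con.open_trade(symbol='EUR/USD', is_buy=True, amount=10,
#                          time_in_force='GTC', order_type='AtMarket',
#                          is_in_pips=True, limit=30, stop=-10)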
def change_trade_stop_limit(self, trade_id, is_stop, rate, is_in_pips=True,
trailing_step=0):
""" Change a trade's stop or limit rate.
Arguments:
trade_id: integer,
the id of the trade to change.
is_stop: boolean,
defines whether the trade's limit (False) or the stop rate (True)
is to be changed.
rate: float,
the new stop or limit rate.
is_in_pips: boolean (Default True),
whether the trade's stop/limit rates are in pips.
trailing_step: int (Default 0),
the trailing step for the stop rate.
"""
try:
trade_id = int(trade_id)
except:
raise TypeError('trade_id must be an integer.')
if is_stop is True:
is_stop = 'true'
elif is_stop is False:
is_stop = 'false'
else:
raise ValueError('is_stop must be a boolean.')
try:
rate = float(rate)
except:
raise TypeError('rate must be a number.')
if is_in_pips is True:
is_in_pips = 'true'
elif is_in_pips is False:
is_in_pips = 'false'
else:
raise ValueError('is_in_pips must be a boolean.')
try:
trailing_step = int(trailing_step)
except:
raise TypeError('trailing_step must be a number.')
params = {
'trade_id': trade_id,
'is_in_pips': is_in_pips,
'is_stop': is_stop,
'rate': rate,
'trailing_step': trailing_step,
}
meth = 'trading/change_trade_stop_limit'
self.__handle_request__(method=meth, params=params,
protocol='post')
def close_trade(self, trade_id, amount, order_type='AtMarket',
time_in_force='IOC', rate=None, at_market=None):
""" Close a given trade.
Arguments:
trade_id: integer,
the id of the trade.
amount: integer (default 0),
the trades amount in lots.
order_type: string (default 'AtMarket'),
the order type, must be 'AtMarket' or 'MarketRange'.
time_in_force: string (default 'IOC'),
the time in force of the order execution, must be one of
'IOC', 'GTC', 'FOK' or 'DAY''.
rate: float (default 0),
the trades rate.
at_market: float (default 0),
the markets range.
"""
try:
trade_id = int(trade_id)
except:
raise TypeError('trade_id must be an integer.')
try:
amount = float(amount)
except:
raise TypeError('amount must be a number.')
if order_type == 'MarketRange' and rate is not None:
self.logger.warn("rate is ignored for order_type='MarketRange'")
if rate is not None:
try:
rate = float(rate)
except:
raise TypeError('rate must be a number.')
if order_type == 'MarketRange' and at_market is None:
raise ValueError("at_market is required for order_type='MarketRange'")
if at_market is not None:
try:
at_market = float(at_market)
except:
raise TypeError('at_market must be a number.')
if at_market < 0:
raise ValueError('at_market must be greater than or equal to zero.')
if order_type not in ['AtMarket', 'MarketRange']:
msg = "order_type must be 'AtMarket' or 'MarketRange'."
raise ValueError(msg)
if time_in_force not in ['IOC', 'GTC', 'FOK', 'DAY']:
msg = "time_in_force must be in 'IOC', 'GTC', 'FOK' or 'DAY'."
raise ValueError(msg)
params = {
'trade_id': trade_id,
'amount': amount,
'order_type': order_type,
'time_in_force': time_in_force
}
if rate is not None and order_type != 'MarketRange':
params['rate'] = rate
if at_market is not None:
params['at_market'] = at_market
self.__handle_request__(method='trading/close_trade',
params=params, protocol='post')
def change_order(self, order_id, amount, rate, order_range=0,
trailing_step=None):
""" Change amount, rate, order_range and / or trailing_step of an
order.
Arguments:
order_id: int,
the id of the order to change.
amount: int,
the new amount of the order.
rate: float,
the new rate of the order.
order_range: float,
the new range of the order. Only used for 'RangeEntry' orders,
for other orders, it is 0 (default).
trailing_step: int,
the new trailing step for the order. Defaults to None.
"""
try:
order_id = int(order_id)
except:
raise TypeError('order_id must be an integer.')
try:
amount = float(amount)
except:
raise TypeError('amount must be a number.')
try:
rate = float(rate)
except:
raise TypeError('rate must be a number.')
try:
order_range = float(order_range)
except:
raise TypeError('order_range must be a number.')
if trailing_step is not None:
try:
trailing_step = int(trailing_step)
except:
raise ValueError('trailing step must be a number.')
params = {
'order_id': order_id,
'rate': rate,
'range': order_range,
'amount': amount
}
if trailing_step is not None:
params['trailing_step'] = trailing_step
self.__handle_request__(method='trading/change_order',
params=params, protocol='post')
def delete_order(self, order_id):
""" Delete an order.
Arguments:
order_id: integer,
the id of the order to delete.
"""
try:
order_id = int(order_id)
except:
raise TypeError('order_id must be an integer.')
if order_id in self.old_orders:
self.logger.warn('Order is already deleted.')
return
if order_id not in self.orders:
raise ValueError('No order with order id %s' % order_id)
params = {
'order_id': order_id
}
self.__handle_request__(method='trading/delete_order',
params=params, protocol='post')
def create_market_buy_order(self, symbol, amount, account_id=None):
""" Create an order to buy at market price.
Arguments:
symbol: string,
the symbol of the instrument to trade as given by
get_instruments().
amount: integer,
the trades amount in lots.
account_id: integer (Default None),
the trade's account id. If not given, the default account is used.
"""
if account_id is None:
account_id = self.default_account
else:
try:
account_id = int(account_id)
except:
raise TypeError('account_id must be an integer.')
if account_id not in self.account_ids:
raise ValueError('Unknown account id %s.' % account_id)
if symbol not in self.instruments:
raise ValueError('Unknown symbol %s.' % symbol)
try:
amount = int(amount)
except:
raise TypeError('amount must be an integer.')
order = self.open_trade(symbol, True, amount, 'FOK', 'AtMarket',
account_id=account_id)
return order
def create_market_sell_order(self, symbol, amount, account_id=None):
""" Create an order to sell at market price.
Arguments:
symbol: string,
the symbol of the instrument to trade as given by
get_instruments().
amount: integer,
the trades amount in lots.
account_id: integer (Default None),
the trade's account id. If not given, the default account is used.
"""
if account_id is None:
account_id = self.default_account
else:
try:
account_id = int(account_id)
except:
raise TypeError('account_id must be an integer.')
if account_id not in self.account_ids:
raise ValueError('Unknown account id %s.' % account_id)
if symbol not in self.instruments:
raise ValueError('Unknown symbol %s.' % symbol)
try:
amount = int(amount)
except:
raise TypeError('amount must be an integer.')
order = self.open_trade(symbol, False, amount, 'FOK', 'AtMarket',
account_id=account_id)
return order
def create_entry_order(self, symbol, is_buy, amount, time_in_force,
order_type="Entry", limit=0, is_in_pips=True,
rate=0, stop=None, trailing_step=None,
trailing_stop_step=None, order_range=None,
expiration=None,
account_id=None):
""" Creates an entry order for a given instrument.
Arguments:
account_id: integer (Default None),
the trading account's id. If None, the default account is used.
symbol: string,
the symbol of the instrument to trade as given by
get_instruments().
is_buy: boolean,
True if the trade is a buy, False else.
amount: integer,
the trades amount in lots.
order_type: string,
the order type, must be 'Entry' (default) or 'RangeEntry'.
time_in_force: string,
the time in force of the order execution, must be one of
'GTC', 'DAY', 'IOC', 'FOK' or 'GTD'.
rate: float (default 0),
the trades rate.
is_in_pips: boolean (default True),
whether the trade's stop/limit rates are in pips.
limit: float (default 0),
the trades limit rate.
stop: float or None (default None),
the trades stop rate.
trailing_step: int or None (default None),
the trailing step for the stop rate.
trailing_stop_step: float or None (default None),
the trailing step for the order stop rate.
order_range: float or None (default),
the order's range if order type is 'RangeEntry'.
expiration: datetime.datetime, datetime.date or string (default None),
order's expiration date for 'GTD'.
Returns:
The id of the new order.
"""
if account_id is None:
account_id = self.default_account
else:
try:
account_id = int(account_id)
except:
raise TypeError('account_id must be an integer.')
if account_id not in self.account_ids:
raise ValueError('Unknown account id %s.' % account_id)
try:
amount = int(amount)
except:
raise TypeError('amount must be an integer.')
if symbol not in self.instruments:
raise ValueError('Unknown symbol %s.' % symbol)
try:
rate = float(rate)
except:
raise TypeError('rate must be a number.')
try:
limit = float(limit)
except:
raise TypeError('limit must be a number.')
if order_type not in ['Entry', 'RangeEntry']:
msg = "order_type must be 'Entry' or 'RangeEntry'."
raise ValueError(msg)
if time_in_force not in ['GTC', 'DAY', 'GTD', 'IOC', 'FOK']:
msg = "time_in_force must be 'GTC', 'DAY', 'IOC', 'FOK', or 'GTD'."
raise ValueError(msg)
if is_in_pips is True:
is_in_pips = 'true'
elif is_in_pips is False:
is_in_pips = 'false'
else:
raise ValueError('is_in_pips must be True or False.')
if is_buy is True:
is_buy = 'true'
elif is_buy is False:
is_buy = 'false'
else:
raise ValueError('is_buy must be True or False.')
if stop is not None:
try:
stop = float(stop)
except:
raise TypeError('stop must be a number.')
if trailing_step is not None:
try:
trailing_step = int(trailing_step)
except:
raise ValueError('trailing_step must be a number.')
if trailing_stop_step is not None:
try:
trailing_stop_step = int(trailing_stop_step)
except:
raise ValueError('trailing_stop_step must be a number.')
if order_range is not None:
try:
order_range = float(order_range)
except:
raise ValueError('order_range must be a number.')
elif order_type == 'RangeEntry':
msg = "If order type is 'RangeEntry', order_range must be given."
raise ValueError(msg)
if expiration:
if isinstance(expiration, str):
try:
expiration = pd.Timestamp(expiration).to_pydatetime()
except:
msg = "Can not convert parameter expiration to datetime."
raise ValueError(msg)
elif (isinstance(expiration, dt.datetime) or
isinstance(expiration, dt.date)):
pass
else:
msg = "expiration must either be a datetime object or a string"
raise ValueError(msg)
expi = expiration.strftime('%Y-%m-%d %H:%M')
elif time_in_force == 'GTD':
msg = "If time_in_force is 'GTD', expiration must be given."
raise ValueError(msg)
params = {
'account_id': account_id,
'symbol': symbol,
'is_buy': is_buy,
'rate': rate,
'amount': amount,
'limit': limit,
'order_type': order_type,
'is_in_pips': is_in_pips,
'time_in_force': time_in_force
}
if stop is not None:
params['stop'] = stop
if trailing_step is not None:
params['trailing_step'] = trailing_step
if trailing_stop_step is not None:
params['trailing_stop_step'] = trailing_stop_step
if order_range is not None:
params['range'] = order_range
if expiration is not None:
params['expiration'] = expi
data = self.__handle_request__(method='trading/create_entry_order',
params=params, protocol='post')
if 'data' in data and 'orderId' in data['data']:
order_id = int(data['data']['orderId'])
else:
            self.logger.warn('Missing orderId in server answer.')
return 0
try:
order = self.get_order(order_id)
except:
time.sleep(1)
order = self.get_order(order_id)
return order
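    # A minimal usage sketch (not part of the library source): assuming `con`
    # is an already connected instance of this class, an entry order could be
    # placed roughly as follows; the symbol, rates and amounts are purely
    # illustrative.
    #
    #     order = con.create_entry_order(symbol='EUR/USD', is_buy=True,
    #                                    amount=10, rate=1.05, limit=100,
    #                                    stop=-20, order_type='Entry',
    #                                    is_in_pips=True, time_in_force='GTC')
    #
    # As the code above shows, the method returns the order object fetched via
    # get_order(), not just its id.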
def change_order_stop_limit(self, order_id, stop=None, limit=None,
is_stop_in_pips=True, is_limit_in_pips=True):
""" Change an order's stop and / or limit rate. To let an attribute
unchanged set the values parameter to None.
Arguments:
order_id: integer,
the id of the order to change.
stop: float or None (Default),
the new stop rate.
limit: float or None (Default),
the new limit rate.
is_stop_in_pips: boolean (Default True),
whether the order's stop rate is in pips.
is_limit_in_pips: boolean (Default True),
whether the order's limit rate is in pips.
"""
try:
order_id = int(order_id)
except:
raise TypeError('order_id must be an integer.')
params = {
'order_id': order_id
}
if stop is not None:
if is_stop_in_pips is True:
is_stop_in_pips = 'true'
elif is_stop_in_pips is False:
is_stop_in_pips = 'false'
else:
raise ValueError('is_stop_in_pips must be a boolean.')
try:
stop = float(stop)
except:
raise TypeError('stop must be a number.')
params['is_stop_in_pips'] = is_stop_in_pips
params['stop'] = stop
if limit is not None:
if is_limit_in_pips is True:
is_limit_in_pips = 'true'
elif is_limit_in_pips is False:
is_limit_in_pips = 'false'
else:
raise ValueError('is_limit_in_pips must be a boolean.')
try:
limit = float(limit)
except:
                raise TypeError('limit must be a number.')
params['is_limit_in_pips'] = is_limit_in_pips
params['limit'] = limit
meth = 'trading/change_order_stop_limit'
self.__handle_request__(method=meth, params=params,
protocol='post')
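    # A minimal usage sketch (not part of the library source): adjusting the
    # protective rates of an existing order; the order id and pip values are
    # illustrative and `con` is assumed to be a connected instance.
    #
    #     con.change_order_stop_limit(order_id=123456789, stop=-25, limit=50,
    #                                 is_stop_in_pips=True,
    #                                 is_limit_in_pips=True)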
def create_oco_order(self, symbol, is_buy, is_buy2, amount, is_in_pips,
time_in_force, at_market, order_type,
limit=0, limit2=0, rate=0, rate2=0, stop=0, stop2=0,
trailing_step=0, trailing_step2=0,
trailing_stop_step=0, trailing_stop_step2=0,
account_id=None):
""" Creates an entry order for a given instrument.
Arguments:
account_id: integer (Default None),
the id of the trading account. If None, the default account is
used.
symbol: string,
the symbol of the instrument to trade as given by
get_instruments().
            is_buy: boolean,
                True if the first order is a buy, False otherwise.
            is_buy2: boolean,
                True if the second order is a buy, False otherwise.
            amount: integer,
                the trade amount in lots.
            is_in_pips: boolean,
                whether the orders' stop/limit rates are in pips.
            time_in_force: string,
                the time in force of the order execution, must be one of
                'IOC', 'GTC', 'FOK' or 'DAY'.
            at_market: float (default 0),
                the order's market range.
            order_type: string,
                the order type, must be 'AtMarket' or 'MarketRange'.
limit: float (default 0),
the first order's limit rate.
limit2: float (default 0),
the second order's limit rate.
rate: float (default 0),
the first order's rate.
            rate2: float (default 0),
the second order's rate.
stop: float (default 0),
the first order's stop rate.
stop2: float (default 0),
the second order's stop rate.
trailing_step: int (default 0),
the trailing step for the first order.
trailing_step2: int (default 0),
the trailing step for the second order.
trailing_stop_step: float (default 0),
the trailing step for the first order's stop rate.
            trailing_stop_step2: float (default 0),
the trailing step for the second order's stop rate.
Returns:
            The new OCO order object bundling both orders.
"""
if account_id is None:
account_id = self.default_account
else:
try:
account_id = int(account_id)
except:
raise TypeError('account_id must be an integer.')
if account_id not in self.account_ids:
raise ValueError('Unknown account id %s.' % account_id)
if is_buy is True:
is_buy = 'true'
elif is_buy is False:
is_buy = 'false'
else:
raise ValueError('is_buy must be a boolean.')
if is_buy2 is True:
is_buy2 = 'true'
elif is_buy2 is False:
is_buy2 = 'false'
else:
raise ValueError('is_buy2 must be a boolean.')
try:
amount = int(amount)
except:
raise TypeError('amount must be an integer.')
if is_in_pips is True:
is_in_pips = 'true'
elif is_in_pips is False:
is_in_pips = 'false'
else:
raise ValueError('is_in_pips must be a boolean.')
if time_in_force not in ['IOC', 'GTC', 'FOK', 'DAY']:
msg = "time_in_force must be 'IOC', 'GTC', 'FOK' or 'DAY'."
raise ValueError(msg)
try:
at_market = float(at_market)
except:
raise TypeError('at_market must be a number.')
if order_type not in ['AtMarket', 'MarketRange']:
msg = "order_type must one of 'AtMarket' or 'MarketRange'."
raise ValueError(msg)
try:
limit = float(limit)
except:
raise TypeError('limit must be a number.')
try:
limit2 = float(limit2)
except:
raise TypeError('limit2 must be a number.')
try:
rate = float(rate)
except:
raise TypeError('rate must be a number.')
try:
rate2 = float(rate2)
except:
raise TypeError('rate2 must be a number.')
try:
stop = float(stop)
except:
raise TypeError('stop must be a number.')
try:
stop2 = float(stop2)
except:
raise TypeError('stop2 must be a number.')
try:
trailing_step = int(trailing_step)
except:
            raise ValueError('trailing_step must be a number.')
try:
trailing_step2 = int(trailing_step2)
except:
            raise ValueError('trailing_step2 must be a number.')
try:
trailing_stop_step = int(trailing_stop_step) # DH replace float to int
except:
raise ValueError('trailing_stop_step must be a number.')
try:
trailing_stop_step2 = int(trailing_stop_step2) # DH replace float to int
except:
raise ValueError('trailing_stop_step2 must be a number.')
params = {
'account_id': account_id,
'symbol': symbol,
'amount': amount,
'at_market': at_market,
'order_type': order_type,
'is_in_pips': is_in_pips,
'time_in_force': time_in_force,
'is_buy': is_buy,
'is_buy2': is_buy2,
'rate': rate,
'rate2': rate2,
'stop': stop,
'stop2': stop2,
'limit': limit,
'limit2': limit2,
'trailing_step': trailing_step,
'trailing_step2': trailing_step2,
'trailing_stop_step': trailing_stop_step,
'trailing_stop_step2': trailing_stop_step2,
}
data = self.__handle_request__(method='trading/simple_oco',
params=params, protocol='post')
if 'data' not in data:
self.logger.error('Missing data in server response: %s.' % data)
raise ServerError('Missing data in server response.')
orders = list()
for data_set in data['data']:
count = 0
while count < 100:
if int(data_set['orderId']) not in self.orders:
time.sleep(1)
count += 1
else:
orders.append(self.get_order(int(data_set['orderId'])))
break
else:
raise ValueError('No order with id %s' % data_set['orderId'])
bulk_id = orders[0].__ocoBulkId__
oco_order = fxcmpy_oco_order(bulk_id, orders, self, self.logger)
self.oco_orders[bulk_id] = oco_order
return oco_order
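    # A minimal usage sketch (not part of the library source): a simple OCO
    # pair, one buy and one sell entry around the current market; all values
    # are illustrative and `con` is assumed to be a connected instance.
    #
    #     oco = con.create_oco_order(symbol='EUR/USD',
    #                                is_buy=True, is_buy2=False,
    #                                amount=10, is_in_pips=False,
    #                                time_in_force='GTC', at_market=0,
    #                                order_type='AtMarket',
    #                                rate=1.06, rate2=1.04,
    #                                limit=1.07, limit2=1.03,
    #                                stop=1.05, stop2=1.05)
    #
    # The returned value is the fxcmpy_oco_order object built above, bundling
    # both single orders under one bulk id.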
def add_to_oco(self, order_ids, oco_bulk_id=0):
""" Add orders to OCO Orders.
Arguments:
order_ids: list of order_ids,
the ids of the orders to add to the OCO Order.
            oco_bulk_id: integer (default 0),
the id of the OCO Order, if 0 a new OCO Order will be created.
"""
try:
_ = (int(i) for i in order_ids)
except:
raise TypeError('order_ids must be a list of integers.')
try:
oco_bulk_id = int(oco_bulk_id)
except:
raise TypeError('oco_bulk_id must be an integer.')
params = {
'orderIds': order_ids,
'ocoBulkId': oco_bulk_id
}
self.__handle_request__(method='trading/add_to_oco',
params=params, protocol='post')
def remove_from_oco(self, order_ids):
""" Remove orders from OCO Orders.
Arguments:
order_ids: list of order_ids,
the ids of the orders to remove from OCO Orders.
"""
try:
_ = (int(i) for i in order_ids)
except:
raise TypeError('order_ids must be a list of integers.')
params = {
'orderIds': order_ids
}
self.__handle_request__(method='trading/remove_from_oco',
params=params, protocol='post')
def edit_oco(self, oco_bulk_id, add_order_ids=None,
remove_order_ids=None):
"""Add or remove orders to or from OCO Orders.
Arguments:
oco_bulk_id: integer,
the id of the OCO Order.
add_order_ids: list of order_ids,
the id's of the orders to add to OCO Orders.
remove_order_ids: list of order_ids,
the id's of the orders to remove from OCO Orders.
"""
if add_order_ids is None:
add_order_ids = list()
if remove_order_ids is None:
remove_order_ids = list()
try:
_ = (int(i) for i in add_order_ids)
except:
raise TypeError('add_order_ids must be a list of integers.')
try:
_ = (int(i) for i in remove_order_ids)
except:
raise TypeError('remove_order_ids must be a list of integers.')
params = {
'ocoBulkId': oco_bulk_id,
'addOrderIds': add_order_ids,
'removeOrderIds': remove_order_ids
}
self.__handle_request__(method='trading/edit_oco',
params=params, protocol='post')
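    # A minimal usage sketch (not part of the library source): grouping two
    # existing orders into a new OCO bulk and later removing one of them
    # again; the order ids are illustrative and `con` is assumed to be a
    # connected instance.
    #
    #     con.add_to_oco([111111111, 222222222], oco_bulk_id=0)
    #     con.remove_from_oco([222222222])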
def get_instruments_for_candles(self):
""" Return a list of all available instruments to receive historical
data for."""
return list(self.offers.keys())
def get_candles(self, instrument='', offer_id=None, period='H1', number=10,
start=None, end=None, with_index=True, columns=[],
stop=None):
"""Return historical data from the fxcm database as pandas.DataFrame.
Arguments:
instrument: string (default ''):
the instrument for which data is requested. For a list of all
available instruments for historical data, use
get_instruments_for_candles().
If the value is equal to '' (default), offer_id must have a value.
If both, instrument and offer_id are given, the value of instrument
is used.
offer_id: integer (default None):
                the id of the instrument for which data is requested, as given
                in the offers table returned by get_offers(). If offer_id is equal
to None (default), the parameter instrument must have a value.
If both, instrument and offer_id are given, the value of instrument
is used.
period: string,
the granularity of the data. Must be one of
'm1', 'm5', 'm15', 'm30', 'H1', 'H2', 'H3', 'H4', 'H6', 'H8',
'D1', 'W1', or 'M1'.
number: integer (default 10),
the number of candles to receive.
            start: datetime.datetime, datetime.date or string (default None),
the first date to receive data for.
end: datetime.datetime, datetime.date or string (default None),
the last date to receive data for.
with_index: boolean (default True),
                whether the column 'date' should serve as index in the resulting
pandas.DataFrame.
columns: list (default list()),
a list of column labels the result should include. If empty, all
columns are returned.
Available column labels are
'date', 'bidopen', 'bidclose', 'bidhigh', 'bidlow',
'askopen', 'askclose', 'askhigh', 'asklow', 'tickqty'.
Also available is 'asks' as shortcut for all ask related columns
and 'bids' for all bid related columns, respectively.
The column 'date' is always included.
Returns:
A pandas DataFrame containing the requested data.
"""
if instrument == '' and offer_id is None:
            self.logger.error('Error in get_candles: No instrument given.')
msg = 'Please provide either an instrument or an offer_id'
raise ValueError(msg)
elif instrument != '':
if offer_id is not None:
msg = "Both, instrument and offer_id are given, "
msg += "taking the value of instrument."
self.logger.warn(msg)
if instrument in self.offers:
offer_id = self.offers[instrument]
else:
self.logger.error('Unknown instrument: %s' % instrument)
raise ValueError('Instrument must be one of %s.'
% str(tuple(self.offers.keys())))
else:
if offer_id not in self.offers.values():
self.logger.error('Unknown offer_id: %s' % offer_id)
raise ValueError('Unknown offer_id: %s' % offer_id)
if period not in self.PERIODS:
self.logger.error('Error in get_candles: Illegal period: %s.'
% period)
raise ValueError('period must be one of %s.' % self.PERIODS)
if type(number) != int or number < 1 or number > 10000:
self.logger.error('Error in get_candles: Illegal param. number: %s'
% number)
            raise ValueError('number must be an integer between 1 and 10000.')
params = {
'num': number,
}
if start:
if isinstance(start, str):
try:
start = pd.Timestamp(start).to_pydatetime()
except:
msg = "Can not convert parameter start to datetime."
raise ValueError(msg)
elif isinstance(start, dt.datetime) or isinstance(start, dt.date):
pass
else:
msg = "start must either be a datetime object or a string"
raise ValueError(msg)
start = ((start - dt.datetime(1970, 1, 1)) /
dt.timedelta(seconds=1))
try:
start = int(start)
except:
self.logger.error('Error in get_candles:')
self.logger.error('Illegal value for start: %s.' % start)
raise ValueError('start must be a datetime object.')
params['from'] = max(start, 1)
        if end is None and stop is not None:
end = stop
if end:
if isinstance(end, str):
try:
end = pd.Timestamp(end).to_pydatetime()
except:
msg = "Can not convert parameter end to datetime."
raise ValueError(msg)
elif isinstance(end, dt.datetime) or isinstance(end, dt.date):
pass
else:
msg = "end must either be a datetime object or a string"
raise ValueError(msg)
end = ((end - dt.datetime(1970, 1, 1)) / dt.timedelta(seconds=1))
try:
end = int(end)
except:
self.logger.error('Error in get_candles:')
                self.logger.error('Illegal value for end: %s.' % end)
raise ValueError('end must be a datetime object.')
params['to'] = max(end, 1)
data = self.__handle_request__(method='candles/%s/%s'
% (offer_id, period), params=params)
if len(columns) == 0:
to_add = self.CANDLES_COLUMNS
else:
to_add = ['date', ]
for field in columns:
if field == 'asks':
for ask_field in self.CANDLES_COLUMNS_ASK:
if ask_field not in to_add:
to_add.append(ask_field)
elif field == 'bids':
for bid_field in self.CANDLES_COLUMNS_BID:
if bid_field not in to_add:
to_add.append(bid_field)
elif field in self.CANDLES_COLUMNS:
if field not in to_add:
to_add.append(field)
else:
msg = "Unknown field '%s', please use one or more of \
'%s', 'asks', 'bids'."
raise ValueError(msg % (field,
"','".join(self.CANDLES_COLUMNS)))
if 'candles' in data:
while None in data['candles']:
data['candles'].remove(None)
ret = pd.DataFrame(data['candles'], columns=self.CANDLES_COLUMNS)
ret['date'] = pd.to_datetime(ret['date'], unit='s')
else:
ret = pd.DataFrame(columns=self.CANDLES_COLUMNS)
ret = ret[to_add]
if with_index:
ret.set_index('date', inplace=True)
return ret
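    # A minimal usage sketch (not part of the library source): fetching one
    # week of hourly EUR/USD candles and keeping only the bid columns; the
    # dates are illustrative and `con` is assumed to be a connected instance.
    #
    #     candles = con.get_candles('EUR/USD', period='H1',
    #                               start='2020-01-01', end='2020-01-08',
    #                               columns=['bids'])
    #     print(candles[['bidopen', 'bidclose']].tail())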
def __collect_account_ids__(self):
""" Collects account ids and stores them in self.account_ids."""
self.account_ids = set()
data = self.get_accounts('list')
for acc in data:
if 'accountId' in acc and acc['accountId'] != '':
self.account_ids.add(int(acc['accountId']))
self.account_ids = list(self.account_ids)
def __collect_orders__(self):
""" Collects available orders and stores them in self.orders."""
data = self.get_orders('list')
for order in data:
if 'orderId' in order and order['orderId'] != '':
self.orders[int(order['orderId'])] = fxcmpy_order(self, order)
self.orders_set = True
def __collect_oco_orders__(self):
""" Collect available oco orders and stores them in self.oco_orders."""
for order in self.orders.values():
if order.__ocoBulkId__ != 0:
if order.__ocoBulkId__ in self.oco_orders:
self.oco_orders[order.__ocoBulkId__].__add__(order)
else:
oco = fxcmpy_oco_order(order.__ocoBulkId__, [order, ], self,
self.logger)
self.oco_orders[order.__ocoBulkId__] = oco
self.oco_orders_set = True
def __get_instruments_table__(self):
""" Return the instruments table of FXCM."""
self.logger.debug('Fetching instruments table.')
try:
data = self.__handle_request__(method='trading/get_instruments')
except:
self.logger.warning('Can not fetch instruments table from server.')
data = list()
return data
def __collect_offers__(self):
""" Collect available offers and stores them in self.offers, a dict
with key symbol and value offer_id."""
self.offers = fxcmpy_instruments.inst
data = self.__get_instruments_table__()
to_add = list()
to_unsubscribe = list()
        if 'data' in data and 'instrument' in data['data']:
instruments = data['data']['instrument']
for ins in instruments:
if ins['symbol'] not in self.offers:
to_add.append(ins['symbol'])
if ins['visible'] is False:
to_unsubscribe.append(ins['symbol'])
self.subscribe_instrument(ins['symbol'])
offers = self.get_offers('list')
for offer in offers:
if 'currency' in offer and 'offerId' in offer:
if offer['currency'] in to_add:
self.offers[offer['currency']] = int(offer['offerId'])
if offer['currency'] in to_unsubscribe:
self.unsubscribe_instrument(offer['currency'])
self.offers_set = True
def __collect_positions__(self):
data = self.get_open_positions('list')
for pos in data:
if 'tradeId' in pos and pos['tradeId'] != '':
self.open_pos[int(pos['tradeId'])] = fxcmpy_open_position(self,
pos)
data = self.get_closed_positions('list')
for po in data:
if 'tradeId' in po and po['tradeId'] != '':
self.closed_pos[int(po['tradeId'])] = fxcmpy_closed_position(self,
po)
self.positions_set = True
def __update__instrument_subscription__(self, symbol, visible):
""" Update the subscription of an instrument to the offers table.
Arguments:
symbol, string:
the symbol of the instrument to activate.
visible, bool:
flag whether to subscribe or unsubscribe the instrument.
Returns:
True by success and False else.
"""
if visible:
visible = 'true'
else:
visible = 'false'
if self.number_update_requests > 48:
msg = 'Max. number of update request reached, renewing connection.'
self.logger.warning(msg)
if self.is_connected():
self.socket.disconnect()
time.sleep(1)
self.number_update_requests = 0
else:
self.number_update_requests += 1
try:
self.__handle_request__(method='trading/update_subscriptions',
params={'symbol':symbol, 'visible':visible},
protocol='post')
except:
            self.logger.warning('Can not update subscription of %s.' % symbol)
return False
return True
def __connect__(self):
try:
self.logger.debug('Access token: %s.' % self.access_token)
if self.proxies == {}:
socketIO.connect(self.trading_url+':443' +"/?access_token=" + self.access_token + '&agent=pythonquants&wait_for_connection=False')
else:
socketIO.connect(self.trading_url+':443' +"/?access_token=" + self.access_token + '&agent=pythonquants&wait_for_connection=False&proxies='+ self.proxies)
            self.socket = socketIO
self.logger.info('Socket established: %s.' % self.socket)
# self.socket_id = self.socket._engineIO_session.id
# self.logger.warn('Got socket session id: %s.' % self.socket_id)
except ConnectionError as inst:
self.connection_status = 'aborted'
self.logger.error('Socket returns an error: %s.'
% inst.args[0])
except:
self.connection_status = 'aborted'
self.logger.error('Socket returns unknown error.')
else:
self.__disconnected__ = False
self.socket.on('connect',self.__on_connect__)
self.socket.on('disconnect',self.__on_disconnect__)
self.socket.on('connect_error',self.__on_connection_error__)
self.socket.on('error', self.__on_error__)
self.socket.wait()
def __reconnect__(self, count):
self.logger.warning('Not connected, try to reconnect. (%s)' % count)
try:
self.socket_thread.join()
except:
self.logger.error('Failed to join task')
self.socket = None
self.request_header = None
self.default_account = None
self.instruments = None
self.prices = dict()
self.account_ids = set()
self.orders = dict()
self.old_orders = dict()
self.offers = dict()
self.open_pos = dict()
self.closed_pos = dict()
self.oco_orders = dict()
# self.add_callbacks = dict()
self.connection_status = 'unset'
time.sleep(5)
self.connect()
time.sleep(5)
self.subscribe_data_model('Order')
self.subscribe_data_model('OpenPosition')
self.subscribe_data_model('ClosedPosition')
for symbol in self.prices:
params = {'pairs': symbol}
self.__handle_request__(method='subscribe', params=params,
protocol='post')
self.socket.on(symbol, self.__on_price_update__)
def __handle_request__(self, method='', params=None, protocol='get'):
""" Sends server requests. """
if params is None:
params = {}
# print("params=", params)
if method == '':
self.logger.error('Error in __handle__requests__: No method given')
raise ValueError('No method given.')
if type(params) is not dict:
self.logger.debug('Error in __handle__requests__:')
self.logger.debug('params must be of type dict.')
raise TypeError('params must be of type dict.')
self.logger.info('In handle_request with method %s' % method)
self.logger.info('Connection status: %s' % self.is_connected())
self.logger.info('2. connection status: %s' % self.connection_status)
self.logger.info('Socket state: %s' % self.socket.connected)
self.logger.info('Thread state: %s' % self.socket_thread.is_alive())
self.logger.info('Thread name: %s' % self.socket_thread.name)
self.logger.info('Socket id: %s' % self.socket.sid)
if not self.is_connected():
self.__connect__()
if not self.is_connected():
self.logger.error('Connection aborted, failed to reconnect')
raise IOError('Connection aborted, failed to reconnect')
self.socket_id = self.socket.sid
self.bearer_token = 'Bearer '+self.socket_id+self.access_token
self.logger.info('Created bearer token: %s' % self.bearer_token)
self.request_headers = {
'User-Agent': 'request',
'Authorization': self.bearer_token,
'Accept': 'application/json',
'Content-Type':
'application/x-www-form-urlencoded'
}
if method == 'trading/close_all_for_symbol':
if ('forSymbol' in params and params['forSymbol'] == 'false'
and len(self.open_pos) == 0):
self.logger.warning('No open positions to close')
return False
elif 'forSymbol' in params and params['forSymbol'] == 'true':
count = 0
for pos in self.open_pos.values():
if pos.__currency__ == params['symbol']:
count += 1
if count == 0:
self.logger.warn('No open positions to close.')
return False
self.logger.info('Sending request to %s/%s, parameter: %s.'
% (self.trading_url, method, params))
if protocol == 'post':
req = requests.post('%s:443/%s' % (self.trading_url, method),
headers=self.request_headers, data=params,
proxies=self.proxies)
self.logger.info('Sending POST Request:')
self.logger.info('URL: %s' % req.url)
self.logger.info('Payload: %s' % req.request.body)
self.logger.info('Headers: %s' % req.request.headers)
self.logger.info('Params: %s' % params)
self.logger.info('Proxies: %s' % self.proxies)
else:
req = requests.get('%s:443/%s' % (self.trading_url, method),
headers=self.request_headers, params=params,
proxies=self.proxies)
self.logger.info('Sending GET Request:')
self.logger.info('URL: %s' % req.url)
self.logger.info('Headers: %s' % req.request.headers)
self.logger.info('Params: %s' % params)
self.logger.info('Proxies: %s' % self.proxies)
if req.status_code != 200:
self.logger.error('FXCM reject req %s with status %s and msg %s.'
% (method, req.status_code, req.text))
raise ServerError('Request returns status code %s and message "%s"'
% (req.status_code,
unquote(req.text)))
        # Parse the server answer; only JSON decoding problems are caught
        # here, so that the ServerError raised below propagates to the caller
        # instead of being silently swallowed.
        try:
            data = req.json()
        except ValueError:
            self.logger.error('Can not parse server answer to json object: %s.'
                              % req.text)
            raise ServerError('Can not parse server answer to json object.')
        if 'response' not in data or 'executed' not in data['response']:
            self.logger.error('Malformed response %s' % data)
            raise ServerError('Malformed response')
        if not data['response']['executed']:
            if 'error' in data['response'] and data['response']['error'] != '':
                self.logger.error('Server reports an error: %s.'
                                  % data['response'])
                self.logger.error('URL: %s' % req.url)
                self.logger.error('Headers: %s' % req.request.headers)
                self.logger.error('Params: %s' % params)
                self.logger.error('Bearer token: %s' % self.bearer_token)
                self.logger.error('Connection status: %s'
                                  % self.connection_status)
                self.logger.error('Socket session id: %s' % self.socket.sid)
                raise ServerError('FXCM Server reports an error: %s.'
                                  % data['response']['error'])
            else:
                self.logger.error('FXCM Server reports an unknown error: %s.'
                                  % data['response'])
                raise ServerError('FXCM Server returns an unknown error.')
self.logger.debug('Server answer: %s.' % data)
return data
def __on_price_update__(self, msg):
data = json.loads(msg)
symbol = data['Symbol']
date = pd.to_datetime(int(data['Updated']), unit='ms')
temp_data = pd.DataFrame([data['Rates']],
columns=['Bid', 'Ask', 'High', 'Low'],
index=[date])
if symbol not in self.prices:
self.prices[symbol] = temp_data
else:
self.prices[symbol] = pd.concat([self.prices[symbol], temp_data])
if self.max_prices is not None and (len(self.prices[symbol]) >
self.max_prices):
msg = 'Max. length of prices exceeded (%s), dropping oldest.'
self.logger.info(msg % self.max_prices)
self.prices[symbol] = self.prices[symbol].iloc[-self.max_prices:]
if symbol in self.add_callbacks:
callbacks = self.add_callbacks[symbol]
for func in callbacks:
try:
# t = Thread(target=callbacks[func],
# args=(data, self.prices[symbol]))
# t.start()
callbacks[func](data, self.prices[symbol])
except:
self.logger.error('Call of %s raised an error:' % func)
self.logger.error(sys.exc_info()[0])
self.logger.error(sys.exc_info()[1])
raise
def __on_model_update__(self, msg):
# Answers not always json objects, so we have to log the raw answer
# data = json.loads(msg)
try:
self.logger.debug(msg)
except:
pass
def __on_message__(self, msg):
# Answers not always json objects, so we have to log the raw answer
try:
data = json.loads(msg)
self.logger.debug(data)
except:
pass
def __on_order_update__(self, msg):
""" Gets called when the order stream sends new data for the order
table.
Arguments:
msg: string,
a json like data object.
"""
if not self.orders_set:
return 0
try:
data = json.loads(msg)
except:
self.logger.warning('Got a non json answer in order stream, ignoring')
self.logger.warning(msg)
return -1
if 'action' in data and data['action'] == 'I':
            self.logger.info('Got an insert event for orders: %s.' % data)
order_id = int(data['orderId'])
self.orders[order_id] = fxcmpy_order(self, data)
elif 'action' in data and data['action'] == 'D':
self.logger.warning('Got a delete event for orders: %s.' % data)
order_id = int(data['orderId'])
if order_id in self.orders:
order = self.orders[order_id]
if order.get_ocoBulkId() != 0:
try:
self.oco_orders[order.get_ocoBulkId()].__remove__(order)
except:
pass
self.old_orders[order_id] = order
del self.orders[order_id]
elif ('action' in data and
data['action'] != 'I' and data['action'] != 'D' and
data['action'] != 'U'):
msg = 'Found an unknown action in Order stream: %s.' % data
self.logger.error(msg)
else:
self.logger.debug('Update data without action:')
self.logger.debug(data)
if 'orderId' in data:
try:
order = self.orders[int(data['orderId'])]
except:
msg = 'Got update for unknown order id: %s.' % data['orderId']
self.logger.warn(msg)
return 0
for field in data:
if (field == 'ocoBulkId' and
order.get_ocoBulkId() != data['ocoBulkId']):
if data['ocoBulkId'] == 0:
bulkId = order.get_ocoBulkId()
self.oco_orders[bulkId].__remove__(order)
else:
if data['ocoBulkId'] not in self.oco_orders:
self.__collect_oco_orders__()
self.oco_orders[data['ocoBulkId']].__add__(order)
value = data['ocoBulkId']
else:
value = data[field]
order.__set_attribute__(field, value)
if 'Order' in self.add_callbacks:
callbacks = self.add_callbacks['Order']
for func in callbacks:
try:
callbacks[func](data)
except:
self.logger.error('Call of %s raised an error:' % func)
self.logger.error(sys.exc_info()[0])
self.logger.error(sys.exc_info()[1])
self.logger.error(sys.exc_info()[2])
def __on_open_pos_update__(self, msg):
""" Gets called when the open_position stream sends new data.
Arguments:
msg: string,
a json like data object.
"""
if not self.positions_set:
return 0
try:
data = json.loads(msg)
except:
            self.logger.warning('Got non json answer in open pos stream, '
                                'ignoring.')
            self.logger.warning(msg)
return -1
if 'tradeId' in data and data['tradeId'] != '':
trade_id = int(data['tradeId'])
if 'action' in data and data['action'] == 'I':
                self.logger.warning('Got an insert event for open positions: %s.'
% data)
self.open_pos[trade_id] = fxcmpy_open_position(self, data)
elif 'action' in data and data['action'] == 'D':
self.logger.warning('Got a delete event for open posi: %s' % data)
del self.open_pos[trade_id]
elif ('action' in data and
data['action'] != 'I' and data['action'] != 'D' and
data['action'] != 'U'):
msg = 'Found an unknown action in open pos stream: %s.' % data
self.logger.error(msg)
else:
self.logger.debug('Update data without action:')
self.logger.debug(data)
pos = self.open_pos[trade_id]
for field in data:
pos.__set_attribute__(field, data[field])
if 'OpenPosition' in self.add_callbacks:
callbacks = self.add_callbacks['OpenPosition']
for func in callbacks:
try:
callbacks[func](data)
except:
self.logger.error('Call of %s raised an error:' % func)
self.logger.error(sys.exc_info()[0])
self.logger.error(sys.exc_info()[1])
self.logger.error(sys.exc_info()[2])
def __on_closed_pos_update__(self, msg):
""" Gets called when the closed_position stream sends new data.
Arguments:
msg: string,
a json like data object.
"""
if not self.positions_set:
return 0
try:
data = json.loads(msg)
except:
            self.logger.warning('Got non json answer in close pos stream, '
                                'ignoring.')
            self.logger.warning(msg)
return -1
if 'tradeId' in data and data['tradeId'] != '':
trade_id = int(data['tradeId'])
if 'action' in data and data['action'] == 'I':
                self.logger.warning('Got an insert event for closed positions: %s.'
% data)
self.closed_pos[trade_id] = fxcmpy_closed_position(self, data)
elif 'action' in data and data['action'] == 'D':
self.logger.warning('Got delete event for closed pos: %s' % data)
del self.closed_pos[trade_id]
elif ('action' in data and
data['action'] != 'I' and data['action'] != 'D' and
data['action'] != 'U'):
msg = 'Found unknown action in closed pos stream: %s.' % data
self.logger.error(msg)
else:
self.logger.debug('Update data without action:')
self.logger.debug(data)
pos = self.closed_pos[trade_id]
for field in data:
pos.__set_attribute__(field, data[field])
if 'ClosedPosition' in self.add_callbacks:
callbacks = self.add_callbacks['ClosedPosition']
for func in callbacks:
try:
callbacks[func](data)
except:
self.logger.error('Call of %s raised an error:' % func)
self.logger.error(sys.exc_info()[0])
self.logger.error(sys.exc_info()[1])
self.logger.error(sys.exc_info()[2])
def __on_error__(self, msg=''):
self.logger.error('Error: %s' % msg)
raise ServerError(msg)
def __on_disconnect__(self, msg=''):
self.bearer_token = None
self.socket_id = None
self.connection_status = 'unset'
self.logger.info('Disconnected.')
def __on_connect__(self, msg=''):
self.connection_status = 'established'
self.logger.info('Connection established.')
def __on_connection_error__(self, msg=''):
self.logger.error('Connection error: %s' % msg)
test_util.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from collections import OrderedDict
import contextlib
import functools
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import time
import unittest
from absl.testing import parameterized
import numpy as np
import six
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import _pywrap_stacktrace_handler
from tensorflow.python import _pywrap_util_port
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.client import session
from tensorflow.python.compat.compat import forward_compatibility_horizon
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import config
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import gpu_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_ops # pylint: disable=unused-import
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
# If the below import is made available through the BUILD rule, then this
# function is overridden and will instead return True and cause Tensorflow
# graphs to be compiled with XLA.
def is_xla_enabled():
return False
try:
from tensorflow.python.framework.is_xla_test_true import is_xla_enabled # pylint: disable=g-import-not-at-top, unused-import
except Exception: # pylint: disable=broad-except
pass
# Uses the same mechanism as above to selectively enable/disable MLIR
# compilation.
def is_mlir_bridge_enabled():
return False
try:
from tensorflow.python.framework.is_mlir_bridge_test_false import is_mlir_bridge_enabled # pylint: disable=g-import-not-at-top, unused-import
except ImportError:
try:
from tensorflow.python.framework.is_mlir_bridge_test_true import is_mlir_bridge_enabled # pylint: disable=g-import-not-at-top, unused-import
except ImportError:
pass
# Uses the same mechanism as above to selectively enable TFRT.
def is_tfrt_enabled():
return False
try:
from tensorflow.python.framework.is_tfrt_test_true import is_tfrt_enabled # pylint: disable=g-import-not-at-top, unused-import
except Exception: # pylint: disable=broad-except
pass
def _get_object_count_by_type():
return collections.Counter([type(obj).__name__ for obj in gc.get_objects()])
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
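# A minimal usage sketch (not part of this module): checking that a graph
# holds the expected named ops; the op name below is illustrative.
#
#     with ops.Graph().as_default() as g:
#       array_ops.placeholder(dtypes.float32, name='x')
#     assert_ops_in_graph({'x': 'Placeholder'}, g)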
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(expected, actual):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent. This function
ignores randomized attribute values that may appear in V2 checkpoints.
Args:
expected: The `GraphDef` we expected.
actual: The `GraphDef` we have.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2=True,
hash_table_shared_name=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
hash_table_shared_name: boolean determining whether to ignore randomized
shared_names that appear in HashTableV2 op defs.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2,
hash_table_shared_name)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
if hash_table_shared_name:
_strip_hash_table_shared_name(actual)
_strip_hash_table_shared_name(expected)
diff = pywrap_tf_session.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
      tester.assertEqual(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
_TABLE_SHARED_NAME_PATTERN = r"hash_table_[0-9a-z\-]+"
def _strip_hash_table_shared_name(graph_def):
for node in graph_def.node:
delete_keys = []
if node.op == "HashTableV2" and "shared_name" in node.attr:
if re.match(_TABLE_SHARED_NAME_PATTERN, str(node.attr["shared_name"].s)):
delete_keys.append("shared_name")
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return _pywrap_util_port.IsGoogleCudaEnabled()
def IsBuiltWithROCm():
return _pywrap_util_port.IsBuiltWithROCm()
def IsBuiltWithXLA():
return _pywrap_util_port.IsBuiltWithXLA()
def IsBuiltWithNvcc():
return _pywrap_util_port.IsBuiltWithNvcc()
def GpuSupportsHalfMatMulAndConv():
return _pywrap_util_port.GpuSupportsHalfMatMulAndConv()
def IsMklEnabled():
return _pywrap_util_port.IsMklEnabled()
def InstallStackTraceHandler():
_pywrap_stacktrace_handler.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 3-, 4-, or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {3: [0, 2, 1], 4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
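# A minimal illustration (not part of this module): applied to plain shape
# lists, the two helpers above simply permute the entries, e.g.
#
#     NHWCToNCHW([32, 28, 28, 3])  # -> [32, 3, 28, 28]
#     NCHWToNHWC([32, 3, 28, 28])  # -> [32, 28, 28, 3]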
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
return fn(*args, **kwargs)
return wrapper
return real_skip_if
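# A minimal usage sketch (not part of this module): `skip_if` accepts either a
# plain boolean or a callable that is evaluated at call time; the test class
# and method names below are illustrative only.
#
#     class ExampleTest(unittest.TestCase):
#
#       @skip_if(lambda: not gpu_device_name())
#       def test_needs_gpu(self):
#         ...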
@contextlib.contextmanager
def skip_if_error(test_obj, error_type, messages=None):
"""Context manager to skip cases not considered failures by the tests.
Note that this does not work if used in setUpClass/tearDownClass.
Usage in setUp/tearDown works fine just like regular test methods.
Args:
test_obj: A test object provided as `self` in the test methods; this object
is usually an instance of `unittest.TestCase`'s subclass and should have
`skipTest` method.
error_type: The error type to skip. Note that if `messages` are given, both
`error_type` and `messages` need to match for the test to be skipped.
messages: Optional, a string or list of strings. If `None`, the test will be
skipped if `error_type` matches what is raised; otherwise, the test is
skipped if any of the `messages` is contained in the message of the error
raised, and `error_type` matches the error raised.
Yields:
Nothing.
"""
if messages:
messages = nest.flatten(messages)
try:
yield
except error_type as e:
if not messages or any(message in str(e) for message in messages):
test_obj.skipTest("Skipping error: {}: {}".format(type(e), str(e)))
else:
raise
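# A minimal usage sketch (not part of this module): skipping a test when a
# known, tolerated UnavailableError occurs; `run_flaky_rpc` and the message
# are hypothetical placeholders.
#
#     def test_flaky_rpc(self):
#       with skip_if_error(self, errors.UnavailableError,
#                          messages=['Endpoint read failed']):
#         run_flaky_rpc()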
def enable_c_shapes(fn):
"""No-op. TODO(b/74620627): Remove this."""
return fn
def with_c_shapes(cls):
"""No-op. TODO(b/74620627): Remove this."""
return cls
def enable_control_flow_v2(fn):
"""Decorator for enabling CondV2 and WhileV2 on a test.
Note this enables using CondV2 and WhileV2 after running the test class's
setup/teardown methods.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old
return wrapper
def with_control_flow_v2(cls):
"""Adds methods that call original methods with WhileV2 and CondV2 enabled.
Note this enables CondV2 and WhileV2 in new methods after running the test
class's setup method.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
If a test function has _disable_control_flow_v2 attr set to True (using the
@disable_control_flow_v2 decorator), the v2 function is not generated for it.
Example:
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
@test_util.disable_control_flow_v2("b/xyzabc")
def testDisabledForV2(self):
...
Generated class:
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
def testEnabledForV2WithControlFlowV2(self):
// Enable V2 flags.
testEnabledForV2(self)
// Restore V2 flags.
def testDisabledForV2(self):
...
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix) and
not getattr(value, "_disable_control_flow_v2", False)):
setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
return cls
def disable_control_flow_v2(unused_msg):
"""Decorator for a function in a with_control_flow_v2 enabled test class.
Blocks the function from being run with v2 control flow ops.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_control_flow_v2 attr set to True.
"""
def wrapper(func):
func._disable_control_flow_v2 = True
return func
return wrapper
def enable_output_all_intermediates(fn):
"""Force-enable outputing all intermediates from functional control flow ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
output_all_intermediates_old = \
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = \
output_all_intermediates_old
return wrapper
def assert_no_new_pyobjects_executing_eagerly(func=None, warmup_iters=2):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then to
let objects accumulate. The warmup helps ignore caches which do not grow as
the test is run repeatedly.
Useful for checking that there are no missing Py_DECREFs in the C exercised by
a bit of Python.
Args:
func: The function to test.
    warmup_iters: The number of warmup iterations, excluded from measuring.
Returns:
The wrapped function performing the test.
"""
def wrap_f(f):
def decorator(self, *args, **kwargs):
"""Warms up, gets object counts, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
# Run the test 2 times as warmup, in an attempt to fill up caches, which
# should not grow as the test is run repeatedly below.
#
# TODO(b/117156879): Running warmup twice is black magic; we have seen
# tests that fail with 1 warmup run, and pass with 2, on various
# versions of python2.7.x.
for _ in range(warmup_iters):
f(self, *args, **kwargs)
# Some objects are newly created by _get_object_count_by_type(). So
# create and save as a dummy variable to include it as a baseline.
obj_count_by_type = _get_object_count_by_type()
gc.collect()
obj_count_by_type = _get_object_count_by_type()
if ops.has_default_graph():
collection_sizes_before = {
collection: len(ops.get_collection(collection))
for collection in ops.get_default_graph().collections
}
for _ in range(3):
f(self, *args, **kwargs)
# Note that gc.get_objects misses anything that isn't subject to garbage
# collection (C types). Collections are a common source of leaks, so we
# test for collection sizes explicitly.
if ops.has_default_graph():
for collection_key in ops.get_default_graph().collections:
collection = ops.get_collection(collection_key)
size_before = collection_sizes_before.get(collection_key, 0)
if len(collection) > size_before:
raise AssertionError(
("Collection %s increased in size from "
"%d to %d (current items %s).") %
(collection_key, size_before, len(collection), collection))
# Make sure our collection checks don't show up as leaked memory by
# removing references to temporary variables.
del collection
del collection_key
del size_before
del collection_sizes_before
gc.collect()
# There should be no new Python objects hanging around.
obj_count_by_type = _get_object_count_by_type() - obj_count_by_type
# In some cases (specifically on MacOS), new_count is somehow
# smaller than previous_count.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert not obj_count_by_type, (
"The following objects were newly created: %s" %
str(obj_count_by_type))
gc.enable()
return decorator
if func is None:
return wrap_f
else:
return wrap_f(func)
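# A minimal usage sketch (not part of this module): the decorator can be used
# bare or with arguments; the test below is illustrative.
#
#     class LeakTest(unittest.TestCase):
#
#       @assert_no_new_pyobjects_executing_eagerly(warmup_iters=3)
#       def test_no_leak(self):
#         math_ops.add(1., 2.)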
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except (ReferenceError, AttributeError):
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
outside_executed_eagerly = context.executing_eagerly()
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
ops.get_default_graph()._graph_key = outside_graph_key
if outside_executed_eagerly:
with context.eager_mode():
result = f(self, **kwargs)
else:
result = f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
context.context()._clear_caches() # pylint: disable=protected-access
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return result
return decorator
def _find_reference_cycle(objects, idx):
def get_ignore_reason(obj, denylist):
"""Tests whether an object should be omitted from the dependency graph."""
if len(denylist) > 100:
return "<depth limit>"
if tf_inspect.isframe(obj):
if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
return "<test code>"
for b in denylist:
if b is obj:
return "<test code>"
if obj is denylist:
return "<test code>"
return None
# Note: this function is meant to help with diagnostics. Its output is purely
# a human-readable representation, so you may freely modify it to suit your
# needs.
def describe(obj, denylist, leaves_only=False):
"""Returns a custom human-readable summary of obj.
Args:
obj: the value to describe.
denylist: same as denylist in get_ignore_reason.
leaves_only: boolean flag used when calling describe recursively. Useful
for summarizing collections.
"""
if get_ignore_reason(obj, denylist):
return "{}{}".format(get_ignore_reason(obj, denylist), type(obj))
if tf_inspect.isframe(obj):
return "frame: {}".format(tf_inspect.getframeinfo(obj))
elif tf_inspect.ismodule(obj):
return "module: {}".format(obj.__name__)
else:
if leaves_only:
return "{}, {}".format(type(obj), id(obj))
elif isinstance(obj, list):
return "list({}): {}".format(
id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
elif isinstance(obj, tuple):
return "tuple({}): {}".format(
id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
elif isinstance(obj, dict):
return "dict({}): {} keys".format(id(obj), len(obj.keys()))
elif tf_inspect.isfunction(obj):
return "function({}) {}; globals ID: {}".format(
id(obj), obj.__name__, id(obj.__globals__))
else:
return "{}, {}".format(type(obj), id(obj))
def build_ref_graph(obj, graph, reprs, denylist):
"""Builds a reference graph as <referrer> -> <list of referents>.
Args:
obj: The object to start from. The graph will be built by recursively
adding its referrers.
graph: Dict holding the graph to be built. To avoid creating extra
references, the graph holds object IDs rather than actual objects.
reprs: Auxiliary structure that maps object IDs to their human-readable
description.
denylist: List of objects to ignore.
"""
referrers = gc.get_referrers(obj)
denylist = denylist + (referrers,)
obj_id = id(obj)
for r in referrers:
if get_ignore_reason(r, denylist) is None:
r_id = id(r)
if r_id not in graph:
graph[r_id] = []
if obj_id not in graph[r_id]:
graph[r_id].append(obj_id)
build_ref_graph(r, graph, reprs, denylist)
reprs[r_id] = describe(r, denylist)
def find_cycle(el, graph, reprs, path):
"""Finds and prints a single cycle in the dependency graph."""
if el not in graph:
return
for r in graph[el]:
if r in path:
logging.error("Reference cycle sample:")
for p in path + (r,):
logging.error(reprs.get(p, "unknown object " + str(p)))
return True
else:
if find_cycle(r, graph, reprs, path + (r,)):
return True
return False
obj = objects[idx]
graph = {} # referrer ID -> object ID
reprs = {} # object ID -> description
build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
describe, build_ref_graph, find_cycle))
for k in graph:
if find_cycle(k, graph, reprs, ()):
return True
return False
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
# Force-load `distribution_strategy_context` to prevent GC at
# test time when using eager. Remove once b/117329403 is resolved.
tape.distribution_strategy_context.get_strategy()
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
result = f(self, **kwargs)
gc.collect()
new_garbage = len(gc.garbage)
if new_garbage > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(
" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(
" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception: # pylint: disable=broad-except
logging.error("(Exception while printing object)")
# When garbage is created, this call can help identify reference cycles,
# which are typically the cause of such garbage.
if new_garbage > previous_garbage:
for i in range(previous_garbage, new_garbage):
if _find_reference_cycle(gc.garbage, i):
break
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, new_garbage)
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return result
return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
sort_by_key = lambda k: k[0]
combinations = []
for key, values in sorted(kwargs.items(), key=sort_by_key):
if not isinstance(values, list):
values = [values]
combinations.append([(key, value) for value in values])
return [OrderedDict(result) for result in itertools.product(*combinations)]
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
dictionaries returned. The 'testcase_name' key is required for named
parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
combinations = _combine_named_parameters(**kwargs)
named_combinations = []
for combination in combinations:
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format("".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) +
[("testcase_name", "_test{}".format(name))]))
return named_combinations
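# Illustrative sketch (hedged, not an exhaustive spec): a call such as
#   generate_combinations_with_testcase_name(mode=["graph", "eager"], use_gpu=True)
# would be expected to yield something like
#   [OrderedDict([("mode", "graph"), ("use_gpu", True),
#                 ("testcase_name", "_test_mode_graph_usegpu_True")]),
#    OrderedDict([("mode", "eager"), ("use_gpu", True),
#                 ("testcase_name", "_test_mode_eager_usegpu_True")])]
# which can be passed directly to `parameterized.named_parameters`.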
def run_all_in_graph_and_eager_modes(cls):
"""Execute all test methods in the given class with and without eager."""
base_decorator = run_in_graph_and_eager_modes
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name.startswith("testSkipEager") or
name.startswith("test_skip_eager") or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def build_as_function_and_v1_graph(func=None):
"""Run a test case in v1 graph mode and inside tf.function in eager mode.
WARNING: This decorator can only be used in test cases that statically check
the generated graph. Attempting to evaluate graph or function results via
session.run() or self.evaluate() will fail.
WARNING: This decorator can only be used for test cases that inherit from
absl.testing.parameterized.TestCase.
Args:
func: Test case function to be decorated.
Returns:
Decorated test case function.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_mode_and_function` only supports test methods.")
@parameterized.named_parameters(("_v1_graph", "v1_graph"),
("_function", "function"))
@functools.wraps(f)
def decorated(self, run_mode, *args, **kwargs):
if run_mode == "v1_graph":
with ops.Graph().as_default():
f(self, *args, **kwargs)
elif run_mode == "function":
@def_function.function
def function_in_eager():
f(self, *args, **kwargs)
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
function_in_eager()
ops.dismantle_graph(graph_for_eager_test)
else:
raise ValueError("Unknown run mode %s" % run_mode)
return decorated
if func is not None:
return decorator(func)
return decorator
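# Hypothetical usage sketch; `_ExampleStaticGraphTest` is an illustrative name
# and the `constant_op`/`math_ops` imports are assumed to be available:
#   class _ExampleStaticGraphTest(parameterized.TestCase, TensorFlowTestCase):
#
#     @build_as_function_and_v1_graph
#     def test_graph_structure(self):
#       x = constant_op.constant(1.0)
#       y = math_ops.add(x, x)
#       # Only static graph checks are allowed here; evaluating `y` via
#       # self.evaluate() or session.run() would fail, as documented above.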
def run_in_async_and_sync_mode(f):
"""Execute the test in async mode and sync mode."""
@parameterized.named_parameters([("Async", True), ("", False)])
@functools.wraps(f)
def decorator(self, async_mode, *args, **kwargs):
if async_mode:
with context.execution_mode(context.ASYNC):
f(self, *args, **kwargs)
else:
with context.execution_mode(context.SYNC):
f(self, *args, **kwargs)
return decorator
def eager_lazy_remote_copy_on_and_off(f):
"""Execute the test method w/o lazy tensor copy for function remote inputs."""
@parameterized.named_parameters([("WithLazyRemoteCopy", True), ("", False)])
@functools.wraps(f)
def decorator(self, lazily_remote_copy, *args, **kwargs):
if lazily_remote_copy:
context.context().lazy_remote_inputs_copy = True
else:
context.context().lazy_remote_inputs_copy = False
f(self, *args, **kwargs)
return decorator
def run_in_graph_and_eager_modes(func=None,
config=None,
use_gpu=True,
assert_no_eager_garbage=False):
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see `tf.compat.v1.enable_eager_execution`).
For example, consider the following unittest:
```python
class MyTests(tf.test.TestCase):
@run_in_graph_and_eager_modes
def test_foo(self):
x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.add(x, y)
self.assertAllEqual([4, 6], self.evaluate(z))
if __name__ == "__main__":
tf.test.main()
```
This test validates that `tf.add()` has the same behavior when computed with
eager execution enabled as it does when constructing a TensorFlow graph and
executing the `z` tensor in a session.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
config: An optional config_pb2.ConfigProto to use to configure the session
when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test with eager execution enabled. This will fail if there are
reference cycles (e.g. a = []; a.append(a)). Off by default because some
tests may create garbage for legitimate reasons (e.g. they define a class
which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
Python interpreters (meaning that tests which rely on objects being
collected elsewhere in the unit test file will not work). Additionally,
checks that nothing still has a reference to Tensors that the test
allocated.
Returns:
Returns a decorator that will run the decorated test method twice:
once by constructing and executing a graph in a session and once with
eager execution enabled.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_and_eager_modes` only supports test methods. "
"Did you mean to use `run_all_in_graph_and_eager_modes`?")
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, *args, **kwargs)
except unittest.case.SkipTest:
pass
def run_eagerly(self, **kwargs):
if not use_gpu:
with ops.device("/device:CPU:0"):
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
if assert_no_eager_garbage:
ops.reset_default_graph()
run_eagerly = assert_no_new_tensors(
assert_no_garbage_created(run_eagerly))
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
self.setUp()
run_eagerly(self, **kwargs)
ops.dismantle_graph(graph_for_eager_test)
return tf_decorator.make_decorator(f, decorated)
if func is not None:
return decorator(func)
return decorator
def py_func_if_in_function(f):
def decorated(*args, **kwds):
if not ops.inside_function():
return f(*args, **kwds)
tensor_args = []
tensor_indices = []
for i, arg in enumerate(args):
if isinstance(arg, (ops.Tensor, variables.Variable)):
tensor_args.append(arg)
tensor_indices.append(i)
def inner_f(*inner_tensor_args):
my_args = list(args)
for i, n in zip(tensor_indices, inner_tensor_args):
my_args[i] = n
return f(*my_args, **kwds)
return script_ops.py_func(inner_f, tensor_args, [])
return tf_decorator.make_decorator(f, decorated)
def also_run_as_tf_function(f):
"""Runs the decorated test twice--once as is, once inside a tf.function.
This allows you to run a test both in eager execution and inside a
tf.function, exercising the two execution modes supported in tf 2.0. The test
assertions are automatically done inside tf.py_funcs, and tf.function ensures
that they run in the proper order and with the proper side effects.
Currently variable creation is not supported in tests annotated with this
decorator since it's tricky to ensure the variable doesn't get repeatedly
created when retracing the tf.function.
Args:
f: the test method to be decorated
Returns:
The decorated test method, which will run both in eager and inside a
tf.function.
"""
def decorated(*args, **kwds):
def bound_f():
f(*args, **kwds)
with context.eager_mode():
# Running in eager mode
bound_f()
# Running as TF function
# TODO(b/121143941): Remove the autograph override.
def_function.function(bound_f, autograph=False)()
return decorated
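# Hypothetical usage sketch (illustrative class name; `math_ops` import
# assumed): the decorated body runs once eagerly and once traced into a
# tf.function, and must not create variables.
#   class _ExampleEagerAndFunctionTest(TensorFlowTestCase):
#
#     @also_run_as_tf_function
#     def test_add(self):
#       self.assertAllEqual([4, 6], math_ops.add([1, 2], [3, 4]))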
def deprecated_graph_mode_only(func=None):
"""Execute the decorated test in graph mode.
This function returns a decorator intended to be applied to tests that are not
compatible with eager mode. When this decorator is applied, the test body will
be run in an environment where API calls construct graphs instead of executing
eagerly.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will run the decorated test method in graph mode.
"""
def decorator(f):
if tf_inspect.isclass(f):
setup = f.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
for name, value in f.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix)):
setattr(f, name, decorator(value))
return f
def decorated(self, *args, **kwargs):
if context.executing_eagerly():
with context.graph_mode():
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
run_deprecated_v1 = deprecated_graph_mode_only
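# Hypothetical usage sketch (illustrative class name; `array_ops`/`dtypes`
# imports assumed): `run_deprecated_v1` forces the body to build a graph even
# when eager execution is the default, so v1-style placeholders keep working.
#   class _ExampleGraphOnlyTest(TensorFlowTestCase):
#
#     @run_deprecated_v1
#     def test_placeholder(self):
#       x = array_ops.placeholder(dtypes.float32, shape=[2])
#       with self.cached_session() as sess:
#         self.assertAllEqual([1.0, 2.0],
#                             sess.run(x, feed_dict={x: [1.0, 2.0]}))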
def run_all_in_deprecated_graph_mode_only(cls):
"""Execute all tests in a class in graph mode."""
base_decorator = deprecated_graph_mode_only
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def run_v1_only(reason, func=None):
"""Execute the decorated test only if running in v1 mode.
This function is intended to be applied to tests that exercise v1 only
functionality. If the test is run in v2 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
reason: string giving a reason for limiting the test to v1 only.
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
if not isinstance(reason, str):
raise ValueError("'reason' should be string, got {}".format(type(reason)))
def decorator(f):
if tf_inspect.isclass(f):
# To skip an entire test suite class, we only decorate the setUp method
# to skip all tests. There are cases when setUp is not defined (not
# overridden in subclasses of TestCase, so not available in f.__dict__
# below). For those cases, we walk the method resolution order list and
# pick the first setUp method we find (usually this should be the one in
# the parent class since that's the TestCase class).
for cls in type.mro(f):
setup = cls.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
break
return f
else:
# If f is just a function, just create a decorator for it and return it
def decorated(self, *args, **kwargs):
if tf2.enabled():
self.skipTest(reason)
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_v2_only(func=None):
"""Execute the decorated test only if running in v2 mode.
This function is intended to be applied to tests that exercise v2 only
functionality. If the test is run in v1 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_v2_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not tf2.enabled():
self.skipTest("Test is only compatible with v2")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
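# Hypothetical usage sketch (illustrative names): `run_v1_only` requires a
# reason string, `run_v2_only` takes no arguments, and both skip rather than
# fail when the TF major version does not match.
#   class _ExampleVersionGatedTest(TensorFlowTestCase):
#
#     @run_v1_only("Exercises tf.compat.v1-only session behavior.")
#     def test_v1_path(self):
#       ...
#
#     @run_v2_only
#     def test_v2_path(self):
#       ...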
def run_gpu_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a GPU. If a GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_gpu_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available():
self.skipTest("Test requires GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_cuda_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_cuda_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available(cuda_only=True):
self.skipTest("Test requires CUDA GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
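# Hypothetical usage sketch (illustrative class name): hardware-gated tests
# are skipped, not failed, when no suitable GPU is present.
#   class _ExampleGpuGatedTest(TensorFlowTestCase):
#
#     @run_gpu_only
#     def test_needs_any_gpu(self):
#       ...
#
#     @run_cuda_only
#     def test_needs_cuda_gpu(self):
#       ...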
def with_forward_compatibility_horizons(*horizons):
"""Executes the decorated test with the specified forward-compat horizons.
Args:
*horizons: A list of (year, month, day) tuples. If the list includes
`None`, then the test will also be run with no forward-compatibility
horizon set.
Returns:
A decorator that will execute the test with the specified horizons.
"""
if not horizons:
raise ValueError("Expected at least one horizon.")
for horizon in horizons:
if not ((horizon is None) or
(len(horizon) == 3 and all(isinstance(x, int) for x in horizon))):
raise ValueError("Bad horizon value: %r" % horizon)
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`with_forward_compatibility_horizons` only "
"supports test methods.")
def decorated(self, *args, **kwargs):
for horizon in horizons:
if horizon is None:
f(self, *args, **kwargs)
else:
(year, month, day) = horizon
with forward_compatibility_horizon(year, month, day):
f(self, *args, **kwargs)
return decorated
return decorator
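# Hypothetical usage sketch (the date below is an arbitrary example value):
# run a test once with no horizon and once with a fixed future horizon.
#   class _ExampleCompatTest(TensorFlowTestCase):
#
#     @with_forward_compatibility_horizons(None, (2099, 1, 1))
#     def test_new_op_path(self):
#       ...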
@deprecation.deprecated(None,
"Use `tf.config.list_physical_devices('GPU')` instead.")
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Warning: if a non-GPU version of the package is installed, the function will
also return False. Use `tf.test.is_built_with_cuda` to validate whether
TensorFlow was built with CUDA support.
Args:
cuda_only: limit the search to CUDA GPUs.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Note that the keyword arg name "cuda_only" is misleading: the routine will
return True when a GPU device is available, irrespective of whether TF was
built with CUDA support or ROCm support. However, no changes are made here
because
++ Changing the name "cuda_only" to something more generic would break
backward compatibility
++ Adding an equivalent "rocm_only" would require the implementation to check
the build type. This in turn would require doing the same for CUDA and thus
potentially break backward compatibility
++ Adding a new "cuda_or_rocm_only" would not break backward compatibility,
but would require most (if not all) callers to update the call to use
"cuda_or_rocm_only" instead of "cuda_only"
Returns:
True if a GPU device of the requested kind is available.
"""
# This was needed earlier when we had support for SYCL in TensorFlow.
del cuda_only
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
gpu_info = gpu_util.compute_capability_from_device_desc(local_device)
cc = gpu_info.compute_capability or (0, 0)
if not min_cuda_compute_capability or cc >= min_cuda_compute_capability:
return True
return False
except errors_impl.NotFoundError as e:
if not all(x in str(e) for x in ["CUDA", "not find"]):
raise e
else:
logging.error(str(e))
return False
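# Hypothetical usage sketch: gate work on a reasonably modern CUDA GPU;
# `min_cuda_compute_capability` is a (major, minor) tuple.
#   if is_gpu_available(cuda_only=True, min_cuda_compute_capability=(7, 0)):
#     ...  # exercise the GPU-specific code path
#   else:
#     ...  # fall back or skip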
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
@contextlib.contextmanager
def use_gpu():
"""Uses gpu when requested and available."""
with device(use_gpu=True):
yield
@contextlib.contextmanager
def force_gpu():
"""Force the gpu to be used."""
with ops.device("/device:GPU:0"):
yield
@contextlib.contextmanager
def force_cpu():
"""Force the cpu to be used."""
with ops.device("/device:CPU:0"):
yield
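# Hypothetical usage sketch (`a` and `b` stand for tensors created by the
# surrounding test; `math_ops` import assumed): the context managers above
# compose with ordinary test code to place ops explicitly.
#   with force_cpu():
#     cpu_result = math_ops.matmul(a, b)        # pinned to /device:CPU:0
#   with device(use_gpu=True):
#     maybe_gpu_result = math_ops.matmul(a, b)  # GPU only if one is available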
class CapturedWrites(object):
"""A utility class to load the captured writes made to a stream."""
def __init__(self, capture_location):
self.capture_location = capture_location
def contents(self):
"""Get the captured writes as a single string."""
with open(self.capture_location) as tmp_file:
output_data = "".join(tmp_file.readlines())
return output_data
class FakeEagerSession(object):
"""Fake session so tests that conditionally use placeholders can use eager.
There are a number of tests that conditionally use placeholders for shape
inference. The pattern is demonstrated here:
```python
with self.cached_session() as sess:
if static_shape:
y = math_ops.matmul(x, ...)
feed_dict = {}
else:
x_ph = array_ops.placeholder(...)
y = math_ops.matmul(x_ph, ...)
feed_dict = {x_ph: x}
val = sess.run(y, feed_dict=feed_dict)
```
Since the feed_dict is empty when not using placeholders, we should be able
to call self.evaluate(); however, this requires rewriting the test case.
This class should be considered a stop-gap solution to get tests running with
eager with minimal changes to the actual test.
"""
def __init__(self, test_case):
self._test_case = test_case
def run(self, fetches, *args, **kwargs):
"""Evaluate `fetches`.
Fail if additional args are specified.
Args:
fetches: A Tensor or a nested list/tuple of Tensors.
*args: Positional arguments
**kwargs: Keyword arguments
Raises:
RuntimeError: If args or kwargs are specified.
Returns:
Tensors as numpy values.
"""
feed_dict = kwargs.pop("feed_dict", {})
if feed_dict:
raise RuntimeError(
"feed_dict is not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
if args or kwargs:
raise RuntimeError(
"Optional args are not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
return self._test_case.evaluate(fetches)
class ErrorLoggingSession(session.Session):
"""Wrapper around a Session that logs errors in run()."""
def run(self, *args, **kwargs):
try:
return super(ErrorLoggingSession, self).run(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
# Note: disable the logging for OutOfRangeError, which makes the output
# of tf.data tests hard to read, because OutOfRangeError is used as the
# signal of completion.
if not isinstance(e, errors.OutOfRangeError):
logging.error(str(e))
raise
def disable_cudnn_autotune(func):
"""Disable autotuning during the call to this function.
Some tests want to base assertions on a graph being isomorphic with a copy.
To ensure this, this decorator disables autotuning.
Args:
func: Function to run with CuDNN autotuning turned off.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_tf_cudnn_use_autotune = os.environ.get("TF_CUDNN_USE_AUTOTUNE")
os.environ["TF_CUDNN_USE_AUTOTUNE"] = "false"
original_xla_flags = os.environ.get("XLA_FLAGS")
new_xla_flags = "--xla_gpu_autotune_level=0"
if original_xla_flags:
new_xla_flags = original_xla_flags + " " + new_xla_flags
os.environ["XLA_FLAGS"] = new_xla_flags
result = f(self, *args, **kwargs)
if (original_tf_cudnn_use_autotune is None):
del os.environ["TF_CUDNN_USE_AUTOTUNE"]
else:
os.environ["TF_CUDNN_USE_AUTOTUNE"] = original_tf_cudnn_use_autotune
if (original_xla_flags is None):
del os.environ["XLA_FLAGS"]
else:
os.environ["XLA_FLAGS"] = original_xla_flags
return result
return decorated
if func is not None:
return decorator(func)
return decorator
# The description is just for documentation purposes.
def enable_tf_xla_constant_folding(description):
if not isinstance(description, str):
raise ValueError("'description' should be string, got {}".format(
type(description)))
def enable_tf_xla_constant_folding_impl(func):
"""Enable constant folding during the call to this function.
Some tests fail without constant folding.
Args:
func: Function to run with constant folding turned on.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = pywrap_tf_session.TF_GetXlaConstantFoldingDisabled()
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(False)
result = f(self, *args, **kwargs)
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(original_var)
return result
return decorated
if func is not None:
return decorator(func)
return decorator
return enable_tf_xla_constant_folding_impl
# Updates test function by selectively disabling it.
def _disable_test(execute_func):
def disable_test_impl(func):
def decorator(func):
def decorated(self, *args, **kwargs):
if execute_func:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return disable_test_impl
# The description is just for documentation purposes.
def disable_xla(description): # pylint: disable=unused-argument
"""Execute the test method only if xla is not enabled."""
execute_func = not is_xla_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_mlir_bridge(description): # pylint: disable=unused-argument
"""Execute the test method only if MLIR bridge is not enabled."""
execute_func = not is_mlir_bridge_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_tfrt(unused_description):
def disable_tfrt_impl(cls_or_func):
"""Execute the test only if tfrt is not enabled."""
if tf_inspect.isclass(cls_or_func):
if is_tfrt_enabled():
return None
else:
return cls_or_func
else:
def decorator(func):
def decorated(self, *args, **kwargs):
if is_tfrt_enabled():
return
else:
return func(self, *args, **kwargs)
return decorated
if cls_or_func is not None:
return decorator(cls_or_func)
return decorator
return disable_tfrt_impl
def for_all_test_methods(decorator, *args, **kwargs):
"""Generate class-level decorator from given method-level decorator.
It is expected for the given decorator to take some arguments and return
a method that is then called on the test method to produce a decorated
method.
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
Returns: Function that will decorate all of a given class's test methods with
the decorator.
"""
def all_test_methods_impl(cls):
"""Apply decorator to all test methods in class."""
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith(
"test") and (name != "test_session"):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl
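# Hypothetical usage sketch (illustrative class name): apply a method-level
# decorator to every test method of a class at once, mirroring how
# `run_all_without_tensor_float_32` below is built.
#   @for_all_test_methods(run_without_tensor_float_32,
#                         "Comparisons use tight tolerances.")
#   class _ExampleNumericsTest(TensorFlowTestCase):
#
#     def test_matmul_close(self):
#       ...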
# The description is just for documentation purposes.
def no_xla_auto_jit(description): # pylint: disable=unused-argument
"""This test is not intended to be run with XLA auto jit enabled."""
execute_func = not is_xla_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def xla_allow_fallback(description): # pylint: disable=unused-argument
def xla_allow_fallback_impl(func):
"""Allow fallback to TF even though testing xla."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Update the global XLABuildOpsPassFlags to enable lazy compilation,
# which allows the compiler to fall back to TF classic. Remember the
# old value so that we can reset it.
old_value = pywrap_tf_session.TF_SetXlaEnableLazyCompilation(True)
result = func(self, *args, **kwargs)
pywrap_tf_session.TF_SetXlaEnableLazyCompilation(old_value)
return result
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return xla_allow_fallback_impl
# The description is just for documentation purposes.
def run_without_tensor_float_32(description): # pylint: disable=unused-argument
"""Execute test with TensorFloat-32 disabled.
While almost every real-world deep learning model runs fine with
TensorFloat-32, many tests use assertAllClose or similar methods.
TensorFloat-32 matmuls will typically cause such methods to fail with the
default tolerances.
Args:
description: A description used for documentation purposes, describing why
the test requires TensorFloat-32 to be disabled.
Returns:
Decorator which runs a test with TensorFloat-32 disabled.
"""
def decorator(f):
@functools.wraps(f)
def decorated(self, *args, **kwargs):
allowed = config.tensor_float_32_execution_enabled()
try:
config.enable_tensor_float_32_execution(False)
f(self, *args, **kwargs)
finally:
config.enable_tensor_float_32_execution(allowed)
return decorated
return decorator
# The description is just for documentation purposes.
def run_all_without_tensor_float_32(description): # pylint: disable=unused-argument
"""Execute all tests in a class with TensorFloat-32 disabled."""
return for_all_test_methods(run_without_tensor_float_32, description)
def matmul_without_tf32(a, b, *args, **kwargs):
"""Run matmul but cast float32 inputs to float64 if TensorFloat-32 is enabled.
This effectively runs matmul without TensorFloat-32. It should only be used in
tests when verifying that some other op or function works correctly, e.g. to test
`tf.linalg.sqrtm` by matrix multiplying the output of the op by itself. In
such cases, the matmul itself is not being tested so it's OK to run it with
higher precision.
If a matmul itself is being tested, or some other op which uses matmul, use
`run_without_tensor_float_32` instead.
Args:
a: First input to tf.linalg.matmul
b: Second input to tf.linalg.matmul
args: Other positional arguments to tf.linalg.matmul
**kwargs: Other keyword arguments to tf.linalg.matmul
Returns:
A tensor with the same type as `a`.
"""
if config.tensor_float_32_execution_enabled() and a.dtype == "float32":
a = math_ops.cast(a, "float64")
b = math_ops.cast(b, "float64")
ret = math_ops.matmul(a, b, *args, **kwargs)
return math_ops.cast(ret, a.dtype)
else:
return math_ops.matmul(a, b, *args, **kwargs)
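# Hypothetical usage sketch (assumes the public `tf` namespace is available in
# the test file): use the float64 fallback when matmul is only a means to
# verify another op, so TensorFloat-32 rounding does not dominate the check.
#   sqrt_x = tf.linalg.sqrtm(x)              # op under test
#   reconstructed = matmul_without_tf32(sqrt_x, sqrt_x)
#   self.assertAllClose(x, reconstructed, atol=1e-5)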
class EagerSessionWarner(object):
def __getattr__(self, attr):
raise AttributeError(
"Trying to access properties or call methods on the result of "
"self.session(), self.cached_session(), etc while eager execution "
"is enabled. If you're porting this test case to TF 2.0, either "
"adapt the test to work with eager execution or insert a call to "
"tf.disable_eager_execution() in the main() function of this test "
"file.")
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow."""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
if is_xla_enabled():
pywrap_tf_session.TF_SetXlaAutoJitMode("2")
pywrap_tf_session.TF_SetXlaMinClusterSize(1)
pywrap_tf_session.TF_SetXlaEnableLazyCompilation(False)
pywrap_tf_session.TF_SetTfXlaCpuGlobalJit(True)
# Constant folding secretly runs code on TF:Classic CPU, so we also
# disable it here.
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(True)
if is_mlir_bridge_enabled():
context.context().enable_mlir_bridge = True
self._threads = []
self._tempdir = None
self._cached_session = None
self._test_start_time = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
# Reset summary writer in case another test used set_as_default() with their
# summary writer.
summary_state = summary_ops_v2._summary_state # pylint: disable=protected-access
summary_state.writer = None
# Avoid calling setUp() for the poorly named test_session method.
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
self._test_start_time = time.time()
def tearDown(self):
# If a subclass overrides setUp and doesn't call the parent class's setUp,
# then we may not have set the start time.
if self._test_start_time is not None:
logging.info("time(%s): %ss", self.id(),
round(time.time() - self._test_start_time, 2))
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
If you call this method multiple times during a test, it will return the
same folder. However, across different runs the directories will be
different. This ensures that tests from different runs cannot pollute
each other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir())
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
@contextlib.contextmanager
def captureWritesToStream(self, stream):
"""A context manager that captures the writes to a given stream.
This context manager captures all writes to a given stream inside of a
`CapturedWrites` object. When this context manager is created, it yields
the `CapturedWrites` object. The captured contents can be accessed by
calling `.contents()` on the `CapturedWrites`.
For this function to work, the stream must have a file descriptor that
can be modified using `os.dup` and `os.dup2`, and the stream must support
a `.flush()` method. The default python sys.stdout and sys.stderr are
examples of this. Note that this does not work in Colab or Jupyter
notebooks, because those use alternate stdout streams.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
input = [1.0, 2.0, 3.0, 4.0, 5.0]
with self.captureWritesToStream(sys.stdout) as captured:
result = MyOperator(input).eval()
self.assertStartsWith(captured.contents(), "This was printed.")
```
Args:
stream: The stream whose writes should be captured. This stream must have
a file descriptor, support writing via using that file descriptor, and
must have a `.flush()` method.
Yields:
A `CapturedWrites` object that contains all writes to the specified stream
made during this context.
"""
stream.flush()
fd = stream.fileno()
tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
tmp_file = open(tmp_file_path, "w")
orig_fd = os.dup(fd)
os.dup2(tmp_file.fileno(), fd)
try:
yield CapturedWrites(tmp_file_path)
finally:
tmp_file.close()
os.dup2(orig_fd, fd)
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
for floating point attributes, and then uses assertProtoEqual()
in case of failure, as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, (str, bytes)):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
if sparse_tensor.is_sparse(tensor):
return sparse_tensor.SparseTensorValue(tensor.indices.numpy(),
tensor.values.numpy(),
tensor.dense_shape.numpy())
elif ragged_tensor.is_ragged(tensor):
return ragged_tensor_value.RaggedTensorValue(
self._eval_tensor(tensor.values),
self._eval_tensor(tensor.row_splits))
elif isinstance(tensor, ops.IndexedSlices):
return ops.IndexedSlicesValue(
values=tensor.values.numpy(),
indices=tensor.indices.numpy(),
dense_shape=tensor.dense_shape.numpy())
# Convert tensors and composite tensors to numpy arrays.
return nest.map_structure(lambda t: t.numpy(), tensor,
expand_composites=True)
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
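# Hypothetical usage sketch (`math_ops` import assumed): `evaluate` gives one
# code path for eager and graph tests. Under eager execution it is effectively
# `z.numpy()`; in graph mode it runs `z` in the cached/default session.
#   z = math_ops.add([1, 2], [3, 4])
#   self.assertAllEqual([4, 6], self.evaluate(z))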
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):
"""A context manager for a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
``` python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield EagerSessionWarner()
else:
with self._create_session(graph, config, force_gpu) as sess:
with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
yield sess
@contextlib.contextmanager
def cached_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method behaves differently than self.session(): for performance reasons
`cached_session` will by default reuse the same session within the same
test. The session returned by this function will only be closed at the end
of the test (in the TearDown function).
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.cached_session(use_gpu=True) as sess:
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield FakeEagerSession(self)
else:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=True)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
"`self.cached_session()` instead.")
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest(
"Tests that have the name \"test_session\" are automatically skipped "
"by TensorFlow test fixture, as the name is reserved for creating "
"sessions within tests. Please rename your test if you have a test "
"with this name.")
if context.executing_eagerly():
yield None
else:
if graph is None:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=False)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
self._testcase.failureException: If the thread terminates due to
an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
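# Hypothetical usage sketch (`_worker` and `compute_something` are assumed,
# illustrative helpers): assertions raised inside the thread are re-raised
# when the thread is joined instead of being silently lost.
#   def _worker():
#     self.assertGreater(compute_something(), 0)
#
#   t = self.checkedThread(target=_worker)
#   t.start()
#   t.join()  # fails the test if _worker raised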
# pylint: enable=invalid-name
@py_func_if_in_function
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err, "%f != %f +/- %f%s" %
(f1, f2, err, " (%s)" % msg if msg is not None else ""))
@py_func_if_in_function
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
@py_func_if_in_function
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is tensor-like then convert it to ndarray
if tensor_util.is_tensor(a):
if isinstance(a, ops._EagerTensorBase):
a = a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# When the array rank is small, print its contents. Numpy array printing is
# implemented using inefficient recursion so prints can cause tests to
# time out.
if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
"%s.") % (a.shape, b.shape, b)
else:
shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
b.shape)
self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
msgs = [msg]
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Adds more details to np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# tell the user which elements violate these conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
msgs.append("not close where = {}".format(np.where(cond)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not close lhs = {}".format(x))
msgs.append("not close rhs = {}".format(y))
msgs.append("not close dif = {}".format(np.abs(x - y)))
msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
msgs.append("dtype = {}, shape = {}".format(a.dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join(str(p) for p in path) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections_abc.Mapping)
if a_is_dict != isinstance(b, collections_abc.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
# Try to directly compare a, b as ndarrays; if that does not work, then
# traverse through the sequence, which is more expensive.
try:
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
@py_func_if_in_function
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Note: the implementation follows
[`numpy.allclose`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html)
(and numpy.testing.assert_allclose). It checks whether two arrays are
element-wise equal within a tolerance. The relative difference
(`rtol * abs(b)`) and the absolute difference `atol` are added together
to compare against the absolute difference between `a` and `b`.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested
structure of these.
b: The actual numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested
structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
if ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b):
return self._assertRaggedClose(a, b, rtol, atol, msg)
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
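# Hypothetical usage sketch (`loss_tensor` and `grad_tensors` stand for values
# produced by the surrounding test): nested structures are compared leaf by
# leaf, and the failure message includes the path to the offending leaf.
#   expected = {"loss": 0.25, "grads": [(1.0, 2.0), (3.0, 4.0)]}
#   actual = {"loss": loss_tensor, "grads": grad_tensors}
#   self.assertAllClose(expected, actual, rtol=1e-5, atol=1e-5)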
@py_func_if_in_function
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is relaxed to 1e-3 if at least
one of the arguments is of type float16.
Args:
a: the expected numpy ndarray, or anything that can be converted to one.
b: the actual numpy ndarray, or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertNotAllClose(self, a, b, **kwargs):
"""Assert that two numpy arrays, or Tensors, do not have near values.
Args:
a: the first value to compare.
b: the second value to compare.
**kwargs: additional keyword arguments to be passed to the underlying
`assertAllClose` call.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, **kwargs)
except AssertionError:
return
raise AssertionError("The two values are close at all elements")
@py_func_if_in_function
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors have the same values.
Args:
a: the expected numpy ndarray, or anything that can be converted to one.
b: the actual numpy ndarray, or anything that can be converted to one.
msg: Optional message to report on failure.
"""
if (ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b)):
return self._assertRaggedEqual(a, b, msg)
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# Arbitrary bounds so that we don't print giant tensors.
if (b.ndim <= 3 or b.size < 500):
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" Contents: %r. \n%s." % (a.shape, b.shape, b, msg))
else:
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if (a.dtype in [
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
msgs = [msg]
if not np.all(same):
# Adds more details to np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
msgs.append("not equal where = {}".format(np.where(diff)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not equal lhs = %r" % x)
msgs.append("not equal rhs = %r" % y)
# Handle mixed string types as a result of PY2to3 migration. That is, the
# mixing between bytes (b-prefix strings, PY2 default) and unicodes
# (u-prefix strings, PY3 default).
if six.PY3:
if (a.dtype.kind != b.dtype.kind and
{a.dtype.kind, b.dtype.kind}.issubset({"U", "S", "O"})):
a_list = []
b_list = []
# OK to flatten `a` and `b` because they are guaranteed to have the
# same shape.
for out_list, flat_arr in [(a_list, a.flat), (b_list, b.flat)]:
for item in flat_arr:
if isinstance(item, str):
out_list.append(item.encode("utf-8"))
else:
out_list.append(item)
a = np.array(a_list)
b = np.array(b_list)
np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
@py_func_if_in_function
def assertNotAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors do not have the same values.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
msg: Optional message to report on failure.
"""
try:
self.assertAllEqual(a, b)
except AssertionError:
return
raise AssertionError("The two values are equal at all elements. %s" % msg)
@py_func_if_in_function
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
@py_func_if_in_function
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
    If limit == N, this method will print up to the first N subscripts on
    separate lines. A line of ellipses (...) will be appended at the end if
    the number of subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
if np.ndim(value) == 0:
return [prefix + "[0] : " + str(value)]
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
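  # Illustrative sketch (comments only, not part of the class): for the
  # np.where-style subscripts described in the docstring above, the helper
  # produces one "[index] : value" line per offending element, e.g.
  #
  #   value = np.array([[0., 1.], [2., np.nan]])
  #   subscripts = np.where(np.isnan(value))      # -> (array([1]), array([1]))
  #   self._format_subscripts(subscripts, value)  # -> ['  [1 1] : nan']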
@py_func_if_in_function
def assertAllInRange(self,
target,
lower_bound,
upper_bound,
open_lower_bound=False,
open_upper_bound=False):
"""Assert that elements in a Tensor are all in a given range.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
lower_bound: lower bound of the range
upper_bound: upper bound of the range
open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
than the default >=)
open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
than the default <=)
Raises:
AssertionError:
if the value tensor does not have an ordered numeric type (float* or
int*), or
if there are nan values, or
if any of the elements do not fall in the specified range.
"""
target = self._GetNdArray(target)
if not (np.issubdtype(target.dtype, np.floating) or
np.issubdtype(target.dtype, np.integer)):
raise AssertionError(
"The value of %s does not have an ordered numeric type, instead it "
"has type: %s" % (target, target.dtype))
nan_subscripts = np.where(np.isnan(target))
if np.size(nan_subscripts):
      raise AssertionError(
          "%d of the %d element(s) are NaN. "
          "Subscript(s) and value(s) of the NaN element(s):\n" %
(len(nan_subscripts[0]), np.size(target)) +
"\n".join(self._format_subscripts(nan_subscripts, target)))
range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
str(upper_bound) + (")" if open_upper_bound else "]"))
violations = (
np.less_equal(target, lower_bound) if open_lower_bound else np.less(
target, lower_bound))
violations = np.logical_or(
violations,
np.greater_equal(target, upper_bound)
if open_upper_bound else np.greater(target, upper_bound))
violation_subscripts = np.where(violations)
if np.size(violation_subscripts):
raise AssertionError(
"%d of the %d element(s) are outside the range %s. " %
(len(violation_subscripts[0]), np.size(target), range_str) +
"Subscript(s) and value(s) of the offending elements:\n" +
"\n".join(self._format_subscripts(violation_subscripts, target)))
@py_func_if_in_function
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
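  # Illustrative sketch (comments only, not part of the class): the range
  # bounds above are closed by default, while the open_* flags exclude the
  # endpoints; assertAllInSet checks membership instead. Assuming a
  # tf.test.TestCase subclass as `self`:
  #
  #   x = np.array([0.0, 0.5, 1.0])
  #   self.assertAllInRange(x, 0.0, 1.0)                         # passes
  #   self.assertAllInRange(x, 0.0, 1.0, open_upper_bound=True)  # fails: 1.0 is not < 1.0
  #   self.assertAllInSet(np.array([0, 1, 1]), {0, 1})           # passes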
@py_func_if_in_function
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
    target = self._GetNdArray(target)
    # Accept either a single array or a list of arrays.
    arrays = target if isinstance(target, list) else [target]
    for arr in arrays:
      self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and returns True
(success) or False (please fail the test). Otherwise, the error message
is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" %
(str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
def _GetPyList(self, a):
"""Converts `a` to a nested python list."""
if isinstance(a, ragged_tensor.RaggedTensor):
return self.evaluate(a).to_list()
elif isinstance(a, ops.Tensor):
a = self.evaluate(a)
return a.tolist() if isinstance(a, np.ndarray) else a
elif isinstance(a, np.ndarray):
return a.tolist()
elif isinstance(a, ragged_tensor_value.RaggedTensorValue):
return a.to_list()
else:
return np.array(a).tolist()
def _assertRaggedEqual(self, a, b, msg):
"""Asserts that two ragged tensors are equal."""
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self.assertEqual(a_list, b_list, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertRaggedClose(self, a, b, rtol, atol, msg=None):
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self._assertListCloseRecursive(a_list, b_list, rtol, atol, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertListCloseRecursive(self, a, b, rtol, atol, msg, path="value"):
self.assertEqual(type(a), type(b))
if isinstance(a, (list, tuple)):
self.assertLen(a, len(b), "Length differs for %s" % path)
for i in range(len(a)):
self._assertListCloseRecursive(a[i], b[i], rtol, atol, msg,
"%s[%s]" % (path, i))
else:
self._assertAllCloseRecursive(a, b, rtol, atol, path, msg)
# Fix Python 3+ compatibility issues
if not six.PY2:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""Set the session and its graph to global default and constrain devices."""
if context.executing_eagerly():
yield None
else:
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or
# '/device:GPU:0' otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/device:CPU:0"):
yield sess
def _create_session(self, graph, config, force_gpu):
"""See session() for details."""
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
# TODO(b/114333779): Enforce allow_soft_placement=False when
# use_gpu=False. Currently many tests rely on the fact that any device
# will be used even when a specific device is supposed to be used.
allow_soft_placement = not force_gpu
if config is None:
config = context.context().config
config.allow_soft_placement = allow_soft_placement
elif not allow_soft_placement and config.allow_soft_placement:
config_copy = context.context().config
config = config_copy
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
# Disable Grappler constant folding since some tests & benchmarks
# use constant input and become meaningless after constant folding.
# DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
# GRAPPLER TEAM.
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
graph=None,
config=None,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
if self._cached_session is None:
sess = self._create_session(
graph=graph, config=config, force_gpu=force_gpu)
self._cached_session = sess
self._cached_graph = graph
self._cached_config = config
self._cached_force_gpu = force_gpu
return sess
else:
if crash_if_inconsistent_args and self._cached_graph is not graph:
raise ValueError("The graph used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_config is not config:
raise ValueError("The config used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and (self._cached_force_gpu is
not force_gpu):
raise ValueError(
"The force_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
return self._cached_session
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
"PS" stands for "parameter server": a task responsible for storing and
updating the model's parameters. Other tasks send updates to these parameters
as they work on optimizing the parameters. This particular division of labor
between tasks is not required, but is common for distributed training.
Read more at https://www.tensorflow.org/guide/extend/architecture

Figure illustrates the interaction of these components.
"/job:worker/task:0" and "/job:ps/task:0" are both tasks with worker services.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.compat.v1.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in the
documentation of `tf.distribute.Server`.
worker_config: (optional) `tf.ConfigProto` to initialize workers. Can be
used to instantiate multiple devices etc.
ps_config: (optional) `tf.ConfigProto` to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.distribute.Server` (all running
locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
import portpicker # pylint: disable=g-import-not-at-top
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
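# Illustrative sketch (comments only; the graph and node name are placeholders):
#
#   graph_def = tf.compat.v1.get_default_graph().as_graph_def()
#   node = get_node_def_from_graph("my_op", graph_def)  # NodeDef, or None if absent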
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
  assert graph.graph_def_versions.producer == producer_version
@contextlib.contextmanager
def _fake_gradient_tape_context_manager():
"""tf.gradients(...) implemented as tf.GradientTape context manager interface.
This is useful to test tf.gradients() in tests that uses tf.GradientTape().
Yields:
gradient tape instance that's implemented by tf.gradients() underneath.
"""
try:
class FakeGradientTape:
def watch(self, x):
pass
def gradient(self, y, x, grad_ys=None):
result = gradients_impl.gradients(y, x, grad_ys)
# Unlike `tape.gradient()`, `tf.gradients()` returns a list for a single
# element. So unpack if needed to match `tape.gradient()` behavior.
if not isinstance(x, (list, tuple)):
assert len(result) == 1
return result[0]
return result
yield FakeGradientTape()
finally:
pass
class AbstractGradientTape:
"""Abstract GradientTape context manager that has multiple implementations.
This is useful to test both tf.GradientTape() and tf.gradients() without
duplicating tests.
"""
def __init__(self, use_tape, persistent=False):
self._use_tape = use_tape
self._persistent = persistent
def __enter__(self):
if self._use_tape:
self._tape_impl = backprop.GradientTape(persistent=self._persistent)
else:
self._tape_impl = _fake_gradient_tape_context_manager()
return self._tape_impl.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
self._tape_impl.__exit__(exc_type, exc_val, exc_tb)
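# Illustrative sketch (comments only, not part of this module's API): the same
# test body can be parameterized over both gradient implementations. Note that
# the use_tape=False path is backed by tf.gradients and therefore needs graph
# mode:
#
#   with AbstractGradientTape(use_tape=use_tape) as tape:
#     tape.watch(x)
#     y = x * x
#   dy_dx = tape.gradient(y, x)  # holds 2*x for either implementation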
@contextlib.contextmanager
def run_functions_eagerly(run_eagerly):
"""Runs functions eagerly if `run_eagerly` is true.
WARNING: Setting `run_eagerly` to True in tests running in V1 graph mode
*WILL NOT* make the tf.function to run eagerly because eager is disabled by
default in V1. Instead, tf.function will run as a traced graph function.
Ensures that the state (for running functions eagerly) is back to the initial
`def_function.RUN_FUNCTIONS_EAGERLY` state.
Args:
run_eagerly: Boolean determining whether to run the function eagerly or not.
Raises:
ValueError if `run_eagerly` is not a boolean.
Yields:
Nothing.
"""
if not isinstance(run_eagerly, bool):
raise ValueError(
"Expected bool for `run_eagerly` but got {}".format(run_eagerly))
is_eager = context.executing_eagerly()
if not is_eager and run_eagerly:
logging.warning(
"Running tf.function eagerly in V1 graph mode is not supported. "
"tf.function will be run as a traced graph function.")
initial_state = def_function.functions_run_eagerly()
def_function.run_functions_eagerly(run_eagerly)
try:
yield
finally:
def_function.run_functions_eagerly(initial_state)
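# A minimal usage sketch of the assertion helpers defined above (comments only;
# assumes TensorFlow and NumPy are installed, and `ToleranceExampleTest` is a
# hypothetical test class, not part of this module):
#
#   import numpy as np
#   import tensorflow as tf
#
#   class ToleranceExampleTest(tf.test.TestCase):
#
#     def test_close_and_in_range(self):
#       a = np.array([1.0, 2.0], dtype=np.float16)
#       b = a + np.float16(5e-4)
#       # float16 inputs relax the tolerances to half_rtol/half_atol (1e-3).
#       self.assertAllCloseAccordingToType(a, b)
#       self.assertAllInRange(a, 0.5, 2.5)
#       self.assertAllInSet(np.array([0, 1, 1, 0]), {0, 1})
#
#   if __name__ == "__main__":
#     tf.test.main()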
|
Subscription.py
|
import yaml, utils, parameters, time, os, socket  # socket is needed for socket.timeout below
from multiprocessing import Process, Pipe
class Subscription():
def __init__(self, socket):
        (self.receiver, self.sender) = Pipe(False)  # Open a unidirectional pipe
self.callback = utils.default_notifcation_callback
self.s = socket
def setCallback(self, callback):
self.callback = callback
#Used by the server
def addClient(self, nbre, client, resource, rule, qos):
f = open('server_subscriptions.yaml', 'a+')
data = {nbre: {"client": client, "resource": resource, "rule": rule, "qos": qos }}
yaml.dump(data, f, default_flow_style=False)
f.close()
print(">>Subs class: The client '{}' subscribed to resource '{}', with the rule '{}' and QoS {}".format(client, resource, rule, qos))
'''
-------------------------------------------
Methods used by the client: the subscriber:
-addServer
-removeServer
--------------------------------------------
'''
    def addServer(self, nbre, client, resource, rule, qos):
        notification_server = Process(target=utils.listen_to_notifications, args=(self.callback, self.s, self.receiver, nbre))
        notification_server.start()
        f = open('client_subscriptions.yaml', 'a+')
        data = {nbre: {"client": client, "resource": resource, "rule": rule, "qos": qos, "pid": notification_server.pid}}
        yaml.dump(data, f, default_flow_style=False)
        f.close()
        print(">>{} subscriber: Just subscribed to resource '{}', with the rule '{}' and QoS {}.".format(client, resource, rule, qos))
    def removeServer(self, notif_id):
        f = open('client_subscriptions.yaml', 'r')
        data = yaml.safe_load(f)
        f.close()
        pid = None if data is None else data.get(notif_id, {}).get('pid')
        if pid is None:
            print(">>Subs class: The registration number doesn't exist")
            return False
        data.pop(notif_id)
        os.system('kill -9 {}'.format(pid))
        f = open('client_subscriptions.yaml', 'w')
        if data:
            yaml.dump(data, f, default_flow_style=False)
        f.close()
        print(">>Subs class: Unsubscribed from notification {}".format(notif_id))
        return True
    def getResourceId(self, resource):
        f = open('client_subscriptions.yaml', 'r')
        data = yaml.safe_load(f)
        f.close()
        return None if data is None else next((e for e in data if resource in data[e]['resource']), None)
#Used by the server
    def removeClient(self, notif_id):
        f = open('server_subscriptions.yaml', 'r')
        data = yaml.safe_load(f)
        f.close()
        try:
            data.pop(notif_id)
        except (KeyError, AttributeError, TypeError):
            print(">>Subs class: The registration number doesn't exist")
            return False
        f = open('server_subscriptions.yaml', 'w')
        if data:
            yaml.dump(data, f, default_flow_style=False)
        f.close()
        print(">>Subs class: Unsubscribed from notification {}".format(notif_id))
        return True
#Used by the client
def _listen_to_notifications(self, callback, s, recv_pipe):
while True:
print("Listening...")
if recv_pipe.poll(0.1):
print("Closing the notification server...\n")
break
try:
#raw_data, self.client = self.s.recvfrom(parameters.BUFFERSIZE)
raw_data, client = s.recvfrom(parameters.BUFFERSIZE)
message = Message(raw_data=raw_data)
print(message)
rule_nbre = next((x['Notification'] for x in message.options if 'Notification' in x), 0)
callback(rule_nbre=rule_nbre, message=message.message)
except socket.timeout:
print("Still listening until unsubscription....")
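# A minimal usage sketch (comments only; assumes a UDP socket is available and
# the project's `utils`/`parameters` modules are importable; `my_callback` and
# the resource/rule values are placeholders for the example):
#
#   import socket
#   s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   s.settimeout(1.0)
#
#   sub = Subscription(s)
#   sub.setCallback(my_callback)
#   sub.addServer(1, 'client-A', '/temperature', 'on-change', qos=1)
#   ...
#   sub.removeServer(1)  # stops the notification listener and drops the entry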
|
dag_processing.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import multiprocessing
import os
import re
import signal
import sys
import time
import zipfile
from abc import ABCMeta, abstractmethod
from datetime import datetime, timedelta
from importlib import import_module
import enum
from typing import Optional, NamedTuple, Iterable
import psutil
from setproctitle import setproctitle
import six
from six.moves import reload_module
from sqlalchemy import or_
from tabulate import tabulate
# To avoid circular imports
import airflow.models
from airflow.configuration import conf
from airflow.dag.base_dag import BaseDag, BaseDagBag
from airflow.exceptions import AirflowException
from airflow.settings import Stats
from airflow.models import errors
from airflow.utils import timezone
from airflow.utils.helpers import reap_process_group
from airflow.utils.db import provide_session
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.state import State
if six.PY2:
ConnectionError = IOError
class SimpleDag(BaseDag):
"""
A simplified representation of a DAG that contains all attributes
required for instantiating and scheduling its associated tasks.
"""
def __init__(self, dag, pickle_id=None):
"""
:param dag: the DAG
:type dag: airflow.models.DAG
:param pickle_id: ID associated with the pickled version of this DAG.
:type pickle_id: unicode
"""
self._dag_id = dag.dag_id
self._task_ids = [task.task_id for task in dag.tasks]
self._full_filepath = dag.full_filepath
self._is_paused = dag.is_paused
self._concurrency = dag.concurrency
self._pickle_id = pickle_id
self._task_special_args = {}
for task in dag.tasks:
special_args = {}
if task.task_concurrency is not None:
special_args['task_concurrency'] = task.task_concurrency
if len(special_args) > 0:
self._task_special_args[task.task_id] = special_args
@property
def dag_id(self):
"""
:return: the DAG ID
:rtype: unicode
"""
return self._dag_id
@property
def task_ids(self):
"""
:return: A list of task IDs that are in this DAG
:rtype: list[unicode]
"""
return self._task_ids
@property
def full_filepath(self):
"""
:return: The absolute path to the file that contains this DAG's definition
:rtype: unicode
"""
return self._full_filepath
@property
def concurrency(self):
"""
:return: maximum number of tasks that can run simultaneously from this DAG
:rtype: int
"""
return self._concurrency
@property
def is_paused(self):
"""
:return: whether this DAG is paused or not
:rtype: bool
"""
return self._is_paused
@property
def pickle_id(self):
"""
:return: The pickle ID for this DAG, if it has one. Otherwise None.
:rtype: unicode
"""
return self._pickle_id
@property
def task_special_args(self):
return self._task_special_args
def get_task_special_arg(self, task_id, special_arg_name):
if task_id in self._task_special_args and special_arg_name in self._task_special_args[task_id]:
return self._task_special_args[task_id][special_arg_name]
else:
return None
class SimpleTaskInstance(object):
def __init__(self, ti):
self._dag_id = ti.dag_id
self._task_id = ti.task_id
self._execution_date = ti.execution_date
self._start_date = ti.start_date
self._end_date = ti.end_date
self._try_number = ti.try_number
self._state = ti.state
self._executor_config = ti.executor_config
if hasattr(ti, 'run_as_user'):
self._run_as_user = ti.run_as_user
else:
self._run_as_user = None
if hasattr(ti, 'pool'):
self._pool = ti.pool
else:
self._pool = None
if hasattr(ti, 'priority_weight'):
self._priority_weight = ti.priority_weight
else:
self._priority_weight = None
self._queue = ti.queue
self._key = ti.key
@property
def dag_id(self):
return self._dag_id
@property
def task_id(self):
return self._task_id
@property
def execution_date(self):
return self._execution_date
@property
def start_date(self):
return self._start_date
@property
def end_date(self):
return self._end_date
@property
def try_number(self):
return self._try_number
@property
def state(self):
return self._state
@property
def pool(self):
return self._pool
@property
def priority_weight(self):
return self._priority_weight
@property
def queue(self):
return self._queue
@property
def key(self):
return self._key
@property
def executor_config(self):
return self._executor_config
@provide_session
def construct_task_instance(self, session=None, lock_for_update=False):
"""
Construct a TaskInstance from the database based on the primary key
:param session: DB session.
:param lock_for_update: if True, indicates that the database should
lock the TaskInstance (issuing a FOR UPDATE clause) until the
session is committed.
"""
TI = airflow.models.TaskInstance
qry = session.query(TI).filter(
TI.dag_id == self._dag_id,
TI.task_id == self._task_id,
TI.execution_date == self._execution_date)
if lock_for_update:
ti = qry.with_for_update().first()
else:
ti = qry.first()
return ti
class SimpleDagBag(BaseDagBag):
"""
A collection of SimpleDag objects with some convenience methods.
"""
def __init__(self, simple_dags):
"""
Constructor.
        :param simple_dags: SimpleDag objects that should be in this bag
        :type simple_dags: list[airflow.utils.dag_processing.SimpleDag]
"""
self.simple_dags = simple_dags
self.dag_id_to_simple_dag = {}
for simple_dag in simple_dags:
self.dag_id_to_simple_dag[simple_dag.dag_id] = simple_dag
@property
def dag_ids(self):
"""
:return: IDs of all the DAGs in this
:rtype: list[unicode]
"""
return self.dag_id_to_simple_dag.keys()
def get_dag(self, dag_id):
"""
:param dag_id: DAG ID
:type dag_id: unicode
:return: if the given DAG ID exists in the bag, return the BaseDag
corresponding to that ID. Otherwise, throw an Exception
:rtype: airflow.utils.dag_processing.SimpleDag
"""
if dag_id not in self.dag_id_to_simple_dag:
raise AirflowException("Unknown DAG ID {}".format(dag_id))
return self.dag_id_to_simple_dag[dag_id]
def correct_maybe_zipped(fileloc):
"""
    If the path contains a folder with a .zip suffix, then
    the folder is treated as a zip archive and the path to the zip is returned.
"""
_, archive, filename = re.search(
r'((.*\.zip){})?(.*)'.format(re.escape(os.sep)), fileloc).groups()
if archive and zipfile.is_zipfile(archive):
return archive
else:
return fileloc
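# Illustrative sketch (comments only; paths are placeholders):
#
#   correct_maybe_zipped('/dags/my_dags.zip/dag.py')  # -> '/dags/my_dags.zip'
#                                                     #    (if the archive exists)
#   correct_maybe_zipped('/dags/dag.py')              # -> '/dags/dag.py'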
COMMENT_PATTERN = re.compile(r"\s*#.*")
def list_py_file_paths(directory, safe_mode=conf.getboolean('core', 'DAG_DISCOVERY_SAFE_MODE', fallback=True),
include_examples=None):
"""
Traverse a directory and look for Python files.
:param directory: the directory to traverse
:type directory: unicode
:param safe_mode: whether to use a heuristic to determine whether a file
contains Airflow DAG definitions. If not provided, use the
core.DAG_DISCOVERY_SAFE_MODE configuration setting. If not set, default
to safe.
:type safe_mode: bool
:param include_examples: include example DAGs
:type include_examples: bool
:return: a list of paths to Python files in the specified directory
:rtype: list[unicode]
"""
if include_examples is None:
include_examples = conf.getboolean('core', 'LOAD_EXAMPLES')
file_paths = []
if directory is None:
return []
elif os.path.isfile(directory):
return [directory]
elif os.path.isdir(directory):
patterns_by_dir = {}
for root, dirs, files in os.walk(directory, followlinks=True):
patterns = patterns_by_dir.get(root, [])
ignore_file = os.path.join(root, '.airflowignore')
if os.path.isfile(ignore_file):
with open(ignore_file, 'r') as f:
# If we have new patterns create a copy so we don't change
# the previous list (which would affect other subdirs)
lines_no_comments = [COMMENT_PATTERN.sub("", line) for line in f.read().split("\n")]
                        patterns = patterns + [re.compile(line) for line in lines_no_comments if line]
# If we can ignore any subdirs entirely we should - fewer paths
# to walk is better. We have to modify the ``dirs`` array in
# place for this to affect os.walk
dirs[:] = [
d
for d in dirs
if not any(p.search(os.path.join(root, d)) for p in patterns)
]
# We want patterns defined in a parent folder's .airflowignore to
# apply to subdirs too
for d in dirs:
patterns_by_dir[os.path.join(root, d)] = patterns
for f in files:
try:
file_path = os.path.join(root, f)
if not os.path.isfile(file_path):
continue
mod_name, file_ext = os.path.splitext(
os.path.split(file_path)[-1])
if file_ext != '.py' and not zipfile.is_zipfile(file_path):
continue
if any([re.findall(p, file_path) for p in patterns]):
continue
# Heuristic that guesses whether a Python file contains an
# Airflow DAG definition.
might_contain_dag = True
if safe_mode and not zipfile.is_zipfile(file_path):
with open(file_path, 'rb') as fp:
content = fp.read()
might_contain_dag = all(
[s in content for s in (b'DAG', b'airflow')])
if not might_contain_dag:
continue
file_paths.append(file_path)
except Exception:
log = LoggingMixin().log
log.exception("Error while examining %s", f)
if include_examples:
import airflow.example_dags
example_dag_folder = airflow.example_dags.__path__[0]
file_paths.extend(list_py_file_paths(example_dag_folder, safe_mode, False))
return file_paths
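# Illustrative sketch (comments only; the path is a placeholder): for a DAG
# folder containing plain modules, zipped DAG bundles and .airflowignore files,
#
#   list_py_file_paths('/opt/airflow/dags', safe_mode=True, include_examples=False)
#
# returns the paths that match no ignore pattern: zip archives, plus .py files
# whose content mentions both b'DAG' and b'airflow' (the safe-mode heuristic).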
class AbstractDagFileProcessor(object):
"""
Processes a DAG file. See SchedulerJob.process_file() for more details.
"""
__metaclass__ = ABCMeta
@abstractmethod
def start(self):
"""
Launch the process to process the file
"""
raise NotImplementedError()
@abstractmethod
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file
"""
raise NotImplementedError()
@property
@abstractmethod
def pid(self):
"""
:return: the PID of the process launched to process the given file
"""
raise NotImplementedError()
@property
@abstractmethod
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
raise NotImplementedError()
@property
@abstractmethod
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
raise NotImplementedError()
@property
@abstractmethod
def result(self):
"""
A list of simple dags found, and the number of import errors
:return: result of running SchedulerJob.process_file()
:rtype: tuple[list[airflow.utils.dag_processing.SimpleDag], int]
"""
raise NotImplementedError()
@property
@abstractmethod
def start_time(self):
"""
:return: When this started to process the file
:rtype: datetime
"""
raise NotImplementedError()
@property
@abstractmethod
def file_path(self):
"""
:return: the path to the file that this is processing
:rtype: unicode
"""
raise NotImplementedError()
DagParsingStat = NamedTuple('DagParsingStat', [
('file_paths', Iterable[str]),
('done', bool),
('all_files_processed', bool)
])
DagFileStat = NamedTuple('DagFileStat', [
('num_dags', int),
('import_errors', int),
('last_finish_time', datetime),
('last_duration', float),
('run_count', int),
])
class DagParsingSignal(enum.Enum):
AGENT_HEARTBEAT = 'agent_heartbeat'
TERMINATE_MANAGER = 'terminate_manager'
END_MANAGER = 'end_manager'
class DagFileProcessorAgent(LoggingMixin):
"""
    Agent for DAG file processing. It is responsible for all DAG parsing
    related jobs in the scheduler process. Mainly it spins up a
    DagFileProcessorManager in a subprocess, collects DAG parsing results
    from it, and exchanges signals/DAG parsing stats with it.
This class runs in the main `airflow scheduler` process.
"""
def __init__(self,
dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
async_mode):
"""
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param file_paths: list of file paths that contain DAG definitions
:type file_paths: list[unicode]
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path, log_file_path)
:type processor_factory: (unicode, unicode, list) -> (AbstractDagFileProcessor)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param async_mode: Whether to start agent in async mode
:type async_mode: bool
"""
self._file_paths = file_paths
self._file_path_queue = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._processor_timeout = processor_timeout
self._async_mode = async_mode
# Map from file path to the processor
self._processors = {}
# Pipe for communicating signals
self._process = None
self._done = False
# Initialized as true so we do not deactivate w/o any actual DAG parsing.
self._all_files_processed = True
self._parent_signal_conn = None
self._collected_dag_buffer = []
def start(self):
"""
Launch DagFileProcessorManager processor and start DAG parsing loop in manager.
"""
self._parent_signal_conn, child_signal_conn = multiprocessing.Pipe()
self._process = multiprocessing.Process(
target=type(self)._run_processor_manager,
args=(
self._dag_directory,
self._file_paths,
self._max_runs,
self._processor_factory,
self._processor_timeout,
child_signal_conn,
self._async_mode,
)
)
self._process.start()
self.log.info("Launched DagFileProcessorManager with pid: %s", self._process.pid)
def heartbeat(self):
"""
        Should only be used when the DAG file processor manager was launched in sync mode.
        Sends an agent heartbeat signal to the manager, requesting that it run one
        processing "loop".
        Call wait_until_finished to ensure that any launched processors have
        finished before continuing.
"""
if not self._process.is_alive():
return
try:
self._parent_signal_conn.send(DagParsingSignal.AGENT_HEARTBEAT)
except ConnectionError:
            # If this died because of an error then it will be noticed and
            # restarted when harvest_simple_dags calls _heartbeat_manager.
pass
def wait_until_finished(self):
while self._parent_signal_conn.poll():
try:
result = self._parent_signal_conn.recv()
except EOFError:
break
self._process_message(result)
if isinstance(result, DagParsingStat):
# In sync mode we don't send this message from the Manager
# until all the running processors have finished
return
@staticmethod
def _run_processor_manager(dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
async_mode):
# Make this process start as a new process group - that makes it easy
# to kill all sub-process of this at the OS-level, rather than having
# to iterate the child processes
os.setpgid(0, 0)
setproctitle("airflow scheduler -- DagFileProcessorManager")
# Reload configurations and settings to avoid collision with parent process.
# Because this process may need custom configurations that cannot be shared,
# e.g. RotatingFileHandler. And it can cause connection corruption if we
# do not recreate the SQLA connection pool.
os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER'] = 'True'
os.environ['AIRFLOW__CORE__COLORED_CONSOLE_LOG'] = 'False'
# Replicating the behavior of how logging module was loaded
# in logging_config.py
reload_module(import_module(airflow.settings.LOGGING_CLASS_PATH.rsplit('.', 1)[0]))
reload_module(airflow.settings)
airflow.settings.initialize()
del os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER']
processor_manager = DagFileProcessorManager(dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
async_mode)
processor_manager.start()
def harvest_simple_dags(self):
"""
Harvest DAG parsing results from result queue and sync metadata from stat queue.
:return: List of parsing result in SimpleDag format.
"""
# Receive any pending messages before checking if the process has exited.
while self._parent_signal_conn.poll():
try:
result = self._parent_signal_conn.recv()
except (EOFError, ConnectionError):
break
self._process_message(result)
simple_dags = self._collected_dag_buffer
self._collected_dag_buffer = []
# If it died unexpectedly restart the manager process
self._heartbeat_manager()
return simple_dags
def _process_message(self, message):
self.log.debug("Received message of type %s", type(message).__name__)
if isinstance(message, DagParsingStat):
self._sync_metadata(message)
else:
self._collected_dag_buffer.append(message)
def _heartbeat_manager(self):
"""
Heartbeat DAG file processor and restart it if we are not done.
"""
if self._process and not self._process.is_alive():
self._process.join(timeout=0)
if not self.done:
self.log.warning(
"DagFileProcessorManager (PID=%d) exited with exit code %d - re-launching",
self._process.pid, self._process.exitcode
)
self.start()
def _sync_metadata(self, stat):
"""
Sync metadata from stat queue and only keep the latest stat.
"""
self._file_paths = stat.file_paths
self._done = stat.done
self._all_files_processed = stat.all_files_processed
@property
def file_paths(self):
return self._file_paths
@property
def done(self):
return self._done
@property
def all_files_processed(self):
return self._all_files_processed
def terminate(self):
"""
Send termination signal to DAG parsing processor manager
and expect it to terminate all DAG file processors.
"""
if self._process and self._process.is_alive():
self.log.info("Sending termination message to manager.")
try:
self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
except ConnectionError:
pass
def end(self):
"""
Terminate (and then kill) the manager process launched.
:return:
"""
if not self._process:
self.log.warning('Ending without manager process.')
return
reap_process_group(self._process.pid, log=self.log)
self._parent_signal_conn.close()
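# Illustrative lifecycle sketch for DagFileProcessorAgent (comments only;
# arguments are placeholders and `processor_factory` stands for whatever the
# scheduler normally supplies):
#
#   agent = DagFileProcessorAgent(dag_directory, file_paths, max_runs=-1,
#                                 processor_factory=processor_factory,
#                                 processor_timeout=timedelta(minutes=2),
#                                 async_mode=True)
#   agent.start()                               # spawn the manager subprocess
#   while not agent.done:
#       agent.heartbeat()                       # needed in sync mode only
#       simple_dags = agent.harvest_simple_dags()
#   agent.terminate()
#   agent.end()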
class DagFileProcessorManager(LoggingMixin):
"""
Given a list of DAG definition files, this kicks off several processors
in parallel to process them and put the results to a multiprocessing.Queue
for DagFileProcessorAgent to harvest. The parallelism is limited and as the
processors finish, more are launched. The files are processed over and
over again, but no more often than the specified interval.
:type _file_path_queue: list[unicode]
:type _processors: dict[unicode, AbstractDagFileProcessor]
"""
def __init__(self,
dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
async_mode=True):
"""
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param file_paths: list of file paths that contain DAG definitions
:type file_paths: list[unicode]
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path)
:type processor_factory: (unicode, unicode, list) -> (AbstractDagFileProcessor)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param signal_conn: connection to communicate signal with processor agent.
:type signal_conn: airflow.models.connection.Connection
:param async_mode: whether to start the manager in async mode
:type async_mode: bool
"""
self._file_paths = file_paths
self._file_path_queue = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._signal_conn = signal_conn
self._async_mode = async_mode
self._parallelism = conf.getint('scheduler', 'max_threads')
if 'sqlite' in conf.get('core', 'sql_alchemy_conn') and self._parallelism > 1:
self.log.error("Cannot use more than 1 thread when using sqlite. "
"Setting parallelism to 1")
self._parallelism = 1
# Parse and schedule each file no faster than this interval.
self._file_process_interval = conf.getint('scheduler',
'min_file_process_interval')
# How often to print out DAG file processing stats to the log. Default to
# 30 seconds.
self.print_stats_interval = conf.getint('scheduler',
'print_stats_interval')
        # How many seconds to wait for a task heartbeat before marking it as a zombie.
self._zombie_threshold_secs = (
conf.getint('scheduler', 'scheduler_zombie_task_threshold'))
# Map from file path to the processor
self._processors = {}
self._heartbeat_count = 0
# Map from file path to stats about the file
self._file_stats = {} # type: dict(str, DagFileStat)
self._last_zombie_query_time = None
# Last time that the DAG dir was traversed to look for files
self.last_dag_dir_refresh_time = timezone.utcnow()
# Last time stats were printed
self.last_stat_print_time = timezone.datetime(2000, 1, 1)
# TODO: Remove magic number
self._zombie_query_interval = 10
self._zombies = []
# How long to wait before timing out a process to parse a DAG file
self._processor_timeout = processor_timeout
# How often to scan the DAGs directory for new files. Default to 5 minutes.
self.dag_dir_list_interval = conf.getint('scheduler',
'dag_dir_list_interval')
self._log = logging.getLogger('airflow.processor_manager')
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up DAG file processors to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK)
def start(self):
"""
Use multiple processes to parse and generate tasks for the
DAGs in parallel. By processing them in separate processes,
we can get parallelism and isolation from potentially harmful
user code.
"""
self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
self.log.info(
"Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
)
# In sync mode we want timeout=None -- wait forever until a message is received
poll_time = None # type: Optional[float]
if self._async_mode:
poll_time = 0.0
self.log.debug("Starting DagFileProcessorManager in async mode")
else:
poll_time = None
self.log.debug("Starting DagFileProcessorManager in sync mode")
# Used to track how long it takes us to get once around every file in the DAG folder.
self._parsing_start_time = timezone.utcnow()
while True:
loop_start_time = time.time()
if self._signal_conn.poll(poll_time):
agent_signal = self._signal_conn.recv()
                self.log.debug("Received %s signal from DagFileProcessorAgent", agent_signal)
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_HEARTBEAT:
# continue the loop to parse dags
pass
elif not self._async_mode:
# In "sync" mode we don't want to parse the DAGs until we
# are told to (as that would open another connection to the
                # SQLite DB which isn't a good practice)
continue
self._refresh_dag_dir()
self._find_zombies()
simple_dags = self.heartbeat()
for simple_dag in simple_dags:
self._signal_conn.send(simple_dag)
if not self._async_mode:
self.log.debug(
"Waiting for processors to finish since we're using sqlite")
# Wait until the running DAG processors are finished before
# sending a DagParsingStat message back. This means the Agent
# can tell we've got to the end of this iteration when it sees
# this type of message
self.wait_until_finished()
# Collect anything else that has finished, but don't kick off any more processors
simple_dags = self.collect_results()
for simple_dag in simple_dags:
self._signal_conn.send(simple_dag)
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
dag_parsing_stat = DagParsingStat(self._file_paths,
max_runs_reached,
all_files_processed,
)
self._signal_conn.send(dag_parsing_stat)
if max_runs_reached:
self.log.info("Exiting dag parsing loop as all files "
"have been processed %s times", self._max_runs)
break
if self._async_mode:
loop_duration = time.time() - loop_start_time
if loop_duration < 1:
poll_time = 1 - loop_duration
else:
poll_time = 0.0
def _refresh_dag_dir(self):
"""
Refresh file paths from dag dir if we haven't done it for too long.
"""
now = timezone.utcnow()
elapsed_time_since_refresh = (now - self.last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self._dag_directory)
self._file_paths = list_py_file_paths(self._dag_directory)
self.last_dag_dir_refresh_time = now
self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
self.set_file_paths(self._file_paths)
try:
self.log.debug("Removing old import errors")
self.clear_nonexistent_import_errors()
except Exception:
self.log.exception("Error removing old import errors")
def _print_stat(self):
"""
Occasionally print out stats about how fast the files are getting processed
"""
if ((timezone.utcnow() - self.last_stat_print_time).total_seconds() > self.print_stats_interval):
if len(self._file_paths) > 0:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = timezone.utcnow()
@provide_session
def clear_nonexistent_import_errors(self, session):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
"""
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(
~errors.ImportError.filename.in_(self._file_paths)
)
query.delete(synchronize_session='fetch')
session.commit()
def _log_file_processing_stats(self, known_file_paths):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:type known_file_paths: list[unicode]
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path",
"PID",
"Runtime",
"# DAGs",
"# Errors",
"Last Runtime",
"Last Run"]
rows = []
now = timezone.utcnow()
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
num_dags = self.get_last_dag_count(file_path)
num_errors = self.get_last_error_count(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = ((now - processor_start_time).total_seconds() if processor_start_time else None)
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (now - last_run).total_seconds()
Stats.gauge('dag_processing.last_run.seconds_ago.{}'.format(file_name), seconds_ago)
if runtime:
Stats.timing('dag_processing.last_duration.{}'.format(file_name), runtime)
# TODO: Remove before Airflow 2.0
Stats.timing('dag_processing.last_runtime.{}'.format(file_name), runtime)
rows.append((file_path,
processor_pid,
runtime,
num_dags,
num_errors,
last_runtime,
last_run))
        # Sort by longest last runtime. (Can't sort None values in python3)
        rows = sorted(rows, key=lambda x: x[5] or 0.0)
formatted_rows = []
for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:
formatted_rows.append((file_path,
pid,
"{:.2f}s".format(runtime) if runtime else None,
num_dags,
num_errors,
"{:.2f}s".format(last_runtime) if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S") if last_run else None
))
log_str = ("\n" +
"=" * 80 +
"\n" +
"DAG File Processing Stats\n\n" +
tabulate(formatted_rows, headers=headers) +
"\n" +
"=" * 80)
self.log.info(log_str)
@property
def file_paths(self):
return self._file_paths
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:return: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
stat = self._file_stats.get(file_path)
return stat.last_duration if stat else None
def get_last_dag_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of dags loaded from that file, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.num_dags if stat else None
def get_last_error_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of import errors from processing, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.import_errors if stat else None
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
stat = self._file_stats.get(file_path)
return stat.last_finish_time if stat else None
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def get_run_count(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the number of times the given file has been parsed
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.run_count if stat else 0
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:type new_file_paths: list[unicode]
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue
if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
Stats.decr('dag_processing.processes')
processor.terminate()
self._file_stats.pop(file_path)
self._processors = filtered_processors
def wait_until_finished(self):
"""
Sleeps until all the processors are done.
"""
for file_path, processor in self._processors.items():
while not processor.done:
time.sleep(0.1)
def collect_results(self):
"""
Collect the result from any finished DAG processors
:return: a list of SimpleDags that were produced by processors that
have finished since the last time this was called
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
self._kill_timed_out_processors()
finished_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
running_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
for file_path, processor in self._processors.items():
if processor.done:
self.log.debug("Processor for %s finished", file_path)
Stats.decr('dag_processing.processes')
now = timezone.utcnow()
finished_processors[file_path] = processor
stat = DagFileStat(
len(processor.result[0]) if processor.result is not None else 0,
processor.result[1] if processor.result is not None else -1,
now,
(now - processor.start_time).total_seconds(),
self.get_run_count(file_path) + 1,
)
self._file_stats[file_path] = stat
else:
running_processors[file_path] = processor
self._processors = running_processors
self.log.debug("%s/%s DAG parsing processes running",
len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing",
len(self._file_path_queue))
# Collect all the DAGs that were found in the processed files
simple_dags = []
for file_path, processor in finished_processors.items():
if processor.result is None:
self.log.warning(
"Processor for %s exited with return code %s.",
processor.file_path, processor.exit_code
)
else:
for simple_dag in processor.result[0]:
simple_dags.append(simple_dag)
return simple_dags
def heartbeat(self):
"""
This should be periodically called by the manager loop. This method will
kick off new processes to process DAG definition files and read the
results from the finished processors.
:return: a list of SimpleDags that were produced by processors that
have finished since the last time this was called
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
simple_dags = self.collect_results()
# Generate more file paths to process if we processed all the files
# already.
if len(self._file_path_queue) == 0:
self.emit_metrics()
self._parsing_start_time = timezone.utcnow()
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
file_paths_recently_processed = []
for file_path in self._file_paths:
last_finish_time = self.get_last_finish_time(file_path)
if (last_finish_time is not None and
(now - last_finish_time).total_seconds() <
self._file_process_interval):
file_paths_recently_processed.append(file_path)
files_paths_at_run_limit = [file_path
for file_path, stat in self._file_stats.items()
if stat.run_count == self._max_runs]
files_paths_to_queue = list(set(self._file_paths) -
set(file_paths_in_progress) -
set(file_paths_recently_processed) -
set(files_paths_at_run_limit))
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path, processor.start_time.isoformat()
)
self.log.debug(
"Queuing the following files for processing:\n\t%s",
"\n\t".join(files_paths_to_queue)
)
for file_path in files_paths_to_queue:
if file_path not in self._file_stats:
self._file_stats[file_path] = DagFileStat(0, 0, None, None, 0)
self._file_path_queue.extend(files_paths_to_queue)
# Start more processors if we have enough slots and files to process
while (self._parallelism - len(self._processors) > 0 and
len(self._file_path_queue) > 0):
file_path = self._file_path_queue.pop(0)
processor = self._processor_factory(file_path, self._zombies)
Stats.incr('dag_processing.processes')
processor.start()
self.log.debug(
"Started a process (PID: %s) to generate tasks for %s",
processor.pid, file_path
)
self._processors[file_path] = processor
# Update heartbeat count.
self._heartbeat_count += 1
return simple_dags
@provide_session
def _find_zombies(self, session):
"""
        Find zombie task instances, which are tasks that haven't heartbeated
        for too long, and update the current zombie list.
"""
now = timezone.utcnow()
zombies = []
if not self._last_zombie_query_time or \
(now - self._last_zombie_query_time).total_seconds() > self._zombie_query_interval:
# to avoid circular imports
from airflow.jobs import LocalTaskJob as LJ
self.log.info("Finding 'running' jobs without a recent heartbeat")
TI = airflow.models.TaskInstance
limit_dttm = timezone.utcnow() - timedelta(
seconds=self._zombie_threshold_secs)
self.log.info("Failing jobs without heartbeat after %s", limit_dttm)
tis = (
session.query(TI)
.join(LJ, TI.job_id == LJ.id)
.filter(TI.state == State.RUNNING)
.filter(
or_(
LJ.state != State.RUNNING,
LJ.latest_heartbeat < limit_dttm,
)
).all()
)
self._last_zombie_query_time = timezone.utcnow()
for ti in tis:
sti = SimpleTaskInstance(ti)
self.log.info(
"Detected zombie job with dag_id %s, task_id %s, and execution date %s",
sti.dag_id, sti.task_id, sti.execution_date.isoformat())
zombies.append(sti)
self._zombies = zombies
def _kill_timed_out_processors(self):
"""
        Kill any file processors that have timed out, to guard against process hangs.
"""
now = timezone.utcnow()
for file_path, processor in self._processors.items():
duration = now - processor.start_time
if duration > self._processor_timeout:
self.log.info(
"Processor for %s with PID %s started at %s has timed out, "
"killing it.",
processor.file_path, processor.pid, processor.start_time.isoformat())
Stats.decr('dag_processing.processes')
Stats.incr('dag_processing.processor_timeouts')
                # TODO: Remove after Airflow 2.0
Stats.incr('dag_file_processor_timeouts')
processor.kill()
def max_runs_reached(self):
"""
:return: whether all file paths have been processed max_runs times
"""
if self._max_runs == -1: # Unlimited runs.
return False
for stat in self._file_stats.values():
if stat.run_count < self._max_runs:
return False
if self._heartbeat_count < self._max_runs:
return False
return True
def terminate(self):
"""
Stops all running processors
:return: None
"""
for processor in self._processors.values():
Stats.decr('dag_processing.processes')
processor.terminate()
def end(self):
"""
        Kill all child processes on exit since we don't want to leave
        them as orphans.
"""
pids_to_kill = self.get_all_pids()
if len(pids_to_kill) > 0:
# First try SIGTERM
this_process = psutil.Process(os.getpid())
# Only check child processes to ensure that we don't have a case
# where we kill the wrong process because a child process died
# but the PID got reused.
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
for child in child_processes:
self.log.info("Terminating child PID: %s", child.pid)
child.terminate()
# TODO: Remove magic number
timeout = 5
self.log.info("Waiting up to %s seconds for processes to exit...", timeout)
try:
psutil.wait_procs(
child_processes, timeout=timeout,
callback=lambda x: self.log.info('Terminated PID %s', x.pid))
except psutil.TimeoutExpired:
self.log.debug("Ran out of time while waiting for processes to exit")
# Then SIGKILL
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
if len(child_processes) > 0:
self.log.info("SIGKILL processes that did not terminate gracefully")
for child in child_processes:
self.log.info("Killing child PID: %s", child.pid)
child.kill()
child.wait()
def emit_metrics(self):
"""
        Emit metrics about the DAG parsing summary
This is called once every time around the parsing "loop" - i.e. after
all files have been parsed.
"""
parse_time = (timezone.utcnow() - self._parsing_start_time).total_seconds()
Stats.gauge('dag_processing.total_parse_time', parse_time)
Stats.gauge('dagbag_size', sum(stat.num_dags for stat in self._file_stats.values()))
Stats.gauge('dag_processing.import_errors',
sum(stat.import_errors for stat in self._file_stats.values()))
# TODO: Remove before Airflow 2.0
Stats.gauge('collect_dags', parse_time)
Stats.gauge('dagbag_import_errors', sum(stat.import_errors for stat in self._file_stats.values()))
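

# Hedged sketch, not part of the original module: heartbeat() above is documented
# as something that "should be periodically called by the manager loop".  The
# helper below illustrates one way such a loop could drive the methods defined
# above; the function name and the one-second sleep are illustrative only.
def _example_manager_loop(manager):
    """Drive heartbeat() until every file path has reached its max_runs limit."""
    collected_simple_dags = []
    while not manager.max_runs_reached():
        # heartbeat() starts new file processors (up to the configured
        # parallelism) and returns the SimpleDags collected from processors
        # that finished since the previous call.
        collected_simple_dags.extend(manager.heartbeat())
        time.sleep(1)
    # stop any processors that are still running before returning
    manager.terminate()
    return collected_simple_dags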
|
multi_inference_20200804110857.py
|
# coding=utf-8
import os
import cv2
import sys
import pdb
import subprocess
import multiprocessing
import inference_utils
import common
def single_process(index, task, gpu):
    print("Task %d is processing %d images" % (index, len(task)))
    # Write the image list for this task to a file
filename = inference_utils.dump_testfile(task, index)
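    # note: `file`, `out_dir` and `batch_size` below are module-level globals set
    # under the __main__ guard, so this worker relies on the (Linux default) fork
    # start method to inherit them.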
out_str = subprocess.check_output(["python", file, "--gpuid=%s" % str(gpu), "--img_list=%s" % filename, "--out_dir=%s" % out_dir, "--batch_size=%d" % batch_size])
    print("Task %d finished!" % index)
if "__main__" == __name__:
gpu_list = [1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7]
file = "tools/multi_process_inference/inference.py"
img_dir = '/home/songbai.xb/workspace/projects/TAO/data/TAO/frames/train/'
out_dir = './tmp/file/train_nonms/'
batch_size = 1
    # Parse the image directory
img_list = common.load_filepaths(img_dir, suffix=('.jpg', '.png', '.jpeg'), recursive=True)
#names = demo_utils.parse_testfile(testfile)
    print(f"{len(img_list)} images in total")
    # Split the images into tasks
task_num = len(gpu_list)
tasks = inference_utils.chunks(img_list, task_num)
    # Create worker processes
processes=list()
for idx, (task, gpu) in enumerate(zip(tasks, gpu_list)):
processes.append(multiprocessing.Process(target=single_process,args=(idx, task, gpu)))
for process in processes:
process.start()
for process in processes:
process.join()
|
test_end2end.py
|
"""End to end tests for Selenium Wire."""
import json
import os
import shutil
import tempfile
import threading
from contextlib import contextmanager
from glob import glob
from pathlib import Path
from unittest.mock import patch
import pytest
from selenium.common.exceptions import TimeoutException
import seleniumwire
from seleniumwire import webdriver
from seleniumwire.thirdparty.mitmproxy.exceptions import ServerException
from tests import utils as testutils
@pytest.fixture(scope='module')
def httpbin():
# This module scoped Httpbin fixture uses HTTPS
with create_httpbin() as httpbin:
yield httpbin
@contextmanager
def create_httpbin(port=8085, use_https=True):
httpbin = testutils.get_httpbin(port, use_https)
try:
yield httpbin
finally:
httpbin.close()
@pytest.fixture(scope='module')
def httpproxy():
with create_httpproxy() as proxy:
yield proxy
@contextmanager
def create_httpproxy(port=8086, mode='http', auth=''):
httpproxy = testutils.get_proxy(port, mode, auth)
try:
yield httpproxy
finally:
httpproxy.close()
@pytest.fixture(scope='module')
def socksproxy():
httpproxy = testutils.get_proxy(port=8087, mode='socks')
yield httpproxy
httpproxy.close()
@pytest.fixture
def driver_path():
return str(Path(__file__).parent / Path('linux', 'chromedriver'))
@pytest.fixture
def chrome_options():
options = webdriver.ChromeOptions()
options.binary_location = testutils.get_headless_chromium()
return options
@pytest.fixture
def driver(driver_path, chrome_options):
with create_driver(driver_path, chrome_options) as driver:
yield driver
@contextmanager
def create_driver(
driver_path,
chrome_options,
seleniumwire_options=None,
desired_capabilities=None,
):
driver = webdriver.Chrome(
executable_path=driver_path,
options=chrome_options,
seleniumwire_options=seleniumwire_options,
desired_capabilities=desired_capabilities,
)
try:
yield driver
finally:
driver.quit()
def teardown_function():
try:
(Path(__file__).parent / Path('linux', 'chrome_debug.log')).unlink()
except FileNotFoundError:
pass
try:
(Path(__file__).parent / Path('html.html')).unlink()
except FileNotFoundError:
pass
shutil.rmtree(Path(__file__).parent / Path('linux', 'locales'), ignore_errors=True)
shutil.rmtree(Path(__file__).parent / 'chrome_tmp', ignore_errors=True)
def test_capture_requests(driver, httpbin):
driver.get(f'{httpbin}/html')
assert driver.requests
assert all(r.response is not None for r in driver.requests)
del driver.requests
assert not driver.requests
def test_last_request(driver, httpbin):
driver.get(f'{httpbin}/html')
driver.get(f'{httpbin}/anything')
assert driver.last_request.url == f'{httpbin}/anything'
def test_wait_for_request(driver, httpbin):
driver.get(f'{httpbin}/html')
driver.get(f'{httpbin}/anything/hello/world')
driver.get(f'{httpbin}/anything/foo/bar/baz?spam=eggs')
request = driver.wait_for_request(r'\/hello\/')
assert request.url == f'{httpbin}/anything/hello/world'
def test_wait_for_request_timeout(driver, httpbin):
driver.get(f'{httpbin}/html')
with pytest.raises(TimeoutException):
driver.wait_for_request(r'\/hello\/', timeout=2)
def test_scopes(driver, httpbin):
driver.scopes = ['.*/anything/.*']
driver.get(f'{httpbin}/anything/hello/world')
driver.get(f'{httpbin}/html')
assert len(driver.requests) == 1
assert driver.requests[0].url == f'{httpbin}/anything/hello/world'
def test_add_request_header(driver, httpbin):
def interceptor(req):
req.headers['X-New-Header'] = 'test'
driver.request_interceptor = interceptor
driver.get(f'{httpbin}/headers')
data = json.loads(driver.last_request.response.body.decode('utf-8'))
assert data['headers']['X-New-Header'] == 'test'
def test_replace_request_header(driver, httpbin):
def interceptor(req):
del req.headers['User-Agent']
req.headers['User-Agent'] = 'test_user_agent'
driver.request_interceptor = interceptor
driver.get(f'{httpbin}/headers')
data = json.loads(driver.last_request.response.body.decode('utf-8'))
assert data['headers']['User-Agent'] == 'test_user_agent'
def test_add_duplicate_request_header(driver, httpbin):
def interceptor(req):
del req.headers['Referer']
req.headers['Referer'] = 'some_referer'
# Adding a header that already exists will add a duplicate
# header rather than overwriting the existing header.
req.headers['Referer'] = 'another_referer'
driver.request_interceptor = interceptor
driver.get(f'{httpbin}/headers')
data = json.loads(driver.last_request.response.body.decode('utf-8'))
assert data['headers']['Referer'] == 'some_referer,another_referer'
def test_add_response_header(driver, httpbin):
def interceptor(req, res):
# Causes the browser to trigger a download rather
# than render the page.
res.headers['Content-Disposition'] = 'attachment'
driver.response_interceptor = interceptor
driver.get(f'{httpbin}/html')
# We don't expect to find this text in the page because
# the HTML wasn't rendered.
assert 'Herman Melville' not in driver.page_source
def test_add_request_parameter(driver, httpbin):
def interceptor(req):
params = req.params
params['foo'] = 'bar'
req.params = params
driver.request_interceptor = interceptor
driver.get(f'{httpbin}/get?spam=eggs')
data = json.loads(driver.last_request.response.body.decode('utf-8'))
assert data['args'] == {'foo': 'bar', 'spam': 'eggs'}
def test_update_json_post_request(driver_path, chrome_options, httpbin):
# We need to start Chrome with --disable-web-security so that it
# can post JSON from a file-based form to our httpbin endpoint.
# Without that option the AJAX post would be blocked by CORS.
chrome_options.add_argument('--disable-web-security')
chrome_data_dir = Path(__file__).parent / 'chrome_tmp'
chrome_options.add_argument(f'--user-data-dir={str(chrome_data_dir)}')
def interceptor(req):
if req.method == 'POST' and req.headers['Content-Type'] == 'application/json':
# We expect the request body to contain the JSON:
# '{ "hello": "world", "spam": "eggs" }'
body = req.body.decode('utf-8')
data = json.loads(body)
data['foo'] = 'bar' # Add a new property
req.body = json.dumps(data).encode('utf-8')
del req.headers['Content-Length']
req.headers['Content-Length'] = str(len(req.body))
with create_driver(driver_path, chrome_options) as driver:
driver.request_interceptor = interceptor
form = Path(__file__).parent / 'jsonform.html'
driver.get(f'file:///{str(form)}')
button = driver.find_element_by_id('submit')
button.click() # Makes Ajax request so need to wait for it
request = driver.wait_for_request('/post')
resp_body = json.loads(request.response.body.decode('utf-8'))
assert resp_body['json'] == {'hello': 'world', 'spam': 'eggs', 'foo': 'bar'}
def test_block_a_request(driver, httpbin):
def interceptor(req):
req.abort()
driver.request_interceptor = interceptor
driver.get(f'{httpbin}/image/png')
assert driver.last_request.response.status_code == 403
def test_mock_a_response(driver, httpbin):
def interceptor(req):
if req.url == f'{httpbin}/html':
req.create_response(
status_code=200, headers={'Content-Type': 'text/html'}, body='<html>Hello World!</html>'
)
driver.request_interceptor = interceptor
driver.get(f'{httpbin}/html')
assert 'Hello World!' in driver.page_source
def test_upstream_http_proxy(driver_path, chrome_options, httpbin, httpproxy):
sw_options = {'proxy': {'https': f'{httpproxy}'}}
with create_driver(driver_path, chrome_options, sw_options) as driver:
driver.get(f'{httpbin}/html')
assert 'This passed through a http proxy' in driver.page_source
def test_upstream_http_proxy_basic_auth(driver_path, chrome_options, httpbin):
with create_httpproxy(port=8888, auth='test:test') as httpproxy:
sw_options = {'proxy': {'https': f'{httpproxy}'}}
with create_driver(driver_path, chrome_options, sw_options) as driver:
driver.get(f'{httpbin}/html')
assert 'This passed through a authenticated http proxy' in driver.page_source
def test_upstream_http_proxy_basic_auth_empty_pass(driver_path, chrome_options, httpbin):
with create_httpproxy(port=8888, auth='test:') as httpproxy:
sw_options = {'proxy': {'https': f'{httpproxy}'}}
with create_driver(driver_path, chrome_options, sw_options) as driver:
driver.get(f'{httpbin}/html')
assert 'This passed through a authenticated http proxy' in driver.page_source
def test_upstream_http_proxy_custom_auth(driver_path, chrome_options, httpbin):
with create_httpproxy(port=8088, auth='test:test'):
sw_options = {
'proxy': {
'https': 'https://localhost:8088',
'custom_authorization': 'Basic dGVzdDp0ZXN0', # Omit newline from end of the string
},
}
with create_driver(driver_path, chrome_options, sw_options) as driver:
driver.get(f'{httpbin}/html')
assert 'This passed through a authenticated http proxy' in driver.page_source
def test_upstream_socks_proxy(driver_path, chrome_options, httpbin, socksproxy):
"""Note that authenticated socks proxy is not supported by mitmproxy currently
so we're only able to test unauthenticated.
"""
sw_options = {'proxy': {'https': f'{socksproxy}'}}
with create_driver(driver_path, chrome_options, sw_options) as driver:
driver.get(f'{httpbin}/html')
assert 'This passed through a socks proxy' in driver.page_source
def test_bypass_upstream_proxy_when_target_http(driver_path, chrome_options, httpproxy):
sw_options = {'proxy': {'https': f'{httpproxy}', 'no_proxy': 'localhost:9091'}}
with create_httpbin(port=9091, use_https=False) as httpbin:
with create_driver(driver_path, chrome_options, sw_options) as driver:
driver.get(f'{httpbin}/html') # Scheme is http://
assert 'Moby-Dick' in driver.page_source
assert 'This passed through a http proxy' not in driver.page_source
def test_bypass_upstream_proxy_when_target_https(driver_path, chrome_options, httpbin, httpproxy):
sw_options = {'proxy': {'https': f'{httpproxy}', 'no_proxy': 'localhost:8085'}}
with create_driver(driver_path, chrome_options, sw_options) as driver:
driver.get(f'{httpbin}/html') # Scheme is https://
assert 'Moby-Dick' in driver.page_source
assert 'This passed through a http proxy' not in driver.page_source
def test_upstream_http_proxy_env_var(driver_path, chrome_options, httpbin, httpproxy):
with patch.dict(os.environ, {'HTTPS_PROXY': f'{httpproxy}'}):
with create_driver(driver_path, chrome_options) as driver:
driver.get(f'{httpbin}/html')
assert 'This passed through a http proxy' in driver.page_source
def test_no_auto_config(driver_path, chrome_options, httpbin):
sw_options = {'auto_config': False}
with create_driver(driver_path, chrome_options, sw_options) as driver:
driver.get(f'{httpbin}/html')
assert not driver.requests
def test_no_auto_config_manual_proxy(driver_path, chrome_options, httpbin):
"""This demonstrates how you would separate browser proxy configuration
from Selenium Wire proxy configuration.
You might want to do this if you need the browser to address
Selenium Wire using a different IP/host than what Selenium Wire uses
by default. E.g. A dynamic hostname for a container setup.
"""
capabilities = webdriver.DesiredCapabilities.CHROME.copy()
capabilities['proxy'] = {
'proxyType': 'manual',
'sslProxy': '{}:{}'.format('localhost', 8088),
}
capabilities['acceptInsecureCerts'] = True
sw_options = {
'auto_config': False,
'addr': '127.0.0.1',
'port': 8088,
}
with create_driver(
driver_path,
chrome_options,
sw_options,
capabilities,
) as driver:
driver.get(f'{httpbin}/html')
driver.wait_for_request('/html')
def test_exclude_hosts(driver_path, chrome_options, httpbin):
httpbin2 = testutils.get_httpbin(port=8090)
sw_options = {'exclude_hosts': ['<-loopback>', 'localhost:8085']}
with create_driver(driver_path, chrome_options, sw_options) as driver:
driver.get(f'{httpbin}/html')
driver.get(f'{httpbin2}/html')
assert len(driver.requests) == 1
assert driver.requests[0].url == f'{httpbin2}/html'
@pytest.mark.skip("Fails on GitHub Actions - chromedriver threads timeout")
def test_multiple_threads(driver_path, chrome_options, httpbin):
num_threads = 5
threads, results = [], []
def run_driver():
with create_driver(driver_path, chrome_options) as driver:
driver.get(f'{httpbin}/html')
request = driver.wait_for_request('/html')
results.append(request)
for i in range(num_threads):
t = threading.Thread(name=f'Driver thread {i + 1}', target=run_driver)
threads.append(t)
t.start()
for t in threads:
t.join(timeout=10)
assert len(results) == num_threads
def test_ignore_http_methods(driver_path, chrome_options, httpbin):
sw_options = {'ignore_http_methods': ['GET']}
with create_driver(driver_path, chrome_options, sw_options) as driver:
driver.get(f'{httpbin}/html')
assert not driver.requests
def test_address_in_use(driver_path, chrome_options, httpbin):
sw_options = {
'addr': '127.0.0.1',
'port': 8089,
}
with create_driver(driver_path, chrome_options, sw_options):
with pytest.raises(ServerException, match='.*Address already in use.*'):
with create_driver(driver_path, chrome_options, sw_options):
pass
def test_har(driver_path, chrome_options, httpbin):
with create_driver(driver_path, chrome_options, {'enable_har': True}) as driver:
driver.get(f'{httpbin}/html')
har = json.loads(driver.har)
assert har['log']['creator']['comment'] == f'Selenium Wire version {seleniumwire.__version__}'
assert len(har['log']['entries']) == 1
assert har['log']['entries'][0]['request']['url'] == f'{httpbin}/html'
assert har['log']['entries'][0]['response']['status'] == 200
def test_disable_capture(driver_path, chrome_options, httpbin):
sw_options = {'disable_capture': True}
with create_driver(driver_path, chrome_options, sw_options) as driver:
driver.get(f'{httpbin}/html')
assert not driver.requests
def test_in_memory_storage(driver_path, chrome_options, httpbin):
sw_options = {
'request_storage': 'memory',
'request_storage_base_dir': f'{tempfile.gettempdir()}/sw_memory',
'enable_har': True,
}
with create_driver(driver_path, chrome_options, sw_options) as driver:
        driver.get(f'{httpbin}/html')
        driver.get(f'{httpbin}/anything')
assert not glob(os.path.join(driver.proxy.storage.home_dir, 'storage*'))
assert len(driver.requests) == 2
assert driver.last_request.url == f'{httpbin}/anything'
assert driver.wait_for_request('/anything')
assert [r.url for r in driver.iter_requests()] == [f'{httpbin}/html', f'{httpbin}/anything']
assert [e['request']['url'] for e in json.loads(driver.har)['log']['entries']] == [
f'{httpbin}/html',
f'{httpbin}/anything',
]
assert driver.last_request.cert
def test_switch_proxy_on_the_fly():
pass
|
progress.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Progression module
------------------
This module provides the (so far) four variants to display progress information:
* :py:class:`.ProgressBar`
This class monitors one or multiple processes showing the total elapsed time (TET), the current speed
    estimated from the most recent updates, a colored bar showing the progress and an
estimate for the remaining time, also called time to go (TTG).
.. raw:: html
<div class="widget-html">
<style>.widget-html{font-family:monospace;
color: #c0c0c0;
background-color:black}</style>
<pre> 5.83s [7.2c/s] <span style="color:#00ff00"><b>[=====================> ]</b></span> TTG 8.05s</pre>
</div>
* :py:class:`.ProgressBarCounter`
    If a single process is intended to do several sequential tasks, the :py:class:`.ProgressBarCounter` class can keep track of the number
of accomplished tasks on top of monitoring the individual task just like :py:class:`.ProgressBar` does.
.. raw:: html
<div class="widget-html">
<style>.widget-html{font-family:monospace;
color: #c0c0c0;
background-color:black}</style>
<pre><span style="color:#00ff00"><b> [</b><b>TET</b>-5.83s-----[7.2c/s]-<b>TTG</b>-8.05s-></span> 42.0% <b>ETA</b> 20161011_16:52:52 <b>ORT</b> 00:00:13<b><span style="color:#00ff00">]</span></b></pre>
</div>
* :py:class:`.ProgressBarFancy`
This class intends to be a replacement for :py:class:`.ProgressBar` with slightly more information and
better handling of small terminal widths.
.. raw:: html
<div class="widget-html">
<style>.widget-html{font-family:monospace;
color: #c0c0c0;
background-color:black}</style>
<pre> 00:00:35 [1.4c/min] <span style="color:#00ff00">#3</span> - 5.83s [7.2c/s] <span style="color:#00ff00"><b>[===========> ]</b></span> TTG 8.05s</pre>
</div>
* :py:class:`.ProgressBarCounterFancy`
Just as :py:class:`.ProgressBarFancy` this replaces :py:class:`.ProgressBarCounter`.
.. raw:: html
<div class="widget-html">
<style>.widget-html{font-family:monospace;
color: #c0c0c0;
background-color:black}</style>
<pre> 00:00:35 [1.4c/min] <span style="color:#00ff00">#3</span> - <span style="color:#800000"></span><span style="color:#00ff00"><b>[</b><b>E</b>-5.83s-----[7.2c/s]-<b>G</b>-8</span>.05s 42.0% <b>O</b> 00:00:13<b><span style="color:#00ff00">]</span></b></pre>
</div>
.. autoclass:: Progress
:members:
:inherited-members:
.. autoclass:: ProgressBar
:members:
.. autoclass:: ProgressBarCounter
:members:
.. autoclass:: ProgressBarFancy
:members:
.. autoclass:: ProgressBarCounterFancy
:members:
.. autofunction:: UnsignedIntValue
.. autofunction:: FloatValue
.. autofunction:: StringValue
"""
from __future__ import division, print_function
import datetime
import io
import logging
from logging.handlers import QueueHandler, QueueListener
import math
import multiprocessing as mp
from multiprocessing.sharedctypes import Synchronized
import os
import sys
import signal
import subprocess as sp
import threading
import time
import traceback
import warnings
from . import terminal
import platform
_IPYTHON = True
try:
import ipywidgets
except ImportError:
_IPYTHON = False
warnings.warn("could not load ipywidgets (IPython HTML output will not work)", category=ImportWarning)
except DeprecationWarning:
pass
try:
from IPython.display import display
except ImportError:
_IPYTHON = False
warnings.warn("could not load IPython (IPython HTML output will not work)", category=ImportWarning)
# Magic conversion from 3 to 2
if sys.version_info[0] == 2:
ProcessLookupError = OSError
inMemoryBuffer = io.BytesIO
old_math_ceil = math.ceil
def my_int_ceil(f):
return int(old_math_ceil(f))
math.ceil = my_int_ceil
_jm_compatible_bytearray = lambda x: x
class TimeoutError(Exception):
pass
elif sys.version_info[0] == 3:
inMemoryBuffer = io.StringIO
_jm_compatible_bytearray = bytearray
class MultiLineFormatter(logging.Formatter):
"""pads a multiline log message with spaces such that
<HEAD> msg_line1
msg_line2
...
"""
def format(self, record):
_str = logging.Formatter.format(self, record)
header = _str.split(record.message)[0]
_str = _str.replace('\n', '\n' + ' '*len(header))
return _str
# def_handl = logging.StreamHandler(stream = sys.stderr) # the default handler simply uses stderr
# def_handl.setLevel(logging.DEBUG) # ... listens to all messages
fmt = MultiLineFormatter('%(asctime)s %(name)s %(levelname)s : %(message)s')
# def_handl.setFormatter(fmt) # ... and pads multiline messages
log = logging.getLogger(__name__) # creates the default log for this module
# log.addHandler(def_handl)
class LoopExceptionError(RuntimeError):
pass
class LoopInterruptError(Exception):
pass
class StdoutPipe(object):
"""replacement for stream objects such as stdout which
forwards all incoming data using the send method of a
connection
example usage:
>>> import sys
>>> from multiprocessing import Pipe
>>> from progression import StdoutPipe
>>> conn_recv, conn_send = Pipe(False)
>>> sys.stdout = StdoutPipe(conn_send)
    >>> print("hallo welt", end='') # this is now going through the pipe
>>> msg = conn_recv.recv()
>>> sys.stdout = sys.__stdout__
>>> print(msg)
hallo welt
>>> assert msg == "hallo welt"
"""
def __init__(self, conn):
self.conn = conn
def flush(self):
pass
def write(self, b):
self.conn.send(b)
class PipeToPrint(object):
def __call__(self, b):
print(b, end='')
def close(self):
pass
class PipeFromProgressToIPythonHTMLWidget(object):
def __init__(self):
self.htmlWidget = ipywidgets.widgets.HTML()
display(self.htmlWidget)
self._buff = ""
def __call__(self, b):
self._buff += b
if b.endswith(terminal.ESC_MY_MAGIC_ENDING):
buff = terminal.ESC_SEQ_to_HTML(self._buff)
self.htmlWidget.value = '<style>.widget-html{font-family:monospace}</style><pre>'+buff+'</pre>'
self._buff = ""
def close(self):
self.htmlWidget.close()
PipeHandler = PipeToPrint
def choose_pipe_handler(kind = 'print', color_theme = None):
global PipeHandler
if kind == 'print':
PipeHandler = PipeToPrint
if color_theme is None:
choose_color_theme('term_default')
else:
choose_color_theme(color_theme)
elif kind == 'ipythonhtml':
if _IPYTHON:
PipeHandler = PipeFromProgressToIPythonHTMLWidget
if color_theme is None:
choose_color_theme('ipyt_default')
else:
choose_color_theme(color_theme)
else:
warnings.warn("can not choose ipythonHTML (IPython and/or ipywidgets were not loaded)")
else:
raise ValueError("unknown kind for pipe handler '{}'. Choices are 'print' and 'ipythonhtml'".format(kind))
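# Hedged sketch, not from the original source: choosing the pipe handler before
# any progress object is created.  It only uses the two `kind` values accepted
# by choose_pipe_handler above; the fallback theme 'term_default' mirrors the
# default used there.
def _example_choose_handler():
    if _IPYTHON:
        # inside a notebook with ipywidgets available, render output as an HTML widget
        choose_pipe_handler('ipythonhtml')
    else:
        # otherwise stick to plain terminal printing
        choose_pipe_handler('print', color_theme='term_default')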
def get_terminal_width():
if PipeHandler == PipeToPrint:
return terminal.get_terminal_width()
elif PipeHandler == PipeFromProgressToIPythonHTMLWidget:
return 80
else:
raise NotImplementedError
def get_identifier(name=None, pid=None, bold=True):
if pid is None:
pid = os.getpid()
if bold:
esc_bold = terminal.ESC_BOLD
esc_no_char_attr = terminal.ESC_NO_CHAR_ATTR
else:
esc_bold = ""
esc_no_char_attr = ""
if name is None:
return "{}PID_{}{}".format(esc_bold, pid, esc_no_char_attr)
else:
return "{}{}_{}{}".format(esc_bold, name, pid, esc_no_char_attr)
def _loop_wrapper_func(func, args, shared_mem_run, shared_mem_pause, interval, sigint, sigterm, name,
logging_level, conn_send, func_running, log_queue):
"""
    to be executed as a separate process (that's why this function is declared at module level)
"""
prefix = get_identifier(name) + ' '
global log
log = logging.getLogger(__name__+".log_{}".format(get_identifier(name, bold=False)))
log.setLevel(logging_level)
log.addHandler(QueueHandler(log_queue))
sys.stdout = StdoutPipe(conn_send)
log.debug("enter wrapper_func")
SIG_handler_Loop(sigint, sigterm, log, prefix)
func_running.value = True
error = False
while shared_mem_run.value:
try:
# in pause mode, simply sleep
if shared_mem_pause.value:
quit_loop = False
else:
# if not pause mode -> call func and see what happens
try:
quit_loop = func(*args)
except LoopInterruptError:
raise
except Exception as e:
log.error("error %s occurred in loop calling 'func(*args)'", type(e))
log.info("show traceback.print_exc()\n%s", traceback.format_exc())
error = True
break
if quit_loop is True:
                log.debug("loop stopped because func returned True")
break
time.sleep(interval)
except LoopInterruptError:
log.debug("quit wrapper_func due to InterruptedError")
break
func_running.value = False
if error:
sys.exit(-1)
else:
log.debug("wrapper_func terminates gracefully")
# gets rid of the following warnings
# Exception ignored in: <_io.FileIO name='/dev/null' mode='rb'>
# ResourceWarning: unclosed file <_io.TextIOWrapper name='/dev/null' mode='r' encoding='UTF-8'>
try:
if mp.get_start_method() == "spawn":
sys.stdin.close()
except AttributeError:
pass
class LoopTimeoutError(TimeoutError):
pass
class Loop(object):
"""
    class to run a function periodically in a separate process.
In case the called function returns True, the loop will stop.
Otherwise a time interval given by interval will be slept before
another execution is triggered.
The shared memory variable _run (accessible via the class property run)
    also determines if the function is executed another time. If set to False
the execution stops.
For safe cleanup (and in order to catch any Errors)
it is advisable to instantiate this class
using 'with' statement as follows:
with Loop(**kwargs) as my_loop:
my_loop.start()
...
this will guarantee you that the spawned loop process is
down when exiting the 'with' scope.
The only circumstance where the process is still running is
when you set auto_kill_on_last_resort to False and answer the
question to send SIGKILL with no.
"""
def __init__(self,
func,
args = (),
interval = 1,
verbose = None,
sigint = 'stop',
sigterm = 'stop',
auto_kill_on_last_resort = False,
raise_error = True):
"""
func [callable] - function to be called periodically
args [tuple] - arguments passed to func when calling
        interval [pos number] - time to "sleep" between each call
verbose - DEPRECATED, only kept for compatibility, use global log.level to
specify verbosity
sigint [string] - signal handler string to set SIGINT behavior (see below)
sigterm [string] - signal handler string to set SIGTERM behavior (see below)
auto_kill_on_last_resort [bool] - If set False (default), ask user to send SIGKILL
        to loop process in case normal stop and SIGTERM failed. If set True, send SIGKILL
without asking.
the signal handler string may be one of the following
        ign: ignore the incoming signal
stop: raise InterruptedError which is caught silently.
"""
self._proc = None
if verbose is not None:
log.warning("verbose is deprecated, only allowed for compatibility")
warnings.warn("verbose is deprecated", DeprecationWarning)
self.func = func
self.args = args
self.interval = interval
assert self.interval >= 0
self._run = mp.Value('b', False)
self._pause = mp.Value('b', False)
self._func_running = mp.Value('b', False)
self._sigint = sigint
self._sigterm = sigterm
self._auto_kill_on_last_resort = auto_kill_on_last_resort
log.debug("auto_kill_on_last_resort = %s", self._auto_kill_on_last_resort)
self._monitor_thread = None
self.pipe_handler = PipeHandler()
self.raise_error = raise_error
def __enter__(self):
return self
def __exit__(self, *exc_args):
if self.is_alive():
log.debug("loop is still running on context exit")
else:
log.debug("loop has stopped on context exit")
self.stop()
def __cleanup(self):
"""
Wait at most twice as long as the given repetition interval
for the _wrapper_function to terminate.
If after that time the _wrapper_function has not terminated,
        send SIGTERM to the process.
Wait at most five times as long as the given repetition interval
for the _wrapper_function to terminate.
        If the process is still running, send SIGKILL automatically if
auto_kill_on_last_resort was set True or ask the
user to confirm sending SIGKILL
"""
# set run to False and wait some time -> see what happens
self._run.value = False
if check_process_termination(proc = self._proc,
timeout = 2*self.interval,
prefix = '',
auto_kill_on_last_resort = self._auto_kill_on_last_resort):
log.debug("cleanup successful")
else:
raise RuntimeError("cleanup FAILED!")
try:
self.conn_send.close()
self._log_queue_listener.stop()
except OSError:
pass
log.debug("wait for monitor thread to join")
self._monitor_thread.join()
        log.debug("monitor thread joined")
self._func_running.value = False
def _monitor_stdout_pipe(self):
while True:
try:
b = self.conn_recv.recv()
self.pipe_handler(b)
except EOFError:
break
def start(self, timeout=None):
"""
        uses multiprocessing.Process to call _loop_wrapper_func in a subprocess
"""
if self.is_alive():
log.warning("a process with pid %s is already running", self._proc.pid)
return
self._run.value = True
self._func_running.value = False
name = self.__class__.__name__
self.conn_recv, self.conn_send = mp.Pipe(False)
self._monitor_thread = threading.Thread(target = self._monitor_stdout_pipe)
self._monitor_thread.daemon=True
self._monitor_thread.start()
log.debug("started monitor thread")
self._log_queue = mp.Queue()
self._log_queue_listener = QueueListener(self._log_queue, *log.handlers)
self._log_queue_listener.start()
args = (self.func, self.args, self._run, self._pause, self.interval,
self._sigint, self._sigterm, name, log.level, self.conn_send,
self._func_running, self._log_queue)
self._proc = mp.Process(target = _loop_wrapper_func,
args = args)
self._proc.start()
log.info("started a new process with pid %s", self._proc.pid)
log.debug("wait for loop function to come up")
t0 = time.time()
while not self._func_running.value:
if self._proc.exitcode is not None:
exc = self._proc.exitcode
self._proc = None
if exc == 0:
log.warning("wrapper function already terminated with exitcode 0\nloop is not running")
return
else:
                    raise LoopExceptionError("the loop function returned non-zero exitcode ({})!\n".format(exc)+
"see log (INFO level) for traceback information")
time.sleep(0.1)
if (timeout is not None) and ((time.time() - t0) > timeout):
err_msg = "could not bring up function on time (timeout: {}s)".format(timeout)
log.error(err_msg)
log.info("either it takes too long to spawn the subprocess (increase the timeout)\n"+
"or an internal error occurred before reaching the function call")
raise LoopTimeoutError(err_msg)
log.debug("loop function is up ({})".format(humanize_time(time.time()-t0)))
def stop(self):
"""
stops the process triggered by start
        Sets the shared memory boolean run to False, which should prevent
        the loop from repeating, then calls __cleanup to make sure the
        process has stopped. After that start() may be triggered again.
"""
if self.is_alive():
self._proc.terminate()
if self._proc is not None:
self.__cleanup()
if self.raise_error:
if self._proc.exitcode == 255:
                    raise LoopExceptionError("the loop function returned non-zero exitcode ({})!\n".format(self._proc.exitcode)+
"see log (INFO level) for traceback information")
self.pipe_handler.close()
self._proc = None
def join(self, timeout):
"""
calls join for the spawned process with given timeout
"""
if self.is_alive():
self._proc.join(timeout)
def is_alive(self):
if self._proc is None:
return False
else:
return self._proc.is_alive()
def is_running(self):
if self.is_alive():
return self._func_running.value
else:
return False
def pause(self):
if self._run.value:
self._pause.value = True
log.debug("process with pid %s paused", self._proc.pid)
def resume(self):
if self._run.value:
self._pause.value = False
log.debug("process with pid %s resumed", self._proc.pid)
def getpid(self):
if self._proc is not None:
return self._proc.pid
else:
return None
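# Hedged sketch, not from the original source: minimal Loop usage following the
# 'with' pattern recommended in the class docstring.  The callback and timings
# are illustrative; the local callback relies on a fork-based start method
# (the Linux default), since it is not picklable.
def _example_loop_usage():
    def _tick():
        print("tick")
        return False              # returning True would stop the loop
    with Loop(func=_tick, interval=0.5) as loop:
        loop.start()
        time.sleep(2)             # let the loop tick a few times
    # leaving the 'with' block stops the loop and cleans up the subprocess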
def show_stat_base(count_value, max_count_value, prepend, speed, tet, ttg, width, **kwargs):
"""A function that formats the progress information
This function will be called periodically for each progress that is monitored.
    Overwrite this function in a subclass to implement a specific formatting of the progress information
:param count_value: a number holding the current state
:param max_count_value: should be the largest number `count_value` can reach
:param prepend: additional text for each progress
:param speed: the speed estimation
:param tet: the total elapsed time
:param ttg: the time to go
:param width: the width for the progressbar, when set to `"auto"` this function
should try to detect the width available
:type width: int or "auto"
"""
raise NotImplementedError
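# Hedged sketch, not from the original source: a minimal formatter following the
# signature documented in show_stat_base above.  Note that the wrapper functions
# below pass the progress index `i` positionally as well, so a custom formatter
# has to accept it (compare show_stat_ProgressBar further down).
def _example_show_stat_minimal(count_value, max_count_value, prepend, speed,
                               tet, ttg, width, i, **kwargs):
    if max_count_value:
        print("{}{} / {} [{}] TET {}".format(prepend, count_value, max_count_value,
                                             humanize_speed(speed), humanize_time(tet)))
    else:
        # no maximum known: only show the absolute count
        print("{}{} [{}] TET {}".format(prepend, count_value,
                                        humanize_speed(speed), humanize_time(tet)))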
def _show_stat_wrapper_Progress(count, last_count, start_time, max_count, speed_calc_cycles,
width, q, last_speed, prepend, show_stat_function, add_args,
i, lock):
"""
calculate
"""
count_value, max_count_value, speed, tet, ttg, = Progress._calc(count,
last_count,
start_time,
max_count,
speed_calc_cycles,
q,
last_speed,
lock)
return show_stat_function(count_value, max_count_value, prepend, speed, tet, ttg, width, i, **add_args)
def _show_stat_wrapper_multi_Progress(count, last_count, start_time, max_count, speed_calc_cycles,
width, q, last_speed, prepend, show_stat_function, len_,
add_args, lock, info_line, no_move_up=False, emtpy_lines_at_end = 0):
"""
call the static method show_stat_wrapper for each process
"""
# print(ESC_BOLD, end='')
# sys.stdout.flush()
for i in range(len_):
_show_stat_wrapper_Progress(count[i], last_count[i], start_time[i], max_count[i], speed_calc_cycles,
width, q[i], last_speed[i], prepend[i], show_stat_function,
add_args, i, lock[i])
n = len_
if info_line is not None:
s = info_line.value.decode('utf-8')
s = s.split('\n')
n += len(s)
for si in s:
if width == 'auto':
width = get_terminal_width()
if len(si) > width:
si = si[:width]
print("{0:<{1}}".format(si, width))
if no_move_up:
n = 0
print('\r\n' * emtpy_lines_at_end, end='', flush=True)
# this is only a hack to find the end
# of the message in a stream
# so ESC_HIDDEN+ESC_NO_CHAR_ATTR is a magic ending
print(terminal.ESC_MOVE_LINE_UP(n + emtpy_lines_at_end) + terminal.ESC_MY_MAGIC_ENDING, end='')
sys.stdout.flush()
#print('## HERE ##', emtpy_lines_at_end, end='', flush=True)
class Progress(Loop):
"""
Abstract Progress Class
The :py:class:`Progress` Class uses :py:class:`Loop` to provide a repeating
function which calculates progress information from a changing counter value.
    The formatting of this information is done by overwriting the static member
:py:func:`Progress.show_stat`. :py:func:`Progress.show_stat` is intended to
format a single progress bar on a single line only.
The extension to multiple progresses is done
    automatically based on the formatting of a single line.
"""
def __init__(self,
count,
max_count = None,
prepend = None,
width = 'auto',
speed_calc_cycles = 10,
interval = 1,
verbose = None,
sigint = 'stop',
sigterm = 'stop',
info_line = None,
show_stat = None,
emtpy_lines_at_end = 0):
"""
:param count: shared variable for holding the current state
(use :py:func:`UnsignedIntValue` for short hand creation)
:type count: list/single value of multiprocessing.Value
:param max_count: shared variable for holding the final state
:type max_count: None or list/single value of multiprocessing.Value
:param prepend: string to put in front of each progress output
:type prepend: None, str or list of str
:param width: the width to use for the progress line (fixed or automatically determined)
:type width: int or "auto"
        :param speed_calc_cycles: number of updates (cycles) to use for estimating the speed
            (example: ``speed_calc_cycles = 4`` and ``interval = 1`` means that the speed is estimated from
the current state and state 4 updates before where the elapsed time will roughly be 4s)
:param interval: seconds to wait before updating the progress
:param verbose: DEPRECATED: has no effect, use the global ``log.setLevel()`` to control the
output level
:param sigint: behavior of the subprocess on signal ``SIGINT`` (``"stop"`` triggers
``SystemExit`` whereas ``"ign"`` ignores the signal)
:type sigint: "stop" or "ign"
:param sigterm: behavior of the subprocess on signal ``SIGTERM`` (``"stop"`` triggers
``SystemExit`` whereas ``"ign"`` ignores the signal)
:type sigterm: "stop" or "ign"
:param info_line: additional text to show below the progress (use :py:func:`StringValue`
for short hand creation of shared strings)
:type info_line: None or multiprocessing.Array of characters
.. note::
            As `Progress` is derived from :py:class:`Loop` it is highly encouraged to create
any instance of Progress with a context manager (``with`` statement).
            This ensures that the subprocess showing the progress terminates on context exit.
Otherwise one has to make sure that at some point the stop() routine is called.
abstract example::
with AProgressClass(...) as p:
p.start()
# do stuff and modify counter
"""
if verbose is not None:
log.warning("verbose is deprecated, only allowed for compatibility")
warnings.warn("verbose is deprecated", DeprecationWarning)
# converts count to list and do type check
try:
for c in count:
if not isinstance(c, Synchronized):
                    raise ValueError("Each element of 'count' must be of type multiprocessing.sharedctypes.Synchronized")
self.is_multi = True
except TypeError:
if not isinstance(count, Synchronized):
                raise ValueError("'count' must be of type multiprocessing.sharedctypes.Synchronized")
self.is_multi = False
count = [count]
self.len = len(count)
# converts max_count to list and do type check
if max_count is not None:
if self.is_multi:
try:
for i, m in enumerate(max_count):
if not isinstance(m, Synchronized):
max_count[i] = UnsignedIntValue(m)
except TypeError:
raise TypeError("'max_count' must be iterable")
else:
if not isinstance(max_count, Synchronized):
max_count = UnsignedIntValue(max_count)
max_count = [max_count]
else:
max_count = [None] * self.len
self.start_time = []
self.speed_calc_cycles = speed_calc_cycles
self.width = width
self.q = []
self.prepend = []
self.lock = []
self.last_count = []
self.last_speed = []
for i in range(self.len):
self.q.append(myQueue()) # queue to save the last speed_calc_cycles
# (time, count) information to calculate speed
#self.q[-1].cancel_join_thread()
self.last_count.append(UnsignedIntValue())
self.last_speed.append(FloatValue())
self.lock.append(mp.Lock())
self.start_time.append(FloatValue(val=time.time()))
if prepend is None:
# no prepend given
self.prepend.append('')
else:
if isinstance(prepend, str):
self.prepend.append(prepend)
else:
# assume list of prepend, (needs to be a sequence)
self.prepend.append(prepend[i])
self.max_count = max_count # list of multiprocessing value type
self.count = count # list of multiprocessing value type
self.interval = interval
self.verbose = verbose
self.show_on_exit = False
self.add_args = {}
self.info_line = info_line
self.show_stat = show_stat
self.emtpy_lines_at_end = emtpy_lines_at_end
# setup loop class with func
Loop.__init__(self,
func = _show_stat_wrapper_multi_Progress,
args = (self.count,
self.last_count,
self.start_time,
self.max_count,
self.speed_calc_cycles,
self.width,
self.q,
self.last_speed,
self.prepend,
show_stat,
self.len,
self.add_args,
self.lock,
self.info_line,
False, # no_move_up
self.emtpy_lines_at_end),
interval = interval,
sigint = sigint,
sigterm = sigterm,
auto_kill_on_last_resort = True)
def __exit__(self, *exc_args):
self.stop()
@staticmethod
def _calc(count,
last_count,
start_time,
max_count,
speed_calc_cycles,
q,
last_speed,
lock):
"""do the pre calculations in order to get TET, speed, TTG
:param count: count
:param last_count: count at the last call, allows to treat the case of no progress
between sequential calls
:param start_time: the time when start was triggered
:param max_count: the maximal value count
:type max_count:
:param speed_calc_cycles:
:type speed_calc_cycles:
:param q:
:type q:
:param last_speed:
:type last_speed:
:param lock:
:type lock:
"""
count_value = count.value
start_time_value = start_time.value
current_time = time.time()
if last_count.value != count_value:
# some progress happened
with lock:
# save current state (count, time) to queue
q.put((count_value, current_time))
# get older state from queue (or initial state)
                # to do speed estimation
if q.qsize() > speed_calc_cycles:
old_count_value, old_time = q.get()
else:
old_count_value, old_time = 0, start_time_value
last_count.value = count_value
#last_old_count.value = old_count_value
#last_old_time.value = old_time
speed = (count_value - old_count_value) / (current_time - old_time)
last_speed.value = speed
else:
# progress has not changed since last call
# use also old (cached) data from the queue
#old_count_value, old_time = last_old_count.value, last_old_time.value
speed = last_speed.value
if (max_count is None):
max_count_value = None
else:
max_count_value = max_count.value
tet = (current_time - start_time_value)
if (speed == 0) or (max_count_value is None) or (max_count_value == 0):
ttg = None
else:
ttg = math.ceil((max_count_value - count_value) / speed)
return count_value, max_count_value, speed, tet, ttg
def _reset_all(self):
"""
reset all progress information
"""
for i in range(self.len):
self._reset_i(i)
def _reset_i(self, i):
"""
reset i-th progress information
"""
self.count[i].value=0
log.debug("reset counter %s", i)
self.lock[i].acquire()
for x in range(self.q[i].qsize()):
self.q[i].get()
self.lock[i].release()
self.start_time[i].value = time.time()
def _show_stat(self):
"""
        convenience function to call _show_stat_wrapper_multi_Progress with
the given class members
"""
_show_stat_wrapper_multi_Progress(self.count,
self.last_count,
self.start_time,
self.max_count,
self.speed_calc_cycles,
self.width,
self.q,
self.last_speed,
self.prepend,
self.show_stat,
self.len,
self.add_args,
self.lock,
self.info_line,
no_move_up=True)
    def reset(self, i = None):
        """resets the progress information
:param i: tell which progress to reset, if None reset all
:type i: None, int
"""
if i is None:
self._reset_all()
else:
self._reset_i(i)
def start(self):
"""
start
"""
        # before printing any output to stdout, we can now check this
# variable to see if any other ProgressBar has reserved that
# terminal.
if (self.__class__.__name__ in terminal.TERMINAL_PRINT_LOOP_CLASSES):
if not terminal.terminal_reserve(progress_obj=self):
log.warning("tty already reserved, NOT starting the progress loop!")
return
super(Progress, self).start()
self.show_on_exit = True
def stop(self):
"""
trigger clean up by hand, needs to be done when not using
context management via 'with' statement
- will terminate loop process
- show a last progress -> see the full 100% on exit
- releases terminal reservation
"""
super(Progress, self).stop()
terminal.terminal_unreserve(progress_obj=self, verbose=self.verbose)
if self.show_on_exit:
if not isinstance(self.pipe_handler, PipeToPrint):
myout = inMemoryBuffer()
stdout = sys.stdout
sys.stdout = myout
self._show_stat()
self.pipe_handler(myout.getvalue())
sys.stdout = stdout
else:
self._show_stat()
print()
self.show_on_exit = False
def show_stat_ProgressBar(count_value, max_count_value, prepend, speed, tet, ttg, width, i, **kwargs):
if (max_count_value is None) or (max_count_value == 0):
# only show current absolute progress as number and estimated speed
print("{}{}{} [{}] {}#{} ".format(terminal.ESC_NO_CHAR_ATTR,
COLTHM['PRE_COL'] + prepend + terminal.ESC_DEFAULT,
humanize_time(tet), humanize_speed(speed),
terminal.ESC_BOLD + COLTHM['BAR_COL'],
count_value))
else:
if width == 'auto':
width = get_terminal_width()
# deduce relative progress and show as bar on screen
if ttg is None:
s3 = " TTG --"
else:
s3 = " TTG {}".format(humanize_time(ttg))
s1 = "{}{}{} [{}] ".format(terminal.ESC_NO_CHAR_ATTR,
COLTHM['PRE_COL'] + prepend + terminal.ESC_DEFAULT,
humanize_time(tet),
humanize_speed(speed))
l = terminal.len_string_without_ESC(s1 + s3)
l2 = width - l - 3
a = int(l2 * count_value / max_count_value)
b = l2 - a
s2 = COLTHM['BAR_COL'] + terminal.ESC_BOLD + "[" + "=" * a + ">" + " " * b + "]" + terminal.ESC_RESET_BOLD + terminal.ESC_DEFAULT
print(s1 + s2 + s3)
class ProgressBar(Progress):
"""
implements a progress bar similar to the one known from 'wget' or 'pv'
"""
def __init__(self, *args, **kwargs):
"""
width [int/'auto'] - the number of characters used to show the Progress bar,
use 'auto' to determine width from terminal information -> see _set_width
"""
Progress.__init__(self, *args, show_stat = show_stat_ProgressBar, **kwargs)
# self._PRE_PREPEND = terminal.ESC_NO_CHAR_ATTR + ESC_RED
# self._POST_PREPEND = ESC_BOLD + ESC_GREEN
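# Hedged sketch, not from the original source: driving a ProgressBar with a
# shared counter, following the pattern from the Progress docstring.  The total
# of 100 steps and the sleep times are illustrative.
def _example_progressbar_usage():
    count = UnsignedIntValue(val=0)
    max_count = UnsignedIntValue(val=100)
    with ProgressBar(count=count, max_count=max_count, interval=0.3) as pb:
        pb.start()
        for _ in range(100):
            with count.get_lock():
                count.value += 1  # the monitor subprocess picks this up periodically
            time.sleep(0.02)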
def show_stat_ProgressBarCounter(count_value, max_count_value, prepend, speed, tet, ttg, width, i, **kwargs):
counter_count = kwargs['counter_count'][i]
counter_speed = kwargs['counter_speed'][i]
counter_tet = time.time() - kwargs['init_time']
s_c = "{}{}{} [{}] {}#{} - ".format(terminal.ESC_NO_CHAR_ATTR,
COLTHM['PRE_COL'] + prepend + terminal.ESC_DEFAULT,
humanize_time(counter_tet),
humanize_speed(counter_speed.value),
COLTHM['BAR_COL'],
str(counter_count.value) + terminal.ESC_DEFAULT)
if width == 'auto':
width = get_terminal_width()
if (max_count_value is None) or (max_count_value == 0):
s_c = "{}{} [{}] {}#{} ".format(s_c,
humanize_time(tet),
humanize_speed(speed),
COLTHM['BAR_COL'],
str(count_value) + terminal.ESC_DEFAULT)
else:
if ttg is None:
s3 = " TTG --"
else:
s3 = " TTG {}".format(humanize_time(ttg))
s1 = "{} [{}] ".format(humanize_time(tet), humanize_speed(speed))
l = terminal.len_string_without_ESC(s1 + s3 + s_c)
l2 = width - l - 3
a = int(l2 * count_value / max_count_value)
b = l2 - a
s2 = COLTHM['BAR_COL'] + terminal.ESC_BOLD + "[" + "=" * a + ">" + " " * b + "]" + terminal.ESC_RESET_BOLD + terminal.ESC_DEFAULT
s_c = s_c + s1 + s2 + s3
print(s_c)
class ProgressBarCounter(Progress):
"""
records also the time of each reset and calculates the speed
of the resets.
    shows the TET since init (not affected by reset)
    the speed of the resets (number of finished processes per time)
and the number of finished processes
after that also show a progress of each process
max_count > 0 and not None -> bar
max_count == None -> absolute count statistic
    max_count == 0 -> hide the process statistic entirely
"""
def __init__(self, speed_calc_cycles_counter=5, **kwargs):
Progress.__init__(self, show_stat = show_stat_ProgressBarCounter, **kwargs)
self.counter_count = []
self.counter_q = []
self.counter_speed = []
for i in range(self.len):
self.counter_count.append(UnsignedIntValue(val=0))
self.counter_q.append(myQueue())
self.counter_speed.append(FloatValue())
self.counter_speed_calc_cycles = speed_calc_cycles_counter
self.init_time = time.time()
self.add_args['counter_count'] = self.counter_count
self.add_args['counter_speed'] = self.counter_speed
self.add_args['init_time'] = self.init_time
def get_counter_count(self, i=0):
return self.counter_count[i].value
def _reset_i(self, i):
c = self.counter_count[i]
with c.get_lock():
c.value += 1
count_value = c.value
q = self.counter_q[i]
current_time = time.time()
q.put((count_value, current_time))
if q.qsize() > self.counter_speed_calc_cycles:
old_count_value, old_time = q.get()
else:
old_count_value, old_time = 0, self.init_time
speed = (count_value - old_count_value) / (current_time - old_time)
self.counter_speed[i].value = speed
Progress._reset_i(self, i)
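# Hedged sketch, not from the original source: ProgressBarCounter for a sequence
# of sub-tasks, calling reset() after each finished sub-task as described in the
# class docstring above.  The task count and sizes are illustrative.
def _example_progressbarcounter_usage():
    count = UnsignedIntValue(val=0)
    max_count = UnsignedIntValue(val=50)
    with ProgressBarCounter(count=count, max_count=max_count, interval=0.3) as pbc:
        pbc.start()
        for _task in range(3):
            for _ in range(50):
                with count.get_lock():
                    count.value += 1
                time.sleep(0.01)
            pbc.reset()           # bumps the task counter and restarts the bar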
def get_d(s1, s2, width, lp, lps):
d = width - len(terminal.remove_ESC_SEQ_from_string(s1)) - len(terminal.remove_ESC_SEQ_from_string(s2)) - 2 - lp - lps
if d >= 0:
d1 = d // 2
d2 = d - d1
return s1, s2, d1, d2
def full_stat(p, tet, speed, ttg, eta, ort, repl_ch, width, lp, lps):
s1 = "TET {} {:>12} TTG {}".format(tet, speed, ttg)
s2 = "ETA {} ORT {}".format(eta, ort)
return get_d(s1, s2, width, lp, lps)
def full_minor_stat(p, tet, speed, ttg, eta, ort, repl_ch, width, lp, lps):
s1 = "E {} {:>12} G {}".format(tet, speed, ttg)
s2 = "A {} O {}".format(eta, ort)
return get_d(s1, s2, width, lp, lps)
def reduced_1_stat(p, tet, speed, ttg, eta, ort, repl_ch, width, lp, lps):
s1 = "E {} {:>12} G {}".format(tet, speed, ttg)
s2 = "O {}".format(ort)
return get_d(s1, s2, width, lp, lps)
def reduced_2_stat(p, tet, speed, ttg, eta, ort, repl_ch, width, lp, lps):
s1 = "E {} G {}".format(tet, ttg)
s2 = "O {}".format(ort)
return get_d(s1, s2, width, lp, lps)
def reduced_3_stat(p, tet, speed, ttg, eta, ort, repl_ch, width, lp, lps):
s1 = "E {} G {}".format(tet, ttg)
s2 = ''
return get_d(s1, s2, width, lp, lps)
def reduced_4_stat(p, tet, speed, ttg, eta, ort, repl_ch, width, lp, lps):
s1 = ''
s2 = ''
return get_d(s1, s2, width, lp, lps)
def kw_bold(s, ch_after):
kws = ['TET', 'TTG', 'ETA', 'ORT', 'E', 'G', 'A', 'O']
for kw in kws:
for c in ch_after:
s = s.replace(kw + c, terminal.ESC_BOLD + kw + terminal.ESC_RESET_BOLD + c)
return s
def _stat(count_value, max_count_value, prepend, speed, tet, ttg, width, i, **kwargs):
if (max_count_value is None) or (max_count_value == 0):
# only show current absolute progress as number and estimated speed
stat = "{}{} [{}] {}#{} ".format(COLTHM['PRE_COL'] + prepend + terminal.ESC_DEFAULT,
humanize_time(tet),
humanize_speed(speed),
COLTHM['BAR_COL'],
str(count_value) + terminal.ESC_DEFAULT)
else:
if width == 'auto':
width = get_terminal_width()
# deduce relative progress
p = count_value / max_count_value
if p < 1:
ps = " {:.1%} ".format(p)
else:
ps = " {:.0%} ".format(p)
if ttg is None:
eta = '--'
ort = None
else:
eta = datetime.datetime.fromtimestamp(time.time() + ttg).strftime("%Y%m%d_%H:%M:%S")
ort = tet + ttg
tet = humanize_time(tet)
speed = '[' + humanize_speed(speed) + ']'
ttg = humanize_time(ttg)
ort = humanize_time(ort)
repl_ch = '-'
lp = len(prepend)
args = p, tet, speed, ttg, eta, ort, repl_ch, width, lp, len(ps)
res = full_stat(*args)
if res is None:
res = full_minor_stat(*args)
if res is None:
res = reduced_1_stat(*args)
if res is None:
res = reduced_2_stat(*args)
if res is None:
res = reduced_3_stat(*args)
if res is None:
res = reduced_4_stat(*args)
if res is not None:
s1, s2, d1, d2 = res
s = s1 + ' ' * d1 + ps + ' ' * d2 + s2
idx_p = math.ceil( (width-lp-2)*p)
s_before = s[:idx_p].replace(' ', repl_ch)
if (len(s_before) > 0) and (s_before[-1] == repl_ch):
s_before = s_before[:-1] + '>'
s_after = s[idx_p:]
s_before = kw_bold(s_before, ch_after=[repl_ch, '>'])
s_after = kw_bold(s_after, ch_after=[' '])
stat = (COLTHM['PRE_COL'] + prepend + terminal.ESC_DEFAULT +
COLTHM['BAR_COL'] + terminal.ESC_BOLD + '[' + terminal.ESC_RESET_BOLD + s_before + terminal.ESC_DEFAULT +
s_after + terminal.ESC_BOLD + COLTHM['BAR_COL'] + ']' + terminal.ESC_NO_CHAR_ATTR)
else:
ps = ps.strip()
if p == 1:
ps = ' ' + ps
stat = prepend + ps
return stat
def show_stat_ProgressBarFancy(count_value, max_count_value, prepend, speed, tet, ttg, width, i, **kwargs):
stat = _stat(count_value, max_count_value, prepend, speed, tet, ttg, width, i, **kwargs)
print(stat)
class ProgressBarFancy(Progress):
"""
    implements a progress bar whose color indicates the current status,
    similar to the bars known from 'htop'
"""
def __init__(self, *args, **kwargs):
"""
width [int/'auto'] - the number of characters used to show the Progress bar,
use 'auto' to determine width from terminal information -> see _set_width
"""
Progress.__init__(self, *args, show_stat = show_stat_ProgressBarFancy, **kwargs)
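# Minimal usage sketch (added, illustrative; assumes Progress accepts `count`,
# `max_count` and `interval` keyword arguments and can be used as a context
# manager, as elsewhere in this module):
#
#     count = UnsignedIntValue(0)
#     max_count = UnsignedIntValue(100)
#     with ProgressBarFancy(count=count, max_count=max_count, interval=0.3):
#         for _ in range(100):
#             with count.get_lock():
#                 count.value += 1
#             time.sleep(0.05)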
def show_stat_ProgressBarCounterFancy(count_value, max_count_value, prepend, speed, tet, ttg, width, i, **kwargs):
counter_count = kwargs['counter_count'][i]
counter_speed = kwargs['counter_speed'][i]
counter_tet = time.time() - kwargs['init_time']
s_c = "{}{}{} [{}] {}#{}".format(terminal.ESC_NO_CHAR_ATTR,
COLTHM['PRE_COL']+prepend+terminal.ESC_DEFAULT,
humanize_time(counter_tet),
humanize_speed(counter_speed.value),
COLTHM['BAR_COL'],
str(counter_count.value) + terminal.ESC_DEFAULT)
if max_count_value is not None:
if width == 'auto':
width = get_terminal_width()
s_c += ' - '
if max_count_value == 0:
s_c = "{}{} [{}] {}#{} ".format(s_c, humanize_time(tet), humanize_speed(speed),
COLTHM['BAR_COL'], str(count_value)+terminal.ESC_DEFAULT)
else:
_width = width - terminal.len_string_without_ESC(s_c)
s_c += _stat(count_value, max_count_value, '', speed, tet, ttg, _width, i)
print(s_c)
class ProgressBarCounterFancy(ProgressBarCounter):
def __init__(self, *args, **kwargs):
ProgressBarCounter.__init__(self, *args, **kwargs)
self.show_stat = show_stat_ProgressBarCounterFancy
class SIG_handler_Loop(object):
"""class to setup signal handling for the Loop class
Note: each subprocess receives the default signal handling from it's parent.
If the signal function from the module signal is evoked within the subprocess
this default behavior can be overwritten.
The init function receives a shared memory boolean object which will be set
false in case of signal detection. Since the Loop class will check the state
of this boolean object before each repetition, the loop will stop when
a signal was receives.
"""
def __init__(self, sigint, sigterm, log, prefix):
self.set_signal(signal.SIGINT, sigint)
self.set_signal(signal.SIGTERM, sigterm)
self.prefix = prefix
self.log = log
self.log.info("setup signal handler for loop (SIGINT:%s, SIGTERM:%s)", sigint, sigterm)
def set_signal(self, sig, handler_str):
if handler_str == 'ign':
signal.signal(sig, self._ignore_signal)
elif handler_str == 'stop':
signal.signal(sig, self._stop_on_signal)
else:
raise TypeError("unknown signal hander string '%s'", handler_str)
def _ignore_signal(self, signal, frame):
self.log.debug("ignore received sig %s", signal_dict[signal])
pass
def _stop_on_signal(self, signal, frame):
self.log.info("received sig %s -> raise InterruptedError", signal_dict[signal])
raise LoopInterruptError()
def FloatValue(val=0.):
"""returns a `multiprocessing.Value` of type `float` with initial value `val`"""
return mp.Value('d', val, lock=True)
def UnsignedIntValue(val=0):
"""returns a `multiprocessing.Value` of type `unsigned int` with initial value `val`"""
return mp.Value('I', val, lock=True)
def StringValue(num_of_bytes):
"""returns a `multiprocessing.Array` of type `character` and length `num_of_bytes`"""
return mp.Array('c', _jm_compatible_bytearray(num_of_bytes), lock=True)
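# Minimal usage sketch (added): these wrappers give lock-protected shared-memory
# values that several processes can update safely, e.g.
#
#     c = UnsignedIntValue(0)
#     with c.get_lock():
#         c.value += 1
#     s = StringValue(64)
#     s.value = b'status text'          # at most 64 bytes fit into the array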
def check_process_termination(proc, prefix, timeout, auto_kill_on_last_resort = False):
proc.join(timeout)
if not proc.is_alive():
log.debug("termination of process (pid %s) within timeout of %s SUCCEEDED!", proc.pid, humanize_time(timeout))
return True
# process still runs -> send SIGTERM -> see what happens
log.warning("termination of process (pid %s) within given timeout of %s FAILED!", proc.pid, humanize_time(timeout))
proc.terminate()
new_timeout = 3*timeout
log.debug("wait for termination (timeout %s)", humanize_time(new_timeout))
proc.join(new_timeout)
if not proc.is_alive():
log.info("termination of process (pid %s) via SIGTERM with timeout of %s SUCCEEDED!", proc.pid, humanize_time(new_timeout))
return True
log.warning("termination of process (pid %s) via SIGTERM with timeout of %s FAILED!", proc.pid, humanize_time(new_timeout))
log.debug("auto_kill_on_last_resort is %s", auto_kill_on_last_resort)
answer = 'k' if auto_kill_on_last_resort else '_'
while True:
log.debug("answer string is %s", answer)
if answer == 'k':
log.warning("send SIGKILL to process with pid %s", proc.pid)
os.kill(proc.pid, signal.SIGKILL)
time.sleep(0.1)
else:
log.info("send SIGTERM to process with pid %s", proc.pid)
os.kill(proc.pid, signal.SIGTERM)
time.sleep(0.1)
if not proc.is_alive():
log.info("process (pid %s) has stopped running!", proc.pid)
return True
else:
log.warning("process (pid %s) is still running!", proc.pid)
print("the process (pid {}) seems still running".format(proc.pid))
try:
answer = input("press 'enter' to send SIGTERM, enter 'k' to send SIGKILL or enter 'ignore' to not bother about the process anymore")
except Exception as e:
log.error("could not ask for sending SIGKILL due to {}".format(type(e)))
log.info(traceback.format_exc())
log.warning("send SIGKILL now")
answer = 'k'
if answer == 'ignore':
log.warning("ignore process %s", proc.pid)
return False
elif answer != 'k':
answer = ''
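# Typical call pattern (added, illustrative; `some_worker` is a placeholder):
# give the child a short grace period, escalate to SIGTERM, and only fall back
# to SIGKILL automatically when auto_kill_on_last_resort is set.
#
#     p = mp.Process(target=some_worker)
#     p.start()
#     ...
#     check_process_termination(p, prefix='loop', timeout=2,
#                               auto_kill_on_last_resort=True)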
def getCountKwargs(func):
""" Returns a list ["count kwarg", "count_max kwarg"] for a
given function. Valid combinations are defined in
`progress.validCountKwargs`.
Returns None if no keyword arguments are found.
"""
# Get all arguments of the function
if hasattr(func, "__code__"):
func_args = func.__code__.co_varnames[:func.__code__.co_argcount]
for pair in validCountKwargs:
if ( pair[0] in func_args and pair[1] in func_args ):
return pair
# else
return None
def humanize_speed(c_per_sec):
"""convert a speed in counts per second to counts per [s, min, h, d], choosing the smallest value greater zero.
"""
scales = [60, 60, 24]
units = ['c/s', 'c/min', 'c/h', 'c/d']
speed = c_per_sec
i = 0
if speed > 0:
while (speed < 1) and (i < len(scales)):
speed *= scales[i]
i += 1
return "{:.1f}{}".format(speed, units[i])
def humanize_time(secs):
"""convert second in to hh:mm:ss format
"""
if secs is None:
return '--'
if secs < 1:
return "{:.2f}ms".format(secs*1000)
elif secs < 10:
return "{:.2f}s".format(secs)
else:
mins, secs = divmod(secs, 60)
hours, mins = divmod(mins, 60)
return '{:02d}:{:02d}:{:02d}'.format(int(hours), int(mins), int(secs))
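# For example: humanize_time(None) -> '--', humanize_time(0.5) -> '500.00ms',
# humanize_time(5) -> '5.00s', humanize_time(3661) -> '01:01:01'.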
def codecov_subprocess_check():
print("this line will be only called from a subprocess")
myQueue = mp.Queue
# a mapping from the numeric values of the signals to their names used in the
# standard python module signals
signal_dict = {}
for s in dir(signal):
if s.startswith('SIG') and s[3] != '_':
n = getattr(signal, s)
if n in signal_dict:
signal_dict[n] += ('/'+s)
else:
signal_dict[n] = s
_colthm_term_default = {'PRE_COL': terminal.ESC_RED, 'BAR_COL': terminal.ESC_LIGHT_GREEN, 'ADD_LNS_UP':0}
_colthm_ipyt_default = {'PRE_COL': terminal.ESC_RED, 'BAR_COL': terminal.ESC_LIGHT_BLUE, 'ADD_LNS_UP':0}
_colthm_wincmd_default = {'PRE_COL': terminal.ESC_RED, 'BAR_COL': terminal.ESC_GREEN, 'ADD_LNS_UP':1}
color_themes = {'term_default': _colthm_term_default,
'ipyt_default': _colthm_ipyt_default,
'wincmd_default': _colthm_wincmd_default}
if platform.system() == 'Windows':
COLTHM = _colthm_wincmd_default
else:
COLTHM = _colthm_term_default
def choose_color_theme(name):
global COLTHM
if name in color_themes:
COLTHM = color_themes[name]
else:
warnings.warn("no such color theme {}".format(name))
# keyword arguments that define counting in wrapped functions
validCountKwargs = [
[ "count", "count_max"],
[ "count", "max_count"],
[ "c", "m"],
[ "jmc", "jmm"],
]
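# Example (added, illustrative): getCountKwargs returns the first pair from
# validCountKwargs whose two names both appear among a function's positional
# argument names.
#
#     def work(c, m):
#         pass
#
#     getCountKwargs(work)    # -> ["c", "m"]
#     getCountKwargs(print)   # -> None (builtins expose no __code__)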
|
sensor.py
|
"""Pushbullet platform for sensor component."""
import logging
import voluptuous as vol
from homeassistant.const import CONF_API_KEY, CONF_MONITORED_CONDITIONS
from homeassistant.components.sensor import PLATFORM_SCHEMA
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
SENSOR_TYPES = {
"application_name": ["Application name"],
"body": ["Body"],
"notification_id": ["Notification ID"],
"notification_tag": ["Notification tag"],
"package_name": ["Package name"],
"receiver_email": ["Receiver email"],
"sender_email": ["Sender email"],
"source_device_iden": ["Sender device ID"],
"title": ["Title"],
"type": ["Type"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=["title", "body"]): vol.All(
cv.ensure_list, vol.Length(min=1), [vol.In(SENSOR_TYPES)]
),
}
)
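# A matching configuration.yaml entry might look like this (illustrative;
# YOUR_API_KEY is a placeholder):
#
#     sensor:
#       - platform: pushbullet
#         api_key: YOUR_API_KEY
#         monitored_conditions:
#           - title
#           - body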
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Pushbullet Sensor platform."""
from pushbullet import PushBullet
from pushbullet import InvalidKeyError
try:
pushbullet = PushBullet(config.get(CONF_API_KEY))
except InvalidKeyError:
_LOGGER.error("Wrong API key for Pushbullet supplied")
return False
pbprovider = PushBulletNotificationProvider(pushbullet)
devices = []
for sensor_type in config[CONF_MONITORED_CONDITIONS]:
devices.append(PushBulletNotificationSensor(pbprovider, sensor_type))
add_entities(devices)
class PushBulletNotificationSensor(Entity):
"""Representation of a Pushbullet Sensor."""
def __init__(self, pb, element):
"""Initialize the Pushbullet sensor."""
self.pushbullet = pb
self._element = element
self._state = None
self._state_attributes = None
def update(self):
"""Fetch the latest data from the sensor.
This will fetch the 'sensor reading' into self._state but also all
attributes into self._state_attributes.
"""
try:
self._state = self.pushbullet.data[self._element]
self._state_attributes = self.pushbullet.data
except (KeyError, TypeError):
pass
@property
def name(self):
"""Return the name of the sensor."""
return "{} {}".format("Pushbullet", self._element)
@property
def state(self):
"""Return the current state of the sensor."""
return self._state
@property
def device_state_attributes(self):
"""Return all known attributes of the sensor."""
return self._state_attributes
class PushBulletNotificationProvider:
"""Provider for an account, leading to one or more sensors."""
def __init__(self, pb):
"""Start to retrieve pushes from the given Pushbullet instance."""
import threading
self.pushbullet = pb
self._data = None
self.listener = None
self.thread = threading.Thread(target=self.retrieve_pushes)
self.thread.daemon = True
self.thread.start()
def on_push(self, data):
"""Update the current data.
Currently only monitors pushes but might be extended to monitor
different kinds of Pushbullet events.
"""
if data["type"] == "push":
self._data = data["push"]
@property
def data(self):
"""Return the current data stored in the provider."""
return self._data
def retrieve_pushes(self):
"""Retrieve_pushes.
Spawn a new Listener and links it to self.on_push.
"""
from pushbullet import Listener
self.listener = Listener(account=self.pushbullet, on_push=self.on_push)
_LOGGER.debug("Getting pushes")
try:
self.listener.run_forever()
finally:
self.listener.close()
|
monitor.py
|
import sched
import time
from multiprocessing import Queue
from queue import Empty
from threading import Thread
def clear_queue(queue: Queue):
    # multiprocessing.Queue has no `mutex` or internal `queue` attribute
    # (those belong to queue.Queue), so drain it item by item instead.
    try:
        while True:
            queue.get_nowait()
    except Empty:
        pass
def monitor_queue(cls):
class QueueMonitor:
def __init__(self, *args, **kwargs):
self.oInstance = cls(*args, **kwargs)
self.queue = args[1]
s = sched.scheduler(time.time, time.sleep)
s.enter(1, 1, self.monitor_queue, argument=(s,))
            Thread(target=s.run, daemon=True).start()
def monitor_queue(self, s):
s.enter(1, 1, self.monitor_queue, argument=(s,))
usage = self.queue.qsize() / self.queue._maxsize
if usage > 0.5:
                self.oInstance.logger.debug('Queue is at {}% of its max capacity.'.format(100 * usage))
def __getattribute__(self, attr):
try:
x = super().__getattribute__(attr)
except AttributeError:
pass
else:
return x
return self.oInstance.__getattribute__(attr)
return QueueMonitor
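# Illustrative usage sketch (added; the decorated class is assumed to take the
# monitored queue as its second positional argument and to expose a `logger`):
#
#     @monitor_queue
#     class Consumer:
#         def __init__(self, name, work_queue, logger):
#             self.logger = logger
#             self.work_queue = work_queue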
|
pytest_dut_monitor.py
|
import pytest
import paramiko
import threading
import logging
import time
import os
import yaml
from collections import OrderedDict
from datetime import datetime
from errors import HDDThresholdExceeded, RAMThresholdExceeded, CPUThresholdExceeded
logger = logging.getLogger(__name__)
DUT_MONITOR = "/tmp/dut_monitor.py"
DUT_CPU_LOG = "/tmp/cpu.log"
DUT_RAM_LOG = "/tmp/ram.log"
DUT_HDD_LOG = "/tmp/hdd.log"
class DUTMonitorPlugin(object):
"""
Pytest plugin which defines:
- pytest fixtures: 'dut_ssh' and 'dut_monitor'
    - handlers to verify that the CPU, RAM and HDD values measured during each test item execution
      do not exceed the defined thresholds
"""
def __init__(self, thresholds):
self.thresholds = thresholds
@pytest.fixture(autouse=True, scope="module")
def dut_ssh(self, duthosts, rand_one_dut_hostname, creds):
"""Establish SSH connection with DUT"""
duthost = duthosts[rand_one_dut_hostname]
ssh = DUTMonitorClient(host=duthost.hostname, user=creds["sonicadmin_user"],
password=creds["sonicadmin_password"])
yield ssh
@pytest.fixture(autouse=True, scope="function")
def dut_monitor(self, dut_ssh, localhost, duthosts, rand_one_dut_hostname):
"""
For each test item starts monitoring of hardware resources consumption on the DUT
"""
duthost = duthosts[rand_one_dut_hostname]
dut_thresholds = {}
monitor_exceptions = []
# Start monitoring on DUT
dut_ssh.start()
# Read file with defined thresholds
with open(self.thresholds) as stream:
general_thresholds = yaml.safe_load(stream)
dut_thresholds = general_thresholds["default"]
dut_platform = duthost.facts["platform"]
dut_hwsku = duthost.facts["hwsku"]
if dut_platform in general_thresholds:
dut_thresholds.update(general_thresholds[dut_platform]["default"])
if dut_hwsku in general_thresholds[dut_platform]["hwsku"]:
dut_thresholds.update(general_thresholds[dut_platform]["hwsku"][dut_hwsku])
yield dut_thresholds
# Stop monitoring on DUT
dut_ssh.stop()
# Download log files with CPU, RAM and HDD measurements data
measurements = dut_ssh.get_log_files()
# Verify hardware resources consumption does not exceed defined threshold
if measurements["hdd"]:
try:
                self.assert_hdd(hdd_meas=measurements["hdd"], thresholds=dut_thresholds)
except HDDThresholdExceeded as err:
monitor_exceptions.append(err)
if measurements["ram"]:
try:
self.assert_ram(ram_meas=measurements["ram"], thresholds=dut_thresholds)
except RAMThresholdExceeded as err:
monitor_exceptions.append(err)
if measurements["cpu"]:
try:
self.assert_cpu(cpu_meas=measurements["cpu"], thresholds=dut_thresholds)
except CPUThresholdExceeded as err:
monitor_exceptions.append(err)
if monitor_exceptions:
raise Exception("\n".join(item.message for item in monitor_exceptions))
    def assert_hdd(self, hdd_meas, thresholds):
        """
        Verify that disk usage on the DUT does not exceed the defined threshold
"""
overused = []
fail_msg = "Used HDD threshold - {}\nHDD overuse:\n".format(thresholds["hdd_used"])
for timestamp, used_hdd in hdd_meas.items():
if used_hdd > thresholds["hdd_used"]:
overused.append((timestamp, used_hdd))
if overused:
raise HDDThresholdExceeded(fail_msg + "\n".join(str(item) for item in overused))
def assert_ram(self, ram_meas, thresholds):
"""
Verify that RAM resources on the DUT are not overutilized
"""
failed = False
peak_overused = []
fail_msg = "\nRAM thresholds: peak - {}; before/after test difference - {}%\n".format(thresholds["ram_peak"],
thresholds["ram_delta"])
for timestamp, used_ram in ram_meas.items():
if used_ram > thresholds["ram_peak"]:
peak_overused.append((timestamp, used_ram))
if peak_overused:
fail_msg = fail_msg + "RAM overuse:\n{}\n".format("\n".join(str(item) for item in peak_overused))
failed = True
        # Take the first and last RAM measurements (list() needed for Python 3 dict views)
        ram_values = list(ram_meas.values())
        if len(ram_values) >= 4:
            before = sum(ram_values[0:2]) / 2
            after = sum(ram_values[2:4]) / 2
        else:
            before = ram_values[0]
            after = ram_values[-1]
delta = thresholds["ram_delta"] / 100. * before
if after >= before + delta:
fail_msg = fail_msg + "RAM was not restored\nRAM before test {}; RAM after test {}\n".format(before, after)
failed = True
if failed:
raise RAMThresholdExceeded(fail_msg)
def assert_cpu(self, cpu_meas, thresholds):
"""
Verify that CPU resources on the DUT are not overutilized
"""
failed = False
total_overused = []
process_overused = {}
cpu_thresholds = "CPU thresholds: total - {}; per process - {}; average - {}\n".format(thresholds["cpu_total"],
thresholds["cpu_process"],
thresholds["cpu_total_average"])
average_cpu = "\n> Average CPU consumption during test run {}; Threshold - {}\n"
fail_msg = ""
total_sum = 0
t_format = "%Y-%m-%d %H:%M:%S"
def handle_process_measurements(p_name, t_first, t_last, p_average):
"""Compose fail message if process overuse CPU durig 'cpu_measure_duration' interval."""
msg_template = "> Process '{}'\nAverage CPU overuse {} during {} seconds\n{}"
duration = (t_last - t_first).total_seconds()
if duration >= thresholds["cpu_measure_duration"]:
                return msg_template.format(p_name,
                                           p_average,
                                           duration,
                                           "{} - {}\n".format(t_first.strftime(t_format),
                                                              t_last.strftime(t_format)))
return ""
def handle_total_measurements(overused_list):
"""Compose fail message if CPU utilization exceeds threshold during 'duration' interval."""
fail_msg = ""
start = datetime.strptime(overused_list[0][0], t_format)
end = datetime.strptime(overused_list[-1][0], t_format)
if (end - start).total_seconds() >= thresholds["cpu_measure_duration"]:
fail_msg = "Total CPU overuse during {} seconds.\n{}\n\n".format((end - start).total_seconds(),
"\n".join([str(item) for item in overused_list])
)
del overused_list[0:]
return fail_msg
# Calculate total CPU utilization
for m_id, timestamp in enumerate(cpu_meas):
# Collect total CPU utilization to calculate total average
total_sum += cpu_meas[timestamp]["total"]
if cpu_meas[timestamp]["total"] > thresholds["cpu_total"]:
total_overused.append((timestamp, cpu_meas[timestamp]["total"]))
if m_id == (len(cpu_meas) - 1):
fail_msg += handle_total_measurements(total_overused)
total_overused = []
elif total_overused:
fail_msg += handle_total_measurements(total_overused)
total_overused = []
for process_consumption, process_name in cpu_meas[timestamp]["top_consumer"].items():
if process_consumption >= thresholds["cpu_process"]:
if process_name not in process_overused:
process_overused[process_name] = []
# Collect list of CPU utilization for specific process if CPU utilization exceeds threshold
process_overused[process_name].append((timestamp, process_consumption))
# Handle measurements per process
if process_overused:
for process_name, process_consumption in process_overused.items():
timestamps = []
process_sum = 0
for m_id, m_value in enumerate(process_consumption):
t_stamp = datetime.strptime(m_value[0], t_format)
process_sum += m_value[1]
if not timestamps:
timestamps.append(t_stamp)
continue
if (2 <= (t_stamp - timestamps[-1]).total_seconds() <= 3):
timestamps.append(t_stamp)
if m_id == (len(process_consumption) - 1):
fail_msg += handle_process_measurements(p_name=process_name,
t_first=timestamps[0],
t_last=timestamps[-1],
p_average=process_sum / len(timestamps))
else:
fail_msg += handle_process_measurements(p_name=process_name,
t_first=timestamps[0],
t_last=timestamps[-1],
p_average=process_sum / len(timestamps))
timestamps = []
process_sum = 0
# Calculate average CPU utilization
if (total_sum / len(cpu_meas)) > thresholds["cpu_total_average"]:
fail_msg += average_cpu.format(total_sum / len(cpu_meas), thresholds["cpu_total_average"])
if fail_msg:
raise CPUThresholdExceeded(cpu_thresholds + fail_msg)
class DUTMonitorClient(object):
"""
    DUTMonitorClient establishes an SSH connection with the DUT and keeps it open during the full test run.
    Available features:
    - start/stop hardware resources monitoring on the DUT
    - automatically restart the monitoring script on the DUT in case of lost network connectivity (device reboot, etc.)
"""
def __init__(self, host, user, password):
self.running = False
self.user = user
self.password = password
self.host = host
self.init()
self.run_channel = None
self._thread = threading.Thread(name="Connection tracker", target=self._track_connection)
self._thread.setDaemon(True)
self._thread.start()
def _track_connection(self):
"""
        @summary: Track network connectivity. Reestablish the network connection if it drops.
"""
while True:
try:
self.ssh.exec_command("true", timeout=5)
except (paramiko.SSHException, AttributeError):
logger.warning("SSH connection dropped")
logger.debug("Trying to reconnect...")
self.close()
try:
self.init()
except Exception as err:
logger.debug(repr(err))
else:
if self.running:
self.start()
else:
time.sleep(5)
def _upload_to_dut(self):
"""
@summary: Upload 'dut_monitor.py' module to the DUT '/tmp' folder
"""
logger.debug("Uploading file to the DUT...")
with self.ssh.open_sftp() as sftp:
sftp.put(os.path.join(os.path.split(__file__)[0], "dut_monitor.py"), DUT_MONITOR)
def init(self):
"""
@summary: Connect to the DUT via SSH and authenticate to it.
"""
logger.debug("Trying to establish connection ...")
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh.connect(self.host, username=self.user, password=self.password, timeout=5)
def close(self):
"""
@summary: Close this SSHClient and its underlying Transport
"""
logger.debug("Close SSH connection with DUT")
self.ssh.close()
def exec_command(self, cmd, timeout=None):
"""
@summary: Execute a command on the DUT and track possible connectivity issues.
A new Channel is opened and the requested command is executed
"""
try:
return self.ssh.exec_command(cmd, timeout=timeout, get_pty=True)
except Exception as err:
logger.warning("Broken connection - {}".format(repr(err)))
logger.warning("Skip command {}".format(cmd))
return (None, None, None)
def start(self):
"""
@summary: Start HW resources monitoring on the DUT.
Write obtained values to the following files on the DUT: DUT_CPU_LOG, DUT_RAM_LOG, DUT_HDD_LOG
"""
self.running = True
self._upload_to_dut()
logger.debug("Start HW resources monitoring on the DUT...")
self.run_channel = self.ssh.get_transport().open_session()
self.run_channel.get_pty()
self.run_channel.settimeout(5)
# Start monitoring on DUT
self.run_channel.exec_command("python {} --start".format(DUT_MONITOR))
# Ensure monitoring started
        output = self.run_channel.recv(1024).decode(errors="replace")
        if "Started resources monitoring ..." not in output:
raise Exception("Failed to start monitoring on DUT: {}".format(output))
def stop(self):
"""
@summary: Close this SSHClient and its underlying Transport
"""
self.running = False
logger.debug("Stop resources monitoring on the DUT...")
if not self.run_channel.closed:
self.run_channel.close()
def read_yml(self, file_pointer):
"""
@summary: Read yaml file content. Convert it to the ordered data.
@return: OrderedDict with sorted keys by timestamp, or empty dict for empty file.
"""
with file_pointer as fp:
measurements = yaml.safe_load("".join(fp))
if measurements is None:
return {}
        # Sort measurements by timestamp key so logs are processed chronologically
        keys = sorted(measurements.keys())
key_value_pairs = [(item, measurements[item]) for item in keys]
return OrderedDict(key_value_pairs)
def get_log_files(self):
"""
@summary: Fetch monitoring logs from device, parse, convert to dictionary with sorted order.
@return: Dictionary with keys "cpu", "ram", "hdd", values contains appropriate measurements made on DUT.
"""
logger.debug("Downloading file from the DUT...")
cpu_log_fp = self.ssh.open_sftp().file(DUT_CPU_LOG)
ram_log_fp = self.ssh.open_sftp().file(DUT_RAM_LOG)
hdd_log_fp = self.ssh.open_sftp().file(DUT_HDD_LOG)
cpu_meas = self.read_yml(cpu_log_fp)
ram_meas = self.read_yml(ram_log_fp)
hdd_meas = self.read_yml(hdd_log_fp)
return {"cpu": cpu_meas, "ram": ram_meas, "hdd": hdd_meas}
|
test_marathon.py
|
import ast
import contextlib
import json
import os
import re
import sys
import threading
from datetime import timedelta
import pytest
import retrying
from six.moves.BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from dcos import constants
from dcoscli.test.common import (assert_command, assert_lines, exec_command,
popen_tty, update_config)
from dcoscli.test.marathon import (app, list_apps, list_deployments, show_app,
start_app, watch_all_deployments,
watch_deployment)
_ZERO_INSTANCE_APP_ID = 'zero-instance-app'
_ZERO_INSTANCE_APP_INSTANCES = 100
def test_help():
with open('tests/data/marathon/help.txt') as content:
assert_command(['dcos', 'marathon', '--help'],
stdout=content.read().encode('utf-8'))
def test_version():
assert_command(['dcos', 'marathon', '--version'],
stdout=b'dcos-marathon version SNAPSHOT\n')
def test_info():
assert_command(['dcos', 'marathon', '--info'],
stdout=b'Deploy and manage applications to DC/OS\n')
def test_about():
returncode, stdout, stderr = exec_command(['dcos', 'marathon', 'about'])
assert returncode == 0
assert stderr == b''
result = json.loads(stdout.decode('utf-8'))
assert result['name'] == "marathon"
@pytest.fixture
def env():
r = os.environ.copy()
r.update({constants.PATH_ENV: os.environ[constants.PATH_ENV]})
return r
def test_empty_list():
list_apps()
def test_add_app_through_http():
with _zero_instance_app_through_http():
list_apps('zero-instance-app')
def test_add_app_bad_resource():
stderr = (b'Can\'t read from resource: bad_resource.\n'
b'Please check that it exists.\n')
assert_command(['dcos', 'marathon', 'app', 'add', 'bad_resource'],
returncode=1,
stderr=stderr)
def test_remove_app():
with _zero_instance_app():
pass
list_apps()
def test_add_bad_json_app():
with open('tests/data/marathon/apps/bad.json') as fd:
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'add'],
stdin=fd)
assert returncode == 1
assert stdout == b''
assert stderr.decode('utf-8').startswith('Error loading JSON: ')
def test_add_existing_app():
with _zero_instance_app():
app_path = 'tests/data/marathon/apps/zero_instance_sleep_v2.json'
with open(app_path) as fd:
stderr = b"Application '/zero-instance-app' already exists\n"
assert_command(['dcos', 'marathon', 'app', 'add'],
returncode=1,
stderr=stderr,
stdin=fd)
def test_show_absolute_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
result = show_app('zero-instance-app')
show_app('zero-instance-app', result['version'])
def test_show_relative_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
show_app('zero-instance-app', "-1")
def test_show_missing_relative_app_version():
app_id = _ZERO_INSTANCE_APP_ID
with _zero_instance_app():
_update_app(
app_id,
'tests/data/marathon/apps/update_zero_instance_sleep.json')
# Marathon persists app versions indefinitely by ID, so pick a large
# index here in case the history is long
cmd = ['dcos', 'marathon', 'app', 'show', '--app-version=-200', app_id]
returncode, stdout, stderr = exec_command(cmd)
assert returncode == 1
assert stdout == b''
pattern = ("Application 'zero-instance-app' only has [1-9][0-9]* "
"version\\(s\\)\\.\n")
assert re.fullmatch(pattern, stderr.decode('utf-8'), flags=re.DOTALL)
def test_show_missing_absolute_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'show',
'--app-version=2000-02-11T20:39:32.972Z', 'zero-instance-app'])
assert returncode == 1
assert stdout == b''
assert stderr.decode('utf-8').startswith(
"Error: App '/zero-instance-app' does not exist")
def test_show_bad_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'show', '--app-version=20:39:32.972Z',
'zero-instance-app'])
assert returncode == 1
assert stdout == b''
assert stderr.startswith(b'Error while fetching')
pattern = (b"""{"message":"Invalid timestamp provided """
b"""\'20:39:32.972Z\'. Expecting ISO-8601 """
b"""datetime string."}".\n""")
assert stderr.endswith(pattern)
def test_show_bad_relative_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
assert_command(
['dcos', 'marathon', 'app', 'show',
'--app-version=2', 'zero-instance-app'],
returncode=1,
stderr=b"Relative versions must be negative: 2\n")
def test_start_missing_app():
assert_command(
['dcos', 'marathon', 'app', 'start', 'missing-id'],
returncode=1,
stderr=b"Error: App '/missing-id' does not exist\n")
def test_start_already_started_app():
with _zero_instance_app():
start_app('zero-instance-app')
stdout = (b"Application 'zero-instance-app' already "
b"started: 1 instances.\n")
assert_command(
['dcos', 'marathon', 'app', 'start', 'zero-instance-app'],
returncode=1,
stdout=stdout)
def test_stop_missing_app():
assert_command(['dcos', 'marathon', 'app', 'stop', 'missing-id'],
returncode=1,
stderr=b"Error: App '/missing-id' does not exist\n")
def test_stop_app():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'stop', 'zero-instance-app'])
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
def test_stop_already_stopped_app():
with _zero_instance_app():
stdout = (b"Application 'zero-instance-app' already "
b"stopped: 0 instances.\n")
assert_command(
['dcos', 'marathon', 'app', 'stop', 'zero-instance-app'],
returncode=1,
stdout=stdout)
def test_update_missing_app():
assert_command(['dcos', 'marathon', 'app', 'update', 'missing-id'],
stderr=b"Error: App '/missing-id' does not exist\n",
returncode=1)
def test_update_bad_type():
with _zero_instance_app():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'update',
'zero-instance-app', 'cpus="a string"'])
stderr_end = b"""{"message":"Invalid JSON","details":[{"path":"/cpus","errors":["error.expected.jsnumber"]}]}""" # noqa: E501
assert returncode == 1
assert stderr_end in stderr
assert stdout == b''
def test_update_invalid_request():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'update', '{', 'instances'])
assert returncode == 1
assert stdout == b''
stderr = stderr.decode()
# TODO (tamar): this becomes 'Error: App '/{' does not exist\n"'
# in Marathon 0.11.0
assert stderr.startswith('Error on request')
assert stderr.endswith('HTTP 400: Bad Request\n')
def test_app_add_invalid_request():
path = os.path.join(
'tests', 'data', 'marathon', 'apps', 'app_add_400.json')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'add', path])
stderr_end = b"""{"message":"Invalid JSON","details":[{"path":"/container/docker/network","errors":["error.unknown.enum.literal"]}]}""" # noqa: E501
assert returncode == 1
assert stderr_end in stderr
assert stdout == b''
def test_update_app():
with _zero_instance_app():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'update', 'zero-instance-app',
'cpus=1', 'mem=20', "cmd='sleep 100'"])
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
def test_update_app_json():
with _zero_instance_app():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'update', 'zero-instance-app',
"env='{\"key\":\"/value\"}'"])
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
def test_update_app_from_stdin():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
def test_restarting_stopped_app():
with _zero_instance_app():
stdout = (b"Unable to perform rolling restart of application '"
b"/zero-instance-app' because it has no running tasks\n")
assert_command(
['dcos', 'marathon', 'app', 'restart', 'zero-instance-app'],
returncode=1,
stdout=stdout)
def test_restarting_missing_app():
assert_command(['dcos', 'marathon', 'app', 'restart', 'missing-id'],
returncode=1,
stderr=b"Error: App '/missing-id' does not exist\n")
def test_killing_app():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'kill', 'zero-instance-app'])
assert returncode == 0
assert stderr == b''
out = stdout.decode()
assert out.startswith('Killed tasks: ')
        out = out[len('Killed tasks: '):]
dictout = ast.literal_eval(out)
assert len(dictout) == 3
def test_killing_scaling_app():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
_list_tasks(3)
command = ['dcos', 'marathon', 'app', 'kill', '--scale',
'zero-instance-app']
returncode, stdout, stderr = exec_command(command)
assert returncode == 0
assert stdout.decode().startswith('Started deployment: ')
assert stdout.decode().find('version') > -1
assert stdout.decode().find('deploymentId') > -1
assert stderr == b''
watch_all_deployments()
_list_tasks(0)
def test_killing_with_host_app():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
existing_tasks = _list_tasks(3, 'zero-instance-app')
task_hosts = set([task['host'] for task in existing_tasks])
if len(task_hosts) <= 1:
pytest.skip('test needs 2 or more agents to succeed, '
'only {} agents available'.format(len(task_hosts)))
assert len(task_hosts) > 1
kill_host = list(task_hosts)[0]
expected_to_be_killed = set([task['id']
for task in existing_tasks
if task['host'] == kill_host])
not_to_be_killed = set([task['id']
for task in existing_tasks
if task['host'] != kill_host])
assert len(not_to_be_killed) > 0
assert len(expected_to_be_killed) > 0
command = ['dcos', 'marathon', 'app', 'kill', '--host', kill_host,
'zero-instance-app']
returncode, stdout, stderr = exec_command(command)
assert stdout.decode().startswith('Killed tasks: ')
assert stderr == b''
new_tasks = set([task['id'] for task in _list_tasks()])
assert not_to_be_killed.intersection(new_tasks) == not_to_be_killed
assert len(expected_to_be_killed.intersection(new_tasks)) == 0
@pytest.mark.skipif(
True, reason='https://github.com/mesosphere/marathon/issues/3251')
def test_kill_stopped_app():
with _zero_instance_app():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'kill', 'zero-instance-app'])
assert returncode == 1
assert stdout.decode().startswith('Killed tasks: []')
def test_kill_missing_app():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'kill', 'app'])
assert returncode == 1
assert stdout.decode() == ''
stderr_expected = "Error: App '/app' does not exist"
assert stderr.decode().strip() == stderr_expected
def test_list_version_missing_app():
assert_command(
['dcos', 'marathon', 'app', 'version', 'list', 'missing-id'],
returncode=1,
stderr=b"Error: App '/missing-id' does not exist\n")
def test_list_version_negative_max_count():
assert_command(['dcos', 'marathon', 'app', 'version', 'list',
'missing-id', '--max-count=-1'],
returncode=1,
stderr=b'Maximum count must be a positive number: -1\n')
def test_list_version_app():
app_id = _ZERO_INSTANCE_APP_ID
with _zero_instance_app():
_list_versions(app_id, 1)
_update_app(
app_id,
'tests/data/marathon/apps/update_zero_instance_sleep.json')
_list_versions(app_id, 2)
def test_list_version_max_count():
app_id = _ZERO_INSTANCE_APP_ID
with _zero_instance_app():
_update_app(
app_id,
'tests/data/marathon/apps/update_zero_instance_sleep.json')
_list_versions(app_id, 1, 1)
_list_versions(app_id, 2, 2)
_list_versions(app_id, 2, 3)
def test_list_empty_deployment():
list_deployments(0)
def test_list_deployment():
with _zero_instance_app():
start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
list_deployments(1)
def test_list_deployment_table():
"""Simple sanity check for listing deployments with a table output.
The more specific testing is done in unit tests.
"""
with _zero_instance_app():
start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
assert_lines(['dcos', 'marathon', 'deployment', 'list'], 2)
def test_list_deployment_missing_app():
with _zero_instance_app():
start_app('zero-instance-app')
list_deployments(0, 'missing-id')
def test_list_deployment_app():
with _zero_instance_app():
start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
list_deployments(1, 'zero-instance-app')
def test_rollback_missing_deployment():
assert_command(
['dcos', 'marathon', 'deployment', 'rollback', 'missing-deployment'],
returncode=1,
stderr=b'Error: DeploymentPlan missing-deployment does not exist\n')
def test_rollback_deployment():
with _zero_instance_app():
start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
result = list_deployments(1, 'zero-instance-app')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'deployment', 'rollback', result[0]['id']])
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
assert 'deploymentId' in result
assert 'version' in result
assert stderr == b''
watch_all_deployments()
list_deployments(0)
def test_stop_deployment():
with _zero_instance_app():
start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
result = list_deployments(1, 'zero-instance-app')
assert_command(
['dcos', 'marathon', 'deployment', 'stop', result[0]['id']])
list_deployments(0)
def test_watching_missing_deployment():
watch_deployment('missing-deployment', 1)
def test_watching_deployment():
with _zero_instance_app():
start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
result = list_deployments(1, 'zero-instance-app')
watch_deployment(result[0]['id'], 60)
assert_command(
['dcos', 'marathon', 'deployment', 'stop', result[0]['id']])
list_deployments(0, 'zero-instance-app')
def test_list_empty_task():
_list_tasks(0)
def test_list_empty_task_not_running_app():
with _zero_instance_app():
_list_tasks(0)
def test_list_tasks():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
_list_tasks(3)
def test_list_tasks_table():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
assert_lines(['dcos', 'marathon', 'task', 'list'], 4)
def test_list_app_tasks():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
_list_tasks(3, 'zero-instance-app')
def test_list_missing_app_tasks():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
_list_tasks(0, 'missing-id')
def test_show_missing_task():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'task', 'show', 'missing-id'])
stderr = stderr.decode('utf-8')
assert returncode == 1
assert stdout == b''
assert stderr.startswith("Task '")
assert stderr.endswith("' does not exist\n")
def test_show_task():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
result = _list_tasks(3, 'zero-instance-app')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'task', 'show', result[0]['id']])
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
assert result['appId'] == '/zero-instance-app'
assert stderr == b''
def test_stop_task():
with _zero_instance_app():
start_app('zero-instance-app', 1)
watch_all_deployments()
task_list = _list_tasks(1, 'zero-instance-app')
task_id = task_list[0]['id']
_stop_task(task_id)
def test_stop_task_wipe():
with _zero_instance_app():
start_app('zero-instance-app', 1)
watch_all_deployments()
task_list = _list_tasks(1, 'zero-instance-app')
task_id = task_list[0]['id']
_stop_task(task_id, '--wipe')
def test_kill_one_task():
with _zero_instance_app():
start_app('zero-instance-app', 1)
watch_all_deployments()
task_list = _list_tasks(1, 'zero-instance-app')
task_id = [task_list[0]['id']]
_kill_task(task_id)
def test_kill_two_tasks():
with _zero_instance_app():
start_app('zero-instance-app', 2)
watch_all_deployments()
task_list = _list_tasks(2, 'zero-instance-app')
task_ids = [task['id'] for task in task_list]
_kill_task(task_ids)
def test_kill_and_scale_task():
with _zero_instance_app():
start_app('zero-instance-app', 2)
watch_all_deployments()
task_list = _list_tasks(2, 'zero-instance-app')
task_id = [task_list[0]['id']]
_kill_task(task_id, scale=True)
task_list = _list_tasks(1, 'zero-instance-app')
def test_kill_unknown_task():
with _zero_instance_app():
start_app('zero-instance-app')
watch_all_deployments()
task_id = ['unknown-task-id']
_kill_task(task_id, expect_success=False)
def test_kill_task_wipe():
with _zero_instance_app():
start_app('zero-instance-app', 1)
watch_all_deployments()
task_list = _list_tasks(1, 'zero-instance-app')
task_id = [task_list[0]['id']]
_kill_task(task_id, wipe=True)
def test_stop_unknown_task():
with _zero_instance_app():
start_app('zero-instance-app')
watch_all_deployments()
task_id = 'unknown-task-id'
_stop_task(task_id, expect_success=False)
def test_stop_unknown_task_wipe():
with _zero_instance_app():
start_app('zero-instance-app')
watch_all_deployments()
task_id = 'unknown-task-id'
_stop_task(task_id, '--wipe', expect_success=False)
def test_bad_configuration(env):
with update_config('marathon.url', 'http://localhost:88888', env):
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'about'], env=env)
assert returncode == 1
def test_app_locked_error():
with app('tests/data/marathon/apps/sleep_many_instances.json',
'/sleep-many-instances',
wait=False):
stderr = b'Changes blocked: deployment already in progress for app.\n'
assert_command(
['dcos', 'marathon', 'app', 'stop', 'sleep-many-instances'],
returncode=1,
stderr=stderr)
def test_ping():
assert_command(['dcos', 'marathon', 'ping'],
stdout=b'Marathon ping response[1x]: "pong"\n')
def test_leader_show():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'leader', 'show', '--json'])
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
assert stderr == b''
assert result['host'] == "marathon.mesos."
assert 'ip' in result
def ignore_exception(exc):
return isinstance(exc, Exception)
@pytest.fixture
def marathon_up():
yield
@retrying.retry(stop_max_delay=timedelta(minutes=5).total_seconds() * 1000,
retry_on_exception=ignore_exception)
def check_marathon_up():
# testing to see if marathon is up and can talk through the gateway
        # ignore the exception until we have a successful response.
returncode, _, _ = exec_command(['dcos', 'marathon', 'app', 'list'])
assert returncode == 0
check_marathon_up()
@retrying.retry(stop_max_delay=timedelta(minutes=5).total_seconds() * 1000,
retry_on_exception=ignore_exception)
def wait_marathon_down():
returncode, _, _ = exec_command(['dcos', 'marathon', 'app', 'list'])
assert returncode != 0
def test_leader_delete(marathon_up):
assert_command(['dcos', 'marathon', 'leader', 'delete'],
stdout=b'Leadership abdicated\n')
# There might be a slight delay until marathon shows itself as down,
# so marathon_up() might succeed directly and the next tests would
# run with an unhealthy marathon. Explicitly wait for marathon to
# go down before waiting for it to become healthy again.
wait_marathon_down()
@pytest.mark.skipif(sys.platform == 'win32',
reason="No pseudo terminal on windows")
def test_app_add_no_tty():
proc, master = popen_tty('dcos marathon app add')
stdout, stderr = proc.communicate()
os.close(master)
print(stdout)
print(stderr)
assert proc.wait() == 1
assert stdout == b''
assert stderr == (b"We currently don't support reading from the TTY. "
b"Please specify an application JSON.\n"
b"E.g.: dcos marathon app add < app_resource.json\n")
def _update_app(app_id, file_path):
with open(file_path) as fd:
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'update', app_id],
stdin=fd)
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
def _list_versions(app_id, expected_min_count, max_count=None):
cmd = ['dcos', 'marathon', 'app', 'version', 'list', app_id]
if max_count is not None:
cmd.append('--max-count={}'.format(max_count))
returncode, stdout, stderr = exec_command(cmd)
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
assert isinstance(result, list)
assert stderr == b''
# Marathon persists app versions indefinitely by ID, so there may be extras
assert len(result) >= expected_min_count
if max_count is not None:
assert len(result) <= max_count
def _list_tasks(expected_count=None, app_id=None):
cmd = ['dcos', 'marathon', 'task', 'list', '--json']
if app_id is not None:
cmd.append(app_id)
returncode, stdout, stderr = exec_command(cmd)
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
if expected_count:
assert len(result) == expected_count
assert stderr == b''
return result
def _stop_task(task_id, wipe=None, expect_success=True):
cmd = ['dcos', 'marathon', 'task', 'stop', task_id]
if wipe is not None:
cmd.append('--wipe')
returncode, stdout, stderr = exec_command(cmd)
if expect_success:
assert returncode == 0
assert stderr == b''
result = json.loads(stdout.decode('utf-8'))
assert result['id'] == task_id
else:
assert returncode == 1
def _kill_task(task_ids, scale=None, wipe=None, expect_success=True):
cmd = ['dcos', 'marathon', 'task', 'kill', '--json'] + task_ids
if scale:
cmd.append('--scale')
if wipe:
cmd.append('--wipe')
returncode, stdout, stderr = exec_command(cmd)
if expect_success:
assert returncode == 0
assert stderr == b''
result = json.loads(stdout.decode('utf-8'))
if scale:
assert 'deploymentId' in result
else:
assert sorted(
[task['id'] for task in result['tasks']]) == sorted(task_ids)
else:
assert returncode == 1
@contextlib.contextmanager
def _zero_instance_app():
with app('tests/data/marathon/apps/zero_instance_sleep.json',
'zero-instance-app'):
yield
@contextlib.contextmanager
def _zero_instance_app_through_http():
class JSONRequestHandler (BaseHTTPRequestHandler):
def do_GET(self): # noqa: N802
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(open(
'tests/data/marathon/apps/zero_instance_sleep.json',
'rb').read())
host = 'localhost'
port = 12345
server = HTTPServer((host, port), JSONRequestHandler)
thread = threading.Thread(target=server.serve_forever)
thread.setDaemon(True)
thread.start()
with app('http://{}:{}'.format(host, port), 'zero-instance-app'):
try:
yield
finally:
server.shutdown()
|
vnrpc.py
|
# encoding: UTF-8
import threading
import traceback
import signal
import zmq
from msgpack import packb, unpackb
from json import dumps, loads
import cPickle
pDumps = cPickle.dumps
pLoads = cPickle.loads
# Allow Ctrl-C to interrupt blocking recv() calls
signal.signal(signal.SIGINT, signal.SIG_DFL)
########################################################################
class RpcObject(object):
"""
    RPC object
    Provides pack/unpack interfaces for data serialization; json, msgpack and cPickle are currently supported.
    msgpack: best performance, but usually requires installing the msgpack packages;
    json: slightly slower but the most portable, since nearly every language ships a json library;
    cPickle: average performance and Python-only, but it can transfer Python objects directly, which is very convenient.
    Therefore msgpack is recommended; fall back to json when talking to languages that lack msgpack,
    and use cPickle when the transferred data contains many custom Python objects.
    Other serialization tools can be added here as well.
"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
        # choose the default serialization tool
#self.useMsgpack()
self.usePickle()
#----------------------------------------------------------------------
def pack(self, data):
"""打包"""
pass
#----------------------------------------------------------------------
def unpack(self, data):
"""解包"""
pass
#----------------------------------------------------------------------
def __jsonPack(self, data):
"""使用json打包"""
return dumps(data)
#----------------------------------------------------------------------
def __jsonUnpack(self, data):
"""使用json解包"""
return loads(data)
#----------------------------------------------------------------------
def __msgpackPack(self, data):
"""使用msgpack打包"""
return packb(data)
#----------------------------------------------------------------------
def __msgpackUnpack(self, data):
"""使用msgpack解包"""
return unpackb(data)
#----------------------------------------------------------------------
def __picklePack(self, data):
"""使用cPickle打包"""
return pDumps(data)
#----------------------------------------------------------------------
def __pickleUnpack(self, data):
"""使用cPickle解包"""
return pLoads(data)
#----------------------------------------------------------------------
def useJson(self):
"""使用json作为序列化工具"""
self.pack = self.__jsonPack
self.unpack = self.__jsonUnpack
#----------------------------------------------------------------------
def useMsgpack(self):
"""使用msgpack作为序列化工具"""
self.pack = self.__msgpackPack
self.unpack = self.__msgpackUnpack
#----------------------------------------------------------------------
def usePickle(self):
"""使用cPickle作为序列化工具"""
self.pack = self.__picklePack
self.unpack = self.__pickleUnpack
########################################################################
class RpcServer(RpcObject):
"""RPC服务器"""
#----------------------------------------------------------------------
def __init__(self, repAddress, pubAddress):
"""Constructor"""
super(RpcServer, self).__init__()
        # dict of registered functions: key is the function name, value is the function object
        self.__functions = {}
        # zmq sockets
        self.__context = zmq.Context()
        self.__socketREP = self.__context.socket(zmq.REP)    # request/reply socket
        self.__socketREP.bind(repAddress)
        self.__socketPUB = self.__context.socket(zmq.PUB)    # broadcast (publish) socket
        self.__socketPUB.bind(pubAddress)
        # worker thread
        self.__active = False                                # server running state
        self.__thread = threading.Thread(target=self.run)    # server worker thread
#----------------------------------------------------------------------
def start(self):
"""启动服务器"""
# 将服务器设为启动
self.__active = True
# 启动工作线程
if not self.__thread.isAlive():
self.__thread.start()
#----------------------------------------------------------------------
def stop(self, join=False):
"""停止服务器"""
# 将服务器设为停止
self.__active = False
# 等待工作线程退出
if join and self.__thread.isAlive():
self.__thread.join()
#----------------------------------------------------------------------
def run(self):
"""服务器运行函数"""
while self.__active:
# 使用poll来等待事件到达,等待1秒(1000毫秒)
if not self.__socketREP.poll(1000):
continue
# 从请求响应socket收取请求数据
reqb = self.__socketREP.recv()
# 序列化解包
req = self.unpack(reqb)
# 获取函数名和参数
name, args, kwargs = req
# 获取引擎中对应的函数对象,并执行调用,如果有异常则捕捉后返回
try:
func = self.__functions[name]
r = func(*args, **kwargs)
rep = [True, r]
except Exception as e:
rep = [False, traceback.format_exc()]
# 序列化打包
repb = self.pack(rep)
# 通过请求响应socket返回调用结果
self.__socketREP.send(repb)
#----------------------------------------------------------------------
def publish(self, topic, data):
"""
        Broadcast (publish) data
        topic: topic string (note: must be ascii encoded)
        data: the payload to publish
        """
        # serialize the data
        datab = self.pack(data)
        # send the data over the publish socket
        self.__socketPUB.send_multipart([topic, datab])
#----------------------------------------------------------------------
def register(self, func):
"""注册函数"""
self.__functions[func.__name__] = func
########################################################################
class RpcClient(RpcObject):
"""RPC客户端"""
#----------------------------------------------------------------------
def __init__(self, reqAddress, subAddress):
"""Constructor"""
super(RpcClient, self).__init__()
        # zmq sockets
        self.__reqAddress = reqAddress
        self.__subAddress = subAddress
        self.__context = zmq.Context()
        self.__socketREQ = self.__context.socket(zmq.REQ)    # request socket
        self.__socketSUB = self.__context.socket(zmq.SUB)    # broadcast subscription socket
        # worker thread, used to handle data pushed by the server
        self.__active = False                                # client running state
        self.__thread = threading.Thread(target=self.run)    # client worker thread
#----------------------------------------------------------------------
def __getattr__(self, name):
"""实现远程调用功能"""
# 执行远程调用任务
def dorpc(*args, **kwargs):
# 生成请求
req = [name, args, kwargs]
# 序列化打包请求
reqb = self.pack(req)
# 发送请求并等待回应
self.__socketREQ.send(reqb)
repb = self.__socketREQ.recv()
# 序列化解包回应
rep = self.unpack(repb)
# 若正常则返回结果,调用失败则触发异常
if rep[0]:
return rep[1]
else:
raise RemoteException(rep[1])
return dorpc
#----------------------------------------------------------------------
def start(self):
"""启动客户端"""
# 连接端口
self.__socketREQ.connect(self.__reqAddress)
self.__socketSUB.connect(self.__subAddress)
# 将服务器设为启动
self.__active = True
# 启动工作线程
if not self.__thread.isAlive():
self.__thread.start()
#----------------------------------------------------------------------
def stop(self):
"""停止客户端"""
# 将客户端设为停止
self.__active = False
# 等待工作线程退出
if self.__thread.isAlive():
self.__thread.join()
#----------------------------------------------------------------------
def run(self):
"""客户端运行函数"""
while self.__active:
# 使用poll来等待事件到达,等待1秒(1000毫秒)
if not self.__socketSUB.poll(1000):
continue
# 从订阅socket收取广播数据
topic, datab = self.__socketSUB.recv_multipart()
# 序列化解包
data = self.unpack(datab)
# 调用回调函数处理
self.callback(topic, data)
#----------------------------------------------------------------------
def callback(self, topic, data):
"""回调函数,必须由用户实现"""
raise NotImplementedError
#----------------------------------------------------------------------
def subscribeTopic(self, topic):
"""
        Subscribe to broadcast data for a specific topic
        Use topic='' to subscribe to all topics
        Note: topic must be ascii encoded
"""
self.__socketSUB.setsockopt(zmq.SUBSCRIBE, topic)
########################################################################
class RemoteException(Exception):
"""RPC远程异常"""
#----------------------------------------------------------------------
def __init__(self, value):
"""Constructor"""
self.__value = value
#----------------------------------------------------------------------
def __str__(self):
"""输出错误信息"""
return self.__value
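########################################################################
# Illustrative usage sketch (added; not part of the original module, addresses
# and function names are placeholders):
#
#     # server side
#     def add(x, y):
#         return x + y
#     server = RpcServer('tcp://*:2014', 'tcp://*:2015')
#     server.register(add)
#     server.start()
#     server.publish('heartbeat', {'alive': True})
#
#     # client side (callback() must be overridden in a subclass to consume broadcasts)
#     client = RpcClient('tcp://localhost:2014', 'tcp://localhost:2015')
#     client.subscribeTopic('')          # '' subscribes to every topic
#     client.start()
#     print(client.add(1, 2))            # executed remotely on the server -> 3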
|
flipcam.py
|
import cv2
import time
import threading
import queue
import fire
class StreamCameraReader:
def __init__(self, camera_id):
self._stream_buffer = queue.LifoQueue(1)
self._camera_id = camera_id
self.stop_flag = False
def start(self):
def read_frames():
cap = cv2.VideoCapture(self._camera_id)
succeed = True
while not self.stop_flag and succeed:
succeed, frame = cap.read()
if self._stream_buffer.full():
self._stream_buffer.get()
self._stream_buffer.put(frame)
cap.release()
self._worker = threading.Thread(target=read_frames)
self._worker.start()
def get_frame(self):
return self._stream_buffer.get()
def stop(self):
self.stop_flag = True
def join(self):
self._worker.join()
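# Design note (added): the LifoQueue with maxsize=1 acts as a "latest frame"
# buffer; the reader thread drops any stale frame before putting a new one,
# so get_frame() returns the most recent capture instead of lagging behind.
#
#     reader = StreamCameraReader(0)
#     reader.start()
#     frame = reader.get_frame()
#     reader.stop()
#     reader.join()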
def show_flipped(camera, interval, pct):
camera = int(camera)
interval = float(interval)
pct = float(pct)
camera_reader = StreamCameraReader(camera)
camera_reader.start()
dt0, dt1, dt2 = 0, 0, 0
i = 0
while True:
t_epoch = time.time()
t0 = time.time()
# Capture frame-by-frame
frame = camera_reader.get_frame()
dt0 += time.time() - t0
t0 = time.time()
# downsampling
H0, W0, _ = frame.shape
new_size = (int(W0 * pct), int(H0 * pct))
frame = cv2.resize(frame, new_size)
        # flip both image axes (vertical and horizontal, i.e. a 180 degree rotation)
frame_f = frame[::-1, ::-1, :]
dt1 += time.time() - t0
t0 = time.time()
# Display the resulting frame
cv2.imshow('WebCam Stream', frame_f)
dt2 += time.time() - t0
t0 = time.time()
dt = time.time() - t_epoch
residual_interval_ms = int(max(interval - dt * 1000, 1))
fps = 1 / (residual_interval_ms / 1000 + dt)
i += 1
key = cv2.waitKey(residual_interval_ms) & 0xFF
if key == ord('q'):
camera_reader.stop()
break
if key == ord('c'):
cv2.imwrite('sample.jpg', frame)
print('Image saved')
print('FPS=%.2f' % fps)
print('Report of time cost')
print('Frame capture: %.2fms' % (dt0 / i * 1000))
    print('Flipping: %.2fms' % (dt1 / i * 1000))
print('Frame display %.2fms' % (dt2 / i * 1000))
cv2.destroyAllWindows()
camera_reader.join()
class Main:
def show_flipped(self, camera=0, interval=10, pct=0.5):
        # Show flipped camera stream
        # camera: Camera ID, an integer, default is 0
        # interval: Minimal interval between frames, in milliseconds,
        #           default is 10
        # pct: Downsampling rate (for faster display speed)
show_flipped(camera, interval, pct)
def capture_sample(self, camera=0, output='sample.jpg'):
        # Capture a single image
camera_reader = StreamCameraReader(camera)
camera_reader.start()
frame = camera_reader.get_frame()
cv2.imwrite(output, frame)
camera_reader.stop()
camera_reader.join()
if __name__ == '__main__':
fire.Fire(Main)
|
monitor_all_redis.py
|
import datetime
import threading
import redis
import config
class Monitor():
def __init__(self, connection_pool):
self.connection_pool = connection_pool
self.connection = None
def __del__(self):
try:
self.reset()
except:
pass
def reset(self):
if self.connection:
self.connection_pool.release(self.connection)
self.connection = None
def monitor(self):
if self.connection is None:
self.connection = self.connection_pool.get_connection(
'monitor', None)
self.connection.send_command("monitor")
return self.listen()
def parse_response(self):
return self.connection.read_response()
def listen(self):
while True:
yield self.parse_response()
def run_monitor(address):
host, port = address.split(':')
pool = redis.ConnectionPool(host=host, port=port)
monitor = Monitor(pool)
commands = monitor.monitor()
for c in commands:
print(address, datetime.datetime.now(), c)
# Need to put this in your /etc/hosts
# 127.0.0.1 redis6000
# 127.0.0.1 redis6001
# ...
if __name__ == '__main__':
redis_ports = config.DOCKER_COMPOSE_CONFIG['redis_ports']
redis_addresses = ['redis{}:{}'.format(p, p) for p in redis_ports]
    for address in redis_addresses:
        # Thread args bind each address eagerly; a bare lambda closing over the
        # loop variable would be evaluated late, so every thread could end up
        # monitoring the same (last) address.
        threading.Thread(target=run_monitor, args=(address,)).start()
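        # Equivalent alternative (illustrative): bind the value at lambda-definition
        # time with a default argument, e.g.
        #     threading.Thread(target=lambda addr=address: run_monitor(addr)).start()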
|
__init__.py
|
import inspect
import socket
import json
import requests
import multiprocessing
__version__ = "0.0.6"
def get_class_that_defined_method(method):
"""
Returns the class that defined the method
Got implementation from stackoverflow
http://stackoverflow.com/a/25959545/3903832
"""
# for bound methods
if inspect.ismethod(method):
for cls in inspect.getmro(method.__self__.__class__):
if cls.__dict__.get(method.__name__) is method:
return cls.__name__
method = method.__func__ # fallback to __qualname__ parsing
# for unbound methods
if inspect.isfunction(method):
cls = getattr(inspect.getmodule(method),
method.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0])
if isinstance(cls, type):
return cls.__name__
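# Illustrative example (class and method names below are hypothetical):
#     class Job:
#         def run(self):
#             pass
#     get_class_that_defined_method(Job.run)    # -> 'Job'
#     get_class_that_defined_method(Job().run)  # -> 'Job'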
def convert_functions_in_dict_to_values(dict_to_convert):
"""
When passed a dictionary that contains functions as some of its
values, it converts them to their responses
"""
return {key: value() if hasattr(value, '__call__') else value for key, value in dict_to_convert.items()}
def send_report(report_server_url, data_as_dict={}, headers={}):
"""
Makes a POST request to the report server. Ideally,
the server should be able to upsert the old record
because this POST request will be made every time the function is run
"""
processed_data = convert_functions_in_dict_to_values(data_as_dict)
response = requests.post(report_server_url, data=json.dumps(processed_data),
headers={'Content-Type': 'application/json', **headers})
if not response.ok:
raise requests.exceptions.HTTPError('Sending report failed. \nresponse:\n %s' % response.reason)
def external_function_monitor(report_server_url, headers={}, **data):
def decorator(function):
def wrapper(*args, **kwargs):
"""the wrapper function"""
function_name = function.__name__
class_name = get_class_that_defined_method(function)
if class_name:
function_name = '%s.%s' %(class_name, function_name)
host_name = socket.gethostname()
report_data = {'function_name': function_name, 'host_name': host_name, **data}
# send_report(report_server_url, data_as_dict=report_data)
reporting = multiprocessing.Process(target=send_report, args=(report_server_url,),
kwargs={'data_as_dict': report_data, 'headers': headers})
reporting.start()
return function(*args, **kwargs)
return wrapper
return decorator
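# Usage sketch (illustrative only; the URL, header and extra field are assumptions):
#
#     @external_function_monitor('http://report-server.example/api/reports',
#                                headers={'Authorization': 'Token abc'},
#                                job='nightly-sync')
#     def sync_data():
#         ...
#
# Each call to sync_data() spawns a short-lived process that POSTs
# {'function_name': 'sync_data', 'host_name': <host>, 'job': 'nightly-sync'}
# to the report server while the wrapped function runs.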
|
task.py
|
""" Backend task management support """
import itertools
import logging
import os
import re
from enum import Enum
from tempfile import gettempdir
from multiprocessing import RLock
from threading import Thread
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import six
from collections import OrderedDict
from six.moves.urllib.parse import quote
from ...utilities.locks import RLock as FileRLock
from ...backend_interface.task.development.worker import DevWorker
from ...backend_api import Session
from ...backend_api.services import tasks, models, events, projects
from pathlib2 import Path
from ...utilities.pyhocon import ConfigTree, ConfigFactory
from ..base import IdObjectBase
from ..metrics import Metrics, Reporter
from ..model import Model
from ..setupuploadmixin import SetupUploadMixin
from ..util import make_message, get_or_create_project, get_single_result, \
exact_match_regex
from ...config import get_config_for_bucket, get_remote_task_id, TASK_ID_ENV_VAR, get_log_to_backend, \
running_remotely, get_cache_dir, DOCKER_IMAGE_ENV_VAR
from ...debugging import get_logger
from ...debugging.log import LoggerRoot
from ...storage import StorageHelper
from ...storage.helper import StorageError
from .access import AccessMixin
from .log import TaskHandler
from .repo import ScriptInfo
from ...config import config, PROC_MASTER_ID_ENV_VAR
class Task(IdObjectBase, AccessMixin, SetupUploadMixin):
""" Task manager providing task object access and management. Includes read/write access to task-associated
frames and models.
"""
_anonymous_dataview_id = '__anonymous__'
_development_tag = 'development'
_store_diff = config.get('development.store_uncommitted_code_diff', False)
class TaskTypes(Enum):
def __str__(self):
return str(self.value)
training = 'training'
testing = 'testing'
def __init__(self, session=None, task_id=None, log=None, project_name=None,
task_name=None, task_type=TaskTypes.training, log_to_backend=True,
raise_on_validation_errors=True, force_create=False):
"""
Create a new task instance.
:param session: Optional API Session instance. If not provided, a default session based on the system's
configuration will be used.
:type session: Session
:param task_id: Optional task ID. If not provided, a new task will be created using the API
and its information reflected in the resulting instance.
:type task_id: string
        :param log: Optional log to be used. If not provided, an internal log shared with all backend objects will be
used instead.
:type log: logging.Logger
:param project_name: Optional project name, used only if a new task is created. The new task will be associated
with a project by this name. If no such project exists, a new project will be created using the API.
:type project_name: str
        :param task_name: Optional task name, used only if a new task is created.
        :type task_name: str
        :param task_type: Optional task type, used only if a new task is created. Default is training task.
        :type task_type: str (see tasks.TaskTypeEnum)
:param log_to_backend: If True, all calls to the task's log will be logged to the backend using the API.
This value can be overridden using the environment variable TRAINS_LOG_TASK_TO_BACKEND.
:type log_to_backend: bool
:param force_create: If True a new task will always be created (task_id, if provided, will be ignored)
:type force_create: bool
"""
task_id = self._resolve_task_id(task_id, log=log) if not force_create else None
self.__edit_lock = None
super(Task, self).__init__(id=task_id, session=session, log=log)
self._project_name = None
self._storage_uri = None
self._input_model = None
self._output_model = None
self._metrics_manager = None
self._reporter = None
self._curr_label_stats = {}
self._raise_on_validation_errors = raise_on_validation_errors
self._parameters_allowed_types = (
six.string_types + six.integer_types + (six.text_type, float, list, tuple, dict, type(None))
)
self._app_server = None
self._files_server = None
self._initial_iteration_offset = 0
if not task_id:
# generate a new task
self.id = self._auto_generate(project_name=project_name, task_name=task_name, task_type=task_type)
else:
# this is an existing task, let's try to verify stuff
self._validate()
self._project_name = (self.project, project_name)
if running_remotely() or DevWorker.report_stdout:
log_to_backend = False
self._log_to_backend = log_to_backend
self._setup_log(default_log_to_backend=log_to_backend)
def _setup_log(self, default_log_to_backend=None, replace_existing=False):
"""
Setup logging facilities for this task.
:param default_log_to_backend: Should this task log to the backend. If not specified, value for this option
will be obtained from the environment, with this value acting as a default in case configuration for this is
missing.
If the value for this option is false, we won't touch the current logger configuration regarding TaskHandler(s)
:param replace_existing: If True and another task is already logging to the backend, replace the handler with
a handler for this task.
"""
        # Make sure urllib3 is never in debug/info
disable_urllib3_info = config.get('log.disable_urllib3_info', True)
if disable_urllib3_info and logging.getLogger('urllib3').isEnabledFor(logging.INFO):
logging.getLogger('urllib3').setLevel(logging.WARNING)
log_to_backend = get_log_to_backend(default=default_log_to_backend) or self._log_to_backend
if not log_to_backend:
return
# Handle the root logger and our own logger. We use set() to make sure we create no duplicates
# in case these are the same logger...
loggers = {logging.getLogger(), LoggerRoot.get_base_logger()}
# Find all TaskHandler handlers for these loggers
handlers = {logger: h for logger in loggers for h in logger.handlers if isinstance(h, TaskHandler)}
if handlers and not replace_existing:
# Handlers exist and we shouldn't replace them
return
# Remove all handlers, we'll add new ones
for logger, handler in handlers.items():
logger.removeHandler(handler)
# Create a handler that will be used in all loggers. Since our handler is a buffering handler, using more
# than one instance to report to the same task will result in out-of-order log reports (grouped by whichever
# handler instance handled them)
backend_handler = TaskHandler(self.session, self.task_id)
        # Add backend handler to both loggers:
        # 1. to the root logger
        # 2. to our own logger as well, since our logger is not propagated to the root logger
        #    (if we propagate, our logger will be caught by the root handlers as well, and
        #    we do not want that)
for logger in loggers:
logger.addHandler(backend_handler)
def _validate(self, check_output_dest_credentials=True):
raise_errors = self._raise_on_validation_errors
output_dest = self.get_output_destination(raise_on_error=False, log_on_error=False)
if output_dest and check_output_dest_credentials:
try:
self.log.info('Validating output destination')
conf = get_config_for_bucket(base_url=output_dest)
if not conf:
msg = 'Failed resolving output destination (no credentials found for %s)' % output_dest
self.log.warning(msg)
if raise_errors:
raise Exception(msg)
else:
StorageHelper._test_bucket_config(conf=conf, log=self.log, raise_on_error=raise_errors)
except StorageError:
raise
except Exception as ex:
self.log.error('Failed trying to verify output destination: %s' % ex)
@classmethod
def _resolve_task_id(cls, task_id, log=None):
if not task_id:
task_id = cls.normalize_id(get_remote_task_id())
if task_id:
log = log or get_logger('task')
log.info('Using task ID from env %s=%s' % (TASK_ID_ENV_VAR[0], task_id))
return task_id
def _update_repository(self):
def check_package_update():
try:
# check latest version
from ...utilities.check_updates import CheckPackageUpdates
latest_version = CheckPackageUpdates.check_new_package_available(only_once=True)
if latest_version:
if not latest_version[1]:
sep = os.linesep
self.get_logger().report_text(
'TRAINS new package available: UPGRADE to v{} is recommended!\nRelease Notes:\n{}'.format(
latest_version[0], sep.join(latest_version[2])),
)
else:
self.get_logger().report_text(
'TRAINS new version available: upgrade to v{} is recommended!'.format(
latest_version[0]),
)
except Exception:
pass
# get repository and create requirements.txt from code base
try:
check_package_update_thread = Thread(target=check_package_update)
check_package_update_thread.daemon = True
check_package_update_thread.start()
# do not request requirements, because it might be a long process, and we first want to update the git repo
result, script_requirements = ScriptInfo.get(
log=self.log, create_requirements=False, check_uncommitted=self._store_diff
)
for msg in result.warning_messages:
self.get_logger().report_text(msg)
self.data.script = result.script
# Since we might run asynchronously, don't use self.data (lest someone else
# overwrite it before we have a chance to call edit)
self._edit(script=result.script)
self.reload()
# if jupyter is present, requirements will be created in the background, when saving a snapshot
if result.script and script_requirements:
requirements, conda_requirements = script_requirements.get_requirements()
if requirements:
if not result.script['requirements']:
result.script['requirements'] = {}
result.script['requirements']['pip'] = requirements
result.script['requirements']['conda'] = conda_requirements
self._update_requirements(result.script.get('requirements') or '')
self.reload()
# we do not want to wait for the check version thread,
# because someone might wait for us to finish the repo detection update
except Exception as e:
get_logger('task').debug(str(e))
def _auto_generate(self, project_name=None, task_name=None, task_type=TaskTypes.training):
created_msg = make_message('Auto-generated at %(time)s by %(user)s@%(host)s')
project_id = None
if project_name:
project_id = get_or_create_project(self, project_name, created_msg)
tags = [self._development_tag] if not running_remotely() else []
extra_properties = {'system_tags': tags} if Session.check_min_api_version('2.3') else {'tags': tags}
req = tasks.CreateRequest(
name=task_name or make_message('Anonymous task (%(user)s@%(host)s %(time)s)'),
type=tasks.TaskTypeEnum(task_type.value),
comment=created_msg,
project=project_id,
input={'view': {}},
**extra_properties
)
res = self.send(req)
return res.response.id
def _set_storage_uri(self, value):
value = value.rstrip('/') if value else None
self._storage_uri = StorageHelper.conform_url(value)
self.data.output.destination = self._storage_uri
self._edit(output_dest=self._storage_uri or ('' if Session.check_min_api_version('2.3') else None))
if self._storage_uri or self._output_model:
self.output_model.upload_storage_uri = self._storage_uri
@property
def storage_uri(self):
if self._storage_uri:
return self._storage_uri
if running_remotely():
return self.data.output.destination
else:
return None
@storage_uri.setter
def storage_uri(self, value):
self._set_storage_uri(value)
@property
def task_id(self):
return self.id
@property
def name(self):
return self.data.name or ''
@name.setter
def name(self, value):
self.set_name(value)
@property
def task_type(self):
return self.data.type
@property
def project(self):
return self.data.project
@property
def parent(self):
return self.data.parent
@property
def input_model_id(self):
return self.data.execution.model
@property
def output_model_id(self):
return self.data.output.model
@property
def comment(self):
return self.data.comment or ''
@comment.setter
def comment(self, value):
self.set_comment(value)
@property
def cache_dir(self):
""" Cache dir used to store task related files """
return Path(get_cache_dir()) / self.id
@property
def status(self):
""" The task's status. In order to stay updated, we always reload the task info when this value is accessed. """
self.reload()
return self._status
@property
def _status(self):
""" Return the task's cached status (don't reload if we don't have to) """
return str(self.data.status)
@property
def input_model(self):
""" A model manager used to handle the input model object """
model_id = self._get_task_property('execution.model', raise_on_error=False)
if not model_id:
return None
if self._input_model is None:
self._input_model = Model(
session=self.session,
model_id=model_id,
cache_dir=self.cache_dir,
log=self.log,
upload_storage_uri=None)
return self._input_model
@property
def output_model(self):
""" A model manager used to manage the output model object """
if self._output_model is None:
self._output_model = self._get_output_model(upload_required=True)
return self._output_model
def create_output_model(self):
return self._get_output_model(upload_required=False, force=True)
def _get_output_model(self, upload_required=True, force=False):
return Model(
session=self.session,
model_id=None if force else self._get_task_property(
'output.model', raise_on_error=False, log_on_error=False),
cache_dir=self.cache_dir,
upload_storage_uri=self.storage_uri or self.get_output_destination(
raise_on_error=upload_required, log_on_error=upload_required),
upload_storage_suffix=self._get_output_destination_suffix('models'),
log=self.log)
@property
def metrics_manager(self):
""" A metrics manager used to manage the metrics related to this task """
return self._get_metrics_manager(self.get_output_destination())
@property
def reporter(self):
"""
Returns a simple metrics reporter instance
"""
if self._reporter is None:
self._setup_reporter()
return self._reporter
def _get_metrics_manager(self, storage_uri):
if self._metrics_manager is None:
self._metrics_manager = Metrics(
session=self.session,
task_id=self.id,
storage_uri=storage_uri,
storage_uri_suffix=self._get_output_destination_suffix('metrics'),
iteration_offset=self.get_initial_iteration()
)
return self._metrics_manager
def _setup_reporter(self):
try:
storage_uri = self.get_output_destination(log_on_error=False)
except ValueError:
storage_uri = None
self._reporter = Reporter(self._get_metrics_manager(storage_uri=storage_uri))
return self._reporter
def _get_output_destination_suffix(self, extra_path=None):
return '/'.join(quote(x, safe="'[]{}()$^,.; -_+-=") for x in
(self.get_project_name(), '%s.%s' % (self.name, self.data.id), extra_path) if x)
def _reload(self):
""" Reload the task object from the backend """
with self._edit_lock:
res = self.send(tasks.GetByIdRequest(task=self.id))
return res.response.task
def reset(self, set_started_on_success=True):
""" Reset the task. Task will be reloaded following a successful reset. """
self.send(tasks.ResetRequest(task=self.id))
if set_started_on_success:
self.started()
self.reload()
def started(self, ignore_errors=True):
""" Signal that this task has started """
return self.send(tasks.StartedRequest(self.id), ignore_errors=ignore_errors)
def stopped(self, ignore_errors=True):
""" Signal that this task has stopped """
return self.send(tasks.StoppedRequest(self.id), ignore_errors=ignore_errors)
def completed(self, ignore_errors=True):
""" Signal that this task has been completed """
if hasattr(tasks, 'CompletedRequest'):
return self.send(tasks.CompletedRequest(self.id, status_reason='completed'), ignore_errors=ignore_errors)
return self.send(tasks.StoppedRequest(self.id, status_reason='completed'), ignore_errors=ignore_errors)
def mark_failed(self, ignore_errors=True, status_reason=None, status_message=None):
""" Signal that this task has stopped """
return self.send(tasks.FailedRequest(self.id, status_reason=status_reason, status_message=status_message),
ignore_errors=ignore_errors)
def publish(self, ignore_errors=True):
""" Signal that this task will be published """
if str(self.status) != str(tasks.TaskStatusEnum.stopped):
raise ValueError("Can't publish, Task is not stopped")
resp = self.send(tasks.PublishRequest(self.id), ignore_errors=ignore_errors)
assert isinstance(resp.response, tasks.PublishResponse)
return resp
def update_model_desc(self, new_model_desc_file=None):
""" Change the task's model_desc """
with self._edit_lock:
self.reload()
execution = self._get_task_property('execution')
p = Path(new_model_desc_file)
if not p.is_file():
                raise IOError('model_desc file %s cannot be found' % new_model_desc_file)
new_model_desc = p.read_text()
model_desc_key = list(execution.model_desc.keys())[0] if execution.model_desc else 'design'
execution.model_desc[model_desc_key] = new_model_desc
res = self._edit(execution=execution)
return res.response
def update_output_model(self, model_uri, name=None, comment=None, tags=None):
"""
Update the task's output model.
Note that this method only updates the model's metadata using the API and does not upload any data. Use this
method to update the output model when you have a local model URI (e.g. storing the weights file locally and
providing a file://path/to/file URI)
:param model_uri: URI for the updated model weights file
:type model_uri: str
:param name: Optional updated model name
:type name: str
:param comment: Optional updated model description
:type comment: str
:param tags: Optional updated model tags
:type tags: [str]
"""
self._conditionally_start_task()
self._get_output_model(upload_required=False).update_for_task(model_uri, self.id, name, comment, tags)
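    # Illustrative example (path, name and comment are assumptions): register locally
    # stored weights with the backend without uploading the file itself:
    #     task.update_output_model('file:///tmp/weights.h5', name='resnet50',
    #                              comment='epoch 10 checkpoint')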
def update_output_model_and_upload(
self, model_file, name=None, comment=None, tags=None, async_enable=False, cb=None, iteration=None):
"""
Update the task's output model weights file. File is first uploaded to the preconfigured output destination (see
        task's output.destination property or call setup_upload()), then the model object associated with the task is
updated using an API call with the URI of the uploaded file (and other values provided by additional arguments)
:param model_file: Path to the updated model weights file
:type model_file: str
:param name: Optional updated model name
:type name: str
:param comment: Optional updated model description
:type comment: str
:param tags: Optional updated model tags
:type tags: [str]
:param async_enable: Request asynchronous upload. If False, the call blocks until upload is completed and the
API call updating the model returns. If True, the call returns immediately, while upload and update are
scheduled in another thread. Default is False.
:type async_enable: bool
        :param cb: Asynchronous callback. If async_enable=True, this callback will be invoked once the asynchronous
            upload and update have completed.
        :return: The URI of the uploaded weights file. If async_enable=True, this is the expected URI, as the upload
            is probably still in progress.
"""
self._conditionally_start_task()
uri = self.output_model.update_for_task_and_upload(
model_file, self.id, name=name, comment=comment, tags=tags, async_enable=async_enable, cb=cb,
iteration=iteration
)
return uri
def _conditionally_start_task(self):
if str(self.status) == str(tasks.TaskStatusEnum.created):
self.started()
@property
def labels_stats(self):
""" Get accumulated label stats for the current/last frames iteration """
return self._curr_label_stats
def _accumulate_label_stats(self, roi_stats, reset=False):
if reset:
self._curr_label_stats = {}
for label in roi_stats:
if label in self._curr_label_stats:
self._curr_label_stats[label] += roi_stats[label]
else:
self._curr_label_stats[label] = roi_stats[label]
def set_input_model(self, model_id=None, model_name=None, update_task_design=True, update_task_labels=True):
"""
Set a new input model for this task. Model must be 'ready' in order to be used as the Task's input model.
:param model_id: ID for a model that exists in the backend. Required if model_name is not provided.
:param model_name: Model name. Required if model_id is not provided. If provided, this name will be used to
locate an existing model in the backend.
:param update_task_design: if True, the task's model design will be copied from the input model
:param update_task_labels: if True, the task's label enumeration will be copied from the input model
"""
if model_id is None and not model_name:
raise ValueError('Expected one of [model_id, model_name]')
if model_name:
# Try getting the model by name. Limit to 10 results.
res = self.send(
models.GetAllRequest(
name=exact_match_regex(model_name),
ready=True,
page=0,
page_size=10,
order_by=['-created'],
only_fields=['id', 'created']
)
)
model = get_single_result(entity='model', query=model_name, results=res.response.models, log=self.log)
model_id = model.id
if model_id:
res = self.send(models.GetByIdRequest(model=model_id))
model = res.response.model
if not model.ready:
# raise ValueError('Model %s is not published (not ready)' % model_id)
self.log.debug('Model %s [%s] is not published yet (not ready)' % (model_id, model.uri))
else:
# clear the input model
model = None
model_id = ''
with self._edit_lock:
self.reload()
# store model id
self.data.execution.model = model_id
# Auto populate input field from model, if they are empty
if update_task_design and not self.data.execution.model_desc:
self.data.execution.model_desc = model.design if model else ''
if update_task_labels and not self.data.execution.model_labels:
self.data.execution.model_labels = model.labels if model else {}
self._edit(execution=self.data.execution)
def set_parameters(self, *args, **kwargs):
"""
Set parameters for this task. This allows setting a complete set of key/value parameters, but does not support
        parameter descriptions (as the input is a dictionary or key/value pairs).
:param args: Positional arguments (one or more dictionary or (key, value) iterable). These will be merged into
a single key/value dictionary.
:param kwargs: Key/value pairs, merged into the parameters dictionary created from `args`.
"""
if not all(isinstance(x, (dict, Iterable)) for x in args):
raise ValueError('only dict or iterable are supported as positional arguments')
update = kwargs.pop('__update', False)
with self._edit_lock:
self.reload()
if update:
parameters = self.get_parameters()
else:
parameters = dict()
parameters.update(itertools.chain.from_iterable(x.items() if isinstance(x, dict) else x for x in args))
parameters.update(kwargs)
not_allowed = {
k: type(v).__name__
for k, v in parameters.items()
if not isinstance(v, self._parameters_allowed_types)
}
if not_allowed:
raise ValueError(
"Only builtin types ({}) are allowed for values (got {})".format(
', '.join(t.__name__ for t in self._parameters_allowed_types),
', '.join('%s=>%s' % p for p in not_allowed.items())),
)
# force cast all variables to strings (so that we can later edit them in UI)
parameters = {k: str(v) if v is not None else "" for k, v in parameters.items()}
execution = self.data.execution
if execution is None:
execution = tasks.Execution(parameters=parameters)
else:
execution.parameters = parameters
self._edit(execution=execution)
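    # Illustrative example (parameter names and values are assumptions): positional
    # dicts/iterables and keyword arguments are merged into a single flat mapping and
    # every value is stringified before being stored:
    #     task.set_parameters({'lr': 0.01, 'epochs': 10}, batch_size=32)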
def set_parameter(self, name, value, description=None):
"""
Set a single task parameter. This overrides any previous value for this parameter.
:param name: Parameter name
:param value: Parameter value
:param description: Parameter description (unused for now)
"""
self.set_parameters({name: value}, __update=True)
def get_parameter(self, name, default=None):
"""
Get a value for a parameter.
:param name: Parameter name
:param default: Default value
:return: Parameter value (or default value if parameter is not defined)
"""
params = self.get_parameters()
return params.get(name, default)
def update_parameters(self, *args, **kwargs):
"""
Update parameters for this task.
        This allows updating a complete set of key/value parameters, but does not support
        parameter descriptions (as the input is a dictionary or key/value pairs).
:param args: Positional arguments (one or more dictionary or (key, value) iterable). These will be merged into
a single key/value dictionary.
:param kwargs: Key/value pairs, merged into the parameters dictionary created from `args`.
"""
self.set_parameters(__update=True, *args, **kwargs)
def set_model_label_enumeration(self, enumeration=None):
"""
Set a dictionary of labels (text) to ids (integers) {str(label): integer(id)}
:param dict enumeration: For example: {str(label): integer(id)}
"""
enumeration = enumeration or {}
with self._edit_lock:
self.reload()
execution = self.data.execution
if enumeration is None:
return
if not (isinstance(enumeration, dict)
and all(isinstance(k, six.string_types) and isinstance(v, int) for k, v in enumeration.items())):
            raise ValueError('Expected enumeration to be a dict[str => int]')
execution.model_labels = enumeration
self._edit(execution=execution)
def _set_default_docker_image(self):
if not DOCKER_IMAGE_ENV_VAR.exists():
return
self.set_base_docker(DOCKER_IMAGE_ENV_VAR.get(default=""))
def set_base_docker(self, docker_cmd):
"""
Set the base docker image for this experiment
If provided, this value will be used by trains-agent to execute this experiment
inside the provided docker image.
"""
with self._edit_lock:
self.reload()
execution = self.data.execution
execution.docker_cmd = docker_cmd
self._edit(execution=execution)
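    # Illustrative example (image name is an assumption): when executed by
    # trains-agent, the experiment would then run inside the given docker image:
    #     task.set_base_docker('nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04')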
def get_base_docker(self):
"""Get the base docker command (image) set for this experiment"""
return self._get_task_property('execution.docker_cmd', raise_on_error=False, log_on_error=False)
def set_artifacts(self, artifacts_list=None):
"""
List of artifacts (tasks.Artifact) to update the task
:param list artifacts_list: list of artifacts (type tasks.Artifact)
"""
if not Session.check_min_api_version('2.3'):
return False
if not (isinstance(artifacts_list, (list, tuple))
and all(isinstance(a, tasks.Artifact) for a in artifacts_list)):
            raise ValueError('Expected artifacts_list to be a list of tasks.Artifact objects')
with self._edit_lock:
self.reload()
execution = self.data.execution
keys = [a.key for a in artifacts_list]
execution.artifacts = [a for a in execution.artifacts or [] if a.key not in keys] + artifacts_list
self._edit(execution=execution)
def _set_model_design(self, design=None):
with self._edit_lock:
self.reload()
execution = self.data.execution
if design is not None:
execution.model_desc = Model._wrap_design(design)
self._edit(execution=execution)
def get_labels_enumeration(self):
"""
Return a dictionary of labels (text) to ids (integers) {str(label): integer(id)}
:return: dict
"""
if not self.data or not self.data.execution:
return {}
return self.data.execution.model_labels
def get_model_design(self):
"""
Returns the model configuration as blob of text
:return:
"""
design = self._get_task_property("execution.model_desc", default={}, raise_on_error=False, log_on_error=False)
return Model._unwrap_design(design)
def set_output_model_id(self, model_id):
self.data.output.model = str(model_id)
self._edit(output=self.data.output)
def get_random_seed(self):
# fixed seed for the time being
return 1337
def set_random_seed(self, random_seed):
# fixed seed for the time being
pass
def set_project(self, project_id):
assert isinstance(project_id, six.string_types)
self._set_task_property("project", project_id)
self._edit(project=project_id)
def get_project_name(self):
if self.project is None:
return None
if self._project_name and self._project_name[1] is not None and self._project_name[0] == self.project:
return self._project_name[1]
res = self.send(projects.GetByIdRequest(project=self.project), raise_on_errors=False)
if not res or not res.response or not res.response.project:
return None
self._project_name = (self.project, res.response.project.name)
return self._project_name[1]
def get_tags(self):
return self._get_task_property("tags")
def set_system_tags(self, tags):
assert isinstance(tags, (list, tuple))
if Session.check_min_api_version('2.3'):
self._set_task_property("system_tags", tags)
self._edit(system_tags=self.data.system_tags)
else:
self._set_task_property("tags", tags)
self._edit(tags=self.data.tags)
def set_tags(self, tags):
assert isinstance(tags, (list, tuple))
if not Session.check_min_api_version('2.3'):
# not supported
return
self._set_task_property("tags", tags)
self._edit(tags=self.data.tags)
def set_name(self, name):
"""
        Set the name of the task.
:param name: The name of the task
:type name: str
"""
self._set_task_property("name", str(name))
self._edit(name=self.data.name)
def set_comment(self, comment):
"""
        Set the comment text of the task.
:param comment: The comment of the task
:type comment: str
"""
self._set_task_property("comment", str(comment))
self._edit(comment=comment)
def set_initial_iteration(self, offset=0):
"""
Set initial iteration, instead of zero. Useful when continuing training from previous checkpoints
:param int offset: Initial iteration (at starting point)
:return: newly set initial offset
"""
if not isinstance(offset, int):
raise ValueError("Initial iteration offset must be an integer")
self._initial_iteration_offset = offset
if self._metrics_manager:
self._metrics_manager.set_iteration_offset(self._initial_iteration_offset)
return self._initial_iteration_offset
def get_initial_iteration(self):
"""
Return the initial iteration offset, default is 0.
Useful when continuing training from previous checkpoints.
:return int: initial iteration offset
"""
return self._initial_iteration_offset
def _get_models(self, model_type='output'):
model_type = model_type.lower().strip()
assert model_type == 'output' or model_type == 'input'
if model_type == 'input':
            regex = r'((?i)(Using model id: )(\w+)?)'
compiled = re.compile(regex)
ids = [i[-1] for i in re.findall(compiled, self.comment)] + (
[self.input_model_id] if self.input_model_id else [])
# remove duplicates and preserve order
ids = list(OrderedDict.fromkeys(ids))
from ...model import Model as TrainsModel
in_model = []
for i in ids:
m = TrainsModel(model_id=i)
try:
                    # make sure the model is valid
m._get_model_data()
in_model.append(m)
except:
pass
return in_model
else:
res = self.send(
models.GetAllRequest(
task=[self.id],
order_by=['created'],
only_fields=['id']
)
)
if not res.response.models:
return []
ids = [m.id for m in res.response.models] + ([self.output_model_id] if self.output_model_id else [])
# remove duplicates and preserve order
ids = list(OrderedDict.fromkeys(ids))
from ...model import Model as TrainsModel
return [TrainsModel(model_id=i) for i in ids]
def _get_default_report_storage_uri(self):
if not self._files_server:
self._files_server = Session.get_files_server_host()
return self._files_server
def _get_status(self):
try:
all_tasks = self.send(
tasks.GetAllRequest(id=[self.id], only_fields=['status', 'status_message']),
).response.tasks
return all_tasks[0].status, all_tasks[0].status_message
except Exception:
return None, None
def _reload_last_iteration(self):
try:
all_tasks = self.send(
tasks.GetAllRequest(id=[self.id], only_fields=['last_iteration']),
).response.tasks
self.data.last_iteration = all_tasks[0].last_iteration
except Exception:
return None
@classmethod
def _get_api_server(cls):
return Session.get_api_server_host()
def _get_app_server(self):
if not self._app_server:
self._app_server = Session.get_app_server_host()
return self._app_server
def _edit(self, **kwargs):
with self._edit_lock:
            # Since we are using forced update, make sure the task status is valid
if not self._data or (str(self.data.status) not in (str(tasks.TaskStatusEnum.created),
str(tasks.TaskStatusEnum.in_progress))):
# the exception being name/comment that we can always change.
if kwargs and all(k in ('name', 'comment') for k in kwargs.keys()):
pass
else:
raise ValueError('Task object can only be updated if created or in_progress')
res = self.send(tasks.EditRequest(task=self.id, force=True, **kwargs), raise_on_errors=False)
return res
def _update_requirements(self, requirements):
if not isinstance(requirements, dict):
requirements = {'pip': requirements}
# protection, Old API might not support it
try:
self.data.script.requirements = requirements
self.send(tasks.SetRequirementsRequest(task=self.id, requirements=requirements))
except Exception:
pass
def _update_script(self, script):
self.data.script = script
self._edit(script=script)
@classmethod
def _clone_task(cls, cloned_task_id, name=None, comment=None, execution_overrides=None,
tags=None, parent=None, project=None, log=None, session=None):
"""
Clone a task
:param cloned_task_id: Task ID for the task to be cloned
:type cloned_task_id: str
        :param name: Name for the new task
:type name: str
:param comment: Optional comment for the new task
:type comment: str
:param execution_overrides: Task execution overrides. Applied over the cloned task's execution
section, useful for overriding values in the cloned task.
:type execution_overrides: dict
:param tags: Optional updated model tags
:type tags: [str]
:param parent: Optional parent Task ID of the new task.
:type parent: str
:param project: Optional project ID of the new task.
If None, the new task will inherit the cloned task's project.
:type project: str
:param log: Log object used by the infrastructure.
:type log: logging.Logger
:param session: Session object used for sending requests to the API
:type session: Session
        :return: The new task's ID
"""
session = session if session else cls._get_default_session()
res = cls._send(session=session, log=log, req=tasks.GetByIdRequest(task=cloned_task_id))
task = res.response.task
output_dest = None
if task.output:
output_dest = task.output.destination
execution = task.execution.to_dict() if task.execution else {}
execution = ConfigTree.merge_configs(ConfigFactory.from_dict(execution),
ConfigFactory.from_dict(execution_overrides or {}))
        # clear output artifacts (keep only input-mode artifacts)
execution['artifacts'] = [e for e in execution['artifacts'] if e.get('mode') == 'input']
if not tags and task.tags:
tags = [t for t in task.tags if t != cls._development_tag]
req = tasks.CreateRequest(
name=name or task.name,
type=task.type,
input=task.input if hasattr(task, 'input') else {'view': {}},
tags=tags,
comment=comment or task.comment,
parent=parent,
project=project if project else task.project,
output_dest=output_dest,
execution=execution.as_plain_ordered_dict(),
script=task.script
)
res = cls._send(session=session, log=log, req=req)
cloned_task_id = res.response.id
if task.script and task.script.requirements:
cls._send(session=session, log=log, req=tasks.SetRequirementsRequest(
task=cloned_task_id, requirements=task.script.requirements))
return cloned_task_id
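    # Illustrative example (the task ID and override values are assumptions):
    #     new_id = Task._clone_task('<source-task-id>', name='lr sweep #2',
    #                               execution_overrides={'parameters': {'lr': '0.001'}})
    # The overrides are merged on top of the cloned task's execution section before
    # the new task is created.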
@classmethod
def get_all(cls, session=None, log=None, **kwargs):
"""
List all tasks based on specific projection
:param session: Session object used for sending requests to the API
:type session: Session
:param log: Log object
:type log: logging.Logger
:param kwargs: Keyword args passed to the GetAllRequest (see .backend_api.services.tasks.GetAllRequest)
            Example: status='completed', search_text='specific_word', user='user_id', project='project_id'
:type kwargs: dict
:return: API response
"""
session = session if session else cls._get_default_session()
req = tasks.GetAllRequest(**kwargs)
res = cls._send(session=session, req=req, log=log)
return res
@classmethod
def get_by_name(cls, task_name):
res = cls._send(cls._get_default_session(), tasks.GetAllRequest(name=exact_match_regex(task_name)))
task = get_single_result(entity='task', query=task_name, results=res.response.tasks)
return cls(task_id=task.id)
def _get_all_events(self, max_events=100):
"""
Get a list of all reported events.
Warning: Debug only. Do not use outside of testing.
:param max_events: The maximum events the function will return. Pass None
to return all the reported events.
:return: A list of events from the task.
"""
log_events = self.send(events.GetTaskEventsRequest(
task=self.id,
order='asc',
batch_size=max_events,
))
events_list = log_events.response.events
total_events = log_events.response.total
scroll = log_events.response.scroll_id
while len(events_list) < total_events and (max_events is None or len(events_list) < max_events):
log_events = self.send(events.GetTaskEventsRequest(
task=self.id,
order='asc',
batch_size=max_events,
scroll_id=scroll,
))
events_list.extend(log_events.response.events)
scroll = log_events.response.scroll_id
return events_list
@property
def _edit_lock(self):
if self.__edit_lock:
return self.__edit_lock
if not PROC_MASTER_ID_ENV_VAR.get() or len(PROC_MASTER_ID_ENV_VAR.get().split(':')) < 2:
self.__edit_lock = RLock()
elif PROC_MASTER_ID_ENV_VAR.get().split(':')[1] == str(self.id):
# remove previous file lock instance, just in case.
filename = os.path.join(gettempdir(), 'trains_{}.lock'.format(self.id))
try:
os.unlink(filename)
except Exception:
pass
# create a new file based lock
self.__edit_lock = FileRLock(filename=filename)
else:
self.__edit_lock = RLock()
return self.__edit_lock
@_edit_lock.setter
def _edit_lock(self, value):
self.__edit_lock = value
@classmethod
def __update_master_pid_task(cls, pid=None, task=None):
pid = pid or os.getpid()
if not task:
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':')
elif isinstance(task, str):
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':' + task)
else:
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':' + str(task.id))
# make sure we refresh the edit lock next time we need it,
task._edit_lock = None
@classmethod
def __get_master_id_task_id(cls):
master_task_id = PROC_MASTER_ID_ENV_VAR.get().split(':')
# we could not find a task ID, revert to old stub behaviour
if len(master_task_id) < 2 or not master_task_id[1]:
return None
return master_task_id[1]
@classmethod
def __is_subprocess(cls):
# notice this class function is called from Task.ExitHooks, do not rename/move it.
is_subprocess = PROC_MASTER_ID_ENV_VAR.get() and \
PROC_MASTER_ID_ENV_VAR.get().split(':')[0] != str(os.getpid())
return is_subprocess
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction)
import electrum
from electrum import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest)
from electrum.bitcoin import is_address
from electrum.plugin import run_hook, BasePlugin
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs)
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING
from electrum.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed, UntrustedServerReturnedError
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.lnutil import ln_dummy_address
from electrum.lnaddr import lndecode, LnDecodeException
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT)
from .util import ButtonsTextEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
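# Illustrative sketch (method name and body are assumptions, not Electrum API docs):
# decorating a wallet-touching method makes the window prompt for the password and
# inject it as the 'password' keyword argument:
#
#     @protected
#     def do_sign_message(self, address, message, password):
#         return self.wallet.sign_message(address, message, password)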
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
assert wallet, "no wallet"
self.wallet = wallet
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.pluginsdialog = None
self.showing_cert_mismatch_error = False
self.tl_windows = []
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.completions = QStringListModel()
coincontrol_sb = self.create_coincontrol_statusbar()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.channels_tab = self.create_channels_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
central_widget = QWidget()
vbox = QVBoxLayout(central_widget)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(tabs)
vbox.addWidget(coincontrol_sb)
self.setCentralWidget(central_widget)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'payment_failed', 'payment_succeeded',
'invoice_status', 'request_status', 'ln_gossip_sync_progress',
'cert_mismatch', 'gossip_db_loaded']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
util.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
#self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread()
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook.maybe_setup(config=self.config,
wallet=self.wallet)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
# TODO would be nice if we just sent these to the crash reporter...
# anything we don't want to send there, we should explicitly catch
# send_exception_to_crash_reporter(e)
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
# note: all windows get events from all wallets!
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'gossip_db_loaded':
self.channels_list.gossip_db_loaded.emit(*args)
elif event == 'channels_updated':
self.channels_list.update_rows.emit(*args)
elif event == 'channel':
self.channels_list.update_single_row.emit(*args)
self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'payment_succeeded':
self.on_payment_succeeded(*args)
elif event == 'payment_failed':
self.on_payment_failed(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
pass
elif event == 'fee_histogram':
self.history_model.on_fee_histogram()
elif event == 'ln_gossip_sync_progress':
self.update_lightning_icon()
elif event == 'cert_mismatch':
self.show_cert_mismatch_error()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
self.wallet.thread = None
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.lnworker:
util.trigger_callback('channels_updated', wallet)
self.need_update.set()
        # Once the GUI has been initialized, check whether we want to announce
        # something, since the callback may have been invoked before the GUI was ready.
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
        except Exception:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum ({name})".format(name=constants.net.NAME)
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend {name}s with it.".format(name=constants.net.NAME)),
_("Make sure you own the seed phrase or the private keys, before you request {name}s to be sent to this wallet.".format(name=constants.net.NAME))
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main {name}s network. It is used for testing.").format(name=constants.net.NAME)
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def select_backup_dir(self, b):
name = self.config.get('backup_dir', '')
        dirname = QFileDialog.getExistingDirectory(self, "Select your backup directory", name)
if dirname:
self.config.set_key('backup_dir', dirname)
self.backup_dir_e.setText(dirname)
def backup_wallet(self):
d = WindowModalDialog(self, _("File Backup"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
backup_help = ""
backup_dir = self.config.get('backup_dir')
backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
msg = _('Please select a backup directory')
if self.wallet.lnworker and self.wallet.lnworker.channels:
msg += '\n\n' + ' '.join([
_("Note that lightning channels will be converted to channel backups."),
_("You cannot use channel backups to perform lightning payments."),
_("Channel backups can only be used to request your channels to be closed.")
])
self.backup_dir_e = QPushButton(backup_dir)
self.backup_dir_e.clicked.connect(self.select_backup_dir)
grid.addWidget(backup_dir_label, 1, 0)
grid.addWidget(self.backup_dir_e, 1, 1)
vbox.addLayout(grid)
vbox.addWidget(WWLabel(msg))
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
try:
new_path = self.wallet.save_backup()
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
return
if new_path:
msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
self.show_message(msg, title=_("Wallet backup created"))
else:
self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not created"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
        try:
            sorted(recent)  # sanity check: the stored value must be a sortable list of paths
        except Exception:
            recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
requests_menu = wallet_menu.addMenu(_("Requests"))
requests_menu.addAction(_("Import"), lambda: self.import_requests())
requests_menu.addAction(_("Export"), lambda: self.export_requests())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools")) # type: QMenu
preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog) # type: QAction
if sys.platform == 'darwin':
# "Settings"/"Preferences" are all reserved keywords in macOS.
# preferences_action will get picked up based on name (and put into a standardized location,
# and given a standard reserved hotkey)
# Hence, this menu item will be at a "uniform location re macOS processes"
preferences_action.setMenuRole(QAction.PreferencesRole) # make sure OS recognizes it as preferences
# Add another preferences item, to also have a "uniform location for Electrum between different OSes"
tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
tools_menu.addAction(_("&Lightning Network"), self.gui_object.show_lightning_dialog).setEnabled(bool(self.wallet.has_lightning() and self.network))
tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog).setEnabled(bool(self.network and self.network.local_watchtower))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('%s:%s?message=donation for %s'%(constants.net.PAYMENT_URI_SCHEME, d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum ({name})".format(name=constants.net.NAME),
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying {name}.").format(name=constants.net.NAME) + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the {name} system.").format(name=constants.net.NAME) + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(latest_version=version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
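        # e.g. a batch of 5 incoming txs produces a single aggregated notification
        # with the summed amount instead of 5 separate popups.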
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum ({name})".format(name=constants.net.NAME), message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
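                # Older Qt (before 5.9) lacks the QIcon overload of showMessage and raises
                # TypeError here, so fall back to the icon-enum overload below.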
self.tray.showMessage("Electrum ({name})".format(name=constants.net.NAME), message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
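    # Illustrative usage (hypothetical call sites, not from this file):
    #   fn = self.getOpenFileName(_("Open file"), "*.csv")
    #   fn = self.getSaveFileName(_("Save file"), "export.csv", "*.csv")
    # Subsequent dialogs reopen in the directory stored under the 'io_dir' config key.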
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter="",
*, default_extension: str = None,
default_filter: str = None) -> Optional[str]:
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join(directory, filename)
file_dialog = QFileDialog(self, title, path, filter)
file_dialog.setAcceptMode(QFileDialog.AcceptSave)
if default_extension:
# note: on MacOS, the selected filter's first extension seems to have priority over this...
file_dialog.setDefaultSuffix(default_extension)
if default_filter:
assert default_filter in filter, f"default_filter={default_filter!r} does not appear in filter={filter!r}"
file_dialog.selectNameFilter(default_filter)
if file_dialog.exec() != QDialog.Accepted:
return None
selected_path = file_dialog.selectedFiles()[0]
if selected_path and directory != os.path.dirname(selected_path):
self.config.set_key('io_dir', os.path.dirname(selected_path), True)
return selected_path
def timer_actions(self):
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return self.config.format_amount(x, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.config.format_amount_and_units(amount)
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return self.config.format_fee_rate(fee_rate)
def get_decimal_point(self):
return self.config.get_decimal_point()
def base_unit(self):
return self.config.get_base_unit()
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * constants.net.COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / constants.net.COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
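                # num_sent / num_answered are, roughly, the address-history requests sent to
                # the server and the responses received so far; they are displayed below as
                # an "answered/sent" progress indicator.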
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
if self.wallet.lnworker:
l = self.wallet.lnworker.get_balance()
text += u' \U0001f5f2 %s'%(self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
if self.status_button:
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.channels_list.update_rows.emit(wallet)
self.update_completions()
def create_channels_tab(self):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
def show_transaction(self, tx, *, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
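        # Column roles in this grid: 0 = label, 1 = input widget,
        # 2 = fiat amount (exchange rate feature), 3 = stretch (see setColumnStretch above).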
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding {name} addresses.').format(name=constants.net.NAME),
_('The {name_lower} address never expires and will always be part of this electrum wallet.').format(name_lower=constants.net.NAME_LOWER),
])
grid.addWidget(HelpLabel(_('Expires after'), msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
        self.expires_label.setReadOnly(True)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('Request'))
self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
self.create_invoice_button.setToolTip('Create on-chain request')
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_invoice_button.setText(_('On-chain'))
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setToolTip('Create lightning request')
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 3, 1, 2)
self.receive_payreq_e = ButtonsTextEdit()
self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
self.receive_payreq_e.addCopyButton(self.app)
self.receive_payreq_e.setReadOnly(True)
self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=220)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
self.receive_requests_label = QLabel(_('Incoming payments'))
from .request_list import RequestList
self.request_list = RequestList(self)
receive_tabs = QTabWidget()
receive_tabs.addTab(self.receive_address_e, _('Address'))
receive_tabs.addTab(self.receive_payreq_e, _('Request'))
receive_tabs.addTab(self.receive_qr, _('QR Code'))
receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(receive_tabs)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
if is_lightning:
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
self.address_list.update()
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
# copy to clipboard
r = self.wallet.get_request(key)
content = r.invoice if r.is_lightning() else r.get_address()
title = _('Invoice') if is_lightning else _('Address')
self.do_copy(content, title=title)
def create_bitcoin_request(self, amount, message, expiration):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
if title is None:
tooltip_text = _("Text copied to clipboard").format(title)
else:
tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
uri = uri.upper()
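            # e.g. 'lnbc1...' becomes 'LNBC1...'; QR alphanumeric mode only covers digits,
            # uppercase letters and a few symbols, so uppercasing keeps the invoice within
            # that smaller, denser encoding mode.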
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
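        # Same column convention as the receive tab: 0 = label, 1 = input,
        # 2 = fiat amount, 3 = stretch (and the Max button).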
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a {name} address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a {name} address)').format(
name=constants.net.NAME
)
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
        completer.setCaseSensitivity(Qt.CaseInsensitive)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = FreezableLineEdit()
self.message_e.setMinimumWidth(700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.send_button = EnterButton(_("Pay"), self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
self.set_onchain(False)
self.invoices_label = QLabel(_('Outgoing payments'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
outputs = self.payto_e.get_outputs(True)
if not outputs:
return
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=self.get_coins(),
outputs=outputs,
fee=fee_est,
is_sweep=False)
try:
tx = make_tx(None)
except (NotEnoughFunds, NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs) as e:
self.max_button.setChecked(False)
self.show_error(str(e))
return
self.max_button.setChecked(True)
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
@protected
def protect(self, func, args, password):
return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('{name} Address is None').format(name=constants.net.NAME))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") + f"{err.idx+1}: {err.line_content[:40]}... ({repr(err.exc)})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def pay_lightning_invoice(self, invoice: str, amount_sat: int):
msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat))
if not self.question(msg):
return
attempts = LN_NUM_PAYMENT_ATTEMPTS
def task():
self.wallet.lnworker.pay(invoice, amount_sat, attempts=attempts)
self.do_clear()
self.wallet.thread.add(task)
self.invoice_list.update()
def on_request_status(self, key, status):
if key not in self.wallet.receive_requests:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
def on_invoice_status(self, key):
req = self.wallet.get_invoice(key)
if req is None:
return
self.invoice_list.update_item(key, req)
def on_payment_succeeded(self, key, description=None):
self.show_message(_('Payment succeeded'))
self.need_update.set()
def on_payment_failed(self, key, reason):
self.show_error(_('Payment failed') + '\n\n' + reason)
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
if not self._is_onchain:
invoice_str = self.payto_e.lightning_invoice
if not invoice_str:
return
if not self.wallet.lnworker:
self.show_error(_('Lightning is disabled'))
return
invoice = LNInvoice.from_bech32(invoice_str)
if invoice.amount is None:
amount = self.amount_e.get_amount()
if amount:
invoice.amount = amount
else:
self.show_error(_('No amount'))
return
return invoice
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(
outputs=outputs,
message=message,
pr=self.payment_request,
URI=self.payto_URI)
def do_save_invoice(self):
invoice = self.read_invoice()
if not invoice:
return
self.wallet.save_invoice(invoice)
self.do_clear()
self.invoice_list.update()
def do_pay(self):
invoice = self.read_invoice()
if not invoice:
return
self.wallet.save_invoice(invoice)
self.invoice_list.update()
self.do_clear()
self.do_pay_invoice(invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice.outputs
self.pay_onchain_dialog(self.get_coins(), outputs)
def do_pay_invoice(self, invoice):
if invoice.type == PR_TYPE_LN:
self.pay_lightning_invoice(invoice.invoice, invoice.amount)
elif invoice.type == PR_TYPE_ONCHAIN:
self.pay_onchain_dialog(self.get_coins(), invoice.outputs)
else:
raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
"""Return a list of selected coins or None.
Note: None means selection is not being used,
while an empty sequence means the user specifically selected that.
"""
return self.utxo_list.get_spend_list()
def pay_onchain_dialog(self, inputs: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], *,
external_keypairs=None) -> None:
# trustedcoin requires this
if run_hook('abort_send', self):
return
is_sweep = bool(external_keypairs)
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=inputs,
outputs=outputs,
fee=fee_est,
is_sweep=is_sweep)
output_values = [x.value for x in outputs]
if output_values.count('!') > 1:
self.show_error(_("More than one output set to spend max"))
return
if self.config.get('advanced_preview'):
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs)
return
output_value = '!' if '!' in output_values else sum(output_values)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
if d.not_enough_funds:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
if not d.have_enough_funds_assuming_zero_fees():
self.show_message(_('Not Enough Funds'))
return
cancelled, is_send, password, tx = d.run()
if cancelled:
return
if is_send:
def sign_done(success):
if success:
self.broadcast_or_show(tx)
self.sign_tx_with_password(tx, callback=sign_done, password=password,
external_keypairs=external_keypairs)
else:
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs)
def preview_tx_dialog(self, *, make_tx, external_keypairs=None):
d = PreviewTxDialog(make_tx=make_tx, external_keypairs=external_keypairs,
window=self)
d.show()
def broadcast_or_show(self, tx: Transaction):
if not tx.is_complete():
self.show_transaction(tx)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx)
return
self.broadcast_transaction(tx)
@protected
def sign_tx(self, tx, *, callback, external_keypairs, password):
self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
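        # Plugins may wrap the success/failure callbacks here (e.g. the trustedcoin
        # plugin uses the 'tc_sign_wrapper' hook to add its 2FA co-signing step).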
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if external_keypairs:
# can sign directly
task = partial(tx.sign, external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx: Transaction):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Invoice has expired")
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
return False, e.get_message_for_gui()
except BestEffortRequestFailed as e:
return False, repr(e)
# success
txid = tx.txid()
if pr:
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return True, txid
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
success, msg = result
if success:
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, funding_sat):
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(coins=coins,
funding_sat=funding_sat,
fee_est=fee_est)
return make_tx
def open_channel(self, connect_str, funding_sat, push_amt):
# use ConfirmTxDialog
# we need to know the fee before we broadcast, because the txid is required
make_tx = self.mktx_for_open_channel(funding_sat)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
        # disable the preview button: the user must not broadcast the funding tx
        # before the channel establishment flow completes
d.preview_button.setEnabled(False)
cancelled, is_send, password, funding_tx = d.run()
if not is_send:
return
if cancelled:
return
# read funding_sat from tx; converts '!' to int value
funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
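        # (at this stage the funding output still pays to a placeholder/dummy address,
        # which is why its value - including a resolved '!' max amount - can be read back here)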
def task():
return self.wallet.lnworker.open_channel(connect_str=connect_str,
funding_tx=funding_tx,
funding_sat=funding_sat,
push_amt_sat=push_amt,
password=password)
def on_success(args):
chan, funding_tx = args
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
if not funding_tx.is_complete():
message += '\n\n' + _('Please sign and broadcast the funding transaction')
self.show_message(message)
if not funding_tx.is_complete():
self.show_transaction(funding_tx)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(repr(e)))
WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setAmount(pr.get_amount())
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
"""Parse ln invoice, and prepare the send tab for it."""
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
raise LnDecodeException(e) from e
pubkey = bh2u(lnaddr.pubkey.serialize())
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.message_e.setText(description)
if lnaddr.amount is not None:
self.amount_e.setAmount(lnaddr.amount * constants.net.COIN)
#self.amount_e.textEdited.emit("")
self.set_onchain(False)
def set_onchain(self, b):
self._is_onchain = b
self.max_button.setEnabled(b)
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
amount_str = self.format_amount(invoice.amount) + ' ' + self.base_unit()
d = WindowModalDialog(self, _("Onchain Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
grid.addWidget(QLabel(amount_str), 1, 1)
if len(invoice.outputs) == 1:
grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.get_address()), 2, 1)
else:
            outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value) + ' ' + self.base_unit(), invoice.outputs))
grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
grid.addWidget(QLabel(outputs_str), 2, 1)
grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
grid.addWidget(QLabel(invoice.message), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
if invoice.bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
pr.verify(self.contacts)
grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
def do_export():
key = pr.get_id()
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
                with open(fn, 'wb') as f:
                    f.write(pr.raw)
                self.show_message(_('BIP70 invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Export'), do_export)
buttons = Buttons(exportButton, CloseButton(d))
else:
buttons = Buttons(CloseButton(d))
vbox.addLayout(grid)
vbox.addLayout(buttons)
d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
lnaddr = lndecode(invoice.invoice, expected_hrp=constants.net.SEGWIT_HRP)
d = WindowModalDialog(self, _("Lightning Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
amount_str = self.format_amount(invoice.amount) + ' ' + self.base_unit()
grid.addWidget(QLabel(amount_str), 1, 1)
grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.message), 2, 1)
grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
grid.addWidget(QLabel(lnaddr.paymenthash.hex()), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
vbox.addLayout(grid)
invoice_e = ShowQRTextEdit()
invoice_e.addCopyButton(self.app)
invoice_e.setText(invoice.invoice)
vbox.addWidget(invoice_e)
vbox.addLayout(Buttons(CloseButton(d),))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.db.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(config=self.config,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.lightning_button = None
if self.wallet.has_lightning() and self.network:
self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
self.update_lightning_icon()
sb.addPermanentWidget(self.lightning_button)
self.status_button = None
if self.network:
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
self.coincontrol_sb = sb = QStatusBar()
sb.setSizeGripEnabled(False)
#sb.setFixedHeight(3 * char_width_in_lineedit())
sb.setStyleSheet('QStatusBar::item {border: None;} '
+ ColorScheme.GREEN.as_stylesheet(True))
self.coincontrol_label = QLabel()
self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
sb.addWidget(self.coincontrol_label)
clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
clear_cc_button.setStyleSheet("margin-right: 5px;")
sb.addPermanentWidget(clear_cc_button)
sb.setVisible(False)
return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
def update_lightning_icon(self):
if self.lightning_button is None:
return
if not self.network.is_lightning_running():
return
cur, total = self.network.lngossip.get_sync_progress_estimate()
# self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
progress_percent = 0
progress_str = "??%"
if cur is not None and total is not None and total > 0:
# note: Progress is rescaled such that 95% is considered "done".
# "Real" progress can stay around 98-99% for a long time, which
# might needlessly worry users.
progress_percent = (1.0 / 0.95 * cur / total) * 100
progress_percent = min(progress_percent, 100)
progress_percent = round(progress_percent)
progress_str = f"{progress_percent}%"
if progress_percent >= 100:
self.lightning_button.setMaximumWidth(25)
self.lightning_button.setText('')
self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
else:
self.lightning_button.setMaximumWidth(25 + 4 * char_width_in_lineedit())
self.lightning_button.setText(progress_str)
self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
"Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
def change_password_dialog(self):
from electrum.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def disable_lightning(self):
warning = _('This will delete your lightning private keys')
r = self.question(_('Disable Lightning payments?') + '\n\n' + warning)
if not r:
return
self.wallet.remove_lightning()
self.show_warning(_('Lightning keys have been removed. This wallet will be closed'))
self.close()
def enable_lightning(self):
warning1 = _("Lightning support in Electrum is experimental. Do not put large amounts in lightning channels.")
warning2 = _("Funds stored in lightning channels are not recoverable from your seed. You must backup your wallet file everytime you create a new channel.")
r = self.question(_('Enable Lightning payments?') + '\n\n' + _('WARNINGS') + ': ' + '\n\n' + warning1 + '\n\n' + warning2)
if not r:
return
self.wallet.init_lightning()
self.show_warning(_('Lightning keys have been initialized. This wallet will be closed'))
self.close()
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.db.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
# lightning
grid.addWidget(QLabel(_('Lightning') + ':'), 5, 0)
if self.wallet.can_have_lightning():
if self.wallet.has_lightning():
lightning_b = QPushButton(_('Disable'))
lightning_b.clicked.connect(dialog.close)
lightning_b.clicked.connect(self.disable_lightning)
lightning_label = QLabel(_('Enabled'))
lightning_b.setDisabled(bool(self.wallet.lnworker.channels))
else:
lightning_b = QPushButton(_('Enable'))
lightning_b.clicked.connect(dialog.close)
lightning_b.clicked.connect(self.enable_lightning)
lightning_label = QLabel(_('Disabled'))
grid.addWidget(lightning_label, 5, 1)
grid.addWidget(lightning_b, 5, 2)
else:
grid.addWidget(QLabel(_("Not available for this wallet.")), 5, 1)
grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2)
vbox.addLayout(grid)
labels_clayout = None
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
mpk_text.repaint() # macOS hack for #4777
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
# only show the combobox if multiple master keys are defined
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
labels_clayout.selected_index()
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
btn_export_info = run_hook('wallet_info_buttons', self, dialog)
btn_show_xpub = run_hook('show_xpub_button', self, dialog, labels_clayout)
btn_close = CloseButton(dialog)
btns = Buttons(btn_export_info, btn_show_xpub, btn_close)
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
# if redeem_script:
# vbox.addWidget(QLabel(_("Redeem Script") + ':'))
# rds_e = ShowQRTextEdit(text=redeem_script)
# rds_e.addCopyButton(self.app)
# vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid {name} address.').format(name=constants.net.NAME))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid {name} address.').format(name=constants.net.NAME))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
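# Sketch of the corresponding round trip (pubkey_hex and plaintext are
# placeholders; only the calls shown in do_encrypt/do_decrypt are used):
#   encrypted = ecc.ECPubkey(bfh(pubkey_hex)).encrypt_message(plaintext.encode('utf-8'))
#   decrypted = self.wallet.decrypt_message(pubkey_hex, encrypted, password)  # returns bytes
# Decryption only works if the wallet holds the private key for pubkey_hex.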
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
from electrum.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def import_channel_backup(self, encrypted):
if not self.question('Import channel backup?'):
return
try:
self.wallet.lnbackups.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(repr(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("{scheme}:".format(scheme=constants.net.PAYMENT_URI_SCHEME)):
self.pay_to_URI(data)
return
if data.startswith('channel_backup:'):
self.import_channel_backup(data[15:])
return
# else if the user scanned an offline signed tx
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = self.getOpenFileName(_("Select your transaction file"),
TRANSACTION_FILE_EXTENSION_FILTER_ANY)
if not fileName:
return
try:
with open(fileName, "rb") as f:
file_content = f.read() # type: Union[str, bytes]
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except UntrustedServerReturnedError as e:
self.logger.info(f"Error getting transaction from network: {repr(e)}")
self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
return
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
else:
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
os.chmod(fileName, 0o600)
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
def do_export_labels(self):
export_meta_gui(self, _('labels'), self.wallet.export_labels)
def import_invoices(self):
import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
def export_invoices(self):
export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
def import_requests(self):
import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
def export_requests(self):
export_meta_gui(self, _('requests'), self.wallet.export_requests)
def import_contacts(self):
import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
def export_contacts(self):
export_meta_gui(self, _('contacts'), self.contacts.export_file)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address_for_corruption(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
privkeys = get_pk()
def on_success(result):
coins, keypairs = result
outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
self.warn_if_watching_only()
self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
def on_failure(exc_info):
self.on_error(exc_info)
msg = _('Preparing sweep transaction...')
task = lambda: self.network.run_from_another_thread(
sweep_preparations(privkeys, self.network))
WaitingDialog(self, msg, task, on_success, on_failure)
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
util.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.db.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.db.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
widget = settings_widgets.get(name) # type: Optional[QWidget]
if widget and not p:
# plugin got disabled, rm widget
grid.removeWidget(widget)
widget.setParent(None)
settings_widgets.pop(name)
elif widget is None and p and p.requires_settings() and p.is_enabled():
# plugin got enabled, add widget
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# note: all enabled plugins will receive this hook:
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx: Transaction, new_tx: PartialTransaction) -> None:
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
fee_for_child = fee_e.get_amount()
if fee_for_child is None:
return
out_amt = max_fee - fee_for_child
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_for_child
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
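# Worked example for the formula above (illustrative numbers only): with
# total_size = 300 bytes, parent_fee = 200 sat and a target of 10,000 sat/kB,
# the desired combined fee is 10,000 * 300 / 1000 = 3,000 sat, so the child
# pays 3,000 - 200 = 2,800 sat (clamped to at most max_fee and at least
# total_size, i.e. roughly 1 sat/byte for the combined size).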
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
self.show_error(f'''{_("Can't CPFP")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(fee_combo, 4, 2)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee is None:
return # fee left empty, treat it as "cancel"
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
assert txid
fee = self.wallet.get_tx_fee(txid)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(txid)
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
grid = QGridLayout()
grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1)
grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1)
grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
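# Default suggestion: bump to 1.5x the old fee rate, or by at least 1 sat/vbyte,
# whichever is larger.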
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
grid.addWidget(feerate_e, 2, 1)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.deactivate()
grid.addWidget(fee_slider, 3, 1)
grid.addWidget(fee_combo, 3, 2)
vbox.addLayout(grid)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, coins=self.get_coins())
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_desc=tx_label)
def save_transaction_into_wallet(self, tx: Transaction):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_db()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
def show_cert_mismatch_error(self):
if self.showing_cert_mismatch_error:
return
self.showing_cert_mismatch_error = True
self.show_critical(title=_("Certificate mismatch"),
msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" +
_("Electrum will now exit."))
self.showing_cert_mismatch_error = False
self.close()
|
example.py
|
import threading
import time
from dmxnet import ESP
node = ESP(bind_address=('', 1234), node_data='foobar')
client = ESP(send_port=1234)
def mk_handle_poll_reply(name):
def handle_poll_reply(addr, type_, args, data):
print(f"[{name}] Poll reply from {addr}: {args}, {data}")
return handle_poll_reply
def mk_handle_dmx(name):
def handle_dmx(universe, start_addr, channels):
print(f"[{name}] DMX universe {universe}@{start_addr}: {channels}")
return handle_dmx
def mk_handle_ack(name):
def handle_ack(addr, type_, args, data):
print(f"[{name}] ACK from {addr}: {args}")
return handle_ack
def mk_handle_reset(name):
def handle_reset(addr, type_, args, data):
print(f"[{name}] RESET from {addr}: {args}")
return handle_reset
stop = threading.Event()
def run():
try:
while not stop.is_set():
node.process_packet(poll_reply_cb=mk_handle_poll_reply('NODE'), dmx_cb=mk_handle_dmx('NODE'), ack_cb=mk_handle_ack('NODE'), reset_cb=mk_handle_reset('NODE'))
client.process_packet(poll_reply_cb=mk_handle_poll_reply('CLIENT'), dmx_cb=mk_handle_dmx('CLIENT'), ack_cb=mk_handle_ack('CLIENT'), reset_cb=mk_handle_reset('CLIENT'))
finally:
node.close()
client.close()
t = threading.Thread(target=run)
t.start()
client.send_poll()
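# Chase pattern: each pass clears all 512 channels, drives one block of 128
# channels (i*128 .. i*128+127) to full (255) and transmits it as universe i.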
for i in range(4):
for chan in range(512):
client.set_channel(chan + 1, 0)
for chan in range(i * 128, (i * 128) + 128):
client.set_channel(chan + 1, 255)
client.send_dmx(universe=i)
time.sleep(1)
stop.set()
t.join()
# import time
# t = time.time()
# while True:
# try:
# node.process_packet(poll_reply_cb=handle_poll_reply, dmx_cb=handle_dmx)
# except BlockingIOError:
# if time.time() - t >= 3:
# raise
# def set_channel(self, chan, level):
# # Note that the channel is 1-512, not 0-indexed
# self.dmx_data[chan - 1] = level
# def send_poll(self, *, address=None, reply_type=None):
# if reply_type is None:
# reply_type = self.REPLY_FULL
# return self._send('POLL', address=address, reply_type=reply_type)
# def send_poll_reply(self, *, address=None, serial_number=None, node_type=None, node_version=None, switches=0, name=None, option=0, tos=0, ttl=10, node_data=None):
# return self._send(
# 'POLL_REPLY',
# data=node_data or self.node_data,
# mac=serial_number or self.serial_number,
# node_type=node_type or self.node_type,
# version=node_version or self.node_version,
# switches=switches,
# name=name or self.name,
# option=option,
# tos=tos,
# ttl=ttl
# )
# def send_dmx(self, *, address=None, universe=None):
# data = bytes(bytearray(self.dmx_data))
# return self._send(
# 'DMX',
# address=address,
# data=data,
# universe=self.universe if universe is None else universe,
# start_code=0,
# data_type=1,
# data_size=len(data)
# )
# def send_ack(self, *, address=None, ack_err=None, crc=None):
# if ack_err is None:
# if crc is None:
# status = 255
# else:
# status = 0
# else:
# status = ack_err
# return self._send('ACK', address=address, status=status, crc=crc or 0)
# def send_reset(self, *, address=None, serial_number=None):
# return self._send('RESET', address=address, mac=serial_number or self.serial_number)
# def process_packet(self, *, poll_cb=None, poll_reply_cb=None, ack_cb=None, dmx_cb=None, reset_cb=None):
# addr, type_, args, crc = self._recv()
# if type_ is None:
# return
# self.send_ack(address=addr[0], crc=crc)
# if type_ == 'POLL':
# if poll_cb:
# poll_cb(addr, type_, args, crc)
# else:
# self.poll_reply(address=addr[0])
# elif type_ == 'POLL_REPLY':
# if poll_reply_cb:
# poll_reply_cb(addr, type_, args, crc)
# elif type_ == 'ACK':
# if ack_cb:
# ack_cb(addr, type_, args, crc)
# elif type_ == 'DMX':
# if dmx_cb:
# dmx_cb(args['universe'], args['start_code'], list(map(ord, args['data'])))
# elif type_ == 'RESET':
# if reset_cb:
# reset_cb(addr, type_, args, crc)
# elif dmx_cb:
# dmx_cb(None, 0, [self.default_level] * 512)
|
linkcheck.py
|
"""
sphinx.builders.linkcheck
~~~~~~~~~~~~~~~~~~~~~~~~~
The CheckExternalLinksBuilder class.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import json
import queue
import re
import socket
import threading
import time
import warnings
from datetime import datetime, timezone
from email.utils import parsedate_to_datetime
from html.parser import HTMLParser
from os import path
from typing import Any, Dict, List, NamedTuple, Optional, Set, Tuple, cast
from urllib.parse import unquote, urlparse
from docutils import nodes
from docutils.nodes import Element
from requests import Response
from requests.exceptions import HTTPError, TooManyRedirects
from sphinx.application import Sphinx
from sphinx.builders.dummy import DummyBuilder
from sphinx.deprecation import RemovedInSphinx50Warning
from sphinx.locale import __
from sphinx.transforms.post_transforms import SphinxPostTransform
from sphinx.util import encode_uri, logging, requests
from sphinx.util.console import darkgray, darkgreen, purple, red, turquoise # type: ignore
from sphinx.util.nodes import get_node_line
logger = logging.getLogger(__name__)
uri_re = re.compile('([a-z]+:)?//') # matches foo:// and // (a protocol-relative URL)
Hyperlink = NamedTuple('Hyperlink', (('next_check', float),
('uri', Optional[str]),
('docname', Optional[str]),
('lineno', Optional[int])))
RateLimit = NamedTuple('RateLimit', (('delay', float), ('next_check', float)))
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml;q=0.9,*/*;q=0.8',
}
CHECK_IMMEDIATELY = 0
QUEUE_POLL_SECS = 1
DEFAULT_DELAY = 60.0
def node_line_or_0(node: Element) -> int:
"""
PriorityQueue items must be comparable. The line number is part of the
tuple used by the PriorityQueue, so keep a homogeneous type for comparison.
"""
warnings.warn('node_line_or_0() is deprecated.',
RemovedInSphinx50Warning, stacklevel=2)
return get_node_line(node) or 0
class AnchorCheckParser(HTMLParser):
"""Specialized HTML parser that looks for a specific anchor."""
def __init__(self, search_anchor: str) -> None:
super().__init__()
self.search_anchor = search_anchor
self.found = False
def handle_starttag(self, tag: Any, attrs: Any) -> None:
for key, value in attrs:
if key in ('id', 'name') and value == self.search_anchor:
self.found = True
break
def check_anchor(response: requests.requests.Response, anchor: str) -> bool:
"""Reads HTML data from a response object `response` searching for `anchor`.
Returns True if anchor was found, False otherwise.
"""
parser = AnchorCheckParser(anchor)
# Read file in chunks. If we find a matching anchor, we break
# the loop early in hopes not to have to download the whole thing.
for chunk in response.iter_content(chunk_size=4096, decode_unicode=True):
if isinstance(chunk, bytes): # requests failed to decode
chunk = chunk.decode() # manually try to decode it
parser.feed(chunk)
if parser.found:
break
parser.close()
return parser.found
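# Rough usage sketch for check_anchor() (not part of the builder itself; the URL,
# anchor name and config object below are placeholders):
#
#   response = requests.get("https://example.org/page.html", stream=True,
#                           config=app.config)
#   if not check_anchor(response, "section-name"):
#       ...  # report the anchor as missing
#
# stream=True matters here: the body is consumed in 4 KB chunks and the download
# stops as soon as the anchor is found.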
class CheckExternalLinksBuilder(DummyBuilder):
"""
Checks for broken external links.
"""
name = 'linkcheck'
epilog = __('Look for any errors in the above output or in '
'%(outdir)s/output.txt')
def init(self) -> None:
self.hyperlinks = {} # type: Dict[str, Hyperlink]
self.to_ignore = [re.compile(x) for x in self.config.linkcheck_ignore]
self.anchors_ignore = [re.compile(x)
for x in self.config.linkcheck_anchors_ignore]
self.auth = [(re.compile(pattern), auth_info) for pattern, auth_info
in self.config.linkcheck_auth]
self._good = set() # type: Set[str]
self._broken = {} # type: Dict[str, str]
self._redirected = {} # type: Dict[str, Tuple[str, int]]
# set a timeout for non-responding servers
socket.setdefaulttimeout(5.0)
# create queues and worker threads
self.rate_limits = {} # type: Dict[str, RateLimit]
self.wqueue = queue.PriorityQueue() # type: queue.PriorityQueue
self.rqueue = queue.Queue() # type: queue.Queue
self.workers = [] # type: List[threading.Thread]
for i in range(self.config.linkcheck_workers):
thread = threading.Thread(target=self.check_thread, daemon=True)
thread.start()
self.workers.append(thread)
def is_ignored_uri(self, uri: str) -> bool:
return any(pat.match(uri) for pat in self.to_ignore)
@property
def good(self) -> Set[str]:
warnings.warn(
"%s.%s is deprecated." % (self.__class__.__name__, "good"),
RemovedInSphinx50Warning,
stacklevel=2,
)
return self._good
@property
def broken(self) -> Dict[str, str]:
warnings.warn(
"%s.%s is deprecated." % (self.__class__.__name__, "broken"),
RemovedInSphinx50Warning,
stacklevel=2,
)
return self._broken
@property
def redirected(self) -> Dict[str, Tuple[str, int]]:
warnings.warn(
"%s.%s is deprecated." % (self.__class__.__name__, "redirected"),
RemovedInSphinx50Warning,
stacklevel=2,
)
return self._redirected
def check_thread(self) -> None:
kwargs = {}
if self.config.linkcheck_timeout:
kwargs['timeout'] = self.config.linkcheck_timeout
def get_request_headers() -> Dict:
url = urlparse(uri)
candidates = ["%s://%s" % (url.scheme, url.netloc),
"%s://%s/" % (url.scheme, url.netloc),
uri,
"*"]
for u in candidates:
if u in self.config.linkcheck_request_headers:
headers = dict(DEFAULT_REQUEST_HEADERS)
headers.update(self.config.linkcheck_request_headers[u])
return headers
return {}
def check_uri() -> Tuple[str, str, int]:
# split off anchor
if '#' in uri:
req_url, anchor = uri.split('#', 1)
for rex in self.anchors_ignore:
if rex.match(anchor):
anchor = None
break
else:
req_url = uri
anchor = None
# handle non-ASCII URIs
try:
req_url.encode('ascii')
except UnicodeError:
req_url = encode_uri(req_url)
# Get auth info, if any
for pattern, auth_info in self.auth:
if pattern.match(uri):
break
else:
auth_info = None
# update request headers for the URL
kwargs['headers'] = get_request_headers()
try:
if anchor and self.config.linkcheck_anchors:
# Read the whole document and see if #anchor exists
response = requests.get(req_url, stream=True, config=self.config,
auth=auth_info, **kwargs)
response.raise_for_status()
found = check_anchor(response, unquote(anchor))
if not found:
raise Exception(__("Anchor '%s' not found") % anchor)
else:
try:
# try a HEAD request first, which should be easier on
# the server and the network
response = requests.head(req_url, allow_redirects=True,
config=self.config, auth=auth_info,
**kwargs)
response.raise_for_status()
except (HTTPError, TooManyRedirects) as err:
if isinstance(err, HTTPError) and err.response.status_code == 429:
raise
# retry with GET request if that fails, some servers
# don't like HEAD requests.
response = requests.get(req_url, stream=True,
config=self.config,
auth=auth_info, **kwargs)
response.raise_for_status()
except HTTPError as err:
if err.response.status_code == 401:
# We'll take "Unauthorized" as working.
return 'working', ' - unauthorized', 0
elif err.response.status_code == 429:
next_check = self.limit_rate(err.response)
if next_check is not None:
self.wqueue.put((next_check, uri, docname, lineno), False)
return 'rate-limited', '', 0
return 'broken', str(err), 0
elif err.response.status_code == 503:
# We'll take "Service Unavailable" as ignored.
return 'ignored', str(err), 0
else:
return 'broken', str(err), 0
except Exception as err:
return 'broken', str(err), 0
else:
netloc = urlparse(req_url).netloc
try:
del self.rate_limits[netloc]
except KeyError:
pass
if response.url.rstrip('/') == req_url.rstrip('/'):
return 'working', '', 0
else:
new_url = response.url
if anchor:
new_url += '#' + anchor
# history contains any redirects, get last
if response.history:
code = response.history[-1].status_code
return 'redirected', new_url, code
else:
return 'redirected', new_url, 0
def check(docname: str) -> Tuple[str, str, int]:
# check for various conditions without bothering the network
if len(uri) == 0 or uri.startswith(('#', 'mailto:', 'tel:')):
return 'unchecked', '', 0
elif not uri.startswith(('http:', 'https:')):
if uri_re.match(uri):
# non supported URI schemes (ex. ftp)
return 'unchecked', '', 0
else:
srcdir = path.dirname(self.env.doc2path(docname))
if path.exists(path.join(srcdir, uri)):
return 'working', '', 0
else:
self._broken[uri] = ''
return 'broken', '', 0
elif uri in self._good:
return 'working', 'old', 0
elif uri in self._broken:
return 'broken', self._broken[uri], 0
elif uri in self._redirected:
return 'redirected', self._redirected[uri][0], self._redirected[uri][1]
# need to actually check the URI
for _ in range(self.config.linkcheck_retries):
status, info, code = check_uri()
if status != "broken":
break
if status == "working":
self._good.add(uri)
elif status == "broken":
self._broken[uri] = info
elif status == "redirected":
self._redirected[uri] = (info, code)
return (status, info, code)
while True:
next_check, uri, docname, lineno = self.wqueue.get()
if uri is None:
break
netloc = urlparse(uri).netloc
try:
# Refresh rate limit.
# When there are many links in the queue, workers are all stuck waiting
# for responses, but the builder keeps queuing. Links in the queue may
# have been queued before rate limits were discovered.
next_check = self.rate_limits[netloc].next_check
except KeyError:
pass
if next_check > time.time():
# Sleep before putting message back in the queue to avoid
# waking up other threads.
time.sleep(QUEUE_POLL_SECS)
self.wqueue.put((next_check, uri, docname, lineno), False)
self.wqueue.task_done()
continue
status, info, code = check(docname)
if status == 'rate-limited':
logger.info(darkgray('-rate limited- ') + uri + darkgray(' | sleeping...'))
else:
self.rqueue.put((uri, docname, lineno, status, info, code))
self.wqueue.task_done()
def limit_rate(self, response: Response) -> Optional[float]:
next_check = None
retry_after = response.headers.get("Retry-After")
if retry_after:
try:
# Integer: time to wait before next attempt.
delay = float(retry_after)
except ValueError:
try:
# An HTTP-date: time of next attempt.
until = parsedate_to_datetime(retry_after)
except (TypeError, ValueError):
# TypeError: Invalid date format.
# ValueError: Invalid date, e.g. Oct 52th.
pass
else:
next_check = datetime.timestamp(until)
delay = (until - datetime.now(timezone.utc)).total_seconds()
else:
next_check = time.time() + delay
netloc = urlparse(response.url).netloc
if next_check is None:
max_delay = self.config.linkcheck_rate_limit_timeout
try:
rate_limit = self.rate_limits[netloc]
except KeyError:
delay = DEFAULT_DELAY
else:
last_wait_time = rate_limit.delay
delay = 2.0 * last_wait_time
if delay > max_delay and last_wait_time < max_delay:
delay = max_delay
if delay > max_delay:
return None
next_check = time.time() + delay
self.rate_limits[netloc] = RateLimit(delay, next_check)
return next_check
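# Example of the backoff behaviour above (illustrative): without a usable
# Retry-After header, the per-netloc delay starts at DEFAULT_DELAY (60s) and
# doubles on each further 429 response (60 -> 120 -> 240), is clamped once to
# linkcheck_rate_limit_timeout (300s by default), and once even that is
# exceeded limit_rate() returns None and the URL is no longer retried.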
def process_result(self, result: Tuple[str, str, int, str, str, int]) -> None:
uri, docname, lineno, status, info, code = result
filename = self.env.doc2path(docname, None)
linkstat = dict(filename=filename, lineno=lineno,
status=status, code=code, uri=uri,
info=info)
if status == 'unchecked':
self.write_linkstat(linkstat)
return
if status == 'working' and info == 'old':
self.write_linkstat(linkstat)
return
if lineno:
logger.info('(%16s: line %4d) ', docname, lineno, nonl=True)
if status == 'ignored':
if info:
logger.info(darkgray('-ignored- ') + uri + ': ' + info)
else:
logger.info(darkgray('-ignored- ') + uri)
self.write_linkstat(linkstat)
elif status == 'local':
logger.info(darkgray('-local- ') + uri)
self.write_entry('local', docname, filename, lineno, uri)
self.write_linkstat(linkstat)
elif status == 'working':
logger.info(darkgreen('ok ') + uri + info)
self.write_linkstat(linkstat)
elif status == 'broken':
if self.app.quiet or self.app.warningiserror:
logger.warning(__('broken link: %s (%s)'), uri, info,
location=(filename, lineno))
else:
logger.info(red('broken ') + uri + red(' - ' + info))
self.write_entry('broken', docname, filename, lineno, uri + ': ' + info)
self.write_linkstat(linkstat)
elif status == 'redirected':
try:
text, color = {
301: ('permanently', purple),
302: ('with Found', purple),
303: ('with See Other', purple),
307: ('temporarily', turquoise),
308: ('permanently', purple),
}[code]
except KeyError:
text, color = ('with unknown code', purple)
linkstat['text'] = text
logger.info(color('redirect ') + uri + color(' - ' + text + ' to ' + info))
self.write_entry('redirected ' + text, docname, filename,
lineno, uri + ' to ' + info)
self.write_linkstat(linkstat)
else:
raise ValueError("Unknown status %s." % status)
def write_entry(self, what: str, docname: str, filename: str, line: int,
uri: str) -> None:
self.txt_outfile.write("%s:%s: [%s] %s\n" % (filename, line, what, uri))
def write_linkstat(self, data: dict) -> None:
self.json_outfile.write(json.dumps(data))
self.json_outfile.write('\n')
def finish(self) -> None:
logger.info('')
with open(path.join(self.outdir, 'output.txt'), 'w') as self.txt_outfile,\
open(path.join(self.outdir, 'output.json'), 'w') as self.json_outfile:
total_links = 0
for hyperlink in self.hyperlinks.values():
if self.is_ignored_uri(hyperlink.uri):
self.process_result((hyperlink.uri, hyperlink.docname, hyperlink.lineno,
'ignored', '', 0))
else:
self.wqueue.put(hyperlink, False)
total_links += 1
done = 0
while done < total_links:
self.process_result(self.rqueue.get())
done += 1
if self._broken:
self.app.statuscode = 1
self.wqueue.join()
# Shutdown threads.
for worker in self.workers:
self.wqueue.put((CHECK_IMMEDIATELY, None, None, None), False)
class HyperlinkCollector(SphinxPostTransform):
builders = ('linkcheck',)
default_priority = 800
def run(self, **kwargs: Any) -> None:
builder = cast(CheckExternalLinksBuilder, self.app.builder)
hyperlinks = builder.hyperlinks
# reference nodes
for refnode in self.document.traverse(nodes.reference):
if 'refuri' not in refnode:
continue
uri = refnode['refuri']
lineno = get_node_line(refnode)
uri_info = Hyperlink(CHECK_IMMEDIATELY, uri, self.env.docname, lineno)
if uri not in hyperlinks:
hyperlinks[uri] = uri_info
# image nodes
for imgnode in self.document.traverse(nodes.image):
uri = imgnode['candidates'].get('?')
if uri and '://' in uri:
lineno = get_node_line(imgnode)
uri_info = Hyperlink(CHECK_IMMEDIATELY, uri, self.env.docname, lineno)
if uri not in hyperlinks:
hyperlinks[uri] = uri_info
def setup(app: Sphinx) -> Dict[str, Any]:
app.add_builder(CheckExternalLinksBuilder)
app.add_post_transform(HyperlinkCollector)
app.add_config_value('linkcheck_ignore', [], None)
app.add_config_value('linkcheck_auth', [], None)
app.add_config_value('linkcheck_request_headers', {}, None)
app.add_config_value('linkcheck_retries', 1, None)
app.add_config_value('linkcheck_timeout', None, None, [int])
app.add_config_value('linkcheck_workers', 5, None)
app.add_config_value('linkcheck_anchors', True, None)
# Anchors starting with ! are ignored since they are
# commonly used for dynamic pages
app.add_config_value('linkcheck_anchors_ignore', ["^!"], None)
app.add_config_value('linkcheck_rate_limit_timeout', 300.0, None)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
|
worker_handlers.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Code for communicating with the Workers."""
# mypy: disallow-untyped-defs
import collections
import contextlib
import copy
import logging
import queue
import subprocess
import sys
import threading
import time
from typing import TYPE_CHECKING
from typing import Any
from typing import BinaryIO # pylint: disable=unused-import
from typing import Callable
from typing import DefaultDict
from typing import Dict
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Mapping
from typing import Optional
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
from typing import cast
from typing import overload
import grpc
from apache_beam.io import filesystems
from apache_beam.io.filesystems import CompressionTypes
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_provision_api_pb2
from apache_beam.portability.api import beam_provision_api_pb2_grpc
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability.fn_api_runner.execution import Buffer
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import sdk_worker
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.runners.worker.sdk_worker import _Future
from apache_beam.runners.worker.statecache import StateCache
from apache_beam.utils import proto_utils
from apache_beam.utils import thread_pool_executor
from apache_beam.utils.interactive_utils import is_in_notebook
from apache_beam.utils.sentinel import Sentinel
if TYPE_CHECKING:
from grpc import ServicerContext
from google.protobuf import message
from apache_beam.runners.portability.fn_api_runner.fn_runner import ExtendedProvisionInfo # pylint: disable=ungrouped-imports
# State caching is enabled in the fn_api_runner for testing, except for one
# test which runs without state caching (FnApiRunnerTestWithDisabledCaching).
# The cache is disabled in production for other runners.
STATE_CACHE_SIZE = 100
# Time-based flush is enabled in the fn_api_runner by default.
DATA_BUFFER_TIME_LIMIT_MS = 1000
_LOGGER = logging.getLogger(__name__)
T = TypeVar('T')
ConstructorFn = Callable[[
Union['message.Message', bytes],
'sdk_worker.StateHandler',
'ExtendedProvisionInfo',
'GrpcServer'
],
'WorkerHandler']
class ControlConnection(object):
_uid_counter = 0
_lock = threading.Lock()
def __init__(self):
# type: () -> None
self._push_queue = queue.Queue(
) # type: queue.Queue[Union[beam_fn_api_pb2.InstructionRequest, Sentinel]]
self._input = None # type: Optional[Iterable[beam_fn_api_pb2.InstructionResponse]]
self._futures_by_id = dict() # type: Dict[str, ControlFuture]
self._read_thread = threading.Thread(
name='beam_control_read', target=self._read)
self._state = BeamFnControlServicer.UNSTARTED_STATE
def _read(self):
# type: () -> None
assert self._input is not None
for data in self._input:
self._futures_by_id.pop(data.instruction_id).set(data)
@overload
def push(self, req):
# type: (Sentinel) -> None
pass
@overload
def push(self, req):
# type: (beam_fn_api_pb2.InstructionRequest) -> ControlFuture
pass
def push(self, req):
# type: (Union[Sentinel, beam_fn_api_pb2.InstructionRequest]) -> Optional[ControlFuture]
if req is BeamFnControlServicer._DONE_MARKER:
self._push_queue.put(req)
return None
if not req.instruction_id:
with ControlConnection._lock:
ControlConnection._uid_counter += 1
req.instruction_id = 'control_%s' % ControlConnection._uid_counter
future = ControlFuture(req.instruction_id)
self._futures_by_id[req.instruction_id] = future
self._push_queue.put(req)
return future
def get_req(self):
# type: () -> Union[Sentinel, beam_fn_api_pb2.InstructionRequest]
return self._push_queue.get()
def set_input(self, input):
# type: (Iterable[beam_fn_api_pb2.InstructionResponse]) -> None
with ControlConnection._lock:
if self._input:
raise RuntimeError('input is already set.')
self._input = input
self._read_thread.start()
self._state = BeamFnControlServicer.STARTED_STATE
def close(self):
# type: () -> None
with ControlConnection._lock:
if self._state == BeamFnControlServicer.STARTED_STATE:
self.push(BeamFnControlServicer._DONE_MARKER)
self._read_thread.join()
self._state = BeamFnControlServicer.DONE_STATE
def abort(self, exn):
# type: (Exception) -> None
for future in self._futures_by_id.values():
future.abort(exn)
class BeamFnControlServicer(beam_fn_api_pb2_grpc.BeamFnControlServicer):
"""Implementation of BeamFnControlServicer for clients."""
UNSTARTED_STATE = 'unstarted'
STARTED_STATE = 'started'
DONE_STATE = 'done'
_DONE_MARKER = Sentinel.sentinel
def __init__(
self,
worker_manager, # type: WorkerHandlerManager
):
# type: (...) -> None
self._worker_manager = worker_manager
self._lock = threading.Lock()
self._uid_counter = 0
self._state = self.UNSTARTED_STATE
    # The following self._req_* variables are used for debugging purposes;
    # data is added only when self._log_req is True.
self._req_sent = collections.defaultdict(int) # type: DefaultDict[str, int]
self._log_req = logging.getLogger().getEffectiveLevel() <= logging.DEBUG
self._connections_by_worker_id = collections.defaultdict(
ControlConnection) # type: DefaultDict[str, ControlConnection]
def get_conn_by_worker_id(self, worker_id):
# type: (str) -> ControlConnection
with self._lock:
return self._connections_by_worker_id[worker_id]
def Control(self,
iterator, # type: Iterable[beam_fn_api_pb2.InstructionResponse]
context # type: ServicerContext
):
# type: (...) -> Iterator[beam_fn_api_pb2.InstructionRequest]
with self._lock:
if self._state == self.DONE_STATE:
return
else:
self._state = self.STARTED_STATE
worker_id = dict(context.invocation_metadata()).get('worker_id')
if not worker_id:
raise RuntimeError(
          'All workers communicating through gRPC should have '
          'a worker_id. Received None.')
control_conn = self.get_conn_by_worker_id(worker_id)
control_conn.set_input(iterator)
while True:
to_push = control_conn.get_req()
if to_push is self._DONE_MARKER:
return
yield to_push
if self._log_req:
self._req_sent[to_push.instruction_id] += 1
def done(self):
# type: () -> None
self._state = self.DONE_STATE
_LOGGER.debug(
'Runner: Requests sent by runner: %s',
[(str(req), cnt) for req, cnt in self._req_sent.items()])
def GetProcessBundleDescriptor(self, id, context=None):
# type: (beam_fn_api_pb2.GetProcessBundleDescriptorRequest, Any) -> beam_fn_api_pb2.ProcessBundleDescriptor
return self._worker_manager.get_process_bundle_descriptor(id)
class WorkerHandler(object):
"""worker_handler for a worker.
It provides utilities to start / stop the worker, provision any resources for
it, as well as provide descriptors for the data, state and logging APIs for
it.
"""
_registered_environments = {} # type: Dict[str, Tuple[ConstructorFn, type]]
_worker_id_counter = -1
_lock = threading.Lock()
control_conn = None # type: ControlConnection
data_conn = None # type: data_plane._GrpcDataChannel
def __init__(self,
control_handler, # type: Any
data_plane_handler, # type: Any
state, # type: sdk_worker.StateHandler
provision_info # type: ExtendedProvisionInfo
):
# type: (...) -> None
"""Initialize a WorkerHandler.
Args:
control_handler:
data_plane_handler (data_plane.DataChannel):
state:
provision_info:
"""
self.control_handler = control_handler
self.data_plane_handler = data_plane_handler
self.state = state
self.provision_info = provision_info
with WorkerHandler._lock:
WorkerHandler._worker_id_counter += 1
self.worker_id = 'worker_%s' % WorkerHandler._worker_id_counter
def close(self):
# type: () -> None
self.stop_worker()
def start_worker(self):
# type: () -> None
raise NotImplementedError
def stop_worker(self):
# type: () -> None
raise NotImplementedError
def control_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
raise NotImplementedError
def artifact_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
raise NotImplementedError
def data_api_service_descriptor(self):
# type: () -> Optional[endpoints_pb2.ApiServiceDescriptor]
raise NotImplementedError
def state_api_service_descriptor(self):
# type: () -> Optional[endpoints_pb2.ApiServiceDescriptor]
raise NotImplementedError
def logging_api_service_descriptor(self):
# type: () -> Optional[endpoints_pb2.ApiServiceDescriptor]
raise NotImplementedError
@classmethod
def register_environment(
cls,
urn, # type: str
payload_type # type: Optional[Type[T]]
):
# type: (...) -> Callable[[Callable[[T, sdk_worker.StateHandler, ExtendedProvisionInfo, GrpcServer], WorkerHandler]], Callable[[T, sdk_worker.StateHandler, ExtendedProvisionInfo, GrpcServer], WorkerHandler]]
def wrapper(constructor):
# type: (Callable) -> Callable
cls._registered_environments[urn] = constructor, payload_type # type: ignore[assignment]
return constructor
return wrapper
@classmethod
def create(cls,
environment, # type: beam_runner_api_pb2.Environment
state, # type: sdk_worker.StateHandler
provision_info, # type: ExtendedProvisionInfo
grpc_server # type: GrpcServer
):
# type: (...) -> WorkerHandler
constructor, payload_type = cls._registered_environments[environment.urn]
return constructor(
proto_utils.parse_Bytes(environment.payload, payload_type),
state,
provision_info,
grpc_server)
# This takes a WorkerHandlerManager instead of GrpcServer, so it is not
# compatible with WorkerHandler.register_environment. There is a special case
# in WorkerHandlerManager.get_worker_handlers() that allows it to work.
@WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON, None) # type: ignore[arg-type]
class EmbeddedWorkerHandler(WorkerHandler):
"""An in-memory worker_handler for fn API control, state and data planes."""
def __init__(self,
unused_payload, # type: None
state, # type: sdk_worker.StateHandler
provision_info, # type: ExtendedProvisionInfo
worker_manager, # type: WorkerHandlerManager
):
# type: (...) -> None
super(EmbeddedWorkerHandler, self).__init__(
self, data_plane.InMemoryDataChannel(), state, provision_info)
self.control_conn = self # type: ignore # need Protocol to describe this
self.data_conn = self.data_plane_handler
state_cache = StateCache(STATE_CACHE_SIZE)
self.bundle_processor_cache = sdk_worker.BundleProcessorCache(
SingletonStateHandlerFactory(
sdk_worker.CachingStateHandler(state_cache, state)),
data_plane.InMemoryDataChannelFactory(
self.data_plane_handler.inverse()),
worker_manager._process_bundle_descriptors)
self.worker = sdk_worker.SdkWorker(
self.bundle_processor_cache,
state_cache_metrics_fn=state_cache.get_monitoring_infos)
self._uid_counter = 0
def push(self, request):
# type: (beam_fn_api_pb2.InstructionRequest) -> ControlFuture
if not request.instruction_id:
self._uid_counter += 1
request.instruction_id = 'control_%s' % self._uid_counter
response = self.worker.do_instruction(request)
return ControlFuture(request.instruction_id, response)
def start_worker(self):
# type: () -> None
pass
def stop_worker(self):
# type: () -> None
self.bundle_processor_cache.shutdown()
def done(self):
# type: () -> None
pass
def data_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
# A fake endpoint is needed for properly constructing timer info map in
# bundle_processor for fnapi_runner.
return endpoints_pb2.ApiServiceDescriptor(url='fake')
def state_api_service_descriptor(self):
# type: () -> None
return None
def logging_api_service_descriptor(self):
# type: () -> None
return None
class BasicLoggingService(beam_fn_api_pb2_grpc.BeamFnLoggingServicer):
LOG_LEVEL_MAP = {
beam_fn_api_pb2.LogEntry.Severity.CRITICAL: logging.CRITICAL,
beam_fn_api_pb2.LogEntry.Severity.ERROR: logging.ERROR,
beam_fn_api_pb2.LogEntry.Severity.WARN: logging.WARNING,
beam_fn_api_pb2.LogEntry.Severity.NOTICE: logging.INFO + 1,
beam_fn_api_pb2.LogEntry.Severity.INFO: logging.INFO,
beam_fn_api_pb2.LogEntry.Severity.DEBUG: logging.DEBUG,
beam_fn_api_pb2.LogEntry.Severity.TRACE: logging.DEBUG - 1,
beam_fn_api_pb2.LogEntry.Severity.UNSPECIFIED: logging.NOTSET,
}
def Logging(self, log_messages, context=None):
# type: (Iterable[beam_fn_api_pb2.LogEntry.List], Any) -> Iterator[beam_fn_api_pb2.LogControl]
yield beam_fn_api_pb2.LogControl()
for log_message in log_messages:
for log in log_message.log_entries:
logging.log(self.LOG_LEVEL_MAP[log.severity], str(log))
class BasicProvisionService(beam_provision_api_pb2_grpc.ProvisionServiceServicer
):
def __init__(self, base_info, worker_manager):
# type: (beam_provision_api_pb2.ProvisionInfo, WorkerHandlerManager) -> None
self._base_info = base_info
self._worker_manager = worker_manager
def GetProvisionInfo(self, request, context=None):
# type: (Any, Optional[ServicerContext]) -> beam_provision_api_pb2.GetProvisionInfoResponse
if context:
worker_id = dict(context.invocation_metadata())['worker_id']
worker = self._worker_manager.get_worker(worker_id)
info = copy.copy(worker.provision_info.provision_info)
info.logging_endpoint.CopyFrom(worker.logging_api_service_descriptor())
info.artifact_endpoint.CopyFrom(worker.artifact_api_service_descriptor())
info.control_endpoint.CopyFrom(worker.control_api_service_descriptor())
else:
info = self._base_info
return beam_provision_api_pb2.GetProvisionInfoResponse(info=info)
class GrpcServer(object):
_DEFAULT_SHUTDOWN_TIMEOUT_SECS = 5
def __init__(self,
state, # type: StateServicer
provision_info, # type: Optional[ExtendedProvisionInfo]
worker_manager, # type: WorkerHandlerManager
):
# type: (...) -> None
self.state = state
self.provision_info = provision_info
self.control_server = grpc.server(
thread_pool_executor.shared_unbounded_instance())
self.control_port = self.control_server.add_insecure_port('[::]:0')
self.control_address = 'localhost:%s' % self.control_port
# Options to have no limits (-1) on the size of the messages
# received or sent over the data plane. The actual buffer size
# is controlled in a layer above.
no_max_message_sizes = [("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1)]
self.data_server = grpc.server(
thread_pool_executor.shared_unbounded_instance(),
options=no_max_message_sizes)
self.data_port = self.data_server.add_insecure_port('[::]:0')
self.state_server = grpc.server(
thread_pool_executor.shared_unbounded_instance(),
options=no_max_message_sizes)
self.state_port = self.state_server.add_insecure_port('[::]:0')
self.control_handler = BeamFnControlServicer(worker_manager)
beam_fn_api_pb2_grpc.add_BeamFnControlServicer_to_server(
self.control_handler, self.control_server)
# If we have provision info, serve these off the control port as well.
if self.provision_info:
if self.provision_info.provision_info:
beam_provision_api_pb2_grpc.add_ProvisionServiceServicer_to_server(
BasicProvisionService(
self.provision_info.provision_info, worker_manager),
self.control_server)
def open_uncompressed(f):
# type: (str) -> BinaryIO
return filesystems.FileSystems.open(
f, compression_type=CompressionTypes.UNCOMPRESSED)
beam_artifact_api_pb2_grpc.add_ArtifactRetrievalServiceServicer_to_server(
artifact_service.ArtifactRetrievalService(
file_reader=open_uncompressed),
self.control_server)
self.data_plane_handler = data_plane.BeamFnDataServicer(
DATA_BUFFER_TIME_LIMIT_MS)
beam_fn_api_pb2_grpc.add_BeamFnDataServicer_to_server(
self.data_plane_handler, self.data_server)
beam_fn_api_pb2_grpc.add_BeamFnStateServicer_to_server(
GrpcStateServicer(state), self.state_server)
self.logging_server = grpc.server(
thread_pool_executor.shared_unbounded_instance(),
options=no_max_message_sizes)
self.logging_port = self.logging_server.add_insecure_port('[::]:0')
beam_fn_api_pb2_grpc.add_BeamFnLoggingServicer_to_server(
BasicLoggingService(), self.logging_server)
_LOGGER.info('starting control server on port %s', self.control_port)
_LOGGER.info('starting data server on port %s', self.data_port)
_LOGGER.info('starting state server on port %s', self.state_port)
_LOGGER.info('starting logging server on port %s', self.logging_port)
self.logging_server.start()
self.state_server.start()
self.data_server.start()
self.control_server.start()
def close(self):
# type: () -> None
self.control_handler.done()
to_wait = [
self.control_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.data_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.state_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.logging_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS)
]
for w in to_wait:
w.wait()
class GrpcWorkerHandler(WorkerHandler):
"""An grpc based worker_handler for fn API control, state and data planes."""
def __init__(self,
state, # type: StateServicer
provision_info, # type: ExtendedProvisionInfo
grpc_server # type: GrpcServer
):
# type: (...) -> None
self._grpc_server = grpc_server
super(GrpcWorkerHandler, self).__init__(
self._grpc_server.control_handler,
self._grpc_server.data_plane_handler,
state,
provision_info)
self.state = state
self.control_address = self.port_from_worker(self._grpc_server.control_port)
self.control_conn = self._grpc_server.control_handler.get_conn_by_worker_id(
self.worker_id)
self.data_conn = self._grpc_server.data_plane_handler.get_conn_by_worker_id(
self.worker_id)
def control_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.control_port))
def artifact_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.control_port))
def data_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.data_port))
def state_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.state_port))
def logging_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.logging_port))
def close(self):
# type: () -> None
self.control_conn.close()
self.data_conn.close()
super(GrpcWorkerHandler, self).close()
def port_from_worker(self, port):
# type: (int) -> str
return '%s:%s' % (self.host_from_worker(), port)
def host_from_worker(self):
# type: () -> str
return 'localhost'
@WorkerHandler.register_environment(
common_urns.environments.EXTERNAL.urn, beam_runner_api_pb2.ExternalPayload)
class ExternalWorkerHandler(GrpcWorkerHandler):
def __init__(self,
external_payload, # type: beam_runner_api_pb2.ExternalPayload
state, # type: StateServicer
provision_info, # type: ExtendedProvisionInfo
grpc_server # type: GrpcServer
):
# type: (...) -> None
super(ExternalWorkerHandler,
self).__init__(state, provision_info, grpc_server)
self._external_payload = external_payload
def start_worker(self):
# type: () -> None
_LOGGER.info("Requesting worker at %s", self._external_payload.endpoint.url)
stub = beam_fn_api_pb2_grpc.BeamFnExternalWorkerPoolStub(
GRPCChannelFactory.insecure_channel(
self._external_payload.endpoint.url))
control_descriptor = endpoints_pb2.ApiServiceDescriptor(
url=self.control_address)
response = stub.StartWorker(
beam_fn_api_pb2.StartWorkerRequest(
worker_id=self.worker_id,
control_endpoint=control_descriptor,
artifact_endpoint=control_descriptor,
provision_endpoint=control_descriptor,
logging_endpoint=self.logging_api_service_descriptor(),
params=self._external_payload.params))
if response.error:
raise RuntimeError("Error starting worker: %s" % response.error)
def stop_worker(self):
# type: () -> None
pass
def host_from_worker(self):
# type: () -> str
# TODO(BEAM-8646): Reconcile across platforms.
if sys.platform in ['win32', 'darwin']:
return 'localhost'
import socket
return socket.getfqdn()
@WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON_GRPC, bytes)
class EmbeddedGrpcWorkerHandler(GrpcWorkerHandler):
def __init__(self,
payload, # type: bytes
state, # type: StateServicer
provision_info, # type: ExtendedProvisionInfo
grpc_server # type: GrpcServer
):
# type: (...) -> None
super(EmbeddedGrpcWorkerHandler,
self).__init__(state, provision_info, grpc_server)
from apache_beam.transforms.environments import EmbeddedPythonGrpcEnvironment
config = EmbeddedPythonGrpcEnvironment.parse_config(payload.decode('utf-8'))
self._state_cache_size = config.get('state_cache_size') or STATE_CACHE_SIZE
self._data_buffer_time_limit_ms = \
config.get('data_buffer_time_limit_ms') or DATA_BUFFER_TIME_LIMIT_MS
def start_worker(self):
# type: () -> None
self.worker = sdk_worker.SdkHarness(
self.control_address,
state_cache_size=self._state_cache_size,
data_buffer_time_limit_ms=self._data_buffer_time_limit_ms,
worker_id=self.worker_id)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
self.worker_thread.daemon = True
self.worker_thread.start()
def stop_worker(self):
# type: () -> None
self.worker_thread.join()
# The subprocess module is not threadsafe on Python 2.7. Use this lock to
# prevent concurrent calls to Popen().
SUBPROCESS_LOCK = threading.Lock()
@WorkerHandler.register_environment(python_urns.SUBPROCESS_SDK, bytes)
class SubprocessSdkWorkerHandler(GrpcWorkerHandler):
def __init__(self,
worker_command_line, # type: bytes
state, # type: StateServicer
provision_info, # type: ExtendedProvisionInfo
grpc_server # type: GrpcServer
):
# type: (...) -> None
super(SubprocessSdkWorkerHandler,
self).__init__(state, provision_info, grpc_server)
self._worker_command_line = worker_command_line
def start_worker(self):
# type: () -> None
from apache_beam.runners.portability import local_job_service
self.worker = local_job_service.SubprocessSdkWorker(
self._worker_command_line,
self.control_address,
self.provision_info,
self.worker_id)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
self.worker_thread.start()
def stop_worker(self):
# type: () -> None
self.worker_thread.join()
@WorkerHandler.register_environment(
common_urns.environments.DOCKER.urn, beam_runner_api_pb2.DockerPayload)
class DockerSdkWorkerHandler(GrpcWorkerHandler):
def __init__(self,
payload, # type: beam_runner_api_pb2.DockerPayload
state, # type: StateServicer
provision_info, # type: ExtendedProvisionInfo
grpc_server # type: GrpcServer
):
# type: (...) -> None
super(DockerSdkWorkerHandler,
self).__init__(state, provision_info, grpc_server)
self._container_image = payload.container_image
self._container_id = None # type: Optional[bytes]
def host_from_worker(self):
# type: () -> str
if sys.platform == 'darwin':
# See https://docs.docker.com/docker-for-mac/networking/
return 'host.docker.internal'
if sys.platform == 'linux' and is_in_notebook():
import socket
# Gets ipv4 address of current host. Note the host is not guaranteed to
# be localhost because the python SDK could be running within a container.
return socket.gethostbyname(socket.getfqdn())
return super(DockerSdkWorkerHandler, self).host_from_worker()
def start_worker(self):
# type: () -> None
with SUBPROCESS_LOCK:
try:
_LOGGER.info('Attempting to pull image %s', self._container_image)
subprocess.check_call(['docker', 'pull', self._container_image])
except Exception:
_LOGGER.info(
'Unable to pull image %s, defaulting to local image if it exists' %
self._container_image)
self._container_id = subprocess.check_output([
'docker',
'run',
'-d',
# TODO: credentials
'--network=host',
self._container_image,
'--id=%s' % self.worker_id,
'--logging_endpoint=%s' % self.logging_api_service_descriptor().url,
'--control_endpoint=%s' % self.control_address,
'--artifact_endpoint=%s' % self.control_address,
'--provision_endpoint=%s' % self.control_address,
]).strip()
assert self._container_id is not None
while True:
status = subprocess.check_output([
'docker', 'inspect', '-f', '{{.State.Status}}', self._container_id
]).strip()
_LOGGER.info(
'Waiting for docker to start up. Current status is %s' %
status.decode('utf-8'))
if status == b'running':
_LOGGER.info(
'Docker container is running. container_id = %s, '
'worker_id = %s',
self._container_id,
self.worker_id)
break
elif status in (b'dead', b'exited'):
subprocess.call(['docker', 'container', 'logs', self._container_id])
raise RuntimeError(
'SDK failed to start. Final status is %s' %
status.decode('utf-8'))
time.sleep(1)
self._done = False
t = threading.Thread(target=self.watch_container)
t.daemon = True
t.start()
def watch_container(self):
# type: () -> None
while not self._done:
assert self._container_id is not None
status = subprocess.check_output(
['docker', 'inspect', '-f', '{{.State.Status}}',
self._container_id]).strip()
if status != b'running':
if not self._done:
logs = subprocess.check_output([
'docker', 'container', 'logs', '--tail', '10', self._container_id
],
stderr=subprocess.STDOUT)
_LOGGER.info(logs)
self.control_conn.abort(
RuntimeError(
'SDK exited unexpectedly. '
'Final status is %s. Final log line is %s' % (
status.decode('utf-8'),
logs.decode('utf-8').strip().split('\n')[-1])))
time.sleep(5)
def stop_worker(self):
# type: () -> None
self._done = True
if self._container_id:
with SUBPROCESS_LOCK:
subprocess.call(['docker', 'kill', self._container_id])
class WorkerHandlerManager(object):
"""
Manages creation of ``WorkerHandler``s.
Caches ``WorkerHandler``s based on environment id.
"""
def __init__(self,
environments, # type: Mapping[str, beam_runner_api_pb2.Environment]
job_provision_info # type: ExtendedProvisionInfo
):
# type: (...) -> None
self._environments = environments
self._job_provision_info = job_provision_info
self._cached_handlers = collections.defaultdict(
list) # type: DefaultDict[str, List[WorkerHandler]]
self._workers_by_id = {} # type: Dict[str, WorkerHandler]
self.state_servicer = StateServicer()
self._grpc_server = None # type: Optional[GrpcServer]
self._process_bundle_descriptors = {
} # type: Dict[str, beam_fn_api_pb2.ProcessBundleDescriptor]
def register_process_bundle_descriptor(self, process_bundle_descriptor):
# type: (beam_fn_api_pb2.ProcessBundleDescriptor) -> None
self._process_bundle_descriptors[
process_bundle_descriptor.id] = process_bundle_descriptor
def get_process_bundle_descriptor(self, request):
# type: (beam_fn_api_pb2.GetProcessBundleDescriptorRequest) -> beam_fn_api_pb2.ProcessBundleDescriptor
return self._process_bundle_descriptors[
request.process_bundle_descriptor_id]
def get_worker_handlers(
self,
environment_id, # type: Optional[str]
num_workers # type: int
):
# type: (...) -> List[WorkerHandler]
if environment_id is None:
# Any environment will do, pick one arbitrarily.
environment_id = next(iter(self._environments.keys()))
environment = self._environments[environment_id]
# assume all environments except EMBEDDED_PYTHON use gRPC.
if environment.urn == python_urns.EMBEDDED_PYTHON:
# special case for EmbeddedWorkerHandler: there's no need for a gRPC
# server, but we need to pass self instead. Cast to make the type check
# on WorkerHandler.create() think we have a GrpcServer instance.
grpc_server = cast(GrpcServer, self)
elif self._grpc_server is None:
self._grpc_server = GrpcServer(
self.state_servicer, self._job_provision_info, self)
grpc_server = self._grpc_server
else:
grpc_server = self._grpc_server
worker_handler_list = self._cached_handlers[environment_id]
if len(worker_handler_list) < num_workers:
for _ in range(len(worker_handler_list), num_workers):
worker_handler = WorkerHandler.create(
environment,
self.state_servicer,
self._job_provision_info.for_environment(environment),
grpc_server)
_LOGGER.info(
"Created Worker handler %s for environment %s (%s, %r)",
worker_handler,
environment_id,
environment.urn,
environment.payload)
self._cached_handlers[environment_id].append(worker_handler)
self._workers_by_id[worker_handler.worker_id] = worker_handler
worker_handler.start_worker()
return self._cached_handlers[environment_id][:num_workers]
def close_all(self):
# type: () -> None
for worker_handler_list in self._cached_handlers.values():
for worker_handler in set(worker_handler_list):
try:
worker_handler.close()
except Exception:
_LOGGER.error(
"Error closing worker_handler %s" % worker_handler, exc_info=True)
self._cached_handlers = {} # type: ignore[assignment]
self._workers_by_id = {}
if self._grpc_server is not None:
self._grpc_server.close()
self._grpc_server = None
def get_worker(self, worker_id):
# type: (str) -> WorkerHandler
return self._workers_by_id[worker_id]
class StateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer,
sdk_worker.StateHandler):
class CopyOnWriteState(object):
def __init__(self, underlying):
# type: (DefaultDict[bytes, Buffer]) -> None
self._underlying = underlying
self._overlay = {} # type: Dict[bytes, Buffer]
def __getitem__(self, key):
# type: (bytes) -> Buffer
if key in self._overlay:
return self._overlay[key]
else:
return StateServicer.CopyOnWriteList(
self._underlying, self._overlay, key)
def __delitem__(self, key):
# type: (bytes) -> None
self._overlay[key] = []
def commit(self):
# type: () -> DefaultDict[bytes, Buffer]
self._underlying.update(self._overlay)
return self._underlying
class CopyOnWriteList(object):
def __init__(self,
underlying, # type: DefaultDict[bytes, Buffer]
overlay, # type: Dict[bytes, Buffer]
key # type: bytes
):
# type: (...) -> None
self._underlying = underlying
self._overlay = overlay
self._key = key
def __iter__(self):
# type: () -> Iterator[bytes]
if self._key in self._overlay:
return iter(self._overlay[self._key])
else:
return iter(self._underlying[self._key])
def append(self, item):
# type: (bytes) -> None
if self._key not in self._overlay:
self._overlay[self._key] = list(self._underlying[self._key])
self._overlay[self._key].append(item)
StateType = Union[CopyOnWriteState, DefaultDict[bytes, Buffer]]
def __init__(self):
# type: () -> None
self._lock = threading.Lock()
self._state = collections.defaultdict(list) # type: StateServicer.StateType
self._checkpoint = None # type: Optional[StateServicer.StateType]
self._use_continuation_tokens = False
self._continuations = {} # type: Dict[bytes, Tuple[bytes, ...]]
def checkpoint(self):
# type: () -> None
assert self._checkpoint is None and not \
isinstance(self._state, StateServicer.CopyOnWriteState)
self._checkpoint = self._state
self._state = StateServicer.CopyOnWriteState(self._state)
def commit(self):
# type: () -> None
assert isinstance(self._state,
StateServicer.CopyOnWriteState) and \
isinstance(self._checkpoint,
StateServicer.CopyOnWriteState)
self._state.commit()
self._state = self._checkpoint.commit()
self._checkpoint = None
def restore(self):
# type: () -> None
assert self._checkpoint is not None
self._state = self._checkpoint
self._checkpoint = None
@contextlib.contextmanager
def process_instruction_id(self, unused_instruction_id):
# type: (Any) -> Iterator
yield
def get_raw(self,
state_key, # type: beam_fn_api_pb2.StateKey
continuation_token=None # type: Optional[bytes]
):
# type: (...) -> Tuple[bytes, Optional[bytes]]
with self._lock:
full_state = self._state[self._to_key(state_key)]
if self._use_continuation_tokens:
# The token is "nonce:index".
if not continuation_token:
token_base = b'token_%x' % len(self._continuations)
self._continuations[token_base] = tuple(full_state)
return b'', b'%s:0' % token_base
else:
token_base, index = continuation_token.split(b':')
ix = int(index)
full_state_cont = self._continuations[token_base]
if ix == len(full_state_cont):
return b'', None
else:
return full_state_cont[ix], b'%s:%d' % (token_base, ix + 1)
else:
assert not continuation_token
return b''.join(full_state), None
def append_raw(
self,
state_key, # type: beam_fn_api_pb2.StateKey
data # type: bytes
):
# type: (...) -> _Future
with self._lock:
self._state[self._to_key(state_key)].append(data)
return _Future.done()
def clear(self, state_key):
# type: (beam_fn_api_pb2.StateKey) -> _Future
with self._lock:
try:
del self._state[self._to_key(state_key)]
except KeyError:
# This may happen with the caching layer across bundles. Caching may
# skip this storage layer for a blocking_get(key) request. Without
# the caching, the state for a key would be initialized via the
# defaultdict that _state uses.
pass
return _Future.done()
@staticmethod
def _to_key(state_key):
# type: (beam_fn_api_pb2.StateKey) -> bytes
return state_key.SerializeToString()
class GrpcStateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer):
def __init__(self, state):
# type: (StateServicer) -> None
self._state = state
def State(self,
request_stream, # type: Iterable[beam_fn_api_pb2.StateRequest]
context=None # type: Any
):
# type: (...) -> Iterator[beam_fn_api_pb2.StateResponse]
# Note that this eagerly mutates state, assuming any failures are fatal.
# Thus it is safe to ignore instruction_id.
for request in request_stream:
request_type = request.WhichOneof('request')
if request_type == 'get':
data, continuation_token = self._state.get_raw(
request.state_key, request.get.continuation_token)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
get=beam_fn_api_pb2.StateGetResponse(
data=data, continuation_token=continuation_token))
elif request_type == 'append':
self._state.append_raw(request.state_key, request.append.data)
yield beam_fn_api_pb2.StateResponse(
id=request.id, append=beam_fn_api_pb2.StateAppendResponse())
elif request_type == 'clear':
self._state.clear(request.state_key)
yield beam_fn_api_pb2.StateResponse(
id=request.id, clear=beam_fn_api_pb2.StateClearResponse())
else:
raise NotImplementedError('Unknown state request: %s' % request_type)
class SingletonStateHandlerFactory(sdk_worker.StateHandlerFactory):
"""A singleton cache for a StateServicer."""
def __init__(self, state_handler):
# type: (sdk_worker.CachingStateHandler) -> None
self._state_handler = state_handler
def create_state_handler(self, api_service_descriptor):
# type: (endpoints_pb2.ApiServiceDescriptor) -> sdk_worker.CachingStateHandler
"""Returns the singleton state handler."""
return self._state_handler
def close(self):
# type: () -> None
"""Does nothing."""
pass
class ControlFuture(object):
def __init__(self,
instruction_id, # type: str
response=None # type: Optional[beam_fn_api_pb2.InstructionResponse]
):
# type: (...) -> None
self.instruction_id = instruction_id
self._response = response
if response is None:
self._condition = threading.Condition()
self._exception = None # type: Optional[Exception]
def is_done(self):
# type: () -> bool
return self._response is not None
def set(self, response):
# type: (beam_fn_api_pb2.InstructionResponse) -> None
with self._condition:
self._response = response
self._condition.notify_all()
def get(self, timeout=None):
# type: (Optional[float]) -> beam_fn_api_pb2.InstructionResponse
if not self._response and not self._exception:
with self._condition:
if not self._response and not self._exception:
self._condition.wait(timeout)
if self._exception:
raise self._exception
else:
assert self._response is not None
return self._response
def abort(self, exception):
# type: (Exception) -> None
with self._condition:
self._exception = exception
self._condition.notify_all()
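# Hedged illustration (not part of the original module): ControlFuture.get()
# blocks until another thread calls set(), which mirrors how ControlConnection's
# read thread completes the futures returned by push() above.
def _control_future_demo():
  # type: () -> beam_fn_api_pb2.InstructionResponse
  future = ControlFuture('control_demo')
  responder = threading.Thread(
      target=lambda: future.set(
          beam_fn_api_pb2.InstructionResponse(instruction_id='control_demo')))
  responder.start()
  result = future.get(timeout=5)  # returns as soon as set() has fired
  responder.join()
  return result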
|
example0.py
|
#!/usr/bin/env python
import multiprocessing
def worker():
print('new worker')
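# Hedged variant (not in the original example): keep references to the worker
# processes and join them so the parent waits for every worker to finish.
def run_and_join(count=8):
    procs = [multiprocessing.Process(target=worker) for _ in range(count)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()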
if __name__ == '__main__':
    # Guard the spawning code so the script also works with the 'spawn' start method.
    for i in range(8):
        multiprocessing.Process(target=worker).start()
|
_gui.py
|
import logging
import sys
from wse_investor._best_companies import get
from tkinter import *
from threading import *
logger = logging.getLogger()
class StdoutDirector:
def __init__(self, text_area):
self.text_area = text_area
def write(self, msg):
self.text_area.insert(END, msg)
self.text_area.yview(END)
def flush(self):
pass
class Gui:
def __init__(self, get_companies_method):
self._get_companies_method = get_companies_method
self._root = Tk()
self._frame = Frame(self._root)
self._main_menu = Menu(self._frame)
self._configure_window()
self._configure_main_menu()
self._configure_listbox()
def _configure_window(self):
self._root.title("WSE Investor")
self._root.geometry("1000x600")
self._root.config(menu=self._main_menu)
self._frame.pack()
def _configure_listbox(self):
self._scroll_y_axis = Scrollbar(self._frame)
self._scroll_y_axis.pack(side=RIGHT, fill=Y)
self._text = Text(self._frame, undo=True, height=35, width=165, yscrollcommand=self._scroll_y_axis.set)
self._text.pack(expand=True, fill=BOTH)
self._scroll_y_axis.config(command=self._text.yview, )
def _configure_main_menu(self):
self._main_menu.add_command(label="Start", command=self._start)
self._main_menu.add_command(label="Exit", command=self._exit)
def run(self):
self._root.mainloop()
def _start(self):
_thread_get_companies = Thread(target=self._get_companies_process, daemon=True)
sys.stdout = StdoutDirector(self._text)
_text_widget_handler = logging.StreamHandler(stream=sys.stdout)
logger.addHandler(_text_widget_handler)
_thread_get_companies.start()
def _exit(self):
self._root.destroy()
def _get_companies_process(self):
self._get_companies_method()
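# Hedged usage sketch (not part of the original module): assuming the imported
# `get` function is the intended companies provider, the GUI could be started
# like this.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    Gui(get).run()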
|
timing.py
|
"""
Team Neural Net of IoT Devices
Test Script for Collecting Neural Net runtime Data on Raspberry Pi
2018 SIUE Senior Project
"""
import subprocess
import sys
import threading
import time
import os
minTime = 1000
bestNumLayers = 0
bestLayerWidth = 0
sleepTime = 3
def javakill():
time.sleep(sleepTime)
os.system("killall java")
if len(sys.argv) < 2:
    print("Specify jar to time")
    sys.exit()
#Vary network size
#num layers
for i in range(3,11):
#layer width
for j in range(3,11):
#spin off thread to end pronghorn because it stalls when done
thread = threading.Thread(target=javakill, args=())
thread.start()
subprocess.call(['java', '-jar', 'target/' + sys.argv[1], "-n", str(j), "-l", str(i), "-testing", "-win", "OUTPUT-weights" + str(i) + str(j), "-bin", "OUTPUT-biases" + str(i) + str(j)])
"""
print("Execution time of net was " + str(end - start) + " with " + str(i) +
" layers, " + str(j) + " nodes per layer, and " + str(i * j) + " total nodes.")
if ((end - start) < minTime):
minTime = (end - start)
bestNumLayers = i
bestLayerWidth = j
#Which size performed best?
print "\nThe best number of layers was " + str(bestNumLayers)
print "The best layer width was " + str(bestLayerWidth)
print "The time for this combo was " + str(minTime) + " seconds."
#Use most performant network size from above to classify many test examples, then find avg
avg = 0;
numToTest = 1000
for i in (0,numToTest):
start = time.time()
#subprocess.call(['java', '-jar', 'target/' + sys.argv[1]], "-n", bestLayerWidth, "-l", bestNumLayers, "-testing", "-win", "fn", -"bin", "fn")
end = time.time()
avg += (end - start)
avg = avg / numToTest
print "\nThe average classification time for " + str(numToTest) + " test examples was " + str(avg) + " seconds."
"""
|
test_track_sensors.py
|
#----------------------------------------------------------------------
# This programme provides a simple test of the track sensors
# using the Raspberry Pi GPIO inputs
# ---------------------------------------------------------------------
from tkinter import *
from model_railway_signals import *
from model_railway_signals import common
import logging
import time
import threading
logging.basicConfig(format='%(levelname)s: %(message)s',level=logging.DEBUG)
#----------------------------------------------------------------------
# This is the thread to report the state of all the track sensors
#----------------------------------------------------------------------
def report_sensor_status_thread():
while True:
for sensor in range(1,17):
if track_sensor_active(sensor):
set_section_occupied(sensor)
else:
clear_section_occupied(sensor)
time.sleep (0.01)
#------------------------------------------------------------------------------------
# This is the external callback for sensor events
#------------------------------------------------------------------------------------
def main_callback_function(item_id,callback_type):
global main_thread
print ("Callback into main program - Item: "+str(item_id)+" - Callback Type: "+str(callback_type))
callback_thread = threading.get_ident()
print ("Main Thread "+str(main_thread)+" and Callback Thread "+str(callback_thread)+" should be identical" )
if callback_type == section_callback_type.section_switched:
if item_id == 17: set_section_occupied(23)
if item_id == 18: clear_section_occupied(23)
if item_id == 19: print ("Section 23 Occupied: "+str(section_occupied(23)))
if item_id == 20: set_section_occupied(50)
if item_id == 21: clear_section_occupied(50)
if item_id == 22: print ("Section 50 Occupied: "+str(section_occupied(50)))
if item_id == 24: print ("State of Sensor 50: "+str(track_sensor_active(50)))
return()
#------------------------------------------------------------------------------------
# This is where the code begins
#------------------------------------------------------------------------------------
print ("Creating Window and Canvas")
window = Tk()
window.title("Test Track Sensors and Track Sections")
canvas = Canvas(window,height=680,width=500,bg="grey85")
canvas.pack()
print ("Negative test - passing a callback to the tkinter thread before we have created any signal")
common.execute_function_in_tkinter_thread (lambda: main_callback_function(1,2))
print ("Creating signals - to automatically trigger from sensors 9 - 16")
canvas.create_line(0,570,500,570,fill="black",width=3)
canvas.create_line(0,620,500,620,fill="black",width=3)
create_colour_light_signal (canvas, 1, 100, 570,
signal_subtype = signal_sub_type.home,
sig_callback = main_callback_function,
sig_passed_button = True,
approach_release_button = True)
create_colour_light_signal (canvas, 2, 300, 570,
signal_subtype = signal_sub_type.three_aspect,
sig_callback = main_callback_function,
sig_passed_button = True,
approach_release_button = True)
create_semaphore_signal (canvas,3,100,620,
sig_callback=main_callback_function,
sig_passed_button = True,
approach_release_button = True)
create_semaphore_signal (canvas,4,300,620,
sig_callback=main_callback_function,
sig_passed_button = True,
approach_release_button = True)
print ("Creating Track Sections")
canvas.create_text(250,20,text=" Only Sections 17-22 & 24 will generate an external callback")
canvas.create_text(250,40,text="Clicking on Section 17 will attempt to set Section 23")
canvas.create_text(250,60,text="Clicking on Section 18 will attempt to clear Section 23")
canvas.create_text(250,80,text="Clicking on Section 19 will report the state of Section 23")
canvas.create_text(250,100,text="Clicking on Section 20 will attempt to set a section that doesn't exist")
canvas.create_text(250,120,text="Clicking on Section 21 will attempt to clear a section that doesn't exist")
canvas.create_text(250,140,text="Clicking on Section 22 will report the state of a section that doesn't exist")
for I in range(1,9):
create_section(canvas,I,150,150+(25*I),label=("Sensor "+str(I)))
create_section(canvas,I+8,250,150+(25*I),label=("Sensor "+str(I+8)))
create_section(canvas,17,350,175,label="Section 17",section_callback = main_callback_function)
create_section(canvas,18,350,200,label="Section 18",section_callback = main_callback_function)
create_section(canvas,19,350,225,label="Section 19",section_callback = main_callback_function)
create_section(canvas,20,350,250,label="Section 20",section_callback = main_callback_function)
create_section(canvas,21,350,275,label="Section 21",section_callback = main_callback_function)
create_section(canvas,22,350,300,label="Section 22",section_callback = main_callback_function)
create_section(canvas,23,350,325,label="Section 23")
create_section(canvas,24,350,350,label="Section 24",section_callback = main_callback_function)
print("Negative Tests for Creating Track Sections to test validation:")
create_section(canvas,1,100,100)
create_section(canvas,0,100,100)
print ("Creating external Track Sensor Mappings")
canvas.create_text(250,380,text="Sections 1-16 (above) report the current state of the external sensors")
canvas.create_text(250,400,text="Clicking on Section 24 will report the state of a sensor that doesn't exist")
canvas.create_text(250,420,text="The following are ONLY triggered by the external sensors - NOT Sections")
canvas.create_text(250,440,text="Sensor 1 triggers a signal approach event for a non-existent signal")
canvas.create_text(250,460,text="Sensor 2 triggers a signal passed event for a non-existent signal")
canvas.create_text(250,480,text="Sensors 3-6 trigger a sensor callback event for the sensor")
canvas.create_text(250,500,text="Sensors 9-16 trigger signal approach & passed events for the signals")
create_track_sensor (1,gpio_channel=4,sensor_timeout=1.0,signal_approach=5) # negative test - sig doesn't exist
create_track_sensor (2,gpio_channel=5,sensor_timeout=1.0,signal_passed=5) # negative test - sig doesn't exist
create_track_sensor (3,gpio_channel=6,sensor_timeout=1.0,sensor_callback=main_callback_function)
create_track_sensor (4,gpio_channel=7,sensor_timeout=1.0,sensor_callback=main_callback_function)
create_track_sensor (5,gpio_channel=8,sensor_timeout=1.0,sensor_callback=main_callback_function)
create_track_sensor (6,gpio_channel=9,sensor_timeout=1.0,sensor_callback=main_callback_function)
create_track_sensor (7,gpio_channel=10,sensor_timeout=1.0)
create_track_sensor (8,gpio_channel=11,sensor_timeout=1.0)
create_track_sensor (9,gpio_channel=12,sensor_timeout=1.0,signal_approach=1)
create_track_sensor (10,gpio_channel=13,sensor_timeout=1.0,signal_passed=1)
create_track_sensor (11,gpio_channel=16,sensor_timeout=1.0,signal_approach=2)
create_track_sensor (12,gpio_channel=17,sensor_timeout=1.0,signal_passed=2)
create_track_sensor (13,gpio_channel=18,sensor_timeout=1.0,signal_approach=3)
create_track_sensor (14,gpio_channel=19,sensor_timeout=1.0,signal_passed=3)
create_track_sensor (15,gpio_channel=20,sensor_timeout=1.0,signal_approach=4)
create_track_sensor (16,gpio_channel=21,sensor_timeout=1.0,signal_passed=4)
print("Negative Tests for Creating Track Sensors to test validation:")
create_track_sensor (0,gpio_channel=22)
create_track_sensor (1,gpio_channel=23)
create_track_sensor (17,gpio_channel=10)
create_track_sensor (18,gpio_channel=27)
create_track_sensor (15,gpio_channel=22,signal_approach=4,signal_passed=4)
create_track_sensor (15,gpio_channel=22,signal_approach=4,sensor_callback=main_callback_function)
create_track_sensor (15,gpio_channel=22,signal_passed=4,sensor_callback=main_callback_function)
# Start the thread to report the status of all the sensors:
report_sensor_status = threading.Thread(target = report_sensor_status_thread)
report_sensor_status.start()
# Now enter the main event loop and wait for a button press (which will trigger a callback)
print ("Entering Main Event Loop")
main_thread = threading.get_ident()
print("Main Thread is: " + str(main_thread))
window.mainloop()
|
send_order_demo.py
|
from threading import Thread
from time import sleep
from ctpbee import CtpbeeApi, helper, CtpBee
from ctpbee.constant import ContractData, LogData, TickData, BarData, OrderType, Offset, OrderData, SharedData, \
TradeData, PositionData, Direction, AccountData
class Demo(CtpbeeApi):
contract_set = set(["rb1910"])
    # All data recorded by the CtpBee instance this plugin is bound to lives under self.app.recorder
def on_contract(self, contract: ContractData):
""" 处理推送的合约信息 """
if contract.symbol in self.contract_set:
self.app.subscribe(contract.symbol)
def on_log(self, log: LogData):
""" 处理日志信息 ,特殊需求才用到 """
pass
def on_tick(self, tick: TickData) -> None:
""" 处理推送的tick """
pass
def on_bar(self, bar: BarData) -> None:
""" 处理ctpbee生成的bar """
# 构建发单请求
req = helper.generate_order_req_by_var(symbol=bar.symbol, exchange=bar.exchange, price=bar.high_price,
direction=Direction.LONG, type=OrderType.LIMIT, volume=3,
offset=Offset.OPEN)
        # Send the order through the bound app
        id = self.app.send_order(req)
        print("returned order id:", id)
sleep(1)
def on_init(self, init):
if init:
print("初始化完成")
def on_order(self, order: OrderData) -> None:
""" 报单回报 """
print("order", order)
def on_shared(self, shared: SharedData) -> None:
pass
def on_trade(self, trade: TradeData) -> None:
""" 成交回报 """
print("成交", trade)
def on_position(self, position: PositionData) -> None:
""" 处理持仓回报 """
def on_account(self, account: AccountData) -> None:
""" 处理账户信息 """
def letsgo():
app = CtpBee(name="demo", import_name=__name__)
    # Create the plugin instance
demo = Demo("test")
    # Register the plugin; you can subclass multiple classes, instantiate different plugins and load them all; these operations are completely flexible
app.add_extension(demo)
app.config.from_json("config.json")
app.start(log_output=True)
def query(time=1):
nonlocal app
while True:
app.query_position()
sleep(time)
app.query_account()
sleep(time)
    # Run position and account queries in a separate thread
p = Thread(target=query, args=(2,))
p.setDaemon(daemonic=True)
p.start()
if __name__ == '__main__':
letsgo()
|
callback_api.py
|
from app import mythic
import app
from sanic.response import json, raw
from app.database_models.model import (
Callback,
Task,
LoadedCommands,
PayloadCommand,
)
from sanic_jwt.decorators import scoped, inject_user
import app.database_models.model as db_model
from sanic.exceptions import abort
from math import ceil
import aiohttp
import base64
from sanic.log import logger
import ujson as js
import app.crypto as crypt
from app.api.task_api import get_agent_tasks, update_edges_from_checkin
from app.api.response_api import post_agent_response
from app.api.file_api import download_agent_file
from app.api.crypto_api import staging_rsa
from app.api.operation_api import send_all_operations_message
from app.api.rabbitmq_api import MythicBaseRPC
import urllib.parse
from datetime import datetime
from dijkstar import Graph, find_path
from dijkstar.algorithm import NoPathError
import threading
import socket
from app.api.siem_logger import log_to_siem
import sys
import asyncio
import uuid
@mythic.route(mythic.config["API_BASE"] + "/callbacks/", methods=["GET"])
@inject_user()
@scoped(
["auth:user", "auth:apitoken_user"], False
) # user or user-level api token are ok
async def get_all_callbacks(request, user):
if user["auth"] not in ["access_token", "apitoken"]:
abort(
status_code=403,
message="Cannot access via Cookies. Use CLI or access via JS in browser",
)
if user["current_operation"] != "":
operation = await app.db_objects.get(db_model.operation_query, name=user["current_operation"])
callbacks = await app.db_objects.prefetch(
db_model.callback_query.where(Callback.operation == operation),
db_model.callbacktoken_query
)
return json([c.to_json() for c in callbacks])
else:
return json([])
@mythic.route(mythic.config["API_BASE"] + "/callbacks/<eid:int>/edges", methods=["GET"])
@inject_user()
@scoped(
["auth:user", "auth:apitoken_user"], False
) # user or user-level api token are ok
async def get_all_edges_for_callback(request, user, eid):
if user["auth"] not in ["access_token", "apitoken"]:
abort(
status_code=403,
message="Cannot access via Cookies. Use CLI or access via JS in browser",
)
if user["current_operation"] != "":
operation = await app.db_objects.get(db_model.operation_query, name=user["current_operation"])
callback = await app.db_objects.get(db_model.callback_query, id=eid, operation=operation)
edges = await app.db_objects.execute(db_model.callbackgraphedge_query.where(
(db_model.CallbackGraphEdge.source == callback) |
(db_model.CallbackGraphEdge.destination == callback)
))
edge_info = []
for edge in edges:
if edge.c2_profile.is_p2p:
info = edge.to_json()
c2instances = await app.db_objects.execute(db_model.c2profileparametersinstance_query.where(
(db_model.C2ProfileParametersInstance.callback == edge.destination) &
(db_model.C2ProfileParametersInstance.c2_profile == edge.c2_profile)
))
info["c2_parameters"] = [{"name": c.c2_profile_parameters.name, "value": c.value} for c in c2instances]
edge_info.append(info)
return json(edge_info)
else:
return json([])
cached_keys = {}
translator_rpc = MythicBaseRPC()
@mythic.route(mythic.config["API_BASE"] + "/agent_message", methods=["GET", "POST"])
async def get_agent_message(request):
# get the raw data first
profile = None
data = None
request_url = request.headers['x-forwarded-url'] if 'x-forwarded-url' in request.headers else request.url
request_ip = request.headers['x-forwarded-for'] if 'x-forwarded-for' in request.headers else request.ip
if "Mythic" in request.headers:
profile = request.headers["Mythic"]
else:
error_message = f"Failed to find Mythic header in headers: \n{request.headers}\nConnection to: "
error_message += f"{request_url} via {request.method}\nFrom: "
error_message += f"{request_ip}\n"
error_message += f"With query string: {request.headers['x-forwarded-query'] if 'x-forwarded-query' in request.headers else request.query_string}\n"
error_message += f"Did this come from a Mythic C2 Profile? If so, make sure it's adding the `mythic` header with the name of the C2 profile"
asyncio.create_task(send_all_operations_message(
message=error_message,
level="warning", source="get_agent_message_mythic_header:" + request_ip))
return raw(b"", 404)
if request.body != b"":
data = request.body
# print("Body: " + str(data))
elif len(request.cookies) != 0:
for key, val in request.cookies.items():
if data is None:
data = val
# print("Cookies: " + str(data))
elif len(request.query_args) != 0:
data = urllib.parse.unquote(request.query_args[0][1])
# print("Query: " + str(data))
else:
error_message = f"Failed to find message in body, cookies, or query args\nConnection to: "
error_message += f"{request_url} via {request.method}\nFrom: "
error_message += f"{request_ip}\n"
error_message += f"With query string: {request.headers['x-forwarded-query'] if 'x-forwarded-query' in request.headers else request.query_string}\n"
error_message += f"With extra headers: {request.headers}\n"
asyncio.create_task(send_all_operations_message(
message=error_message,
level="warning", source="get_agent_message_body" + request_ip))
return raw(b"", 404)
if app.debugging_enabled:
await send_all_operations_message(message=f"Parsing agent message - step 1 (get data): \n{data}", level="info", source="debug")
message, code, new_callback, msg_uuid = await parse_agent_message(data, request, profile)
return raw(message, code)
async def get_payload_c2_info(payload_uuid=None, payload=None):
if payload_uuid is not None:
payload = await app.db_objects.get(db_model.payload_query, uuid=payload_uuid)
c2_profiles = await app.db_objects.execute(
db_model.payloadc2profiles_query.where(db_model.PayloadC2Profiles.payload == payload)
)
c2info = {}
for c in c2_profiles:
c2info[c.c2_profile.name] = {
"is_p2p": c.c2_profile.is_p2p,
"mythic_encrypts": payload.payload_type.mythic_encrypts,
"translation_container": payload.payload_type.translation_container.name if payload.payload_type.translation_container is not None else None,
"profile": c.c2_profile.name,
"payload": payload
}
c2_params = await app.db_objects.execute(
db_model.c2profileparametersinstance_query.where(
(db_model.C2ProfileParametersInstance.payload == payload) &
(db_model.C2ProfileParametersInstance.c2_profile == c.c2_profile)
)
)
for cp in c2_params:
# loop through all of the params associated with the payload and find ones that are crypt_type
# currently doesn't really make sense to have more than one crypto_type parameter for this purpose
# in a single c2 profile
if cp.c2_profile_parameters.crypto_type:
c2info[c.c2_profile.name] = {**c2info[c.c2_profile.name],
"enc_key": bytes(cp.enc_key) if cp.enc_key is not None else None,
"type": cp.value,
"dec_key": bytes(cp.dec_key) if cp.dec_key is not None else None,
"stage": "payload",
"profile": c.c2_profile.name,
"payload": payload
}
if "enc_key" not in c2info[c.c2_profile.name]:
# we didn't find a crypto_type parameter that matched something mythic knows where mythic
# was also supposed to encrypt
c2info[c.c2_profile.name] = {**c2info[c.c2_profile.name],
"enc_key": None,
"type": "",
"dec_key": None,
"stage": "payload",
"profile": c.c2_profile.name,
"payload": payload
}
return c2info
async def get_encryption_data(UUID: str, profile: str):
# this function tries to retrieve a cached key for a given UUID
# if the key doesn't exist, it queries the database for the key to use if one exists
if UUID not in cached_keys or profile not in cached_keys[UUID]:
# we need to look up the key to see if it exists
try:
# first check to see if it's some staging piece
staging_info = await app.db_objects.get(db_model.staginginfo_query, staging_uuid=UUID)
c2info = await get_payload_c2_info(payload_uuid=None, payload=staging_info.payload)
cached_keys[staging_info.payload.uuid] = c2info
cached_keys[UUID] = {
profile: {
"enc_key": bytes(staging_info.enc_key) if staging_info.enc_key is not None else None,
"type": staging_info.crypto_type,
"dec_key": bytes(staging_info.dec_key) if staging_info.dec_key is not None else None,
"stage": "staging",
"is_p2p": c2info[profile]["is_p2p"],
"translation_container": staging_info.payload.payload_type.translation_container.name if staging_info.payload.payload_type.translation_container is not None else None,
"mythic_encrypts": staging_info.payload.payload_type.mythic_encrypts,
"profile": profile,
"payload": staging_info.payload
}
}
except Exception as a:
# if it's not a staging key, check if it's a payload uuid and get c2 profile AESPSK
try:
payload = await app.db_objects.get(db_model.payload_query, uuid=UUID)
if payload.deleted:
await send_all_operations_message(operation=payload.operation,
level="warning",
source="deleted_payload_checking_in" + payload.uuid,
message=f"Deleted payload trying to spawn callback - {js.dumps(payload.to_json(), indent=4)}")
raise Exception(FileNotFoundError)
cached_keys[UUID] = await get_payload_c2_info(None, payload)
except Exception as b:
# finally check to see if it's an agent checking in
try:
callback = await app.db_objects.prefetch(db_model.callback_query.where(db_model.Callback.agent_callback_id == UUID),
db_model.callbacktoken_query)
callback = list(callback)[0]
c2_profiles = await app.db_objects.execute(
db_model.callbackc2profiles_query.where(db_model.CallbackC2Profiles.callback == callback)
)
c2info = {}
for c in c2_profiles:
c2info[c.c2_profile.name] = {
"is_p2p": c.c2_profile.is_p2p,
"translation_container": callback.registered_payload.payload_type.translation_container.name if callback.registered_payload.payload_type.translation_container is not None else None,
"mythic_encrypts": callback.registered_payload.payload_type.mythic_encrypts,
"dec_key": bytes(callback.dec_key) if callback.dec_key is not None else None,
"type": callback.crypto_type,
"enc_key": bytes(callback.enc_key) if callback.enc_key is not None else None,
"stage": "callback",
"payload": callback.registered_payload,
"profile": c.c2_profile.name,
"callback": callback
}
cached_keys[UUID] = c2info
except Exception as c:
logger.error(
"Failed to find UUID in staging, payloads with an AESPSK c2 param, or callbacks: " + UUID
)
raise c
return cached_keys[UUID][profile]
else:
return cached_keys[UUID][profile]
# returns a base64 encoded response message
# the return_decrypted flag is for C2 Profiles asking Mythic (via RPC) to decrypt an agent message and hand back the parsed JSON
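# rough sketch of the expected wire format, inferred from the parsing below:
#   data = base64( UUID + body )
#   - UUID is either a 36 character string UUID or a 16 byte little-endian UUID (uuid.UUID(bytes_le=...))
#   - body is a JSON message, optionally encrypted and/or in a custom format handled by a translation container
# on success this returns (response_message, 200, new_callback_uuid, agent_uuid);
# on failure it returns ("", 404, new_callback, agent_uuid); with return_decrypted it returns a status dict instead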
async def parse_agent_message(data: str, request, profile: str, return_decrypted: bool = False):
new_callback = ""
agent_uuid = ""
if return_decrypted:
request_url = ""
request_ip = ""
else:
request_url = request.headers['x-forwarded-url'] if 'x-forwarded-url' in request.headers else request.url
request_ip = request.headers['x-forwarded-for'] if 'x-forwarded-for' in request.headers else request.ip
try:
decoded = base64.b64decode(data)
# print(decoded)
if app.debugging_enabled:
await send_all_operations_message(message=f"Parsing agent message - step 2 (base64 decode): \n {decoded}", level="info", source="debug")
except Exception as e:
error_message = f"Failed to base64 decode message\nConnection to: "
if return_decrypted:
error_message += f"{profile}'s RPC call"
else:
error_message += f"{request_url} via {request.method}\nFrom: "
error_message += f"{request_ip}\n"
error_message += f"With extra headers: {request.headers}\n"
error_message += f"Message sent to Mythic wasn't base64 encoded or was improperly encoded. Make sure your overall message is base64 encoded."
asyncio.create_task(send_all_operations_message(message=error_message,
level="warning", source="get_agent_message" + request_ip))
if return_decrypted:
return {"status": "error", "error": error_message}
return "", 404, new_callback, agent_uuid
try:
try:
UUID = decoded[:36].decode() # first 36 characters are the UUID
UUID_length = 36
# print(UUID)
except Exception as e:
# if we get here, then we're not looking at a string-based UUID, check if it's a 16-byte representation
UUID = uuid.UUID(bytes_le=decoded[:16])
UUID = str(UUID)
UUID_length = 16
if app.debugging_enabled:
await send_all_operations_message(message=f"Parsing agent message - step 3 (get uuid): \n {UUID} with length {str(UUID_length)}", level="info", source="debug")
except Exception as e:
error_message = f"Failed to get UUID in first 36 or 16 bytes for base64 input\nConnection to: "
if return_decrypted:
error_message += f"{profile}'s RPC call"
else:
error_message += f"{request_url} via {request.method}\nFrom: "
error_message += f"{request_ip}\n"
error_message += f"With extra headers: {request.headers}\nData: "
error_message += f"{str(decoded)}\n"
error_message += f"The first bytes of a message to Mythic should be the UUID of the agent, payload, or stage. Failed to find a UUID in {decoded[:36]}"
asyncio.create_task(send_all_operations_message(message= error_message,
level="warning", source="get_agent_message" + request_ip))
if return_decrypted:
return {"status": "error", "error": error_message}
return "", 404, new_callback, agent_uuid
try:
enc_key = await get_encryption_data(UUID, profile)
except Exception as e:
error_message = f"Failed to correlate UUID, {UUID}, to something mythic knows\nConnection to: "
if return_decrypted:
error_message += f"{profile}'s RPC call"
else:
error_message += f"{request_url} via {request.method}\nFrom: "
error_message += f"{request_ip}\n"
error_message += f"With extra headers: {request.headers}\n"
if UUID_length == 36:
try:
uuid.UUID(UUID)
error_message += f"{UUID} is likely a Callback or Payload UUID from a Mythic instance that has been deleted or had the database reset."
except:
error_message += f"{UUID} is not a valid UUID4 value. The first part of a Mythic message should be the Callback, Payload, or Staging UUID.\n"
error_message += f"This likely happens if some other sort of traffic came through your C2 profile (ports too open) or another C2 Profile's traffic was routed through this C2 Profile"
else:
error_message += f"{UUID} was a 16 Byte UUID, but Mythic doesn't have it in its database.\n"
error_message += f"This is likely a Callback or Payload UUID from a Mythic instance that has been deleted or had the database reset.\n"
error_message += f"This could also happen if some other sort of traffic came through your C2 profile (ports too open) or another C2 Profile's traffic was routed through this C2 Profile"
asyncio.create_task(send_all_operations_message(message= error_message, level="warning",
source="get_agent_message_uuid:" + UUID + request_ip))
if return_decrypted:
return {"status": "error", "error": error_message}
return "", 404, new_callback, agent_uuid
# cached_keys[UUID] now holds the right encryption data for this UUID, so decrypt the message
if enc_key["stage"] == "callback":
asyncio.create_task(update_edges_from_checkin(UUID, profile))
decrypted = None
try:
# print(decoded[36:])
if enc_key["mythic_encrypts"]:
# mythic handles encryption/decryption, but maybe not parsing
if enc_key["translation_container"] is None:
# format is in standard mythic JSON, so parse the decrypted version normally
decrypted = await crypt.decrypt_message(decoded, enc_key, return_json=True, length=UUID_length)
if app.debugging_enabled:
await send_all_operations_message(
message=f"Parsing agent message - step 4 (mythic decrypted a mythic message): \n{decrypted}", level="info", source="debug")
else:
decrypted = await crypt.decrypt_message(decoded, enc_key, return_json=False, length=UUID_length)
if app.debugging_enabled:
await send_all_operations_message(
message=f"Parsing agent message - step 4 (mythic decrypted a message that needs to be translated): \n{str(decrypted)}", level="info", source="debug")
# format isn't standard mythic JSON, after decrypting send to container for processing
decrypted, successfully_sent = await translator_rpc.call(message={
"action": "translate_from_c2_format",
"message": base64.b64encode(decrypted).decode(),
"uuid": UUID,
"profile": profile,
"mythic_encrypts": enc_key["mythic_encrypts"],
"enc_key": base64.b64encode(enc_key["enc_key"]).decode() if enc_key["enc_key"] is not None else None,
"dec_key": base64.b64encode(enc_key["dec_key"]).decode() if enc_key["dec_key"] is not None else None,
"type": enc_key["type"]
}, receiver="{}_rpc_queue".format(enc_key["translation_container"]))
if decrypted == b"":
if successfully_sent:
asyncio.create_task(send_all_operations_message(
message=f"Failed to have {enc_key['translation_container']} container process translate_from_c2_format. Check the container's logs for error information",
level="warning", source="translate_from_c2_format_success", operation=enc_key["payload"].operation))
else:
asyncio.create_task(send_all_operations_message(
message=f"Failed to have {enc_key['translation_container']} container process translate_from_c2_format because it's offline",
level="warning", source="translate_from_c2_format_error", operation=enc_key["payload"].operation))
if return_decrypted:
return {"status": "error", "error": "Failed to have translation service translate decrypted message"}
return "", 404, new_callback, agent_uuid
else:
# we should get back JSON from the translation container
if app.debugging_enabled:
await send_all_operations_message(
message=f"Parsing agent message - step 4 (translation container returned): \n{decrypted}", level="info", source="debug")
decrypted = js.loads(decrypted)
else:
# mythic doesn't encrypt, so could already be decrypted or require a trip to a container
if enc_key["translation_container"] is None:
# there's no registered container, so it must be in plaintext
decrypted = decoded
if app.debugging_enabled:
await send_all_operations_message(
message=f"Parsing agent message - step 4 (mythic isn't decrypting and no associated translation container): \n {decrypted}", level="info", source="debug")
else:
# mythic doesn't encrypt and a container is specified, ship it off to the container for processing
decrypted, successfully_sent = await translator_rpc.call(message={
"action": "translate_from_c2_format",
"message": base64.b64encode(decoded).decode(),
"uuid": UUID,
"profile": profile,
"mythic_encrypts": enc_key["mythic_encrypts"],
"enc_key": base64.b64encode(enc_key["enc_key"]).decode() if enc_key["enc_key"] is not None else None,
"dec_key": base64.b64encode(enc_key["dec_key"]).decode() if enc_key["dec_key"] is not None else None,
"type": enc_key["type"]
}, receiver="{}_rpc_queue".format(enc_key["translation_container"]))
if decrypted == b"":
if successfully_sent:
asyncio.create_task(send_all_operations_message(
message=f"Failed to have {enc_key['translation_container']} container process translate_from_c2_format and decrypt. Check the container's logs for error information",
level="warning", source="translate_from_c2_format_successfully_sent_but_error", operation=enc_key["payload"].operation))
else:
asyncio.create_task(send_all_operations_message(
message=f"Failed to have {enc_key['translation_container']} container process translate_from_c2_format because it's offline.",
level="warning", source="translate_from_c2_format_error", operation=enc_key["payload"].operation))
if return_decrypted:
return {"status": "error", "error": "Failed to have translation service translate message"}
return "", 404, new_callback, agent_uuid
else:
if app.debugging_enabled:
await send_all_operations_message(
message=f"Parsing agent message - step 4 (translation container returned): \n {decrypted}", level="info", source="debug", operation=enc_key["payload"].operation)
decrypted = js.loads(decrypted)
#print(decrypted)
except Exception as e:
logger.warning("callback_api.py - " + str(sys.exc_info()[-1].tb_lineno) + " " + str(e))
if decrypted is not None:
msg = str(decrypted)
else:
msg = str(decoded)
asyncio.create_task(send_all_operations_message(message=f"Failed to decrypt/load message with error: {str(sys.exc_info()[-1].tb_lineno) + ' ' + str(e)}\n from {request.method} method with URL {request.url} with headers: \n{request.headers}",
level="warning", source="parse_agent_message_decrypt_load", operation=enc_key["payload"].operation))
if return_decrypted:
return {"status": "error", "error": "Failed to decrypt or load the message as JSON"}
return "", 404, new_callback, agent_uuid
if return_decrypted:
return {"status": "success", "response": decrypted}
"""
Decrypted agent messages take the general form:
JSON({
"action": "", // checkin, get_tasking, post_response, upload, delegate, staging_rsa, update_info, translation_staging
// for staging actions, a StagingInfo entry in the db tracks what step of the process we're on
"...": ... // JSON data relating to the action
"delegates":[
{"message": base64(agentMessage from a forwarded agent),
"uuid": "the forwarding agent's tracking id for that connection",
"c2_profile": "name of c2 profile used to connect the two agents"}
]
})
"""
try:
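# illustrative example (hypothetical values) of a decrypted get_tasking message that also
# forwards a delegate message from a linked agent over a p2p profile:
# {
#     "action": "get_tasking",
#     ...,  // action-specific fields
#     "delegates": [ {"message": "<base64 agent message>", "uuid": "<linked agent uuid>", "c2_profile": "smb"} ]
# }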
if "action" not in decrypted:
asyncio.create_task(send_all_operations_message(message="Error in handling a callback message: Missing 'action' in parsed JSON",
level="warning", source="no_action_in_message", operation=enc_key["payload"].operation))
return "", 404, new_callback, agent_uuid
# now to parse out what we're doing, everything is decrypted at this point
# shuttle everything out to the appropriate api files for processing
# print(decrypted)
response_data = {}
if app.debugging_enabled:
await send_all_operations_message(
message=f"Parsing agent message - step 5 (processing message action): \n {decrypted['action']}",
level="info", source="debug", operation=enc_key["payload"].operation)
if decrypted["action"] == "get_tasking":
response_data = await get_agent_tasks(decrypted, enc_key["callback"])
if "get_delegate_tasks" not in decrypted or decrypted["get_delegate_tasks"] is True:
delegates = await get_routable_messages(enc_key["callback"], request)
if delegates is not None:
response_data["delegates"] = delegates
agent_uuid = UUID
elif decrypted["action"] == "post_response":
response_data = await post_agent_response(decrypted, enc_key["callback"])
agent_uuid = UUID
elif decrypted["action"] == "upload":
response_data = await download_agent_file(decrypted, in_response=False)
agent_uuid = UUID
elif decrypted["action"] == "delegate":
# this is an agent message that is just requesting or forwarding along delegate messages
# this is common in server_routed traffic after the first hop in the mesh
agent_uuid = UUID
elif decrypted["action"] == "checkin":
if enc_key["stage"] != "callback":
# checkin message with a staging uuid
if (
"enc_key" not in decrypted
or decrypted["enc_key"] == ""
):
decrypted["enc_key"] = enc_key["enc_key"]
if (
"dec_key" not in decrypted
or decrypted["dec_key"] == ""
):
decrypted["dec_key"] = enc_key["dec_key"]
if (
"crypto_type" not in decrypted
or decrypted["crypto_type"] == ""
):
decrypted["crypto_type"] = enc_key["type"]
if enc_key["stage"] == "callback":
# if the UUID is for a callback doing a checkin message, just update the callback instead
await update_callback(decrypted, UUID)
response_data = {"action": "checkin", "status": "success", "id": UUID}
agent_uuid = UUID
else:
response_data = await create_callback_func(decrypted, request)
if response_data["status"] == "success":
new_callback = response_data["id"]
elif decrypted["action"] == "staging_rsa":
response_data, staging_info = await staging_rsa(decrypted, UUID)
if staging_info is None:
return "", 404, new_callback, agent_uuid
elif decrypted["action"] == "update_info":
response_data = await update_callback(decrypted, UUID)
delegates = await get_routable_messages(enc_key["callback"], request)
if delegates is not None:
response_data["delegates"] = delegates
agent_uuid = UUID
elif decrypted["action"] == "translation_staging":
# this was already processed as part of our call to the translation container
# so now we're just saving this data off for the next message
response_data = await staging_translator(decrypted, enc_key)
if response_data is None:
return "", 404, new_callback, agent_uuid
else:
return response_data, 200, new_callback, agent_uuid
else:
asyncio.create_task(send_all_operations_message(message="Unknown action:" + str(decrypted["action"]),
level="warning", source="unknown_action_in_message", operation=enc_key["payload"].operation))
return "", 404, new_callback, agent_uuid
if "edges" in decrypted:
if app.debugging_enabled:
await send_all_operations_message(
message=f"Parsing agent message - step 6 (processing reported p2p edge updates): \n {decrypted['edges']}",
level="info", source="debug", operation=enc_key["payload"].operation)
if decrypted["edges"] != "" and decrypted["edges"] is not None:
asyncio.create_task(add_p2p_route(decrypted["edges"], None, None))
response_data.pop("edges", None)
# now that we have the right response data, format the response message
if (
"delegates" in decrypted
and decrypted["delegates"] is not None
and decrypted["delegates"] != ""
and decrypted["delegates"] != []
):
if app.debugging_enabled:
await send_all_operations_message(
message=f"Parsing agent message - step 7 (processing delegate messages): \n {decrypted['delegates']}",
level="info", source="debug", operation=enc_key["payload"].operation)
if "delegates" not in response_data:
response_data["delegates"] = []
for d in decrypted["delegates"]:
# handle messages for all of the delegates
# d is {"message": agentMessage, "c2_profile": "profile name", "uuid": d_uuid}
# process the delegate message recursively
del_message, status, del_new_callback, del_uuid = await parse_agent_message(d["message"],
request,
d["c2_profile"])
if status == 200:
# store the response to send back
print("got delegate message: ")
print(del_message)
if not isinstance(del_message, str):
del_message = del_message.decode()
if del_new_callback != "":
# the delegate message caused a new callback, so report the changed UUID back to the forwarding agent
asyncio.create_task(
add_p2p_route(
agent_message=[{
"source": UUID,
"destination": del_new_callback,
"direction": 1,
"metadata": "",
"action": "add",
"c2_profile": d["c2_profile"]
}],
callback=None,
task=None)
)
response_data["delegates"].append({"message": del_message,
"mythic_uuid": del_new_callback,
"uuid": d["uuid"]})
elif del_uuid != "" and del_uuid != d["uuid"]:
# there is no new callback
# the delegate is a callback (not staging) and the callback uuid != uuid in the message
# so send an update message with the rightful callback uuid so the agent can update
asyncio.create_task(
add_p2p_route(
agent_message=[{
"source": UUID,
"destination": del_uuid,
"direction": 1,
"metadata": "",
"action": "add",
"c2_profile": d["c2_profile"]
}],
callback=None,
task=None)
)
response_data["delegates"].append({"message": del_message,
"uuid": d["uuid"],
"mythic_uuid": del_uuid})
else:
# there's no new callback and the delegate message isn't a full callback yet
# so just proxy through the UUID since it's in some form of staging
response_data["delegates"].append({"message": del_message, "uuid": d["uuid"]})
#print("final message before going to containers:")
#print(response_data)
final_msg = await create_final_message_from_data_and_profile_info(response_data, enc_key, UUID, request)
if final_msg is None:
return "", 404, new_callback, agent_uuid
#print("finishing processing loop, returning: ")
#print(final_msg)
return final_msg, 200, new_callback, agent_uuid
except Exception as e:
logger.warning("callback_api.py - " + str(sys.exc_info()[-1].tb_lineno))
logger.warning("callback_api.py: " + str(e))
asyncio.create_task(send_all_operations_message(message=f"Exception dealing with message from {request.host} as {request.method} method with headers: \n{request.headers}\ncallback.py: {str(sys.exc_info()[-1].tb_lineno)} - {str(e)}",
level="warning", source="mythic_error_for_message_parsing"))
return "", 404, new_callback, agent_uuid
async def create_final_message_from_data_and_profile_info(response_data, enc_key, current_uuid, request):
if enc_key["translation_container"] is not None:
if app.debugging_enabled:
await send_all_operations_message(
message=f"Parsing agent message - step 8 (final mythic response message going to translation container): \n {js.dumps(response_data)}",
level="info", operation=enc_key["payload"].operation, source="debug")
final_msg, successfully_sent = await translator_rpc.call(message={
"action": "translate_to_c2_format",
"message": response_data,
"profile": enc_key["profile"],
"mythic_encrypts": enc_key["mythic_encrypts"],
"enc_key": base64.b64encode(enc_key["enc_key"]).decode() if enc_key["enc_key"] is not None else None,
"dec_key": base64.b64encode(enc_key["dec_key"]).decode() if enc_key["dec_key"] is not None else None,
"uuid": current_uuid,
"type": enc_key["type"]
}, receiver="{}_rpc_queue".format(enc_key["translation_container"]))
# print("received from translate_to_c2_format: ")
# print(final_msg)
if final_msg == b"":
if successfully_sent:
asyncio.create_task(send_all_operations_message(
message=f"Failed to have {enc_key['translation_container']} container process translate_to_c2_format with message: {str(response_data)}",
level="warning", source="translate_to_c2_format_success", operation=enc_key["payload"].operation))
else:
asyncio.create_task(send_all_operations_message(
message=f"Failed to have {enc_key['translation_container']} container process translate_to_c2_format, is it online?",
level="warning", source="translate_to_c2_format_error", operation=enc_key["payload"].operation))
return None
if app.debugging_enabled:
await send_all_operations_message(
message=f"Parsing agent message - step 8.5 (response from translation container to c2 format): \n {final_msg}",
level="info", source="debug", operation=enc_key["payload"].operation)
else:
if app.debugging_enabled:
await send_all_operations_message(
message=f"Parsing agent message - step 8 (final mythic response message): \n {js.dumps(response_data)}",
level="info", source="debug", operation=enc_key["payload"].operation)
final_msg = js.dumps(response_data).encode()
if enc_key["mythic_encrypts"]:
# if mythic should encrypt this, encrypt it and do our normal stuff
# print(final_msg)
final_msg = await crypt.encrypt_message(final_msg, enc_key, current_uuid)
if app.debugging_enabled:
await send_all_operations_message(
message=f"Parsing agent message - step 9 (mythic encrypted final message): \n {final_msg}",
level="info", source="debug", operation=enc_key["payload"].operation)
# print(final_msg)
elif enc_key["translation_container"] is None:
# if mythic shouldn't encrypt it and there's a container,
# then the container should have already handled everything
# otherwise, there's no container and we shouldn't encrypt, so just concat and base64
final_msg = base64.b64encode((current_uuid.encode() + final_msg)).decode()
if app.debugging_enabled:
await send_all_operations_message(
message=f"Parsing agent message - step 9 (mythic doesn't encrypt and no translation container, just adding uuid and base64 encoding): \n {final_msg}",
level="info", source="debug", operation=enc_key["payload"].operation)
return final_msg
async def staging_translator(final_msg, enc_key):
try:
# we got a message back, process it and store it for staging information in the future
await app.db_objects.create(db_model.StagingInfo,
session_id=final_msg["session_id"],
enc_key=base64.b64decode(final_msg["enc_key"]) if final_msg["enc_key"] is not None else None,
dec_key=base64.b64decode(final_msg["dec_key"]) if final_msg["dec_key"] is not None else None,
crypto_type=final_msg["type"],
staging_uuid=final_msg["next_uuid"],
payload=enc_key["payload"]
)
return base64.b64decode(final_msg["message"])
except Exception as e:
asyncio.create_task(send_all_operations_message(
message=f"Failed to process translator_staging response from {enc_key['translation_container']} container, message: {str(final_msg)}",
level="warning", source="translator_staging_response_error", operation=enc_key["payload"].operation))
return None
@mythic.route(mythic.config["API_BASE"] + "/callbacks/", methods=["POST"])
@inject_user()
@scoped(
["auth:user", "auth:apitoken_user"], False
) # user or user-level api token are ok
async def create_manual_callback(request, user):
if user["auth"] not in ["access_token", "apitoken"]:
abort(
status_code=403,
message="Cannot access via Cookies. Use CLI or access via JS in browser",
)
if user["view_mode"] == "spectator" or user["current_operation"] == "":
return json(
{"status": "error", "error": "Spectators cannot create manual callbacks"}
)
try:
data = request.json
encryption = await get_encryption_data(data["uuid"], data["profile"])
if encryption['type'] is None or encryption['type'] == "":
data["crypto_type"] = ""
data["enc_key"] = None
data["dec_key"] = None
else:
data["crypto_type"] = encryption['type']
data["enc_key"] = base64.b64encode(encryption['enc_key']).decode()
data["dec_key"] = base64.b64encode(encryption['dec_key']).decode()
return json(await create_callback_func(data, request))
except Exception as e:
print(e)
return json(
{"status": "error", "error": "failed to create callback: " + str(e)}
)
async def create_callback_func(data, request):
if not data:
return {"status": "error", "error": "Data is required for POST"}
if "user" not in data:
data["user"] = ""
if "host" not in data:
data["host"] = ""
if "pid" not in data:
data["pid"] = -1
if "ip" not in data:
data["ip"] = ""
if "uuid" not in data:
return {"status": "error", "error": "uuid required"}
# Get the corresponding Payload object based on the uuid
try:
payload = await app.db_objects.get(db_model.payload_query, uuid=data["uuid"])
except Exception as e:
asyncio.create_task(send_all_operations_message(
message=f"Failed to create new callback - embedded payload uuid unknown: {data['uuid'] if 'uuid' in data else None}",
level="warning", source="create_callback"))
return {"status": "error", "error": "payload not found by uuid"}
if "integrity_level" not in data:
data["integrity_level"] = 2 # default medium integrity level
if "os" not in data:
data["os"] = ""
if "domain" not in data:
data["domain"] = ""
if "architecture" not in data:
data["architecture"] = ""
if "external_ip" not in data:
if "x-forwarded-for" in request.headers:
data["external_ip"] = request.headers["x-forwarded-for"].split(",")[-1]
elif "X-Forwarded-For" in request.headers:
data["external_ip"] = request.headers["X-Forwarded-For"].split(",")[-1]
else:
data["external_ip"] = ""
if "extra_info" not in data:
data["extra_info"] = ""
if "sleep_info" not in data:
data["sleep_info"] = ""
if "process_name" not in data:
data["process_name"] = ""
try:
if payload.operation.complete:
await app.db_objects.create(
db_model.OperationEventLog,
operation=payload.operation,
level="warning",
message="Payload {} trying to checkin with data: {}".format(
payload.uuid, js.dumps(data)
),
source=str(uuid.uuid4())
)
return {"status": "error", "error": "Failed to create callback"}
else:
cal = await app.db_objects.create(
Callback,
user=data["user"],
host=data["host"].upper(),
pid=data["pid"],
ip=data["ip"],
description=payload.tag,
operator=payload.operator,
registered_payload=payload,
operation=payload.operation,
integrity_level=data["integrity_level"],
os=data["os"],
domain=data["domain"],
architecture=data["architecture"],
external_ip=data["external_ip"],
extra_info=data["extra_info"],
sleep_info=data["sleep_info"],
process_name=data["process_name"]
)
await app.db_objects.create(
db_model.OperationEventLog,
operator=None,
operation=payload.operation,
message="New Callback ({}) {}@{} with pid {}".format(
cal.id, cal.user, cal.host, str(cal.pid)
),
source=str(uuid.uuid4())
)
await app.db_objects.get_or_create(
db_model.PayloadOnHost,
host=data["host"].upper(),
payload=payload,
operation=payload.operation,
)
if "crypto_type" in data:
cal.crypto_type = data["crypto_type"]
if "dec_key" in data:
cal.dec_key = data["dec_key"]
if "enc_key" in data:
cal.enc_key = data["enc_key"]
await app.db_objects.update(cal)
payload_commands = await app.db_objects.execute(
db_model.payloadcommand_query.where(PayloadCommand.payload == payload)
)
# now create a loaded command for each one since they are loaded by default
for p in payload_commands:
await app.db_objects.create(
LoadedCommands,
command=p.command,
version=p.version,
callback=cal,
operator=payload.operator,
)
# now create a CallbackC2Profiles entry for each c2 profile in the payload since they're included by default
pc2profiles = await app.db_objects.execute(
db_model.payloadc2profiles_query.where(db_model.PayloadC2Profiles.payload == payload)
)
for pc2p in pc2profiles:
if pc2p.c2_profile.is_p2p is False:
# add in an edge from the callback to itself for the associated egress c2 profile
await app.db_objects.create(
db_model.CallbackGraphEdge,
source=cal,
destination=cal,
c2_profile=pc2p.c2_profile,
operation=cal.operation,
direction=1,
)
await app.db_objects.create(
db_model.CallbackC2Profiles, callback=cal, c2_profile=pc2p.c2_profile
)
# now also save off a copy of the profile parameters
instances = await app.db_objects.execute(
db_model.c2profileparametersinstance_query.where(
(
db_model.C2ProfileParametersInstance.payload
== cal.registered_payload
)
& (
db_model.C2ProfileParametersInstance.c2_profile
== pc2p.c2_profile
)
)
)
for i in instances:
await app.db_objects.create(
db_model.C2ProfileParametersInstance,
callback=cal,
c2_profile_parameters=i.c2_profile_parameters,
c2_profile=i.c2_profile,
value=i.value,
operation=cal.operation,
)
except Exception as e:
asyncio.create_task(send_all_operations_message(
message=f"Failed to create new callback {str(e)}",
level="warning", source="create_callback2"))
return {"status": "error", "error": "Failed to create callback: " + str(e)}
status = {"status": "success"}
asyncio.create_task( log_to_siem(mythic_object=cal, mythic_source="callback_new") )
if cal.operation.webhook != "" and cal.registered_payload.callback_alert:
# if we have a webhook, send a message about the new callback
try:
if cal.integrity_level >= 3:
int_level = "HIGH"
elif cal.integrity_level == 2:
int_level = "MEDIUM"
else:
int_level = "LOW"
message = cal.operation.webhook_message.replace("{channel}", cal.operation.channel)
message = message.replace("{display_name}", cal.operation.display_name)
message = message.replace("{icon_emoji}", cal.operation.icon_emoji)
message = message.replace("{icon_url}", cal.operation.icon_url)
message = message.replace("{operation}", cal.operation.name)
message = message.replace("{callback}", str(cal.id))
message = message.replace("{ip}", str(cal.ip))
message = message.replace("{payload_type}", cal.registered_payload.payload_type.ptype)
message = message.replace("{description}", cal.description)
message = message.replace("{operator}", cal.operator.username)
message = message.replace("{integrity}", int_level)
asyncio.create_task(send_webhook_message(cal.operation.webhook, message, cal.operation))
except Exception as e:
asyncio.create_task(send_all_operations_message(
message=f"Failed to create webhook message: {str(e)}",
level="warning", source="create_callback", operation=cal.operation))
for k in data:
if k not in [
"action",
"user",
"host",
"pid",
"ip",
"uuid",
"sleep_info",
"integrity_level",
"os",
"domain",
"architecture",
"external_ip",
"crypto_type",
"enc_key",
"dec_key",
"delegates",
"extra_info",
]:
status[k] = data[k]
return {**status, "id": cal.agent_callback_id, "action": "checkin"}
async def send_webhook_message(webhook, message, operation):
try:
message = js.loads(message)
async with aiohttp.ClientSession() as session:
async with session.post(webhook, json=message) as resp:
return await resp.text()
except Exception as e:
await send_all_operations_message(f"Failed to send webhook message: {str(e)}",
level="warning", source="new_callback_webhook", operation=operation)
async def load_commands_func(command_dict, callback, task):
try:
cmd = await app.db_objects.get(db_model.command_query, cmd=command_dict["cmd"],
payload_type=callback.registered_payload.payload_type)
if command_dict["action"] == "add":
try:
lc = await app.db_objects.get(db_model.loadedcommands_query, command=cmd, callback=callback)
lc.version = cmd.version
lc.operator = task.operator
await app.db_objects.update(lc)
except Exception as e:
await app.db_objects.create(db_model.LoadedCommands,
command=cmd,
version=cmd.version,
callback=callback,
operator=task.operator)
else:
lc = await app.db_objects.get(db_model.loadedcommands_query, callback=callback, command=cmd)
await app.db_objects.delete(lc)
return {"status": "success"}
except Exception as e:
print(e)
asyncio.create_task(send_all_operations_message(
message=f"Failed to update loaded command: {str(e)}",
level="warning", source="load_commands", operation=callback.operation))
return {"status": "error", "error": str(e)}
async def update_callback(data, UUID):
# { INPUT
# "action": "update_info",
# ... info to update, same as checkin data
# }
# { RESPONSE
# "action": "update_info",
# "status": "success",
# "error": "error message" (optional)
# }
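# illustrative example (hypothetical values) of an update_info exchange:
#   request:  {"action": "update_info", "user": "bob", "integrity_level": 3, "sleep_info": "sleep 10"}
#   response: {"action": "update_info", "status": "success"}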
cal = await app.db_objects.get(db_model.callback_query, agent_callback_id=UUID)
try:
if "user" in data:
cal.user = data["user"]
if "ip" in data:
cal.ip = data["ip"]
if "host" in data:
cal.host = data["host"].upper()
if "external_ip" in data:
cal.external_ip = data["external_ip"]
if "integrity_level" in data:
cal.integrity_level = data["integrity_level"]
if "domain" in data:
cal.domain = data["domain"]
if "extra_info" in data:
cal.extra_info = data["extra_info"]
if "os" in data:
cal.os = data["os"]
if "architecture" in data:
cal.architecture = data["architecture"]
if "pid" in data:
cal.pid = data["pid"]
if "sleep_info" in data:
cal.sleep_info = data["sleep_info"]
if "description" in data:
cal.description = data["description"]
if "process_name" in data:
cal.process_name = data["process_name"]
await app.db_objects.update(cal)
return {"action": "update_info", "status": "success"}
except Exception as e:
asyncio.create_task(send_all_operations_message(
message=f"Failed to update callback information {str(e)}",
level="warning", source="update_callback", operation=cal.operation))
return {"action": "update_info", "status": "error", "error": str(e)}
def cost_func(u, v, edge, prev_edge):
return 1
# https://pypi.org/project/Dijkstar/
async def get_graph(operation: db_model.Operation, directed: bool = True):
try:
available_edges = await app.db_objects.execute(
db_model.callbackgraphedge_query.where(
(db_model.CallbackGraphEdge.operation == operation)
& (db_model.CallbackGraphEdge.end_timestamp == None)
)
)
temp = Graph(undirected=not directed)
# dijkstra is directed, so if we have a bidirectional connection (type 3) account for that as well
for e in available_edges:
if e.source == e.destination:
temp.add_edge(e.source, e.c2_profile, e)
elif e.direction == 1:
temp.add_edge(e.source, e.destination, e)
elif e.direction == 2:
temp.add_edge(e.destination, e.source, e)
elif e.direction == 3:
temp.add_edge(e.source, e.destination, e)
temp.add_edge(e.destination, e.source, e)
profiles = await app.db_objects.execute(
db_model.c2profile_query.where(db_model.C2Profile.is_p2p == False)
)
for p in profiles:
temp.add_edge(p, "Mythic", 1)
return temp
except Exception as e:
asyncio.create_task(send_all_operations_message(message=f"Failed to create graph:\n{str(e)}", level="warning", operation=operation))
return Graph()
async def get_routable_messages(requester, request):
# are there any messages sitting in the database in the "submitted" stage that have routes from the requester
# 1. get all CallbackGraphEdge entries that have an end_timestamp of Null (they're still active)
# 2. feed into dijkstar and do shortest path
# 3. for each element in the shortest path, see if there's any tasking stored
# 4. if there's tasking, wrap it up in a message:
#       content is the same as that of a "get_tasking" reply with a -1 request
try:
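# illustrative sketch (hypothetical callbacks) of how a found path is used below:
#   requester (egress) <-> callbackA <-> callbackB, where callbackB has a submitted task
#   find_path(graph, requester, callbackB) gives nodes [requester, callbackA, callbackB];
#   reversed, v["path"][0] is callbackB, so the task message is first built and encrypted for callbackB,
#   then wrapped one "delegates" layer per remaining edge back toward the requester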
delegates = []
operation = requester.operation
graph = await get_graph(operation)
if graph.edge_count == 0:
return None # graph for this operation has no edges
submitted_tasks = await app.db_objects.execute(
db_model.task_query.where(
(db_model.Task.status == "submitted")
& (db_model.Callback.operation == operation)
)
)
temp_callback_tasks = {}
for t in submitted_tasks:
# print(t.to_json())
try:
path = find_path(graph, requester, t.callback, cost_func=cost_func)
except NoPathError:
# print("No path from {} to {}".format(requester.id, t.callback.id))
continue
if len(path.nodes) > 1 and path.nodes[-1] != requester:
# this means we have some sort of path longer than 1
# make a tasking message for this
# print(t.to_json())
if path.nodes[-1].agent_callback_id in temp_callback_tasks:
temp_callback_tasks[path.nodes[-1].agent_callback_id]["tasks"].append(t)
else:
temp_callback_tasks[path.nodes[-1].agent_callback_id] = {
"tasks": [t],
"path": path.nodes[::-1],
"edges": path.edges[::-1]
}
# now actually construct the tasks
for k, v in temp_callback_tasks.items():
#print(k)
#print(v)
tasks = []
for t in v["tasks"]:
t.status = "processing"
t.status_timestamp_processing = datetime.utcnow()
t.timestamp = t.status_timestamp_processing
t.callback.last_checkin = datetime.utcnow()
await app.db_objects.update(t.callback)
await app.db_objects.update(t)
tasks.append(
{
"command": t.command.cmd,
"parameters": t.params,
"id": t.agent_task_id,
"timestamp": t.timestamp.timestamp(),
}
)
# now that we have all the tasks we're going to send, make the message
message = {"action": "get_tasking", "tasks": tasks}
# now wrap this message up like it's going to be sent out; the innermost level is encrypted for the destination callback itself
#print(v["edges"])
#print(v["path"])
enc_key = await get_encryption_data(v["path"][0].agent_callback_id, v["edges"][0].c2_profile.name)
logger.info(
"Got encryption data for linked callback, about to send off {} to create_final_message".format(
str(message)))
final_msg = await create_final_message_from_data_and_profile_info(message,
enc_key,
v["path"][0].agent_callback_id,
request)
if final_msg is None:
message = {}
else:
if not isinstance(final_msg, str):
final_msg = final_msg.decode()
message = {
"message": final_msg,
"uuid": v["path"][0].agent_callback_id
}
# we don't need to do this wrapping for the last in the list since that's the egress node asking for tasking
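# illustrative sketch (hypothetical uuids) of the nesting produced by the loop below for a two-hop route requester <- A <- B:
#   inner = {"message": <msg encrypted for B>, "uuid": "<B's uuid>"}
#   then  = {"action": "get_tasking", "tasks": [], "delegates": [inner]}, encrypted for A
#   the requester ultimately receives {"message": <that encrypted blob>, "uuid": "<A's uuid>"} as a delegates entry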
for cal in v["edges"][1:]:
message = {"action": "get_tasking", "tasks": [], "delegates": [message]}
logger.info("destination agent: " + cal.destination.agent_callback_id)
logger.info("source agent: " + cal.source.agent_callback_id)
enc_key = await get_encryption_data(cal.destination.agent_callback_id, cal.c2_profile.name)
logger.info(
"Got encryption data for linked callback in for loop, about to send off {} to create_final_message".format(
str(message)))
final_msg = await create_final_message_from_data_and_profile_info(message,
enc_key,
cal.destination.agent_callback_id,
request)
if final_msg is None:
message = {}
else:
if not isinstance(final_msg, str):
final_msg = final_msg.decode()
logger.info("setting final target uuid of message: " + cal.destination.agent_callback_id)
message = {
"message": final_msg,
"uuid": cal.destination.agent_callback_id
}
#print(message)
delegates.append(message)
# print(delegates)
if len(delegates) == 0:
return None
else:
return delegates
except Exception as e:
asyncio.create_task(send_all_operations_message(
message=f"Failed to get delegate messages {str(sys.exc_info()[-1].tb_lineno) +str(e)}",
level="warning", source="get_delegate_messages", operation=requester.operation))
return None
@mythic.route(
mythic.config["API_BASE"] + "/callbacks/edges/<eid:int>", methods=["DELETE"]
)
@inject_user()
@scoped(
["auth:user", "auth:apitoken_user"], False
) # user or user-level api token are ok
async def remove_graph_edge(request, eid, user):
if user["auth"] not in ["access_token", "apitoken"]:
abort(
status_code=403,
message="Cannot access via Cookies. Use CLI or access via JS in browser",
)
if user["view_mode"] == "spectator" or user["current_operation"] == "":
return json(
{"status": "error", "error": "Spectators cannot remove graph edges"}
)
try:
operation = await app.db_objects.get(db_model.operation_query, name=user["current_operation"])
edge = await app.db_objects.get(db_model.callbackgraphedge_query, id=eid, operation=operation)
edge.end_timestamp = datetime.utcnow()
await app.db_objects.update(edge)
return json({"status": "success"})
except Exception as e:
return json({"status": "error", "error": "Failed to update: " + str(e)})
cached_socks = {}
async def start_socks(port: int, callback: Callback, task: Task):
print("starting socks")
try:
socks_instance = await app.db_objects.get(db_model.callback_query, port=port)
return {"status": "error", "error": "socks already started on that port"}
except:
# no callback is currently using this port, so it's available
if app.redis_pool.exists(f"SOCKS_RUNNING:{callback.id}"):
app.redis_pool.delete(f"SOCKS_RUNNING:{callback.id}")
kill_socks_processes(callback.id)
pass
server_address = ("0.0.0.0", port)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
sock.bind(server_address)
except Exception as e:
return {"status": "error", "error": "failed to bind to socket: " + str(e)}
try:
app.redis_pool.set(f"SOCKS_RUNNING:{callback.id}", "True")
callback.port = port
callback.socks_task = task
await app.db_objects.update(callback)
cached_socks[callback.id] = {
"socket": sock,
"connections": {},
"thread": threading.Thread(
target=thread_read_socks,
kwargs={"port": port, "callback_id": callback.id, "sock": sock},
),
}
cached_socks[callback.id]["thread"].start()
await app.db_objects.create(
db_model.OperationEventLog,
operator=task.operator,
operation=callback.operation,
message="Started socks proxy on port {} in callback {}".format(
str(port), str(callback.id)
),
)
print("started socks")
except Exception as e:
return {"status": "error", "error": str(e)}
return {"status": "success"}
def kill_socks_processes(callback_id: int):
try:
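# note: threading.Thread has no exit() method, so this call raises and is swallowed below;
# the reader thread actually stops once the callback entry is removed and its socket is closed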
cached_socks[callback_id]["thread"].exit()
except:
pass
try:
for key, con in cached_socks[callback_id]["connections"].items():
try:
con["connection"].shutdown(socket.SHUT_RDWR)
con["connection"].close()
except Exception:
print("failed to close a connection from proxychains")
except Exception as e:
logger.warning("exception in looping through connections in kill_socks_processes: " + str(e))
try:
cached_socks[callback_id]["socket"].shutdown(socket.SHUT_RDWR)
cached_socks[callback_id]["socket"].close()
except Exception as e:
logger.warning("exception trying to kill socket in kill_socks_processes: " + str(e))
try:
del cached_socks[callback_id]
except:
pass
try:
app.redis_pool.delete(f"SOCKS:{callback_id}:ToAgent")
except:
pass
try:
app.redis_pool.delete(f"SOCKS:{callback_id}:FromAgent")
except:
pass
async def stop_socks(callback: Callback, operator):
app.redis_pool.delete(f"SOCKS_RUNNING:{callback.id}")
kill_socks_processes(callback.id)
try:
port = callback.port
callback.port = None
await app.db_objects.update(callback)
await app.db_objects.create(
db_model.OperationEventLog,
operator=operator,
operation=callback.operation,
message="Stopped socks proxy on port {} in callback {}".format(
str(port), str(callback.id)
),
)
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.settimeout(0)
s.connect( ("127.0.0.1", port))
s.shutdown(socket.SHUT_RDWR)
s.close()
except Exception as s:
logger.warning("Failed to connect to current socket to issue a kill: " + str(s))
return {"status": "success"}
except Exception as e:
return {"status": "error", "error": "failed to find socks instance: " + str(e)}
def thread_send_socks_data(callback_id: int):
while True:
try:
if not app.redis_pool.exists(f"SOCKS_RUNNING:{callback_id}"):
kill_socks_processes(callback_id)
return
sub_class = app.redis_pool.pubsub(ignore_subscribe_messages=True)
sub_class.subscribe(f"SOCKS:{callback_id}:FromAgent")
for message in sub_class.listen():
if not app.redis_pool.exists(f"SOCKS_RUNNING:{callback_id}"):
kill_socks_processes(callback_id)
return
print("******* SENDING THE FOLLOWING TO PROXYCHAINS *******")
print(message)
if message["type"] == "message":
data = js.loads(message["data"])
for d in data:
print("processing the following to go to proxychains")
print(d)
if callback_id in cached_socks:
if d["server_id"] in cached_socks[callback_id]["connections"]:
conn = cached_socks[callback_id]["connections"][d["server_id"]]
if d["exit"]:
print("agent tasked mythic to close connection")
cached_socks[callback_id]["connections"].pop(d["server_id"], None)
try:
conn["connection"].shutdown(socket.SHUT_RDWR)
conn["connection"].close()
except Exception as d:
print("error trying to close connection that agent told me to close: " + str(d))
pass
else:
conn["connection"].sendall(base64.b64decode(d["data"]))
else:
# we don't have d["server_id"] tracked as an active connection, so unless they said to kill it, tell them to kill it
#print("got message for something we aren't tracking")
if not d["exit"]:
print("telling agent to kill connection")
app.redis_pool.rpush(f"SOCKS:{callback_id}:ToAgent", js.dumps({
"exit": True,
"server_id": d["server_id"],
"data": ""
}))
else:
# we no longer have that callback_id in our cache, meaning the socks proxy was stopped, so exit this thread
return
except Exception as e:
print("******** EXCEPTION IN SEND SOCKS DATA *****\n{}".format(str(e)))
#print(cached_socks[callback.id]["connections"])
async def get_socks_data(callback: Callback):
# called during a get_tasking function
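# each entry handed to the agent is a dict produced by thread_get_socks_data_from_connection below, e.g.:
#   {"exit": False, "server_id": 3, "data": "<base64 of raw bytes from the proxychains client>"}
# an entry with "exit": True tells the agent to close the corresponding connection (the server_id value is illustrative)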
data = []
while True:
try:
d = app.redis_pool.lpop(f"SOCKS:{callback.id}:ToAgent")
if d is None:
break
print("agent picking up data from callback queue")
print(d)
data.append(js.loads(d))
#data.append(cached_socks[callback.id]["queue"].popleft())
except Exception as e:
print("exception in get_socks_data for an agent: " + str(e))
break
if len(data) > 0:
print("******* SENDING THE FOLLOWING TO THE AGENT ******")
print(data)
return data
# accept connections from proxychains clients
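# rough data flow per callback, as implemented below (redis key names taken from this file):
#   proxychains client -> thread_get_socks_data_from_connection -> redis list SOCKS:<id>:ToAgent -> get_socks_data -> agent
#   agent responses (published elsewhere to the SOCKS:<id>:FromAgent pubsub channel) -> thread_send_socks_data -> client socket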
def thread_read_socks(port: int, callback_id: int, sock: socket) -> None:
# print(port)
# print(callback_id)
sock.listen(1)
id = 1
try:
#print("waiting to accept connections")
# spin off a thread to handle messages from agent to connections
toAgentThread = threading.Thread(target=thread_send_socks_data, kwargs={"callback_id": callback_id})
toAgentThread.start()
while callback_id in cached_socks:
connection, client_address = sock.accept()
if not app.redis_pool.exists(f"SOCKS_RUNNING:{callback_id}"):
kill_socks_processes(callback_id)
return
#print("got new connection for " + str(id))
conn_sock = {
"connection": connection,
"thread_read": threading.Thread(
target=thread_get_socks_data_from_connection,
kwargs={"port": port, "connection": connection, "callback_id": callback_id, "connection_id": id}
),
}
cached_socks[callback_id]["connections"][id] = conn_sock
cached_socks[callback_id]["connections"][id]["thread_read"].start()
id = id + 1
except Exception:
try:
kill_socks_processes(callback_id)
except Exception as e:
pass
#print("exception in accepting new socket connections!!!!!")
def thread_get_socks_data_from_connection(port: int, connection: socket, callback_id: int, connection_id: int):
try:
#print("reading 4 bytes and sending 05 00")
data_raw = connection.recv(4)
#print(str(data))
connection.sendall(b'\x05\x00')
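# the bytes just read are the client's SOCKS5 greeting (version + auth methods); replying b'\x05\x00'
# advertises "SOCKS version 5, no authentication required". The rest of the negotiation (e.g. the
# CONNECT request) is base64'd below and relayed to the agent for handling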
#connection.settimeout(2)
#print("wait to read data from connection for: " + str(connection_id))
while callback_id in cached_socks and connection_id in cached_socks[callback_id]["connections"]:
#data = None
#print("about to call connection.recv on connection " + str(connection_id))
data_raw = connection.recv(8192)
if not data_raw:
# this means our connection just closed, tell the agent to close their end
#print("data_raw is none for connection " + str(connection_id))
app.redis_pool.rpush(f"SOCKS:{callback_id}:ToAgent", js.dumps({
"exit": True,
"server_id": connection_id,
"data": ""
}))
cached_socks[callback_id]["connections"].pop(connection_id, None)
try:
connection.shutdown(socket.SHUT_RDWR)
connection.close()
except Exception as d:
#print("error trying to close connection that agent told me to close: " + str(d))
pass
return
data = base64.b64encode(data_raw).decode()
#print("++++++appending data to ToAgent for " + str(connection_id))
#print(data)
app.redis_pool.rpush(f"SOCKS:{callback_id}:ToAgent", js.dumps({
"exit": False,
"server_id": connection_id,
"data": data
}))
#cached_socks[callback_id]["queue"].append({
# "exit": False,
# "server_id": connection_id,
# "data": data
#})
#print("wait to read more data from connection for: " + str(connection_id))
#print("no longer in while loop for connection: " + str(connection_id))
#print(cached_socks[callback_id]["connections"])
except Exception:
#print("failed to read from proxychains client, sending exit to agent")
if callback_id in cached_socks and connection_id in cached_socks[callback_id]["connections"]:
#print("adding exit message to redis")
app.redis_pool.rpush(f"SOCKS:{callback_id}:ToAgent", js.dumps({
"exit": True,
"server_id": connection_id,
"data": ""
}))
#cached_socks[callback_id]["queue"].append({
# "exit": True,
# "server_id": connection_id,
# "data": ""
#})
@mythic.route(mythic.config["API_BASE"] + "/callbacks/<cid:int>", methods=["GET"])
@inject_user()
@scoped(
["auth:user", "auth:apitoken_user"], False
) # user or user-level api token are ok
async def get_one_callback(request, cid, user):
if user["auth"] not in ["access_token", "apitoken"]:
abort(
status_code=403,
message="Cannot access via Cookies. Use CLI or access via JS in browser",
)
try:
if user["current_operation"] == "":
return json({"status": "error", "error": "must be part of an operation"})
operation = await app.db_objects.get(db_model.operation_query, name=user["current_operation"])
callback = await app.db_objects.prefetch(db_model.callback_query.where(
(db_model.Callback.id == cid) & (db_model.Callback.operation == operation)
), db_model.callbacktoken_query)
callback = list(callback)[0]
return_json = callback.to_json()
loaded_commands = await app.db_objects.execute(
db_model.loadedcommands_query.where(LoadedCommands.callback == callback)
)
return_json["loaded_commands"] = [
{
"command": lc.command.cmd,
"version": lc.version,
"mythic_version": lc.command.version,
}
for lc in loaded_commands
]
script_commands = await app.db_objects.execute(db_model.command_query.where(
(db_model.Command.payload_type == callback.registered_payload.payload_type) &
(db_model.Command.script_only == True)
))
for c in script_commands:
return_json["loaded_commands"].append(
{"command": c.cmd,
"version": c.version,
"mythic_version": c.version}
)
callbackc2profiles = await app.db_objects.execute(
db_model.callbackc2profiles_query.where(db_model.CallbackC2Profiles.callback == callback)
)
c2_profiles_info = {}
for c2p in callbackc2profiles:
c2_profile_params = await app.db_objects.execute(
db_model.c2profileparametersinstance_query.where(
(db_model.C2ProfileParametersInstance.callback == callback)
& (
db_model.C2ProfileParametersInstance.c2_profile
== c2p.c2_profile
)
)
)
params = [p.to_json() for p in c2_profile_params]
c2_profiles_info[c2p.c2_profile.name] = params
return_json["c2_profiles"] = c2_profiles_info
build_parameters = await app.db_objects.execute(
db_model.buildparameterinstance_query.where(
db_model.BuildParameterInstance.payload == callback.registered_payload
)
)
build_params = [t.to_json() for t in build_parameters]
return_json["build_parameters"] = build_params
return_json["payload_uuid"] = callback.registered_payload.uuid
return_json["payload_name"] = bytes(callback.registered_payload.file.filename).decode("utf-8")
return_json["status"] = "success"
paths = await path_to_callback(callback, "Mythic")
return_json["path"] = [str(p) if p == "Mythic" else js.dumps(p.to_json()) for p in paths]
return json(return_json)
except Exception as e:
logger.warning("callback_api.py - " + str(sys.exc_info()[-1].tb_lineno) + " " + str(e))
return json(
{"status": "error", "error": "failed to get callback: " + str(e)}, 200
)
@mythic.route(mythic.config["API_BASE"] + "/update_callback_webhook", methods=["POST"])
@inject_user()
@scoped(
["auth:user", "auth:apitoken_user"], False
) # user or user-level api token are ok
async def update_callback_webhook(request, user):
if user["auth"] not in ["access_token", "apitoken"]:
abort(
status_code=403,
message="Cannot access via Cookies. Use CLI or access via JS in browser",
)
# called as an action webhook (input nested under request.json["input"]["input"]) to update a
# callback's description, active status, or lock state
if user["current_operation"] == "":
return json(
{"status": "error", "error": "Must be part of a current operation first"}
)
if user["view_mode"] == "spectator":
return json({"status": "error", "error": "Spectators cannot issue tasking"})
try:
operator = await app.db_objects.get(db_model.operator_query, username=user["username"])
except Exception as e:
return json(
{
"status": "error",
"error": "failed to get the current user's info from the database",
}
)
try:
operation = await app.db_objects.get(db_model.operation_query, name=user["current_operation"])
except Exception as e:
return json({"status": "error", "error": "failed to get the current operation"})
try:
data = request.json["input"]["input"]
print(data)
cb = await app.db_objects.get(db_model.callback_query, id=data["callback_id"], operation=operation)
return json(await update_callback_active_lock(user, request, cb, data))
except Exception as e:
return json({"status": "error", "error": "failed to get callback: " + str(e)})
async def update_callback_active_lock(user, request, cal, data):
if "description" in data:
if data["description"] == "reset":
# set the description back to what it was from the payload
cal.description = cal.registered_payload.tag
else:
cal.description = data["description"]
if "active" in data:
if data["active"]:
if not cal.active:
c2profiles = await app.db_objects.execute(
db_model.callbackc2profiles_query.where(db_model.CallbackC2Profiles.callback == cal)
)
for c2 in c2profiles:
if not c2.c2_profile.is_p2p:
try:
edge = await app.db_objects.get(
db_model.CallbackGraphEdge,
source=cal,
destination=cal,
c2_profile=c2.c2_profile,
direction=1,
end_timestamp=None,
operation=cal.operation,
)
except Exception as d:
print(d)
edge = await app.db_objects.create(
db_model.CallbackGraphEdge,
source=cal,
destination=cal,
c2_profile=c2.c2_profile,
direction=1,
end_timestamp=None,
operation=cal.operation,
)
cal.active = True
else:
if cal.active:
try:
edges = await app.db_objects.execute(
db_model.callbackgraphedge_query.where(
(db_model.CallbackGraphEdge.source == cal)
& (db_model.CallbackGraphEdge.destination == cal)
& (db_model.CallbackGraphEdge.end_timestamp == None)
& (db_model.CallbackGraphEdge.operation == cal.operation)
)
)
for edge in edges:
if not edge.c2_profile.is_p2p:
edge.end_timestamp = datetime.utcnow()
await app.db_objects.update(edge)
except Exception as d:
logger.warning(
"callback_api.py - error trying to add end-timestamps to edges when going inactive"
)
logger.warning("callback_api.py - " + str(sys.exc_info()[-1].tb_lineno) + " " + str(d))
cal.active = False
if "locked" in data:
if cal.locked and not data["locked"]:
# currently locked and trying to unlock, must be admin, admin of that operation, or the user that did it
if (
user["admin"]
or cal.operation.name in user["admin_operations"]
or user["username"] == cal.locked_operator.username
):
cal.locked = False
cal.locked_operator = None
else:
await app.db_objects.update(cal)
return json(
{"status": "error", "error": "Not authorized to unlock"}
)
elif not cal.locked and data["locked"]:
# currently unlocked and wanting to lock it
if (
user["admin"]
or cal.operation.name in user["operations"]
or cal.operation.name in user["admin_operations"]
):
cal.locked = True
operator = await app.db_objects.get(db_model.operator_query, username=user["username"])
cal.locked_operator = operator
else:
await app.db_objects.update(cal)
return json({"status": "error", "error": "Not authorized to lock"})
await app.db_objects.update(cal)
return {"status": "success"}
@mythic.route(mythic.config["API_BASE"] + "/callbacks/<cid:int>", methods=["PUT"])
@inject_user()
@scoped(["auth:user", "auth:apitoken_user", "auth:apitoken_c2"], False)
async def update_callback_web(request, cid, user):
if user["auth"] not in ["access_token", "apitoken"]:
abort(
status_code=403,
message="Cannot access via Cookies. Use CLI or access via JS in browser",
)
if user["view_mode"] == "spectator" or user["current_operation"] == "":
return json({"status": "error", "error": "Spectators cannot update callbacks"})
data = request.json
try:
operation = await app.db_objects.get(db_model.operation_query, name=user["current_operation"])
cal = await app.db_objects.prefetch(db_model.callback_query.where(
(db_model.Callback.id == cid) & (db_model.Callback.operation == operation)
), db_model.callbacktoken_query)
cal = list(cal)[0]
updated_cal = cal.to_json()
status = await update_callback_active_lock(user, request, cal, data)
if status["status"] == "success":
return json({**status, **updated_cal})
else:
return json(status)
except Exception as e:
logger.warning("callback_api.py - " + str(sys.exc_info()[-1].tb_lineno) + " " + str(e))
return json(
{"status": "error", "error": "failed to update callback: " + str(e)}
)
@mythic.route(mythic.config["API_BASE"] + "/callbacks/<cid:int>", methods=["DELETE"])
@inject_user()
@scoped(
["auth:user", "auth:apitoken_user"], False
) # user or user-level api token are ok
async def remove_callback(request, cid, user):
if user["auth"] not in ["access_token", "apitoken"]:
abort(
status_code=403,
message="Cannot access via Cookies. Use CLI or access via JS in browser",
)
if user["view_mode"] == "spectator" or user["current_operation"] == "":
return json(
{"status": "error", "error": "Spectators cannot make callbacks inactive"}
)
try:
cal = await app.db_objects.prefetch(db_model.callback_query.where(db_model.Callback.id == cid),
db_model.callbacktoken_query)
cal = list(cal)[0]
if user["admin"] or cal.operation.name in user["operations"]:
cal.active = False
await app.db_objects.update(cal)
success = {"status": "success"}
deleted_cal = cal.to_json()
return json({**success, **deleted_cal})
else:
return json(
{
"status": "error",
"error": "must be an admin or part of that operation to mark it as no longer active",
}
)
except Exception as e:
logger.warning("callback_api.py - " + str(sys.exc_info()[-1].tb_lineno) + " " + str(e))
return json(
{"status": "error", "error": "failed to delete callback: " + str(e)}
)
@mythic.route(
mythic.config["API_BASE"] + "/callbacks/<cid:int>/all_tasking", methods=["GET"]
)
@inject_user()
@scoped(
["auth:user", "auth:apitoken_user"], False
) # user or user-level api token are ok
async def callbacks_get_all_tasking(request, user, cid):
if user["auth"] not in ["access_token", "apitoken"]:
abort(
status_code=403,
message="Cannot access via Cookies. Use CLI or access via JS in browser",
)
# Get all of the tasks issued so far for the specified callback
try:
operation = await app.db_objects.get(db_model.operation_query, name=user["current_operation"])
callback = await app.db_objects.prefetch(db_model.callback_query.where(
(db_model.Callback.id == cid) & (db_model.Callback.operation == operation)),
db_model.CallbackToken.select()
)
callback = list(callback)[0]
cb_json = callback.to_json()
cb_json["tasks"] = []
cb_json["payload_os"] = callback.registered_payload.os
tasks = await app.db_objects.execute(
db_model.task_query.where(Task.callback == callback).order_by(Task.id)
)
for t in tasks:
cb_json["tasks"].append({**t.to_json()})
return json({"status": "success", **cb_json})
except Exception as e:
logger.warning("callback_api.py - " + str(sys.exc_info()[-1].tb_lineno) + " " + str(e))
return json({"status": "error", "error": str(e)})
@mythic.route(
mythic.config["API_BASE"] + "/callbacks/<page:int>/<size:int>", methods=["GET"]
)
@inject_user()
@scoped(
["auth:user", "auth:apitoken_user"], False
) # user or user-level api token are ok
async def get_pageinate_callbacks(request, user, page, size):
if user["auth"] not in ["access_token", "apitoken"]:
abort(
status_code=403,
message="Cannot access via Cookies. Use CLI or access via JS in browser",
)
# get all of the artifact tasks for the current operation
if page <= 0 or size <= 0:
return json({"status": "error", "error": "page or size must be greater than 0"})
try:
operation = await app.db_objects.get(db_model.operation_query, name=user["current_operation"])
except Exception as e:
return json({"status": "error", "error": "failed to get current operation"})
callbacks_query = db_model.callback_query.where(Callback.operation == operation)
count = await app.db_objects.count(callbacks_query)
if page * size > count:
page = ceil(count / size)
if page == 0:
page = 1
cb = await app.db_objects.prefetch(
callbacks_query.order_by(-Callback.id).paginate(page, size), db_model.CallbackToken.select()
)
return json(
{
"status": "success",
"callbacks": [c.to_json() for c in cb],
"total_count": count,
"page": page,
"size": size,
}
)
# Search callbacks by host (regex) with pagination
@mythic.route(mythic.config["API_BASE"] + "/callbacks/search", methods=["POST"])
@inject_user()
@scoped(
["auth:user", "auth:apitoken_user"], False
) # user or user-level api token are ok
async def search_callbacks_with_pageinate(request, user):
if user["auth"] not in ["access_token", "apitoken"]:
abort(
status_code=403,
message="Cannot access via Cookies. Use CLI or access via JS in browser",
)
try:
data = request.json
if "search" not in data:
return json({"status": "error", "error": "must supply a search term"})
operation = await app.db_objects.get(db_model.operation_query, name=user["current_operation"])
except Exception as e:
return json({"status": "error", "error": "Cannot find operation"})
try:
count = await app.db_objects.count(
db_model.callback_query.where(
(Callback.operation == operation)
& (Callback.host.regexp(data["search"]))
)
)
if "page" not in data:
cb = await app.db_objects.execute(
db_model.callback_query.where(
(Callback.operation == operation)
& (Callback.host.regexp(data["search"]))
).order_by(-Callback.id)
)
data["page"] = 1
data["size"] = count
else:
if (
"page" not in data
or "size" not in data
or int(data["size"]) <= 0
or int(data["page"]) <= 0
):
return json(
{
"status": "error",
"error": "size and page must be supplied and be greater than 0",
}
)
data["size"] = int(data["size"])
data["page"] = int(data["page"])
if data["page"] * data["size"] > count:
data["page"] = ceil(count / data["size"])
if data["page"] == 0:
data["page"] = 1
cb = await app.db_objects.execute(
db_model.callback_query.where(
(Callback.operation == operation)
& (Callback.host.regexp(data["search"]))
)
.order_by(-Callback.id)
.paginate(data["page"], data["size"])
)
return json(
{
"status": "success",
"callbacks": [c.to_json() for c in cb],
"total_count": count,
"page": data["page"],
"size": data["size"],
}
)
except Exception as e:
logger.warning("callback_api.py - " + str(sys.exc_info()[-1].tb_lineno) + " " + str(e))
return json({"status": "error", "error": str(e)})
async def add_p2p_route(agent_message, callback, task):
# { INPUT
# "edges": [
# {
# "source": "uuid of callback",
# "destination": "uuid of adjoining callback",
# "direction": 1 or 2 or 3,
# "metadata": "{ optional metadata json string }",
# "action": "add" or "remove"
# "c2_profile": "name of the c2 profile"
# }
# ]
# }
# { RESPONSE
# "status": "success" or "error"
# }
# dijkstra is directed, so if we have a bidirectional connection (type 3) account for that as well
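    # Hedged illustration (not from the original source): one "add" entry in the
    # "edges" list could look like the dict below; the UUIDs and the profile name
    # are placeholder assumptions, not real values.
    #   {
    #       "source": "aaaaaaaa-1111-2222-3333-444444444444",
    #       "destination": "bbbbbbbb-5555-6666-7777-888888888888",
    #       "direction": 1,
    #       "metadata": "",
    #       "action": "add",
    #       "c2_profile": "http"
    #   }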
for e in agent_message:
if task is None and "task_id" in e and e["task_id"] != "" and e["task_id"] is not None:
try:
this_task = await app.db_objects.get(db_model.task_query, agent_task_id=e["task_id"])
            except Exception:
this_task = None
asyncio.create_task(send_all_operations_message(message="Failed to find specified task for 'edges' message",
level="warning",
source="generic_edge_processing"))
else:
this_task = task
if e["action"] == "add":
try:
source = await app.db_objects.get(db_model.callback_query, agent_callback_id=e["source"])
destination = await app.db_objects.get(
db_model.callback_query, agent_callback_id=e["destination"]
)
if callback is None:
callback = source
if (
"c2_profile" in e
and e["c2_profile"] is not None
and e["c2_profile"] != ""
):
profile = await app.db_objects.get(db_model.c2profile_query, name=e["c2_profile"])
else:
await app.db_objects.create(db_model.OperationEventLog, operation=callback.operation,
level="warning", message=f"Failed to add route between {source.id} and {destination.id}. No c2_profile specified")
return
# there can only be one source-destination-direction-metadata-c2_profile combination
try:
edge = await app.db_objects.get(
db_model.CallbackGraphEdge,
source=source,
destination=destination,
direction=e["direction"],
metadata=e["metadata"],
operation=callback.operation,
c2_profile=profile,
end_timestamp=None,
)
return
except Exception as error:
edge = await app.db_objects.create(
db_model.CallbackGraphEdge,
source=source,
destination=destination,
direction=e["direction"],
metadata=e["metadata"],
operation=callback.operation,
c2_profile=profile,
task_start=this_task,
)
except Exception as d:
await app.db_objects.create(db_model.OperationEventLog, operation=callback.operation,
level="warning",
message=f"Failed to add p2p route. {str(sys.exc_info()[-1].tb_lineno) + ' ' + str(d)}")
return
if e["action"] == "remove":
try:
# find the edge its talking about
# print(e)
source = await app.db_objects.get(db_model.callback_query, agent_callback_id=e["source"])
destination = await app.db_objects.get(
db_model.callback_query, agent_callback_id=e["destination"]
)
if callback is None:
callback = source
if (
"c2_profile" in e
and e["c2_profile"] is not None
and e["c2_profile"] != ""
):
profile = await app.db_objects.get(db_model.c2profile_query, name=e["c2_profile"])
else:
await app.db_objects.create(db_model.OperationEventLog, operation=callback.operation,
level="warning",
message=f"Failed to remove route between {source.id} and {destination.id}. c2_profile not specified")
return
edge = await app.db_objects.get(
db_model.CallbackGraphEdge,
source=source,
destination=destination,
direction=e["direction"],
metadata=e["metadata"],
operation=callback.operation,
c2_profile=profile,
end_timestamp=None,
)
edge.end_timestamp = datetime.utcnow()
edge.task_end = this_task
await app.db_objects.update(edge)
except Exception as d:
await app.db_objects.create(db_model.OperationEventLog, operation=callback.operation,
level="warning",
message=f"Failed to remove route. {str(sys.exc_info()[-1].tb_lineno) + ' ' + str(d)}")
return
return
async def path_to_callback(callback, destination):
try:
graph = await get_graph(callback.operation, directed=False)
if graph.edge_count == 0:
return [] # graph for this operation has no edges
try:
path = find_path(
graph, callback, destination, cost_func=cost_func
)
except NoPathError:
return []
return path.nodes
except Exception as e:
asyncio.create_task(send_all_operations_message(message=f"Error in getting path to callback:\n{str(sys.exc_info()[-1].tb_lineno) + ' ' + str(e)}",
level="warning", operation=callback.operation))
return []
|
CourierService.py
|
'''
Created on Jan 17, 2015
@author: owwlo
'''
from PyQt5 import QtGui, QtCore, QtQml, QtQuick
from PyQt5.QtCore import QObject, QUrl, Qt, QVariant, QMetaObject, Q_ARG
import threading
import websocket
import json
import logging
from time import sleep
import coloredlogs
WS_URL = "ws://localhost:8888/computer"
RECONNECT_INTERVAL = 5
logger = logging.getLogger("CourierApp")
coloredlogs.install(level = logging.DEBUG, show_hostname = False, show_timestamps = False)
class CourierService(threading.Thread, QObject):
class WebSocketHandler():
def __init__(self, service):
self.__service = service
def onMessage(self, ws, message):
self.__service.onMessage(message)
def onError(self, ws, error):
logger.debug("onError " + str(error))
def onClose(self, ws):
logger.debug("onCLose")
self.__service.ws = None
def onOpen(self, ws):
logger.debug("onOpen")
self.__service.ws = ws
self.__service.token = None
fetchThread = threading.Thread(target=self.__service.fetchToken)
fetchThread.start()
# fetchThread.join()
onTokenFetched = QtCore.pyqtSignal([str])
onNewMessage = QtCore.pyqtSignal([dict])
def __init__(self, app):
threading.Thread.__init__(self)
QObject.__init__(self, app)
self.__app = app
self.handler = self.WebSocketHandler(self)
        self.ws = None
        self.token = None
# Initialize callback lists for
self.__callbacksOnNewMessageFromDevice = []
self.__callbacksOnTokenFetched = []
self.__callbacksOnDeviceConnected = []
def run(self):
        while True:
ws = websocket.WebSocketApp(WS_URL,
on_message=self.handler.onMessage,
on_error=self.handler.onError,
on_close=self.handler.onClose,
on_open=self.handler.onOpen)
ws.run_forever()
logger.error("Lost connection, will try again in %d seconds." % RECONNECT_INTERVAL)
sleep(RECONNECT_INTERVAL)
def fetchToken(self):
MAX_RETRY_CNT = 5
cnt = MAX_RETRY_CNT
        while cnt > 0 and self.token is None:
if cnt != MAX_RETRY_CNT:
                logger.warning(
"Connect failed, reconnecting... trying count remains: %d" % cnt)
self.sendHash(self.getTokenRequestPackage())
sleep(5)
cnt -= 1
        if self.token is None:
logger.error("Cannot connect to server")
# else:
# self.on
def getTokenRequestPackage(self):
return {"type": "operation", "command": "request_token"}
def getReplyRequestPackage(self, cId, replyText):
return {"type": "reply", "cId": str(cId), "content": replyText}
def sendReply(self, cId, replyText):
pkg = self.getReplyRequestPackage(cId, replyText)
self.sendHash(pkg)
def parseMessage(self, message):
parsed = None
try:
parsed = json.loads(message)
except Exception as e:
            logger.warning(str(e))
return None
return parsed
def sendHash(self, h):
if self.token:
h["token"] = self.token
j = json.dumps(h)
self.send(j)
def send(self, message):
        if self.ws is not None:
self.ws.send(message)
else:
logger.error("Socket Failed.")
def onMessage(self, message):
logger.debug("Raw Message from Server: " + message)
msg = self.parseMessage(message)
        if msg is None:
return
mtype = msg["type"]
if mtype == "new_msg":
self.onNewMessageFromDevice(msg)
elif mtype == "token_response":
self.onTokenResponse(msg)
elif mtype == "info_paired":
self.onDeviceConnected(msg)
def onTokenResponse(self, message):
logger.debug("Get token from server: " + message["token"])
self.token = message["token"]
for fn in self.__callbacksOnTokenFetched:
fn(self.token)
self.onTokenFetched.emit(self.token)
def onNewMessageFromDevice(self, message):
for fn in self.__callbacksOnNewMessageFromDevice:
fn(message)
self.onNewMessage.emit(message)
def onDeviceConnected(self, message):
for fn in self.__callbacksOnDeviceConnected:
fn(message)
def addOnNewMessageFromDevice(self, callback):
self.__callbacksOnNewMessageFromDevice.append(callback)
def removeOnNewMessageFromDevice(self, callback):
self.__callbacksOnNewMessageFromDevice.remove(callback)
def addOnTokenFetched(self, callback):
self.__callbacksOnTokenFetched.append(callback)
def removeOnTokenFetched(self, callback):
self.__callbacksOnTokenFetched.remove(callback)
def addOnDeviceConnected(self, callback):
self.__callbacksOnDeviceConnected.append(callback)
def removeOnDeviceConnected(self, callback):
self.__callbacksOnDeviceConnected.remove(callback)
|
test_url_cost.py
|
import requests
import json
import csv
import time
import aiohttp
import asyncio
import threading
import queue
import uuid
import urllib.request
test_num = 100
url = 'https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&tn=baiduadv&wd=05782212292&rqlang=cn&rsv_enter=1&rsv_sug3=2'
query_header = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cache-Control": "max-age=0",
"Connection": "keep-alive",
"Host": "www.baidu.com",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36",
}
query_cookie = {
"BAIDUID": "",
"BD_CK_SAM": "1",
"PSTM": '',
"PSINO": "1",
}
def test():
resp = requests.get(url, headers=query_header, cookies=query_cookie)
print(resp.status_code)
print(resp.text)
def do_test():
st = time.time()
for i in range(test_num):
test()
et = time.time()
inv = et - st
print(inv, test_num/float(inv))
def _gen_cookie_guid():
u_str = uuid.uuid4()
u_str = str(u_str)
return u_str.replace('-', '')
def thread_test(q):
while True:
url = q.get()
if not url:
break
query_cookie['BAIDUID'] = "%s:FG=1" % _gen_cookie_guid().upper()
query_cookie['PSTM'] = str(int(time.time()))
resp = requests.get(url, headers=query_header, cookies=query_cookie)
print(resp.status_code)
def do_thread_test():
thread_pool_size = 8
q_list = [queue.Queue() for i in range(thread_pool_size)]
t_list = [threading.Thread(target=thread_test, args=(q,)) for q in q_list]
for t in t_list:
t.start()
for i in range(test_num):
q_list[i % thread_pool_size].put(url)
    # Restart any worker thread that has died before all work is queued.
    for t in t_list:
        if not t.is_alive():
            index = t_list.index(t)
            tt = threading.Thread(target=thread_test, args=(q_list[index],))
            tt.start()
            t_list[index] = tt
for q in q_list:
q.put(None)
for t in t_list:
t.join()
print('test done')
def do_frame(fun):
st = time.time()
fun()
et = time.time()
    cost = et - st
print(f"cost:{cost}, ratio:{test_num/cost}")
def do_async_test():
async def get(url):
query_cookie['BAIDUID'] = "%s:FG=1" % _gen_cookie_guid().upper()
query_cookie['PSTM'] = str(int(time.time()))
async with aiohttp.ClientSession(cookies=query_cookie) as session:
async with session.get(url, headers= query_header) as resp:
k = await resp.text()
return k
st = time.time()
loop = asyncio.get_event_loop()
tasks = []
for i in range(test_num):
tasks.append(get(url))
done = loop.run_until_complete(asyncio.gather(*tasks))
for i in done:
print(i)
loop.close()
et = time.time()
    cost = et - st
print(f"cost:{cost}, ratio:{test_num/cost}")
def do_single():
for i in range(test_num):
resp = urllib.request.urlopen(url)
#print(resp.status_code)
do_frame(do_single)
|
im2rec.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import sys
curr_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(curr_path, "../python"))
import mxnet as mx
import random
import argparse
import cv2
import time
import traceback
try:
import multiprocessing
except ImportError:
multiprocessing = None
def list_image(root, recursive, exts):
"""Traverses the root of directory that contains images and
generates image list iterator.
Parameters
----------
root: string
recursive: bool
exts: string
Returns
-------
image iterator that contains all the image under the specified path
"""
i = 0
if recursive:
cat = {}
for path, dirs, files in os.walk(root, followlinks=True):
dirs.sort()
files.sort()
for fname in files:
fpath = os.path.join(path, fname)
suffix = os.path.splitext(fname)[1].lower()
if os.path.isfile(fpath) and (suffix in exts):
if path not in cat:
cat[path] = len(cat)
yield (i, os.path.relpath(fpath, root), cat[path])
i += 1
for k, v in sorted(cat.items(), key=lambda x: x[1]):
print(os.path.relpath(k, root), v)
else:
for fname in sorted(os.listdir(root)):
fpath = os.path.join(root, fname)
suffix = os.path.splitext(fname)[1].lower()
if os.path.isfile(fpath) and (suffix in exts):
yield (i, os.path.relpath(fpath, root), 0)
i += 1
def write_list(path_out, image_list):
"""Hepler function to write image list into the file.
The format is as below,
integer_image_index \t float_label_index \t path_to_image
Note that the blank between number and tab is only used for readability.
Parameters
----------
path_out: string
image_list: list
"""
with open(path_out, "w") as fout:
for i, item in enumerate(image_list):
line = "%d\t" % item[0]
for j in item[2:]:
line += "%f\t" % j
line += "%s\n" % item[1]
fout.write(line)
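# Hedged example (not part of the original script): writing a two-entry image list in the
# format documented above. The file name and the items are illustrative assumptions.
def _example_write_list():
    items = [
        (0, "cat/img_0001.jpg", 0.0),  # image index, relative path, label
        (1, "dog/img_0002.jpg", 1.0),
    ]
    write_list("example.lst", items)
    # Produces lines such as: "0\t0.000000\tcat/img_0001.jpg"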
def make_list(args):
"""Generates .lst file.
Parameters
----------
args: object that contains all the arguments
"""
image_list = list_image(args.root, args.recursive, args.exts)
image_list = list(image_list)
if args.shuffle is True:
random.seed(100)
random.shuffle(image_list)
N = len(image_list)
chunk_size = (N + args.chunks - 1) // args.chunks
for i in range(args.chunks):
chunk = image_list[i * chunk_size : (i + 1) * chunk_size]
if args.chunks > 1:
str_chunk = "_%d" % i
else:
str_chunk = ""
sep = int(chunk_size * args.train_ratio)
sep_test = int(chunk_size * args.test_ratio)
if args.train_ratio == 1.0:
write_list(args.prefix + str_chunk + ".lst", chunk)
else:
if args.test_ratio:
write_list(args.prefix + str_chunk + "_test.lst", chunk[:sep_test])
if args.train_ratio + args.test_ratio < 1.0:
write_list(
args.prefix + str_chunk + "_val.lst", chunk[sep_test + sep :]
)
write_list(
args.prefix + str_chunk + "_train.lst", chunk[sep_test : sep_test + sep]
)
def read_list(path_in):
"""Reads the .lst file and generates corresponding iterator.
Parameters
----------
path_in: string
Returns
-------
item iterator that contains information in .lst file
"""
with open(path_in) as fin:
while True:
line = fin.readline()
if not line:
break
line = [i.strip() for i in line.strip().split("\t")]
line_len = len(line)
# check the data format of .lst file
if line_len < 3:
print(
"lst should have at least has three parts, but only has %s parts for %s"
% (line_len, line)
)
continue
try:
item = [int(line[0])] + [line[-1]] + [float(i) for i in line[1:-1]]
except Exception as e:
print("Parsing lst met error for %s, detail: %s" % (line, e))
continue
yield item
def image_encode(args, i, item, q_out):
"""Reads, preprocesses, packs the image and put it back in output queue.
Parameters
----------
args: object
i: int
item: list
q_out: queue
"""
fullpath = os.path.join(args.root, item[1])
if len(item) > 3 and args.pack_label:
header = mx.recordio.IRHeader(0, item[2:], item[0], 0)
else:
header = mx.recordio.IRHeader(0, item[2], item[0], 0)
if args.pass_through:
try:
with open(fullpath, "rb") as fin:
img = fin.read()
s = mx.recordio.pack(header, img)
q_out.put((i, s, item))
except Exception as e:
traceback.print_exc()
print("pack_img error:", item[1], e)
q_out.put((i, None, item))
return
try:
img = cv2.imread(fullpath, args.color)
    except Exception:
traceback.print_exc()
print("imread error trying to load file: %s " % fullpath)
q_out.put((i, None, item))
return
if img is None:
print("imread read blank (None) image for file: %s" % fullpath)
q_out.put((i, None, item))
return
if args.center_crop:
if img.shape[0] > img.shape[1]:
margin = (img.shape[0] - img.shape[1]) // 2
img = img[margin : margin + img.shape[1], :]
else:
margin = (img.shape[1] - img.shape[0]) // 2
img = img[:, margin : margin + img.shape[0]]
if args.resize:
if img.shape[0] > img.shape[1]:
newsize = (args.resize, img.shape[0] * args.resize // img.shape[1])
else:
newsize = (img.shape[1] * args.resize // img.shape[0], args.resize)
img = cv2.resize(img, newsize)
try:
s = mx.recordio.pack_img(
header, img, quality=args.quality, img_fmt=args.encoding
)
q_out.put((i, s, item))
except Exception as e:
traceback.print_exc()
print("pack_img error on file: %s" % fullpath, e)
q_out.put((i, None, item))
return
def read_worker(args, q_in, q_out):
"""Function that will be spawned to fetch the image
from the input queue and put it back to output queue.
Parameters
----------
args: object
q_in: queue
q_out: queue
"""
while True:
deq = q_in.get()
if deq is None:
break
i, item = deq
image_encode(args, i, item, q_out)
def write_worker(q_out, fname, working_dir):
"""Function that will be spawned to fetch processed image
from the output queue and write to the .rec file.
Parameters
----------
q_out: queue
fname: string
working_dir: string
"""
pre_time = time.time()
count = 0
fname = os.path.basename(fname)
fname_rec = os.path.splitext(fname)[0] + ".rec"
fname_idx = os.path.splitext(fname)[0] + ".idx"
record = mx.recordio.MXIndexedRecordIO(
os.path.join(working_dir, fname_idx), os.path.join(working_dir, fname_rec), "w"
)
buf = {}
more = True
while more:
deq = q_out.get()
if deq is not None:
i, s, item = deq
buf[i] = (s, item)
else:
more = False
while count in buf:
s, item = buf[count]
del buf[count]
if s is not None:
record.write_idx(item[0], s)
if count % 1000 == 0:
cur_time = time.time()
print("time:", cur_time - pre_time, " count:", count)
pre_time = cur_time
count += 1
def parse_args():
"""Defines all arguments.
Returns
-------
args object that contains all the params
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Create an image list or \
make a record database by reading from an image list",
)
parser.add_argument("prefix", help="prefix of input/output lst and rec files.")
parser.add_argument("root", help="path to folder containing images.")
cgroup = parser.add_argument_group("Options for creating image lists")
cgroup.add_argument(
"--list",
action="store_true",
help="If this is set im2rec will create image list(s) by traversing root folder\
and output to <prefix>.lst.\
Otherwise im2rec will read <prefix>.lst and create a database at <prefix>.rec",
)
cgroup.add_argument(
"--exts",
nargs="+",
default=[".jpeg", ".jpg", ".png"],
help="list of acceptable image extensions.",
)
cgroup.add_argument("--chunks", type=int, default=1, help="number of chunks.")
cgroup.add_argument(
"--train-ratio",
type=float,
default=1.0,
help="Ratio of images to use for training.",
)
cgroup.add_argument(
"--test-ratio",
type=float,
default=0,
help="Ratio of images to use for testing.",
)
cgroup.add_argument(
"--recursive",
action="store_true",
help="If true recursively walk through subdirs and assign an unique label\
to images in each folder. Otherwise only include images in the root folder\
and give them label 0.",
)
cgroup.add_argument(
"--no-shuffle",
dest="shuffle",
action="store_false",
help="If this is passed, \
im2rec will not randomize the image order in <prefix>.lst",
)
rgroup = parser.add_argument_group("Options for creating database")
rgroup.add_argument(
"--pass-through",
action="store_true",
help="whether to skip transformation and save image as is",
)
rgroup.add_argument(
"--resize",
type=int,
default=0,
help="resize the shorter edge of image to the newsize, original images will\
be packed by default.",
)
rgroup.add_argument(
"--center-crop",
action="store_true",
help="specify whether to crop the center image to make it rectangular.",
)
rgroup.add_argument(
"--quality",
type=int,
default=95,
help="JPEG quality for encoding, 1-100; or PNG compression for encoding, 1-9",
)
rgroup.add_argument(
"--num-thread",
type=int,
default=1,
help="number of thread to use for encoding. order of images will be different\
from the input list if >1. the input list will be modified to match the\
resulting order.",
)
rgroup.add_argument(
"--color",
type=int,
default=1,
choices=[-1, 0, 1],
help="specify the color mode of the loaded image.\
1: Loads a color image. Any transparency of image will be neglected. It is the default flag.\
0: Loads image in grayscale mode.\
-1:Loads image as such including alpha channel.",
)
rgroup.add_argument(
"--encoding",
type=str,
default=".jpg",
choices=[".jpg", ".png"],
help="specify the encoding of the images.",
)
rgroup.add_argument(
"--pack-label",
action="store_true",
help="Whether to also pack multi dimensional label in the record file",
)
args = parser.parse_args()
args.prefix = os.path.abspath(args.prefix)
args.root = os.path.abspath(args.root)
return args
if __name__ == "__main__":
args = parse_args()
# if the '--list' is used, it generates .lst file
if args.list:
make_list(args)
# otherwise read .lst file to generates .rec file
else:
if os.path.isdir(args.prefix):
working_dir = args.prefix
else:
working_dir = os.path.dirname(args.prefix)
files = [
os.path.join(working_dir, fname)
for fname in os.listdir(working_dir)
if os.path.isfile(os.path.join(working_dir, fname))
]
count = 0
for fname in files:
if fname.startswith(args.prefix) and fname.endswith(".lst"):
print("Creating .rec file from", fname, "in", working_dir)
count += 1
image_list = read_list(fname)
# -- write_record -- #
if args.num_thread > 1 and multiprocessing is not None:
q_in = [multiprocessing.Queue(1024) for i in range(args.num_thread)]
q_out = multiprocessing.Queue(1024)
# define the process
read_process = [
multiprocessing.Process(
target=read_worker, args=(args, q_in[i], q_out)
)
for i in range(args.num_thread)
]
# process images with num_thread process
for p in read_process:
p.start()
# only use one process to write .rec to avoid race-condtion
write_process = multiprocessing.Process(
target=write_worker, args=(q_out, fname, working_dir)
)
write_process.start()
# put the image list into input queue
for i, item in enumerate(image_list):
q_in[i % len(q_in)].put((i, item))
for q in q_in:
q.put(None)
for p in read_process:
p.join()
q_out.put(None)
write_process.join()
else:
print(
"multiprocessing not available, fall back to single threaded encoding"
)
try:
import Queue as queue
except ImportError:
import queue
q_out = queue.Queue()
fname = os.path.basename(fname)
fname_rec = os.path.splitext(fname)[0] + ".rec"
fname_idx = os.path.splitext(fname)[0] + ".idx"
record = mx.recordio.MXIndexedRecordIO(
os.path.join(working_dir, fname_idx),
os.path.join(working_dir, fname_rec),
"w",
)
cnt = 0
pre_time = time.time()
for i, item in enumerate(image_list):
image_encode(args, i, item, q_out)
if q_out.empty():
continue
_, s, _ = q_out.get()
record.write_idx(item[0], s)
if cnt % 1000 == 0:
cur_time = time.time()
print("time:", cur_time - pre_time, " count:", cnt)
pre_time = cur_time
cnt += 1
if not count:
print("Did not find and list file with prefix %s" % args.prefix)
|
__init__.py
|
from robothor_challenge.startx import startx
import ai2thor.controller
import ai2thor.util.metrics
import json
import threading
import yaml
import os
import sys
import logging
logger = logging.getLogger(__name__)
ch = logging.StreamHandler(sys.stdout)
ch.flush = sys.stdout.flush
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(name)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
ALLOWED_ACTIONS = ['MoveAhead', 'MoveBack', 'RotateRight', 'RotateLeft', 'LookUp', 'LookDown', 'Stop']
class RobothorChallenge:
def __init__(self, agent):
self.agent = agent
self.load_config()
if self.dataset_split == 'test':
self.controller = ai2thor.controller.Controller(start_unity=False, port=8200, width=self.config['width'], height=self.config['height'], **self.config['initialize'])
else:
self.setup_env()
self.controller = ai2thor.controller.Controller(width=self.config['width'], height=self.config['height'], **self.config['initialize'])
@property
def dataset_split(self):
if 'CHALLENGE_SPLIT' not in os.environ:
raise ValueError("CHALLENGE_SPLIT not in environment")
return os.environ['CHALLENGE_SPLIT']
def load_config(self):
if 'CHALLENGE_CONFIG' not in os.environ:
raise ValueError("CHALLENGE_CONFIG not in environment")
logger.info("Loading configuration from: {CHALLENGE_CONFIG}".format(**os.environ))
split_path = os.path.join(os.path.dirname(os.environ['CHALLENGE_CONFIG']), self.dataset_split + ".json")
logger.info("Loading split: {path}".format(path=split_path))
with open(os.environ['CHALLENGE_CONFIG']) as f:
self.config = yaml.safe_load(f.read())
with open(split_path) as f:
self.episodes = json.loads(f.read())
def inference(self):
episode_results = []
for e in self.episodes:
episode_result = dict(shortest_path= e['shortest_path'], success=False, path=[])
episode_results.append(episode_result)
logger.info("Task Start id:{id} scene:{scene} target_object:{object_id} initial_position:{initial_position} rotation:{initial_orientation}".format(**e))
self.controller.initialization_parameters['robothorChallengeEpisodeId'] = e['id']
self.controller.reset(e['scene'])
teleport_action = dict(action='TeleportFull')
teleport_action.update(e['initial_position'])
self.controller.step(action=teleport_action)
self.controller.step(action=dict(action='Rotate', rotation=dict(y=e['initial_orientation'], horizon=0.0)))
total_steps = 0
episode_result['path'].append(self.controller.last_event.metadata['agent']['position'])
self.agent.reset()
stopped = False
while total_steps < self.config['max_steps'] and not stopped:
total_steps +=1
event = self.controller.last_event
# must clear out metadata during inference
event.metadata.clear()
action = self.agent.act(dict(object_goal=e['object_type'], depth=None, rgb=event.frame))
if action not in ALLOWED_ACTIONS:
raise ValueError("Invalid action: {action}".format(action=action))
logger.info("Agent action: {action}".format(action=action))
event = self.controller.step(action=action)
stopped = action == 'Stop'
episode_result['path'].append(self.controller.last_event.metadata['agent']['position'])
if stopped:
simobj = self.controller.last_event.get_object(e['object_id'])
episode_result['success'] = simobj['visible']
spl = ai2thor.util.metrics.compute_spl(episode_results)
logger.info("Total Episodes: {episode_count} SPL:{spl}".format(episode_count=len(episode_results), spl=spl))
return spl
def setup_env(self):
if 'DISPLAY' not in os.environ:
xthread = threading.Thread(target=startx)
xthread.daemon = True
xthread.start()
import time
# XXX change this to use xdpyinfo
time.sleep(4)
|
GUI_output_redirection.py
|
from __future__ import print_function, unicode_literals
import sys
import os
if 'pythonw.exe' in sys.executable.lower():
import subprocess
# Re-launch with python.exe and hidden console window:
CREATE_NO_WINDOW = 1 << 27
cmd = [sys.executable.lower().replace('pythonw.exe', 'python.exe')] + sys.argv
proc = subprocess.Popen(cmd, creationflags=CREATE_NO_WINDOW)
sys.exit(0)
import os
from zprocess.process_tree import OutputInterceptor
from qtutils.outputbox import OutputBox
from qtutils.qt import QtGui, QtWidgets
import ctypes
if os.name == 'nt':
libc = ctypes.cdll.msvcrt
else:
libc = ctypes.CDLL(None)
import multiprocessing
def regular_print_stdout(button):
print("hello from a regular print to stdout")
# print(" stdout is:", sys.stdout, "fileno:", sys.stdout.fileno())
# print(" orig stdout is:", sys.__stdout__, "fileno:", sys.__stdout__.fileno())
def regular_print_stderr(button):
print("hello from a regular print to stderr", file=sys.stderr)
# print(" stderr is:", sys.stderr, "fileno:", sys.stderr.fileno(), file=sys.stderr)
# print(" orig stderr is:", sys.__stderr__, "fileno:", sys.__stderr__.fileno(), file=sys.stderr)
def libc_printf(button):
libc.printf(b"hello from printf to stdout via libc\n")
def echo_hello(button):
os.system("echo hello from echo via os.system")
def multiprocessing_Process(button):
if os.name != 'nt' and sys.version_info.major == 2:
msg = (
"Cannot fork processes when using Qt. This test only works on Windows "
+ "or Python 3 where forking can be disabled."
)
print(msg, file=sys.stderr)
return
if multiprocessing.get_start_method(True) != 'spawn':
multiprocessing.set_start_method('spawn')
proc = multiprocessing.Process(
target=print, args=('hello from print() in a multiprocessing.Process()',)
)
proc.start()
proc.join()
def main():
app = QtWidgets.QApplication(sys.argv)
window = QtWidgets.QWidget()
layout = QtWidgets.QVBoxLayout(window)
outputbox = OutputBox(layout)
funcs = [
regular_print_stdout,
regular_print_stderr,
libc_printf,
echo_hello,
multiprocessing_Process,
]
for f in funcs:
button = QtWidgets.QPushButton(f.__name__)
button.clicked.connect(f)
layout.addWidget(button)
redirect_stdout = OutputInterceptor('localhost', outputbox.port)
    redirect_stderr = OutputInterceptor('localhost', outputbox.port, streamname='stderr')
redirect_stdout.connect()
    redirect_stderr.connect()
window.resize(800, 500)
window.show()
app.exec_()
redirect_stdout.disconnect()
    redirect_stderr.disconnect()
outputbox.shutdown()
if __name__ == '__main__':
main()
|
flair.py
|
import platform
import time
from http.client import HTTPConnection
from threading import Thread
import webview
from apis.input_methods.mouse_and_keyboard_listener import start_listeners
from app import run_app
error = False
status = False
port = 43968
operating_system = str(platform.system()).lower()
def get_user_agent(window):
result = window.evaluate_js(r"""
// Return user agent
'User agent:\n' + navigator.userAgent;
""")
print(result)
def is_server_running(url, max_wait):
global error
global status
global port
time.sleep(0.4)
start = time.time()
while True:
try:
end = time.time()
if end - start > max_wait:
return False
time.sleep(0.1)
connection = HTTPConnection(url, port)
            connection.request("GET", "/")
            response = connection.getresponse()
if response is not None:
status = response.status
return True
except Exception as e:
error = e
print("Server not yet running")
def main():
global port
    url, max_wait = 'localhost', 90  # wait up to 90 seconds for the server to come up
link = "http://" + url + ":" + str(port)
# Starting Server
t = Thread(target=start_listeners, args=())
t.daemon = True
t.start()
print("Listeners started")
server_thread = Thread(target=run_app, args=(url, port))
server_thread.daemon = True
server_thread.start()
# Waiting for server to load content
if is_server_running(url, max_wait):
print("Server started")
# webbrowser.open(link, new=2)
# while server_thread.is_alive():
# time.sleep(0.1)
window = webview.create_window(
"Flair App", link, width=1000, height=522)
# If you want to inspect element just go to localhost url in browser
webview.start(get_user_agent, window, debug=True)
else:
print("Server failed to start with a max wait time of " + str(max_wait))
if status is not False:
print("Status was " + str(status))
if error is not False:
print("Exception was " + str(error))
print("Server has exited")
if __name__ == '__main__':
main()
|
pput.py
|
"""Multipart parallel s3 upload.
usage
pput bucket_name/filename
"""
from queue import Queue
from io import StringIO
from collections import namedtuple
from threading import Thread
import argparse
import base64
import binascii
import functools
import hashlib
import logging
import json
import os
import sys
import boto3
from zfs3backup.config import get_config
Result = namedtuple('Result', ['success', 'traceback', 'index', 'md5', 'etag'])
CFG = get_config()
VERB_QUIET = 0
VERB_NORMAL = 1
VERB_PROGRESS = 2
session = boto3.Session(profile_name=CFG['PROFILE'])
if CFG['ENDPOINT'] == 'aws':
s3 = session.resource('s3') # boto3.resource makes an intelligent decision with the default url
else:
s3 = session.resource('s3', endpoint_url=CFG['ENDPOINT'])
def multipart_etag(digests):
"""
Computes etag for multipart uploads
:type digests: list of hex-encoded md5 sums (string)
:param digests: The list of digests for each individual chunk.
:rtype: string
:returns: The etag computed from the individual chunks.
"""
etag = hashlib.md5()
count = 0
for dig in digests:
count += 1
etag.update(binascii.a2b_hex(dig))
return f"'{etag.hexdigest()}-{count}'"
def parse_size(size):
    if isinstance(size, int):
return size
size = size.strip().upper()
last = size[-1]
if last == 'T':
return int(size[:-1]) * 1024 * 1024 * 1024 * 1024
if last == 'G':
return int(size[:-1]) * 1024 * 1024 * 1024
if last == 'M':
return int(size[:-1]) * 1024 * 1024
if last == 'K':
return int(size[:-1]) * 1024
return int(size)
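# Hedged usage sketch (not part of the original module): parse_size accepts plain byte
# counts or suffixed sizes; the specific values below are arbitrary examples.
def _example_parse_size():
    assert parse_size(4096) == 4096                  # integers pass through unchanged
    assert parse_size("10M") == 10 * 1024 * 1024
    assert parse_size(" 1g ") == 1024 ** 3           # whitespace and case are normalized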
class StreamHandler(object):
def __init__(self, input_stream, chunk_size=5*1024*1024):
self.input_stream = input_stream
self.chunk_size = chunk_size
self._partial_chunk = b""
self._eof_reached = False
@property
def finished(self):
return self._eof_reached and len(self._partial_chunk) == 0
def get_chunk(self):
"""Return complete chunks or None if EOF reached"""
while not self._eof_reached:
read = self.input_stream.read(self.chunk_size - len(self._partial_chunk))
if len(read) == 0:
self._eof_reached = True
self._partial_chunk += read
if len(self._partial_chunk) == self.chunk_size or self._eof_reached:
chunk = self._partial_chunk
self._partial_chunk = b""
return chunk
# else:
# print "partial", len(self._partial_chunk)
def retry(times=int(CFG['MAX_RETRIES'])):
def decorator(func):
@functools.wraps(func)
def wrapped(*a, **kwa):
for attempt in range(1, times+1):
try:
return func(*a, **kwa)
except: # pylint: disable=bare-except
if attempt >= times:
raise
logging.exception(f"Failed to upload part attempt {attempt} of {times}")
return wrapped
return decorator
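# Hedged usage sketch (not part of the original module): any callable decorated with
# retry() is re-invoked until it succeeds or the attempt budget runs out, re-raising only
# on the final failure. The helper name, path argument, and retry count are illustrative
# assumptions.
@retry(times=3)
def _example_read_first_byte(path):
    # A transient OSError raised here would be retried up to three times.
    with open(path, "rb") as handle:
        return handle.read(1)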
class WorkerCrashed(Exception):
pass
class UploadWorker(object):
def __init__(self, bucket, multipart, inbox, outbox):
self.bucket = bucket
self.inbox = inbox
self.outbox = outbox
self.multipart = multipart
self._thread = None
self.log = logging.getLogger('UploadWorker')
@retry()
def upload_part(self, index, chunk):
md5 = hashlib.md5(chunk)
part = s3.MultipartUploadPart(
self.multipart.bucket_name,
self.multipart.object_key,
self.multipart.id,
index
)
response = part.upload(
Body = chunk,
ContentMD5 = base64.b64encode(md5.digest()).decode()
)
if response['ResponseMetadata']['HTTPStatusCode'] != 200:
raise UploadException(response['ResponseMetadata'])
return md5.hexdigest(), response[u'ETag']
def start(self):
self._thread = Thread(target=self.main_loop)
self._thread.daemon = True
self._thread.start()
return self
def is_alive(self):
return self._thread.is_alive()
def main_loop(self):
while True:
index, chunk = self.inbox.get()
md5, etag = self.upload_part(index, chunk)
self.outbox.put(Result(
success=True,
md5=md5,
traceback=None,
index=index,
etag=etag
))
class UploadException(Exception):
pass
class UploadSupervisor(object):
'''Reads chunks and dispatches them to UploadWorkers'''
def __init__(self, stream_handler, name, bucket, headers=None, metadata=None, verbosity=1):
self.stream_handler = stream_handler
self.name = name
self.bucket = bucket
self.inbox = None
self.outbox = None
self.multipart = None
self.results = [] # beware s3 multipart indexes are 1 based
self._pending_chunks = 0
self._verbosity = verbosity
self._workers = None
self._headers = {} if headers is None else headers
self._metadata = {} if metadata is None else metadata
self.obj = None
def _start_workers(self, concurrency, worker_class):
work_queue = Queue(maxsize=concurrency)
result_queue = Queue()
self.outbox = work_queue
self.inbox = result_queue
workers = [
worker_class(
bucket=self.bucket,
multipart=self.multipart,
inbox=work_queue,
outbox=result_queue,
).start()
for _ in range(concurrency)]
return workers
def _begin_upload(self):
if self.multipart is not None:
raise AssertionError("multipart upload already started")
self.obj = self.bucket.Object(self.name)
self.multipart = self.obj.initiate_multipart_upload(
ACL="bucket-owner-full-control",
Metadata=self._metadata,
**self._headers
)
def _finish_upload(self):
if len(self.results) == 0:
self.multipart.abort()
raise UploadException("Error: Can't upload zero bytes!")
sorted_results = sorted(
[{'PartNumber': r[0], 'ETag': r[2]} for r in self.results],
key = lambda x: x['PartNumber']
)
return self.multipart.complete(
MultipartUpload={
'Parts': sorted_results
}
)
def _handle_result(self):
"""Process one result. Block until one is available
"""
result = self.inbox.get()
if result.success:
if self._verbosity >= VERB_PROGRESS:
sys.stderr.write(f"uploaded chunk {result.index}\n")
self.results.append((result.index, result.md5, result.etag))
self._pending_chunks -= 1
else:
raise result.traceback
def _handle_results(self):
"""Process any available result
Doesn't block.
"""
while not self.inbox.empty():
self._handle_result()
def _send_chunk(self, index, chunk):
"""Send the current chunk to the workers for processing.
Called when the _partial_chunk is complete.
Blocks when the outbox is full.
"""
self._pending_chunks += 1
self.outbox.put((index, chunk))
def _check_workers(self):
"""Check workers are alive, raise exception if any is dead."""
for worker in self._workers:
if not worker.is_alive():
raise WorkerCrashed()
def main_loop(self, concurrency=4, worker_class=UploadWorker):
chunk_index = 0
self._begin_upload()
self._workers = self._start_workers(concurrency, worker_class=worker_class)
while self._pending_chunks or not self.stream_handler.finished:
self._check_workers() # raise exception and stop everything if any worker has crashed
# print "main_loop p:{} o:{} i:{}".format(
# self._pending_chunks, self.outbox.qsize(), self.inbox.qsize())
# consume results first as this is a quick operation
self._handle_results()
chunk = self.stream_handler.get_chunk()
if chunk:
# s3 multipart index is 1 based, increment before sending
chunk_index += 1
self._send_chunk(chunk_index, chunk)
self._finish_upload()
self.results.sort()
return multipart_etag(r[1] for r in self.results)
def parse_metadata(metadata):
headers = {}
for meta in metadata:
try:
key, val = meta.split('=', 1)
except ValueError:
sys.stderr.write(f"malformed metadata '{meta}'; should be key=value\n")
sys.exit(1)
headers[key] = val
return headers
def optimize_chunksize(estimated):
max_parts = 9999 # S3 requires part indexes to be between 1 and 10000
# part size has to be at least 5MB (BK I tried this up to 10MB and dropped the concurrency)
    estimated = estimated * 1.05  # just to be on the safe side, overestimate the total size to upload
min_part_size = max(estimated / max_parts, 10*1024*1024)
return int(min_part_size)
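# Hedged example (not part of the original module): for an estimated 200 GiB stream,
# optimize_chunksize() returns a part size that keeps the upload under S3's part-count
# limit while respecting the 10 MiB floor above; the estimate is illustrative.
def _example_optimize_chunksize():
    estimated_bytes = 200 * 1024 ** 3
    return optimize_chunksize(estimated_bytes)  # roughly 21-22 MiB per part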
def parse_args():
parser = argparse.ArgumentParser(
description='Read data from stdin and upload it to s3',
epilog=('All optional args have a configurable default. '
'Order of precedence is command line args then '
'environment variables then user config ~/.zfs3backup.cfg'
' then default config.'),
)
parser.add_argument('name', help='name of S3 key')
chunk_group = parser.add_mutually_exclusive_group()
chunk_group.add_argument('-s', '--chunk-size',
dest='chunk_size',
default=CFG['CHUNK_SIZE'],
help='multipart chunk size, eg: 10M, 1G')
chunk_group.add_argument('--estimated',
help='Estimated upload size')
parser.add_argument('--file-descriptor',
dest='file_descriptor',
type=int,
help=('read data from this fd instead of stdin; '
'useful if you want an [i]pdb session to use stdin\n'
'`pput --file-descriptor 3 3<./file`'))
parser.add_argument('--concurrency',
dest='concurrency',
type=int,
default=int(CFG['CONCURRENCY']),
help='number of worker threads to use')
parser.add_argument('--metadata',
action='append',
dest='metadata',
default=list(),
help='Metatada in key=value form')
parser.add_argument('--storage-class', default=CFG['S3_STORAGE_CLASS'],
dest='storage_class', help='The S3 storage class. Defaults to STANDARD_IA.')
quiet_group = parser.add_mutually_exclusive_group()
quiet_group.add_argument('--progress',
dest='progress',
action='store_true',
help=('show progress report'))
quiet_group.add_argument('--quiet',
dest='quiet',
action='store_true',
help=('don\'t emit any output at all'))
return parser.parse_args()
def main():
args = parse_args()
    input_fd = os.fdopen(args.file_descriptor, mode='rb') if args.file_descriptor else sys.stdin.buffer
if args.estimated is not None:
chunk_size = optimize_chunksize(parse_size(args.estimated))
else:
chunk_size = parse_size(args.chunk_size)
stream_handler = StreamHandler(input_fd, chunk_size=chunk_size)
bucket = s3.Bucket(CFG['BUCKET'])
# verbosity: 0 totally silent, 1 default, 2 show progress
verbosity = 0 if args.quiet else 1 + int(args.progress)
metadata = parse_metadata(args.metadata)
headers = {}
headers["StorageClass"] = args.storage_class
sup = UploadSupervisor(
stream_handler,
args.name,
bucket=bucket,
verbosity=verbosity,
headers=headers,
metadata=metadata
)
if verbosity >= VERB_NORMAL:
sys.stderr.write(f"starting upload to {CFG['BUCKET']}/{args.name} with chunksize"
f" {(chunk_size/(1024*1024.0))}M using {args.concurrency} workers\n")
try:
etag = sup.main_loop(concurrency=args.concurrency)
except UploadException as excp:
sys.stderr.write(f"{excp}\n")
return 1
if verbosity >= VERB_NORMAL:
print(json.dumps({'status': 'success', 'etag': etag}))
return 0
if __name__ == '__main__':
sys.exit(main())
|
timeout.py
|
from threading import Thread
import functools
def timeout(timeout_length, message=None):
""" Creates timeout decorator to be attached to functions """
def deco(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
default_message = 'function [%s] timeout [%s seconds] exceeded!' % (func.__name__, timeout_length)
res = [Exception(message or default_message)]
def new_func():
try:
res[0] = func(*args, **kwargs)
except Exception as e:
res[0] = e
t = Thread(target=new_func)
t.daemon = True
try:
t.start()
t.join(timeout_length)
except Exception as je:
print('error starting thread')
raise je
ret = res[0]
if isinstance(ret, BaseException):
raise ret
return ret
return wrapper
return deco
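# Hedged usage sketch (not part of the original module): attaching the decorator to a
# function; the 2-second limit and the helper below are illustrative assumptions.
@timeout(2, message="_example_slow_square timed out")
def _example_slow_square(x):
    import time
    time.sleep(0.5)  # finishes within the limit, so the result is returned normally
    return x * x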
|
app.py
|
# Run this before anything else: checks for command line arguments
# Default: no arguments, liked when using Makefile (normal API backend running)
import argparse
parser = argparse.ArgumentParser()
# To turn option into flag, use action= parameter: calls a predefined function
# store_true is one of many default functions for action=, later can check args.test = True
parser.add_argument('-t', '--test', help='Use a test database, to protect live data.', action='store_true')
parser.add_argument('-d', '--days-before', help='Specify # of days to go back in time for past events.', type=int)
parser.add_argument('-c', '--clear', help='Clear out old database data to start anew.', action='store_true')
parser.add_argument('-p', '--prod', help='Run production version of Mappening backend', action='store_true')
args = parser.parse_args()
# There's an 'app' Flask object in mappening's __init__.py
# App object also links to blueprints to other modules
from mappening import app, db
from mappening.models import Address
from mappening.utils import scheduler
from mappening.api.utils.events import event_collector
from mappening.api.utils.eventbrite import eb_event_collector, eb_event_processor
from flask import Flask, jsonify, request
import datetime
from threading import Thread
# Used to check app is running, visit http://api.mappening.io:5000/
@app.route('/')
def index():
return "The Mappening API is running!"
# Sample database route
@app.route('/db')
def test():
return jsonify(addresses=[address.serialize() for address in Address.query.all()])
# https://www.jordanbonser.com/flask-session-timeout.html
# @app.before_request
# def before_request():
# flask.session.permanent = True
# app.permanent_session_lifetime = datetime.timedelta(minutes=20)
# Runs threads to periodically update events. Also updates database.
# For dev purposes, only call this when we are in prod.
def thread_scheduler(args):
# Another thread to run the periodic events update, daily
event_update_thread = Thread(target = scheduler.event_thread_func)
event_update_thread.start()
code_update_date = "6/1/18"
print("Updated on: {0}".format(code_update_date))
print("UPDATE EVENTS FIRST...\n")
dbit = args.days_before
# pass in args from command line, need to check it's there
if not dbit or dbit < 1:
dbit = 0
event_collector.update_ucla_events_database(use_test=args.test,
days_back_in_time=dbit,
clear_old_db=args.clear)
# Flask defaults to port 5000
# If debug is true, runs 2 instances at once (so two copies of all threads)
if __name__ == "__main__":
print('Arguments passed: {0}'.format(args))
if not args.prod:
print("\n~~~~~~~~~~~~~~~~~~~\n~~~ IN DEV MODE ~~~\n~~~~~~~~~~~~~~~~~~~\n")
app.run(host='0.0.0.0', debug=True)
else:
print("\n~~~~~~~~~~~~~~~~~~~~\n~~~ IN PROD MODE ~~~\n~~~~~~~~~~~~~~~~~~~~\n")
# TODO: Breaks EB deployment. cron jobs?
thread_scheduler(args)
app.run(host='0.0.0.0', debug=False)
|
xla_device_utils.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import importlib
import queue as q
from multiprocessing import Process, Queue
import torch
TORCHXLA_AVAILABLE = importlib.util.find_spec("torch_xla") is not None
if TORCHXLA_AVAILABLE:
import torch_xla.core.xla_model as xm
else:
xm = None
def inner_f(queue, func, *args, **kwargs): # pragma: no cover
try:
queue.put(func(*args, **kwargs))
except Exception:
import traceback
traceback.print_exc()
queue.put(None)
def pl_multi_process(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
queue = Queue()
proc = Process(target=inner_f, args=(queue, func, *args), kwargs=kwargs)
proc.start()
proc.join(10)
try:
return queue.get_nowait()
except q.Empty:
return False
return wrapper
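# Hedged usage sketch (not part of the original file): wrapping a module-level probe so
# that a hang inside it cannot block the caller beyond the 10-second join above. The
# probe name and return value are illustrative assumptions; the wrapped call returns
# False if the subprocess produces no result in time.
def _example_probe():
    return True

_example_guarded_probe = pl_multi_process(_example_probe)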
class XLADeviceUtils:
"""Used to detect the type of XLA device"""
TPU_AVAILABLE = None
@staticmethod
def _fetch_xla_device_type(device: torch.device) -> str:
"""
Returns XLA device type
Args:
device: (:class:`~torch.device`): Accepts a torch.device type with a XLA device format i.e xla:0
Return:
Returns a str of the device hardware type. i.e TPU
"""
if xm is not None:
return xm.xla_device_hw(device)
@staticmethod
def _is_device_tpu() -> bool:
"""
Check if device is TPU
Return:
A boolean value indicating if the xla device is a TPU device or not
"""
if xm is not None:
device = xm.xla_device()
device_type = XLADeviceUtils._fetch_xla_device_type(device)
return device_type == "TPU"
@staticmethod
def tpu_device_exists() -> bool:
"""
Public method to check if TPU is available
Return:
A boolean value indicating if a TPU device exists on the system
"""
if XLADeviceUtils.TPU_AVAILABLE is None and TORCHXLA_AVAILABLE:
XLADeviceUtils.TPU_AVAILABLE = pl_multi_process(XLADeviceUtils._is_device_tpu)()
return XLADeviceUtils.TPU_AVAILABLE
|
LoginFaceManual.py
|
from tkinter import Tk, PhotoImage, Button, Label, StringVar, Entry
from threading import Thread as Process
import cv2
import time
from face_recognition import face_encodings, compare_faces, load_image_file
import os
from PIL import ImageTk, Image
import pickle
def pascheck(idt, past):
if idt == "StartCode@@@" and past == "12121@!#":
root.after(1000, root.destroy)
#################################### Your Code after login ###################################
sroot = Tk()
sroot.title("login successfull")
sroot.geometry("300x200")
sroot.title("Login")
sroot.attributes('-alpha', 0.8)
sroot.configure(background="black")
idlab = Label(sroot, text="Hello User", fg="white", bg="black", font=("Segoe UI", 18))
idlab.place(relx=0.2, rely=0.3)
sroot.mainloop()
##############################################################################################
else:
print("Unauthorized Access")
def paschecklog(idt, past):
global auth
if idt == "ary" and past == "1234":
auth = True
def loginGui():
global idv
global pas
global root
global auth
root = Tk()
root.geometry("300x200")
root.title("Login")
root.attributes('-alpha', 0.8)
root.configure(background="black")
root.iconify()
idv = StringVar()
pas = StringVar()
idlab = Label(root, text="Id", fg="white", bg="black", font=("Segoe UI", 12))
identry = Entry(root, textvariable=idv)
paslab = Label(root, text="Passcode", fg="white", bg="black", font=("Segoe UI", 12))
pasentry = Entry(root, textvariable=pas)
identry.place(relx=0.4, rely=0.3)
idlab.place(relx=0.2, rely=0.3)
pasentry.place(relx=0.4, rely=0.5)
paslab.place(relx=0.1, rely=0.5)
    bg = PhotoImage(file="./sign_button.png")
signin = Button(root, image=bg, bg="black", bd=0, command=lambda: paschecklog(idv.get(), pas.get()))
signin.image = bg
signin.place(rely=0.7, relx=0.36)
root.mainloop()
def facelog():
global auth
path = "IMGD"
try:
with open(r'./IMGD/img.dat', 'rb') as f:
known_image = pickle.load(f)
except:
known_image = load_image_file(path+"/model.png")
f = open(r'./IMGD/img.dat', 'wb')
pickle.dump(known_image,f)
f.close()
encoding = face_encodings(known_image)[0]
cap = cv2.VideoCapture(0,cv2.CAP_DSHOW)
while not auth:
success, img = cap.read()
imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
try:
unknown_image = face_encodings(imgS)[0]
results = compare_faces([encoding], unknown_image, tolerance=0.5)
except:
results = [False]
if results[0]:
break
cv2.waitKey(1)
cap.release()
cv2.destroyAllWindows()
pascheck('StartCode@@@', '12121@!#')
if __name__ == "__main__":
ad = time.time()
if not os.path.exists('IMGD'):
os.makedirs('IMGD')
if not os.listdir('./IMGD'):
root = Tk()
root.geometry("700x600")
root.title("Create Face Model")
frame = Label(root)
frame.place(x=0, y=10, relwidth=1, relheight=1)
text = StringVar()
namel = Label(root, textvariable=text)
namel.place(x=0, rely=0)
create = Button(root, text="Create Face model", bg="black", fg="white", bd=0,
command=lambda: cv2.imwrite(os.getcwd() + "/IMGD/model.png", img1))
create.place(rely=0.93, relx=0.44)
cap = cv2.VideoCapture(0,cv2.CAP_DSHOW)
while True:
success, img = cap.read()
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img1 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = ImageTk.PhotoImage(Image.fromarray(img))
frame['image'] = img
root.update()
auth = False
    a = Process(target=facelog, args=())
a.start()
loginGui()
print(round(time.time()-ad, 2),"s Taken to login")
|
uhdtodrf.py
|
#!python
# ----------------------------------------------------------------------------
# Copyright (c) 2020 Massachusetts Institute of Technology (MIT)
# All rights reserved.
#
# Distributed under the terms of the BSD 3-clause license.
#
# The full license is in the LICENSE file, distributed with this software.
# ----------------------------------------------------------------------------
"""Directly record to Digital RF using UHD python API."""
import argparse
import ast
import math
import os
import re
import sys
import threading
import time
from datetime import datetime, timedelta
from fractions import Fraction
from itertools import chain, cycle, islice, repeat
from subprocess import call
from textwrap import dedent, fill, TextWrapper
import digital_rf as drf
import numpy as np
import pytz
import scipy.signal as sig
from six.moves import queue
import uhd
# UHD globals not included in python uhd wrapper.
ALL_MBOARDS = 18446744073709551615
ALL_LOS = "all"
ALL_GAINS = ""
ALL_CHANS = 18446744073709551615
def equiripple_lpf(cutoff=0.8, transition_width=0.2, attenuation=80, pass_ripple=None):
"""Get taps for an equiripple low-pass filter.
All frequencies given must be normalized in the range [0, 1], with 1
corresponding to the Nyquist frequency (Fs/2).
Parameters
----------
cutoff : float
Normalized cutoff frequency (beginning of transition band).
transition_width : float
Normalized width (in frequency) of transition region from pass band to
stop band.
attenuation : float
Attenuation of the stop band in dB.
pass_ripple : float | None
Maximum ripple in the pass band in dB. If None, the attenuation value
is used.
Returns
-------
taps : array_like
Type I (even order) FIR low-pass filter taps meeting the given
requirements.
"""
if pass_ripple is None:
pass_ripple = attenuation
if cutoff <= 0:
errstr = "Cutoff ({0}) must be strictly greater than zero."
raise ValueError(errstr.format(cutoff))
if transition_width <= 0:
errstr = "Transition width ({0}) must be strictly greater than zero."
raise ValueError(errstr.format(transition_width))
if cutoff + transition_width >= 1:
errstr = (
"Cutoff ({0}) + transition width ({1}) must be strictly less than"
" one, but it is {2}."
).format(cutoff, transition_width, cutoff + transition_width)
raise ValueError(errstr)
# pm_remez arguments
bands = [0, cutoff, cutoff + transition_width, 1]
ampl = [1, 0]
error_weight = [10 ** ((pass_ripple - attenuation) / 20.0), 1]
# get estimate for the filter order (Oppenheim + Schafer 2nd ed, 7.104)
M = ((attenuation + pass_ripple) / 2.0 - 13) / 2.324 / (np.pi * transition_width)
# round up to nearest even-order (Type I) filter
M = int(np.ceil(M / 2.0)) * 2 + 1
for _attempts in range(20):
# get taps for order M
try:
taps = sig.remez(M, bands, ampl, error_weight, Hz=2.0)
except RuntimeError:
M = M + 2
continue
# calculate frequency response and get error from ideal
nfft = 16 * len(taps)
h = np.fft.fft(taps, nfft)
w = np.fft.fftfreq(nfft, 0.5)
passband = h[(np.abs(w) >= bands[0]) & (np.abs(w) <= bands[1])]
stopband = h[(np.abs(w) >= bands[2]) & (np.abs(w) <= bands[3])]
act_ripple = -20 * np.log10(np.max(np.abs(ampl[0] - np.abs(passband))))
act_atten = -20 * np.log10(np.max(np.abs(ampl[1] - np.abs(stopband))))
if act_ripple >= pass_ripple and act_atten >= attenuation:
break
else:
M = M + 2
else:
errstr = (
"Could not calculate equiripple filter that meets requirements"
"after {0} attempts (final order {1})."
)
raise RuntimeError(errstr.format(_attempts, M))
return taps
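# Hedged usage sketch (added for illustration, not part of the original
# script): design an assumed low-pass filter suitable for a decimate-by-2
# stage and check the Type I property promised by the docstring. The cutoff,
# transition width, and attenuation values are made-up examples.
def _example_equiripple_lpf():  # pragma: no cover - illustrative only
    """Design a sample low-pass filter and sanity-check its tap count."""
    taps = equiripple_lpf(cutoff=0.4, transition_width=0.1, attenuation=60)
    assert len(taps) % 2 == 1  # even-order (Type I) filters have an odd tap count
    return taps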
class Recorder(object):
"""Record data from a USRP to digital rf through the uhd python library."""
def __init__(self, datadir, **kwargs):
options = dict(
verbose=True,
# mainboard group (num: len of mboards)
mboards=[],
subdevs=["A:A"],
clock_rates=[None],
clock_sources=[""],
time_sources=[""],
# receiver group (apply to all)
samplerate=1e6,
dev_args=["recv_buff_size=100000000", "num_recv_frames=512"],
stream_args=[],
tune_args=[],
time_sync=True,
wait_for_lock=True,
stop_on_dropped=False,
realtime=False,
test_settings=True,
# receiver ch. group (num: matching channels from mboards/subdevs)
centerfreqs=[100e6],
lo_offsets=[0],
lo_sources=[""],
lo_exports=[None],
dc_offsets=[False],
iq_balances=[None],
gains=[0],
bandwidths=[0],
antennas=[""],
# output channel group (num: len of channel_names)
channel_names=["ch0"],
channels=[None],
ch_samplerates=[None],
ch_centerfreqs=[False],
ch_scalings=[1.0],
ch_nsubchannels=[1],
ch_lpf_cutoffs=[0.9],
ch_lpf_transition_widths=[0.2],
ch_lpf_attenuations=[80.0],
ch_lpf_pass_ripples=[None],
ch_out_types=[None],
# digital_rf group (apply to all)
file_cadence_ms=1000,
subdir_cadence_s=3600,
metadata={},
uuid=None,
)
options.update(kwargs)
op = self._parse_options(datadir=datadir, **options)
self.op = op
# Set up buffer and create a secondary buffer for future use
# HACK may need to come up with way to set this better
BUFFLEN = int(op.samplerate)
self.bufflen = BUFFLEN
nbuff = 2
self.bufflist = [
np.empty((op.nrchs, BUFFLEN), dtype=op.cpu_dtype) for i in range(nbuff)
]
self.pntlist = [0 for i in range(nbuff)]
self.nbuff = nbuff
self.act_buff = 0
# test usrp device settings, release device when done
if op.test_settings:
if op.verbose:
print("Initialization: testing device settings.")
self._usrp_setup()
# finalize options (for settings that depend on USRP setup)
self._finalize_options()
@staticmethod
def _parse_options(**kwargs):
"""Put all keyword options in a namespace and normalize them."""
op = argparse.Namespace(**kwargs)
# check that subdevice specifications are unique per-mainboard
for sd in op.subdevs:
sds = sd.split()
if len(set(sds)) != len(sds):
errstr = (
'Invalid subdevice specification: "{0}". '
"Each subdevice specification for a given mainboard must "
"be unique."
)
raise ValueError(errstr.format(sd))
# get USRP cpu_format based on output type and decimation requirements
processing_required = (
any(sr is not None for sr in op.ch_samplerates)
or any(cf is not False for cf in op.ch_centerfreqs)
or any(s != 1 for s in op.ch_scalings)
or any(nsch != 1 for nsch in op.ch_nsubchannels)
)
if (
all(ot is None or ot == "sc16" for ot in op.ch_out_types)
and not processing_required
):
# with only sc16 output and no processing, can use sc16 as cpu
# format and disable conversion
op.cpu_format = "sc16"
op.cpu_dtype = np.dtype([(str("r"), np.int16), (str("i"), np.int16)])
op.ch_out_specs = [
dict(
convert=None,
convert_kwargs=None,
dtype=np.dtype([(str("r"), np.int16), (str("i"), np.int16)]),
name="sc16",
)
]
else:
op.cpu_format = "fc32"
op.cpu_dtype = np.dtype("complex64")
# get full specification for output types
supported_out_types = {
"sc8": dict(
convert="float_to_char",
convert_kwargs=dict(vlen=2, scale=float(2 ** 7 - 1)),
dtype=np.dtype([(str("r"), np.int8), (str("i"), np.int8)]),
name="sc8",
),
"sc16": dict(
convert="float_to_short",
convert_kwargs=dict(vlen=2, scale=float(2 ** 15 - 1)),
dtype=np.dtype([(str("r"), np.int16), (str("i"), np.int16)]),
name="sc16",
),
"sc32": dict(
convert="float_to_int",
convert_kwargs=dict(vlen=2, scale=float(2 ** 31 - 1)),
dtype=np.dtype([(str("r"), np.int32), (str("i"), np.int32)]),
name="sc32",
),
"fc32": dict(
convert=None,
convert_kwargs=None,
dtype=np.dtype("complex64"),
name="fc32",
),
}
supported_out_types[None] = supported_out_types["fc32"]
type_dicts = []
for ot in op.ch_out_types:
try:
type_dict = supported_out_types[ot]
except KeyError:
errstr = (
"Output type {0} is not supported. Must be one of {1}."
).format(ot, list(supported_out_types.keys()))
raise ValueError(errstr)
else:
type_dicts.append(type_dict)
op.ch_out_specs = type_dicts
# replace out_types to fill in None values with type name
        op.ch_out_types = [spec["name"] for spec in op.ch_out_specs]
# repeat mainboard arguments as necessary
op.nmboards = len(op.mboards) if len(op.mboards) > 0 else 1
for mb_arg in ("subdevs", "clock_rates", "clock_sources", "time_sources"):
val = getattr(op, mb_arg)
mbval = list(islice(cycle(val), 0, op.nmboards))
setattr(op, mb_arg, mbval)
# get number of receiver channels by total number of subdevices over
# all mainboards
op.mboards_bychan = []
op.subdevs_bychan = []
op.mboardnum_bychan = []
mboards = op.mboards if op.mboards else ["default"]
for mbnum, (mb, sd) in enumerate(zip(mboards, op.subdevs)):
sds = sd.split()
mbs = list(repeat(mb, len(sds)))
mbnums = list(repeat(mbnum, len(sds)))
op.mboards_bychan.extend(mbs)
op.subdevs_bychan.extend(sds)
op.mboardnum_bychan.extend(mbnums)
# repeat receiver channel arguments as necessary
op.nrchs = len(op.subdevs_bychan)
for rch_arg in (
"antennas",
"bandwidths",
"centerfreqs",
"dc_offsets",
"iq_balances",
"lo_offsets",
"lo_sources",
"lo_exports",
"gains",
):
val = getattr(op, rch_arg)
rval = list(islice(cycle(val), 0, op.nrchs))
setattr(op, rch_arg, rval)
# repeat output channel arguments as necessary
op.nochs = len(op.channel_names)
for och_arg in (
"channels",
"ch_centerfreqs",
"ch_lpf_attenuations",
"ch_lpf_cutoffs",
"ch_lpf_pass_ripples",
"ch_lpf_transition_widths",
"ch_nsubchannels",
"ch_out_specs",
"ch_out_types",
"ch_samplerates",
"ch_scalings",
):
val = getattr(op, och_arg)
rval = list(islice(cycle(val), 0, op.nochs))
setattr(op, och_arg, rval)
# fill in unspecified (None) channels values
rchannels = set(range(op.nrchs))
ochannels = set(c for c in op.channels if c is not None)
if not ochannels.issubset(rchannels):
errstr = (
"Invalid channel specification. Output channel uses"
" non-existent receiver channel: {0}."
)
raise ValueError(errstr.format(list(ochannels - rchannels)))
avail = sorted(rchannels - ochannels)
try:
op.channels = [c if c is not None else avail.pop(0) for c in op.channels]
except IndexError:
errstr = (
"No remaining receiver channels left to assign to unspecified"
" (None) output channel. You probably need to explicitly"
" specify the receiver channels to output."
)
raise ValueError(errstr)
unused_rchs = set(range(op.nrchs)) - set(op.channels)
if unused_rchs:
errstr = (
"Receiver channels {0} are unused in the output. Either"
" remove them from the mainboard/subdevice specification or"
" correct the output channel specification."
)
raise ValueError(errstr.format(unused_rchs))
# copy desired centerfreq from receiver to output channel if requested
op.ch_centerfreqs = [
op.centerfreqs[rch] if f in (None, True) else f
for f, rch in zip(op.ch_centerfreqs, op.channels)
]
# create device_addr string to identify the requested device(s)
op.mboard_strs = []
for n, mb in enumerate(op.mboards):
if re.match(r"[^0-9]+=.+", mb):
idtype, mb = mb.split("=")
elif re.match(r"[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}", mb):
idtype = "addr"
elif (
re.match(r"usrp[123]", mb)
or re.match(r"b2[01]0", mb)
or re.match(r"x3[01]0", mb)
):
idtype = "type"
elif re.match(r"[0-9A-Fa-f]{1,}", mb):
idtype = "serial"
else:
idtype = "name"
if len(op.mboards) == 1:
# do not use identifier numbering if only using one mainboard
s = "{type}={mb}".format(type=idtype, mb=mb.strip())
else:
s = "{type}{n}={mb}".format(type=idtype, n=n, mb=mb.strip())
op.mboard_strs.append(s)
if op.verbose:
opstr = (
dedent(
"""\
Main boards: {mboard_strs}
Subdevices: {subdevs}
Clock rates: {clock_rates}
Clock sources: {clock_sources}
Time sources: {time_sources}
Sample rate: {samplerate}
Device arguments: {dev_args}
Stream arguments: {stream_args}
Tune arguments: {tune_args}
Antenna: {antennas}
Bandwidth: {bandwidths}
Frequency: {centerfreqs}
LO frequency offset: {lo_offsets}
LO source: {lo_sources}
LO export: {lo_exports}
Gain: {gains}
DC offset: {dc_offsets}
IQ balance: {iq_balances}
Output channels: {channels}
Output channel names: {channel_names}
Output sample rate: {ch_samplerates}
Output frequency: {ch_centerfreqs}
Output scaling: {ch_scalings}
Output subchannels: {ch_nsubchannels}
Output type: {ch_out_types}
Data dir: {datadir}
Metadata: {metadata}
UUID: {uuid}
"""
)
.strip()
.format(**op.__dict__)
)
print(opstr)
return op
def _finalize_options(self):
"""Apply changes to op object to deal with the sub banding."""
op = self.op
op.ch_samplerates_frac = []
op.resampling_ratios = []
op.resampling_filter_taps = []
op.resampling_filter_delays = []
op.channelizer_filter_taps = []
op.channelizer_filter_taps_list = []
op.channelizer_filter_delays = []
op.total_filter_delay = []
op.rotator = []
op.max_filter = 0
for ko, (osr, nsc) in enumerate(zip(op.ch_samplerates, op.ch_nsubchannels)):
# get output sample rate fraction
# (op.samplerate_frac final value is set in _usrp_setup
# so can't get output sample rate until after that is done)
if osr is None:
ch_samplerate_frac = op.samplerate_frac
else:
ch_samplerate_frac = Fraction(osr).limit_denominator()
op.ch_samplerates_frac.append(ch_samplerate_frac)
# get resampling ratio
ratio = ch_samplerate_frac / op.samplerate_frac
op.resampling_ratios.append(ratio)
op.rotator.append(False)
# get resampling low-pass filter taps
if ratio == 1:
op.resampling_filter_taps.append(np.zeros(0))
op.resampling_filter_delays.append(0)
else:
# filter taps need to be designed for the highest rate
# (i.e. after interpolation but before decimation)
taps = equiripple_lpf(
cutoff=float(op.ch_lpf_cutoffs[ko]) / ratio.denominator,
transition_width=(
float(op.ch_lpf_transition_widths[ko]) / ratio.denominator
),
attenuation=op.ch_lpf_attenuations[ko],
pass_ripple=op.ch_lpf_pass_ripples[ko],
)
# for unit gain in passband, need to multiply taps by
# interpolation rate
taps = ratio.numerator * taps
op.resampling_filter_taps.append(taps)
# calculate filter delay in same way as pfb_arb_resampler
# (overall taps are applied at interpolated rate, but delay is
# still in terms of input rate, i.e. the taps per filter
# after being split into the polyphase filter bank)
taps_per_filter = int(np.ceil(float(len(taps)) / ratio.numerator))
op.resampling_filter_delays.append(Fraction(taps_per_filter - 1, 2))
# get channelizer low-pass filter taps
if nsc > 1:
taps = equiripple_lpf(
cutoff=(op.ch_lpf_cutoffs[ko] / nsc),
transition_width=(op.ch_lpf_transition_widths[ko] / nsc),
attenuation=op.ch_lpf_attenuations[ko],
pass_ripple=op.ch_lpf_pass_ripples[ko],
)
op.channelizer_filter_taps.append(taps)
op.channelizer_filter_taps_list.append([taps])
op.channelizer_filter_delays.append(Fraction(len(taps) - 1, 2))
else:
op.channelizer_filter_taps.append(np.zeros(0))
op.channelizer_filter_taps_list.append([np.zeros(0)])
op.channelizer_filter_delays.append(0)
m_rs = op.resampling_filter_taps[-1].shape[0]
m_cf = op.channelizer_filter_taps[-1].shape[0]
# Delay for filter without downsampling
hlen_rs = max(0, (m_rs - 1) // 2)
hlen_cf = max(0, (m_cf - 1) // 2)
# Delay after downsampling
rs_del = hlen_rs // ratio.denominator + bool(hlen_rs % ratio.denominator)
ch_del = (rs_del + hlen_cf) // nsc + bool((rs_del + hlen_cf) % nsc)
op.total_filter_delay.append(ch_del)
overlap0 = ch_del * ratio.denominator * nsc * 2 + 1
overlapout = overlap0 // ratio.numerator + bool(overlap0 % ratio.numerator)
op.max_filter = max(overlapout, op.max_filter)
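            # Worked example with assumed toy numbers (not from a real run):
            # a 31-tap resampler at ratio 1/4 and a 15-tap channelizer with
            # nsc = 2 give hlen_rs = 15 and hlen_cf = 7, so rs_del = 15//4 + 1
            # = 4, ch_del = (4 + 7)//2 + 1 = 6, overlap0 = 6*4*2*2 + 1 = 97,
            # and overlapout = 97 input samples carried over between buffers.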
def _usrp_setup(self):
"""Create, set up, and return USRP source and streamer objects.
        Using the op object, set up a uhd.usrp.MultiUSRP object and create an
        rx_streamer to receive the data.
Returns
-------
usrp : MultiUSRP
Object for the radios.
rx_streamer : RX_streamer
Streamer object for getting data.
"""
op = self.op
# create usrp source block
op.otw_format = "sc16"
usrp = uhd.usrp.MultiUSRP(",".join(chain(op.mboard_strs, op.dev_args)))
# set mainboard options
for mb_num in range(op.nmboards):
usrp.set_rx_subdev_spec(uhd.usrp.SubdevSpec(op.subdevs[mb_num]), mb_num)
# set master clock rate
clock_rate = op.clock_rates[mb_num]
if clock_rate is not None:
usrp.set_master_clock_rate(clock_rate, mb_num)
# set clock source
clock_source = op.clock_sources[mb_num]
if not clock_source and op.wait_for_lock:
clock_source = "external"
if clock_source:
try:
usrp.set_clock_source(clock_source, mb_num)
except RuntimeError:
errstr = (
"Setting mainboard {0} clock_source to '{1}' failed."
" Must be one of {2}. If setting is valid, check that"
" the source (REF) is operational."
).format(mb_num, clock_source, usrp.get_clock_sources(mb_num))
raise ValueError(errstr)
# set time source
time_source = op.time_sources[mb_num]
if not time_source and op.time_sync:
time_source = "external"
if time_source:
try:
usrp.set_time_source(time_source, mb_num)
except RuntimeError:
errstr = (
"Setting mainboard {0} time_source to '{1}' failed."
" Must be one of {2}. If setting is valid, check that"
" the source (PPS) is operational."
).format(mb_num, time_source, usrp.get_time_sources(mb_num))
raise ValueError(errstr)
# check for ref lock
mbnums_with_ref = [
mb_num
for mb_num in range(op.nmboards)
if "ref_locked" in usrp.get_mboard_sensor_names(mb_num)
]
if op.wait_for_lock and mbnums_with_ref:
if op.verbose:
sys.stdout.write("Waiting for reference lock...")
sys.stdout.flush()
timeout = 0
if op.wait_for_lock is True:
timeout_thresh = 30
else:
timeout_thresh = op.wait_for_lock
while not all(
usrp.get_mboard_sensor("ref_locked", mb_num).to_bool()
for mb_num in mbnums_with_ref
):
if op.verbose:
sys.stdout.write(".")
sys.stdout.flush()
time.sleep(1)
timeout += 1
if timeout > timeout_thresh:
if op.verbose:
sys.stdout.write("failed\n")
sys.stdout.flush()
unlocked_mbs = [
mb_num
for mb_num in mbnums_with_ref
if usrp.get_mboard_sensor("ref_locked", mb_num).to_bool()
]
errstr = (
"Failed to lock to 10 MHz reference on mainboards {0}."
" To skip waiting for lock, set `wait_for_lock` to"
" False (pass --nolock on the command line)."
).format(unlocked_mbs)
raise RuntimeError(errstr)
if op.verbose:
sys.stdout.write("locked\n")
sys.stdout.flush()
# set global options
# sample rate (set after clock rate so it can be calculated correctly)
usrp.set_rx_rate(float(op.samplerate))
time.sleep(0.25)
# read back actual mainboard options
# (clock rate can be affected by setting sample rate)
for mb_num in range(op.nmboards):
op.clock_rates[mb_num] = usrp.get_master_clock_rate(mb_num)
op.clock_sources[mb_num] = usrp.get_clock_source(mb_num)
op.time_sources[mb_num] = usrp.get_time_source(mb_num)
# read back actual sample rate value
samplerate = usrp.get_rx_rate()
# calculate longdouble precision/rational sample rate
# (integer division of clock rate)
cr = op.clock_rates[0]
srdec = int(round(cr / samplerate))
samplerate_ld = np.longdouble(cr) / srdec
op.samplerate = samplerate_ld
op.samplerate_frac = Fraction(cr).limit_denominator() / srdec
# set per-channel options
# set command time so settings are synced
COMMAND_DELAY = 0.2
cmd_time = usrp.get_time_now() + uhd.types.TimeSpec(COMMAND_DELAY)
usrp.set_command_time(cmd_time, ALL_MBOARDS) # defaults to all mboards
for ch_num in range(op.nrchs):
# local oscillator sharing settings
lo_source = op.lo_sources[ch_num]
if lo_source:
try:
usrp.set_rx_lo_source(lo_source, ALL_LOS, ch_num)
except RuntimeError:
errstr = (
"Unknown LO source option: '{0}'. Must be one of {1},"
" or it may not be possible to set the LO source on"
" this daughterboard."
).format(lo_source, usrp.get_rx_lo_sources(ALL_LOS, ch_num))
raise ValueError(errstr)
lo_export = op.lo_exports[ch_num]
if lo_export is not None:
if not lo_source:
errstr = (
"Channel {0}: must set an LO source in order to set"
" LO export."
).format(ch_num)
raise ValueError(errstr)
usrp.set_rx_lo_export_enabled(True, ALL_LOS, ch_num)
# center frequency and tuning offset
# HACK TuneRequest constructor does not take tune args as input.
# Need to set args afterward.
tune_req = uhd.types.TuneRequest(
op.centerfreqs[ch_num], op.lo_offsets[ch_num]
)
tune_req.args = uhd.types.DeviceAddr(",".join(op.tune_args))
tune_res = usrp.set_rx_freq(tune_req, ch_num)
time.sleep(0.5)
# store actual values from tune result
op.centerfreqs[ch_num] = tune_res.actual_rf_freq - tune_res.actual_dsp_freq
op.lo_offsets[ch_num] = tune_res.actual_dsp_freq
# dc offset
dc_offset = op.dc_offsets[ch_num]
if dc_offset is True:
usrp.set_rx_dc_offset(True, ch_num)
elif dc_offset is False:
usrp.set_rx_dc_offset(False, ch_num)
elif dc_offset is not None:
usrp.set_rx_dc_offset(dc_offset, ch_num)
# iq balance
iq_balance = op.iq_balances[ch_num]
if iq_balance is True:
usrp.set_rx_iq_balance(True, ch_num)
elif iq_balance is False:
usrp.set_rx_iq_balance(False, ch_num)
elif iq_balance is not None:
usrp.set_rx_iq_balance(iq_balance, ch_num)
# gain
usrp.set_rx_gain(op.gains[ch_num], ch_num)
# bandwidth
bw = op.bandwidths[ch_num]
if bw:
usrp.set_rx_bandwidth(bw, ch_num)
# antenna
ant = op.antennas[ch_num]
if ant:
try:
usrp.set_rx_antenna(ant, ch_num)
except RuntimeError:
errstr = (
"Unknown RX antenna option: '{0}'. Must be one of {1}."
).format(ant, usrp.get_antennas(ch_num))
raise ValueError(errstr)
# commands are done, clear time
usrp.clear_command_time(ALL_MBOARDS)
time.sleep(COMMAND_DELAY)
st_args = uhd.usrp.StreamArgs(op.cpu_format, op.otw_format)
st_args.channels = np.unique(op.channels).tolist()
rx_streamer = usrp.get_rx_stream(st_args)
# read back actual channel settings
op.usrpinfo = []
for ch_num in range(op.nrchs):
if op.lo_sources[ch_num]:
op.lo_sources[ch_num] = usrp.get_rx_lo_sources(ALL_LOS, ch_num)
if op.lo_exports[ch_num] is not None:
op.lo_exports[ch_num] = usrp.get_rx_lo_export_enabled(ALL_LOS, ch_num)
op.gains[ch_num] = usrp.get_rx_gain(ch_num)
op.bandwidths[ch_num] = usrp.get_rx_bandwidth(ch_num)
op.antennas[ch_num] = usrp.get_rx_antenna(ch_num)
op.usrpinfo.append(dict(usrp.get_usrp_rx_info(ch_num)))
if op.verbose:
print("Using the following devices:")
chinfostrs = [
"Motherboard: {mb_id} ({mb_addr}) | Daughterboard: {db_name}",
"Subdev: {sub} | Antenna: {ant} | Gain: {gain} | Rate: {sr}",
"Frequency: {freq:.3f} ({lo_off:+.3f}) | Bandwidth: {bw}",
]
if any(op.lo_sources) or any(op.lo_exports):
chinfostrs.append("LO source: {lo_source} | LO export: {lo_export}")
chinfo = "\n".join([" " + l for l in chinfostrs])
for ch_num in range(op.nrchs):
header = "---- receiver channel {0} ".format(ch_num)
header += "-" * (78 - len(header))
print(header)
usrpinfo = op.usrpinfo[ch_num]
info = {}
info["mb_id"] = usrpinfo["mboard_id"]
mba = op.mboards_bychan[ch_num]
if mba == "default":
mba = usrpinfo["mboard_serial"]
info["mb_addr"] = mba
info["db_name"] = usrpinfo["rx_subdev_name"]
info["sub"] = op.subdevs_bychan[ch_num]
info["ant"] = op.antennas[ch_num]
info["bw"] = op.bandwidths[ch_num]
info["freq"] = op.centerfreqs[ch_num]
info["gain"] = op.gains[ch_num]
info["lo_off"] = op.lo_offsets[ch_num]
info["lo_source"] = op.lo_sources[ch_num]
info["lo_export"] = op.lo_exports[ch_num]
info["sr"] = op.samplerate
print(chinfo.format(**info))
print("-" * 78)
return usrp, rx_streamer
def run(self, starttime=None, endtime=None, duration=None, period=10):
"""Launch threads that run the buffering, processing and recording.
        This function launches the threads and performs the final setup for the timing.
Parameters
----------
starttime : str, optional
Start time string. Defaults to None.
endtime : str, optional
End time string. Defaults to None.
duration : float, optional
Recording duration. Defaults to None.
period : int, optional
Cycle repeat period. Defaults to 10.
"""
op = self.op
# window in seconds that we allow for setup time so that we don't
# issue a start command that's in the past when the recording starts
SETUP_TIME = 10
# print current time and NTP status
if op.verbose and sys.platform.startswith("linux"):
try:
call(("timedatectl", "status"))
except OSError:
# no timedatectl command, ignore
pass
# parse time arguments
st = drf.util.parse_identifier_to_time(starttime)
if st is not None:
# find next suitable start time by cycle repeat period
now = datetime.utcnow()
now = now.replace(tzinfo=pytz.utc)
soon = now + timedelta(seconds=SETUP_TIME)
diff = max(soon - st, timedelta(0)).total_seconds()
periods_until_next = (diff - 1) // period + 1
st = st + timedelta(seconds=periods_until_next * period)
if op.verbose:
ststr = st.strftime("%a %b %d %H:%M:%S %Y")
stts = (st - drf.util.epoch).total_seconds()
print("Start time: {0} ({1})".format(ststr, stts))
et = drf.util.parse_identifier_to_time(endtime, ref_datetime=st)
if et is not None:
if op.verbose:
etstr = et.strftime("%a %b %d %H:%M:%S %Y")
etts = (et - drf.util.epoch).total_seconds()
print("End time: {0} ({1})".format(etstr, etts))
if (
et
< (pytz.utc.localize(datetime.utcnow()) + timedelta(seconds=SETUP_TIME))
) or (st is not None and et <= st):
raise ValueError("End time is before launch time!")
# if op.realtime:
# r = gr.enable_realtime_scheduling()
#
# if op.verbose:
# if r == gr.RT_OK:
# print("Realtime scheduling enabled")
# else:
# print("Note: failed to enable realtime scheduling")
# create data directory so ringbuffer code can be started while waiting
# to launch
if not os.path.isdir(op.datadir):
os.makedirs(op.datadir)
# wait for the start time if it is not past
while (st is not None) and (
(st - pytz.utc.localize(datetime.utcnow())) > timedelta(seconds=SETUP_TIME)
):
ttl = int((st - pytz.utc.localize(datetime.utcnow())).total_seconds())
if (ttl % 10) == 0:
print("Standby {0} s remaining...".format(ttl))
sys.stdout.flush()
time.sleep(1)
usrp, stream = self._usrp_setup()
# finalize options (for settings that depend on USRP setup)
self._finalize_options()
# set device time
tt = time.time()
if op.time_sync:
            # wait until time is 0.2 to 0.3 s past a full second, then latch
# we have to trust NTP to be 0.2 s accurate
while tt - math.floor(tt) < 0.2 or tt - math.floor(tt) > 0.3:
time.sleep(0.01)
tt = time.time()
if op.verbose:
print("Latching at " + str(tt))
# waits for the next pps to happen
# (at time math.ceil(tt))
# then sets the time for the subsequent pps
# (at time math.ceil(tt) + 1.0)
usrp.set_time_unknown_pps(uhd.types.TimeSpec(math.ceil(tt) + 1.0))
else:
usrp.set_time_now(uhd.types.TimeSpec(tt), ALL_MBOARDS)
# set launch time
# (at least 2 seconds out so USRP start time can be set properly and
        # there is time to set up the receive threads)
if st is not None:
lt = st
else:
now = pytz.utc.localize(datetime.utcnow())
# launch on integer second by default for convenience (ceil + 2)
lt = now.replace(microsecond=0) + timedelta(seconds=3)
ltts = (lt - drf.util.epoch).total_seconds()
# adjust launch time forward so it falls on an exact sample since epoch
lt_rsamples = int(np.ceil(ltts * op.samplerate))
ltts = lt_rsamples / op.samplerate
lt = drf.util.sample_to_datetime(lt_rsamples, op.samplerate)
if op.verbose:
ltstr = lt.strftime("%a %b %d %H:%M:%S.%f %Y")
msg = "Launch time: {0} ({1})\nSample index: {2}"
print(msg.format(ltstr, repr(ltts), lt_rsamples))
# command launch time
ct_td = lt - drf.util.epoch
ct_secs = ct_td.total_seconds() // 1.0
ct_frac = ct_td.microseconds / 1000000.0
# Craft and send the Stream Command
stream_cmd = uhd.types.StreamCMD(uhd.types.StreamMode.start_cont)
# Need to set this to False if using Multiple receive channels. This is
# buried pretty deep in uhd.
stream_cmd.stream_now = op.nrchs == 1
stream_cmd.time_spec = uhd.types.TimeSpec(ct_secs) + uhd.types.TimeSpec(ct_frac)
stream.issue_stream_cmd(stream_cmd)
# Set up drf writers
drfObjs = []
# ko is for output channel, kr is the radio channel
for ko, kr in enumerate(op.channels):
# make channelizer if necessary
nsc = op.ch_nsubchannels[ko]
mbnum = op.mboardnum_bychan[kr]
cpath = os.path.join(op.datadir, op.channel_names[ko])
ch_samplerate_frac = op.ch_samplerates_frac[ko]
ch_centerfreq = op.ch_centerfreqs[ko]
start_sample_adjust = 0
# make resampling filter blocks if necessary
rs_ratio = op.resampling_ratios[ko]
scaling = op.ch_scalings[ko]
if rs_ratio != 1:
rs_taps = op.resampling_filter_taps[ko]
# integrate scaling into filter taps
rs_taps *= scaling
conv_scaling = 1.0
# frequency shift filter taps to band-pass if necessary
if ch_centerfreq is not False:
# create band-pass filter (complex taps)
f_shift = ch_centerfreq - op.centerfreqs[kr]
phase_inc = 2 * np.pi * f_shift / op.samplerate
rotator = np.exp(phase_inc * 1j * np.arange(len(rs_taps)))
rs_taps = (rs_taps * rotator).astype("complex64")
op.resampling_filter_taps[ko] = rs_taps
else:
# save low-pass filter (float taps)
op.resampling_filter_taps[ko] = rs_taps
else:
conv_scaling = scaling
# make frequency shift rotator if necessary
if ch_centerfreq is not False:
f_shift = ch_centerfreq - op.centerfreqs[kr]
phase_inc = -2 * np.pi * f_shift / ch_samplerate_frac
op.rotator[ko] = phase_inc
else:
ch_centerfreq = op.centerfreqs[kr]
# make channelizer if necessary
if nsc > 1:
sc_taps = op.channelizer_filter_taps[ko]
n_h = np.arange(len(sc_taps))
f_frac = np.arange(nsc, dtype=float) / nsc
tap_list = []
for i_f in f_frac:
f_vec = np.exp(2j * np.pi * i_f * n_h)
tmp_taps = f_vec * sc_taps
tap_list.append(tmp_taps.astype("complex64"))
op.channelizer_filter_taps_list[ko] = tap_list
# # declare sample delay for the filter block so that tags are
# # propagated to the correct sample
# # (for channelized, delay is applied for each filter in the
# # polyphase bank, so this needs to be the output sample delay)
# filt.declare_sample_delay(int(op.channelizer_filter_delays[ko] / nsc))
# adjust start sample to account for filter delay so first
# sample going to output is shifted to an earlier time
# (adjustment is in terms of filter output samples, so need to
# take the input filter delay and account for the output rate)
# start_sample_adjust = int(
# (start_sample_adjust - op.channelizer_filter_delays[ko]) / nsc
# )
# modify output settings accordingly
ch_centerfreq = ch_centerfreq + np.fft.fftfreq(
nsc, 1 / float(ch_samplerate_frac)
)
ch_samplerate_frac = ch_samplerate_frac / nsc
else:
ch_centerfreq = np.array([ch_centerfreq])
# make conversion block if necessary
ot_dict = op.ch_out_specs[ko]
converter = ot_dict["convert"]
if converter is not None:
kw = ot_dict["convert_kwargs"]
# increase vector length of input due to channelizer
# incorporate any scaling into type conversion block
conv_scaling *= kw["scale"]
op.ch_out_specs[ko]["conv_scaling"] = conv_scaling
# get start sample
ch_samplerate_ld = np.longdouble(
ch_samplerate_frac.numerator
) / np.longdouble(ch_samplerate_frac.denominator)
start_sample = int(np.uint64(ltts * ch_samplerate_ld)) + start_sample_adjust
metadata = dict(
# receiver metadata for USRP
center_frequencies=ch_centerfreq,
receiver=dict(
description="UHD USRP source using pyuhd",
info=op.usrpinfo[kr],
antenna=op.antennas[kr],
bandwidth=op.bandwidths[kr],
center_freq=op.centerfreqs[kr],
clock_rate=op.clock_rates[mbnum],
clock_source=op.clock_sources[mbnum],
dc_offset=op.dc_offsets[kr],
gain=op.gains[kr],
id=op.mboards_bychan[kr],
iq_balance=op.iq_balances[kr],
lo_export=op.lo_exports[kr],
lo_offset=op.lo_offsets[kr],
lo_source=op.lo_sources[kr],
otw_format=op.otw_format,
samp_rate=usrp.get_rx_rate(),
stream_args=",".join(op.stream_args),
subdev=op.subdevs_bychan[kr],
time_source=op.time_sources[mbnum],
),
processing=dict( # Filtering info
channelizer_filter_taps=op.channelizer_filter_taps[ko],
decimation=op.resampling_ratios[ko].denominator,
interpolation=op.resampling_ratios[ko].numerator,
resampling_filter_taps=op.resampling_filter_taps[ko],
scaling=op.ch_scalings[ko],
),
**op.metadata
)
# Metadata writer and write at first record sample
mdata_path = os.path.join(cpath, "metadata")
if not os.path.isdir(mdata_path):
os.makedirs(mdata_path)
mdatawrite = drf.DigitalMetadataWriter(
mdata_path,
3600,
60,
ch_samplerate_frac.numerator,
ch_samplerate_frac.denominator,
"uhdtodrf",
)
mdatawrite.write(start_sample, metadata)
drf_out = drf.DigitalRFWriter(
cpath,
op.ch_out_specs[ko]["dtype"],
op.subdir_cadence_s,
op.file_cadence_ms,
start_sample,
ch_samplerate_frac.numerator,
ch_samplerate_frac.denominator,
op.uuid,
compression_level=0,
checksum=False,
is_complex=True,
num_subchannels=nsc,
is_continuous=True,
marching_periods=True,
)
drfObjs.append(drf_out)
# Lists for processing and write threads.
proc_threads = []
write_threads = []
radfifo = queue.Queue() # receive queue
write_fifo = queue.Queue()
proc_count = 0
end_rec = threading.Event()
if et is None and duration is not None:
et = lt + timedelta(seconds=duration)
try:
# Start the buffering thread
read_th = threading.Thread(
target=self.buff_func, args=(stream, radfifo, end_rec, start_sample)
)
read_th.start()
read_th.setName("Read Thread")
while not end_rec.is_set():
if et is not None:
stop_bool = pytz.utc.localize(datetime.utcnow()) >= et - timedelta(
seconds=1
)
if stop_bool:
end_rec.set()
if not radfifo.empty():
d1 = radfifo.get()
tmp = self.bufflist[d1[0]][:, : self.pntlist[d1[0]]]
cur_pt = threading.Thread(
target=self.procsamples, args=(write_fifo, tmp, d1[1])
)
cur_pt.start()
cur_pt.setName("Proc{0}".format(proc_count))
proc_threads.append(cur_pt)
proc_count += 1
if proc_threads:
                    if not proc_threads[0].is_alive():
write_data = write_fifo.get()
cur_wt = threading.Thread(
target=write_samples, args=(drfObjs, write_data)
)
cur_wt.start()
cur_wt.setName("Save to drf")
write_threads.append(cur_wt)
proc_threads.pop(0)
sys.stdout.write(".")
sys.stdout.flush()
if write_threads:
                    if not write_threads[0].is_alive():
write_threads.pop(0)
time.sleep(1)
except RuntimeError as ex:
print("Runtime error in receive: %s", ex)
# Handle the error codes
except KeyboardInterrupt:
end_rec.set()
time.sleep(1)
finally:
while write_threads or proc_threads or (not radfifo.empty()):
if not radfifo.empty():
d1 = radfifo.get()
tmp = self.bufflist[d1[0]][:, : self.pntlist[d1[0]]]
cur_pt = threading.Thread(
target=self.procsamples, args=(write_fifo, tmp, d1[1])
)
cur_pt.start()
cur_pt.setName("Proc{0}".format(proc_count))
proc_threads.append(cur_pt)
proc_count += 1
if proc_threads:
                    if not proc_threads[0].is_alive():
write_data = write_fifo.get()
cur_wt = threading.Thread(
target=write_samples, args=(drfObjs, write_data)
)
cur_wt.start()
cur_wt.setName("Save to drf")
write_threads.append(cur_wt)
proc_threads.pop(0)
sys.stdout.write(".")
sys.stdout.flush()
if write_threads:
                    if not write_threads[0].is_alive():
write_threads.pop(0)
time.sleep(0.1)
for iobj in drfObjs:
iobj.close()
print("done")
sys.stdout.flush()
def buff_func(self, stream, radfifo, end_rec, start_sample):
"""Call the receive command for the streamer and places the data in a buffer.
This function repeatly calls the recv command from the streamer and places
the data in a buffer inside the bufflist object. Once a buffer is full
the function cycles through to the next buffer in the list in a round
robin style assuming that the processing and recording threads have
copied the old data.
Parameters
----------
stream : RX_streamer
Streamer that has already started taking data.
radfifo : Queue
Will be filled for the next steps.
end_rec : Threading.Event
If set will stop taking data.
        start_sample : int
            Start sample index (number of samples since the Unix epoch).
"""
try:
op = self.op
# To estimate the number of dropped samples in an overflow situation,
# we need the following:
# On the first overflow, set had_an_overflow and record the time
# On the next ERROR_CODE_NONE, calculate how long its been since
# the recorded time, and use the tick rate to estimate the
# number of dropped samples. Also, reset the tracking variables.
had_an_overflow = False
last_overflow = uhd.types.TimeSpec(0)
samp_num = 0
m_overlap = op.max_filter
sps = int(op.samplerate)
# Make a receive buffer
num_channels = stream.get_num_channels()
max_samps_per_packet = stream.get_max_num_samps()
# receive buffer from
# HACK need a parameter for the dytpe of the cpu format
recv_buffer = np.empty(
(num_channels, max_samps_per_packet), dtype=op.cpu_dtype
)
radio_meta = uhd.types.RXMetadata()
num_rx_dropped = 0
while not end_rec.is_set():
num_rx = stream.recv(recv_buffer, radio_meta, 1.0)
rec_samp = radio_meta.time_spec.to_ticks(sps) # +np.arange(num_rx)
# Logic for how to deal with samples before start time.
if rec_samp + num_rx < start_sample:
continue
# Go through error checks
if radio_meta.error_code == uhd.types.RXMetadataErrorCode.none:
# Reset the overflow flag
if had_an_overflow:
had_an_overflow = False
num_rx_dropped = uhd.types.TimeSpec(
radio_meta.time_spec.get_real_secs()
- last_overflow.get_real_secs()
).to_ticks(sps)
# Break out of loop if dropped sample.
if op.stop_on_dropped:
end_rec.set()
                elif radio_meta.error_code == uhd.types.RXMetadataErrorCode.overflow:
                    had_an_overflow = True
                    last_overflow = radio_meta.time_spec
                    # If we had a sequence error, record it and stop
                    if radio_meta.out_of_sequence:
                        end_rec.set()
                        break
                    continue
                else:
                    print("Receiver error: %s" % radio_meta.strerror())
                    break
# Write the current set of samples to memory.
# Put this after the error checks to put in nans for overflows.
a = self.act_buff
bpnt = self.pntlist[a]
if self.pntlist[a] + num_rx + num_rx_dropped >= self.bufflen:
b = (a + 1) % self.nbuff
cpnt = self.pntlist[a]
if m_overlap > 0:
self.bufflist[b][:, :m_overlap] = self.bufflist[b][
:, cpnt - m_overlap : cpnt
]
radfifo.put([a, samp_num])
samp_num += bpnt
self.pntlist[b] = m_overlap
self.act_buff = b
a = self.act_buff
bpnt = self.pntlist[a]
if num_rx_dropped:
end_pnt = min(self.bufflen, bpnt + num_rx_dropped)
self.bufflist[a][:, bpnt:end_pnt] = np.nan
self.pntlist[a] += num_rx_dropped
num_rx_dropped = 0
beg_samp = max(0, start_sample - rec_samp)
a = self.act_buff
bpnt = self.pntlist[a]
self.bufflist[a][:, bpnt : bpnt + num_rx - beg_samp] = recv_buffer[
:, beg_samp:num_rx
]
self.pntlist[a] += num_rx - beg_samp
except Exception as error:
print("Radio buffer caught this error: " + repr(error))
end_rec.set()
finally:
# After we get the signal to stop, issue a stop command
stream.issue_stream_cmd(uhd.types.StreamCMD(uhd.types.StreamMode.stop_cont))
# Set the last buffer to be read
a = self.act_buff
radfifo.put([a, samp_num])
def procsamples(self, out_que, data_samples, start_sample):
"""Perform resampling, channelization, scaling and setting data types.
Parameters
----------
out_que : Queue
Output for the processed data.
data_samples : array
Input data samples in nrec x N array.
        start_sample : int
            Sample index of the first sample in data_samples.
"""
try:
op = self.op
nchans = len(op.channels)
outlist = [0 for i in range(nchans)]
# HACK Make this a class method so I don't need to make dictionary.
for ko, kr in enumerate(op.channels):
rec_data = data_samples[kr]
n_data = rec_data.shape[0]
m_over = op.max_filter
                h_len = (m_over - 1) // 2  # integer half-length so slice indices stay ints
rs = op.resampling_ratios[ko]
rs_taps = op.resampling_filter_taps[ko]
rot = op.rotator[ko]
nsc = op.ch_nsubchannels[ko]
n_del = (rs.numerator * h_len) // (rs.denominator * nsc) + bool(
(rs.numerator * h_len) % (rs.denominator * nsc)
)
n_out = n_data - (m_over - 1)
n_out = n_out * rs.numerator // rs.denominator + bool(
(n_out * rs.numerator) % rs.denominator
)
n_out = n_out // nsc + bool(n_out % nsc)
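                # Worked example with assumed toy numbers: n_data = 1_000_096,
                # m_over = 97, rs = 1/4 and nsc = 2 give h_len = 48,
                # n_del = 48 // 8 = 6 and
                # n_out = ((1_000_096 - 96) // 4) // 2 = 125_000 samples per
                # subchannel after resampling and channelizing.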
sc_taps = op.channelizer_filter_taps_list[ko]
ot_dict = op.ch_out_specs[ko]
conv_scaling = ot_dict["conv_scaling"]
convert = ot_dict["convert"]
# Resampling
if rs != 1:
rec_data = sig.resample_poly(
rec_data, rs.numerator, rs.denominator, window=rs_taps
)
# frequency rotation with no resampling
elif rot:
rec_data = rec_data * np.exp(rot * 1j * np.arange(len(rec_data)))
rec_data = rec_data * np.exp(rot * 1j * start_sample)
# sub banding
if nsc > 1:
n_ds = rec_data.shape[0]
n_ch = n_ds // nsc + bool(n_ds % nsc)
xout = np.empty((n_ch, nsc), dtype=rec_data.dtype)
for isc in range(nsc):
cur_tap = sc_taps[isc]
xout[:, isc] = sig.resample_poly(
rec_data, 1, nsc, window=cur_tap
)
rec_data = xout
rec_data = rec_data[n_del : n_out + n_del]
# scaling
if conv_scaling != 1:
rec_data = conv_scaling * rec_data
# HACK Need to make a set of functions to do all of the translations.
# conversion of number type
                if convert is not None:
tmp_data = np.empty(rec_data.shape, dtype=ot_dict["dtype"])
tmp_data["r"] = np.round(rec_data.real)
tmp_data["i"] = np.round(rec_data.imag)
rec_data = tmp_data
outlist[ko] = rec_data
out_que.put(outlist)
except Exception as error:
print("Processor caught this error: " + repr(error))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
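# Hedged sketch (added for illustration, not part of the original script) of
# the round-robin buffer hand-off performed in Recorder.buff_func: when the
# active buffer fills, the last m_overlap samples are copied to the head of
# the next buffer so the filters in procsamples have history to work with.
# The sizes below are toy values; the real buffers hold one second of samples
# per receiver channel.
def _example_buffer_rollover():  # pragma: no cover - illustrative only
    """Carry filter history from a full buffer into the next one."""
    bufflen, m_overlap = 10, 3
    bufflist = [np.arange(bufflen, dtype=float).reshape(1, -1), np.zeros((1, bufflen))]
    pntlist = [bufflen, 0]  # buffer 0 is full, buffer 1 is empty
    a, b = 0, 1
    bufflist[b][:, :m_overlap] = bufflist[a][:, pntlist[a] - m_overlap:pntlist[a]]
    pntlist[b] = m_overlap  # new data lands right after the copied overlap
    assert bufflist[b][0, :m_overlap].tolist() == [7.0, 8.0, 9.0]
    return pntlist[b]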
def write_samples(drfObj, data_samples):
"""Write out data buffer, typically in a separate thread.
Parameters
----------
drfObj : list
List of digital rf writers for each channel.
data_samples : array, shape (nchan, nsample)
Array that will be saved.
"""
try:
for i_num, iobj in enumerate(drfObj):
iobj.rf_write(data_samples[i_num])
except Exception as error:
print("write function caught this error: " + repr(error))
def evalint(s):
"""Evaluate string to an integer."""
return int(eval(s, {}, {}))
def evalfloat(s):
"""Evaluate string to a float."""
return float(eval(s, {}, {}))
def intstrtuple(s):
"""Get (int, string) tuple from int:str strings."""
parts = [p.strip() for p in s.split(":", 1)]
if len(parts) == 2:
return int(parts[0]), parts[1]
else:
return None, parts[0]
def noneorstr(s):
"""Turn empty or 'none' string to None."""
if s.lower() in ("", "none"):
return None
else:
return s
def noneorfloat(s):
"""Turn empty or 'none' to None, else evaluate to float."""
if s.lower() in ("", "none"):
return None
else:
return evalfloat(s)
def noneorbool(s):
"""Turn empty or 'none' string to None, all others to boolean."""
if s.lower() in ("", "none"):
return None
elif s.lower() in ("true", "t", "yes", "y", "1"):
return True
else:
return False
def noneorboolorfloat(s):
"""Turn empty or 'none' to None, else evaluate to a boolean or float."""
if s.lower() in ("", "none"):
return None
elif s.lower() in ("auto", "true", "t", "yes", "y"):
return True
elif s.lower() in ("false", "f", "no", "n"):
return False
else:
return evalfloat(s)
def noneorboolorcomplex(s):
"""Turn empty or 'none' to None, else evaluate to a boolean or complex."""
if s.lower() in ("", "none"):
return None
elif s.lower() in ("auto", "true", "t", "yes", "y"):
return True
elif s.lower() in ("false", "f", "no", "n"):
return False
else:
return complex(eval(s, {}, {}))
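# Hedged usage sketch (added for illustration, not part of the original
# script): the asserted values simply restate what the parsers above return
# for a few assumed sample strings.
def _example_option_parsers():  # pragma: no cover - illustrative only
    """Exercise the string-to-option helpers used by the argument parser."""
    assert noneorbool("none") is None  # '' / 'none' always map to None
    assert noneorbool("T") is True and noneorbool("0") is False
    assert noneorfloat("2.5e6") == 2.5e6  # expressions are evaluated to float
    assert noneorboolorcomplex("1+1j") == 1 + 1j
    assert intstrtuple("0:ch0") == (0, "ch0")  # receiver number + channel name
    assert intstrtuple("ch1") == (None, "ch1")  # bare name -> auto-assigned receiver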
class Extend(argparse.Action):
"""Action to split comma-separated arguments and add to a list."""
def __init__(self, option_strings, dest, type=None, **kwargs):
if type is not None:
itemtype = type
else:
def itemtype(s):
return s
def split_string_and_cast(s):
return [itemtype(a.strip()) for a in s.strip().split(",")]
super(Extend, self).__init__(
option_strings, dest, type=split_string_and_cast, **kwargs
)
def __call__(self, parser, namespace, values, option_string=None):
cur_list = getattr(namespace, self.dest, [])
if cur_list is None:
cur_list = []
cur_list.extend(values)
setattr(namespace, self.dest, cur_list)
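# Hedged usage sketch (added for illustration, not part of the original
# script): build a throwaway parser to show how Extend merges repeated flags
# and comma-separated values. The option name and values are assumptions
# chosen purely for demonstration.
def _example_extend_action():  # pragma: no cover - illustrative only
    """Show that Extend accumulates values across repeats and comma lists."""
    demo_parser = argparse.ArgumentParser()
    demo_parser.add_argument("--gain", dest="gains", action=Extend, type=evalfloat)
    ns = demo_parser.parse_args(["--gain", "10,20", "--gain", "30"])
    assert ns.gains == [10.0, 20.0, 30.0]
    return ns.gains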
def _add_dir_group(parser):
dirgroup = parser.add_mutually_exclusive_group(required=True)
dirgroup.add_argument(
"datadir",
nargs="?",
default=None,
help="""Data directory, to be filled with channel subdirectories.""",
)
dirgroup.add_argument(
"-o",
"--out",
dest="outdir",
default=None,
help="""Data directory, to be filled with channel subdirectories.""",
)
return parser
def _add_mainboard_group(parser):
mbgroup = parser.add_argument_group(title="mainboard")
mbgroup.add_argument(
"-m",
"--mainboard",
dest="mboards",
action=Extend,
help="""Mainboard address. (default: first device found)""",
)
mbgroup.add_argument(
"-d",
"--subdevice",
dest="subdevs",
action=Extend,
help="""USRP subdevice string. (default: "A:A")""",
)
mbgroup.add_argument(
"--clock_rate",
dest="clock_rates",
action=Extend,
type=noneorfloat,
help="""Master clock rate for mainboard. Can be 'None'/'' to use
device default or a value in Hz. (default: None)""",
)
mbgroup.add_argument(
"--clock_source",
dest="clock_sources",
action=Extend,
type=noneorstr,
help="""Clock source (i.e. 10 MHz REF) for mainboard. Can be 'None'/''
to use default (do not set if --nolock, otherwise 'external')
or a string like 'external' or 'internal'. (default: '')""",
)
mbgroup.add_argument(
"--time_source",
dest="time_sources",
action=Extend,
type=noneorstr,
help="""Time source (i.e. PPS) for mainboard. Can be 'None'/''
to use default (do not set if --nosync, otherwise 'external')
or a string like 'external' or 'internal'. (default: '')""",
)
return parser
def _add_receiver_group(parser):
recgroup = parser.add_argument_group(title="receiver")
recgroup.add_argument(
"-r",
"--samplerate",
dest="samplerate",
type=evalfloat,
help="""Sample rate in Hz. (default: 1e6)""",
)
recgroup.add_argument(
"-A",
"--devargs",
dest="dev_args",
action=Extend,
help="""Device arguments, e.g. "master_clock_rate=30e6".
(default: 'recv_buff_size=100000000,num_recv_frames=512')""",
)
recgroup.add_argument(
"-a",
"--streamargs",
dest="stream_args",
action=Extend,
help="""Stream arguments, e.g. "peak=0.125,fullscale=1.0".
(default: '')""",
)
recgroup.add_argument(
"-T",
"--tuneargs",
dest="tune_args",
action=Extend,
help="""Tune request arguments, e.g. "mode_n=integer,int_n_step=100e3".
(default: '')""",
)
# kept for backward compatibility,
# replaced by clock_source/time_source in 2.6
recgroup.add_argument("--sync_source", dest="sync_source", help=argparse.SUPPRESS)
recgroup.add_argument(
"--nosync",
dest="time_sync",
action="store_false",
help="""Skip syncing with reference time. (default: False)""",
)
recgroup.add_argument(
"--nolock",
dest="wait_for_lock",
action="store_false",
help="""Don't wait for reference clock to lock. (default: False)""",
)
recgroup.add_argument(
"--stop_on_dropped",
dest="stop_on_dropped",
action="store_true",
help="""Stop on dropped packet. (default: %(default)s)""",
)
recgroup.add_argument(
"--realtime",
dest="realtime",
action="store_true",
help="""Enable realtime scheduling if possible.
(default: %(default)s)""",
)
recgroup.add_argument(
"--notest",
dest="test_settings",
action="store_false",
help="""Do not test USRP settings until experiment start.
(default: False)""",
)
return parser
def _add_rchannel_group(parser):
chgroup = parser.add_argument_group(title="receiver channel")
chgroup.add_argument(
"-f",
"--centerfreq",
dest="centerfreqs",
action=Extend,
type=evalfloat,
help="""Center frequency in Hz. (default: 100e6)""",
)
chgroup.add_argument(
"-F",
"--lo_offset",
dest="lo_offsets",
action=Extend,
type=evalfloat,
help="""Frontend tuner offset from center frequency, in Hz.
(default: 0)""",
)
chgroup.add_argument(
"--lo_source",
dest="lo_sources",
action=Extend,
type=noneorstr,
help="""Local oscillator source. Typically 'None'/'' (do not set),
'internal' (e.g. LO1 for CH1, LO2 for CH2),
'companion' (e.g. LO2 for CH1, LO1 for CH2), or
'external' (neighboring board via connector).
(default: '')""",
)
chgroup.add_argument(
"--lo_export",
dest="lo_exports",
action=Extend,
type=noneorbool,
help="""Whether to export the LO's source to the external connector.
Can be 'None'/'' to skip the channel, otherwise it can be
'True' or 'False' provided the LO source is set.
(default: None)""",
)
chgroup.add_argument(
"--dc_offset",
dest="dc_offsets",
action=Extend,
type=noneorboolorcomplex,
help="""DC offset correction to use. Can be 'None'/'' to keep device
default, 'True'/'auto' to enable automatic correction, 'False'
to disable automatic correction, or a complex value
(e.g. "1+1j"). (default: False)""",
)
chgroup.add_argument(
"--iq_balance",
dest="iq_balances",
action=Extend,
type=noneorboolorcomplex,
help="""IQ balance correction to use. Can be 'None'/'' to keep device
default, 'True'/'auto' to enable automatic correction, 'False'
to disable automatic correction, or a complex value
(e.g. "1+1j"). (default: None)""",
)
chgroup.add_argument(
"-g",
"--gain",
dest="gains",
action=Extend,
type=evalfloat,
help="""Gain in dB. (default: 0)""",
)
chgroup.add_argument(
"-b",
"--bandwidth",
dest="bandwidths",
action=Extend,
type=evalfloat,
help="""Frontend bandwidth in Hz. (default: 0 == frontend default)""",
)
chgroup.add_argument(
"-y",
"--antenna",
dest="antennas",
action=Extend,
type=noneorstr,
help="""Name of antenna to select on the frontend.
(default: frontend default))""",
)
return parser
def _add_ochannel_group(parser):
chgroup = parser.add_argument_group(title="output channel")
chgroup.add_argument(
"+c",
"-c",
"--channel",
dest="chs",
action=Extend,
type=intstrtuple,
help="""Output channel specification, including names and mapping from
receiver channels. Each output channel must be specified here
and given a unique name. Specifications are given as a receiver
channel number and name pair, e.g. "0:ch0". The number and
colon are optional; if omitted, any unused receiver channels
will be assigned to output channels in the supplied name order.
(default: "ch0")""",
)
chgroup.add_argument(
"+r",
"--ch_samplerate",
dest="ch_samplerates",
action=Extend,
type=noneorfloat,
help="""Output channel sample rate in Hz. If 'None'/'', use the
receiver sample rate. Filtering and resampling will be
performed to achieve the desired rate (set filter specs with
lpf_* options). Must be less than or equal to the receiver
sample rate. (default: None)""",
)
# deprecated by ch_samplerate in 2.6
# if used, all ch_samplerate arguments will be ignored
chgroup.add_argument(
"-i",
"--dec",
dest="decimations",
action=Extend,
type=evalint,
help=argparse.SUPPRESS,
)
chgroup.add_argument(
"+f",
"--ch_centerfreq",
dest="ch_centerfreqs",
action=Extend,
type=noneorboolorfloat,
help="""Output channel center frequency in Hz. Can be 'True'/'auto' to
use the receiver channel target frequency (correcting for
actual tuner offset), 'False' to use the receiver channel
frequency unchanged, or a float value. (default: False)""",
)
chgroup.add_argument(
"+k",
"--scale",
dest="ch_scalings",
action=Extend,
type=evalfloat,
help="""Scale output channel by this factor. (default: 1)""",
)
chgroup.add_argument(
"+n",
"--subchannels",
dest="ch_nsubchannels",
action=Extend,
type=evalint,
help="""Number of subchannels for channelizing the output. A polyphase
filter bank will be applied after the otherwise specified
resampling and frequency shifting to further decimate the
output and divide it into this many equally-spaced channels.
(default: 1)""",
)
chgroup.add_argument(
"--lpf_cutoff",
dest="ch_lpf_cutoffs",
action=Extend,
type=evalfloat,
help="""Normalized low-pass filter cutoff frequency (start of
transition band), where a value of 1 indicates half the
*output* sampling rate. Value in Hz is therefore
(cutoff * out_sample_rate / 2.0). (default: 0.9)""",
)
chgroup.add_argument(
"--lpf_transition_width",
dest="ch_lpf_transition_widths",
action=Extend,
type=evalfloat,
help="""Normalized width (in frequency) of low-pass filter transition
region from pass band to stop band, where a value of 1
indicates half the *output* sampling rate. Value in Hz is
therefore (transition_width * out_sample_rate / 2.0).
(default: 0.2)""",
)
chgroup.add_argument(
"--lpf_attenuation",
dest="ch_lpf_attenuations",
action=Extend,
type=evalfloat,
help="""Minimum attenuation of the low-pass filter stop band in dB.
(default: 80)""",
)
chgroup.add_argument(
"--lpf_pass_ripple",
dest="ch_lpf_pass_ripples",
action=Extend,
type=noneorfloat,
help="""Maximum ripple of the low-pass filter pass band in dB. If
'None', use the same value as `lpf_attenuation`.
(default: None)""",
)
chgroup.add_argument(
"+t",
"--type",
dest="ch_out_types",
action=Extend,
type=noneorstr,
help="""Output channel data type to convert to ('scXX' for complex
integer and 'fcXX' for complex float with XX bits). Use 'None'
to skip conversion and use the USRP or filter output type.
Conversion from float to integer will map a magnitude of 1.0
(after any scaling) to the maximum integer value.
(default: None)""",
)
return parser
def _add_drf_group(parser):
drfgroup = parser.add_argument_group(title="digital_rf")
drfgroup.add_argument(
"-n",
"--file_cadence_ms",
dest="file_cadence_ms",
type=evalint,
help="""Number of milliseconds of data per file.
(default: 1000)""",
)
drfgroup.add_argument(
"-N",
"--subdir_cadence_s",
dest="subdir_cadence_s",
type=evalint,
help="""Number of seconds of data per subdirectory.
(default: 3600)""",
)
drfgroup.add_argument(
"--metadata",
action=Extend,
metavar="{KEY}={VALUE}",
help="""Key, value metadata pairs to include with data.
(default: "")""",
)
drfgroup.add_argument(
"--uuid",
dest="uuid",
help="""Unique ID string for this data collection.
(default: random)""",
)
return parser
def _add_time_group(parser):
timegroup = parser.add_argument_group(title="time")
timegroup.add_argument(
"-s",
"--starttime",
dest="starttime",
help="""Start time of the experiment as datetime (if in ISO8601 format:
2016-01-01T15:24:00Z) or Unix time (if float/int).
(default: start ASAP)""",
)
timegroup.add_argument(
"-e",
"--endtime",
dest="endtime",
help="""End time of the experiment as datetime (if in ISO8601 format:
2016-01-01T16:24:00Z) or Unix time (if float/int).
(default: wait for Ctrl-C)""",
)
timegroup.add_argument(
"-l",
"--duration",
dest="duration",
type=evalint,
help="""Duration of experiment in seconds. When endtime is not given,
end this long after start time. (default: wait for Ctrl-C)""",
)
timegroup.add_argument(
"-p",
"--cycle-length",
dest="period",
type=evalint,
help="""Repeat time of experiment cycle. Align to start of next cycle
if start time has passed. (default: 10)""",
)
return parser
def _build_parser(Parser, *args):
scriptname = os.path.basename(sys.argv[0])
formatter = argparse.RawDescriptionHelpFormatter(scriptname)
width = formatter._width
title = "uhdtodrf"
copyright = "Copyright (c) 2017 Massachusetts Institute of Technology"
shortdesc = "Record data from synchronized USRPs in DigitalRF format."
desc = "\n".join(
(
"*" * width,
"*{0:^{1}}*".format(title, width - 2),
"*{0:^{1}}*".format(copyright, width - 2),
"*{0:^{1}}*".format("", width - 2),
"*{0:^{1}}*".format(shortdesc, width - 2),
"*" * width,
)
)
usage = (
"%(prog)s [-m MBOARD] [-d SUBDEV] [-c CH] [-y ANT] [-f FREQ]"
" [-F OFFSET] \\\n"
"{0:8}[-g GAIN] [-b BANDWIDTH] [-r RATE] [options] DIR\n".format("")
)
epi_pars = [
"""\
Arguments in the "mainboard", "receiver channel", and "output channel"
groups accept multiple values, allowing multiple mainboards and
channels to be specified. Multiple arguments can be provided by
repeating the argument flag, by passing a comma-separated list of
values, or both. Within each argument group, parameters will be grouped
in the order in which they are given to form the complete set of
parameters for each mainboard/channel. For any argument with fewer
values given than the number of mainboards/channels, its values will be
extended by repeatedly cycling through the values given up to the
needed number.
""",
"""\
Arguments in other groups apply to all mainboards/channels (including
the receiver sample rate).
""",
"""\
Example usage:
""",
]
epi_pars = [fill(dedent(s), width) for s in epi_pars]
egtw = TextWrapper(
width=(width - 2),
break_long_words=False,
break_on_hyphens=False,
subsequent_indent=" " * (len(scriptname) + 1),
)
egs = [
"""\
{0} -m 192.168.20.2 -d "A:A A:B" -c h,v -f 95e6 -r 100e6/24
/data/test
""",
"""\
{0} -m 192.168.10.2 -d "A:0" -c ch1 -y "TX/RX" -f 20e6 -F 10e3 -g 20
-b 0 -r 1e6 /data/test
""",
]
egs = [" \\\n".join(egtw.wrap(dedent(s.format(scriptname)))) for s in egs]
epi = "\n" + "\n\n".join(epi_pars + egs) + "\n"
# parse options
parser = Parser(
description=desc,
usage=usage,
epilog=epi,
prefix_chars="-+",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"--version",
action="version",
version="THOR 3.1, using digital_rf {0}".format(drf.__version__),
)
parser.add_argument(
"-q",
"--quiet",
dest="verbose",
action="store_false",
help="""Reduce text output to the screen. (default: False)""",
)
parser = _add_dir_group(parser)
parser = _add_mainboard_group(parser)
parser = _add_receiver_group(parser)
parser = _add_rchannel_group(parser)
parser = _add_ochannel_group(parser)
parser = _add_drf_group(parser)
parser = _add_time_group(parser)
# parser.set_defaults(func=_run_thor)
return parser
def _ops_setup(args):
if args.datadir is None:
args.datadir = args.outdir
del args.outdir
# handle deprecated decimation argument, converting it to sample rate
if args.decimations is not None:
if args.samplerate is None:
args.samplerate = 1e6
args.ch_samplerates = [args.samplerate / d for d in args.decimations]
del args.decimations
# handle deprecated sync_source argument, converting it to clock_sources
# and time_sources
if args.sync_source is not None:
if args.clock_sources is None:
args.clock_sources = [args.sync_source]
if args.time_sources is None:
args.time_sources = [args.sync_source]
del args.sync_source
# separate args.chs (num, name) tuples into args.channels and
# args.channel_names
if args.chs is not None:
args.channels, args.channel_names = map(list, zip(*args.chs))
del args.chs
# remove redundant arguments in dev_args, stream_args, tune_args
if args.dev_args is not None:
try:
dev_args_dict = dict([a.split("=") for a in args.dev_args])
except ValueError:
raise ValueError("Device arguments must be {KEY}={VALUE} pairs.")
args.dev_args = ["{0}={1}".format(k, v) for k, v in dev_args_dict.items()]
if args.stream_args is not None:
try:
stream_args_dict = dict([a.split("=") for a in args.stream_args])
except ValueError:
raise ValueError("Stream arguments must be {KEY}={VALUE} pairs.")
args.stream_args = ["{0}={1}".format(k, v) for k, v in stream_args_dict.items()]
if args.tune_args is not None:
try:
tune_args_dict = dict([a.split("=") for a in args.tune_args])
except ValueError:
raise ValueError("Tune request arguments must be {KEY}={VALUE} pairs.")
args.tune_args = ["{0}={1}".format(k, v) for k, v in tune_args_dict.items()]
# convert metadata strings to a dictionary
if args.metadata is not None:
metadata_dict = {}
for a in args.metadata:
try:
k, v = a.split("=")
except ValueError:
k = None
v = a
try:
v = ast.literal_eval(v)
except ValueError:
pass
if k is None:
metadata_dict.setdefault("metadata", []).append(v)
else:
metadata_dict[k] = v
args.metadata = metadata_dict
# ignore test_settings option if no starttime is set (starting right now)
if args.starttime is None:
args.test_settings = False
options = {k: v for k, v in args._get_kwargs() if v is not None}
runopts = {
k: options.pop(k)
for k in list(options.keys())
if k in ("starttime", "endtime", "duration", "period")
}
return options, runopts
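# Hedged usage sketch (added for illustration, not part of the original
# script): parse an assumed command line into the option/runtime dictionaries
# consumed by Recorder. The flag values and the /tmp/drf_demo directory are
# illustrative only, and nothing is recorded because Recorder is never
# constructed here.
def _example_ops_setup():  # pragma: no cover - illustrative only
    """Turn a sample command line into Recorder keyword options."""
    demo_parser = _build_parser(argparse.ArgumentParser)
    demo_args = demo_parser.parse_args(
        ["-c", "ch0", "-f", "100e6", "-r", "1e6",
         "--metadata", "site=assumed", "/tmp/drf_demo"]
    )
    options, runopts = _ops_setup(demo_args)
    assert options["metadata"] == {"site": "assumed"}
    assert options["channel_names"] == ["ch0"]
    return options, runopts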
if __name__ == "__main__":
parser = _build_parser(argparse.ArgumentParser)
args = parser.parse_args()
options, runopts = _ops_setup(args)
import signal
# handle SIGTERM (getting killed) gracefully by calling sys.exit
def sigterm_handler(signal, frame):
print("Killed")
sys.stdout.flush()
sys.exit(128 + signal)
signal.signal(signal.SIGTERM, sigterm_handler)
rec1 = Recorder(**options)
rec1.run(**runopts)
|
views.py
|
'''
Copyright (C) 2013 TopCoder Inc., All Rights Reserved.
'''
'''
This is the module that defines all the views which will respond to client requests.
Thread Safety:
The implementation is not thread safe but it will be used in a thread-safe manner.
v1.1 - Healthcare Fraud Prevention Release Assembly v1.0
- updated for added StudyID
@author: TCSASSEMBLER
@version: 1.1
'''
from django.template.loader import get_template
from django.template import Context
from django.utils.decorators import method_decorator
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.views.decorators.http import require_http_methods
from urllib.parse import urlencode
from decision_module import helper
from httpservices import DataRequestHandler, handle_deny_operation
from threading import Thread
from validationhelper import check_string
from appliance.config import dbconfig
from appliance.request_persistence import MySQLRequestPersistence
from appliance.request_persistence import RedisRequestPersistence
import isodate
import logging
def translateRequests(requests):
'''
    This method translates a list of requests from sequence (tuple/list) form into dict form.
Thread Safety:
The implementation is not thread safe but it will be used in a thread-safe manner.
@param requests: The partner requests.
@return: The mapped partner requests.
'''
nRequests = []
requestColumns = ['request_id', 'study_id', 'query', 'expiration_time',
'cache_available', 'cache_timestamp', 'status']
for req in requests:
nReq = {}
idx = 0
if dbconfig["type"] == "mysql":
req = req[1:]
for field in requestColumns:
nReq[field] = req[idx]
idx = idx + 1
nRequests.append(nReq)
return nRequests
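# Illustrative example of the translation above (values are hypothetical): a row
#   ('req-1', 'study-7', 'SELECT ...', '2014-01-01T00:00:00', 'true',
#    '2013-12-01T00:00:00', 'pending')
# becomes
#   {'request_id': 'req-1', 'study_id': 'study-7', 'query': 'SELECT ...',
#    'expiration_time': '2014-01-01T00:00:00', 'cache_available': 'true',
#    'cache_timestamp': '2013-12-01T00:00:00', 'status': 'pending'}
# MySQL rows carry one extra leading column (presumably a surrogate id), which
# is why the first element is dropped when dbconfig["type"] == "mysql".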
def get_request_persistence():
"""
Get appropriate db persistence object from config.
"""
if dbconfig["type"]=='redis':
return RedisRequestPersistence()
elif dbconfig["type"] == "mysql":
return MySQLRequestPersistence()
else:
raise ValueError("Invalid db type: " + config.dbconfig["type"])
@require_http_methods(["GET"])
def list_partner_requests(request):
'''
    This is the view function for listing partner requests.
Thread Safety:
The implementation is not thread safe but it will be used in a thread-safe manner.
@param request: the http request
@return: the http response
'''
CLASS_NAME = 'decision_module.views'
LOGGER = logging.getLogger(CLASS_NAME)
# Do logging
signature = CLASS_NAME + '.list_partner_requests'
helper.log_entrance(LOGGER, signature, {'request': request})
p = get_request_persistence()
p.connectionConfig = dbconfig
pending = []
approved = []
denied = []
try:
p.begin()
if dbconfig["type"]=='redis':
pending = translateRequests(p.queryRequests('status=pending', None, None))
approved = translateRequests(p.queryRequests('status=approved', None, None))
denied = translateRequests(p.queryRequests('status=denied', None, None))
        else:  # MySQL; no other possibilities, otherwise an exception would have been raised earlier
pending = translateRequests(p.queryRequests('status="pending"', None, None))
approved = translateRequests(p.queryRequests('status="approved"', None, None))
denied = translateRequests(p.queryRequests('status="denied"', None, None))
finally:
if p.connection:
p.close()
# Render templates
t = get_template('RequestList.html')
ret = HttpResponse(t.render(Context(
{'pending': pending,
'approved': approved if len(approved) > 0 else None,
'denied': denied if len(denied) > 0 else None})))
# Do logging
helper.log_exit(LOGGER, signature, [ret])
return ret
@require_http_methods(["POST"])
def create_partner_request(request):
'''
This is the view function for creating one partner request.
Thread Safety:
The implementation is not thread safe but it will be used in a thread-safe manner.
@param request: the http request
@return: the http response
'''
CLASS_NAME = 'decision_module.views'
LOGGER = logging.getLogger(CLASS_NAME)
# Do logging
signature = CLASS_NAME + '.create_partner_request'
helper.log_entrance(LOGGER, signature, {'request': request})
# Check posted values
try:
check_string('request_id', request.POST['request_id'])
check_string('study_id', request.POST['study_id'])
check_string('query', request.POST['query'])
check_string('expiration_time', request.POST['expiration_time'])
check_string('cache_available', request.POST['cache_available'])
if request.POST['cache_available'] == 'true':
check_string('cache_timestamp', request.POST['cache_timestamp'])
check_string('status', request.POST['status'])
except Exception as e:
helper.log_exception(LOGGER, signature, e)
if dbconfig["type"]=='redis':
fields = [request.POST['request_id'], request.POST['study_id'], request.POST['query'],
request.POST['expiration_time'], request.POST['cache_available'],
request.POST['cache_timestamp'], request.POST['status'],]
    else:  # MySQL - single quotes must be doubled, otherwise the SQL statement is invalid.
fields = [request.POST['request_id'], request.POST['study_id'], request.POST['query'].replace("'", "''"),
request.POST['expiration_time'], request.POST['cache_available'],
request.POST['cache_timestamp'], request.POST['status'],]
p = get_request_persistence()
p.connectionConfig = dbconfig
try:
p.begin()
p.createRequest(fields)
p.commit()
except:
p.rollback()
finally:
if p.connection:
p.close()
# Redirect to /partner_tags
# ret = HttpResponseRedirect('/')
ret = HttpResponse(status=200)
# Do logging
helper.log_exit(LOGGER, signature, [ret])
return ret
@require_http_methods(["POST"])
def approval_partner_request(request):
'''
    This is the view function for approving one partner request.
Thread Safety:
The implementation is not thread safe but it will be used in a thread-safe manner.
@param request: the http request
@return: the http response
'''
CLASS_NAME = 'decision_module.views'
LOGGER = logging.getLogger(CLASS_NAME)
# Do logging
signature = CLASS_NAME + '.approval_partner_request'
helper.log_entrance(LOGGER, signature, {'request': request})
request_id = request.POST['request_id']
p = get_request_persistence()
p.connectionConfig = dbconfig
req = []
try:
p.begin()
if dbconfig["type"]=='redis':
req = p.queryRequests('request_id={0}'.format(request_id), None, None)
if len(req) > 0:
req = req[0]
p.updateRequests('status=approved', 'request_id={0}'.format(request_id))
p.commit()
        else:  # MySQL; no other possibilities, otherwise an exception would have been raised earlier
req = p.queryRequests('request_id="{0}"'.format(request_id), None, None)
if len(req) > 0:
req = req[0]
p.updateRequests('status="approved"', 'request_id="{0}"'.format(request_id))
p.commit()
except:
p.rollback()
finally:
if p.connection:
p.close()
# Kick off a new thread to handle the request
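    # The row layout mirrors translateRequests above: after dropping the extra
    # leading MySQL column, the fields are expected to be (request_id, study_id,
    # query, expiration_time, cache_available, cache_timestamp, status).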
try:
if len(req) == 8:
req = req[1:]
if len(req) < 7:
            raise ValueError('Request is missing parameters')
request_id = req[0]
study_id = req[1]
query = req[2]
expiration_time = isodate.parse_datetime(req[3])
cache_available = 'true' == req[4]
cache_timestamp = None
if req[5] and len(req[5]) > 0:
cache_timestamp = isodate.parse_datetime(req[5])
handler = DataRequestHandler()
t = Thread(target=handler.handle_data_request, args=(request_id, study_id, query,
expiration_time, cache_available,
cache_timestamp, True))
t.daemon = False
t.start()
except Exception as e:
helper.log_exception(LOGGER, signature, e)
# Redirect to /partner_tags
ret = HttpResponseRedirect('/')
# Do logging
helper.log_exit(LOGGER, signature, [ret])
return ret
@require_http_methods(["POST"])
def deny_partner_request(request):
'''
This is the view function for denying one partner request.
Thread Safety:
The implementation is not thread safe but it will be used in a thread-safe manner.
@param request: the http request
@return: the http response
'''
CLASS_NAME = 'decision_module.views'
LOGGER = logging.getLogger(CLASS_NAME)
# Do logging
signature = CLASS_NAME + '.deny_partner_request'
helper.log_entrance(LOGGER, signature, {'request': request})
request_id = request.POST['request_id']
p = get_request_persistence()
p.connectionConfig = dbconfig
req = []
try:
p.begin()
if dbconfig["type"]=='redis':
req = p.queryRequests('request_id={0}'.format(request_id), None, None)
if len(req) > 0:
req = req[0]
p.updateRequests('status=denied', 'request_id={0}'.format(request_id))
p.commit()
        else:  # MySQL; no other possibilities, otherwise an exception would have been raised earlier
req = p.queryRequests('request_id="{0}"'.format(request_id), None, None)
if len(req) > 0:
req = req[0]
p.updateRequests('status="denied"', 'request_id="{0}"'.format(request_id))
p.commit()
except:
p.rollback()
finally:
if p.connection:
p.close()
# Kick off a new thread to handle the request
try:
if len(req) == 8:
req = req[1:]
if len(req) < 7:
            raise ValueError('Request is missing parameters')
request_id = req[0]
t = Thread(target=handle_deny_operation, args=([request_id],))
t.daemon = False
t.start()
except Exception as e:
helper.log_exception(LOGGER, signature, e)
# Redirect to /partner_tags
ret = HttpResponseRedirect('/')
# Do logging
helper.log_exit(LOGGER, signature, [ret])
return ret
|
io_wrap.py
|
#!/usr/bin/env python
from __future__ import print_function
"""Utilities for capturing output from the current process and processes it
starts.
This file is also a test harness for I/O wrapping: run it as a script with a
shell command in the commandline arguments to see how PTY redirection behaves
for that command.
Watch out for bugs in this module. Uncaught exceptions here may prevent their
own tracebacks from being written to the terminal. Disable STDERR wrapping by
setting WANDB_DEBUG to 'true'.
== Resources
The TTY demystified. Great article on Linux terminals, sessions and process groups.
http://www.linusakesson.net/programming/tty/
Pymux, a Python implementation of tmux:
https://github.com/jonathanslenders/pymux
PTY module source code:
https://github.com/python/cpython/blob/master/Lib/pty.py
PTYProcess from Pexpect, a Python implementation of expect (good *nix support):
https://github.com/pexpect/ptyprocess/blob/master/ptyprocess/ptyprocess.py
https://stackoverflow.com/questions/4675728/redirect-stdout-to-a-file-in-python/22434262#22434262
https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/
https://stackoverflow.com/questions/34186035/can-you-fool-isatty-and-log-stdout-and-stderr-separately?rq=1
"""
import atexit
import functools
import io
import logging
import os
try:
import fcntl
import pty
import tty
import termios
except ImportError: # windows
pty = tty = termios = fcntl = None
import signal
import struct
import subprocess
import sys
import tempfile
import threading
import traceback
import platform
import six
from six.moves import queue, shlex_quote
import wandb.env
logger = logging.getLogger(__name__)
SIGWINCH_HANDLER = None
class SimpleTee(object):
"""Monkey patches the given io to write to itself and the object passed in"""
def __init__(self, source_io, destination_io):
self.source_write = source_io.write
self.destination = destination_io
source_io.orig_write = self.source_write
source_io.write = self.write
def write(self, data):
self.source_write(data)
try:
# We need bytes, but sometimes we get strings
data = data.encode('utf-8')
except AttributeError:
pass
self.destination.write(data)
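# Minimal usage sketch (this is how WindowsRedirector.redirect below uses it);
# the file name is hypothetical:
#   log_file = open("run.log", "wb")
#   SimpleTee(sys.stdout, log_file)
#   print("hello")  # goes to the terminal and, utf-8 encoded, into run.log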
class WindowSizeChangeHandler(object):
"""A SIGWINCH handler that keeps a list of FDs to update any time the
window size changes.
This is a singleton initialized by init_sigwinch_handler().
"""
def __init__(self):
self.fds = []
# bind one of these so we can compare instances to each other
self._handler = self._handle_function
def register(self):
try:
old_handler = signal.signal(signal.SIGWINCH, self._handler)
except ValueError: # windows
logger.warn('Setting SIGWINCH handler failed')
else:
if old_handler is not None:
logger.warn('SIGWINCH handler was not None: %r', old_handler)
def unregister(self):
try:
old_handler = signal.signal(signal.SIGWINCH, None)
except ValueError: # windows
logger.warn('Setting SIGWINCH handler failed')
else:
if old_handler is not self._handler:
logger.warn(
'SIGWINCH handler was not from W&B: %r', old_handler)
def add_fd(self, fd):
self.fds.append(fd)
self._set_win_sizes()
def _handle_function(self, signum, frame):
try:
self._set_win_sizes()
except:
logger.exception('Exception during SIGWINCH')
def _set_win_sizes(self):
try:
win_size = fcntl.ioctl(sys.stdout.fileno(),
termios.TIOCGWINSZ, '\0' * 8)
except OSError: # eg. in MPI we can't do this
rows, cols, xpix, ypix = 25, 80, 0, 0
else:
rows, cols, xpix, ypix = struct.unpack('HHHH', win_size)
if cols == 0:
cols = 80
win_size = struct.pack("HHHH", rows, cols, xpix, ypix)
for fd in self.fds:
try:
fcntl.ioctl(fd, termios.TIOCSWINSZ, win_size)
except OSError: # eg. in MPI we can't do this
pass
def init_sigwinch_handler():
global SIGWINCH_HANDLER
if SIGWINCH_HANDLER is None and sys.stdout.isatty() and platform.system() != "Windows" and not wandb.env.is_debug():
SIGWINCH_HANDLER = WindowSizeChangeHandler()
SIGWINCH_HANDLER.register()
def wandb_pty(resize=True):
"""Get a PTY set to raw mode and registered to hear about window size changes.
"""
master_fd, slave_fd = pty.openpty()
# raw mode so carriage returns etc. don't get added by the terminal driver,
# bash for windows blows up on this so we catch the error and do nothing
# TODO(adrian): (when) will this be called on windows?
try:
tty.setraw(master_fd)
except termios.error:
pass
if resize:
if SIGWINCH_HANDLER is not None:
SIGWINCH_HANDLER.add_fd(master_fd)
return master_fd, slave_fd
class Tee(object):
"""Reads raw data from a file and writes it to other files.
Writes synchronously to one file and asynchronously to any number of others.
"""
@classmethod
def pty(cls, sync_dst_file, *async_dst_files):
master_fd, slave_fd = wandb_pty()
master = os.fdopen(master_fd, 'rb')
tee = cls(master, sync_dst_file, *async_dst_files)
tee.tee_file = os.fdopen(slave_fd, 'wb')
return tee
@classmethod
def pipe(cls, sync_dst_file, *async_dst_files):
read_fd, write_fd = os.pipe()
read_file = os.fdopen(read_fd, 'rb')
tee = cls(read_file, sync_dst_file, *async_dst_files)
tee.tee_file = os.fdopen(write_fd, 'wb')
return tee
def __init__(self, src_file, sync_dst_file, *async_dst_files):
"""Constructor.
Args:
src_file: file to read from.
sync_dst_file: file to write to synchronously when `self.write()` is
called.
async_dst_files: files to write to asynchronously
"""
# save the stack at construction time for debugging later
self._origin_stack = '\n'.join(traceback.format_stack())
self.tee_file = None # convenience for users that want a writable file to put things into the tee
self._src_file = src_file
self._sync_dst_file = sync_dst_file
self._async_dst_files = list(async_dst_files)
self._write_queues = []
self._write_threads = []
for f in async_dst_files:
q = queue.Queue()
t = spawn_reader_writer(q.get, functools.partial(self._write, f))
self._write_queues.append(q)
self._write_threads.append(t)
src_fd = self._src_file.fileno()
def read():
# We use `os.read()` instead of `file.read()` because `os.read()` will return
# any non-empty amount of data, blocking only until there is data available to
# be read. On the other hand, `file.read()` waits until its buffer is full.
# Since we use this code for console output, `file.read()`'s stuttering output
# is undesirable.
try:
return os.read(src_fd, 1024)
except OSError:
# errno 5 on linux; happens with PTYs if the slave is closed. mac os just
# returns b'' from os.read().
return six.b('')
self._read_thread = spawn_reader_writer(read, self._write_to_all)
def _write_to_all(self, data):
self._write(self._sync_dst_file, data)
for q in self._write_queues:
q.put(data)
@classmethod
def _write(_, f, data):
if not data:
# windows explodes if you try to write an empty string to a terminal:
# OSError: [WinError 87] The parameter is incorrect
# https://github.com/pytest-dev/py/issues/103
return
i = f.write(data)
if i is not None: # python 3 w/ unbuffered i/o: we need to keep writing
while i < len(data):
i += f.write(data[i:])
def close_join(self):
# TODO(adrian): any way we can clean up the read thread properly? do we need to?
# this hangs in headless mode with python 2. maybe normal behaviour for fdopen on stdout?
#self._src_file.close()
#self._read_thread.join()
self._write_to_all(six.b('')) # empty bytes is the signal to stop
for t in self._write_threads:
t.join()
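# Minimal usage sketch for Tee (file name hypothetical): a pipe-backed tee whose
# writable end duplicates everything to stdout synchronously and to a log file
# asynchronously.
#   log = open("output.log", "wb")
#   tee = Tee.pipe(sys.stdout.buffer, log)
#   tee.tee_file.write(b"captured line\n")
#   tee.tee_file.close()   # EOF on the pipe stops the reader thread
#   tee.close_join()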
def spawn_reader_writer(get_data_fn, put_data_fn):
"""Spawn a thread that reads from a data source and writes to a sink.
The thread will terminate if it receives a Falsey value from the source.
Args:
get_data_fn: Data-reading function. Called repeatedly until it returns
False-y to indicate that the thread should terminate.
put_data_fn: Data-writing function.
Returns: threading.Thread
"""
def _reader_thread():
while True:
out = get_data_fn()
put_data_fn(out)
if not out:
# EOF.
# We've passed this on so things farther down the pipeline will
# know to shut down.
break
t = threading.Thread(target=_reader_thread)
t.daemon = True
t.start()
return t
class WindowsRedirector(object):
"""Simple windows Tee
"""
def __init__(self, from_stream, to_file):
self.from_stream = from_stream
self.to_file = to_file
def redirect(self):
self.tee = SimpleTee(self.from_stream, self.to_file)
def restore(self):
if not self.to_file.closed:
self.to_file.close()
if hasattr(self.from_stream, "orig_write"):
self.from_stream.write = self.from_stream.orig_write
class FileRedirector(object):
"""Redirects a file object to a different file descriptor.
Properties:
redir_file: The file object that gets redirected.
        orig_file: An unbuffered file object that points where `redir_file` originally pointed.
Adapted from
https://stackoverflow.com/questions/4675728/redirect-stdout-to-a-file-in-python/22434262#22434262
"""
def __init__(self, redir_file, to_file):
"""Constructor
Args:
redir_file: (file) The file object to redirect
to_file: (file) The file object `redir_file` should be redirected to.
"""
self.redir_file = redir_file
self._from_fd = redir_file.fileno()
self._to_fd = to_file.fileno()
# copy from_fd before it is overwritten
# NOTE: `self._from_fd` is inheritable on Windows when duplicating a standard stream
# we make this unbuffered because we want to rely on buffers earlier in the I/O chain
self.orig_file = os.fdopen(os.dup(self._from_fd), 'wb', 0)
def redirect(self):
self.redir_file.flush() # flush library buffers that dup2 knows nothing about
# TODO: mirror stdout / err here
os.dup2(self._to_fd, self._from_fd) # $ exec >&to
# This isn't tested properly:
def restore(self):
"""Restore `self.redir_file` to its original state.
"""
# NOTE: dup2 makes `self._from_fd` inheritable unconditionally
self.redir_file.flush()
os.dup2(self.orig_file.fileno(), self._from_fd) # $ exec >&copied
# self.orig_file.close()
#self.orig_file = None
#self.redir_file = None
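# Minimal usage sketch for FileRedirector (file name hypothetical): redirect the
# stdout file descriptor into a file at the OS level, so output from child
# processes and C extensions is captured too, then restore it.
#   target = open("captured.log", "wb")
#   redirector = FileRedirector(sys.stdout, target)
#   redirector.redirect()
#   os.system("echo captured at the fd level")
#   redirector.restore()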
|
environment.py
|
import abc
import consul
import datetime
import etcd
import kazoo.client
import kazoo.exceptions
import os
import psutil
import psycopg2
import json
import shutil
import signal
import six
import subprocess
import tempfile
import threading
import time
import yaml
@six.add_metaclass(abc.ABCMeta)
class AbstractController(object):
def __init__(self, context, name, work_directory, output_dir):
self._context = context
self._name = name
self._work_directory = work_directory
self._output_dir = output_dir
self._handle = None
self._log = None
def _has_started(self):
return self._handle and self._handle.pid and self._handle.poll() is None
def _is_running(self):
return self._has_started()
@abc.abstractmethod
def _is_accessible(self):
"""process is accessible for queries"""
@abc.abstractmethod
def _start(self):
"""start process"""
def start(self, max_wait_limit=5):
if self._is_running():
return True
self._log = open(os.path.join(self._output_dir, self._name + '.log'), 'a')
self._handle = self._start()
assert self._has_started(), "Process {0} is not running after being started".format(self._name)
max_wait_limit *= self._context.timeout_multiplier
for _ in range(max_wait_limit):
if self._is_accessible():
break
time.sleep(1)
else:
assert False,\
"{0} instance is not available for queries after {1} seconds".format(self._name, max_wait_limit)
def stop(self, kill=False, timeout=15, _=False):
term = False
start_time = time.time()
timeout *= self._context.timeout_multiplier
while self._handle and self._is_running():
if kill:
self._handle.kill()
elif not term:
self._handle.terminate()
term = True
time.sleep(1)
if not kill and time.time() - start_time > timeout:
kill = True
if self._log:
self._log.close()
def cancel_background(self):
pass
class PatroniController(AbstractController):
__PORT = 5440
PATRONI_CONFIG = '{}.yml'
""" starts and stops individual patronis"""
def __init__(self, context, name, work_directory, output_dir, custom_config=None):
super(PatroniController, self).__init__(context, 'patroni_' + name, work_directory, output_dir)
PatroniController.__PORT += 1
self._data_dir = os.path.join(work_directory, 'data', name)
self._connstring = None
if custom_config and 'watchdog' in custom_config:
self.watchdog = WatchdogMonitor(name, work_directory, output_dir)
custom_config['watchdog'] = {'driver': 'testing', 'device': self.watchdog.fifo_path, 'mode': 'required'}
else:
self.watchdog = None
self._scope = (custom_config or {}).get('scope', 'batman')
self._config = self._make_patroni_test_config(name, custom_config)
self._closables = []
self._conn = None
self._curs = None
def write_label(self, content):
with open(os.path.join(self._data_dir, 'label'), 'w') as f:
f.write(content)
def read_label(self):
try:
with open(os.path.join(self._data_dir, 'label'), 'r') as f:
return f.read().strip()
except IOError:
return None
def add_tag_to_config(self, tag, value):
with open(self._config) as r:
config = yaml.safe_load(r)
            config['tags'][tag] = value
with open(self._config, 'w') as w:
yaml.safe_dump(config, w, default_flow_style=False)
def _start(self):
if self.watchdog:
self.watchdog.start()
if isinstance(self._context.dcs_ctl, KubernetesController):
self._context.dcs_ctl.create_pod(self._name[8:], self._scope)
os.environ['PATRONI_KUBERNETES_POD_IP'] = '10.0.0.' + self._name[-1]
return subprocess.Popen(['coverage', 'run', '--source=patroni', '-p', 'patroni.py', self._config],
stdout=self._log, stderr=subprocess.STDOUT, cwd=self._work_directory)
def stop(self, kill=False, timeout=15, postgres=False):
if postgres:
return subprocess.call(['pg_ctl', '-D', self._data_dir, 'stop', '-mi', '-w'])
super(PatroniController, self).stop(kill, timeout)
if isinstance(self._context.dcs_ctl, KubernetesController):
self._context.dcs_ctl.delete_pod(self._name[8:])
if self.watchdog:
self.watchdog.stop()
def _is_accessible(self):
cursor = self.query("SELECT 1", fail_ok=True)
if cursor is not None:
cursor.execute("SET synchronous_commit TO 'local'")
return True
def _make_patroni_test_config(self, name, custom_config):
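        # Renders a per-instance config into the output directory, assuming the
        # base <name>.yml template is readable from the current working
        # directory: it assigns this instance's port, points data_dir and
        # logging at instance-specific paths, recursively merges custom_config
        # on top, and derives the parameters later used for client and
        # replication connections.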
patroni_config_name = self.PATRONI_CONFIG.format(name)
patroni_config_path = os.path.join(self._output_dir, patroni_config_name)
with open(patroni_config_name) as f:
config = yaml.safe_load(f)
config.pop('etcd', None)
host = config['postgresql']['listen'].split(':')[0]
config['postgresql']['listen'] = config['postgresql']['connect_address'] = '{0}:{1}'.format(host, self.__PORT)
config['name'] = name
config['postgresql']['data_dir'] = self._data_dir
config['postgresql']['use_unix_socket'] = True
config['postgresql']['parameters'].update({
'logging_collector': 'on', 'log_destination': 'csvlog', 'log_directory': self._output_dir,
'log_filename': name + '.log', 'log_statement': 'all', 'log_min_messages': 'debug1',
'unix_socket_directories': self._data_dir})
if 'bootstrap' in config:
config['bootstrap']['post_bootstrap'] = 'psql -w -c "SELECT 1"'
if 'initdb' in config['bootstrap']:
config['bootstrap']['initdb'].extend([{'auth': 'md5'}, {'auth-host': 'md5'}])
if custom_config is not None:
def recursive_update(dst, src):
for k, v in src.items():
if k in dst and isinstance(dst[k], dict):
recursive_update(dst[k], v)
else:
dst[k] = v
recursive_update(config, custom_config)
with open(patroni_config_path, 'w') as f:
yaml.safe_dump(config, f, default_flow_style=False)
user = config['postgresql'].get('authentication', config['postgresql']).get('superuser', {})
self._connkwargs = {k: user[n] for n, k in [('username', 'user'), ('password', 'password')] if n in user}
self._connkwargs.update({'host': host, 'port': self.__PORT, 'database': 'postgres'})
self._replication = config['postgresql'].get('authentication', config['postgresql']).get('replication', {})
self._replication.update({'host': host, 'port': self.__PORT, 'database': 'postgres'})
return patroni_config_path
def _connection(self):
if not self._conn or self._conn.closed != 0:
self._conn = psycopg2.connect(**self._connkwargs)
self._conn.autocommit = True
return self._conn
def _cursor(self):
if not self._curs or self._curs.closed or self._curs.connection.closed != 0:
self._curs = self._connection().cursor()
return self._curs
def query(self, query, fail_ok=False):
try:
cursor = self._cursor()
cursor.execute(query)
return cursor
except psycopg2.Error:
if not fail_ok:
raise
def check_role_has_changed_to(self, new_role, timeout=10):
bound_time = time.time() + timeout
recovery_status = new_role != 'primary'
while time.time() < bound_time:
cur = self.query("SELECT pg_is_in_recovery()", fail_ok=True)
if cur:
row = cur.fetchone()
if row and row[0] == recovery_status:
return True
time.sleep(1)
return False
def get_watchdog(self):
return self.watchdog
def _get_pid(self):
try:
pidfile = os.path.join(self._data_dir, 'postmaster.pid')
if not os.path.exists(pidfile):
return None
return int(open(pidfile).readline().strip())
except Exception:
return None
def database_is_running(self):
pid = self._get_pid()
if not pid:
return False
try:
os.kill(pid, 0)
except OSError:
return False
return True
def patroni_hang(self, timeout):
hang = ProcessHang(self._handle.pid, timeout)
self._closables.append(hang)
hang.start()
def checkpoint_hang(self, timeout):
pid = self._get_pid()
if not pid:
return False
proc = psutil.Process(pid)
for child in proc.children():
if 'checkpoint' in child.cmdline()[0]:
checkpointer = child
break
else:
return False
hang = ProcessHang(checkpointer.pid, timeout)
self._closables.append(hang)
hang.start()
return True
def cancel_background(self):
for obj in self._closables:
obj.close()
self._closables = []
def terminate_backends(self):
pid = self._get_pid()
if not pid:
return False
proc = psutil.Process(pid)
for p in proc.children():
if 'process' not in p.cmdline()[0]:
p.terminate()
@property
def backup_source(self):
return 'postgres://{username}:{password}@{host}:{port}/{database}'.format(**self._replication)
def backup(self, dest='basebackup'):
subprocess.call([PatroniPoolController.BACKUP_SCRIPT, '--walmethod=none',
'--datadir=' + os.path.join(self._output_dir, dest),
'--dbname=' + self.backup_source])
class ProcessHang(object):
"""A background thread implementing a cancelable process hang via SIGSTOP."""
def __init__(self, pid, timeout):
self._cancelled = threading.Event()
self._thread = threading.Thread(target=self.run)
self.pid = pid
self.timeout = timeout
def start(self):
self._thread.start()
def run(self):
os.kill(self.pid, signal.SIGSTOP)
try:
self._cancelled.wait(self.timeout)
finally:
os.kill(self.pid, signal.SIGCONT)
def close(self):
self._cancelled.set()
self._thread.join()
class AbstractDcsController(AbstractController):
_CLUSTER_NODE = '/service/{0}'
def __init__(self, context, mktemp=True):
work_directory = mktemp and tempfile.mkdtemp() or None
super(AbstractDcsController, self).__init__(context, self.name(), work_directory, context.pctl.output_dir)
def _is_accessible(self):
return self._is_running()
def stop(self, kill=False, timeout=15):
""" terminate process and wipe out the temp work directory, but only if we actually started it"""
super(AbstractDcsController, self).stop(kill=kill, timeout=timeout)
if self._work_directory:
shutil.rmtree(self._work_directory)
def path(self, key=None, scope='batman'):
return self._CLUSTER_NODE.format(scope) + (key and '/' + key or '')
@abc.abstractmethod
def query(self, key, scope='batman'):
""" query for a value of a given key """
@abc.abstractmethod
def cleanup_service_tree(self):
""" clean all contents stored in the tree used for the tests """
@classmethod
def get_subclasses(cls):
for subclass in cls.__subclasses__():
for subsubclass in subclass.get_subclasses():
yield subsubclass
yield subclass
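    # name() below derives the DCS identifier from the class name by stripping
    # the trailing "Controller" (10 characters), e.g. EtcdController -> "etcd",
    # ZooKeeperController -> "zookeeper".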
@classmethod
def name(cls):
return cls.__name__[:-10].lower()
class ConsulController(AbstractDcsController):
def __init__(self, context):
super(ConsulController, self).__init__(context)
os.environ['PATRONI_CONSUL_HOST'] = 'localhost:8500'
self._client = consul.Consul()
self._config_file = None
def _start(self):
self._config_file = self._work_directory + '.json'
with open(self._config_file, 'wb') as f:
f.write(b'{"session_ttl_min":"5s","server":true,"bootstrap":true,"advertise_addr":"127.0.0.1"}')
return subprocess.Popen(['consul', 'agent', '-config-file', self._config_file, '-data-dir',
self._work_directory], stdout=self._log, stderr=subprocess.STDOUT)
def stop(self, kill=False, timeout=15):
super(ConsulController, self).stop(kill=kill, timeout=timeout)
if self._config_file:
os.unlink(self._config_file)
def _is_running(self):
try:
return bool(self._client.status.leader())
except Exception:
return False
def path(self, key=None, scope='batman'):
return super(ConsulController, self).path(key, scope)[1:]
def query(self, key, scope='batman'):
_, value = self._client.kv.get(self.path(key, scope))
return value and value['Value'].decode('utf-8')
def cleanup_service_tree(self):
self._client.kv.delete(self.path(scope=''), recurse=True)
def start(self, max_wait_limit=15):
super(ConsulController, self).start(max_wait_limit)
class EtcdController(AbstractDcsController):
""" handles all etcd related tasks, used for the tests setup and cleanup """
def __init__(self, context):
super(EtcdController, self).__init__(context)
os.environ['PATRONI_ETCD_HOST'] = 'localhost:2379'
self._client = etcd.Client(port=2379)
def _start(self):
return subprocess.Popen(["etcd", "--debug", "--data-dir", self._work_directory],
stdout=self._log, stderr=subprocess.STDOUT)
def query(self, key, scope='batman'):
try:
return self._client.get(self.path(key, scope)).value
except etcd.EtcdKeyNotFound:
return None
def cleanup_service_tree(self):
try:
self._client.delete(self.path(scope=''), recursive=True)
except (etcd.EtcdKeyNotFound, etcd.EtcdConnectionFailed):
return
except Exception as e:
assert False, "exception when cleaning up etcd contents: {0}".format(e)
def _is_running(self):
# if etcd is running, but we didn't start it
try:
return bool(self._client.machines)
except Exception:
return False
class KubernetesController(AbstractDcsController):
def __init__(self, context):
super(KubernetesController, self).__init__(context)
self._namespace = 'default'
self._labels = {"application": "patroni"}
self._label_selector = ','.join('{0}={1}'.format(k, v) for k, v in self._labels.items())
os.environ['PATRONI_KUBERNETES_LABELS'] = json.dumps(self._labels)
os.environ['PATRONI_KUBERNETES_USE_ENDPOINTS'] = 'true'
from kubernetes import client as k8s_client, config as k8s_config
k8s_config.load_kube_config(context='local')
self._client = k8s_client
self._api = self._client.CoreV1Api()
def _start(self):
pass
def create_pod(self, name, scope):
labels = self._labels.copy()
labels['cluster-name'] = scope
metadata = self._client.V1ObjectMeta(namespace=self._namespace, name=name, labels=labels)
spec = self._client.V1PodSpec(containers=[self._client.V1Container(name=name, image='empty')])
body = self._client.V1Pod(metadata=metadata, spec=spec)
self._api.create_namespaced_pod(self._namespace, body)
def delete_pod(self, name):
try:
self._api.delete_namespaced_pod(name, self._namespace, self._client.V1DeleteOptions())
except:
pass
while True:
try:
self._api.read_namespaced_pod(name, self._namespace)
except:
break
def query(self, key, scope='batman'):
if key.startswith('members/'):
pod = self._api.read_namespaced_pod(key[8:], self._namespace)
return (pod.metadata.annotations or {}).get('status', '')
else:
try:
e = self._api.read_namespaced_endpoints(scope + ('' if key == 'leader' else '-' + key), self._namespace)
if key == 'leader':
return e.metadata.annotations[key]
else:
return json.dumps(e.metadata.annotations)
except:
return None
def cleanup_service_tree(self):
try:
self._api.delete_collection_namespaced_pod(self._namespace, label_selector=self._label_selector)
except:
pass
try:
self._api.delete_collection_namespaced_endpoints(self._namespace, label_selector=self._label_selector)
except:
pass
while True:
result = self._api.list_namespaced_pod(self._namespace, label_selector=self._label_selector)
if len(result.items) < 1:
break
def _is_running(self):
return True
class ZooKeeperController(AbstractDcsController):
""" handles all zookeeper related tasks, used for the tests setup and cleanup """
def __init__(self, context, export_env=True):
super(ZooKeeperController, self).__init__(context, False)
if export_env:
os.environ['PATRONI_ZOOKEEPER_HOSTS'] = "'localhost:2181'"
self._client = kazoo.client.KazooClient()
def _start(self):
pass # TODO: implement later
def query(self, key, scope='batman'):
try:
return self._client.get(self.path(key, scope))[0].decode('utf-8')
except kazoo.exceptions.NoNodeError:
return None
def cleanup_service_tree(self):
try:
self._client.delete(self.path(scope=''), recursive=True)
except (kazoo.exceptions.NoNodeError):
return
except Exception as e:
assert False, "exception when cleaning up zookeeper contents: {0}".format(e)
def _is_running(self):
# if zookeeper is running, but we didn't start it
if self._client.connected:
return True
try:
return self._client.start(1) or True
except Exception:
return False
class ExhibitorController(ZooKeeperController):
def __init__(self, context):
super(ExhibitorController, self).__init__(context, False)
os.environ.update({'PATRONI_EXHIBITOR_HOSTS': 'localhost', 'PATRONI_EXHIBITOR_PORT': '8181'})
class PatroniPoolController(object):
BACKUP_SCRIPT = 'features/backup_create.sh'
def __init__(self, context):
self._context = context
self._dcs = None
self._output_dir = None
self._patroni_path = None
self._processes = {}
self.create_and_set_output_directory('')
self.known_dcs = {subclass.name(): subclass for subclass in AbstractDcsController.get_subclasses()}
@property
def patroni_path(self):
if self._patroni_path is None:
cwd = os.path.realpath(__file__)
while True:
cwd, entry = os.path.split(cwd)
if entry == 'features' or cwd == '/':
break
self._patroni_path = cwd
return self._patroni_path
@property
def output_dir(self):
return self._output_dir
def start(self, name, max_wait_limit=20, custom_config=None):
if name not in self._processes:
self._processes[name] = PatroniController(self._context, name, self.patroni_path,
self._output_dir, custom_config)
self._processes[name].start(max_wait_limit)
def __getattr__(self, func):
if func not in ['stop', 'query', 'write_label', 'read_label', 'check_role_has_changed_to', 'add_tag_to_config',
'get_watchdog', 'database_is_running', 'checkpoint_hang', 'patroni_hang',
'terminate_backends', 'backup']:
raise AttributeError("PatroniPoolController instance has no attribute '{0}'".format(func))
def wrapper(name, *args, **kwargs):
return getattr(self._processes[name], func)(*args, **kwargs)
return wrapper
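    # __getattr__ above lets callers address a specific Patroni instance by
    # name, e.g. (hypothetical name) pctl.stop('postgres0') dispatches to
    # self._processes['postgres0'].stop().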
def stop_all(self):
for ctl in self._processes.values():
ctl.cancel_background()
ctl.stop()
self._processes.clear()
def create_and_set_output_directory(self, feature_name):
feature_dir = os.path.join(self.patroni_path, 'features/output', feature_name.replace(' ', '_'))
if os.path.exists(feature_dir):
shutil.rmtree(feature_dir)
os.makedirs(feature_dir)
self._output_dir = feature_dir
def clone(self, from_name, cluster_name, to_name):
f = self._processes[from_name]
custom_config = {
'scope': cluster_name,
'bootstrap': {
'method': 'pg_basebackup',
'pg_basebackup': {
'command': self.BACKUP_SCRIPT + ' --walmethod=stream --dbname=' + f.backup_source
}
},
'postgresql': {
'parameters': {
'archive_mode': 'on',
'archive_command': 'mkdir -p {0} && test ! -f {0}/%f && cp %p {0}/%f'.format(
os.path.join(self._output_dir, 'wal_archive'))
},
'authentication': {
'superuser': {'password': 'zalando1'},
'replication': {'password': 'rep-pass1'}
}
}
}
self.start(to_name, custom_config=custom_config)
def bootstrap_from_backup(self, name, cluster_name):
custom_config = {
'scope': cluster_name,
'bootstrap': {
'method': 'backup_restore',
'backup_restore': {
'command': 'features/backup_restore.sh --sourcedir=' + os.path.join(self._output_dir, 'basebackup'),
'recovery_conf': {
'recovery_target_action': 'promote',
'recovery_target_timeline': 'latest',
'restore_command': 'cp {0}/wal_archive/%f %p'.format(self._output_dir)
}
}
},
'postgresql': {
'authentication': {
'superuser': {'password': 'zalando2'},
'replication': {'password': 'rep-pass2'}
}
}
}
self.start(name, custom_config=custom_config)
@property
def dcs(self):
if self._dcs is None:
self._dcs = os.environ.pop('DCS', 'etcd')
assert self._dcs in self.known_dcs, 'Unsupported dcs: ' + self._dcs
return self._dcs
class WatchdogMonitor(object):
"""Testing harness for emulating a watchdog device as a named pipe. Because we can't easily emulate ioctl's we
require a custom driver on Patroni side. The device takes no action, only notes if it was pinged and/or triggered.
"""
def __init__(self, name, work_directory, output_dir):
self.fifo_path = os.path.join(work_directory, 'data', 'watchdog.{0}.fifo'.format(name))
self.fifo_file = None
self._stop_requested = False # Relying on bool setting being atomic
self._thread = None
self.last_ping = None
self.was_pinged = False
self.was_closed = False
self._was_triggered = False
self.timeout = 60
self._log_file = open(os.path.join(output_dir, 'watchdog.{0}.log'.format(name)), 'w')
self._log("watchdog {0} initialized".format(name))
def _log(self, msg):
tstamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S,%f")
self._log_file.write("{0}: {1}\n".format(tstamp, msg))
def start(self):
assert self._thread is None
self._stop_requested = False
self._log("starting fifo {0}".format(self.fifo_path))
fifo_dir = os.path.dirname(self.fifo_path)
if os.path.exists(self.fifo_path):
os.unlink(self.fifo_path)
elif not os.path.exists(fifo_dir):
os.mkdir(fifo_dir)
os.mkfifo(self.fifo_path)
self.last_ping = time.time()
self._thread = threading.Thread(target=self.run)
self._thread.start()
def run(self):
try:
while not self._stop_requested:
self._log("opening")
self.fifo_file = os.open(self.fifo_path, os.O_RDONLY)
try:
self._log("Fifo {0} connected".format(self.fifo_path))
self.was_closed = False
while not self._stop_requested:
c = os.read(self.fifo_file, 1)
if c == b'X':
self._log("Stop requested")
return
elif c == b'':
self._log("Pipe closed")
break
elif c == b'C':
command = b''
c = os.read(self.fifo_file, 1)
while c != b'\n' and c != b'':
command += c
c = os.read(self.fifo_file, 1)
command = command.decode('utf8')
if command.startswith('timeout='):
self.timeout = int(command.split('=')[1])
self._log("timeout={0}".format(self.timeout))
elif c in [b'V', b'1']:
cur_time = time.time()
if cur_time - self.last_ping > self.timeout:
self._log("Triggered")
self._was_triggered = True
if c == b'V':
self._log("magic close")
self.was_closed = True
elif c == b'1':
self.was_pinged = True
self._log("ping after {0} seconds".format(cur_time - (self.last_ping or cur_time)))
self.last_ping = cur_time
else:
self._log('Unknown command {0} received from fifo'.format(c))
finally:
self.was_closed = True
self._log("closing")
os.close(self.fifo_file)
except Exception as e:
self._log("Error {0}".format(e))
finally:
self._log("stopping")
self._log_file.flush()
if os.path.exists(self.fifo_path):
os.unlink(self.fifo_path)
def stop(self):
self._log("Monitor stop")
self._stop_requested = True
try:
if os.path.exists(self.fifo_path):
fd = os.open(self.fifo_path, os.O_WRONLY)
os.write(fd, b'X')
os.close(fd)
except Exception as e:
self._log("err while closing: {0}".format(str(e)))
if self._thread:
self._thread.join()
self._thread = None
def reset(self):
self._log("reset")
self.was_pinged = self.was_closed = self._was_triggered = False
@property
def was_triggered(self):
delta = time.time() - self.last_ping
triggered = self._was_triggered or not self.was_closed and delta > self.timeout
self._log("triggered={0}, {1}s left".format(triggered, self.timeout - delta))
return triggered
# actions to execute on start/stop of the tests and before running individual features
def before_all(context):
os.environ.update({'PATRONI_RESTAPI_USERNAME': 'username', 'PATRONI_RESTAPI_PASSWORD': 'password'})
context.ci = 'TRAVIS_BUILD_NUMBER' in os.environ or 'BUILD_NUMBER' in os.environ
context.timeout_multiplier = 2 if context.ci else 1
context.pctl = PatroniPoolController(context)
context.dcs_ctl = context.pctl.known_dcs[context.pctl.dcs](context)
context.dcs_ctl.start()
try:
context.dcs_ctl.cleanup_service_tree()
except AssertionError: # after_all handlers won't be executed in before_all
context.dcs_ctl.stop()
raise
def after_all(context):
context.dcs_ctl.stop()
subprocess.call(['coverage', 'combine'])
subprocess.call(['coverage', 'report'])
def before_feature(context, feature):
""" create per-feature output directory to collect Patroni and PostgreSQL logs """
context.pctl.create_and_set_output_directory(feature.name)
def after_feature(context, feature):
""" stop all Patronis, remove their data directory and cleanup the keys in etcd """
context.pctl.stop_all()
shutil.rmtree(os.path.join(context.pctl.patroni_path, 'data'))
context.dcs_ctl.cleanup_service_tree()
|
main.py
|
from kivy.config import Config
import os
from os import listdir
from os.path import isfile, join
from kivy.core.window import Window
from kivymd.app import MDApp
import sqlite3
from kivy.uix.widget import Widget
from kivy.properties import ObjectProperty
from GUI.popups import show_message_popup
from kivy.uix.modalview import ModalView
from kivy.uix.dropdown import DropDown
from kivy.uix.button import Button
from GUI.modified_classes import DataButton
from GUI.login_view import LoginView
from GUI.settings_view import SettingsView
from GUI.settings_panel_view import SettingsPanelView
from GUI.data_manager import DataManager
from threading import Thread
import os.path
from DATA.database_manager import clean_table
import concurrent.futures
Config.set('kivy', 'exit_on_escape', '0')
# Program needs this to import LocationsMapView class (IGNORE INTERPRETER WARNING).
from GUI.locations_mapview import LocationsMapView
class MainLayout(Widget):
btn_toggle_airports = ObjectProperty(None) # Hold a reference to the "Show Airports" button after the graphics are rendered.
btn_toggle_airplanes = ObjectProperty(None) # Hold a reference to the "Show Airplanes" button after the graphics are rendered.
locations_map = ObjectProperty(None) # Hold a reference to the map after the graphics are rendered.
airports_search_bar = ObjectProperty(None) # Hold a reference to the airports search bar after the graphics are rendered.
airplanes_search_bar = ObjectProperty(None) # Hold a reference to the airplanes search bar after the graphics are rendered.
settings_panel = ObjectProperty(None) # Hold a reference to the settings panel after the graphics are rendered.
def __init__(self):
super(MainLayout, self).__init__()
self.suggestions_dropdown = DropDown() # Declare and initialize the suggestions drop-down object for both search bars.
self.app = MDApp.get_running_app() # Hold a reference to the main class which inherits from App to jump-start the app.
def toggle_airports(self):
"""
        Allows the application to add the airports found within the field of view.
:return: None
"""
if self.locations_map.show_airports:
self.locations_map.show_airports = False
else:
if self.locations_map.zoom > 5:
self.locations_map.show_airports = True
self.locations_map.start_getting_locations_in_fov()
else:
self.btn_toggle_airports.state = 'normal'
show_message_popup("Zoom level must be greater than 5.")
def toggle_airplanes(self):
"""
Allows the application to add the airplanes found within the field of view.
:return: None
"""
if self.locations_map.show_airplanes:
self.locations_map.show_airplanes = False
else:
if self.locations_map.zoom > 5:
self.locations_map.show_airplanes = True
self.locations_map.start_getting_locations_in_fov()
else:
self.btn_toggle_airplanes.state = 'normal'
show_message_popup("Zoom level must be greater than 5.")
def get_airport_suggestions(self):
"""
Renders the airports suggestions as options within a drop-down menu
based on the text given by the user within the airports search bar.
:return: None
"""
if not self.airports_search_bar.focus: # If the function gets called after the user has chosen option,
self.suggestions_dropdown.dismiss() # hide drop-down.
return
else: # else, if the function is called to select option,
self.suggestions_dropdown.dismiss() # hide previous drop-down.
self.suggestions_dropdown = DropDown()
airports_data = self.app.data_manager.airports_tree_manager.get_in_order_list(self.app.data_manager.airports_tree, self.airports_search_bar.text.upper())
airport_id_index = self.app.data_manager.airports_tree_manager.index
if airports_data is None:
btn_suggestion = Button(text='NOT FOUND', size_hint_y=None, height=44)
self.suggestions_dropdown.add_widget(btn_suggestion)
else:
for airport_data in airports_data:
btn_suggestion = DataButton(data=airport_data, text=airport_data[airport_id_index], size_hint_y=None, height=44)
btn_suggestion.bind(on_release=lambda btn_suggestion_ref: self.focus_on_airport(btn_suggestion_ref))
self.suggestions_dropdown.add_widget(btn_suggestion)
self.suggestions_dropdown.bind(on_select=lambda instance, btn_suggestion_ref: setattr(self.airports_search_bar, 'text', btn_suggestion_ref))
self.suggestions_dropdown.open(self.airports_search_bar)
self.airports_search_bar.bind(on_parent=self.suggestions_dropdown.dismiss)
def focus_on_airport(self, btn_suggestion):
"""
Focuses the map's current field of view on the chosen airport.
:param btn_suggestion: The button carrying the airport's information.
:return: None
"""
self.locations_map.remove_airports()
self.locations_map.remove_airplanes()
self.suggestions_dropdown.select(btn_suggestion.text)
self.locations_map.zoom = 10
self.locations_map.center_on(btn_suggestion.data[15], btn_suggestion.data[16])
self.airports_search_bar.focus = False
self.locations_map.focus_on_airport = True
self.locations_map.focus_on_airplane = True
#self.locations_map.add_airport(btn_suggestion.data)
Thread(target=self.app.data_manager.get_potential_collisions,
args=(self.app.data_manager.collision_forecaster.get_potential_collisions_from_airport,
(btn_suggestion.data[15], btn_suggestion.data[16]),)).start()
self.locations_map.get_locations_in_fov(airport_focus=True, airplane_focus=True)
def get_airplane_suggestions(self):
"""
Renders the airplanes suggestions as options within a drop-down menu
based on the text given by the user within the airplanes search bar.
:return: None
"""
if not self.airplanes_search_bar.focus: # If the function gets called after the user has chosen option,
self.suggestions_dropdown.dismiss() # hide drop-down.
return
else: # else, if the function is called to select option,
self.suggestions_dropdown.dismiss() # hide previous drop-down.
self.suggestions_dropdown = DropDown()
airplanes_data = self.app.data_manager.airplanes_tree_manager.get_in_order_list(self.app.data_manager.airplanes_tree, self.airplanes_search_bar.text.upper())
airplane_id_index = self.app.data_manager.airplanes_tree_manager.index
if airplanes_data is None:
btn_suggestion = Button(text='NOT FOUND', size_hint_y=None, height=44)
self.suggestions_dropdown.add_widget(btn_suggestion)
else:
for airplane_data in airplanes_data:
btn_suggestion = DataButton(data=airplane_data, text=airplane_data[airplane_id_index], size_hint_y=None, height=44)
btn_suggestion.bind(on_release=lambda btn_suggestion_ref: self.focus_on_airplane(btn_suggestion_ref))
self.suggestions_dropdown.add_widget(btn_suggestion)
self.suggestions_dropdown.bind(on_select=lambda instance, btn_suggestion_ref: setattr(self.airplanes_search_bar, 'text', btn_suggestion_ref))
self.suggestions_dropdown.open(self.airplanes_search_bar)
self.airplanes_search_bar.bind(on_parent=self.suggestions_dropdown.dismiss)
def focus_on_airplane(self, btn_suggestion):
"""
Focuses the map's current field of view on the chosen airplane.
:param btn_suggestion: The button carrying the airplane's information.
:return: None
"""
self.locations_map.remove_airports()
self.locations_map.remove_airplanes()
self.suggestions_dropdown.select(btn_suggestion.text)
self.locations_map.zoom = 10
self.locations_map.center_on(btn_suggestion.data[6], btn_suggestion.data[7])
self.airports_search_bar.focus = False
self.locations_map.focus_on_airplane = True
self.locations_map.focus_on_airport = True
#self.locations_map.add_airplane(btn_suggestion.data)
Thread(target=self.app.data_manager.get_potential_collisions,
args=(self.app.data_manager.collision_forecaster.get_potential_collisions_from_plane, btn_suggestion.data[0],)).start()
self.locations_map.get_locations_in_fov(airplane_focus=True, airport_focus=True)
@staticmethod
def open_settings_window():
"""
Opens the settings window.
:return: None
"""
settings_window = ModalView(size_hint=(0.5, 0.5), auto_dismiss=False)
settings_window.add_widget(SettingsView(settings_window))
settings_window.open()
def close_app(self):
"""
Cleans the airplanes database information, deletes the rotated airplanes images and closes the application.
:return: None
"""
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
db_path = os.path.join(BASE_DIR, "..", "DATA", "AIRCRAFT_COLLISION_FORECAST_SYSTEM.db")
clean_table(db_path, 'AIRPLANES')
        img_path = os.path.join(BASE_DIR, "..", "GUI", "IMAGE")
# img_path = 'GUI\\IMAGE\\'
img_file_names = [file_name for file_name in listdir(img_path) if isfile(join(img_path, file_name))]
for file_name in img_file_names:
if file_name not in ('map_marker.png', 'airplane_marker.png', 'collision_marker.png'):
os.remove(os.path.join(img_path, file_name))
print('Closing app')
self.app.root_window.close()
class MainApp(MDApp):
airports_connection = None
airports_cursor = None
data_manager = None
def build(self):
"""
Builds the application main layout.
:return: The application's main layout.
"""
self.main_layout = MainLayout()
self.main_layout.settings_panel.load_settings()
return self.main_layout
def on_start(self):
"""
Sets the main window's size, opens airports database connection, decides to render the login window
based on the saved settings, and initializes the data manager once the application starts.
:return: None
"""
# Config.set('graphics', 'width', '1050')
# Config.set('graphics', 'height', '800')
# Config.write()
Window.clearcolor = (0, 0, 0, 1)
Window.size = (1200, 800)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
db_path = os.path.join(BASE_DIR, "..", "DATA", "global_airports.db")
self.airports_connection = sqlite3.connect(db_path)
self.airports_cursor = self.airports_connection.cursor()
db_path = os.path.join(BASE_DIR, "..", "DATA", "AIRCRAFT_COLLISION_FORECAST_SYSTEM.db")
settings_connection = sqlite3.connect(db_path)
settings_cursor = settings_connection.cursor()
query = f"SELECT STATE FROM SETTINGS WHERE NAME = 'SHOW LOGIN WINDOW'"
settings_cursor.execute(query)
result = settings_cursor.fetchall()
settings_connection.close()
if result[0][0] == '1':
login_window = ModalView(size_hint=(0.5, 0.5), auto_dismiss=False)
login_window.add_widget(LoginView(login_window))
login_window.open()
else:
self.data_manager = DataManager(airport_id_index=2, airplane_id_index=0)
Thread(target=self.data_manager.load_airports).start()
def on_stop(self):
self.main_layout.close_app()
if __name__ == "__main__":
MainApp().run()
|
ultrasound.py
|
import sys
import RPi.GPIO as GPIO
import relay
import time
from threading import Thread
import datetime
starting = 0
ending = 0
current_distance = 0
down = 0
# relay.cleanup(True)
def calculate_distance(pin):
global ending
global current_distance
global down
ending = time.time()
now = datetime.datetime.now()
if now.hour > 7:
current_distance = 17150 * (ending - starting) - 10
if current_distance < 50:
down += 1
if down >= 1 and current_distance > 50:
relay.toggle(relay.IN1)
down = 0
GPIO.add_event_detect(relay.U_ECHO, GPIO.FALLING, callback=calculate_distance, bouncetime=5)
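# Rough sketch of the measurement flow, assuming an HC-SR04-style sensor:
# get_distance() pulses the trigger pin and records the start time; the FALLING
# edge on the echo pin then fires calculate_distance(), which converts the
# elapsed time to centimetres. Sound travels roughly 34300 cm/s and the echo
# covers the distance twice, hence ~17150 * elapsed; the extra -10 looks like a
# fixed calibration offset.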
def get_distance():
global current_distance
global starting
GPIO.setup(relay.U_TRIG, GPIO.OUT)
relay.on(relay.U_TRIG)
starting = time.time()
time.sleep(0.000001)
relay.off(relay.U_TRIG)
def collect_distance():
global current_distance
try:
while True:
get_distance()
time.sleep(0.01)
except:
GPIO.cleanup()
Thread(target=collect_distance).start()
|
conftest.py
|
import http.server
import os
import threading
from typing import Generator
import pytest
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from sylenium import driver_manager
from tests.integration.webserver.tcp_server import IntegrationTCPServer
@pytest.fixture
def default_driver(default_driver) -> RemoteWebDriver:
return default_driver
@pytest.fixture
def webserver() -> Generator[IntegrationTCPServer, None, None]:
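    # Serves the files under tests/integration/http_content on an ephemeral
    # port (binding to port 0 lets the OS pick one) in a daemon thread, so
    # tests can use server.server_address without hard-coding a port.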
handler = http.server.SimpleHTTPRequestHandler
server = IntegrationTCPServer(("localhost", 0), handler)
os.chdir(os.path.join(os.path.dirname(os.path.realpath(__file__)), "http_content"))
print(f"Http server started for integration testing on: {server.server_address}")
try:
thread = threading.Thread(target=server.serve_forever, daemon=True)
thread.start()
yield server
except Exception:
pass
@pytest.fixture(autouse=True)
def close_threaded_drivers(request):
request.addfinalizer(driver_manager.terminate_all)
|
collectinfotest.py
|
import subprocess, time, os
from subprocess import call
from threading import Thread
from clitest.cli_base import CliBaseTest
from remote.remote_util import RemoteMachineHelper,\
RemoteMachineShellConnection
from couchbase_helper.documentgenerator import BlobGenerator
from membase.api.rest_client import RestConnection, RestHelper
from testconstants import LOG_FILE_NAMES
from couchbase_helper.document import View
LOG_FILE_NAME_LIST = ["couchbase.log", "diag.log", "ddocs.log", "ini.log", "syslog.tar.gz",
"ns_server.couchdb.log", "ns_server.debug.log", "ns_server.babysitter.log",
"ns_server.error.log", "ns_server.info.log",
"ns_server.views.log", "stats.log",
"memcached.log", "ns_server.mapreduce_errors.log",
"ns_server.stats.log", "ns_server.xdcr_errors.log",
"ns_server.xdcr.log"]
class CollectinfoTests(CliBaseTest):
def setUp(self):
super(CollectinfoTests, self).setUp()
self.log_filename = self.input.param("filename", "info")
self.doc_ops = self.input.param("doc_ops", None)
self.expire_time = self.input.param("expire_time", 5)
self.value_size = self.input.param("value_size", 256)
self.node_down = self.input.param("node_down", False)
if self.doc_ops is not None:
self.doc_ops = self.doc_ops.split(";")
def tearDown(self):
super(CollectinfoTests, self).tearDown()
def collectinfo_test(self):
"""We use cbcollect_info to automatically collect the logs for server node
First we load some items to the node. Optionally you can do some mutation
against these items. Then we use cbcollect_info the automatically generate
the zip file containing all the logs about the node. We want to verify we have
all the log files according to the LOG_FILE_NAME_LIST and in stats.log, we have
stats for all the buckets we have created"""
gen_load = BlobGenerator('nosql', 'nosql-', self.value_size,
end=self.num_items)
gen_update = BlobGenerator('nosql', 'nosql-', self.value_size,
end=(self.num_items // 2 - 1))
gen_expire = BlobGenerator('nosql', 'nosql-', self.value_size,
start=self.num_items // 2,
end=(self.num_items * 3 // 4 - 1))
gen_delete = BlobGenerator('nosql', 'nosql-', self.value_size,
start=self.num_items * 3 // 4,
end=self.num_items)
self._load_all_buckets(self.master, gen_load, "create", 0)
if(self.doc_ops is not None):
if("update" in self.doc_ops):
self._load_all_buckets(self.master, gen_update, "update", 0)
if("delete" in self.doc_ops):
self._load_all_buckets(self.master, gen_delete, "delete", 0)
if("expire" in self.doc_ops):
self._load_all_buckets(self.master, gen_expire, "update",\
self.expire_time)
self.sleep(self.expire_time + 1)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
self.shell.delete_files("%s.zip" % (self.log_filename))
""" This is the folder generated after unzip the log package """
self.shell.delete_files("cbcollect_info*")
cb_server_started = False
if self.node_down:
""" set autofailover to off """
rest = RestConnection(self.master)
rest.update_autofailover_settings(False, 60)
if self.os == 'linux':
output, error = self.shell.execute_command(
"killall -9 memcached & killall -9 beam.smp")
self.shell.log_command_output(output, error)
output, error = self.shell.execute_cbcollect_info("%s.zip"
% (self.log_filename))
if self.os != "windows":
if len(error) > 0:
if self.node_down:
shell = RemoteMachineShellConnection(self.master)
shell.start_server()
self.sleep(15)
shell.disconnect()
raise Exception("Command throw out error: %s " % error)
for output_line in output:
if output_line.find("ERROR") >= 0 or output_line.find("Error") >= 0:
if "from http endpoint" in output_line.lower():
continue
""" remove this code when bug in MB-45867 is fixed """
if "error occurred getting server guts" in output_line.lower() or \
"error: unable to retrieve statistics" in output_line.lower():
continue
""" *************************** """
if self.node_down:
shell = RemoteMachineShellConnection(self.master)
shell.start_server()
self.sleep(15)
shell.disconnect()
raise Exception("Command throw out error: %s " % output_line)
try:
if self.node_down:
if self.os == 'linux':
self.shell = RemoteMachineShellConnection(self.master)
self.shell.start_server()
self.sleep(30, "wait for server up completely")
rest = RestConnection(self.master)
if RestHelper(rest).is_ns_server_running(timeout_in_seconds=60):
cb_server_started = True
else:
self.fail("CB server failed to start")
self.verify_results(self, self.log_filename)
finally:
if self.node_down and not cb_server_started:
if self.os == 'linux':
self.shell.start_server()
rest = RestConnection(self.master)
if not RestHelper(rest).is_ns_server_running(timeout_in_seconds=60):
self.fail("CB server failed to start")
def test_cbcollectinfo_detect_container(self):
""" this test only runs inside docker host and
detect if a node is a docker container.
It should run with param skip_init_check_cbserver=true """
docker_id = None
if "." in self.ip:
self.fail("This test only run in docker host")
elif self.ip is not None:
docker_id = self.ip
os.system("docker exec %s %scbcollect_info testlog.zip"
% (docker_id, self.cli_command_path))
os.system("docker cp %s:/testlog.zip ." % (docker_id))
os.system("unzip testlog.zip")
output = call("cd cbcollect_info_*; grep 'docker' ./* ")
if output and "docker" in output:
self.log.info("cbcollect log detected docker container")
else:
self.fail("cbcollect info could not detect docker container")
os.system("docker exec %s rm testlog.zip" % (docker_id))
def test_not_collect_stats_hash_in_cbcollectinfo(self):
""" this test verifies we don't collect stats hash
in when run cbcollectinfo
params: nodes_init=2
"""
check_version = ["5.1.2", "5.5.1"]
mesg = "memcached stats ['hash', 'detail']"
        if self.cb_version[:5] not in check_version \
                and float(self.cb_version[:3]) < 6.0:
self.log.info("\nThis version {0} does not need to test {1}"\
.format(self.cb_version, mesg))
return
self.shell.delete_files("{0}.zip".format(self.log_filename))
""" This is the folder generated after unzip the log package """
self.shell.delete_files("cbcollect_info*")
output, error = self.shell.execute_cbcollect_info("%s.zip"
% (self.log_filename))
if output:
for x in output:
if x.startswith(mesg):
self.fail("cbcollectinfo should not collect {0}".format(mesg))
self.log.info("cbcollectinfo does not collect {0}".format(mesg))
@staticmethod
def verify_results(self, output_file_name):
try:
os = "linux"
zip_file = "%s.zip" % (output_file_name)
info = self.shell.extract_remote_info()
type = info.type.lower()
if type == 'windows':
os = "windows"
if os == "linux":
command = "unzip %s" % (zip_file)
output, error = self.shell.execute_command(command)
self.sleep(2)
if self.debug_logs:
self.shell.log_command_output(output, error)
if len(error) > 0:
raise Exception("unable to unzip the files. Check unzip command output for help")
command = "ls cbcollect_info*/"
output, error = self.shell.execute_command(command)
if self.debug_logs:
self.shell.log_command_output(output, error)
if len(error) > 0:
raise Exception("unable to list the files. Check ls command output for help")
missing_logs = False
nodes_services = RestConnection(self.master).get_nodes_services()
for node, services in list(nodes_services.items()):
for service in services:
if service.encode("ascii") == "fts" and \
self.master.ip in node and \
"fts_diag.json" not in LOG_FILE_NAMES:
LOG_FILE_NAMES.append("fts_diag.json")
if service.encode("ascii") == "index" and \
self.master.ip in node:
if "indexer_mprof.log" not in LOG_FILE_NAMES:
LOG_FILE_NAMES.append("indexer_mprof.log")
if "indexer_pprof.log" not in LOG_FILE_NAMES:
LOG_FILE_NAMES.append("indexer_pprof.log")
if self.debug_logs:
self.log.info('\nlog files sample: {0}'.format(LOG_FILE_NAMES))
self.log.info('\nlog files in zip: {0}'.format(output))
for x in LOG_FILE_NAMES:
find_log = False
for output_line in output:
if output_line.find(x) >= 0:
find_log = True
if not find_log:
# missing syslog.tar.gz in mac as in ticket MB-9110
# need to remove 3 lines below if it is fixed in 2.2.1
# in mac os
if x == "syslog.tar.gz" and info.distribution_type.lower() == "mac":
missing_logs = False
else:
missing_logs = True
self.log.error("The log zip file miss %s" % (x))
missing_buckets = False
if not self.node_down:
for bucket in self.buckets:
command = "grep %s cbcollect_info*/stats.log" % (bucket.name)
output, error = self.shell.execute_command(command)
if self.debug_logs:
self.shell.log_command_output(output, error)
if len(error) > 0:
raise Exception("unable to grep key words. Check grep command output for help")
if len(output) == 0:
missing_buckets = True
self.log.error("%s stats are missed in stats.log" % (bucket.name))
command = "du -s cbcollect_info*/*"
output, error = self.shell.execute_command(command)
if self.debug_logs:
self.shell.log_command_output(output, error)
empty_logs = False
if len(error) > 0:
raise Exception("unable to list file size. Check du command output for help")
for output_line in output:
output_line = output_line.split()
file_size = int(output_line[0])
if "dist_cfg" in output_line[1]:
continue
if self.debug_logs:
print(("File size: ", file_size))
if file_size == 0:
if "kv_trace" in output_line[1] and self.node_down:
continue
else:
empty_logs = True
self.log.error("%s is empty" % (output_line[1]))
if missing_logs:
raise Exception("Bad log file package generated. Missing logs")
if missing_buckets:
raise Exception("Bad stats.log which miss some bucket information")
if empty_logs:
raise Exception("Collect empty log files")
elif os == "windows":
# try to figure out what command works for windows for verification
pass
finally:
self.shell.delete_files(zip_file)
self.shell.delete_files("cbcollect_info*")
def collectinfo_test_for_views(self):
self.default_design_doc_name = "Doc1"
self.view_name = self.input.param("view_name", "View")
self.generate_map_reduce_error = self.input.param("map_reduce_error", False)
self.default_map_func = 'function (doc) { emit(doc.age, doc.first_name);}'
self.gen_load = BlobGenerator('couch', 'cb-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, self.gen_load, "create", 0)
self.reduce_fn = "_count"
expected_num_items = self.num_items
if self.generate_map_reduce_error:
self.reduce_fn = "_sum"
expected_num_items = None
view = View(self.view_name, self.default_map_func, self.reduce_fn, dev_view=False)
self.cluster.create_view(self.master, self.default_design_doc_name, view,
'default', self.wait_timeout * 2)
query = {"stale": "false", "connection_timeout": 60000}
try:
self.cluster.query_view(self.master, self.default_design_doc_name, self.view_name, query,
expected_num_items, 'default', timeout=self.wait_timeout)
except Exception as ex:
if not self.generate_map_reduce_error:
raise ex
self.shell.execute_cbcollect_info("%s.zip" % (self.log_filename))
self.verify_results(self, self.log_filename)
def test_default_collect_logs_in_cluster(self):
"""
In a cluster, if we run cbcollectinfo from 1 node, it will collect logs
on 1 node only.
Initial nodes: 3
"""
gen_load = BlobGenerator('cbcollect', 'cbcollect-', self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen_load, "create", 0)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
self.log.info("Delete old logs files")
self.shell.delete_files("%s.zip" % (self.log_filename))
self.log.info("Delete old logs directory")
self.shell.delete_files("cbcollect_info*")
        output, error = self.shell.execute_cbcollect_info("%s.zip"
                                                          % (self.log_filename))
if output:
if self.debug_logs:
self.shell.log_command_output(output, error)
for line in output:
if "noLogs=1" in line:
if "oneNode=1" not in line:
self.log.error("Error line: %s" % line)
self.fail("cbcollect did not set to collect diag only at 1 node ")
self.verify_results(self, self.log_filename)
def test_cbcollectinfo_memory_usuage(self):
"""
        Test to make sure cbcollectinfo does not use a lot of memory.
        We run the test with 200K items of size 128 bytes each.
"""
gen_load = BlobGenerator('cbcollect', 'cbcollect-', self.value_size,
end=200000)
self._load_all_buckets(self.master, gen_load, "create", 0)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
self.log.info("Delete old logs files")
self.shell.delete_files("%s.zip" % (self.log_filename))
self.log.info("Delete old logs directory")
self.shell.delete_files("cbcollect_info*")
collect_threads = []
        col_thread = Thread(target=self.shell.execute_cbcollect_info,
                            args=("%s.zip" % (self.log_filename),))
collect_threads.append(col_thread)
col_thread.start()
monitor_mem_thread = Thread(target=self._monitor_collect_log_mem_process)
collect_threads.append(monitor_mem_thread)
monitor_mem_thread.start()
        self.thread_end = False
        while not self.thread_end:
            if not col_thread.is_alive():
                self.thread_end = True
for t in collect_threads:
t.join()
def _monitor_collect_log_mem_process(self):
mem_stat = []
results = []
shell = RemoteMachineShellConnection(self.master)
vsz, rss = RemoteMachineHelper(shell).monitor_process_memory('cbcollect_info')
vsz_delta = max(abs(x - y) for (x, y) in zip(vsz[1:], vsz[:-1]))
rss_delta = max(abs(x - y) for (x, y) in zip(rss[1:], rss[:-1]))
self.log.info("The largest delta in VSZ: %s KB " % vsz_delta)
self.log.info("The largest delta in RSS: %s KB " % rss_delta)
        if vsz_delta > 20000:
            self.fail("cbcollect_info process memory spiked by more than 20 MB")
|
run_micro_service.py
|
from threading import Thread
import base64
import flask
import redis
import uuid
import time
import json
import sys
import io
import algorithm
import numpy
IMAGE_QUEUE = "image_queue"
BATCH_SIZE = 32
DTYPE = numpy.float32
SERVER_SLEEP = 0.25
CLIENT_SLEEP = 0.25
# initialize our Flask application and the Redis connection
app = flask.Flask(__name__)
db = redis.StrictRedis(host="localhost", port=6379, db=0)
def base64_encode_image(a):
# base64 encode the input NumPy array
return base64.b64encode(a).decode("utf-8")
def base64_decode_image(a, dtype, shape):
# if this is Python 3, we need the extra step of encoding the
# serialized NumPy string as a byte object
if sys.version_info.major == 3:
a = bytes(a, encoding="utf-8")
# convert the string to a NumPy array using the supplied data
# type and target shape
    a = numpy.frombuffer(base64.decodebytes(a), dtype=dtype)
a = a.reshape(shape)
# return the decoded image
return a
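
# --- Illustrative sketch, not part of the original service ---
# base64_encode_image/base64_decode_image shuttle a NumPy array through Redis as
# text: the raw buffer is base64-encoded on the way in and decoded back with the
# original dtype and shape on the way out. A minimal roundtrip, assuming a small
# C-contiguous float32 array:
def _example_image_roundtrip():
    original = numpy.arange(12, dtype=DTYPE).reshape((3, 4))
    encoded = base64_encode_image(original.copy(order="C"))
    decoded = base64_decode_image(encoded, DTYPE, (3, 4))
    assert numpy.array_equal(original, decoded)
    return decoded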
def pre_process_file(I):
# if sys.version_info.major == 3:
# I = bytes(I, encoding="utf-8")
# I = base64.decodestring(I)
I = io.BytesIO(base64.b64decode(I))
# print(type(I))
# print(I[:10])
return I
def classify_process():
while True:
# get BATCH_SIZE no of requests
queue = db.lrange(IMAGE_QUEUE, 0, BATCH_SIZE - 1)
n = 0
for q in queue:
q = json.loads(q.decode("utf-8"))
q['input_file'] = pre_process_file(q['input_file'])
item_id = q["id"]
# compute result for one request
result = algorithm.compute(q)
# put result in queue
            db.set(item_id, json.dumps(result))
            n = n + 1
# delete the processed requests
        db.ltrim(IMAGE_QUEUE, n, -1)
# sleep for some time before polling again
time.sleep(SERVER_SLEEP)
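
# --- Illustrative sketch, not part of the original service ---
# Each queued job is a JSON object with an "id" (later used as the Redis key
# under which the worker stores the result) and a base64-encoded "input_file".
# A hypothetical producer that follows the same contract as the /predict
# handler below:
def _example_enqueue_job(raw_bytes):
    job_id = str(uuid.uuid4())
    job = {"id": job_id,
           "input_file": base64.b64encode(raw_bytes).decode("utf-8")}
    db.rpush(IMAGE_QUEUE, json.dumps(job))
    return job_id  # poll db.get(job_id) until the worker has stored the result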
@app.route("/predict", methods=["POST"])
def predict():
# initialize the data dictionary that will be returned from the view
data = {"success": False}
# ensure an image was properly uploaded to our endpoint
if flask.request.method == "POST":
if flask.request.files.get("input_file"):
# print(flask.request.files["input_file"])
input_file = flask.request.files["input_file"].read()
#image = numpy.frombuffer(io.BytesIO(image).getbuffer())
#image = numpy.zeros((3,3))
#image = prepare_image(image, (IMAGE_WIDTH, IMAGE_HEIGHT))
# ensure our NumPy array is C-contiguous as well,
# otherwise we won't be able to serialize it
#image = image.copy(order="C")
# generate an ID for the classification then add the
# classification ID + image to the queue
# input_file = io.BytesIO(input_file)
#input_file = numpy.frombuffer(input_file.getbuffer(),dtype="u8")
#print(input_file.shape)
input_file = base64.b64encode(input_file).decode("utf-8")
k = str(uuid.uuid4())
#d = {"id": k, "input_file": base64_encode_image(input_file)}
d = {"id": k, "input_file": input_file}
db.rpush(IMAGE_QUEUE, json.dumps(d))
# keep looping until our model server returns the output
# predictions
while True:
# attempt to grab the output predictions
output = db.get(k)
# check to see if our model has classified the input
# image
if output is not None:
# add the output predictions to our data
# dictionary so we can return it to the client
output = output.decode("utf-8")
data["output"] = json.loads(output)
# delete the result from the database and break
# from the polling loop
db.delete(k)
break
# sleep for a small amount to give the model a chance
# to classify the input image
time.sleep(CLIENT_SLEEP)
# indicate that the request was a success
data["success"] = True
# return the data dictionary as a JSON response
return flask.jsonify(data)
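
# --- Illustrative client sketch, not part of the original service ---
# Assuming the service runs on the Flask default of http://localhost:5000, a
# client can POST a file to /predict and receive the JSON result once the
# worker has processed it. `requests` is an assumed extra dependency used only
# in this example.
def _example_client(path, url="http://localhost:5000/predict"):
    import requests  # assumed to be installed in the client environment
    with open(path, "rb") as f:
        resp = requests.post(url, files={"input_file": f})
    resp.raise_for_status()
    return resp.json()  # e.g. {"success": True, "output": ...}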
# if this is the main thread of execution, start the classification worker and
# then start the web server
if __name__ == "__main__":
    # start the classification worker in a *separate* thread from the one
    # used to serve web requests
print("* Starting model service...")
t = Thread(target=classify_process, args=())
t.daemon = True
t.start()
# start the web server
print("* Starting web service...")
app.run()
|
test_events.py
|
"""Tests for events.py."""
import collections.abc
import concurrent.futures
import functools
import io
import os
import platform
import re
import signal
import socket
try:
import ssl
except ImportError:
ssl = None
import subprocess
import sys
import threading
import time
import errno
import unittest
from unittest import mock
import weakref
if sys.platform not in ('win32', 'OpenVMS'):
import tty
import asyncio
from asyncio import coroutines
from asyncio import events
from asyncio import proactor_events
from asyncio import selector_events
from test.test_asyncio import utils as test_utils
from test import support
def tearDownModule():
asyncio.set_event_loop_policy(None)
def broken_unix_getsockname():
"""Return True if the platform is Mac OS 10.4 or older."""
if sys.platform.startswith("aix"):
return True
elif sys.platform != 'darwin':
return False
version = platform.mac_ver()[0]
version = tuple(map(int, version.split('.')))
return version < (10, 5)
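
# --- Illustrative sketch, not part of the original test file ---
# broken_unix_getsockname() compares dotted version strings as tuples of ints,
# so "10.4.11" < "10.5" is evaluated as (10, 4, 11) < (10, 5) -> True.
# A minimal standalone illustration of that comparison:
def _example_mac_ver_is_older_than(version_string, minimum=(10, 5)):
    return tuple(map(int, version_string.split('.'))) < minimum

# e.g. _example_mac_ver_is_older_than("10.4.11") is True,
#      _example_mac_ver_is_older_than("10.6") is False.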
# @unittest.skipIf(sys.platform in ('OpenVMS'), 'OpenVMS has no os.fork()')
def _test_get_event_loop_new_process__sub_proc():
async def doit():
return 'hello'
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop.run_until_complete(doit())
class CoroLike:
def send(self, v):
pass
def throw(self, *exc):
pass
def close(self):
pass
def __await__(self):
pass
class MyBaseProto(asyncio.Protocol):
connected = None
done = None
def __init__(self, loop=None):
self.transport = None
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.connected = loop.create_future()
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
if self.connected:
self.connected.set_result(None)
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyProto(MyBaseProto):
def connection_made(self, transport):
super().connection_made(transport)
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyReadPipeProto(asyncio.Protocol):
done = None
def __init__(self, loop=None):
self.state = ['INITIAL']
self.nbytes = 0
self.transport = None
if loop is not None:
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == ['INITIAL'], self.state
self.state.append('CONNECTED')
def data_received(self, data):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.state.append('EOF')
def connection_lost(self, exc):
if 'EOF' not in self.state:
self.state.append('EOF') # It is okay if EOF is missed.
assert self.state == ['INITIAL', 'CONNECTED', 'EOF'], self.state
self.state.append('CLOSED')
if self.done:
self.done.set_result(None)
class MyWritePipeProto(asyncio.BaseProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.transport = None
if loop is not None:
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MySubprocessProtocol(asyncio.SubprocessProtocol):
def __init__(self, loop):
self.state = 'INITIAL'
self.transport = None
self.connected = loop.create_future()
self.completed = loop.create_future()
self.disconnects = {fd: loop.create_future() for fd in range(3)}
self.data = {1: b'', 2: b''}
self.returncode = None
self.got_data = {1: asyncio.Event(loop=loop),
2: asyncio.Event(loop=loop)}
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
self.connected.set_result(None)
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
self.completed.set_result(None)
def pipe_data_received(self, fd, data):
assert self.state == 'CONNECTED', self.state
self.data[fd] += data
self.got_data[fd].set()
def pipe_connection_lost(self, fd, exc):
assert self.state == 'CONNECTED', self.state
if exc:
self.disconnects[fd].set_exception(exc)
else:
self.disconnects[fd].set_result(exc)
def process_exited(self):
assert self.state == 'CONNECTED', self.state
self.returncode = self.transport.get_returncode()
class EventLoopTestsMixin:
def setUp(self):
super().setUp()
self.loop = self.create_event_loop()
self.set_event_loop(self.loop)
def tearDown(self):
# just in case if we have transport close callbacks
if not self.loop.is_closed():
test_utils.run_briefly(self.loop)
self.doCleanups()
support.gc_collect()
super().tearDown()
def test_run_until_complete_nesting(self):
async def coro1():
await asyncio.sleep(0)
async def coro2():
self.assertTrue(self.loop.is_running())
self.loop.run_until_complete(coro1())
with self.assertWarnsRegex(
RuntimeWarning,
r"coroutine \S+ was never awaited"
):
self.assertRaises(
RuntimeError, self.loop.run_until_complete, coro2())
# Note: because of the default Windows timing granularity of
# 15.6 msec, we use fairly long sleep times here (~100 msec).
def test_run_until_complete(self):
t0 = self.loop.time()
self.loop.run_until_complete(asyncio.sleep(0.1))
t1 = self.loop.time()
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_run_until_complete_stopped(self):
async def cb():
self.loop.stop()
await asyncio.sleep(0.1)
task = cb()
self.assertRaises(RuntimeError,
self.loop.run_until_complete, task)
def test_call_later(self):
results = []
def callback(arg):
results.append(arg)
self.loop.stop()
self.loop.call_later(0.1, callback, 'hello world')
t0 = time.monotonic()
self.loop.run_forever()
t1 = time.monotonic()
self.assertEqual(results, ['hello world'])
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_call_soon(self):
results = []
def callback(arg1, arg2):
results.append((arg1, arg2))
self.loop.stop()
self.loop.call_soon(callback, 'hello', 'world')
self.loop.run_forever()
self.assertEqual(results, [('hello', 'world')])
def test_call_soon_threadsafe(self):
results = []
lock = threading.Lock()
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
def run_in_thread():
self.loop.call_soon_threadsafe(callback, 'hello')
lock.release()
lock.acquire()
t = threading.Thread(target=run_in_thread)
t.start()
with lock:
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
t.join()
self.assertEqual(results, ['hello', 'world'])
def test_call_soon_threadsafe_same_thread(self):
results = []
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
self.loop.call_soon_threadsafe(callback, 'hello')
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
self.assertEqual(results, ['hello', 'world'])
def test_run_in_executor(self):
def run(arg):
return (arg, threading.get_ident())
f2 = self.loop.run_in_executor(None, run, 'yo')
res, thread_id = self.loop.run_until_complete(f2)
self.assertEqual(res, 'yo')
self.assertNotEqual(thread_id, threading.get_ident())
def test_run_in_executor_cancel(self):
called = False
def patched_call_soon(*args):
nonlocal called
called = True
def run():
time.sleep(0.05)
f2 = self.loop.run_in_executor(None, run)
f2.cancel()
self.loop.close()
self.loop.call_soon = patched_call_soon
self.loop.call_soon_threadsafe = patched_call_soon
time.sleep(0.4)
self.assertFalse(called)
def test_reader_callback(self):
r, w = socket.socketpair()
r.setblocking(False)
bytes_read = bytearray()
def reader():
try:
data = r.recv(1024)
except BlockingIOError:
# Spurious readiness notifications are possible
# at least on Linux -- see man select.
return
if data:
bytes_read.extend(data)
else:
self.assertTrue(self.loop.remove_reader(r.fileno()))
r.close()
self.loop.add_reader(r.fileno(), reader)
self.loop.call_soon(w.send, b'abc')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 3)
self.loop.call_soon(w.send, b'def')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 6)
self.loop.call_soon(w.close)
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(bytes_read, b'abcdef')
def test_writer_callback(self):
r, w = socket.socketpair()
w.setblocking(False)
def writer(data):
w.send(data)
self.loop.stop()
data = b'x' * 1024
self.loop.add_writer(w.fileno(), writer, data)
self.loop.run_forever()
self.assertTrue(self.loop.remove_writer(w.fileno()))
self.assertFalse(self.loop.remove_writer(w.fileno()))
w.close()
read = r.recv(len(data) * 2)
r.close()
self.assertEqual(read, data)
@unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
def test_add_signal_handler(self):
caught = 0
def my_handler():
nonlocal caught
caught += 1
# Check error behavior first.
self.assertRaises(
TypeError, self.loop.add_signal_handler, 'boom', my_handler)
self.assertRaises(
TypeError, self.loop.remove_signal_handler, 'boom')
self.assertRaises(
ValueError, self.loop.add_signal_handler, signal.NSIG+1,
my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, signal.NSIG+1)
self.assertRaises(
ValueError, self.loop.add_signal_handler, 0, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, 0)
self.assertRaises(
ValueError, self.loop.add_signal_handler, -1, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, -1)
self.assertRaises(
RuntimeError, self.loop.add_signal_handler, signal.SIGKILL,
my_handler)
# Removing SIGKILL doesn't raise, since we don't call signal().
self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL))
# Now set a handler and handle it.
self.loop.add_signal_handler(signal.SIGINT, my_handler)
os.kill(os.getpid(), signal.SIGINT)
test_utils.run_until(self.loop, lambda: caught)
# Removing it should restore the default handler.
self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
self.assertEqual(signal.getsignal(signal.SIGINT),
signal.default_int_handler)
# Removing again returns False.
self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_while_selecting(self):
# Test with a signal actually arriving during a select() call.
caught = 0
def my_handler():
nonlocal caught
caught += 1
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler)
signal.setitimer(signal.ITIMER_REAL, 0.01, 0) # Send SIGALRM once.
self.loop.call_later(60, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_args(self):
some_args = (42,)
caught = 0
def my_handler(*args):
nonlocal caught
caught += 1
self.assertEqual(args, some_args)
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)
signal.setitimer(signal.ITIMER_REAL, 0.1, 0) # Send SIGALRM once.
self.loop.call_later(60, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
def _basetest_create_connection(self, connection_fut, check_sockname=True):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertIs(pr.transport, tr)
if check_sockname:
self.assertIsNotNone(tr.get_extra_info('sockname'))
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def test_create_connection(self):
with test_utils.run_test_server() as httpd:
conn_fut = self.loop.create_connection(
lambda: MyProto(loop=self.loop), *httpd.address)
self._basetest_create_connection(conn_fut)
@support.skip_unless_bind_unix_socket
def test_create_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not broken_unix_getsockname()
with test_utils.run_test_unix_server() as httpd:
conn_fut = self.loop.create_unix_connection(
lambda: MyProto(loop=self.loop), httpd.address)
self._basetest_create_connection(conn_fut, check_sockname)
def check_ssl_extra_info(self, client, check_sockname=True,
peername=None, peercert={}):
if check_sockname:
self.assertIsNotNone(client.get_extra_info('sockname'))
if peername:
self.assertEqual(peername,
client.get_extra_info('peername'))
else:
self.assertIsNotNone(client.get_extra_info('peername'))
self.assertEqual(peercert,
client.get_extra_info('peercert'))
# test SSL cipher
cipher = client.get_extra_info('cipher')
self.assertIsInstance(cipher, tuple)
self.assertEqual(len(cipher), 3, cipher)
self.assertIsInstance(cipher[0], str)
self.assertIsInstance(cipher[1], str)
self.assertIsInstance(cipher[2], int)
# test SSL object
sslobj = client.get_extra_info('ssl_object')
self.assertIsNotNone(sslobj)
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
self.assertEqual(sslobj.cipher(),
client.get_extra_info('cipher'))
self.assertEqual(sslobj.getpeercert(),
client.get_extra_info('peercert'))
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
def _basetest_create_ssl_connection(self, connection_fut,
check_sockname=True,
peername=None):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertTrue('ssl' in tr.__class__.__name__.lower())
self.check_ssl_extra_info(tr, check_sockname, peername)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def _test_create_ssl_connection(self, httpd, create_connection,
check_sockname=True, peername=None):
conn_fut = create_connection(ssl=test_utils.dummy_ssl_context())
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
# ssl.Purpose was introduced in Python 3.4
if hasattr(ssl, 'Purpose'):
def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, *,
cafile=None, capath=None,
cadata=None):
"""
A ssl.create_default_context() replacement that doesn't enable
cert validation.
"""
self.assertEqual(purpose, ssl.Purpose.SERVER_AUTH)
return test_utils.dummy_ssl_context()
# With ssl=True, ssl.create_default_context() should be called
with mock.patch('ssl.create_default_context',
side_effect=_dummy_ssl_create_context) as m:
conn_fut = create_connection(ssl=True)
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(m.call_count, 1)
# With the real ssl.create_default_context(), certificate
# validation will fail
with self.assertRaises(ssl.SSLError) as cm:
conn_fut = create_connection(ssl=True)
# Ignore the "SSL handshake failed" log in debug mode
with test_utils.disable_logger():
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_connection(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_connection,
lambda: MyProto(loop=self.loop),
*httpd.address)
self._test_create_ssl_connection(httpd, create_connection,
peername=httpd.address)
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not broken_unix_getsockname()
with test_utils.run_test_unix_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_unix_connection,
lambda: MyProto(loop=self.loop), httpd.address,
server_hostname='127.0.0.1')
self._test_create_ssl_connection(httpd, create_connection,
check_sockname,
peername=httpd.address)
def test_create_connection_local_addr(self):
with test_utils.run_test_server() as httpd:
port = support.find_unused_port()
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=(httpd.address[0], port))
tr, pr = self.loop.run_until_complete(f)
expected = pr.transport.get_extra_info('sockname')[1]
self.assertEqual(port, expected)
tr.close()
def test_create_connection_local_addr_in_use(self):
with test_utils.run_test_server() as httpd:
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=httpd.address)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
self.assertIn(str(httpd.address), cm.exception.strerror)
def test_connect_accepted_socket(self, server_ssl=None, client_ssl=None):
loop = self.loop
class MyProto(MyBaseProto):
def connection_lost(self, exc):
super().connection_lost(exc)
loop.call_soon(loop.stop)
def data_received(self, data):
super().data_received(data)
self.transport.write(expected_response)
lsock = socket.create_server(('127.0.0.1', 0), backlog=1)
addr = lsock.getsockname()
message = b'test data'
response = None
expected_response = b'roger'
def client():
nonlocal response
try:
csock = socket.socket()
if client_ssl is not None:
csock = client_ssl.wrap_socket(csock)
csock.connect(addr)
csock.sendall(message)
response = csock.recv(99)
csock.close()
except Exception as exc:
print(
"Failure in client thread in test_connect_accepted_socket",
exc)
thread = threading.Thread(target=client, daemon=True)
thread.start()
conn, _ = lsock.accept()
proto = MyProto(loop=loop)
proto.loop = loop
loop.run_until_complete(
loop.connect_accepted_socket(
(lambda: proto), conn, ssl=server_ssl))
loop.run_forever()
proto.transport.close()
lsock.close()
support.join_thread(thread, timeout=1)
self.assertFalse(thread.is_alive())
self.assertEqual(proto.state, 'CLOSED')
self.assertEqual(proto.nbytes, len(message))
self.assertEqual(response, expected_response)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_ssl_connect_accepted_socket(self):
if (sys.platform == 'win32' and
sys.version_info < (3, 5) and
isinstance(self.loop, proactor_events.BaseProactorEventLoop)
):
raise unittest.SkipTest(
'SSL not supported with proactor event loops before Python 3.5'
)
server_context = test_utils.simple_server_sslcontext()
client_context = test_utils.simple_client_sslcontext()
self.test_connect_accepted_socket(server_context, client_context)
def test_connect_accepted_socket_ssl_timeout_for_plain_socket(self):
sock = socket.socket()
self.addCleanup(sock.close)
coro = self.loop.connect_accepted_socket(
MyProto, sock, ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
@mock.patch('asyncio.base_events.socket')
def create_server_multiple_hosts(self, family, hosts, mock_sock):
async def getaddrinfo(host, port, *args, **kw):
if family == socket.AF_INET:
return [(family, socket.SOCK_STREAM, 6, '', (host, port))]
else:
return [(family, socket.SOCK_STREAM, 6, '', (host, port, 0, 0))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
unique_hosts = set(hosts)
if family == socket.AF_INET:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80) for host in unique_hosts]
else:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80, 0, 0) for host in unique_hosts]
self.loop.getaddrinfo = getaddrinfo_task
self.loop._start_serving = mock.Mock()
self.loop._stop_serving = mock.Mock()
f = self.loop.create_server(lambda: MyProto(self.loop), hosts, 80)
server = self.loop.run_until_complete(f)
self.addCleanup(server.close)
server_hosts = {sock.getsockbyname()[0] for sock in server.sockets}
self.assertEqual(server_hosts, unique_hosts)
def test_create_server_multiple_hosts_ipv4(self):
self.create_server_multiple_hosts(socket.AF_INET,
['1.2.3.4', '5.6.7.8', '1.2.3.4'])
def test_create_server_multiple_hosts_ipv6(self):
self.create_server_multiple_hosts(socket.AF_INET6,
['::1', '::2', '::1'])
def test_create_server(self):
proto = MyProto(self.loop)
f = self.loop.create_server(lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
self.assertEqual('127.0.0.1',
proto.transport.get_extra_info('peername')[0])
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT')
def test_create_server_reuse_port(self):
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
test_utils.run_briefly(self.loop)
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0, reuse_port=True)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
def _make_unix_server(self, factory, **kwargs):
path = test_utils.gen_unix_socket_path()
self.addCleanup(lambda: os.path.exists(path) and os.unlink(path))
f = self.loop.create_unix_server(factory, path, **kwargs)
server = self.loop.run_until_complete(f)
return server, path
@support.skip_unless_bind_unix_socket
def test_create_unix_server(self):
proto = MyProto(loop=self.loop)
server, path = self._make_unix_server(lambda: proto)
self.assertEqual(len(server.sockets), 1)
client = socket.socket(socket.AF_UNIX)
client.connect(path)
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_path_socket_error(self):
proto = MyProto(loop=self.loop)
sock = socket.socket()
with sock:
f = self.loop.create_unix_server(lambda: proto, '/test', sock=sock)
with self.assertRaisesRegex(ValueError,
'path and sock can not be specified '
'at the same time'):
self.loop.run_until_complete(f)
def _create_ssl_context(self, certfile, keyfile=None):
sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.load_cert_chain(certfile, keyfile)
return sslcontext
def _make_ssl_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
f = self.loop.create_server(factory, '127.0.0.1', 0, ssl=sslcontext)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '127.0.0.1')
return server, host, port
def _make_ssl_unix_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
return self._make_unix_server(factory, ssl=sslcontext)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)
f_c = self.loop.create_connection(MyBaseProto, host, port,
ssl=test_utils.dummy_ssl_context())
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)
f_c = self.loop.create_unix_connection(
MyBaseProto, path, ssl=test_utils.dummy_ssl_context(),
server_hostname='')
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='invalid')
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_match_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(
cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# incorrect server_hostname
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(
ssl.CertificateError,
"IP address mismatch, certificate is not valid for "
"'127.0.0.1'"):
self.loop.run_until_complete(f_c)
# close connection
# transport is None because TLS ALERT aborted the handshake
self.assertIsNone(proto.transport)
server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port),
peercert=test_utils.PEERCERT)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
def test_create_server_sock(self):
proto = self.loop.create_future()
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
proto.set_result(self)
sock_ob = socket.create_server(('0.0.0.0', 0))
f = self.loop.create_server(TestMyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
self.assertEqual(sock.fileno(), sock_ob.fileno())
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
def test_create_server_addr_in_use(self):
sock_ob = socket.create_server(('0.0.0.0', 0))
f = self.loop.create_server(MyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
f = self.loop.create_server(MyProto, host=host, port=port)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
server.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_server_dual_stack(self):
f_proto = self.loop.create_future()
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
f_proto.set_result(self)
try_count = 0
while True:
try:
port = support.find_unused_port()
f = self.loop.create_server(TestMyProto, host=None, port=port)
server = self.loop.run_until_complete(f)
except OSError as ex:
if ex.errno == errno.EADDRINUSE:
try_count += 1
self.assertGreaterEqual(5, try_count)
continue
else:
raise
else:
break
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
f_proto = self.loop.create_future()
client = socket.socket(socket.AF_INET6)
client.connect(('::1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
server.close()
def test_server_close(self):
f = self.loop.create_server(MyProto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
client = socket.socket()
self.assertRaises(
ConnectionRefusedError, client.connect, ('127.0.0.1', port))
client.close()
def test_create_datagram_endpoint(self):
class TestMyDatagramProto(MyDatagramProto):
def __init__(inner_self):
super().__init__(loop=self.loop)
def datagram_received(self, data, addr):
super().datagram_received(data, addr)
self.transport.sendto(b'resp:'+data, addr)
coro = self.loop.create_datagram_endpoint(
TestMyDatagramProto, local_addr=('127.0.0.1', 0))
s_transport, server = self.loop.run_until_complete(coro)
host, port = s_transport.get_extra_info('sockname')
self.assertIsInstance(s_transport, asyncio.Transport)
self.assertIsInstance(server, TestMyDatagramProto)
self.assertEqual('INITIALIZED', server.state)
self.assertIs(server.transport, s_transport)
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
remote_addr=(host, port))
transport, client = self.loop.run_until_complete(coro)
self.assertIsInstance(transport, asyncio.Transport)
self.assertIsInstance(client, MyDatagramProto)
self.assertEqual('INITIALIZED', client.state)
self.assertIs(client.transport, transport)
transport.sendto(b'xxx')
test_utils.run_until(self.loop, lambda: server.nbytes)
self.assertEqual(3, server.nbytes)
test_utils.run_until(self.loop, lambda: client.nbytes)
# received
self.assertEqual(8, client.nbytes)
# extra info is available
self.assertIsNotNone(transport.get_extra_info('sockname'))
# close connection
transport.close()
self.loop.run_until_complete(client.done)
self.assertEqual('CLOSED', client.state)
server.transport.close()
def test_create_datagram_endpoint_sock(self):
sock = None
local_address = ('127.0.0.1', 0)
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*local_address, type=socket.SOCK_DGRAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
sock.bind(address)
except:
pass
else:
break
else:
assert False, 'Can not create socket.'
f = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, MyDatagramProto)
tr.close()
self.loop.run_until_complete(pr.done)
def test_internal_fds(self):
loop = self.create_event_loop()
if not isinstance(loop, selector_events.BaseSelectorEventLoop):
loop.close()
self.skipTest('loop is not a BaseSelectorEventLoop')
self.assertEqual(1, loop._internal_fds)
loop.close()
self.assertEqual(0, loop._internal_fds)
self.assertIsNone(loop._csock)
self.assertIsNone(loop._ssock)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_read_pipe(self):
proto = MyReadPipeProto(loop=self.loop)
rpipe, wpipe = os.pipe()
pipeobj = io.open(rpipe, 'rb', 1024)
async def connect():
t, p = await self.loop.connect_read_pipe(
lambda: proto, pipeobj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(wpipe, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 1)
self.assertEqual(1, proto.nbytes)
os.write(wpipe, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(wpipe)
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_unclosed_pipe_transport(self):
# This test reproduces the issue #314 on GitHub
loop = self.create_event_loop()
read_proto = MyReadPipeProto(loop=loop)
write_proto = MyWritePipeProto(loop=loop)
rpipe, wpipe = os.pipe()
rpipeobj = io.open(rpipe, 'rb', 1024)
wpipeobj = io.open(wpipe, 'w', 1024)
async def connect():
read_transport, _ = await loop.connect_read_pipe(
lambda: read_proto, rpipeobj)
write_transport, _ = await loop.connect_write_pipe(
lambda: write_proto, wpipeobj)
return read_transport, write_transport
# Run and close the loop without closing the transports
read_transport, write_transport = loop.run_until_complete(connect())
loop.close()
# These 'repr' calls used to raise an AttributeError
# See Issue #314 on GitHub
self.assertIn('open', repr(read_transport))
self.assertIn('open', repr(write_transport))
# Clean up (avoid ResourceWarning)
rpipeobj.close()
wpipeobj.close()
read_transport._pipe = None
write_transport._pipe = None
@unittest.skipUnless(sys.platform not in ('win32', 'OpenVMS'),
"Don't support PTY for Windows and OpenVMS")
def test_read_pty_output(self):
proto = MyReadPipeProto(loop=self.loop)
master, slave = os.openpty()
master_read_obj = io.open(master, 'rb', 0)
async def connect():
t, p = await self.loop.connect_read_pipe(lambda: proto,
master_read_obj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(slave, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes)
self.assertEqual(1, proto.nbytes)
os.write(slave, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(slave)
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe(self):
rpipe, wpipe = os.pipe()
pipeobj = io.open(wpipe, 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(rpipe, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(rpipe)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe_disconnect_on_close(self):
rsock, wsock = socket.socketpair()
rsock.setblocking(False)
pipeobj = io.open(wsock.detach(), 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = self.loop.run_until_complete(self.loop.sock_recv(rsock, 1024))
self.assertEqual(b'1', data)
rsock.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform not in ('win32', 'OpenVMS'),
"Don't support PTY for Windows and OpenVMS")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_write_pty(self):
master, slave = os.openpty()
slave_write_obj = io.open(slave, 'wb', 0)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, slave_write_obj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1,
timeout=10)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5,
timeout=10)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(master)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform not in ('win32', 'OpenVMS'),
"Don't support PTY for Windows and OpenVMS")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_bidirectional_pty(self):
master, read_slave = os.openpty()
write_slave = os.dup(read_slave)
tty.setraw(read_slave)
slave_read_obj = io.open(read_slave, 'rb', 0)
read_proto = MyReadPipeProto(loop=self.loop)
read_connect = self.loop.connect_read_pipe(lambda: read_proto,
slave_read_obj)
read_transport, p = self.loop.run_until_complete(read_connect)
self.assertIs(p, read_proto)
self.assertIs(read_transport, read_proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(0, read_proto.nbytes)
slave_write_obj = io.open(write_slave, 'wb', 0)
write_proto = MyWritePipeProto(loop=self.loop)
write_connect = self.loop.connect_write_pipe(lambda: write_proto,
slave_write_obj)
write_transport, p = self.loop.run_until_complete(write_connect)
self.assertIs(p, write_proto)
self.assertIs(write_transport, write_proto.transport)
self.assertEqual('CONNECTED', write_proto.state)
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
write_transport.write(b'1')
test_utils.run_until(self.loop, lambda: reader(data) >= 1, timeout=10)
self.assertEqual(b'1', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'a')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 1,
timeout=10)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(1, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
write_transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5, timeout=10)
self.assertEqual(b'12345', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'bcde')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 5,
timeout=10)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(5, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
os.close(master)
read_transport.close()
self.loop.run_until_complete(read_proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], read_proto.state)
write_transport.close()
self.loop.run_until_complete(write_proto.done)
self.assertEqual('CLOSED', write_proto.state)
def test_prompt_cancellation(self):
r, w = socket.socketpair()
r.setblocking(False)
f = self.loop.create_task(self.loop.sock_recv(r, 1))
ov = getattr(f, 'ov', None)
if ov is not None:
self.assertTrue(ov.pending)
async def main():
try:
self.loop.call_soon(f.cancel)
await f
except asyncio.CancelledError:
res = 'cancelled'
else:
res = None
finally:
self.loop.stop()
return res
start = time.monotonic()
t = self.loop.create_task(main())
self.loop.run_forever()
elapsed = time.monotonic() - start
self.assertLess(elapsed, 0.1)
self.assertEqual(t.result(), 'cancelled')
self.assertRaises(asyncio.CancelledError, f.result)
if ov is not None:
self.assertFalse(ov.pending)
self.loop._stop_serving(r)
r.close()
w.close()
def test_timeout_rounding(self):
def _run_once():
self.loop._run_once_counter += 1
orig_run_once()
orig_run_once = self.loop._run_once
self.loop._run_once_counter = 0
self.loop._run_once = _run_once
async def wait():
loop = self.loop
await asyncio.sleep(1e-2)
await asyncio.sleep(1e-4)
await asyncio.sleep(1e-6)
await asyncio.sleep(1e-8)
await asyncio.sleep(1e-10)
self.loop.run_until_complete(wait())
# The ideal number of calls is 12, but on some platforms the selector
# may sleep a little bit less than the timeout, depending on the resolution
# of the clock used by the kernel. Tolerate a few useless calls on
# these platforms.
self.assertLessEqual(self.loop._run_once_counter, 20,
{'clock_resolution': self.loop._clock_resolution,
'selector': self.loop._selector.__class__.__name__})
def test_remove_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = socket.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.add_reader(r, callback)
loop.add_writer(w, callback)
loop.close()
self.assertFalse(loop.remove_reader(r))
self.assertFalse(loop.remove_writer(w))
def test_add_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = socket.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.close()
with self.assertRaises(RuntimeError):
loop.add_reader(r, callback)
with self.assertRaises(RuntimeError):
loop.add_writer(w, callback)
def test_close_running_event_loop(self):
async def close_loop(loop):
self.loop.close()
coro = close_loop(self.loop)
with self.assertRaises(RuntimeError):
self.loop.run_until_complete(coro)
def test_close(self):
self.loop.close()
async def test():
pass
func = lambda: False
coro = test()
self.addCleanup(coro.close)
# operation blocked when the loop is closed
with self.assertRaises(RuntimeError):
self.loop.run_forever()
with self.assertRaises(RuntimeError):
fut = self.loop.create_future()
self.loop.run_until_complete(fut)
with self.assertRaises(RuntimeError):
self.loop.call_soon(func)
with self.assertRaises(RuntimeError):
self.loop.call_soon_threadsafe(func)
with self.assertRaises(RuntimeError):
self.loop.call_later(1.0, func)
with self.assertRaises(RuntimeError):
self.loop.call_at(self.loop.time() + .0, func)
with self.assertRaises(RuntimeError):
self.loop.create_task(coro)
with self.assertRaises(RuntimeError):
self.loop.add_signal_handler(signal.SIGTERM, func)
# run_in_executor test is tricky: the method is a coroutine,
# but run_until_complete cannot be called on closed loop.
# Thus iterate once explicitly.
with self.assertRaises(RuntimeError):
it = self.loop.run_in_executor(None, func).__await__()
next(it)
class SubprocessTestsMixin:
def check_terminated(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGTERM, returncode)
def check_killed(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGKILL, returncode)
def test_subprocess_exec(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
self.assertEqual(b'Python The Winner', proto.data[1])
def test_subprocess_interactive(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python ')
self.loop.run_until_complete(proto.got_data[1].wait())
proto.got_data[1].clear()
self.assertEqual(b'Python ', proto.data[1])
stdin.write(b'The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'Python The Winner', proto.data[1])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_shell(self):
with self.assertWarns(DeprecationWarning):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'echo Python')
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.get_pipe_transport(0).close()
self.loop.run_until_complete(proto.completed)
self.assertEqual(0, proto.returncode)
self.assertTrue(all(f.done() for f in proto.disconnects.values()))
self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
self.assertEqual(proto.data[2], b'')
transp.close()
def test_subprocess_exitcode(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
transp.close()
def test_subprocess_close_after_finish(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.assertIsNone(transp.get_pipe_transport(0))
self.assertIsNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
self.assertIsNone(transp.close())
def test_subprocess_kill(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.kill()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
transp.close()
def test_subprocess_terminate(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.terminate()
self.loop.run_until_complete(proto.completed)
self.check_terminated(proto.returncode)
transp.close()
@unittest.skipIf(sys.platform in ('win32', 'OpenVMS'), "Don't have SIGHUP")
def test_subprocess_send_signal(self):
# bpo-31034: Make sure that we get the default signal handler (killing
# the process). The parent process may have decided to ignore SIGHUP,
# and signal handlers are inherited.
old_handler = signal.signal(signal.SIGHUP, signal.SIG_DFL)
try:
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.send_signal(signal.SIGHUP)
self.loop.run_until_complete(proto.completed)
self.assertEqual(-signal.SIGHUP, proto.returncode)
transp.close()
finally:
signal.signal(signal.SIGHUP, old_handler)
def test_subprocess_stderr(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
transp.close()
self.assertEqual(b'OUT:test', proto.data[1])
self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2])
self.assertEqual(0, proto.returncode)
def test_subprocess_stderr_redirect_to_stdout(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog, stderr=subprocess.STDOUT)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
self.assertIsNotNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'),
proto.data[1])
self.assertEqual(b'', proto.data[2])
transp.close()
self.assertEqual(0, proto.returncode)
def test_subprocess_close_client_stream(self):
prog = os.path.join(os.path.dirname(__file__), 'echo3.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdout = transp.get_pipe_transport(1)
stdin.write(b'test')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'OUT:test', proto.data[1])
stdout.close()
self.loop.run_until_complete(proto.disconnects[1])
stdin.write(b'xxx')
self.loop.run_until_complete(proto.got_data[2].wait())
if sys.platform != 'win32':
self.assertEqual(b'ERR:BrokenPipeError', proto.data[2])
else:
# After closing the read-end of a pipe, writing to the
# write-end using os.write() fails with errno==EINVAL and
# GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using
# WriteFile() we get ERROR_BROKEN_PIPE as expected.)
self.assertEqual(b'ERR:OSError', proto.data[2])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_wait_no_same_group(self):
# start the new process in a new session
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None,
start_new_session=True)
with self.assertWarns(DeprecationWarning):
_, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
def test_subprocess_exec_invalid_args(self):
async def connect(**kwds):
await self.loop.subprocess_exec(
asyncio.SubprocessProtocol,
'pwd', **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=True))
def test_subprocess_shell_invalid_args(self):
async def connect(cmd=None, **kwds):
if not cmd:
cmd = 'pwd'
await self.loop.subprocess_shell(
asyncio.SubprocessProtocol,
cmd, **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(['ls', '-l']))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=False))
if sys.platform == 'win32':
class SelectEventLoopTests(EventLoopTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop()
class ProactorEventLoopTests(EventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.ProactorEventLoop()
def test_reader_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_reader_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_writer_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_writer_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_remove_fds_after_closing(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
elif sys.platform == 'OpenVMS':
pass
else:
import selectors
class UnixEventLoopTestsMixin(EventLoopTestsMixin):
def setUp(self):
super().setUp()
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
asyncio.set_child_watcher(None)
super().tearDown()
if hasattr(selectors, 'KqueueSelector'):
class KqueueEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(
selectors.KqueueSelector())
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Mavericks)
@support.requires_mac_ver(10, 9)
# Issue #20667: KqueueEventLoopTests.test_read_pty_output()
# hangs on OpenBSD 5.5
@unittest.skipIf(sys.platform.startswith('openbsd'),
'test hangs on OpenBSD')
def test_read_pty_output(self):
super().test_read_pty_output()
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Mavericks)
@support.requires_mac_ver(10, 9)
def test_write_pty(self):
super().test_write_pty()
if hasattr(selectors, 'EpollSelector'):
class EPollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.EpollSelector())
if hasattr(selectors, 'PollSelector'):
class PollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.PollSelector())
# Should always exist.
class SelectEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.SelectSelector())
def noop(*args, **kwargs):
pass
class HandleTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
self.loop.get_debug.return_value = True
def test_handle(self):
def callback(*args):
return args
args = ()
h = asyncio.Handle(callback, args, self.loop)
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h.cancelled())
h.cancel()
self.assertTrue(h.cancelled())
def test_callback_with_exception(self):
def callback():
raise ValueError()
self.loop = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
h = asyncio.Handle(callback, (), self.loop)
h._run()
self.loop.call_exception_handler.assert_called_with({
'message': test_utils.MockPattern('Exception in callback.*'),
'exception': mock.ANY,
'handle': h,
'source_traceback': h._source_traceback,
})
def test_handle_weakref(self):
wd = weakref.WeakValueDictionary()
h = asyncio.Handle(lambda: None, (), self.loop)
wd['h'] = h # Would fail without __weakref__ slot.
def test_handle_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s>'
% (filename, lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<Handle cancelled>')
# decorated function
with self.assertWarns(DeprecationWarning):
cb = asyncio.coroutine(noop)
h = asyncio.Handle(cb, (), self.loop)
self.assertEqual(repr(h),
'<Handle noop() at %s:%s>'
% (filename, lineno))
# partial function
cb = functools.partial(noop, 1, 2)
h = asyncio.Handle(cb, (3,), self.loop)
regex = (r'^<Handle noop\(1, 2\)\(3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial function with keyword args
cb = functools.partial(noop, x=1)
h = asyncio.Handle(cb, (2, 3), self.loop)
regex = (r'^<Handle noop\(x=1\)\(2, 3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial method
if sys.version_info >= (3, 4):
method = HandleTests.test_handle_repr
cb = functools.partialmethod(method)
filename, lineno = test_utils.get_function_source(method)
h = asyncio.Handle(cb, (), self.loop)
cb_regex = r'<function HandleTests.test_handle_repr .*>'
cb_regex = (r'functools.partialmethod\(%s, , \)\(\)' % cb_regex)
regex = (r'^<Handle %s at %s:%s>$'
% (cb_regex, re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
def test_handle_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# double cancellation won't overwrite _repr
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_handle_source_traceback(self):
loop = asyncio.get_event_loop_policy().new_event_loop()
loop.set_debug(True)
self.set_event_loop(loop)
def check_source_traceback(h):
lineno = sys._getframe(1).f_lineno - 1
self.assertIsInstance(h._source_traceback, list)
self.assertEqual(h._source_traceback[-1][:3],
(__file__,
lineno,
'test_handle_source_traceback'))
# call_soon
h = loop.call_soon(noop)
check_source_traceback(h)
# call_soon_threadsafe
h = loop.call_soon_threadsafe(noop)
check_source_traceback(h)
# call_later
h = loop.call_later(0, noop)
check_source_traceback(h)
# call_at
h = loop.call_at(loop.time(), noop)
check_source_traceback(h)
@unittest.skipUnless(hasattr(collections.abc, 'Coroutine'),
'No collections.abc.Coroutine')
def test_coroutine_like_object_debug_formatting(self):
# Test that asyncio can format coroutines that are instances of
# collections.abc.Coroutine, but lack cr_code or gi_code attributes
# (such as ones compiled with Cython).
coro = CoroLike()
coro.__name__ = 'AAA'
self.assertTrue(asyncio.iscoroutine(coro))
self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')
coro.__qualname__ = 'BBB'
self.assertEqual(coroutines._format_coroutine(coro), 'BBB()')
coro.cr_running = True
self.assertEqual(coroutines._format_coroutine(coro), 'BBB() running')
coro.__name__ = coro.__qualname__ = None
self.assertEqual(coroutines._format_coroutine(coro),
'<CoroLike without __name__>() running')
coro = CoroLike()
coro.__qualname__ = 'CoroLike'
# Some coroutines might not have '__name__', such as
# built-in async_gen.asend().
self.assertEqual(coroutines._format_coroutine(coro), 'CoroLike()')
coro = CoroLike()
coro.__qualname__ = 'AAA'
coro.cr_code = None
self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')
class TimerTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
def test_hash(self):
when = time.monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(hash(h), hash(when))
def test_when(self):
when = time.monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(when, h.when())
def test_timer(self):
def callback(*args):
return args
args = (1, 2, 3)
when = time.monotonic()
h = asyncio.TimerHandle(when, callback, args, mock.Mock())
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h.cancelled())
# cancel
h.cancel()
self.assertTrue(h.cancelled())
self.assertIsNone(h._callback)
self.assertIsNone(h._args)
# when cannot be None
self.assertRaises(AssertionError,
asyncio.TimerHandle, None, callback, args,
self.loop)
def test_timer_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.TimerHandle(123, noop, (), self.loop)
src = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() at %s:%s>' % src)
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123>')
def test_timer_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.TimerHandle(123, noop, (), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_timer_comparison(self):
def callback(*args):
return args
when = time.monotonic()
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when, callback, (), self.loop)
# TODO: Use assertLess etc.
self.assertFalse(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertTrue(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertFalse(h2 > h1)
self.assertTrue(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertTrue(h1 == h2)
self.assertFalse(h1 != h2)
h2.cancel()
self.assertFalse(h1 == h2)
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop)
self.assertTrue(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertFalse(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertTrue(h2 > h1)
self.assertFalse(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertFalse(h1 == h2)
self.assertTrue(h1 != h2)
h3 = asyncio.Handle(callback, (), self.loop)
self.assertIs(NotImplemented, h1.__eq__(h3))
self.assertIs(NotImplemented, h1.__ne__(h3))
class AbstractEventLoopTests(unittest.TestCase):
def test_not_implemented(self):
f = mock.Mock()
loop = asyncio.AbstractEventLoop()
self.assertRaises(
NotImplementedError, loop.run_forever)
self.assertRaises(
NotImplementedError, loop.run_until_complete, None)
self.assertRaises(
NotImplementedError, loop.stop)
self.assertRaises(
NotImplementedError, loop.is_running)
self.assertRaises(
NotImplementedError, loop.is_closed)
self.assertRaises(
NotImplementedError, loop.close)
self.assertRaises(
NotImplementedError, loop.create_task, None)
self.assertRaises(
NotImplementedError, loop.call_later, None, None)
self.assertRaises(
NotImplementedError, loop.call_at, f, f)
self.assertRaises(
NotImplementedError, loop.call_soon, None)
self.assertRaises(
NotImplementedError, loop.time)
self.assertRaises(
NotImplementedError, loop.call_soon_threadsafe, None)
self.assertRaises(
NotImplementedError, loop.set_default_executor, f)
self.assertRaises(
NotImplementedError, loop.add_reader, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_reader, 1)
self.assertRaises(
NotImplementedError, loop.add_writer, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_writer, 1)
self.assertRaises(
NotImplementedError, loop.add_signal_handler, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.set_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.default_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.call_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.get_debug)
self.assertRaises(
NotImplementedError, loop.set_debug, f)
def test_not_implemented_async(self):
async def inner():
f = mock.Mock()
loop = asyncio.AbstractEventLoop()
with self.assertRaises(NotImplementedError):
await loop.run_in_executor(f, f)
with self.assertRaises(NotImplementedError):
await loop.getaddrinfo('localhost', 8080)
with self.assertRaises(NotImplementedError):
await loop.getnameinfo(('localhost', 8080))
with self.assertRaises(NotImplementedError):
await loop.create_connection(f)
with self.assertRaises(NotImplementedError):
await loop.create_server(f)
with self.assertRaises(NotImplementedError):
await loop.create_datagram_endpoint(f)
with self.assertRaises(NotImplementedError):
await loop.sock_recv(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_recv_into(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_sendall(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_connect(f, f)
with self.assertRaises(NotImplementedError):
await loop.sock_accept(f)
with self.assertRaises(NotImplementedError):
await loop.sock_sendfile(f, f)
with self.assertRaises(NotImplementedError):
await loop.sendfile(f, f)
with self.assertRaises(NotImplementedError):
await loop.connect_read_pipe(f, mock.sentinel.pipe)
with self.assertRaises(NotImplementedError):
await loop.connect_write_pipe(f, mock.sentinel.pipe)
with self.assertRaises(NotImplementedError):
await loop.subprocess_shell(f, mock.sentinel)
with self.assertRaises(NotImplementedError):
await loop.subprocess_exec(f)
loop = asyncio.new_event_loop()
loop.run_until_complete(inner())
loop.close()
class PolicyTests(unittest.TestCase):
def test_event_loop_policy(self):
policy = asyncio.AbstractEventLoopPolicy()
self.assertRaises(NotImplementedError, policy.get_event_loop)
self.assertRaises(NotImplementedError, policy.set_event_loop, object())
self.assertRaises(NotImplementedError, policy.new_event_loop)
self.assertRaises(NotImplementedError, policy.get_child_watcher)
self.assertRaises(NotImplementedError, policy.set_child_watcher,
object())
def test_get_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
self.assertIsNone(policy._local._loop)
loop = policy.get_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
self.assertIs(policy._local._loop, loop)
self.assertIs(loop, policy.get_event_loop())
loop.close()
def test_get_event_loop_calls_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
with mock.patch.object(
policy, "set_event_loop",
wraps=policy.set_event_loop) as m_set_event_loop:
loop = policy.get_event_loop()
# policy._local._loop must be set through .set_event_loop()
# (the unix DefaultEventLoopPolicy needs this call to attach
# the child watcher correctly)
m_set_event_loop.assert_called_with(loop)
loop.close()
def test_get_event_loop_after_set_none(self):
policy = asyncio.DefaultEventLoopPolicy()
policy.set_event_loop(None)
self.assertRaises(RuntimeError, policy.get_event_loop)
@mock.patch('asyncio.events.threading.current_thread')
def test_get_event_loop_thread(self, m_current_thread):
def f():
policy = asyncio.DefaultEventLoopPolicy()
self.assertRaises(RuntimeError, policy.get_event_loop)
th = threading.Thread(target=f)
th.start()
th.join()
def test_new_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
loop = policy.new_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
loop.close()
def test_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
old_loop = policy.get_event_loop()
self.assertRaises(AssertionError, policy.set_event_loop, object())
loop = policy.new_event_loop()
policy.set_event_loop(loop)
self.assertIs(loop, policy.get_event_loop())
self.assertIsNot(old_loop, policy.get_event_loop())
loop.close()
old_loop.close()
def test_get_event_loop_policy(self):
policy = asyncio.get_event_loop_policy()
self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy)
self.assertIs(policy, asyncio.get_event_loop_policy())
def test_set_event_loop_policy(self):
self.assertRaises(
AssertionError, asyncio.set_event_loop_policy, object())
old_policy = asyncio.get_event_loop_policy()
policy = asyncio.DefaultEventLoopPolicy()
asyncio.set_event_loop_policy(policy)
self.assertIs(policy, asyncio.get_event_loop_policy())
self.assertIsNot(policy, old_policy)
class GetEventLoopTestsMixin:
_get_running_loop_impl = None
_set_running_loop_impl = None
get_running_loop_impl = None
get_event_loop_impl = None
def setUp(self):
self._get_running_loop_saved = events._get_running_loop
self._set_running_loop_saved = events._set_running_loop
self.get_running_loop_saved = events.get_running_loop
self.get_event_loop_saved = events.get_event_loop
events._get_running_loop = type(self)._get_running_loop_impl
events._set_running_loop = type(self)._set_running_loop_impl
events.get_running_loop = type(self).get_running_loop_impl
events.get_event_loop = type(self).get_event_loop_impl
asyncio._get_running_loop = type(self)._get_running_loop_impl
asyncio._set_running_loop = type(self)._set_running_loop_impl
asyncio.get_running_loop = type(self).get_running_loop_impl
asyncio.get_event_loop = type(self).get_event_loop_impl
super().setUp()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
if sys.platform != 'win32':
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
try:
if sys.platform != 'win32':
asyncio.set_child_watcher(None)
super().tearDown()
finally:
self.loop.close()
asyncio.set_event_loop(None)
events._get_running_loop = self._get_running_loop_saved
events._set_running_loop = self._set_running_loop_saved
events.get_running_loop = self.get_running_loop_saved
events.get_event_loop = self.get_event_loop_saved
asyncio._get_running_loop = self._get_running_loop_saved
asyncio._set_running_loop = self._set_running_loop_saved
asyncio.get_running_loop = self.get_running_loop_saved
asyncio.get_event_loop = self.get_event_loop_saved
if sys.platform not in ('win32', 'OpenVMS'):
def test_get_event_loop_new_process(self):
# Issue bpo-32126: The multiprocessing module used by
# ProcessPoolExecutor is not functional when the
# multiprocessing.synchronize module cannot be imported.
support.import_module('multiprocessing.synchronize')
async def main():
pool = concurrent.futures.ProcessPoolExecutor()
result = await self.loop.run_in_executor(
pool, _test_get_event_loop_new_process__sub_proc)
pool.shutdown()
return result
self.assertEqual(
self.loop.run_until_complete(main()),
'hello')
def test_get_event_loop_returns_running_loop(self):
class TestError(Exception):
pass
class Policy(asyncio.DefaultEventLoopPolicy):
def get_event_loop(self):
raise TestError
old_policy = asyncio.get_event_loop_policy()
try:
asyncio.set_event_loop_policy(Policy())
loop = asyncio.new_event_loop()
with self.assertRaises(TestError):
asyncio.get_event_loop()
asyncio.set_event_loop(None)
with self.assertRaises(TestError):
asyncio.get_event_loop()
with self.assertRaisesRegex(RuntimeError, 'no running'):
self.assertIs(asyncio.get_running_loop(), None)
self.assertIs(asyncio._get_running_loop(), None)
async def func():
self.assertIs(asyncio.get_event_loop(), loop)
self.assertIs(asyncio.get_running_loop(), loop)
self.assertIs(asyncio._get_running_loop(), loop)
loop.run_until_complete(func())
asyncio.set_event_loop(loop)
with self.assertRaises(TestError):
asyncio.get_event_loop()
asyncio.set_event_loop(None)
with self.assertRaises(TestError):
asyncio.get_event_loop()
finally:
asyncio.set_event_loop_policy(old_policy)
if loop is not None:
loop.close()
with self.assertRaisesRegex(RuntimeError, 'no running'):
self.assertIs(asyncio.get_running_loop(), None)
self.assertIs(asyncio._get_running_loop(), None)
class TestPyGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
_get_running_loop_impl = events._py__get_running_loop
_set_running_loop_impl = events._py__set_running_loop
get_running_loop_impl = events._py_get_running_loop
get_event_loop_impl = events._py_get_event_loop
try:
import _asyncio # NoQA
except ImportError:
pass
else:
class TestCGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
_get_running_loop_impl = events._c__get_running_loop
_set_running_loop_impl = events._c__set_running_loop
get_running_loop_impl = events._c_get_running_loop
get_event_loop_impl = events._c_get_event_loop
class TestServer(unittest.TestCase):
def test_get_loop(self):
loop = asyncio.new_event_loop()
self.addCleanup(loop.close)
proto = MyProto(loop)
server = loop.run_until_complete(loop.create_server(lambda: proto, '0.0.0.0', 0))
self.assertEqual(server.get_loop(), loop)
server.close()
loop.run_until_complete(server.wait_closed())
class TestAbstractServer(unittest.TestCase):
def test_close(self):
with self.assertRaises(NotImplementedError):
events.AbstractServer().close()
def test_wait_closed(self):
loop = asyncio.new_event_loop()
self.addCleanup(loop.close)
with self.assertRaises(NotImplementedError):
loop.run_until_complete(events.AbstractServer().wait_closed())
def test_get_loop(self):
with self.assertRaises(NotImplementedError):
events.AbstractServer().get_loop()
if __name__ == '__main__':
unittest.main(verbosity=2)
|
linkcheck.py
|
# -*- coding: utf-8 -*-
"""
sphinx.builders.linkcheck
~~~~~~~~~~~~~~~~~~~~~~~~~
The CheckExternalLinksBuilder class.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import socket
import codecs
import threading
from os import path
from requests.exceptions import HTTPError
from six.moves import queue # type: ignore
from six.moves.urllib.parse import unquote
from six.moves.html_parser import HTMLParser
from docutils import nodes
# 2015-06-25 barry@python.org. This exception was deprecated in Python 3.3 and
# removed in Python 3.5, however for backward compatibility reasons, we're not
# going to just remove it. If it doesn't exist, define an exception that will
# never be caught but leaves the code in check_anchor() intact.
try:
from six.moves.html_parser import HTMLParseError # type: ignore
except ImportError:
class HTMLParseError(Exception): # type: ignore
pass
from sphinx.builders import Builder
from sphinx.util import encode_uri
from sphinx.util.console import ( # type: ignore
purple, red, darkgreen, darkgray, darkred, turquoise
)
from sphinx.util.requests import requests, useragent_header, is_ssl_error
if False:
# For type annotation
from typing import Any, Dict, List, Set, Tuple, Union  # NOQA
from sphinx.application import Sphinx # NOQA
class AnchorCheckParser(HTMLParser):
"""Specialized HTML parser that looks for a specific anchor."""
def __init__(self, search_anchor):
# type: (unicode) -> None
HTMLParser.__init__(self)
self.search_anchor = search_anchor
self.found = False
def handle_starttag(self, tag, attrs):
# type: (Any, Dict[unicode, unicode]) -> None
for key, value in attrs:
if key in ('id', 'name') and value == self.search_anchor:
self.found = True
break
def check_anchor(response, anchor):
# type: (requests.Response, unicode) -> bool
"""Reads HTML data from a response object `response` searching for `anchor`.
Returns True if anchor was found, False otherwise.
"""
parser = AnchorCheckParser(anchor)
try:
# Read file in chunks. If we find a matching anchor, we break
# the loop early in hopes not to have to download the whole thing.
for chunk in response.iter_content(chunk_size=4096, decode_unicode=True):
parser.feed(chunk)
if parser.found:
break
parser.close()
except HTMLParseError:
# HTMLParser is usually pretty good with sloppy HTML, but it tends to
# choke on EOF. But we're done then anyway.
pass
return parser.found
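# Editor's note: an illustrative sketch (not part of the original module) of how
# check_anchor() can be exercised on its own with a streamed response; the URL and
# anchor below are hypothetical placeholders.
#
#     resp = requests.get('https://example.org/page.html', stream=True,
#                         headers=dict(useragent_header))
#     found = check_anchor(resp, 'section-1')   # True iff an id/name "section-1" exists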
class CheckExternalLinksBuilder(Builder):
"""
Checks for broken external links.
"""
name = 'linkcheck'
def init(self):
# type: () -> None
self.to_ignore = [re.compile(x) for x in self.app.config.linkcheck_ignore]
self.anchors_ignore = [re.compile(x)
for x in self.app.config.linkcheck_anchors_ignore]
self.good = set() # type: Set[unicode]
self.broken = {} # type: Dict[unicode, unicode]
self.redirected = {} # type: Dict[unicode, Tuple[unicode, int]]
self.headers = dict(useragent_header)
# set a timeout for non-responding servers
socket.setdefaulttimeout(5.0)
# create output file
open(path.join(self.outdir, 'output.txt'), 'w').close()
# create queues and worker threads
self.wqueue = queue.Queue()
self.rqueue = queue.Queue()
self.workers = [] # type: List[threading.Thread]
for i in range(self.app.config.linkcheck_workers):
thread = threading.Thread(target=self.check_thread)
thread.setDaemon(True)
thread.start()
self.workers.append(thread)
def check_thread(self):
# type: () -> None
kwargs = {}
if self.app.config.linkcheck_timeout:
kwargs['timeout'] = self.app.config.linkcheck_timeout
kwargs['allow_redirects'] = True
def check_uri():
# type: () -> Tuple[unicode, unicode, int]
# split off anchor
if '#' in uri:
req_url, anchor = uri.split('#', 1)
for rex in self.anchors_ignore:
if rex.match(anchor):
anchor = None
break
else:
req_url = uri
anchor = None
# handle non-ASCII URIs
try:
req_url.encode('ascii')
except UnicodeError:
req_url = encode_uri(req_url)
try:
if anchor and self.app.config.linkcheck_anchors:
# Read the whole document and see if #anchor exists
response = requests.get(req_url, stream=True, headers=self.headers,
**kwargs)
found = check_anchor(response, unquote(anchor))
if not found:
raise Exception("Anchor '%s' not found" % anchor)
else:
try:
# try a HEAD request first, which should be easier on
# the server and the network
response = requests.head(req_url, headers=self.headers, **kwargs)
response.raise_for_status()
except HTTPError as err:
# retry with GET request if that fails, some servers
# don't like HEAD requests.
response = requests.get(req_url, stream=True, headers=self.headers,
**kwargs)
response.raise_for_status()
except HTTPError as err:
if err.response.status_code == 401:
# We'll take "Unauthorized" as working.
return 'working', ' - unauthorized', 0
else:
return 'broken', str(err), 0
except Exception as err:
if is_ssl_error(err):
return 'ignored', str(err), 0
else:
return 'broken', str(err), 0
if response.url.rstrip('/') == req_url.rstrip('/'):
return 'working', '', 0
else:
new_url = response.url
if anchor:
new_url += '#' + anchor
# history contains any redirects, get last
if response.history:
code = response.history[-1].status_code
return 'redirected', new_url, code
else:
# no redirect history recorded; report the redirect with an unknown code (0)
return 'redirected', new_url, 0
def check():
# type: () -> Tuple[unicode, unicode, int]
# check for various conditions without bothering the network
if len(uri) == 0 or uri.startswith(('#', 'mailto:', 'ftp:')):
return 'unchecked', '', 0
elif not uri.startswith(('http:', 'https:')):
return 'local', '', 0
elif uri in self.good:
return 'working', 'old', 0
elif uri in self.broken:
return 'broken', self.broken[uri], 0
elif uri in self.redirected:
return 'redirected', self.redirected[uri][0], self.redirected[uri][1]
for rex in self.to_ignore:
if rex.match(uri):
return 'ignored', '', 0
# need to actually check the URI
for _ in range(self.app.config.linkcheck_retries):
status, info, code = check_uri()
if status != "broken":
break
if status == "working":
self.good.add(uri)
elif status == "broken":
self.broken[uri] = info
elif status == "redirected":
self.redirected[uri] = (info, code)
return (status, info, code)
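# Editor's note: because of the caches consulted above, only the first occurrence of
# a URI touches the network. A URI already in self.good is answered as
# ('working', 'old', 0) without any request, and process_result() below silently
# skips entries whose info is 'old'.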
while True:
uri, docname, lineno = self.wqueue.get()
if uri is None:
break
status, info, code = check()
self.rqueue.put((uri, docname, lineno, status, info, code))
def process_result(self, result):
# type: (Tuple[unicode, unicode, int, unicode, unicode, int]) -> None
uri, docname, lineno, status, info, code = result
if status == 'unchecked':
return
if status == 'working' and info == 'old':
return
if lineno:
self.info('(line %4d) ' % lineno, nonl=1)
if status == 'ignored':
if info:
self.info(darkgray('-ignored- ') + uri + ': ' + info)
else:
self.info(darkgray('-ignored- ') + uri)
elif status == 'local':
self.info(darkgray('-local- ') + uri)
self.write_entry('local', docname, lineno, uri)
elif status == 'working':
self.info(darkgreen('ok ') + uri + info)
elif status == 'broken':
self.write_entry('broken', docname, lineno, uri + ': ' + info)
if self.app.quiet or self.app.warningiserror:
self.warn('broken link: %s (%s)' % (uri, info),
'%s:%s' % (self.env.doc2path(docname), lineno))
else:
self.info(red('broken ') + uri + red(' - ' + info))
elif status == 'redirected':
text, color = {
301: ('permanently', darkred),
302: ('with Found', purple),
303: ('with See Other', purple),
307: ('temporarily', turquoise),
0: ('with unknown code', purple),
}[code]
self.write_entry('redirected ' + text, docname, lineno,
uri + ' to ' + info)
self.info(color('redirect ') + uri + color(' - ' + text + ' to ' + info))
def get_target_uri(self, docname, typ=None):
# type: (unicode, unicode) -> unicode
return ''
def get_outdated_docs(self):
# type: () -> Set[unicode]
return self.env.found_docs
def prepare_writing(self, docnames):
# type: (nodes.Node) -> None
return
def write_doc(self, docname, doctree):
# type: (unicode, nodes.Node) -> None
self.info()
n = 0
for node in doctree.traverse(nodes.reference):
if 'refuri' not in node:
continue
uri = node['refuri']
lineno = None
while lineno is None:
node = node.parent
if node is None:
break
lineno = node.line
self.wqueue.put((uri, docname, lineno), False)
n += 1
done = 0
while done < n:
self.process_result(self.rqueue.get())
done += 1
if self.broken:
self.app.statuscode = 1
def write_entry(self, what, docname, line, uri):
# type: (unicode, unicode, int, unicode) -> None
with codecs.open(path.join(self.outdir, 'output.txt'), 'a', 'utf-8') as output: # type: ignore # NOQA
output.write("%s:%s: [%s] %s\n" % (self.env.doc2path(docname, None),
line, what, uri))
def finish(self):
# type: () -> None
for worker in self.workers:
self.wqueue.put((None, None, None), False)
def setup(app):
# type: (Sphinx) -> None
app.add_builder(CheckExternalLinksBuilder)
app.add_config_value('linkcheck_ignore', [], None)
app.add_config_value('linkcheck_retries', 1, None)
app.add_config_value('linkcheck_timeout', None, None, [int])
app.add_config_value('linkcheck_workers', 5, None)
app.add_config_value('linkcheck_anchors', True, None)
# Anchors starting with ! are ignored since they are
# commonly used for dynamic pages
app.add_config_value('linkcheck_anchors_ignore', ["^!"], None)
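# Editor's note: a minimal, hypothetical conf.py sketch showing how the values
# registered above are typically overridden by a project:
#
#     linkcheck_ignore = [r'https://localhost:\d+/']
#     linkcheck_retries = 2
#     linkcheck_timeout = 15
#     linkcheck_workers = 10
#     linkcheck_anchors = True
#     linkcheck_anchors_ignore = ['^!']
#
# The builder is then run with:  sphinx-build -b linkcheck <sourcedir> <outdir>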
|
RAE.py
|
__author__ = 'patras'
from actors.RAE.RAE1Stack import RAE1
from shared.timer import globalTimer
import threading
from shared import GLOBALS
from learning.learningData import WriteTrainingData
import multiprocessing as mp
import importlib
#****************************************************************
#To control Progress of each stack step by step
class IpcArgs():
def __init__(self):
self.sem = [threading.Semaphore(1)] #the semaphores to control progress of each stack and master
self.nextStack = 0 #the master thread is the next in line to be executed, master thread adds a new stack for every new incoming task
self.threadList = [] #keeps track of all the stacks in RAE's Agenda
def BeginCriticalRegion(self, stackid):
#while(ipcArgs.nextStack != stackid):
# pass
self.sem[stackid].acquire()
def EndCriticalRegion(self):
#ipcArgs.nextStack = 0
self.sem[0].release()
class EnvArgs():
def __init__(self):
self.sem = threading.Semaphore(0)
self.exit = False
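# Editor's note: a rough sketch (assuming a stack with id `k` has already been
# registered) of the hand-off these semaphores implement between a stack thread and
# the master loop in raeMult() below.
#
#     # in the stack thread:
#     ipcArgs.BeginCriticalRegion(k)    # blocks until the master releases sem[k]
#     # ...progress this stack by one step...
#     ipcArgs.EndCriticalRegion()       # releases sem[0], handing control back to the master
#
#     # in the master loop:
#     ipcArgs.sem[k].release()          # let stack k run one step
#     ipcArgs.sem[0].acquire()          # wait until the stack yields control back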
#****************************************************************
class rae():
def __init__(self, domain, problem, useLearningStrategy, planner, plannerParams, v, state, rv, RestoreState, GetDomainState):
self.verbosity = v
self.taskQueue = mp.Queue() # where the tasks come in
self.cmdExecQueue = mp.Queue() # where commands go out
self.cmdStatusQueue = mp.Queue() # where the status of commands come in
self.cmdStatusStack = {} # where the command statuses are saved after reading from cmdStatusQueue
self.cmdStackTable = {} # keeps track of which command belongs to which stack/job
self.TASKS = {} # dictionary of tasknames and the task parameters
self.methods = {} # dictionary of the list of methods for every task, initialized once for every run via the domain file
self.heuristic = {}
self.ipcArgs = IpcArgs() # for inter stack (thread) control transfer
self.envArgs = EnvArgs() # for control transfer between environment and stacks; checks for events in the env
self.state = state # only used via RAE1
self.rv = rv
self.RestoreState = RestoreState
self.GetDomainState = GetDomainState
self.InitializeDomain(domain, problem)
self.rae1Instances = {} # dictionary mapping stack ids to the rae1 objects
self.useLearningStrategy = useLearningStrategy
self.planner = planner
self.plannerParams = plannerParams
def InitializeDomain(self, domain, problem, startState=None):
'''
:param domain: code of the domain which you are running
:param problem: id of the problem
:return: None
'''
self.domain = domain
if domain in ['AIRS_dev', 'Mobipick']:
pass
else:
module = 'domains.' + domain + '.problems.auto.' + problem + '_' + domain
print("Importing ", module)
self.problemModule = importlib.import_module(module)
self.problemModule.SetInitialStateVariables(self.state, self.rv)
def GetNextAlive(self, lastActiveStack, numstacks, threadList):
'''
:param lastActiveStack: the stack which was progressed before this
:param numstacks: total number of stacks in the Agenda
:param threadList: list of all the threads, each running a RAE stack
:return: The stack which should be executed next
'''
nextAlive = -1
i = 1
j = lastActiveStack % numstacks + 1
while i <= numstacks:
if threadList[j-1].is_alive():
nextAlive = j
break
i = i + 1
j = j % numstacks + 1
return nextAlive
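# Editor's note, worked example: with numstacks = 3 and lastActiveStack = 2, the scan
# order above is stack 3, then 1, then 2; the first whose thread is still alive is
# returned, and -1 is returned if none are.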
def noNewTasks(self):
if self.domain in ['AIRS_dev', 'Mobipick']:
return False
for c in self.problemModule.tasks:
if c > self.newTasksCounter:
return False
return True
def GetNewTasks(self):
'''
:return: gets the new task that appears in the problem at the current time
'''
self.newTasksCounter += 1
if self.domain not in ['AIRS_dev', 'Mobipick']:
if self.newTasksCounter in self.problemModule.tasks:
return self.problemModule.tasks[self.newTasksCounter]
else:
return []
else:
tasks = []
while not self.taskQueue.empty():
tasks.append(self.taskQueue.get())
return tasks
def BeginFreshIteration(self, lastActiveStack, numstacks, threadList):
begin = True
i = lastActiveStack % numstacks + 1
while i != 1:
if threadList[i - 1].is_alive():
begin = False
break
i = i % numstacks + 1
return begin
def CreateNewStack(self, taskInfo, raeArgs):
stackid = raeArgs.stack
rae1 = RAE1(
raeArgs.task,
raeArgs,
self.domain,
self.ipcArgs,
self.cmdStatusStack,
self.cmdStackTable,
self.cmdExecQueue,
self.verbosity,
self.state,
self.methods,
self.heuristic,
self.useLearningStrategy,
self.planner,
self.plannerParams,
self.RestoreState,
self.GetDomainState
)
self.rae1Instances[stackid] = rae1
retcode, retryCount, eff, height, taskCount, commandCount, traces, utilVal, utilitiesList = rae1.RAE1Main(raeArgs.task, raeArgs)
taskInfo[stackid] = ([raeArgs.task] + raeArgs.taskArgs, retcode, retryCount, eff, height, taskCount, commandCount, traces, utilVal, utilitiesList)
def PrintResult(self, taskInfo):
output = "\nRESULTS:"
for stackid in taskInfo:
args, res, retryCount, eff, height, taskCount, commandCount, traces, utilVal, utilitiesList = taskInfo[stackid]
output += '\n Task : ' + '\t{}{}'.format(args[0], args[1:]) + \
'\n Result : \t' + str(res) + \
'\n Retry Count: \t' + str(retryCount) + \
'\n Utility: \t' + str(eff) + \
"\n -----------------\n"
return output
def PrintResultSummaryVersion1(self, taskInfo):
succ = 0
fail = 0
retries = 0
effTotal = 0
h = 0
t = 0
c = 0
for stackid in taskInfo:
args, res, retryCount, eff, height, taskCount, commandCount, traces, utilVal, utilitiesList = taskInfo[stackid]
if res == 'Success':
succ += 1
else:
fail += 1
retries += retryCount
effTotal += eff.GetValue()
c += commandCount
t += taskCount
if height > h:
h = height
print(succ, succ+fail, retries, globalTimer.GetSimulationCounter(), globalTimer.GetRealCommandExecutionCounter(), effTotal, h, t, c)
#print(' '.join('-'.join([key, str(cmdNet[key])]) for key in cmdNet))
def PrintResultSummaryVersion2(self, taskInfo):
for stackid in taskInfo:
args, res, retryCount, eff, height, taskCount, commandCount, traces, utilVal, utilitiesList = taskInfo[stackid]
if res == 'Success':
succ = 1
fail = 0
else:
succ = 0
fail = 1
print("v2", succ, succ+fail, retryCount, globalTimer.GetSimulationCounter(),
globalTimer.GetRealCommandExecutionCounter(), eff, height, taskCount, commandCount, utilVal)
utilString = ""
for u in utilitiesList:
utilString += str(u)
utilString += " "
print(utilString)
# with open("traces.txt", "a") as f:
# f.write("\n\n")
# f.write(traces)
#print(' '.join('-'.join([key, str(cmdNet[key])]) for key in cmdNet))
def StartEnv(self):
while True:
self.envArgs.sem.acquire()
if self.envArgs.exit:
self.ipcArgs.sem[0].release() # main controller
return
self.startEnvCounter += 1
if self.domain not in ["AIRS_dev", "Mobipick", "AIRS"]:
if self.startEnvCounter in self.problemModule.eventsEnv:
eventArgs = self.problemModule.eventsEnv[self.startEnvCounter]
event = eventArgs[0]
eventParams = eventArgs[1]
t = threading.Thread(target=event, args=eventParams)
t.setDaemon(True) # Setting the environment thread to daemon because we don't want the environment running once the tasks are done
t.start()
self.ipcArgs.sem[0].release()
def add_tasks(self, tasks):
current_counter = self.newTasksCounter
if current_counter + 1 not in self.problemModule.tasks:
self.problemModule.tasks[current_counter + 1] = tasks
else:
self.problemModule.tasks[current_counter + 1] += tasks
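# Editor's note: an illustrative call (task name and arguments are hypothetical);
# tasks queued this way are picked up by GetNewTasks() on the next counter tick:
#
#     agent.add_tasks([('fetch', 'robot1', 'objA'), ('deliver', 'robot1', 'objA', 'room2')])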
def raeMult(self, outputQueue=None):
lastActiveStack = 0 #keeps track of the last stack that was Progressed
numstacks = 0 #keeps track of the total number of stacks
self.newTasksCounter = 0
self.startEnvCounter = 0
taskInfo = {}
envThread = threading.Thread(target=self.StartEnv)
#startTime = time()
envThread.start()
while (True):
#if ipcArgs.nextStack == 0 or ipcArgs.threadList[ipcArgs.nextStack-1].isAlive() == False:
if True:
self.ipcArgs.sem[0].acquire()
if numstacks == 0 or self.BeginFreshIteration(lastActiveStack, numstacks, self.ipcArgs.threadList):
# Check for incoming tasks after progressing all stacks
taskParams = self.GetNewTasks()
if taskParams != []:
for newTask in taskParams:
numstacks = numstacks + 1
raeArgs = GLOBALS.RaeArgs()
raeArgs.stack = numstacks
raeArgs.task = newTask[0]
raeArgs.taskArgs = newTask[1:]
self.ipcArgs.sem.append(threading.Semaphore(0))
self.ipcArgs.threadList.append(threading.Thread(target=self.CreateNewStack, args = (taskInfo, raeArgs)))
self.ipcArgs.threadList[numstacks-1].start()
lastActiveStack = 0 # for the environment
self.envArgs.sem.release()
self.ipcArgs.sem[0].acquire()
if self.domain in ["AIRS", "AIRS_dev"]:
self.UpdateCommandStatus()
globalTimer.IncrementTime()
if numstacks > 0:
res = self.GetNextAlive(lastActiveStack, numstacks, self.ipcArgs.threadList)
if res != -1:
self.ipcArgs.nextStack = res
lastActiveStack = res
self.ipcArgs.sem[res].release()
else:
if self.noNewTasks():
self.envArgs.exit = True
self.envArgs.sem.release()
break
else:
self.ipcArgs.sem[0].release()
else:
self.ipcArgs.sem[0].release()
WriteTrainingData()
if self.verbosity > 0:
print("----Done with RAE----\n")
else:
self.PrintResultSummaryVersion2(taskInfo)
#globalTimer.Callibrate(startTime)
if outputQueue:
outputQueue.put(self.PrintResult(taskInfo))
return taskInfo # for unit tests
def do_task(self, task, *taskArgs):
# forward do_task to the currently active stack
currentStackId = self.ipcArgs.nextStack
self.rae1Instances[currentStackId].do_task(task, *taskArgs)
def do_command(self, cmd, *cmdArgs):
# forward do_command to the currently active stack
currentStackId = self.ipcArgs.nextStack
self.rae1Instances[currentStackId].do_command(cmd, *cmdArgs)
# RAE reads the cmdStatusQueue and updates the cmdStatusStack
def UpdateCommandStatus(self):
while not self.cmdStatusQueue.empty():
(id, res, nextState) = self.cmdStatusQueue.get()
stackid = self.cmdStackTable[id]
self.cmdStatusStack[stackid] = (id, res, nextState)
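# Editor's note: the executor side is expected to feed cmdStatusQueue with
# (command id, result, resulting state) triples, e.g. (hypothetical values):
#
#     cmdStatusQueue.put((17, 'Success', nextState))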
def declare_task(self, t, *args):
self.TASKS[t] = args
# declares the refinement methods for a task;
# ensuring that some constraints are satisfied
def declare_methods(self, task_name, *method_list):
self.methods[task_name] = []
for m in method_list:
self.add_new_method(task_name, m)
def add_new_method(self, task_name, m):
taskArgs = self.TASKS[task_name]
q = len(taskArgs)
variableArgs = False
if len(taskArgs) == 1:
if taskArgs[0] == "*":
variableArgs = True
if not variableArgs:
# ensure that the method has at least as many parameters as the task
assert(m.__code__.co_argcount - 1 >= q)
# ensure that the variable names of the
# first q parameters of m match with the parameters of task t
assert(m.__code__.co_varnames[1:q+1] == taskArgs)
self.methods[task_name].append(m)
#print(self.methods['fix_component'])
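# Editor's note: a hypothetical declaration illustrating the contract checked by the
# asserts above -- after its first parameter, a method's parameter names must mirror
# the declared task parameters:
#
#     agent.declare_task('move_to', 'robot', 'loc')
#
#     def m_move_direct(stack, robot, loc):   # first parameter ('stack' here) is skipped;
#         ...                                 # 'robot', 'loc' match the task's parameters
#
#     agent.declare_methods('move_to', m_move_direct)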
def declare_heuristic(self, task, name):
self.heuristic[task] = name
def declare_goal_method(self, method, goalState):
# TODO
# save the methods and corresponding states in a dict
pass
|
utilities.py
|
#!/bin/env python
# -*- coding: utf-8 -*-
#
# Disclaimer:
# Functions get_sys_info, netcdf_and_hdf5_versions and show_versions are from:
# xarray/util/print_versions.py
#
import os
import sys
import warnings
import urllib
import json
import collections
import copy
from functools import reduce
from packaging import version
import importlib
import locale
import platform
import struct
import subprocess
import contextlib
import xarray as xr
import pandas as pd
import numpy as np
from scipy import interpolate
import pickle
import pkg_resources
import shutil
import threading
import time
from argopy.options import OPTIONS, set_options
from argopy.stores import httpstore
from argopy.errors import (
FtpPathError,
InvalidFetcher,
InvalidFetcherAccessPoint,
)
try:
collectionsAbc = collections.abc
except AttributeError:
collectionsAbc = collections
path2pkl = pkg_resources.resource_filename("argopy", "assets/")
def clear_cache(fs=None):
""" Delete argopy cache folder content """
if os.path.exists(OPTIONS["cachedir"]):
# shutil.rmtree(OPTIONS["cachedir"])
for filename in os.listdir(OPTIONS["cachedir"]):
file_path = os.path.join(OPTIONS["cachedir"], filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print("Failed to delete %s. Reason: %s" % (file_path, e))
if fs:
fs.clear_cache()
def load_dict(ptype):
if ptype == "profilers":
with open(os.path.join(path2pkl, "dict_profilers.pickle"), "rb") as f:
loaded_dict = pickle.load(f)
return loaded_dict
elif ptype == "institutions":
with open(os.path.join(path2pkl, "dict_institutions.pickle"), "rb") as f:
loaded_dict = pickle.load(f)
return loaded_dict
else:
raise ValueError("Invalid dictionary pickle file")
def mapp_dict(Adictionnary, Avalue):
if Avalue not in Adictionnary:
return "Unknown"
else:
return Adictionnary[Avalue]
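# Editor's note: illustrative use of the two helpers above (the key 'IF' is
# hypothetical and only resolves if present in the pickled dictionary):
#
#     institutions = load_dict('institutions')
#     print(mapp_dict(institutions, 'IF'))   # institution name, or "Unknown"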
def list_available_data_src():
""" List all available data sources """
sources = {}
try:
from .data_fetchers import erddap_data as Erddap_Fetchers
sources["erddap"] = Erddap_Fetchers
except Exception:
warnings.warn(
"An error occurred while loading the ERDDAP data fetcher, "
"it will not be available !\n%s\n%s"
% (sys.exc_info()[0], sys.exc_info()[1])
)
pass
try:
from .data_fetchers import localftp_data as LocalFTP_Fetchers
sources["localftp"] = LocalFTP_Fetchers
except Exception:
warnings.warn(
"An error occurred while loading the local FTP data fetcher, "
"it will not be available !\n%s\n%s"
% (sys.exc_info()[0], sys.exc_info()[1])
)
pass
try:
from .data_fetchers import argovis_data as ArgoVis_Fetchers
sources["argovis"] = ArgoVis_Fetchers
except Exception:
warnings.warn(
"An error occurred while loading the ArgoVis data fetcher, "
"it will not be available !\n%s\n%s"
% (sys.exc_info()[0], sys.exc_info()[1])
)
pass
# return dict(sorted(sources.items()))
return sources
def list_available_index_src():
""" List all available index sources """
AVAILABLE_SOURCES = {}
try:
from .data_fetchers import erddap_index as Erddap_Fetchers
AVAILABLE_SOURCES["erddap"] = Erddap_Fetchers
except Exception:
warnings.warn(
"An error occurred while loading the ERDDAP index fetcher, "
"it will not be available !\n%s\n%s"
% (sys.exc_info()[0], sys.exc_info()[1])
)
pass
try:
from .data_fetchers import localftp_index as LocalFTP_Fetchers
AVAILABLE_SOURCES["localftp"] = LocalFTP_Fetchers
except Exception:
warnings.warn(
"An error occurred while loading the local FTP index fetcher, "
"it will not be available !\n%s\n%s"
% (sys.exc_info()[0], sys.exc_info()[1])
)
pass
return AVAILABLE_SOURCES
def list_standard_variables():
""" Return the list of variables for standard users """
return [
"DATA_MODE",
"LATITUDE",
"LONGITUDE",
"POSITION_QC",
"DIRECTION",
"PLATFORM_NUMBER",
"CYCLE_NUMBER",
"PRES",
"TEMP",
"PSAL",
"PRES_QC",
"TEMP_QC",
"PSAL_QC",
"PRES_ADJUSTED",
"TEMP_ADJUSTED",
"PSAL_ADJUSTED",
"PRES_ADJUSTED_QC",
"TEMP_ADJUSTED_QC",
"PSAL_ADJUSTED_QC",
"PRES_ADJUSTED_ERROR",
"TEMP_ADJUSTED_ERROR",
"PSAL_ADJUSTED_ERROR",
"JULD",
"JULD_QC",
"TIME",
"TIME_QC",
"CONFIG_MISSION_NUMBER",
]
def list_multiprofile_file_variables():
""" Return the list of variables in a netcdf multiprofile file.
This is for files created by GDAC under <DAC>/<WMO>/<WMO>_prof.nc
"""
return [
"CONFIG_MISSION_NUMBER",
"CYCLE_NUMBER",
"DATA_CENTRE",
"DATA_MODE",
"DATA_STATE_INDICATOR",
"DATA_TYPE",
"DATE_CREATION",
"DATE_UPDATE",
"DC_REFERENCE",
"DIRECTION",
"FIRMWARE_VERSION",
"FLOAT_SERIAL_NO",
"FORMAT_VERSION",
"HANDBOOK_VERSION",
"HISTORY_ACTION",
"HISTORY_DATE",
"HISTORY_INSTITUTION",
"HISTORY_PARAMETER",
"HISTORY_PREVIOUS_VALUE",
"HISTORY_QCTEST",
"HISTORY_REFERENCE",
"HISTORY_SOFTWARE",
"HISTORY_SOFTWARE_RELEASE",
"HISTORY_START_PRES",
"HISTORY_STEP",
"HISTORY_STOP_PRES",
"JULD",
"JULD_LOCATION",
"JULD_QC",
"LATITUDE",
"LONGITUDE",
"PARAMETER",
"PI_NAME",
"PLATFORM_NUMBER",
"PLATFORM_TYPE",
"POSITIONING_SYSTEM",
"POSITION_QC",
"PRES",
"PRES_ADJUSTED",
"PRES_ADJUSTED_ERROR",
"PRES_ADJUSTED_QC",
"PRES_QC",
"PROFILE_PRES_QC",
"PROFILE_PSAL_QC",
"PROFILE_TEMP_QC",
"PROJECT_NAME",
"PSAL",
"PSAL_ADJUSTED",
"PSAL_ADJUSTED_ERROR",
"PSAL_ADJUSTED_QC",
"PSAL_QC",
"REFERENCE_DATE_TIME",
"SCIENTIFIC_CALIB_COEFFICIENT",
"SCIENTIFIC_CALIB_COMMENT",
"SCIENTIFIC_CALIB_DATE",
"SCIENTIFIC_CALIB_EQUATION",
"STATION_PARAMETERS",
"TEMP",
"TEMP_ADJUSTED",
"TEMP_ADJUSTED_ERROR",
"TEMP_ADJUSTED_QC",
"TEMP_QC",
"VERTICAL_SAMPLING_SCHEME",
"WMO_INST_TYPE",
]
def check_localftp(path, errors: str = "ignore"):
""" Check if the path has the expected GDAC ftp structure
Check if the path is structured like:
.
└── dac
├── aoml
├── ...
├── coriolis
├── ...
├── meds
└── nmdis
Parameters
----------
path: str
Path name to check
errors: str
"ignore" or "raise" (or "warn"
Returns
-------
checked: boolean
True if at least one DAC folder is found under path/dac/<dac_name>
False otherwise
"""
dacs = [
"aoml",
"bodc",
"coriolis",
"csio",
"csiro",
"incois",
"jma",
"kma",
"kordi",
"meds",
"nmdis",
]
# Case 1:
check1 = (
os.path.isdir(path)
and os.path.isdir(os.path.join(path, "dac"))
and np.any([os.path.isdir(os.path.join(path, "dac", dac)) for dac in dacs])
)
if check1:
return True
elif errors == "raise":
# This was possible up to v0.1.3:
check2 = os.path.isdir(path) and np.any(
[os.path.isdir(os.path.join(path, dac)) for dac in dacs]
)
if check2:
raise FtpPathError(
"This path is no longer GDAC compliant for argopy.\n"
"Please make sure you point toward a path with a 'dac' folder:\n%s"
% path
)
else:
raise FtpPathError("This path is not GDAC compliant:\n%s" % path)
elif errors == "warn":
# This was possible up to v0.1.3:
check2 = os.path.isdir(path) and np.any(
[os.path.isdir(os.path.join(path, dac)) for dac in dacs]
)
if check2:
warnings.warn(
"This path is no longer GDAC compliant for argopy. This will raise an error in the future.\n"
"Please make sure you point toward a path with a 'dac' folder:\n%s"
% path
)
return False
else:
warnings.warn("This path is not GDAC compliant:\n%s" % path)
return False
else:
return False
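def _example_check_localftp():
    """ Illustrative sketch (not part of the original module): exercise check_localftp
    on throw-away directories. The temporary paths used below are assumptions. """
    import tempfile
    root = tempfile.mkdtemp()
    os.makedirs(os.path.join(root, "dac", "coriolis"))
    assert check_localftp(root)  # a 'dac/<dac_name>' folder exists -> True
    # A directory without a 'dac' folder only warns (and returns False) with errors="warn":
    assert not check_localftp(tempfile.mkdtemp(), errors="warn")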
def get_sys_info():
"Returns system information as a dict"
blob = []
# get full commit hash
commit = None
if os.path.isdir(".git") and os.path.isdir("argopy"):
try:
pipe = subprocess.Popen(
'git log --format="%H" -n 1'.split(" "),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
so, serr = pipe.communicate()
except Exception:
pass
else:
if pipe.returncode == 0:
commit = so
try:
commit = so.decode("utf-8")
except ValueError:
pass
commit = commit.strip().strip('"')
blob.append(("commit", commit))
try:
(sysname, nodename, release, version_, machine, processor) = platform.uname()
blob.extend(
[
("python", sys.version),
("python-bits", struct.calcsize("P") * 8),
("OS", "%s" % (sysname)),
("OS-release", "%s" % (release)),
("machine", "%s" % (machine)),
("processor", "%s" % (processor)),
("byteorder", "%s" % sys.byteorder),
("LC_ALL", "%s" % os.environ.get("LC_ALL", "None")),
("LANG", "%s" % os.environ.get("LANG", "None")),
("LOCALE", "%s.%s" % locale.getlocale()),
]
)
except Exception:
pass
return blob
def netcdf_and_hdf5_versions():
libhdf5_version = None
libnetcdf_version = None
try:
import netCDF4
libhdf5_version = netCDF4.__hdf5libversion__
libnetcdf_version = netCDF4.__netcdf4libversion__
except ImportError:
try:
import h5py
libhdf5_version = h5py.version.hdf5_version
except ImportError:
pass
return [("libhdf5", libhdf5_version), ("libnetcdf", libnetcdf_version)]
def show_versions(file=sys.stdout): # noqa: C901
""" Print the versions of argopy and its dependencies
Parameters
----------
file : file-like, optional
print to the given file-like object. Defaults to sys.stdout.
"""
sys_info = get_sys_info()
try:
sys_info.extend(netcdf_and_hdf5_versions())
except Exception as e:
print(f"Error collecting netcdf / hdf5 version: {e}")
deps = [
# (MODULE_NAME, f(mod) -> mod version)
# In REQUIREMENTS:
("argopy", lambda mod: mod.__version__),
("xarray", lambda mod: mod.__version__),
("scipy", lambda mod: mod.__version__),
("sklearn", lambda mod: mod.__version__),
("netCDF4", lambda mod: mod.__version__),
("dask", lambda mod: mod.__version__),
("toolz", lambda mod: mod.__version__),
("erddapy", lambda mod: mod.__version__),
("fsspec", lambda mod: mod.__version__),
("gsw", lambda mod: mod.__version__),
("aiohttp", lambda mod: mod.__version__),
#
("bottleneck", lambda mod: mod.__version__),
("cartopy", lambda mod: mod.__version__),
("cftime", lambda mod: mod.__version__),
("conda", lambda mod: mod.__version__),
("distributed", lambda mod: mod.__version__),
("IPython", lambda mod: mod.__version__),
("iris", lambda mod: mod.__version__),
("matplotlib", lambda mod: mod.__version__),
("nc_time_axis", lambda mod: mod.__version__),
("numpy", lambda mod: mod.__version__),
("pandas", lambda mod: mod.__version__),
("packaging", lambda mod: mod.__version__),
("pip", lambda mod: mod.__version__),
("PseudoNetCDF", lambda mod: mod.__version__),
("pytest", lambda mod: mod.__version__),
("seaborn", lambda mod: mod.__version__),
("setuptools", lambda mod: mod.__version__),
("sphinx", lambda mod: mod.__version__),
("zarr", lambda mod: mod.__version__),
]
deps_blob = list()
for (modname, ver_f) in deps:
try:
if modname in sys.modules:
mod = sys.modules[modname]
else:
mod = importlib.import_module(modname)
except Exception:
deps_blob.append((modname, None))
else:
try:
ver = ver_f(mod)
deps_blob.append((modname, ver))
except Exception:
deps_blob.append((modname, "installed"))
print("\nINSTALLED VERSIONS", file=file)
print("------------------", file=file)
for k, stat in sys_info:
print(f"{k}: {stat}", file=file)
print("", file=file)
for k, stat in deps_blob:
print(f"{k}: {stat}", file=file)
def show_options(file=sys.stdout): # noqa: C901
""" Print options of argopy
Parameters
----------
file : file-like, optional
print to the given file-like object. Defaults to sys.stdout.
"""
print("\nARGOPY OPTIONS", file=file)
print("--------------", file=file)
opts = copy.deepcopy(OPTIONS)
opts = dict(sorted(opts.items()))
for k, v in opts.items():
print(f"{k}: {v}", file=file)
def isconnected(host="https://www.ifremer.fr"):
""" check if we have a live internet connection
Parameters
----------
host: str
URL to use, 'https://www.ifremer.fr' by default
Returns
-------
bool
"""
if "http" in host or "ftp" in host:
try:
urllib.request.urlopen(host, timeout=1) # Python 3.x
return True
except Exception:
return False
else:
return os.path.exists(host)
def isAPIconnected(src="erddap", data=True):
""" Check if a source API is alive or not
The API is connected when it has a live URL or valid folder path.
Parameters
----------
src: str
The data or index source name, 'erddap' default
data: bool
If True check the data fetcher (default), if False, check the index fetcher
Returns
-------
bool
"""
if data:
list_src = list_available_data_src()
else:
list_src = list_available_index_src()
if src in list_src and getattr(
list_src[src], "api_server_check", None
):
if "localftp" in src:
# This is a special case because the source here is a local folder
result = check_localftp(OPTIONS["local_ftp"])
else:
result = isconnected(list_src[src].api_server_check)
return result
else:
raise InvalidFetcher
def erddap_ds_exists(ds: str = "ArgoFloats", erddap: str = 'http://www.ifremer.fr/erddap') -> bool:
""" Check if a dataset exists on a remote erddap server
return a bool
    Parameters
    ----------
ds: str
Name of the erddap dataset to check (default: 'ArgoFloats')
erddap: str
Url of the erddap server (default: 'http://www.ifremer.fr/erddap')
    Returns
    -------
bool
"""
with httpstore(timeout=OPTIONS['api_timeout']).open("".join([erddap, "/info/index.json"])) as of:
erddap_index = json.load(of)
return ds in [row[-1] for row in erddap_index["table"]["rows"]]
def badge(label="label", message="message", color="green", insert=False):
""" Return or insert shield.io badge image
Use the shields.io service to create a badge image
https://img.shields.io/static/v1?label=<LABEL>&message=<MESSAGE>&color=<COLOR>
Parameters
----------
label: str
Left side badge text
message: str
Right side badge text
color: str
Right side background color
insert: bool
Return url to badge image (False, default) or directly insert the image with HTML (True)
Returns
-------
str or IPython.display.Image
"""
from IPython.display import Image
url = (
"https://img.shields.io/static/v1?style=flat-square&label={}&message={}&color={}"
).format
img = url(urllib.parse.quote(label), urllib.parse.quote(message), color)
if not insert:
return img
else:
return Image(url=img)
def fetch_status(stdout: str = "html", insert: bool = True):
""" Fetch and report web API status
Parameters
----------
stdout: str
Format of the results, default is 'html'. Otherwise a simple string.
insert: bool
Print or display results directly in stdout format.
Returns
-------
IPython.display.HTML or str
"""
results = {}
list_src = list_available_data_src()
for api, mod in list_src.items():
if getattr(mod, "api_server_check", None):
# status = isconnected(mod.api_server_check)
status = isAPIconnected(api)
if api=='localftp' and OPTIONS['local_ftp'] == '-':
message = "ok" if status else "path undefined !"
else:
# message = "up" if status else "down"
message = "ok" if status else "offline"
results[api] = {"value": status, "message": message}
if "IPython" in sys.modules and stdout == "html":
cols = []
for api in sorted(results.keys()):
color = "green" if results[api]["value"] else "orange"
if isconnected():
# img = badge("src='%s'" % api, message=results[api]['message'], color=color, insert=False)
# img = badge(label="argopy src", message="%s is %s" %
# (api, results[api]['message']), color=color, insert=False)
img = badge(
label="src %s is" % api,
message="%s" % results[api]["message"],
color=color,
insert=False,
)
html = ('<td><img src="{}"></td>').format(img)
else:
# html = "<th>src %s is:</th><td>%s</td>" % (api, results[api]['message'])
html = (
"<th><div>src %s is:</div></th><td><div style='color:%s;'>%s</div></td>"
% (api, color, results[api]["message"])
)
cols.append(html)
this_HTML = ("<table><tr>{}</tr></table>").format("".join(cols))
if insert:
from IPython.display import HTML, display
return display(HTML(this_HTML))
else:
return this_HTML
else:
rows = []
for api in sorted(results.keys()):
# rows.append("argopy src %s: %s" % (api, results[api]['message']))
rows.append("src %s is: %s" % (api, results[api]["message"]))
txt = "\n".join(rows)
if insert:
print(txt)
else:
return txt
class monitor_status:
""" Monitor data source status with a refresh rate """
def __init__(self, refresh=1):
import ipywidgets as widgets
self.refresh_rate = refresh
self.text = widgets.HTML(
value=fetch_status(stdout="html", insert=False),
placeholder="",
description="",
)
self.start()
def work(self):
while True:
time.sleep(self.refresh_rate)
self.text.value = fetch_status(stdout="html", insert=False)
def start(self):
from IPython.display import display
thread = threading.Thread(target=self.work)
display(self.text)
thread.start()
# def open_etopo1(box, res="l"):
# """ Download ETOPO for a box
#
# Parameters
# ----------
# box: [xmin, xmax, ymin, ymax]
#
# Returns
# -------
# xarray.Dataset
# """
# # This function is in utilities to anticipate usage outside of plotting, eg interpolation, grounding detection
# resx, resy = 0.1, 0.1
# if res == "h":
# resx, resy = 0.016, 0.016
#
# uri = (
# "https://gis.ngdc.noaa.gov/mapviewer-support/wcs-proxy/wcs.groovy?filename=etopo1.nc"
# "&request=getcoverage&version=1.0.0&service=wcs&coverage=etopo1&CRS=EPSG:4326&format=netcdf"
# "&resx={}&resy={}"
# "&bbox={}"
# ).format
# thisurl = uri(
# resx, resy, ",".join([str(b) for b in [box[0], box[2], box[1], box[3]]])
# )
# ds = httpstore(cache=True).open_dataset(thisurl)
# da = ds["Band1"].rename("topo")
# for a in ds.attrs:
# da.attrs[a] = ds.attrs[a]
# da.attrs["Data source"] = "https://maps.ngdc.noaa.gov/viewers/wcs-client/"
# da.attrs["URI"] = thisurl
# return da
#
# From xarrayutils : https://github.com/jbusecke/xarrayutils/blob/master/xarrayutils/vertical_coordinates.py
# Direct integration of those 2 functions to minimize dependencies and possibility of tuning them to our needs
#
def linear_interpolation_remap(
z, data, z_regridded, z_dim=None, z_regridded_dim="regridded", output_dim="remapped"
):
# interpolation called in xarray ufunc
def _regular_interp(x, y, target_values):
# remove all nans from input x and y
idx = np.logical_or(np.isnan(x), np.isnan(y))
x = x[~idx]
y = y[~idx]
# Need at least 5 points in the profile to interpolate, otherwise, return NaNs
if len(y) < 5:
interpolated = np.empty(len(target_values))
interpolated[:] = np.nan
else:
# replace nans in target_values with out of bound Values (just in case)
target_values = np.where(
~np.isnan(target_values), target_values, np.nanmax(x) + 1
)
# Interpolate with fill value parameter to extend min pressure toward 0
interpolated = interpolate.interp1d(
x, y, bounds_error=False, fill_value=(y[0], y[-1])
)(target_values)
return interpolated
# infer dim from input
if z_dim is None:
if len(z.dims) != 1:
raise RuntimeError("if z_dim is not specified, x must be a 1D array.")
dim = z.dims[0]
else:
dim = z_dim
# if dataset is passed drop all data_vars that dont contain dim
if isinstance(data, xr.Dataset):
raise ValueError("Dataset input is not supported yet")
# TODO: for a dataset input just apply the function for each appropriate array
if version.parse(xr.__version__) > version.parse("0.15.0"):
kwargs = dict(
input_core_dims=[[dim], [dim], [z_regridded_dim]],
output_core_dims=[[output_dim]],
vectorize=True,
dask="parallelized",
output_dtypes=[data.dtype],
dask_gufunc_kwargs={'output_sizes': {output_dim: len(z_regridded[z_regridded_dim])}},
)
else:
kwargs = dict(
input_core_dims=[[dim], [dim], [z_regridded_dim]],
output_core_dims=[[output_dim]],
vectorize=True,
dask="parallelized",
output_dtypes=[data.dtype],
output_sizes={output_dim: len(z_regridded[z_regridded_dim])},
)
remapped = xr.apply_ufunc(_regular_interp, z, data, z_regridded, **kwargs)
remapped.coords[output_dim] = z_regridded.rename(
{z_regridded_dim: output_dim}
).coords[output_dim]
return remapped
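def _example_linear_interpolation_remap():
    """ Illustrative sketch (not part of the original module): remap a single synthetic
    profile onto a regular pressure axis. All values below are made-up assumptions. """
    pres = xr.DataArray(np.array([5., 20., 40., 60., 80., 100.]), dims=["N_LEVELS"])
    temp = xr.DataArray(np.array([20., 18., 15., 12., 10., 9.]), dims=["N_LEVELS"])
    std = np.arange(0., 101., 10.)
    std_pres = xr.DataArray(std, dims=["regridded"], coords={"regridded": std})
    # dim is inferred from `pres` (1D); the output carries the new 'remapped' dimension
    return linear_interpolation_remap(pres, temp, std_pres)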
class Chunker:
""" To chunk fetcher requests """
# Default maximum chunks size for all possible request parameters
    default_chunksize = {
        "box": {
            "lon": 20,  # degrees
            "lat": 20,  # degrees
            "dpt": 500,  # meters/db
            "time": 3 * 30,  # days
        },
        "wmo": {"wmo": 5, "cyc": 100},  # nb of floats, nb of cycles
    }
def __init__(self, request: dict, chunks: str = "auto", chunksize: dict = {}):
""" Create a request Chunker
Allow to easily split an access point request into chunks
Parameters
----------
request: dict
Access point request to be chunked. One of the following:
{'box': [lon_min, lon_max, lat_min, lat_max, dpt_min, dpt_max, time_min, time_max]}
{'box': [lon_min, lon_max, lat_min, lat_max, dpt_min, dpt_max]}
{'wmo': [wmo1, wmo2, ...], 'cyc': [0,1, ...]}
chunks: 'auto' or dict
Dictionary with request access point as keys and number of chunks to create as values.
Eg: {'wmo':10} will create a maximum of 10 chunks along WMOs.
chunksize: dict, optional
Dictionary with request access point as keys and chunk size as values (used as maximum values in
'auto' chunking). Eg: {'wmo': 5} will create chunks with as many as 5 WMOs each.
"""
self.request = request
if "box" in self.request:
is_box(self.request["box"])
if len(self.request["box"]) == 8:
self.this_chunker = self._chunker_box4d
elif len(self.request["box"]) == 6:
self.this_chunker = self._chunker_box3d
elif "wmo" in self.request:
self.this_chunker = self._chunker_wmo
else:
raise InvalidFetcherAccessPoint(
"'%s' not valid access point" % ",".join(self.request.keys())
)
default = self.default_chunksize[[k for k in self.request.keys()][0]]
if len(chunksize) == 0: # chunksize = {}
chunksize = default
if not isinstance(chunksize, collectionsAbc.Mapping):
raise ValueError("chunksize must be mappable")
else: # merge with default:
chunksize = {**default, **chunksize}
self.chunksize = collections.OrderedDict(sorted(chunksize.items()))
default = {k: "auto" for k in self.chunksize.keys()}
if chunks == "auto": # auto for all
chunks = default
elif len(chunks) == 0: # chunks = {}, i.e. chunk=1 for all
chunks = {k: 1 for k in self.request}
if not isinstance(chunks, collectionsAbc.Mapping):
raise ValueError("chunks must be 'auto' or mappable")
chunks = {**default, **chunks}
self.chunks = collections.OrderedDict(sorted(chunks.items()))
def _split(self, lst, n=1):
"""Yield successive n-sized chunks from lst"""
for i in range(0, len(lst), n):
yield lst[i: i + n]
def _split_list_bychunknb(self, lst, n=1):
"""Split list in n-imposed chunks of similar size
        The last chunk may contain more or fewer elements than the others, depending on the size of the list.
"""
res = []
siz = int(np.floor_divide(len(lst), n))
for i in self._split(lst, siz):
res.append(i)
if len(res) > n:
res[n - 1::] = [reduce(lambda i, j: i + j, res[n - 1::])]
return res
def _split_list_bychunksize(self, lst, max_size=1):
"""Split list in chunks of imposed size
        The last chunk may contain more or fewer elements than the others, depending on the size of the list.
"""
res = []
for i in self._split(lst, max_size):
res.append(i)
return res
def _split_box(self, large_box, n=1, d="x"): # noqa: C901
"""Split a box domain in one direction in n-imposed equal chunks """
if d == "x":
i_left, i_right = 0, 1
if d == "y":
i_left, i_right = 2, 3
if d == "z":
i_left, i_right = 4, 5
if d == "t":
i_left, i_right = 6, 7
if n == 1:
return [large_box]
boxes = []
if d in ["x", "y", "z"]:
n += 1 # Required because we split in linspace
bins = np.linspace(large_box[i_left], large_box[i_right], n)
for ii, left in enumerate(bins):
if ii < len(bins) - 1:
right = bins[ii + 1]
this_box = large_box.copy()
this_box[i_left] = left
this_box[i_right] = right
boxes.append(this_box)
elif "t" in d:
dates = pd.to_datetime(large_box[i_left: i_right + 1])
date_bounds = [
d.strftime("%Y%m%d%H%M%S")
for d in pd.date_range(dates[0], dates[1], periods=n + 1)
]
for i1, i2 in zip(np.arange(0, n), np.arange(1, n + 1)):
left, right = date_bounds[i1], date_bounds[i2]
this_box = large_box.copy()
this_box[i_left] = left
this_box[i_right] = right
boxes.append(this_box)
return boxes
def _split_this_4Dbox(self, box, nx=1, ny=1, nz=1, nt=1):
box_list = []
split_x = self._split_box(box, n=nx, d="x")
for bx in split_x:
split_y = self._split_box(bx, n=ny, d="y")
for bxy in split_y:
split_z = self._split_box(bxy, n=nz, d="z")
for bxyz in split_z:
split_t = self._split_box(bxyz, n=nt, d="t")
for bxyzt in split_t:
box_list.append(bxyzt)
return box_list
def _split_this_3Dbox(self, box, nx=1, ny=1, nz=1):
box_list = []
split_x = self._split_box(box, n=nx, d="x")
for bx in split_x:
split_y = self._split_box(bx, n=ny, d="y")
for bxy in split_y:
split_z = self._split_box(bxy, n=nz, d="z")
for bxyz in split_z:
box_list.append(bxyz)
return box_list
def _chunker_box4d(self, request, chunks, chunks_maxsize): # noqa: C901
BOX = request["box"]
n_chunks = chunks
for axis, n in n_chunks.items():
if n == "auto":
if axis == "lon":
Lx = BOX[1] - BOX[0]
if Lx > chunks_maxsize["lon"]: # Max box size in longitude
n_chunks["lon"] = int(
np.ceil(np.divide(Lx, chunks_maxsize["lon"]))
)
else:
n_chunks["lon"] = 1
if axis == "lat":
Ly = BOX[3] - BOX[2]
if Ly > chunks_maxsize["lat"]: # Max box size in latitude
n_chunks["lat"] = int(
np.ceil(np.divide(Ly, chunks_maxsize["lat"]))
)
else:
n_chunks["lat"] = 1
if axis == "dpt":
Lz = BOX[5] - BOX[4]
if Lz > chunks_maxsize["dpt"]: # Max box size in depth
n_chunks["dpt"] = int(
np.ceil(np.divide(Lz, chunks_maxsize["dpt"]))
)
else:
n_chunks["dpt"] = 1
if axis == "time":
Lt = np.timedelta64(
pd.to_datetime(BOX[7]) - pd.to_datetime(BOX[6]), "D"
)
MaxLen = np.timedelta64(chunks_maxsize["time"], "D")
if Lt > MaxLen: # Max box size in time
n_chunks["time"] = int(np.ceil(np.divide(Lt, MaxLen)))
else:
n_chunks["time"] = 1
boxes = self._split_this_4Dbox(
BOX,
nx=n_chunks["lon"],
ny=n_chunks["lat"],
nz=n_chunks["dpt"],
nt=n_chunks["time"],
)
return {"chunks": sorted(n_chunks), "values": boxes}
def _chunker_box3d(self, request, chunks, chunks_maxsize):
BOX = request["box"]
n_chunks = chunks
for axis, n in n_chunks.items():
if n == "auto":
if axis == "lon":
Lx = BOX[1] - BOX[0]
if Lx > chunks_maxsize["lon"]: # Max box size in longitude
n_chunks["lon"] = int(
np.floor_divide(Lx, chunks_maxsize["lon"])
)
else:
n_chunks["lon"] = 1
if axis == "lat":
Ly = BOX[3] - BOX[2]
if Ly > chunks_maxsize["lat"]: # Max box size in latitude
n_chunks["lat"] = int(
np.floor_divide(Ly, chunks_maxsize["lat"])
)
else:
n_chunks["lat"] = 1
if axis == "dpt":
Lz = BOX[5] - BOX[4]
if Lz > chunks_maxsize["dpt"]: # Max box size in depth
n_chunks["dpt"] = int(
np.floor_divide(Lz, chunks_maxsize["dpt"])
)
else:
n_chunks["dpt"] = 1
# if axis == 'time':
# Lt = np.timedelta64(pd.to_datetime(BOX[5]) - pd.to_datetime(BOX[4]), 'D')
# MaxLen = np.timedelta64(chunks_maxsize['time'], 'D')
# if Lt > MaxLen: # Max box size in time
# n_chunks['time'] = int(np.floor_divide(Lt, MaxLen))
# else:
# n_chunks['time'] = 1
boxes = self._split_this_3Dbox(
BOX, nx=n_chunks["lon"], ny=n_chunks["lat"], nz=n_chunks["dpt"]
)
return {"chunks": sorted(n_chunks), "values": boxes}
def _chunker_wmo(self, request, chunks, chunks_maxsize):
WMO = request["wmo"]
n_chunks = chunks
if n_chunks["wmo"] == "auto":
wmo_grps = self._split_list_bychunksize(WMO, max_size=chunks_maxsize["wmo"])
else:
n = np.min([n_chunks["wmo"], len(WMO)])
wmo_grps = self._split_list_bychunknb(WMO, n=n)
n_chunks["wmo"] = len(wmo_grps)
return {"chunks": sorted(n_chunks), "values": wmo_grps}
def fit_transform(self):
""" Chunk a fetcher request
Returns
-------
list
"""
self._results = self.this_chunker(self.request, self.chunks, self.chunksize)
# self.chunks = self._results['chunks']
return self._results["values"]
def format_oneline(s, max_width=65):
""" Return a string formatted for a line print """
if len(s) > max_width:
padding = " ... "
n = (max_width - len(padding)) // 2
q = (max_width - len(padding)) % 2
if q == 0:
return "".join([s[0:n], padding, s[-n:]])
else:
return "".join([s[0: n + 1], padding, s[-n:]])
else:
return s
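def _example_format_oneline():
    """ Illustrative sketch (not part of the original module). """
    long_name = "ArgoFloats_" + "x" * 100
    short = format_oneline(long_name)  # middle of the string replaced by ' ... '
    assert len(short) <= 65
    return short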
def is_indexbox(box: list, errors="raise"):
""" Check if this array matches a 2d or 3d index box definition
box = [lon_min, lon_max, lat_min, lat_max]
or:
box = [lon_min, lon_max, lat_min, lat_max, datim_min, datim_max]
Parameters
----------
box: list
errors: 'raise'
Returns
-------
bool
"""
tests = {}
# Formats:
tests["index box must be a list"] = lambda b: isinstance(b, list)
tests["index box must be a list with 4 or 6 elements"] = lambda b: len(b) in [4, 6]
error = None
for msg, test in tests.items():
if not test(box):
error = msg
break
if error and errors == "raise":
raise ValueError("%s: %s" % (box, error))
elif error:
return False
else:
# Insert pressure bounds and use full box validator:
tmp_box = box.copy()
tmp_box.insert(4, 0.)
tmp_box.insert(5, 10000.)
return is_box(tmp_box, errors=errors)
def is_box(box: list, errors="raise"):
""" Check if this array matches a 3d or 4d data box definition
box = [lon_min, lon_max, lat_min, lat_max, pres_min, pres_max]
or:
box = [lon_min, lon_max, lat_min, lat_max, pres_min, pres_max, datim_min, datim_max]
Parameters
----------
box: list
errors: 'raise'
Returns
-------
bool
"""
def is_dateconvertible(d):
try:
pd.to_datetime(d)
isit = True
except Exception:
isit = False
return isit
tests = {}
# print(box)
# Formats:
tests["box must be a list"] = lambda b: isinstance(b, list)
tests["box must be a list with 6 or 8 elements"] = lambda b: len(b) in [6, 8]
# Types:
tests["lon_min must be numeric"] = lambda b: (
isinstance(b[0], int) or isinstance(b[0], float)
)
tests["lon_max must be numeric"] = lambda b: (
isinstance(b[1], int) or isinstance(b[1], float)
)
tests["lat_min must be numeric"] = lambda b: (
isinstance(b[2], int) or isinstance(b[2], float)
)
tests["lat_max must be numeric"] = lambda b: (
isinstance(b[3], int) or isinstance(b[3], float)
)
tests["pres_min must be numeric"] = lambda b: (
isinstance(b[4], int) or isinstance(b[4], float)
)
tests["pres_max must be numeric"] = lambda b: (
isinstance(b[5], int) or isinstance(b[5], float)
)
if len(box) == 8:
tests[
"datetim_min must be a string convertible to a Pandas datetime"
] = lambda b: isinstance(b[-2], str) and is_dateconvertible(b[-2])
tests[
"datetim_max must be a string convertible to a Pandas datetime"
] = lambda b: isinstance(b[-1], str) and is_dateconvertible(b[-1])
# Ranges:
tests["lon_min must be in [-180;180] or [0;360]"] = (
lambda b: b[0] >= -180.0 and b[0] <= 360.0
)
tests["lon_max must be in [-180;180] or [0;360]"] = (
lambda b: b[1] >= -180.0 and b[1] <= 360.0
)
tests["lat_min must be in [-90;90]"] = lambda b: b[2] >= -90.0 and b[2] <= 90
tests["lat_max must be in [-90;90]"] = lambda b: b[3] >= -90.0 and b[3] <= 90.0
tests["pres_min must be in [0;10000]"] = lambda b: b[4] >= 0 and b[4] <= 10000
tests["pres_max must be in [0;10000]"] = lambda b: b[5] >= 0 and b[5] <= 10000
# Orders:
tests["lon_max must be larger than lon_min"] = lambda b: b[0] < b[1]
tests["lat_max must be larger than lat_min"] = lambda b: b[2] < b[3]
tests["pres_max must be larger than pres_min"] = lambda b: b[4] < b[5]
if len(box) == 8:
tests["datetim_max must come after datetim_min"] = lambda b: pd.to_datetime(
b[-2]
) < pd.to_datetime(b[-1])
error = None
for msg, test in tests.items():
if not test(box):
error = msg
break
if error and errors == "raise":
raise ValueError("%s: %s" % (box, error))
elif error:
return False
else:
return True
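def _example_is_box():
    """ Illustrative sketch (not part of the original module): one valid and one
    invalid box definition (values are arbitrary assumptions). """
    ok = is_box([-60, 0, 20, 60, 0, 1000, "2010-01-01", "2011-01-01"])  # True
    bad = is_box([0, -60, 20, 60, 0, 1000], errors="ignore")  # lon_max < lon_min -> False
    return ok, bad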
def is_list_of_strings(lst):
return isinstance(lst, list) and all(isinstance(elem, str) for elem in lst)
def is_list_of_dicts(lst):
return all(isinstance(x, dict) for x in lst)
def is_list_of_datasets(lst):
return all(isinstance(x, xr.Dataset) for x in lst)
def check_wmo(lst):
""" Check a WMO option and returned it as a list of integers
Parameters
----------
wmo: int
WMO must be an integer or an iterable with elements that can be casted as integers
errors: 'raise'
Returns
-------
list(int)
"""
is_wmo(lst, errors="raise")
# Make sure we deal with a list
if not isinstance(lst, list):
if isinstance(lst, np.ndarray):
lst = list(lst)
else:
lst = [lst]
# Then cast list elements as integers
return [abs(int(x)) for x in lst]
def is_wmo(lst, errors="raise"): # noqa: C901
""" Assess validity of a WMO option value
Parameters
----------
wmo: int, list(int), array(int)
WMO must be a single or a list of 5/7 digit positive numbers
errors: 'raise'
Possibly raises a ValueError exception, otherwise fails silently.
Returns
-------
bool
True if wmo is indeed a list of integers
"""
# Make sure we deal with a list
if not isinstance(lst, list):
if isinstance(lst, np.ndarray):
lst = list(lst)
else:
lst = [lst]
# Error message:
# msg = "WMO must be an integer or an iterable with elements that can be casted as integers"
msg = "WMO must be a single or a list of 5/7 digit positive numbers"
# Then try to cast list elements as integers, return True if ok
result = True
try:
for x in lst:
if not str(x).isdigit():
result = False
if (len(str(x)) != 5) and (len(str(x)) != 7):
result = False
if int(x) <= 0:
result = False
    except Exception:
        result = False
        if errors == "raise":
            raise ValueError(msg)
    if not result and errors == "raise":
        raise ValueError(msg)
    else:
        return result
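def _example_check_wmo():
    """ Illustrative sketch (not part of the original module). The WMO numbers below
    are arbitrary 7-digit examples. """
    assert check_wmo(6902746) == [6902746]  # scalar -> list of int
    assert check_wmo(np.array([6902746, 6902747])) == [6902746, 6902747]
    assert is_wmo(12, errors="ignore") is False  # not a 5/7 digit number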
# def docstring(value):
# """Replace one function docstring
#
# To be used as a decorator
# """
# def _doc(func):
# func.__doc__ = value
# return func
# return _doc
def warnUnless(ok, txt):
""" Decorator to raise warning unless condition is True
This function must be used as a decorator
Parameters
----------
ok: bool
Condition to raise the warning or not
txt: str
Text to display in the warning
"""
def inner(fct):
def wrapper(*args, **kwargs):
warnings.warn("%s %s" % (fct.__name__, txt))
return fct(*args, **kwargs)
return wrapper
if not ok:
return inner
else:
return lambda f: f
@contextlib.contextmanager
def modified_environ(*remove, **update):
"""
Temporarily updates the ``os.environ`` dictionary in-place.
The ``os.environ`` dictionary is updated in-place so that the modification
is sure to work in all situations.
:param remove: Environment variables to remove.
:param update: Dictionary of environment variables and values to add/update.
"""
# Source: https://github.com/laurent-laporte-pro/stackoverflow-q2059482
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
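def _example_modified_environ():
    """ Illustrative sketch (not part of the original module): temporarily define an
    environment variable (the name is an assumption, presumed not set beforehand). """
    with modified_environ(ARGOPY_EXAMPLE_VAR="1"):
        assert os.environ["ARGOPY_EXAMPLE_VAR"] == "1"
    assert "ARGOPY_EXAMPLE_VAR" not in os.environ  # removed again on exit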
|
source_worker.py
|
import time
import threading
import zmq
import os
import signal
import pickle
from Heron.communication.socket_for_serialization import Socket
from Heron import constants as ct
from Heron.communication.ssh_com import SSHCom
from Heron.gui.relic import HeronRelic
class SourceWorker:
def __init__(self, port, parameters_topic, initialisation_function, end_of_life_function, num_sending_topics,
relic_path, ssh_local_ip=' ', ssh_local_username=' ', ssh_local_password=' '):
self.parameters_topic = parameters_topic
self.data_port = port
self.pull_heartbeat_port = str(int(self.data_port) + 1)
self.initialisation_function = initialisation_function
self.end_of_life_function = end_of_life_function
self.num_sending_topics = int(num_sending_topics)
self.node_name = parameters_topic.split('##')[-2]
self.node_index = parameters_topic.split('##')[-1]
self.ssh_com = SSHCom(ssh_local_ip=ssh_local_ip, ssh_local_username=ssh_local_username,
ssh_local_password=ssh_local_password)
self.relic_path = relic_path
self.import_reliquery()
self.heron_relic = None
self.num_of_iters_to_update_relics_substate = None
self.time_of_pulse = time.perf_counter()
self.port_sub_parameters = ct.PARAMETERS_FORWARDER_PUBLISH_PORT
self.port_pub_proof_of_life = ct.PROOF_OF_LIFE_FORWARDER_SUBMIT_PORT
self.running_thread = True
self.loops_on = True
self.initialised = False
self.context = None
self.socket_push_data = None
self.socket_sub_parameters = None
self.stream_parameters = None
self.thread_parameters = None
self.parameters = None
self.socket_pull_heartbeat = None
self.stream_heartbeat = None
self.thread_heartbeat = None
self.socket_pub_proof_of_life = None
self.thread_proof_of_life = None
self.index = 0
def connect_socket(self):
"""
Sets up the sockets to do the communication with the source_com process through the forwarders
(for the link and the parameters).
:return: Nothing
"""
self.context = zmq.Context()
# Setup the socket that receives the parameters of the worker_exec function from the node
self.socket_sub_parameters = Socket(self.context, zmq.SUB)
self.socket_sub_parameters.setsockopt(zmq.LINGER, 0)
self.socket_sub_parameters.subscribe(self.parameters_topic)
self.ssh_com.connect_socket_to_local(self.socket_sub_parameters, r'tcp://127.0.0.1', self.port_sub_parameters)
self.socket_sub_parameters.subscribe(self.parameters_topic)
# Setup the socket that pushes the data to the com
self.socket_push_data = Socket(self.context, zmq.PUSH)
self.socket_push_data.setsockopt(zmq.LINGER, 0)
self.socket_push_data.set_hwm(1)
self.socket_push_data.bind(r"tcp://127.0.0.1:{}".format(self.data_port))
# Setup the socket that receives the heartbeat from the com
self.socket_pull_heartbeat = self.context.socket(zmq.PULL)
self.socket_pull_heartbeat.setsockopt(zmq.LINGER, 0)
self.ssh_com.connect_socket_to_local(self.socket_pull_heartbeat, r'tcp://127.0.0.1', self.pull_heartbeat_port)
# Setup the socket that publishes the fact that the worker_exec is up and running to the node com so that it
# can then update the parameters of the worker_exec.
self.socket_pub_proof_of_life = Socket(self.context, zmq.PUB)
self.socket_pub_proof_of_life.setsockopt(zmq.LINGER, 0)
self.ssh_com.connect_socket_to_local(self.socket_pub_proof_of_life, r'tcp://127.0.0.1',
self.port_pub_proof_of_life, skip_ssh=True)
def send_data_to_com(self, data):
self.socket_push_data.send_array(data, copy=False)
self.index += 1
def import_reliquery(self):
"""
        This import is done here because loading the package takes a few seconds; if it were imported for the
        first time inside the HeronRelic instance, that would delay the initialisation of the worker process,
        which can be a problem.
:return: Nothing
"""
#
if self.relic_path != '_':
try:
import reliquery
import reliquery.storage
except ImportError:
pass
def relic_create_parameters_df(self, **parameters):
"""
Creates a new relic with the Parameters pandasdf in it or adds the Parameters pandasdf in the existing Node's
Relic.
:param parameters: The dictionary of the parameters. The keys of the dict will become the column names of the
pandasdf
:return: Nothing
"""
self._relic_create_df('Parameters', **parameters)
def relic_create_substate_df(self, **variables):
"""
Creates a new relic with the Substate pandasdf in it or adds the Substate pandasdf in the existing Node's Relic.
:param variables: The dictionary of the variables to save. The keys of the dict will become the column names of
the pandasdf
:return: Nothing
"""
self._relic_create_df('Substate', **variables)
def _relic_create_df(self, type, **variables):
"""
Base function to create either a Parameters or a Substate pandasdf in a new or the existing Node's Relic
:param type: Parameters or Substate
        :param variables: The variables dictionary to be saved in the pandas. The keys of the dict will become the
        column names of the pandasdf
:return: Nothing
"""
if self.heron_relic is None:
self.heron_relic = HeronRelic(self.relic_path, self.node_name,
self.node_index, self.num_of_iters_to_update_relics_substate)
if self.heron_relic.operational:
self.heron_relic.create_the_pandasdf(type, **variables)
def relic_update_substate_df(self, **variables):
"""
Updates the Substate pandasdf of the Node's Relic
:param variables: The Substate's variables dict
:return: Nothing
"""
self.heron_relic.update_the_substate_pandasdf(self.index, **variables)
def update_parameters(self):
"""
        This updates the self.parameters from the parameters sent from the node (through the gui_com).
        If the relic system is up and running it also saves the new parameters into the Parameters df of the relic.
:return: Nothing
"""
try:
topic = self.socket_sub_parameters.recv(flags=zmq.NOBLOCK)
parameters_in_bytes = self.socket_sub_parameters.recv(flags=zmq.NOBLOCK)
args = pickle.loads(parameters_in_bytes)
self.parameters = args
if not self.initialised and self.initialisation_function is not None:
self.initialised = self.initialisation_function(self)
if self.initialised and self.heron_relic is not None and self.heron_relic.operational:
self.heron_relic.update_the_parameters_pandasdf(parameters=self.parameters, worker_index=self.index)
except Exception as e:
pass
def parameters_loop(self):
"""
The loop that updates the arguments (self.parameters)
:return: Nothing
"""
while self.loops_on:
self.update_parameters()
time.sleep(0.2)
def start_parameters_thread(self):
"""
Start the thread that runs the infinite arguments_loop
:return: Nothing
"""
self.thread_parameters = threading.Thread(target=self.parameters_loop, daemon=True)
self.thread_parameters.start()
def heartbeat_loop(self):
"""
The loop that reads the heartbeat 'PULSE' from the source_com. If it takes too long to receive the new one
it kills the worker_exec process
:return: Nothing
"""
while self.loops_on:
if self.socket_pull_heartbeat.poll(timeout=(1000 * ct.HEARTBEAT_RATE * ct.HEARTBEATS_TO_DEATH)):
self.socket_pull_heartbeat.recv()
else:
pid = os.getpid()
self.end_of_life_function()
self.on_kill(pid)
os.kill(pid, signal.SIGTERM)
time.sleep(0.5)
time.sleep(int(ct.HEARTBEAT_RATE))
self.socket_pull_heartbeat.close()
def proof_of_life(self):
"""
        When the worker_exec process starts it sends to the gui_com (through the proof_of_life_forwarder thread) a signal
        that lets the node (in the gui_com process) know that the worker_exec is running and ready to receive parameter updates.
:return: Nothing
"""
#print('---Sending POL {}'.format('topic = {}, msg = POL'.format(self.parameters_topic.encode('ascii'))))
for i in range(100):
try:
self.socket_pub_proof_of_life.send(self.parameters_topic.encode('ascii'), zmq.SNDMORE)
self.socket_pub_proof_of_life.send_string('POL')
except:
pass
time.sleep(0.1)
def start_heartbeat_thread(self):
"""
Start the heartbeat thread that run the infinite heartbeat_loop
:return: Nothing
"""
print('Started Worker {}##{} process with PID = {}'.format(self.node_name, self.node_index, os.getpid()))
self.thread_heartbeat = threading.Thread(target=self.heartbeat_loop, daemon=True)
self.thread_heartbeat.start()
self.thread_proof_of_life = threading.Thread(target=self.proof_of_life, daemon=True)
self.thread_proof_of_life.start()
def on_kill(self, pid):
print('Killing {} {} with pid {}'.format(self.node_name, self.node_index, pid))
if self.heron_relic is not None and self.heron_relic.substate_pandasdf_exists:
self.heron_relic.save_substate_at_death()
try:
self.loops_on = False
self.visualisation_on = False
self.socket_sub_parameters.close()
self.socket_push_data.close()
self.socket_pub_proof_of_life.close()
except Exception as e:
print('Trying to kill Source worker {} failed with error: {}'.format(self.node_name, e))
finally:
#self.context.term() # That causes an error
self.ssh_com.kill_tunneling_processes()
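# Illustrative sketch (not part of the original file): how a worker script might set up
# a SourceWorker. Port, topic and callbacks are assumptions; in Heron these values are
# normally passed on the command line by the process that spawns the worker.
#   worker = SourceWorker(port='40000', parameters_topic='parameters##MyNode##0',
#                         initialisation_function=None, end_of_life_function=lambda: None,
#                         num_sending_topics=1, relic_path='_')
#   worker.connect_socket()
#   worker.start_parameters_thread()
#   worker.start_heartbeat_thread()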
|
datamunging.py
|
import logging
import time
import threading
class DataMunging:
mongo = None
def __init__(self, mongo, replicator_queue):
self.mongo = mongo
self.logger = logging.getLogger(__name__)
self.replicator_queue = replicator_queue
self.lock = threading.Lock()
self.last_seqnum = 0
self.run_parser = False
def run(self, module_instance=None):
queue_thread = threading.Thread(target=self.check_queue)
queue_thread.daemon = True
queue_thread.start()
while True:
try:
                # fetch up to 100 entries from the replicator queue in one go
queue = self.mongo.get_from_queue(100)
            except Exception as e:
                self.logger.error('Cannot get entries from replicator queue. Error: ' + str(e))
                time.sleep(1)
                continue
            if queue.count() < 1:  # no data pending incremental update
self.logger.debug('No entries in replicator queue')
time.sleep(1)
continue
# if not self.run_parser:
# self.logger.debug('No messages from replicator queue')
# continue
            to_delete = list()  # ids of processed queue entries, to be deleted afterwards
            # db.comcn_actualcontroller.find({"id": {"$type": 16}}).limit(10)
            # during the CSV-based processing everything except numbers was turned into a string; floats stayed floats
for record in queue:
if module_instance is not None:
try:
                        # TODO(furuiyang): process the data; this could be handled the same way as the CSV files
doc = module_instance.run(record, self.mongo)
except Exception as e:
self.logger.error('Error during parse data with module. Error: ' + str(e))
doc = record
key = None
self.logger.debug('Event: ' + doc['event_type'])
                # this step is deprecated
                # --------------------- get the primary key for update and delete events ---------------------
# if doc['event_type'] in ['update', 'delete']:
# self.logger.debug('Event: ' + doc['event_type'])
# try:
# key = self.mongo.get_primary_key(doc['table'], doc['schema'])
# self.logger.debug(key)
# except Exception as e:
# self.logger.error('Cannot get primary key for table ' + doc['table'] +
# ' in schema ' + doc['schema'] + '. Error: ' + str(e))
                # ------------------------- insert events --------------------------------------------------
if doc['event_type'] == 'insert':
try:
                        # TODO: process doc['values']
self.mongo.insert(doc['values'], doc['schema'], doc['table'])
to_delete.append(str(doc['_id']))
                        # refresh the checkpoint (last processed sequence number)
self.last_seqnum = doc['seqnum']
except Exception as e:
self.logger.error('Cannot insert document into collection ' + doc['table'] +
' db ' + doc['schema'] + ' Error: ' + str(e))
                # key lookup is deprecated; key is simply left as None
elif doc['event_type'] == 'update':
if key is None:
                        # use the whole previous doc as the primary key
primary_key = doc['values']['before']
else:
primary_key = dict()
for k in key['primary_key']:
primary_key[k] = doc['values']['after'][k]
try:
self.mongo.update(doc['values']['after'], doc['schema'], doc['table'], primary_key)
to_delete.append(doc['_id'])
self.last_seqnum = doc['seqnum']
except Exception as e:
self.logger.error('Cannot update document ' + str(doc['_id']) +
' into collection ' + doc['table'] +
' db ' + doc['schema'] + ' Error: ' + str(e))
elif doc['event_type'] == 'delete':
if key is not None:
primary_key = dict()
for k in key['primary_key']:
primary_key[k] = doc['values'][k]
else:
primary_key = None
try:
self.mongo.delete(doc=doc['values'], schema=doc['schema'], collection=doc['table'],
primary_key=primary_key)
to_delete.append(doc['_id'])
self.last_seqnum = doc['seqnum']
except Exception as e:
self.logger.error('Cannot delete document ' + str(doc['_id']) +
' into collection ' + doc['table'] +
' db ' + doc['schema'] + ' Error: ' + str(e))
            # remove the queue entries that have already been processed
self.logger.debug('Delete records: ' + str(to_delete))
for queue_id in to_delete:
import bson
queue_id = bson.ObjectId(queue_id)
try:
self.mongo.delete_from_queue({'_id': queue_id})
except Exception as e:
self.logger.error('Cannot delete document from queue Error: ' + str(e))
time.sleep(5)
def check_queue(self):
self.logger.info('Start QueueMonitor')
while True:
if not self.replicator_queue.empty():
try:
self.logger.debug('Try to read from replicator queue')
msg_queue = self.replicator_queue.get()
self.logger.debug('Read from replicator queue')
self.manage_replicator_msg(msg_queue)
self.logger.debug('Replicator message managed')
except Exception as e:
self.logger.error('Cannot read and manage replicator message. Error: ' + str(e))
time.sleep(.1)
def manage_replicator_msg(self, msg):
with self.lock:
self.logger.debug('Message from queue')
self.logger.debug(msg)
self.logger.debug('Last seqnum: ' + str(self.last_seqnum))
if msg['seqnum'] > self.last_seqnum:
self.logger.debug('new entries in queue')
self.run_parser = True
else:
self.logger.debug('NO new entries in queue')
self.run_parser = False
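# Illustrative sketch (not part of the original file): `mongo` is expected to be a
# wrapper exposing get_from_queue / insert / update / delete / delete_from_queue, and
# `replicator_queue` a Queue fed by the replicator process (both are assumptions here).
#   munger = DataMunging(mongo=mongo_wrapper, replicator_queue=replicator_queue)
#   munger.run(module_instance=None)   # blocks: polls the queue and applies the events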
|
edge_betweenness_filter.py
|
from libary.result import Result
import networkx
import threading
class EdgeBetweennessFilter:
def __init__(self):
pass
def get_name(self):
return 'Filter:EdgeBetweeness'
def apply(self, input: Result, parameters: dict, old_results: list):
input_undirected_knn_graph = input.get_undirected_knn_graph()
parameter_p = float(parameters['parameter_p'])
iterations = int(parameters['iterations'])
########################################################
deletion_set_before_start = None
start_iteration = None
for old_result in old_results:
if old_result.get_decoded_parameters()['parameter_p'] == parameter_p: # p has to be the same
old_intermediate_result = old_result.get_intermediate_result()
key = max([x for x in old_intermediate_result.keys() if x < iterations])
if start_iteration is None or key > start_iteration:
start_iteration = key
deletion_set_before_start = old_intermediate_result[key]
new_undirected_graph = input_undirected_knn_graph.copy()
if deletion_set_before_start is None:
start_iteration = 1
else:
for a, b in deletion_set_before_start:
new_undirected_graph.remove_edge(a, b)
######################################################### new application
intermediate_result = dict()
total_deleted_edges = set()
for i in range(start_iteration, iterations+1):
connected_components = networkx.algorithms.connected_components(new_undirected_graph)
parsed_component = list()
for x in connected_components:
parsed_component.append([a for a in x])
worker_list = list()
for component in parsed_component:
if len(component) == 1:
continue
worker = Worker(nodes=component, parameter_p=parameter_p, input_graph=new_undirected_graph)
worker_list.append(worker)
worker.run()
deleted_edges = set()
for worker in worker_list:
worker.join()
for a, b in worker.edges_to_delete:
try:
new_undirected_graph.remove_edge(a, b)
deleted_edges.add((int(a), int(b)))
except:
try:
new_undirected_graph.remove_edge(b, a)
deleted_edges.add((int(b), int(a)))
except:
continue
intermediate_result[i] = list(deleted_edges)
total_deleted_edges = total_deleted_edges.union(deleted_edges)
result = Result(parameter=parameters,
intermediate_result=intermediate_result,
undirected_knn_graph=new_undirected_graph,
deletion_set=total_deleted_edges,
data=input.get_resulting_data(),
knn_distances=input.get_resulting_knn_distances(),
directed_knn_graph=input.get_directed_knn_graph())
return result
def get_controls(self) -> list:
controls = list()
controls.append({'title': 'iterations', 'id': 'iterations', 'type': 'int'})
controls.append({'title': 'p', 'id': 'parameter_p', 'type': 'float'})
return controls
def get_gui_name(self) -> str:
return 'Edge-Betweenness Filter'
def get_tooltip(self):
return 'This filter is a good choice to identify bridges between clusters. It works on the basis of the edge-betweenness concept of the Girvan-Newman algorithm.'
def get_default_parameter(self) -> dict:
return {'iterations': 1, 'parameter_p': 0.0075}
class Worker:
def __init__(self, nodes, parameter_p: float, input_graph: networkx.Graph):
self.parameter_p = parameter_p
self.edges_to_delete = set()
self.input_graph = networkx.Graph(input_graph.subgraph(nodes)) # only graph of the connected component
self.__thread = threading.Thread(target=self.__work)
def run(self):
self.__thread.start()
def join(self):
self.__thread.join()
def __work(self):
edge_betweenness = networkx.edge_betweenness_centrality(self.input_graph, weight='weight')
edge_betweenness = [x for x in edge_betweenness.items()]
edge_betweenness = sorted(edge_betweenness, key=lambda x: x[1], reverse=True)
for k, v in edge_betweenness[:max(int(self.parameter_p * self.input_graph.number_of_edges()), 1)]:
self.edges_to_delete.add((k[0], k[1]))
self.input_graph.remove_edge(k[0], k[1])
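# Illustrative sketch (not part of the original file): run a single Worker on a toy
# graph and inspect the edges it marks for deletion. The karate club graph and the
# value of parameter_p are arbitrary assumptions.
def _example_worker():
    graph = networkx.karate_club_graph()
    worker = Worker(nodes=list(graph.nodes), parameter_p=0.05, input_graph=graph)
    worker.run()
    worker.join()
    return worker.edges_to_delete  # set of (u, v) tuples with the highest betweenness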
|
simple_http_server_batch.py
|
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from urllib.parse import urlparse
import json
import sys
import datetime
import plyvel
import requests
import threading
rec = False
zero_t = "0000-00-00 00:00:00.000000"
v_ph = "++++-++-++ ++:++:++.++++++"
t_ph = "---------- --:--:--.------"
class RequestHandler(BaseHTTPRequestHandler):
protocol_version = "HTTP/1.1"
def do_GET(self):
print(threading.currentThread().getName())
parsed_path = urlparse(self.path)
# handle read request
if parsed_path.path == "/kv/":
# the database is still under recover
if not rec:
message = json.dumps({"is_key_in": "NA"})
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.send_header("Content-Length", len(message))
self.end_headers()
self.wfile.write(message.encode())
return
# print('key is = ', parsed_path.query.split("=")[-1])
print("receive read request")
k = parsed_path.query.split("=")[-1]
v = db.get((v_ph + "[" + k).encode())
s = {"is_key_in": "yes", "value": v.decode()} if v else {"is_key_in": "no", "value": "["}
message = json.dumps(s)
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.send_header("Content-Length", len(message))
self.end_headers()
self.wfile.write(message.encode())
return
# data recover from failure
if parsed_path.path == "/rec/":
print("receive recover request")
if not rec:
message = json.dumps({})
self.send_response(400)
self.send_header('Content-type', 'application/json')
self.send_header("Content-Length", len(message))
self.end_headers()
self.wfile.write(message.encode())
return
t = parsed_path.query.split("=")[-1]
dic = {}
for k, v in db.iterator(start=t.encode()):
dic[k.decode()] = v.decode()
message = json.dumps(dic)
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.send_header("Content-Length", len(message))
self.end_headers()
self.wfile.write(message.encode())
return
def do_POST(self):
print(threading.currentThread().getName())
content_len = int(self.headers.get('Content-Length'))
post_body = self.rfile.read(content_len)
data = json.loads(post_body.decode())
parsed_path = urlparse(self.path)
# direct write request
if parsed_path.path == "/kv/":
# the database is still under recover
if not rec:
message = json.dumps({"is_key_in": "NA"})
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.send_header("Content-Length", len(message))
self.end_headers()
self.wfile.write(message.encode())
return
print("receive write request")
t = str(datetime.datetime.now())
k, v = data['k'], data['v']
old_t = db.get((t_ph + "[" + k).encode(), b"")
# record key-value
old_v = db.get((v_ph + "[" + k).encode())
            with db.write_batch() as wb:
                # delete old timestamp
                wb.delete((old_t.decode() + "[" + k).encode())
                # add new timestamp with this key
                wb.put((t + "[" + k).encode(), v.encode())
                # update timestamp
                wb.put((t_ph + "[" + k).encode(), t.encode())
                # update value
                wb.put((v_ph + "[" + k).encode(), v.encode())
# launch http request to sync data for other servers
# even if a server crashes, we will still try to sync with it
for port in server_ports:
if port != server_port:
try:
r = requests.post(url = 'http://%s:%s/sync/' % (server_ip, port), json = {"k": k, "v": v, "t": t}, timeout = 3)
except (requests.ConnectionError, requests.Timeout):
print("Sync Timeout: process %s:%s dead!" % (server_ip, port))
s = {"is_key_in": "yes", "old_value": old_v.decode()} if old_v else {"is_key_in": "no", "old_value": "["}
message = json.dumps(s)
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.send_header("Content-Length", len(message))
self.end_headers()
self.wfile.write(message.encode())
# data sync during run-time
if parsed_path.path == "/sync/":
# print("post key is = ", data['k'])
# print("post value is = ", data['v'])
# print("post timestamp is = ", data['t'])
print("receive sync request")
k, v, t = data['k'], data['v'], data['t']
old_t = db.get((t_ph + "[" + k).encode(), b"")
            with db.write_batch() as wb:
                # delete old timestamp
                wb.delete((old_t.decode() + "[" + k).encode())
                # add new timestamp
                wb.put((t + "[" + k).encode(), v.encode())
                # update timestamp
                wb.put((t_ph + "[" + k).encode(), t.encode())
                # update value
                wb.put((v_ph + "[" + k).encode(), v.encode())
message = json.dumps({})
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.send_header("Content-Length", len(message))
self.end_headers()
self.wfile.write(message.encode())
def recover_db():
global rec
print("start db recover process...")
# start the recover process
# get the latest timestamp in db
    try:
        latest_t = next(db.iterator(reverse=True))[0].decode().split("[")[0]
    except Exception:
        latest_t = zero_t
for port in server_ports:
if port != server_port:
try:
r = requests.get(url = 'http://%s:%s/rec/?t=%s' % (server_ip, port, latest_t), timeout = 3)
except (requests.ConnectionError, requests.Timeout):
print("Sync Timeout: process %s:%s dead!" % (server_ip, port))
else:
if r.status_code == 200:
# write to db
for tk, v in r.json().items():
t, k = tk.split("[")[:2]
old_t = db.get((t_ph + "[" + k).encode(), b"")
if old_t.decode() < t:
with db.write_batch() as wb:
# delete old timestamp
wb.delete((old_t.decode() + "[" + k).encode())
# add new timestamp
wb.put((t + "[" + k).encode(), v.encode())
# update timestamp
wb.put((t_ph + "[" + k).encode(), t.encode())
# update value
wb.put((v_ph + "[" + k).encode(), v.encode())
# done with the recovery
break
else:
print("Wrong Status Code: process %s:%s not ready!" % (server_ip, port))
rec = True
print("finish db recover process")
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
if __name__ == '__main__':
server_ip, server_ports, server_index = sys.argv[1], sys.argv[2].split(','), int(sys.argv[3])
server_port = server_ports[server_index]
# reconnect to the database
# record 1: v_ph+key->value
    # record 2: timestamp+key->value
# record 3: t_ph+key->timestamp
db = plyvel.DB('/tmp/cs739db-%s/' % server_port, create_if_missing=True)
rec = False
    # launch a thread for data restore
threading.Thread(target=recover_db).start()
server = ThreadedHTTPServer((server_ip, int(server_port)), RequestHandler)
print('Starting server at http://%s:%s' % (server_ip, server_port))
server.serve_forever()
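# Illustrative client calls (sketch, not part of the original file); the host and port
# below are assumptions and must match the arguments the server was started with.
#   requests.post('http://127.0.0.1:8080/kv/', json={'k': 'foo', 'v': 'bar'})
#   requests.get('http://127.0.0.1:8080/kv/?key=foo').json()
#   # -> {'is_key_in': 'yes', 'value': 'bar'} once recovery has finished (rec is True)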
|
stim_server_client.py
|
# Author: Mainak Jas <mainak@neuro.hut.fi>
# License: BSD (3-clause)
import queue
import time
import socket
import socketserver
import threading
import numpy as np
from mne.utils import logger, verbose, fill_doc
class _ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
"""Create a threaded TCP server.
Parameters
----------
server_address : str
Address on which server is listening
request_handler_class : subclass of BaseRequestHandler
_TriggerHandler which defines the handle method
stim_server : instance of StimServer
object of StimServer class
"""
def __init__(self, server_address, request_handler_class,
stim_server): # noqa: D102
# Basically, this server is the same as a normal TCPServer class
# except that it has an additional attribute stim_server
# Create the server and bind it to the desired server address
socketserver.TCPServer.__init__(self, server_address,
request_handler_class,
False)
self.stim_server = stim_server
class _TriggerHandler(socketserver.BaseRequestHandler):
"""Request handler on the server side."""
def handle(self):
"""Handle requests on the server side."""
self.request.settimeout(None)
while self.server.stim_server._running:
data = self.request.recv(1024) # clip input at 1Kb
data = data.decode() # need to turn it into a string (Py3k)
if data == 'add client':
# Add stim_server._client
client_id = self.server.stim_server \
._add_client(self.client_address[0],
self)
# Instantiate queue for communication between threads
# Note: new queue for each handler
if not hasattr(self, '_tx_queue'):
self._tx_queue = queue.Queue()
self.request.sendall("Client added".encode('utf-8'))
# Mark the client as running
for client in self.server.stim_server._clients:
if client['id'] == client_id:
client['running'] = True
elif data == 'get trigger':
# Pop triggers and send them
                if (self._tx_queue.qsize() > 0 and
                        hasattr(self.server.stim_server, '_clients')):
trigger = self._tx_queue.get()
self.request.sendall(str(trigger).encode('utf-8'))
else:
self.request.sendall("Empty".encode('utf-8'))
class StimServer(object):
"""Stimulation Server.
Server to communicate with StimClient(s).
Parameters
----------
port : int
The port to which the stimulation server must bind to.
n_clients : int
The number of clients which will connect to the server.
See Also
--------
StimClient
"""
def __init__(self, port=4218, n_clients=1): # noqa: D102
# Start a threaded TCP server, binding to localhost on specified port
self._data = _ThreadedTCPServer(('', port),
_TriggerHandler, self)
self.n_clients = n_clients
def __enter__(self): # noqa: D105
# This is done to avoid "[Errno 98] Address already in use"
self._data.allow_reuse_address = True
self._data.server_bind()
self._data.server_activate()
# Start a thread for the server
self._thread = threading.Thread(target=self._data.serve_forever)
# Ctrl-C will cleanly kill all spawned threads
# Once the main thread exits, other threads will exit
self._thread.daemon = True
self._thread.start()
self._running = False
self._clients = list()
return self
def __exit__(self, type, value, traceback): # noqa: D105
self.shutdown()
@verbose
def start(self, timeout=np.inf, verbose=None):
"""Start the server.
Parameters
----------
timeout : float
Maximum time to wait for clients to be added.
%(verbose)s
"""
# Start server
if not self._running:
logger.info('RtServer: Start')
self._running = True
start_time = time.time() # init delay counter.
# wait till n_clients are added
while (len(self._clients) < self.n_clients):
current_time = time.time()
if (current_time > start_time + timeout):
raise StopIteration
time.sleep(0.1)
@verbose
def _add_client(self, ip, sock, verbose=None):
"""Add client.
Parameters
----------
ip : str
IP address of the client.
sock : instance of socket.socket
The client socket.
%(verbose)s
"""
logger.info("Adding client with ip = %s" % ip)
client = dict(ip=ip, id=len(self._clients), running=False, socket=sock)
self._clients.append(client)
return client['id']
@verbose
def shutdown(self, verbose=None):
"""Shutdown the client and server.
Parameters
----------
%(verbose)s
"""
logger.info("Shutting down ...")
# stop running all the clients
if hasattr(self, '_clients'):
for client in self._clients:
client['running'] = False
self._running = False
self._data.shutdown()
self._data.server_close()
self._data.socket.close()
@verbose
def add_trigger(self, trigger, verbose=None):
"""Add a trigger.
Parameters
----------
trigger : int
The trigger to be added to the queue for sending to StimClient.
%(verbose_meth)s
See Also
--------
StimClient.get_trigger
"""
for client in self._clients:
client_id = client['id']
logger.info("Sending trigger %d to client %d"
% (trigger, client_id))
client['socket']._tx_queue.put(trigger)
@fill_doc
class StimClient(object):
"""Stimulation Client.
Client to communicate with StimServer
Parameters
----------
host : str
Hostname (or IP address) of the host where StimServer is running.
port : int
Port to use for the connection.
timeout : float
Communication timeout in seconds.
%(verbose)s
See Also
--------
StimServer
"""
@verbose
def __init__(self, host, port=4218, timeout=5.0,
verbose=None): # noqa: D102
try:
logger.info("Setting up client socket")
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.settimeout(timeout)
self._sock.connect((host, port))
logger.info("Establishing connection with server")
data = "add client".encode('utf-8')
n_sent = self._sock.send(data)
if n_sent != len(data):
raise RuntimeError('Could not communicate with server')
resp = self._sock.recv(1024).decode() # turn bytes into str (Py3k)
if resp == 'Client added':
logger.info("Connection established")
else:
raise RuntimeError('Client not added')
except Exception:
raise RuntimeError('Setting up acquisition <-> stimulation '
'computer connection (host: %s '
'port: %d) failed. Make sure StimServer '
'is running.' % (host, port))
def close(self):
"""Close the socket object."""
self._sock.close()
@verbose
def get_trigger(self, timeout=5.0, verbose=None):
"""Get triggers from StimServer.
Parameters
----------
timeout : float
maximum time to wait for a valid trigger from the server
%(verbose_meth)s
See Also
--------
StimServer.add_trigger
"""
start_time = time.time() # init delay counter. Will stop iterations
while True:
try:
current_time = time.time()
# Raise timeout error
if current_time > (start_time + timeout):
logger.info("received nothing")
return None
self._sock.send("get trigger".encode('utf-8'))
                trigger = self._sock.recv(1024).decode()  # turn bytes into str (Py3k)
                if trigger != 'Empty':
logger.info("received trigger %s" % str(trigger))
return int(trigger)
except RuntimeError as err:
logger.info('Cannot receive triggers: %s' % (err))
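# A minimal usage sketch, assuming the server and client run in two separate
# processes on the same machine. Port 4218 matches the defaults above and the
# trigger value 20 is arbitrary; only names defined in this module are used.
if __name__ == '__main__':
    import sys
    if len(sys.argv) > 1 and sys.argv[1] == 'client':
        # Run this branch in a second terminal after starting the server.
        client = StimClient('localhost', port=4218)
        print('got trigger:', client.get_trigger(timeout=5.))
        client.close()
    else:
        with StimServer(port=4218, n_clients=1) as server:
            server.start(timeout=60.)   # blocks until one client connects
            server.add_trigger(20)      # queued, sent on the next 'get trigger'
            time.sleep(5.)              # give the client time to poll for it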
|
pyto_ui.py
|
"""
UI for scripts
The ``pyto_ui`` module contains classes for building and presenting a native UI, in app or in the Today Widget.
This library's API is very similar to UIKit.
.. warning::
This library requires iOS / iPadOS 13.
This library has a lot of similarities with ``UIKit``, but subclassing isn't supported very well. Instead of overriding methods, you will often need to assign a function to a property. For properties, setters are what make the passed value take effect, so instead of overriding the getter, you should just set the property. If you really want to subclass a :class:`View`, you can set these properties from the initializer.
(Many docstrings are quoted from the Apple Documentation)
"""
from __future__ import annotations
from UIKit import UIFont as __UIFont__, UIImage as UIImage
from typing import List, Callable, Tuple
from pyto import __Class__, ConsoleViewController, PyAlert as __PyAlert__
from time import sleep
from io import BytesIO
from threading import Thread
import os
import base64
import io
import threading
import _values
import ui_constants
try:
from rubicon.objc import ObjCClass, CGFloat
except ValueError:
def ObjCClass(class_name):
return None
try:
from PIL import Image
except ImportError:
pass
if "widget" not in os.environ:
from urllib.request import urlopen
class __v__:
def __init__(self, string):
self.s = string
def __eq__(self, other):
return other == self.s
def __repr__(self):
return self.s
#############################
# MARK: - Objective-C Classes
#############################
__PyView__ = __Class__("PyView")
__PyControl__ = __Class__("PyControl")
__PySlider__ = __Class__("PySlider")
__PySegmentedControl__ = __Class__("PySegmentedControl")
__PySwitch__ = __Class__("PySwitch")
__PyButton__ = __Class__("PyButton")
__PyLabel__ = __Class__("PyLabel")
__UIImageView__ = __Class__("PyImageView")
__PyTableView__ = __Class__("PyTableView")
__PyTableViewCell__ = __Class__("PyTableViewCell")
__PyTableViewSection__ = __Class__("PyTableViewSection")
__PyTextView__ = __Class__("PyTextView")
__PyTextField__ = __Class__("PyTextField")
__PyWebView__ = __Class__("PyWebView")
__PyGestureRecognizer__ = __Class__("PyGestureRecognizer")
__PyColor__ = __Class__("PyColor")
__PyButtonItem__ = __Class__("PyButtonItem")
__PyTextInputTraitsConstants__ = __Class__("PyTextInputTraitsConstants")
try:
__NSData__ = ObjCClass("NSData")
except NameError:
pass
###################
# MARK: - Constants
###################
# MARK: - Gesture Recognizer Type
GESTURE_TYPE = ui_constants.GESTURE_TYPE
GESTURE_TYPE_LONG_PRESS = ui_constants.GESTURE_TYPE_LONG_PRESS
"""
A long press gesture.
"""
GESTURE_TYPE_PAN = ui_constants.GESTURE_TYPE_PAN
"""
A dragging gesture.
"""
GESTURE_TYPE_TAP = ui_constants.GESTURE_TYPE_TAP
"""
A tap gesture.
"""
# MARK: - Keyboard Appearance
KEYBOARD_APPEARANCE = ui_constants.KEYBOARD_APPEARANCE
KEYBOARD_APPEARANCE_DEFAULT = ui_constants.KEYBOARD_APPEARANCE_DEFAULT
"""
Specifies the default keyboard appearance for the current input method.
"""
KEYBOARD_APPEARANCE_LIGHT = ui_constants.KEYBOARD_APPEARANCE_LIGHT
"""
Specifies a keyboard appearance suitable for a light UI look.
"""
KEYBOARD_APPEARANCE_DARK = ui_constants.KEYBOARD_APPEARANCE_DARK
"""
Specifies a keyboard appearance suitable for a dark UI look.
"""
# MARK: - Keyboard Type
KEYBOARD_TYPE = ui_constants.KEYBOARD_TYPE
KEYBOARD_TYPE_DEFAULT = ui_constants.KEYBOARD_TYPE_DEFAULT
"""
Specifies the default keyboard for the current input method.
"""
KEYBOARD_TYPE_ASCII_CAPABLE = ui_constants.KEYBOARD_TYPE_ASCII_CAPABLE
"""
Specifies a keyboard that displays standard ASCII characters.
"""
KEYBOARD_TYPE_ASCII_CAPABLE_NUMBER_PAD = (
ui_constants.KEYBOARD_TYPE_ASCII_CAPABLE_NUMBER_PAD
)
"""
Specifies a number pad that outputs only ASCII digits.
"""
KEYBOARD_TYPE_DECIMAL_PAD = ui_constants.KEYBOARD_TYPE_DECIMAL_PAD
"""
Specifies a keyboard with numbers and a decimal point.
"""
KEYBOARD_TYPE_EMAIL_ADDRESS = ui_constants.KEYBOARD_TYPE_EMAIL_ADDRESS
"""
Specifies a keyboard optimized for entering email addresses. This keyboard type prominently features the at (“@”), period (“.”) and space characters.
"""
KEYBOARD_TYPE_NAME_PHONE_PAD = ui_constants.KEYBOARD_TYPE_NAME_PHONE_PAD
"""
Specifies a keypad designed for entering a person’s name or phone number. This keyboard type does not support auto-capitalization.
"""
KEYBOARD_TYPE_NUMBER_PAD = ui_constants.KEYBOARD_TYPE_NUMBER_PAD
"""
Specifies a numeric keypad designed for PIN entry. This keyboard type prominently features the numbers 0 through 9. This keyboard type does not support auto-capitalization.
"""
KEYBOARD_TYPE_NUMBERS_AND_PUNCTUATION = (
ui_constants.KEYBOARD_TYPE_NUMBERS_AND_PUNCTUATION
)
"""
Specifies the numbers and punctuation keyboard.
"""
KEYBOARD_TYPE_PHONE_PAD = ui_constants.KEYBOARD_TYPE_PHONE_PAD
"""
Specifies a keypad designed for entering telephone numbers. This keyboard type prominently features the numbers 0 through 9 and the “*” and “#” characters. This keyboard type does not support auto-capitalization.
"""
KEYBOARD_TYPE_TWITTER = ui_constants.KEYBOARD_TYPE_TWITTER
"""
Specifies a keyboard optimized for Twitter text entry, with easy access to the at (“@”) and hash (“#”) characters.
"""
KEYBOARD_TYPE_URL = ui_constants.KEYBOARD_TYPE_URL
"""
Specifies a keyboard optimized for URL entry. This keyboard type prominently features the period (“.”) and slash (“/”) characters and the “.com” string.
"""
KEYBOARD_TYPE_WEB_SEARCH = ui_constants.KEYBOARD_TYPE_WEB_SEARCH
"""
Specifies a keyboard optimized for web search terms and URL entry. This keyboard type prominently features the space and period (“.”) characters.
"""
# MARK: - Return Key Type
RETURN_KEY_TYPE = ui_constants.RETURN_KEY_TYPE
RETURN_KEY_TYPE_DEFAULT = ui_constants.RETURN_KEY_TYPE_DEFAULT
"""
Specifies that the visible title of the Return key is “return”.
"""
RETURN_KEY_TYPE_CONTINUE = ui_constants.RETURN_KEY_TYPE_CONTINUE
"""
Specifies that the visible title of the Return key is “Continue”.
"""
RETURN_KEY_TYPE_DONE = ui_constants.RETURN_KEY_TYPE_DONE
"""
Specifies that the visible title of the Return key is “Done”.
"""
RETURN_KEY_TYPE_EMERGENCY_CALL = ui_constants.RETURN_KEY_TYPE_EMERGENCY_CALL
"""
Specifies that the visible title of the Return key is “Emergency Call”.
"""
RETURN_KEY_TYPE_GO = ui_constants.RETURN_KEY_TYPE_GO
"""
Specifies that the visible title of the Return key is “Go”.
"""
RETURN_KEY_TYPE_GOOGLE = ui_constants.RETURN_KEY_TYPE_GOOGLE
"""
Specifies that the visible title of the Return key is “Google”.
"""
RETURN_KEY_TYPE_JOIN = ui_constants.RETURN_KEY_TYPE_JOIN
"""
Specifies that the visible title of the Return key is “Join”.
"""
RETURN_KEY_TYPE_NEXT = ui_constants.RETURN_KEY_TYPE_NEXT
"""
Specifies that the visible title of the Return key is “Next”.
"""
RETURN_KEY_TYPE_ROUTE = ui_constants.RETURN_KEY_TYPE_ROUTE
"""
Specifies that the visible title of the Return key is “Route”.
"""
RETURN_KEY_TYPE_SEARCH = ui_constants.RETURN_KEY_TYPE_SEARCH
"""
Specifies that the visible title of the Return key is “Search”.
"""
RETURN_KEY_TYPE_SEND = ui_constants.RETURN_KEY_TYPE_SEND
"""
Specifies that the visible title of the Return key is “Send”.
"""
RETURN_KEY_TYPE_YAHOO = ui_constants.RETURN_KEY_TYPE_YAHOO
"""
Specifies that the visible title of the Return key is “Yahoo”.
"""
# MARK: - Autocapitalization Type
AUTO_CAPITALIZE = ui_constants.AUTO_CAPITALIZE
AUTO_CAPITALIZE_NONE = ui_constants.AUTO_CAPITALIZE_NONE
"""
Specifies that there is no automatic text capitalization.
"""
AUTO_CAPITALIZE_ALL = ui_constants.AUTO_CAPITALIZE_ALL
"""
Specifies automatic capitalization of all characters, such as for entry of two-character state abbreviations for the United States.
"""
AUTO_CAPITALIZE_SENTENCES = ui_constants.AUTO_CAPITALIZE_SENTENCES
"""
Specifies automatic capitalization of the first letter of each sentence.
"""
AUTO_CAPITALIZE_WORDS = ui_constants.AUTO_CAPITALIZE_WORDS
"""
Specifies automatic capitalization of the first letter of each word.
"""
# MARK: - Font Text Style
FONT_TEXT_STYLE = ui_constants.FONT_TEXT_STYLE
FONT_TEXT_STYLE_BODY = ui_constants.FONT_TEXT_STYLE_BODY
"""
The font used for body text.
"""
FONT_TEXT_STYLE_CALLOUT = ui_constants.FONT_TEXT_STYLE_CALLOUT
"""
The font used for callouts.
"""
FONT_TEXT_STYLE_CAPTION_1 = ui_constants.FONT_TEXT_STYLE_CAPTION_1
"""
The font used for standard captions.
"""
FONT_TEXT_STYLE_CAPTION_2 = ui_constants.FONT_TEXT_STYLE_CAPTION_2
"""
The font used for alternate captions.
"""
FONT_TEXT_STYLE_FOOTNOTE = ui_constants.FONT_TEXT_STYLE_FOOTNOTE
"""
The font used in footnotes.
"""
FONT_TEXT_STYLE_HEADLINE = ui_constants.FONT_TEXT_STYLE_HEADLINE
"""
The font used for headings.
"""
FONT_TEXT_STYLE_SUBHEADLINE = ui_constants.FONT_TEXT_STYLE_SUBHEADLINE
"""
The font used for subheadings.
"""
FONT_TEXT_STYLE_LARGE_TITLE = ui_constants.FONT_TEXT_STYLE_LARGE_TITLE
"""
The font style for large titles.
"""
FONT_TEXT_STYLE_TITLE_1 = ui_constants.FONT_TEXT_STYLE_TITLE_1
"""
The font used for first level hierarchical headings.
"""
FONT_TEXT_STYLE_TITLE_2 = ui_constants.FONT_TEXT_STYLE_TITLE_2
"""
The font used for second level hierarchical headings.
"""
FONT_TEXT_STYLE_TITLE_3 = ui_constants.FONT_TEXT_STYLE_TITLE_3
"""
The font used for third level hierarchical headings.
"""
# MARK: - Font Size
FONT_SIZE = ui_constants.FONT_SIZE
FONT_LABEL_SIZE = ui_constants.FONT_LABEL_SIZE
"""
Returns the standard font size used for labels.
"""
FONT_BUTTON_SIZE = ui_constants.FONT_BUTTON_SIZE
"""
Returns the standard font size used for buttons.
"""
FONT_SMALL_SYSTEM_SIZE = ui_constants.FONT_SMALL_SYSTEM_SIZE
"""
Returns the size of the standard small system font.
"""
FONT_SYSTEM_SIZE = ui_constants.FONT_SYSTEM_SIZE
"""
Returns the size of the standard system font.
"""
# MARK: - Presentation Mode
PRESENTATION_MODE = ui_constants.PRESENTATION_MODE
PRESENTATION_MODE_SHEET = ui_constants.PRESENTATION_MODE_SHEET
"""
A presentation style that displays the content centered in the screen.
"""
PRESENTATION_MODE_FULLSCREEN = ui_constants.PRESENTATION_MODE_FULLSCREEN
"""
A presentation style in which the presented view covers the screen.
"""
PRESENTATION_MODE_WIDGET = ui_constants.PRESENTATION_MODE_WIDGET
"""
A presentation mode style which simulates a Today Widget. Should be used in app to preview how a widget will look.
"""
# MARK: - Appearance
APPEARANCE = ui_constants.APPEARANCE
APPEARANCE_UNSPECIFIED = ui_constants.APPEARANCE_UNSPECIFIED
"""
An unspecified interface style.
"""
APPEARANCE_LIGHT = ui_constants.APPEARANCE_LIGHT
"""
The light interface style.
"""
APPEARANCE_DARK = ui_constants.APPEARANCE_DARK
"""
The dark interface style.
"""
# MARK: - Auto Resizing
AUTO_RESIZING = ui_constants.AUTO_RESIZING
FLEXIBLE_WIDTH = ui_constants.FLEXIBLE_WIDTH
"""
Resizing performed by expanding or shrinking a view’s width.
"""
FLEXIBLE_HEIGHT = ui_constants.FLEXIBLE_HEIGHT
"""
Resizing performed by expanding or shrinking a view's height.
"""
FLEXIBLE_TOP_MARGIN = ui_constants.FLEXIBLE_TOP_MARGIN
"""
Resizing performed by expanding or shrinking a view in the direction of the top margin.
"""
FLEXIBLE_BOTTOM_MARGIN = ui_constants.FLEXIBLE_BOTTOM_MARGIN
"""
Resizing performed by expanding or shrinking a view in the direction of the bottom margin.
"""
FLEXIBLE_LEFT_MARGIN = ui_constants.FLEXIBLE_LEFT_MARGIN
"""
Resizing performed by expanding or shrinking a view in the direction of the left margin.
"""
FLEXIBLE_RIGHT_MARGIN = ui_constants.FLEXIBLE_RIGHT_MARGIN
"""
Resizing performed by expanding or shrinking a view in the direction of the right margin.
"""
# MARK: - Content Mode
CONTENT_MODE = ui_constants.CONTENT_MODE
CONTENT_MODE_SCALE_TO_FILL = ui_constants.CONTENT_MODE_SCALE_TO_FILL
"""
The option to scale the content to fit the size of itself by changing the aspect ratio of the content if necessary.
"""
CONTENT_MODE_SCALE_ASPECT_FIT = ui_constants.CONTENT_MODE_SCALE_ASPECT_FIT
"""
The option to scale the content to fit the size of the view by maintaining the aspect ratio. Any remaining area of the view’s bounds is transparent.
"""
CONTENT_MODE_SCALE_ASPECT_FILL = ui_constants.CONTENT_MODE_SCALE_ASPECT_FILL
"""
The option to scale the content to fill the size of the view. Some portion of the content may be clipped to fill the view’s bounds.
"""
CONTENT_MODE_REDRAW = ui_constants.CONTENT_MODE_REDRAW
"""
The option to redisplay the view when the bounds change by invoking the ``setNeedsDisplay()`` method.
"""
CONTENT_MODE_CENTER = ui_constants.CONTENT_MODE_CENTER
"""
The option to center the content in the view’s bounds, keeping the proportions the same.
"""
CONTENT_MODE_TOP = ui_constants.CONTENT_MODE_TOP
"""
The option to center the content aligned at the top in the view’s bounds.
"""
CONTENT_MODE_BOTTOM = ui_constants.CONTENT_MODE_BOTTOM
"""
The option to center the content aligned at the bottom in the view’s bounds.
"""
CONTENT_MODE_LEFT = ui_constants.CONTENT_MODE_LEFT
"""
The option to align the content on the left of the view.
"""
CONTENT_MODE_RIGHT = ui_constants.CONTENT_MODE_RIGHT
"""
The option to align the content on the right of the view.
"""
CONTENT_MODE_TOP_LEFT = ui_constants.CONTENT_MODE_TOP_LEFT
"""
The option to align the content in the top-left corner of the view.
"""
CONTENT_MODE_TOP_RIGHT = ui_constants.CONTENT_MODE_TOP_RIGHT
"""
The option to align the content in the top-right corner of the view.
"""
CONTENT_MODE_BOTTOM_LEFT = ui_constants.CONTENT_MODE_BOTTOM_LEFT
"""
The option to align the content in the bottom-left corner of the view.
"""
CONTENT_MODE_BOTTOM_RIGHT = ui_constants.CONTENT_MODE_BOTTOM_RIGHT
"""
The option to align the content in the bottom-right corner of the view.
"""
# MARK: - Horizontal Alignment
HORZONTAL_ALIGNMENT = ui_constants.HORZONTAL_ALIGNMENT
HORZONTAL_ALIGNMENT_CENTER = ui_constants.HORZONTAL_ALIGNMENT_CENTER
"""
Aligns the content horizontally in the center of the control.
"""
HORZONTAL_ALIGNMENT_FILL = ui_constants.HORZONTAL_ALIGNMENT_FILL
"""
Aligns the content horizontally to fill the content rectangles; text may wrap and images may be stretched.
"""
HORZONTAL_ALIGNMENT_LEADING = ui_constants.HORZONTAL_ALIGNMENT_LEADING
"""
Aligns the content horizontally from the leading edge of the control.
"""
HORZONTAL_ALIGNMENT_LEFT = ui_constants.HORZONTAL_ALIGNMENT_LEFT
"""
Aligns the content horizontally from the left of the control (the default).
"""
HORZONTAL_ALIGNMENT_RIGHT = ui_constants.HORZONTAL_ALIGNMENT_RIGHT
"""
Aligns the content horizontally from the right of the control.
"""
HORZONTAL_ALIGNMENT_TRAILING = ui_constants.HORZONTAL_ALIGNMENT_TRAILING
"""
Aligns the content horizontally from the trailing edge of the control.
"""
# MARK: - Vertical Alignment
VERTICAL_ALIGNMENT = ui_constants.VERTICAL_ALIGNMENT
VERTICAL_ALIGNMENT_BOTTOM = ui_constants.VERTICAL_ALIGNMENT_BOTTOM
"""
Aligns the content vertically at the bottom in the control.
"""
VERTICAL_ALIGNMENT_CENTER = ui_constants.VERTICAL_ALIGNMENT_CENTER
"""
Aligns the content vertically in the center of the control.
"""
VERTICAL_ALIGNMENT_FILL = ui_constants.VERTICAL_ALIGNMENT_FILL
"""
Aligns the content vertically to fill the content rectangle; images may be stretched.
"""
VERTICAL_ALIGNMENT_TOP = ui_constants.VERTICAL_ALIGNMENT_TOP
"""
Aligns the content vertically at the top in the control (the default).
"""
# MARK: - Button Type
BUTTON_TYPE = ui_constants.BUTTON_TYPE
BUTTON_TYPE_SYSTEM = ui_constants.BUTTON_TYPE_SYSTEM
"""
A system style button, such as those shown in navigation bars and toolbars.
"""
BUTTON_TYPE_CONTACT_ADD = ui_constants.BUTTON_TYPE_CONTACT_ADD
"""
A contact add button.
"""
BUTTON_TYPE_CUSTOM = ui_constants.BUTTON_TYPE_CUSTOM
"""
No button style.
"""
BUTTON_TYPE_DETAIL_DISCLOSURE = ui_constants.BUTTON_TYPE_DETAIL_DISCLOSURE
"""
A detail disclosure button.
"""
BUTTON_TYPE_INFO_DARK = ui_constants.BUTTON_TYPE_INFO_DARK
"""
An information button that has a dark background.
"""
BUTTON_TYPE_INFO_LIGHT = ui_constants.BUTTON_TYPE_INFO_LIGHT
"""
An information button that has a light background.
"""
# MARK: - Text Alignment
TEXT_ALIGNMENT = ui_constants.TEXT_ALIGNMENT
TEXT_ALIGNMENT_LEFT = ui_constants.TEXT_ALIGNMENT_LEFT
"""
Text is visually left aligned.
"""
TEXT_ALIGNMENT_RIGHT = ui_constants.TEXT_ALIGNMENT_RIGHT
"""
Text is visually right aligned.
"""
TEXT_ALIGNMENT_CENTER = ui_constants.TEXT_ALIGNMENT_CENTER
"""
Text is visually center aligned.
"""
TEXT_ALIGNMENT_JUSTIFIED = ui_constants.TEXT_ALIGNMENT_JUSTIFIED
"""
Text is justified.
"""
TEXT_ALIGNMENT_NATURAL = ui_constants.TEXT_ALIGNMENT_NATURAL
"""
Use the default alignment associated with the current localization of the app. The default alignment for left-to-right scripts is left, and the default alignment for right-to-left scripts is right.
"""
# MARK: - Line Break Mode
LINE_BREAK_MODE = ui_constants.LINE_BREAK_MODE
LINE_BREAK_MODE_BY_WORD_WRAPPING = ui_constants.LINE_BREAK_MODE_BY_WORD_WRAPPING
"""
Wrapping occurs at word boundaries, unless the word itself doesn’t fit on a single line.
"""
LINE_BREAK_MODE_BY_CHAR_WRAPPING = ui_constants.LINE_BREAK_MODE_BY_CHAR_WRAPPING
"""
Wrapping occurs before the first character that doesn’t fit.
"""
LINE_BREAK_MODE_BY_CLIPPING = ui_constants.LINE_BREAK_MODE_BY_CLIPPING
"""
Lines are simply not drawn past the edge of the text container.
"""
LINE_BREAK_MODE_BY_TRUNCATING_HEAD = ui_constants.LINE_BREAK_MODE_BY_TRUNCATING_HEAD
"""
The line is displayed so that the end fits in the container and the missing text at the beginning of the line is indicated by an ellipsis glyph. Although this mode works for multiline text, it is more often used for single line text.
"""
LINE_BREAK_MODE_BY_TRUNCATING_TAIL = ui_constants.LINE_BREAK_MODE_BY_TRUNCATING_TAIL
"""
The line is displayed so that the beginning fits in the container and the missing text at the end of the line is indicated by an ellipsis glyph. Although this mode works for multiline text, it is more often used for single line text.
"""
LINE_BREAK_MODE_BY_TRUNCATING_MIDDLE = ui_constants.LINE_BREAK_MODE_BY_TRUNCATING_MIDDLE
"""
The line is displayed so that the beginning and end fit in the container and the missing text in the middle is indicated by an ellipsis glyph. This mode is used for single-line layout; using it with multiline text truncates the text into a single line.
"""
# MARK: - Touch Type
TOUCH_TYPE = ui_constants.TOUCH_TYPE
TOUCH_TYPE_DIRECT = ui_constants.TOUCH_TYPE_DIRECT
"""
A touch resulting from direct contact with the screen.
"""
TOUCH_TYPE_INDIRECT = ui_constants.TOUCH_TYPE_INDIRECT
"""
A touch that did not result from contact with the screen.
"""
TOUCH_TYPE_PENCIL = ui_constants.TOUCH_TYPE_PENCIL
"""
A touch from Apple Pencil.
"""
# MARK: - Gesture State
GESTURE_STATE = ui_constants.GESTURE_STATE
GESTURE_STATE_POSSIBLE = ui_constants.GESTURE_STATE_POSSIBLE
"""
The gesture recognizer has not yet recognized its gesture, but may be evaluating touch events. This is the default state.
"""
GESTURE_STATE_BEGAN = ui_constants.GESTURE_STATE_BEGAN
"""
The gesture recognizer has received touch objects recognized as a continuous gesture. It sends its action message (or messages) at the next cycle of the run loop.
"""
GESTURE_STATE_CHANGED = ui_constants.GESTURE_STATE_CHANGED
"""
The gesture recognizer has received touches recognized as a change to a continuous gesture. It sends its action message (or messages) at the next cycle of the run loop.
"""
GESTURE_STATE_ENDED = ui_constants.GESTURE_STATE_ENDED
"""
The gesture recognizer has received touches recognized as the end of a continuous gesture. It sends its action message (or messages) at the next cycle of the run loop and resets its state to possible.
"""
GESTURE_STATE_CANCELLED = ui_constants.GESTURE_STATE_CANCELLED
"""
The gesture recognizer has received touches resulting in the cancellation of a continuous gesture. It sends its action message (or messages) at the next cycle of the run loop and resets its state to possible.
"""
GESTURE_STATE_RECOGNIZED = ui_constants.GESTURE_STATE_RECOGNIZED
"""
The gesture recognizer has received a multi-touch sequence that it recognizes as its gesture. It sends its action message (or messages) at the next cycle of the run loop and resets its state to possible.
"""
# MARK: - Table View Cell Style
TABLE_VIEW_CELL_STYLE = ui_constants.TABLE_VIEW_CELL_STYLE
TABLE_VIEW_CELL_STYLE_DEFAULT = ui_constants.TABLE_VIEW_CELL_STYLE_DEFAULT
"""
A simple style for a cell with a text label (black and left-aligned) and an optional image view.
"""
TABLE_VIEW_CELL_STYLE_SUBTITLE = ui_constants.TABLE_VIEW_CELL_STYLE_SUBTITLE
"""
A style for a cell with a left-aligned label across the top and a left-aligned label below it in smaller gray text.
"""
TABLE_VIEW_CELL_STYLE_VALUE1 = ui_constants.TABLE_VIEW_CELL_STYLE_VALUE1
"""
A style for a cell with a label on the left side of the cell with left-aligned and black text; on the right side is a label that has smaller blue text and is right-aligned. The Settings application uses cells in this style.
"""
TABLE_VIEW_CELL_STYLE_VALUE2 = ui_constants.TABLE_VIEW_CELL_STYLE_VALUE2
"""
A style for a cell with a label on the left side of the cell with text that is right-aligned and blue; on the right side of the cell is another label with smaller text that is left-aligned and black. The Phone/Contacts application uses cells in this style.
"""
# MARK: - Table View Cell Accessory Type
ACCESSORY_TYPE = ui_constants.ACCESSORY_TYPE
ACCESSORY_TYPE_NONE = ui_constants.ACCESSORY_TYPE_NONE
"""
No accessory view.
"""
ACCESSORY_TYPE_CHECKMARK = ui_constants.ACCESSORY_TYPE_CHECKMARK
"""
A checkmark image.
"""
ACCESSORY_TYPE_DETAIL_BUTTON = ui_constants.ACCESSORY_TYPE_DETAIL_BUTTON
"""
An information button.
"""
ACCESSORY_TYPE_DETAIL_DISCLOSURE_BUTTON = (
ui_constants.ACCESSORY_TYPE_DETAIL_DISCLOSURE_BUTTON
)
"""
An information button and a disclosure (chevron) control.
"""
ACCESSORY_TYPE_DISCLOSURE_INDICATOR = ui_constants.ACCESSORY_TYPE_DISCLOSURE_INDICATOR
"""
A chevron-shaped control for presenting new content.
"""
# MARK: - Table View Style
TABLE_VIEW_STYLE = ui_constants.TABLE_VIEW_STYLE
TABLE_VIEW_STYLE_PLAIN = ui_constants.TABLE_VIEW_STYLE_PLAIN
"""
A plain table view.
"""
TABLE_VIEW_STYLE_GROUPED = ui_constants.TABLE_VIEW_STYLE_GROUPED
"""
A table view whose sections present distinct groups of rows.
"""
# MARK: - Text Field Border Style
TEXT_FIELD_BORDER_STYLE = ui_constants.TEXT_FIELD_BORDER_STYLE
TEXT_FIELD_BORDER_STYLE_NONE = ui_constants.TEXT_FIELD_BORDER_STYLE_NONE
"""
The text field does not display a border.
"""
TEXT_FIELD_BORDER_STYLE_BEZEL = ui_constants.TEXT_FIELD_BORDER_STYLE_BEZEL
"""
Displays a bezel-style border for the text field. This style is typically used for standard data-entry fields.
"""
TEXT_FIELD_BORDER_STYLE_LINE = ui_constants.TEXT_FIELD_BORDER_STYLE_LINE
"""
Displays a thin rectangle around the text field.
"""
TEXT_FIELD_BORDER_STYLE_ROUNDED_RECT = ui_constants.TEXT_FIELD_BORDER_STYLE_ROUNDED_RECT
"""
Displays a rounded-style border for the text field.
"""
# MARK: - Button Item Style
BUTTON_ITEM_STYLE = ui_constants.BUTTON_ITEM_STYLE
BUTTON_ITEM_STYLE_PLAIN = ui_constants.BUTTON_ITEM_STYLE_PLAIN
"""
Glows when tapped. The default item style.
"""
BUTTON_ITEM_STYLE_DONE = ui_constants.BUTTON_ITEM_STYLE_DONE
"""
The style for a done button—for example, a button that completes some task and returns to the previous view.
"""
# MARK: - Button Item System Item
SYSTEM_ITEM = ui_constants.SYSTEM_ITEM
SYSTEM_ITEM_ACTION = ui_constants.SYSTEM_ITEM_ACTION
"""
The system action button.
"""
SYSTEM_ITEM_ADD = ui_constants.SYSTEM_ITEM_ADD
"""
The system plus button containing an icon of a plus sign.
"""
SYSTEM_ITEM_BOOKMARKS = ui_constants.SYSTEM_ITEM_BOOKMARKS
"""
The system bookmarks button.
"""
SYSTEM_ITEM_CAMERA = ui_constants.SYSTEM_ITEM_CAMERA
"""
The system camera button.
"""
SYSTEM_ITEM_CANCEL = ui_constants.SYSTEM_ITEM_CANCEL
"""
The system Cancel button, localized.
"""
SYSTEM_ITEM_COMPOSE = ui_constants.SYSTEM_ITEM_COMPOSE
"""
The system compose button.
"""
SYSTEM_ITEM_DONE = ui_constants.SYSTEM_ITEM_DONE
"""
The system Done button, localized.
"""
SYSTEM_ITEM_EDIT = ui_constants.SYSTEM_ITEM_EDIT
"""
The system Edit button, localized.
"""
SYSTEM_ITEM_FAST_FORWARD = ui_constants.SYSTEM_ITEM_FAST_FORWARD
"""
The system fast forward button.
"""
SYSTEM_ITEM_FLEXIBLE_SPACE = ui_constants.SYSTEM_ITEM_FLEXIBLE_SPACE
"""
Blank space to add between other items. The space is distributed equally between the other items. Other item properties are ignored when this value is set.
"""
SYSTEM_ITEM_ORGANIZE = ui_constants.SYSTEM_ITEM_ORGANIZE
"""
The system organize button.
"""
SYSTEM_ITEM_PAUSE = ui_constants.SYSTEM_ITEM_PAUSE
"""
The system pause button.
"""
SYSTEM_ITEM_PLAY = ui_constants.SYSTEM_ITEM_PLAY
"""
The system play button.
"""
SYSTEM_ITEM_REDO = ui_constants.SYSTEM_ITEM_REDO
"""
The system redo button.
"""
SYSTEM_ITEM_REFRESH = ui_constants.SYSTEM_ITEM_REFRESH
"""
The system refresh button.
"""
SYSTEM_ITEM_REPLY = ui_constants.SYSTEM_ITEM_REPLY
"""
The system reply button.
"""
SYSTEM_ITEM_REWIND = ui_constants.SYSTEM_ITEM_REWIND
"""
The system rewind button.
"""
SYSTEM_ITEM_SAVE = ui_constants.SYSTEM_ITEM_SAVE
"""
The system Save button, localized.
"""
SYSTEM_ITEM_SEARCH = ui_constants.SYSTEM_ITEM_SEARCH
"""
The system search button.
"""
SYSTEM_ITEM_STOP = ui_constants.SYSTEM_ITEM_STOP
"""
The system stop button.
"""
SYSTEM_ITEM_TRASH = ui_constants.SYSTEM_ITEM_TRASH
"""
The system trash button.
"""
SYSTEM_ITEM_UNDO = ui_constants.SYSTEM_ITEM_UNDO
"""
The system undo button.
"""
###############
# MARK: - Other Classes
###############
# MARK: - Color
class Color:
"""
A ``Color`` object represents a color to be displayed on screen.
Example:
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
# RGB
black = ui.Color.rgb(0, 0, 0, 1)
# White
white = ui.Color.white(1, 1)
# Dynamic
background = ui.Color.dynamic(light=white, dark=black)
For pre-defined colors, see `Color <constants.html#ui-elements-colors>`_ constants.
"""
__py_color__ = None
def red(self) -> float:
"""
Returns the red value of the color.
:rtype: float
"""
return float(self.__py_color__.red)
def green(self) -> float:
"""
Returns the green value of the color.
:rtype: float
"""
return float(self.__py_color__.green)
def blue(self) -> float:
"""
Returns the blue value of the color.
:rtype: float
"""
return float(self.__py_color__.blue)
def alpha(self) -> float:
"""
Returns the alpha value of the color.
:rtype: float
"""
return float(self.__py_color__.alpha)
def __init__(self, py_color):
self.__py_color__ = py_color
def __repr__(self):
return str(self.__py_color__.managed.description)
@classmethod
    def rgb(cls, red: float, green: float, blue: float, alpha: float) -> Color:
"""
Initializes a color from RGB values.
All values should be located between 0 and 1, not between 0 and 255.
:param red: The red value.
        :param green: The green value.
:param blue: The blue value.
:param alpha: The opacity value.
:rtype: Color
"""
return cls(__PyColor__.colorWithRed(red, green=green, blue=blue, alpha=alpha))
@classmethod
def white(cls, white: float, alpha: float) -> Color:
"""
Initializes and returns a color from white value.
All values should be located between 0 and 1, not between 0 and 255.
:param white: The grayscale value.
:param alpha: The opacity value.
:rtype: Color
"""
return cls(__PyColor__.colorWithWhite(white, alpha=alpha))
@classmethod
def dynamic(cls, light: Color, dark: Color) -> Color:
"""
Initializes and returns a color that dynamically changes in dark or light mode.
:param light: :class:`~pyto_ui.Color` object to be displayed in light mode.
:param dark: :class:`~pyto_ui.Color` object to be displayed in dark mode.
:rtype: Color
"""
return cls(__PyColor__.colorWithLight(light.__py_color__, dark=dark.__py_color__))
COLOR_LABEL = Color(ui_constants.COLOR_LABEL)
""" The color for text labels containing primary content. """
COLOR_SECONDARY_LABEL = Color(ui_constants.COLOR_SECONDARY_LABEL)
""" The color for text labels containing secondary content. """
COLOR_TERTIARY_LABEL = Color(ui_constants.COLOR_TERTIARY_LABEL)
""" The color for text labels containing tertiary content. """
COLOR_QUATERNARY_LABEL = Color(ui_constants.COLOR_QUATERNARY_LABEL)
""" The color for text labels containing quaternary content. """
COLOR_SYSTEM_FILL = Color(ui_constants.COLOR_SYSTEM_FILL)
""" An overlay fill color for thin and small shapes. """
COLOR_SECONDARY_SYSTEM_FILL = Color(ui_constants.COLOR_SECONDARY_SYSTEM_FILL)
""" An overlay fill color for medium-size shapes. """
COLOR_TERTIARY_SYSTEM_FILL = Color(ui_constants.COLOR_TERTIARY_SYSTEM_FILL)
""" An overlay fill color for large shapes. """
COLOR_QUATERNARY_SYSTEM_FILL = Color(ui_constants.COLOR_QUATERNARY_SYSTEM_FILL)
""" An overlay fill color for large areas containing complex content. """
COLOR_PLACEHOLDER_TEXT = Color(ui_constants.COLOR_PLACEHOLDER_TEXT)
""" The color for placeholder text in controls or text views. """
COLOR_SYSTEM_BACKGROUND = Color(ui_constants.COLOR_SYSTEM_BACKGROUND)
""" The color for the main background of your interface. """
COLOR_SECONDARY_SYSTEM_BACKGROUND = Color(
ui_constants.COLOR_SECONDARY_SYSTEM_BACKGROUND
)
""" The color for content layered on top of the main background. """
COLOR_TERTIARY_SYSTEM_BACKGROUND = Color(ui_constants.COLOR_TERTIARY_SYSTEM_BACKGROUND)
""" The color for content layered on top of secondary backgrounds. """
COLOR_SYSTEM_GROUPED_BACKGROUND = Color(ui_constants.COLOR_SYSTEM_GROUPED_BACKGROUND)
""" The color for the main background of your grouped interface. """
COLOR_SECONDARY_GROUPED_BACKGROUND = Color(
ui_constants.COLOR_SECONDARY_GROUPED_BACKGROUND
)
""" The color for content layered on top of the main background of your grouped interface. """
COLOR_TERTIARY_GROUPED_BACKGROUND = Color(
ui_constants.COLOR_TERTIARY_GROUPED_BACKGROUND
)
""" The color for content layered on top of secondary backgrounds of your grouped interface. """
COLOR_SEPARATOR = Color(ui_constants.COLOR_SEPARATOR)
""" The color for thin borders or divider lines that allows some underlying content to be visible. """
COLOR_OPAQUE_SEPARATOR = Color(ui_constants.COLOR_OPAQUE_SEPARATOR)
""" The color for borders or divider lines that hide any underlying content. """
COLOR_LINK = Color(ui_constants.COLOR_LINK)
""" The color for links. """
COLOR_DARK_TEXT = Color(ui_constants.COLOR_DARK_TEXT)
""" The nonadaptable system color for text on a light background. """
COLOR_LIGHT_TEXT = Color(ui_constants.COLOR_LIGHT_TEXT)
""" The nonadaptable system color for text on a dark background. """
COLOR_SYSTEM_BLUE = Color(ui_constants.COLOR_SYSTEM_BLUE)
""" A blue color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_GREEN = Color(ui_constants.COLOR_SYSTEM_GREEN)
""" A green color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_INDIGO = Color(ui_constants.COLOR_SYSTEM_INDIGO)
""" An indigo color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_ORANGE = Color(ui_constants.COLOR_SYSTEM_ORANGE)
""" An orange color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_PINK = Color(ui_constants.COLOR_SYSTEM_PINK)
""" A pink color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_PURPLE = Color(ui_constants.COLOR_SYSTEM_PURPLE)
""" A purple color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_RED = Color(ui_constants.COLOR_SYSTEM_RED)
""" A red color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_TEAL = Color(ui_constants.COLOR_SYSTEM_TEAL)
""" A teal color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_YELLOW = Color(ui_constants.COLOR_SYSTEM_YELLOW)
""" A yellow color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_GRAY = Color(ui_constants.COLOR_SYSTEM_GRAY)
""" The base gray color. """
COLOR_SYSTEM_GRAY2 = Color(ui_constants.COLOR_SYSTEM_GRAY2)
""" A second-level shade of grey. """
COLOR_SYSTEM_GRAY3 = Color(ui_constants.COLOR_SYSTEM_GRAY3)
""" A third-level shade of grey. """
COLOR_SYSTEM_GRAY4 = Color(ui_constants.COLOR_SYSTEM_GRAY4)
""" A fourth-level shade of grey. """
COLOR_SYSTEM_GRAY5 = Color(ui_constants.COLOR_SYSTEM_GRAY5)
""" A fifth-level shade of grey. """
COLOR_SYSTEM_GRAY6 = Color(ui_constants.COLOR_SYSTEM_GRAY6)
""" A sixth-level shade of grey. """
COLOR_CLEAR = Color(ui_constants.COLOR_CLEAR)
""" A color object with grayscale and alpha values that are both 0.0. """
COLOR_BLACK = Color(ui_constants.COLOR_BLACK)
""" A color object in the sRGB color space with a grayscale value of 0.0 and an alpha value of 1.0. """
COLOR_BLUE = Color(ui_constants.COLOR_BLUE)
""" A color object with RGB values of 0.0, 0.0, and 1.0 and an alpha value of 1.0. """
COLOR_BROWN = Color(ui_constants.COLOR_BROWN)
""" A color object with RGB values of 0.6, 0.4, and 0.2 and an alpha value of 1.0. """
COLOR_CYAN = Color(ui_constants.COLOR_CYAN)
""" A color object with RGB values of 0.0, 1.0, and 1.0 and an alpha value of 1.0. """
COLOR_DARK_GRAY = Color(ui_constants.COLOR_DARK_GRAY)
""" A color object with a grayscale value of 1/3 and an alpha value of 1.0. """
COLOR_GRAY = Color(ui_constants.COLOR_GRAY)
""" A color object with a grayscale value of 0.5 and an alpha value of 1.0. """
COLOR_GREEN = Color(ui_constants.COLOR_GREEN)
""" A color object with RGB values of 0.0, 1.0, and 0.0 and an alpha value of 1.0. """
COLOR_LIGHT_GRAY = Color(ui_constants.COLOR_LIGHT_GRAY)
""" A color object with a grayscale value of 2/3 and an alpha value of 1.0. """
COLOR_MAGENTA = Color(ui_constants.COLOR_MAGENTA)
""" A color object with RGB values of 1.0, 0.0, and 1.0 and an alpha value of 1.0. """
COLOR_ORANGE = Color(ui_constants.COLOR_ORANGE)
""" A color object with RGB values of 1.0, 0.5, and 0.0 and an alpha value of 1.0. """
COLOR_PURPLE = Color(ui_constants.COLOR_PURPLE)
""" A color object with RGB values of 0.5, 0.0, and 0.5 and an alpha value of 1.0. """
COLOR_RED = Color(ui_constants.COLOR_RED)
""" A color object with RGB values of 1.0, 0.0, and 0.0 and an alpha value of 1.0. """
COLOR_WHITE = Color(ui_constants.COLOR_WHITE)
""" A color object with a grayscale value of 1.0 and an alpha value of 1.0. """
COLOR_YELLOW = Color(ui_constants.COLOR_YELLOW)
""" A color object with RGB values of 1.0, 1.0, and 0.0 and an alpha value of 1.0. """
# MARK: - Font
class Font:
"""
A ``Font`` object represents a font (with name and size) to be used on labels, buttons, text views etc.
"""
__ui_font__ = None
def __init__(self, name: str, size: float):
"""
Initializes a font with given name and size.
        :param name: The fully specified name of the font. This name incorporates both the font family name and the specific style information for the font.
:param size: The size (in points) to which the font is scaled. This value must be greater than 0.0.
"""
if name is None and size is None:
return
self.__ui_font__ = __UIFont__.fontWithName(name, size=CGFloat(size))
def __repr__(self):
return str(self.__ui_font__.description)
def with_size(self, size: float) -> Font:
"""
Returns a font object that is the same as the receiver but which has the specified size instead.
:param size: The desired size (in points) of the new font object. This value must be greater than 0.0.
:rtype: Font
"""
font = self.__class__(None, None)
font.__ui_font__ = self.__ui_font__.fontWithSize(CGFloat(size))
return font
@classmethod
def font_names_for_family_name(cls, name: str) -> List[str]:
"""
Returns an array of font names available in a particular font family.
:param name: The name of the font family. Use the :func:`~pyto_ui.font_family_names` function to get an array of the available font family names on the system.
:rtype: List[str]
"""
names = __UIFont__.fontNamesForFamilyName(name)
py_names = []
for name in names:
py_names.append(str(name))
return py_names
@classmethod
def system_font_of_size(cls, size: float) -> Font:
"""
Returns the font object used for standard interface items in the specified size.
:param size: The size (in points) to which the font is scaled. This value must be greater than 0.0.
:rtype: Font
"""
font = cls(None, None)
font.__ui_font__ = __UIFont__.systemFontOfSize(CGFloat(size))
return font
@classmethod
def italic_system_font_of_size(cls, size: float) -> Font:
"""
Returns the font object used for standard interface items that are rendered in italic type in the specified size.
:param size: The size (in points) for the font. This value must be greater than 0.0.
:rtype: Font
"""
font = cls(None, None)
font.__ui_font__ = __UIFont__.italicSystemFontOfSize(CGFloat(size))
return font
@classmethod
def bold_system_font_of_size(cls, size: float) -> Font:
"""
Returns the font object used for standard interface items that are rendered in boldface type in the specified size
:param size: The size (in points) for the font. This value must be greater than 0.0.
:rtype: Font
"""
font = cls(None, None)
font.__ui_font__ = __UIFont__.boldSystemFontOfSize(CGFloat(size))
return font
@classmethod
def font_with_style(cls, style: FONT_TEXT_STYLE) -> Font:
"""
Returns an instance of the system font for the specified text style and scaled appropriately for the user's selected content size category.
:param style: The text style for which to return a font. See `Font Text Style <constants.html#font-text-style>`_ constants for possible values.
:rtype: Font
"""
font = cls(None, None)
font.__ui_font__ = __UIFont__.preferredFontForTextStyle(style)
return font
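# A short sketch of the Font constructors defined above: a named font, a scaled
# copy, and the preferred font for a text style. The font name "Courier" is only
# an assumed example family; _font_example is an illustrative helper, not part of
# the module's API, and is never called at import.
def _font_example() -> Font:
    mono = Font("Courier", 17)
    big_mono = mono.with_size(24)
    body = Font.font_with_style(FONT_TEXT_STYLE_BODY)  # scales with user settings
    print(Font.font_names_for_family_name("Courier"), big_mono, body)
    return Font.bold_system_font_of_size(FONT_SYSTEM_SIZE)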
# MARK: - Gesture Recognizer
class GestureRecognizer:
"""
A gesture-recognizer object—or, simply, a gesture recognizer—decouples the logic for recognizing a sequence of touches (or other input) and acting on that recognition. When one of these objects recognizes a common gesture or, in some cases, a change in the gesture, it sends an action message to each designated target object.
This class represents the type of gesture passed to the ``type`` initializer parameter. See `Gesture Type <constants.html#gesture-type>`_ constants for possible values.
    When the gesture is starting, cancelling or changing, ``action`` is called with the gesture recognizer as parameter. You can then access the location and the state from it.
Example:
.. highlight:: python
.. code-block:: python
'''
Move a circle with finger.
'''
import pyto_ui as ui
view = ui.View()
view.background_color = ui.COLOR_SYSTEM_BACKGROUND
circle = ui.View()
circle.size = (50, 50)
circle.center = (view.width/2, view.height/2)
circle.flexible_margins = True
circle.corner_radius = 25
circle.background_color = ui.COLOR_LABEL
view.add_subview(circle)
def move(sender: ui.GestureRecognizer):
if sender.state == ui.GESTURE_STATE_CHANGED:
circle.center = sender.location
gesture = ui.GestureRecognizer(ui.GESTURE_TYPE_PAN)
gesture.action = move
view.add_gesture_recognizer(gesture)
ui.show_view(view)
"""
__py_gesture__ = None
def __init__(
self, type: GESTURE_TYPE, action: Callable[[GestureRecognizer], None] = None
):
if type.objc_class == __PyGestureRecognizer__:
self.__py_gesture__ = type
else:
self.__py_gesture__ = __PyGestureRecognizer__.newRecognizerWithType(type)
self.__py_gesture__.managedValue = _values.value(self)
if action is not None:
self.action = action
def __repr__(self):
return str(self.__py_gesture__.managed.description)
__x__ = []
__y__ = []
@property
def x(self) -> float:
"""
(Read Only) Returns the X position of the gesture in its container view.
:rtype: float
"""
try:
return self.__x__[0]
except IndexError:
return None
@property
def y(self) -> float:
"""
(Read Only) Returns the Y position of the gesture in its container view.
"""
try:
return self.__y__[0]
except IndexError:
return None
@property
def location(self) -> Tuple[float, float]:
"""
(Read Only) Returns a tuple with the X and the Y position of the gesture in its container view.
:rtype: Tuple[float, float]
"""
tup = (self.x, self.y)
if tup == (None, None):
return None
else:
return tup
@property
def view(self) -> "View":
"""
(Read Only) Returns the view associated with the gesture.
:rtype: View
"""
view = self.__py_gesture__.view
if view is None:
return None
else:
_view = View()
_view.__py_view__ = view
return _view
@property
def enabled(self) -> bool:
"""
A boolean indicating whether the gesture recognizer is enabled.
:rtype: bool
"""
return self.__py_gesture__.enabled
@enabled.setter
def enabled(self, new_value: bool):
self.__py_gesture__.enabled = new_value
__number_of_touches__ = None
@property
def number_of_touches(self) -> int:
"""
(Read Only) Returns the number of touches involved in the gesture represented by the receiver.
:rtype: int
"""
if self.__number_of_touches__ is not None:
return self.__number_of_touches__
else:
return self.__py_gesture__.numberOfTouches
__state__ = None
@property
def state(self) -> GESTURE_STATE:
"""
(Read Only) The current state of the gesture recognizer.
:rtype: `Gesture State <constants.html#gesture-state>`_
"""
if self.__state__ is not None:
return self.__state__
else:
return self.__py_gesture__.state
@property
def requires_exclusive_touch_type(self) -> bool:
"""
A Boolean indicating whether the gesture recognizer considers touches of different types simultaneously.
:rtype: bool
"""
return self.__py_gesture__.requiresExclusiveTouchType
@requires_exclusive_touch_type.setter
def requires_exclusive_touch_type(self, new_value: bool):
self.__py_gesture__.requiresExclusiveTouchType = new_value
@property
def delays_touches_ended(self) -> bool:
"""
A Boolean value determining whether the receiver delays sending touches in a end phase to its view.
:rtype: bool
"""
return self.__py_gesture__.delaysTouchesEnded
@delays_touches_ended.setter
def delays_touches_ended(self, new_value: bool):
self.__py_gesture__.delaysTouchesEnded = new_value
@property
def delays_touches_began(self) -> bool:
"""
A Boolean value determining whether the receiver delays sending touches in a begin phase to its view.
:rtype: bool
"""
return self.__py_gesture__.delaysTouchesBegan
@delays_touches_began.setter
def delays_touches_began(self, new_value: bool):
self.__py_gesture__.delaysTouchesBegan = new_value
@property
def cancels_touches_in_view(self) -> bool:
"""
A Boolean value affecting whether touches are delivered to a view when a gesture is recognized.
:rtype: bool
"""
return self.__py_gesture__.cancelsTouchesInView
@cancels_touches_in_view.setter
def cancels_touches_in_view(self, new_value: bool):
self.__py_gesture__.cancelsTouchesInView = new_value
@property
def allowed_touch_types(self) -> List[TOUCH_TYPE]:
"""
An array of touch types used to distinguish type of touches. For possible values, see ``Touch Type`` constants.
:rtype: List[`Touch Type <constants.html#touch-type>`_]
"""
return self.__py_gesture__.allowedTouchTypes
@allowed_touch_types.setter
    def allowed_touch_types(self, new_value: List[TOUCH_TYPE]):
self.__py_gesture__.allowedTouchTypes = new_value
@property
def action(self) -> Callable[[GestureRecognizer], None]:
"""
A function called to handle the gesture. Takes the sender gesture recognizer as parameter.
:rtype: Callable[[GestureRecognizer], None]
"""
action = self.__py_gesture__.action
if action is None:
return None
else:
return _values.globals()[action.identifier]
@action.setter
def action(self, new_value: Callable[[GestureRecognizer], None]):
if new_value is None:
self.__py_gesture__.action = None
else:
self.__py_gesture__.action = _values.value(new_value)
# MARK: - Table View Section
class TableViewSection:
"""
An object representing a section in a Table View.
A section has a title and a list of cells contained in.
"""
__py_section__ = None
def __init__(self, title: str, cells: List["TableViewCell"]):
self.__py_section__ = __PyTableViewSection__.new()
self.__py_section__.managedValue = _values.value(self)
self.title = title
self.cells = cells
@property
def table_view(self) -> "TableView":
"""
(Read Only) Returns the Table view associated with the section.
:rtype: TableView
"""
table_view = self.__py_section__.tableView
if table_view is None:
return None
else:
py_table_view = TableView()
py_table_view.__py_view__ = table_view
return py_table_view
@property
def title(self) -> str:
"""
The title of the section displayed on screen.
:rtype: str
"""
return str(self.__py_section__.title)
@title.setter
def title(self, new_value: str):
self.__py_section__.title = new_value
@property
def cells(self) -> "TableViewCell":
"""
Cells contained in the section. After setting a value, the section will be reloaded automatically.
:rtype: TableViewCell
"""
cells = self.__py_section__.cells
py_cells = []
for cell in cells:
py_cell = TableViewCell()
py_cell.__py_view__ = cell
py_cells.append(py_cell)
return py_cells
@cells.setter
def cells(self, new_value: "TableViewCell"):
cells = []
for cell in new_value:
cells.append(cell.__py_view__)
self.__py_section__.cells = cells
@property
def did_select_cell(self) -> Callable[[TableViewSection, int], None]:
"""
A function called when a cell contained in the section is selected. Takes the sender section and the selected cell's index as parameters.
:rtype: Callable[[TableViewSection, int], None]
"""
action = self.__py_section__.didSelectCell
if action is None:
return None
else:
return _values.globals()[action.identifier]
@did_select_cell.setter
def did_select_cell(self, new_value: Callable[[TableViewSection, int], None]):
if new_value is None:
self.__py_section__.didSelectCell = None
else:
self.__py_section__.didSelectCell = _values.value(new_value)
@property
def did_tap_cell_accessory_button(self) -> Callable[[TableViewSection, int], None]:
"""
A function called when the accessory button of a cell contained in the section is pressed. Takes the sender section and the cell's index as parameters.
:rtype: Callable[[TableViewSection, int], None]
"""
action = self.__py_section__.accessoryButtonTapped
if action is None:
return None
else:
return _values.globals()[action.identifier]
@did_tap_cell_accessory_button.setter
def did_tap_cell_accessory_button(
self, new_value: Callable[[TableViewSection, int], None]
):
if new_value is None:
self.__py_section__.accessoryButtonTapped = None
else:
self.__py_section__.accessoryButtonTapped = _values.value(new_value)
@property
def did_delete_cell(self) -> Callable[[TableViewSection, int], None]:
"""
        A function called when a cell contained in the section is deleted. Takes the sender section and the deleted cell's index as parameters.
This function should be used to remove the data corresponding to the cell from the database.
:rtype: Callable[[TableViewSection, int], None]
"""
action = self.__py_section__.didDeleteCell
if action is None:
return None
else:
return _values.globals()[action.identifier]
@did_delete_cell.setter
def did_delete_cell(self, new_value: Callable[[TableViewSection, int], None]):
if new_value is None:
self.__py_section__.didDeleteCell = None
else:
self.__py_section__.didDeleteCell = _values.value(new_value)
@property
def did_move_cell(self) -> Callable[[TableViewSection, int, int], None]:
"""
        A function called when a cell contained in the section is moved. Takes the sender section, the moved cell's index and the destination index as parameters.
        This function should be used to move the data corresponding to the cell in the database.
:rtype: Callable[[TableViewSection, int, int], None]
"""
action = self.__py_section__.didMoveCell
if action is None:
return None
else:
return _values.globals()[action.identifier]
@did_move_cell.setter
def did_move_cell(self, new_value: Callable[[TableViewSection, int, int], None]):
if new_value is None:
self.__py_section__.didMoveCell = None
else:
self.__py_section__.didMoveCell = _values.value(new_value)
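# A minimal sketch of wiring a TableViewSection to a selection callback, assuming
# TableViewCell (defined later in this module) exposes a text_label label as its
# primary text; the fruit names are placeholder data and _section_example is only
# an illustrative helper that is never called at import.
def _section_example() -> TableViewSection:
    cells = []
    for fruit in ("Apple", "Banana", "Cherry"):
        cell = TableViewCell()
        cell.text_label.text = fruit  # assumed primary label of the cell
        cells.append(cell)
    def selected(section: TableViewSection, index: int):
        # Called with the sender section and the selected cell's index.
        print("selected", section.cells[index].text_label.text)
    section = TableViewSection("Fruits", cells)
    section.did_select_cell = selected
    return section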
# MARK: - Button Item
class ButtonItem:
"""
    A special kind of button that can be placed on the view's navigation bar. Can have a title, an image or a system type.
"""
__py_item__ = None
def __init__(
self,
title: str = None,
image: Image.Image = None,
system_item: SYSTEM_ITEM = None,
style: BUTTON_ITEM_STYLE = __v__("BUTTON_ITEM_STYLE_PLAIN"),
):
if style == "BUTTON_ITEM_STYLE_PLAIN":
style = BUTTON_ITEM_STYLE_PLAIN
if system_item is not None:
self.__py_item__ = __PyButtonItem__.alloc().initWithSystemItem(system_item)
else:
self.__py_item__ = __PyButtonItem__.alloc().initWithStyle(style)
self.__py_item__.managedValue = _values.value(self)
self.title = title
self.image = image
def __repr__(self):
return str(self.__py_item__.managed.description)
@property
def title(self) -> str:
"""
The title of the button displayed on screen.
:rtype: str
"""
title = self.__py_item__.title
if title is not None:
return str(title)
else:
return None
@title.setter
def title(self, new_value: str):
self.__py_item__.title = new_value
@property
def image(self) -> Image.Image:
"""
A ``PIL`` image object displayed on screen. May also be an ``UIKit`` ``UIImage`` symbol. See :func:`~pyto_ui.image_with_system_name`.
:rtype: PIL.Image.Image
"""
ui_image = self.__py_item__.image
if ui_image is None:
return None
elif ui_image.symbolImage:
return ui_image
else:
return __pil_image_from_ui_image__(ui_image)
@image.setter
def image(self, new_value: Image.Image):
if new_value is None:
self.__py_item__.image = None
elif "objc_class" in dir(new_value) and new_value.objc_class == UIImage:
self.__py_item__.image = new_value
else:
self.__py_item__.image = __ui_image_from_pil_image__(new_value)
@property
def enabled(self) -> bool:
"""
A boolean indicating whether the button is enabled.
:rtype: bool
"""
return self.__py_item__.enabled
@enabled.setter
def enabled(self, new_value: bool):
self.__py_item__.enabled = new_value
@property
def style(self) -> BUTTON_ITEM_STYLE:
"""
The button item style. See `Button Item Style <constants.html#button-item-style>`_ constants for possible values.
:rtype: `Button Item Style <constants.html#button-item-style>`_
"""
return self.__py_item__.style
@style.setter
def style(self, new_value: BUTTON_ITEM_STYLE):
self.__py_item__.style = new_value
@property
def action(self) -> Callable[[ButtonItem], None]:
"""
A function called when the button item is pressed. Takes the button item as parameter.
:rtype: Callable[[ButtonItem], None]
"""
action = self.__py_item__.action
if action is None:
return None
else:
return _values.globals()[action.identifier]
@action.setter
def action(self, new_value: Callable[[ButtonItem], None]):
if new_value is None:
self.__py_item__.action = None
else:
self.__py_item__.action = _values.value(new_value)
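# A brief sketch of a ButtonItem with an action callback, using only names defined
# above. Attaching the item to a view's navigation bar is assumed to happen
# elsewhere; _button_item_example is an illustrative helper, never called at import.
def _button_item_example() -> ButtonItem:
    def pressed(sender: ButtonItem):
        # Called with the button item itself as the only parameter.
        print(sender.title, "pressed")
    item = ButtonItem(title="Reload", style=BUTTON_ITEM_STYLE_DONE)
    item.action = pressed
    return item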
# MARK: - Alert
class Alert:
"""
A class representing an alert.
Example:
.. highlight:: python
.. code-block:: python
from pyto_ui import Alert
alert = Alert("Hello", "Hello World!")
alert.add_action("Ok")
alert.add_cancel_action("Cancel")
if (alert.show() == "Ok"):
print("Good Bye!")
"""
__pyAlert__ = None
def __init__(self, title: str, message: str):
"""
Creates an alert.
:param title: The title of the alert.
:param message: The message of the alert.
"""
self.__pyAlert__ = __PyAlert__.alloc().init()
self.__pyAlert__.title = title
self.__pyAlert__.message = message
__actions__ = []
def add_action(self, title: str):
"""
Adds an action with given title.
:param title: The title of the action.
"""
self.__pyAlert__.addAction(title)
def add_destructive_action(self, title: str):
"""
Adds a destructive action with given title.
:param title: The title of the action.
"""
self.__pyAlert__.addDestructiveAction(title)
def add_cancel_action(self, title: str):
"""
Adds a cancel action with given title. Can only be added once.
:param title: The title of the action.
"""
if not self.__pyAlert__.addCancelAction(title):
raise ValueError("There is already a cancel action.")
def show(self) -> str:
"""
        Shows the alert.
Returns the title of the selected action.
:rtype: str
"""
script_path = None
try:
script_path = threading.current_thread().script_path
except AttributeError:
pass
return self.__pyAlert__._show(script_path)
###############
# MARK: - View Classes
###############
class View:
"""
An object that manages the content for a rectangular area on the screen.
"""
__py_view__ = None
def __init__(self):
self.__py_view__ = __PyView__.newView()
def __repr__(self):
return str(self.__py_view__.managed.description)
@property
def title(self) -> str:
"""
If this view is directly presented, the top bar will show this view's title.
:rtype: str
"""
return self.__py_view__.title
@title.setter
def title(self, new_value: str):
self.__py_view__.title = new_value
def close(self):
"""
Closes the view, if the receiver object is the root view presented to the user.
"""
self.__py_view__.close()
def push(self, view: View):
"""
Presents the given additional view on top of the receiver.
:param view: The view to present.
"""
self.__py_view__.pushView(view.__py_view__)
def pop(self):
"""
Pops the visible view controller from the navigation controller.
"""
self.__py_view__.pop()
@property
def navigation_bar_hidden(self) -> bool:
"""
A boolean indicating whether the Navigation Bar of the View should be hidden.
:rtype: bool
"""
return self.__py_view__.navigationBarHidden
@navigation_bar_hidden.setter
def navigation_bar_hidden(self, new_value: bool):
self.__py_view__.navigationBarHidden = new_value
@property
def x(self) -> float:
"""
The x-coordinate of the view.
:rtype: float
"""
return self.__py_view__.x
@x.setter
def x(self, new_value: float):
self.__py_view__.x = new_value
@property
def y(self) -> float:
"""
        The y-coordinate of the view.
:rtype: float
"""
return self.__py_view__.y
@y.setter
def y(self, new_value: float):
self.__py_view__.y = new_value
@property
def width(self) -> float:
"""
The width of the view.
:rtype: float
"""
return self.__py_view__.width
@width.setter
def width(self, new_value: float):
self.__py_view__.width = new_value
@property
def height(self) -> float:
"""
The height of the view.
:rtype: float
"""
return self.__py_view__.height
@height.setter
def height(self, new_value: float):
self.__py_view__.height = new_value
@property
def center_x(self) -> float:
"""
        The center x-coordinate of the view's frame rectangle. Setting this value updates the ``frame`` property appropriately.
:rtype: float
"""
return self.__py_view__.centerX
@center_x.setter
def center_x(self, new_value: float):
self.__py_view__.centerX = new_value
@property
def center_y(self) -> float:
"""
        The center y-coordinate of the view's frame rectangle. Setting this value updates the ``frame`` property appropriately.
:rtype: float
"""
return self.__py_view__.centerY
@center_y.setter
def center_y(self, new_value: float):
self.__py_view__.centerY = new_value
@property
def center(self) -> Tuple[float, float]:
"""
        The center point of the view's frame rectangle. Setting this value updates the ``frame`` property appropriately.
This value is a tuple with X and Y coordinates.
:rtype: Tuple[float, float]
"""
return (self.center_x, self.center_y)
@center.setter
def center(self, new_value: Tuple[float, float]):
self.center_x, self.center_y = new_value
@property
def size(self) -> Tuple[float, float]:
"""
        A size that specifies the width and height of the view's rectangle.
        This value is a tuple with width and height values.
:rtype: Tuple[float, float]
"""
return (self.width, self.height)
@size.setter
def size(self, new_value: Tuple[float, float]):
self.width, self.height = new_value
@property
def origin(self) -> Tuple[float, float]:
"""
A point that specifies the coordinates of the view's rectangle’s origin.
This value is a tuple with X and Y coordinates.
:rtype: Tuple[float, float]
"""
return (self.x, self.y)
@origin.setter
def origin(self, new_value: Tuple[float, float]):
self.x, self.y = new_value
@property
def frame(self) -> Tuple[float, float, float, float]:
"""
The frame rectangle, which describes the view’s location and size in its superview’s coordinate system.
This value is a tuple with X, Y, Width and Height values.
:rtype: Tuple[float, float, float, float]
"""
return (self.x, self.y, self.width, self.height)
@frame.setter
def frame(self, new_value: Tuple[float, float, float, float]):
self.x, self.y, self.width, self.height = new_value
@property
def __flexible_width__(self) -> bool:
return self.__py_view__.flexibleWidth
@__flexible_width__.setter
def __flexible_width__(self, new_value: bool):
self.__py_view__.flexibleWidth = new_value
@property
def __flexible_height__(self) -> bool:
return self.__py_view__.flexibleHeight
@__flexible_height__.setter
def __flexible_height__(self, new_value: bool):
self.__py_view__.flexibleHeight = new_value
@property
def __flexible_left_margin__(self) -> bool:
return self.__py_view__.flexibleLeftMargin
@__flexible_left_margin__.setter
def __flexible_left_margin__(self, new_value: bool):
self.__py_view__.flexibleLeftMargin = new_value
@property
def __flexible_right_margin__(self) -> bool:
return self.__py_view__.flexibleRightMargin
@__flexible_right_margin__.setter
def __flexible_right_margin__(self, new_value: bool):
self.__py_view__.flexibleRightMargin = new_value
@property
def __flexible_top_margin__(self) -> bool:
return self.__py_view__.flexibleTopMargin
@__flexible_top_margin__.setter
def __flexible_top_margin__(self, new_value: bool):
self.__py_view__.flexibleTopMargin = new_value
@property
def __flexible_bottom_margin__(self) -> bool:
return self.__py_view__.flexibleBottomMargin
@__flexible_bottom_margin__.setter
def __flexible_bottom_margin__(self, new_value: bool):
self.__py_view__.flexibleBottomMargin = new_value
@property
def flex(self) -> List[AUTO_RESIZING]:
"""
A list that determines how the receiver resizes itself when its superview’s bounds change. See `Auto Resizing <constants.html#auto-resizing>`_ constants for possible values.
:rtype: List[`Auto Resizing <constants.html#auto-resizing>`_]
"""
a = []
if self.__flexible_width__:
a.append(FLEXIBLE_WIDTH)
if self.__flexible_height__:
a.append(FLEXIBLE_HEIGHT)
if self.__flexible_bottom_margin__:
a.append(FLEXIBLE_BOTTOM_MARGIN)
if self.__flexible_top_margin__:
a.append(FLEXIBLE_TOP_MARGIN)
if self.__flexible_left_margin__:
a.append(FLEXIBLE_LEFT_MARGIN)
if self.__flexible_right_margin__:
a.append(FLEXIBLE_RIGHT_MARGIN)
return a
@flex.setter
def flex(self, new_value: List[AUTO_RESIZING]):
self.__flexible_width__, self.__flexible_height__, self.__flexible_top_margin__, self.__flexible_bottom_margin__, self.__flexible_left_margin__, self.__flexible_right_margin__ = (
(FLEXIBLE_WIDTH in new_value),
(FLEXIBLE_HEIGHT in new_value),
(FLEXIBLE_TOP_MARGIN in new_value),
(FLEXIBLE_BOTTOM_MARGIN in new_value),
(FLEXIBLE_LEFT_MARGIN in new_value),
(FLEXIBLE_RIGHT_MARGIN in new_value),
)
@property
def subviews(self) -> List[View]:
"""
(Read Only) A list of the view's children.
See also :func:`~pyto_ui.View.add_subview`.
:rtype: List[View]
"""
views = self.__py_view__.subviews
if views is None or len(views) == 0:
return []
else:
_views = []
for view in views:
_view = self.__class__()
_view.__py_view__ = view
_views.append(_view)
return _views
@property
def superview(self) -> View:
"""
        (Read Only) The parent view containing the receiver view.
:rtype: View
"""
superview = self.__py_view__.superView
if superview is None:
return None
else:
view = self.__class__()
view.__py_view__ = superview
return view
@property
def background_color(self) -> Color:
"""
The background color of the view.
:rtype: Color
"""
c = self.__py_view__.backgroundColor
if c is None:
return None
else:
return Color(c)
@background_color.setter
def background_color(self, new_value: Color):
if new_value is None:
self.__py_view__.backgroundColor = None
else:
self.__py_view__.backgroundColor = new_value.__py_color__
@property
def hidden(self) -> bool:
"""
A boolean indicating whether the view is visible or not.
:rtype: bool
"""
return self.__py_view__.hidden
@hidden.setter
def hidden(self, new_value: bool):
self.__py_view__.hidden = new_value
@property
def alpha(self) -> float:
"""
The opacity of the view.
:rtype: float
"""
return self.__py_view__.alpha
@alpha.setter
def alpha(self, new_value: float):
self.__py_view__.alpha = new_value
@property
def opaque(self) -> bool:
"""
A boolean indicating whether the view is opaque or not. Setting to ``True`` should prevent the view from having a transparent background.
:rtype: bool
"""
return self.__py_view__.opaque
@opaque.setter
def opaque(self, new_value: bool):
self.__py_view__.opaque = new_value
@property
def tint_color(self) -> Color:
"""
The tint color of the view. If set to ``None``, the tint color will be inherited from the superview. The tint color affects some views like ``Button`` for title color, ``TextView`` for cursor color, etc.
:rtype: Color
"""
c = self.__py_view__.tintColor
if c is None:
return None
else:
return Color(c)
@tint_color.setter
def tint_color(self, new_value: Color):
if new_value is None:
self.__py_view__.tintColor = None
else:
self.__py_view__.tintColor = new_value.__py_color__
@property
def user_interaction_enabled(self) -> bool:
"""
A boolean indicating whether the view responds to touches.
:rtype: bool
"""
return self.__py_view__.userInteractionEnabled
@user_interaction_enabled.setter
def user_interaction_enabled(self, new_value: bool):
self.__py_view__.userInteractionEnabled = new_value
@property
def clips_to_bounds(self) -> bool:
"""
A boolean value that determines whether subviews are confined to the bounds of the view.
:rtype: bool
"""
return self.__py_view__.clipsToBounds
@clips_to_bounds.setter
def clips_to_bounds(self, new_value: bool):
self.__py_view__.clipsToBounds = new_value
@property
def corner_radius(self) -> float:
"""
The radius to use when drawing rounded corners for the view’s background.
:rtype: float
"""
return self.__py_view__.cornerRadius
@corner_radius.setter
def corner_radius(self, new_value: float):
self.__py_view__.cornerRadius = new_value
@property
def border_width(self) -> float:
"""
The width of the view's border.
:rtype: float
"""
return self.__py_view__.borderWidth
@border_width.setter
def border_width(self, new_value: float):
self.__py_view__.borderWidth = new_value
@property
def border_color(self) -> Color:
"""
        The color of the view's border.
:rtype: Color
"""
c = self.__py_view__.borderColor
if c is None:
return None
else:
return Color(c)
@border_color.setter
def border_color(self, new_value: Color):
if new_value is None:
self.__py_view__.borderColor = None
else:
self.__py_view__.borderColor = new_value.__py_color__
@property
def content_mode(self) -> CONTENT_MODE:
"""
A flag used to determine how a view lays out its content when its bounds change.
        See `Content Mode <constants.html#content-mode>`_ constants for possible values.
        :rtype: `Content Mode <constants.html#content-mode>`_
"""
return self.__py_view__.contentMode
@content_mode.setter
def content_mode(self, new_value: CONTENT_MODE):
self.__py_view__.contentMode = new_value
@property
def appearance(self) -> APPEARANCE:
"""
The appearance of the view.
See `Appearance <constants.html#appearance>`_ constants for possible values.
:rtype: `Appearance <constants.html#appearance>`_
"""
return self.__py_view__.appearance
@appearance.setter
def appearance(self, new_value: APPEARANCE):
self.__py_view__.appearance = new_value
@property
def first_responder(self) -> bool:
"""
        (Read Only) A boolean indicating whether the view is the first responder.
``UIKit`` dispatches some types of events, such as motion events, to the first responder initially.
:rtype: bool
"""
return self.__py_view__.firstResponder
def add_subview(self, view: View):
"""
Adds the given view to the receiver's hierarchy.
:param view: The view to add.
"""
self.__py_view__.addSubview(view.__py_view__)
def insert_subview(self, view: View, index: int):
"""
Inserts the given view to the receiver's hierarchy at the given index.
:param view: The view to insert.
:param index: The index where the view should be inserted.
"""
self.__py_view__.insertSubview(view.__py_view__, at=index)
def insert_subview_bellow(self, view: View, bellow_view: View):
"""
        Inserts the given view into the receiver's hierarchy below another given view.
:param view: The view to insert.
:param bellow_view: The view above the inserted view.
"""
self.__py_view__.insertSubview(view.__py_view__, bellow=bellow_view.__py_view__)
def insert_subview_above(self, view: View, above_view: View):
"""
Inserts the given view to the receiver's hierarchy above another given view.
:param view: The view to insert.
        :param above_view: The view below the inserted view.
"""
self.__py_view__.insertSubview(view.__py_view__, above=above_view.__py_view__)
def remove_from_superview(self):
"""
Removes the view from the parent's hierarchy.
"""
self.__py_view__.removeFromSuperview()
def add_gesture_recognizer(self, gesture_recognizer: GestureRecognizer):
"""
Adds a gesture recognizer.
:param gesture_recognizer: The gesture recognizer to be added.
"""
self.__py_view__.addGestureRecognizer(gesture_recognizer.__py_gesture__)
def remove_gesture_recognizer(self, gesture_recognizer: GestureRecognizer):
"""
Removes a gesture recognizer.
:param gesture_recognizer: The gesture recognizer to be removed.
"""
self.__py_view__.removeGestureRecognizer(gesture_recognizer.__py_gesture__)
@property
def gesture_recognizers(self) -> List[GestureRecognizer]:
"""
(Read Only) Returns all gesture recognizers.
See :meth:`~pyto_ui.View.add_gesture_recognizer`.
:rtype: List[GestureRecognizer]
"""
recognizers = self.__py_view__.gestureRecognizers
if recognizers is None or len(recognizers) == 0:
return []
else:
_recognizers = []
for recognizer in recognizers:
_recognizer = self.__class__()
_recognizer.__py_gesture__ = recognizer
_recognizers.append(_recognizer)
return _recognizers
def size_to_fit(self):
"""
Sizes the view to fit its content.
"""
self.__py_view__.sizeToFit()
def become_first_responder(self) -> bool:
"""
Becomes the first responder. On :class:`~pyto_ui.TextView` and :class:`~pyto_ui.TextField` objects, the keyboard will be shown.
Returns a boolean indicating the success.
:rtype: bool
"""
return self.__py_view__.becomeFirstResponder()
def resign_first_responder(self) -> bool:
"""
Stops being the first responder. On :class:`~pyto_ui.TextView` and :class:`~pyto_ui.TextField` objects, the keyboard will be hidden.
Returns a boolean indicating the success.
:rtype: bool
"""
return self.__py_view__.resignFirstResponder()
@property
def layout(self) -> Callable[[View], None]:
"""
A function called when the view is resized. Takes the view as parameter.
:rtype: Callable[[View], None]
"""
action = self.__py_view__.layoutAction
if action is None:
return None
else:
return _values.globals()[action.identifier]
@layout.setter
def layout(self, new_value: Callable[[View], None]):
self.__py_view__.pyValue = _values.value(self)
if new_value is None:
self.__py_view__.layoutAction = None
else:
self.__py_view__.layoutAction = _values.value(new_value)
@property
def button_items(self) -> List[ButtonItem]:
"""
A list of :class:`~pyto_ui.ButtonItem` objects to be displayed on the top bar. Works only if the view is the root view presented with :func:`~pyto_ui.show_view` or :meth:`~pyto_ui.View.push`.
:rtype: List[ButtonItem]
"""
items = self.__py_view__.buttonItems
if items is None or len(items) == 0:
return []
else:
_items = []
for item in items:
_item = ButtonItem()
                _item.__py_item__ = item
_items.append(_item)
return _items
@button_items.setter
def button_items(self, new_value: List[ButtonItem]):
items = []
if new_value is not None and len(new_value) > 0:
for item in new_value:
items.append(item.__py_item__)
self.__py_view__.buttonItems = items
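# Illustrative usage sketch (not part of the original module): composing the
# View API defined above. FLEXIBLE_WIDTH and FLEXIBLE_HEIGHT are the Auto
# Resizing constants referenced by the ``flex`` property and are assumed to be
# defined earlier in this file.
def _example_container_view() -> View:
    container = View()
    container.size = (320, 240)
    child = View()
    child.frame = (10, 10, 300, 220)
    # Let the child resize together with its superview.
    child.flex = [FLEXIBLE_WIDTH, FLEXIBLE_HEIGHT]
    container.add_subview(child)
    return container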
class ImageView(View):
"""
A view displaying an image. The displayed image can be a ``PIL`` image, an ``UIKit`` ``UIImage`` (see :func:`~pyto_ui.image_with_system_name`) or can be directly downloaded from an URL.
"""
def __init__(self, image: Image.Image = None, url: str = None):
self.__py_view__ = __UIImageView__.newView()
self.image = image
if url is not None:
self.load_from_url(url)
@property
def image(self) -> Image.Image:
"""
The image displayed on screen. Can be a ``PIL`` image or an ``UIKit`` ``UIImage``. See :func:`~pyto_ui.image_with_system_name` for more information about how to get a symbol image.
:rtype: Image.Image
"""
ui_image = self.__py_view__.image
if ui_image is None:
return None
elif ui_image.symbolImage:
return ui_image
else:
return __pil_image_from_ui_image__(ui_image)
@image.setter
def image(self, new_value: Image.Image):
if new_value is None:
self.__py_view__.image = None
elif "objc_class" in dir(new_value) and new_value.objc_class == UIImage:
self.__py_view__.image = new_value
else:
self.__py_view__.image = __ui_image_from_pil_image__(new_value)
def load_from_url(self, url):
"""
        Loads and displays the image at the given URL.
:param url: The URL of the image.
"""
if "widget" in os.environ:
raise EnvironmentError("'load_from_url' is not supported in Today Widget.")
def _set_image(self, url):
self.image = Image.open(urlopen(url))
Thread(target=_set_image, args=(self, url)).start()
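# Illustrative usage sketch (not part of the original module): the two ways of
# feeding an ImageView shown above. "sun.max" is just an example SF Symbol
# name; image_with_system_name() is defined near the end of this file.
def _example_image_views():
    symbol_view = ImageView()
    symbol_view.image = image_with_system_name("sun.max")
    remote_view = ImageView(url="https://www.example.com/picture.png")
    return symbol_view, remote_view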
class Label(View):
"""
    A view displaying text that is not editable or selectable.
"""
def __init__(self, text: str = ""):
self.__py_view__ = __PyLabel__.newView()
self.text = text
def load_html(self, html):
"""
Loads HTML in the Label.
:param html: The HTML code to load.
"""
self.__py_view__.loadHTML(html)
@property
def text(self) -> str:
"""
The text to be displayed on the view.
:rtype: str
"""
return str(self.__py_view__.text)
@text.setter
def text(self, new_value: str):
self.__py_view__.text = new_value
@property
def text_color(self) -> Color:
"""
The color of the text.
:rtype: Color
"""
c = self.__py_view__.textColor
if c is None:
return None
else:
return Color(c)
@text_color.setter
def text_color(self, new_value: Color):
if new_value is None:
self.__py_view__.textColor = None
else:
self.__py_view__.textColor = new_value.__py_color__
@property
def font(self) -> Font:
"""
The font of the text.
:rtype: Font
"""
py_font = self.__py_view__.font
if py_font is None:
return None
font = Font(None, None)
font.__ui_font__ = py_font
return font
@font.setter
def font(self, new_value: Font):
if new_value is None:
self.__py_view__.font = None
else:
self.__py_view__.font = new_value.__ui_font__
@property
def text_alignment(self) -> TEXT_ALIGNMENT:
"""
The text's alignment. For possible values, see `Text Alignment <constants.html#text-alignment>`_ constants.
:rtype: `Text Alignment <constants.html#text-alignment>`_
"""
return self.__py_view__.textAlignment
@text_alignment.setter
def text_alignment(self, new_value: TEXT_ALIGNMENT):
self.__py_view__.textAlignment = new_value
@property
def line_break_mode(self) -> LINE_BREAK_MODE:
"""
The line break mode.
:rtype: `Line Break Mode <constants.html#line-break-mode>`_
"""
return self.__py_view__.lineBreakMode
@line_break_mode.setter
def line_break_mode(self, new_value: LINE_BREAK_MODE):
self.__py_view__.lineBreakMode = new_value
@property
def adjusts_font_size_to_fit_width(self) -> bool:
"""
A boolean indicating whether the label adjusts its font size to fit its size.
:rtype: bool
"""
return self.__py_view__.adjustsFontSizeToFitWidth
@adjusts_font_size_to_fit_width.setter
def adjusts_font_size_to_fit_width(self, new_value: bool):
self.__py_view__.adjustsFontSizeToFitWidth = new_value
@property
def allows_default_tightening_for_truncation(self) -> bool:
return self.__py_view__.allowsDefaultTighteningForTruncation
@allows_default_tightening_for_truncation.setter
def allows_default_tightening_for_truncation(self, new_value: bool):
self.__py_view__.allowsDefaultTighteningForTruncation = new_value
@property
def number_of_lines(self) -> int:
"""
The numbers of lines displayed in the label. Set to ``0`` to show all the text.
:rtype: int
"""
return self.__py_view__.numberOfLines
@number_of_lines.setter
def number_of_lines(self, new_value: int):
self.__py_view__.numberOfLines = new_value
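# Illustrative usage sketch (not part of the original module): a multi-line,
# centered Label. TEXT_ALIGNMENT_CENTER is one of the Text Alignment constants
# referenced by ``text_alignment`` and is assumed to be defined earlier in
# this file.
def _example_label() -> Label:
    label = Label("Hello\nWorld")
    label.number_of_lines = 0  # 0 shows all the text
    label.text_alignment = TEXT_ALIGNMENT_CENTER
    label.size_to_fit()
    return label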
class TableViewCell(View):
"""
A cell contained in a :class:`~pyto_ui.TableView`.
Can have a title, a subtitle, an image and an accessory view.
For a list of supported style, see `Table View Cell Style <constants.html#table-view-cell-style>`_ constants.
"""
def __init__(
self, style: TABLE_VIEW_STYLE = __v__("TABLE_VIEW_CELL_STYLE_DEFAULT")
):
if style == "TABLE_VIEW_CELL_STYLE_DEFAULT":
self.__py_view__ = __PyTableViewCell__.newViewWithStyle(
TABLE_VIEW_CELL_STYLE_DEFAULT
)
else:
self.__py_view__ = __PyTableViewCell__.newViewWithStyle(style)
self.__py_view__.managedValue = _values.value(self)
@property
def movable(self) -> bool:
"""
A boolean indicating whether the cell is movable. If set to ``True``, the container :class:`TableViewSection` object should handle the move.
:rtype: bool
"""
return self.__py_view__.movable
@movable.setter
def movable(self, new_value: bool):
self.__py_view__.movable = new_value
@property
def removable(self) -> bool:
"""
        A boolean indicating whether the cell is removable. If set to ``True``, the container :class:`TableViewSection` object should handle the removal.
:rtype: bool
"""
return self.__py_view__.removable
@removable.setter
def removable(self, new_value: bool):
self.__py_view__.removable = new_value
@property
def content_view(self) -> View:
"""
(Read Only) The view contained in the cell. Custom views should be added inside it.
:rtype: View
"""
_view = View()
_view.__py_view__ = self.__py_view__.contentView
return _view
@property
def image_view(self) -> ImageView:
"""
(Read Only) The view containing an image. May return ``None`` for some `Table View Cell Style <constants.html#table-view-cell-style>`_ values.
        :rtype: ImageView
"""
view = self.__py_view__.imageView
if view is None:
return None
else:
_view = ImageView()
_view.__py_view__ = view
return _view
@property
def text_label(self) -> Label:
"""
(Read Only) The label containing the main text of the cell.
:rtype: Label
"""
view = self.__py_view__.textLabel
if view is None:
return None
else:
_view = Label()
_view.__py_view__ = view
return _view
@property
def detail_text_label(self) -> Label:
"""
(Read Only) The label containing secondary text. May return ``None`` for some `Table View Cell Style <constants.html#table-view-cell-style>`_ values.
:rtype: Label
"""
view = self.__py_view__.detailLabel
if view is None:
return None
else:
_view = Label()
_view.__py_view__ = view
return _view
@property
def accessory_type(self) -> ACCESSORY_TYPE:
"""
The type of accessory view placed to the right of the cell. See `Accessory Type <constants.html#accessory_type>`_ constants for possible values.
:rtype: `Accessory Type <constants.html#accessory_type>`_.
"""
return self.__py_view__.accessoryType
@accessory_type.setter
def accessory_type(self, new_value: ACCESSORY_TYPE):
self.__py_view__.accessoryType = new_value
class TableView(View):
"""
A view containing a list of cells.
A Table View has a list of :class:`TableViewSection` objects that represent groups of cells. A Table View has two possible styles. See `Table View Style <constants.html#table-view-style>`_.
"""
def __init__(
self,
style: TABLE_VIEW_STYLE = __v__("TABLE_VIEW_STYLE_PLAIN"),
sections: List[TableViewSection] = [],
):
if style == "TABLE_VIEW_STYLE_PLAIN":
self.__py_view__ = __PyTableView__.newViewWithStyle(TABLE_VIEW_STYLE_PLAIN)
else:
self.__py_view__ = __PyTableView__.newViewWithStyle(style)
self.__py_view__.managedValue = _values.value(self)
self.sections = sections
@property
    def reload_action(self) -> Callable[[TableView], None]:
"""
        A function called when the Table View requests a reload of its data, for example after a pull to refresh. Takes the Table View as parameter.
:rtype: Callable[[TableView], None]
"""
action = self.__py_view__.reloadAction
if action is None:
return None
else:
return _values.globals()[action.identifier]
@reload_action.setter
def reload_action(self, new_value: Callable[[TableView], None]):
if new_value is None:
            self.__py_view__.reloadAction = None
else:
self.__py_view__.reloadAction = _values.value(new_value)
@property
def edit_button_item(self) -> ButtonItem:
"""
        Returns a bar button item that toggles its title and associated state between Edit and Done.
        The button item is set up to edit the Table View.
:rtype: ButtonItem
"""
item = ButtonItem()
item.__py_item__ = self.__py_view__.editButtonItem
return item
@property
def sections(self) -> List[TableViewSection]:
"""
        A list of :class:`TableViewSection` objects containing the cells to be displayed on the Table View.
        Setting a new value will automatically reload the contents of the Table View.
:rtype: List[TableViewSection]
"""
sections = self.__py_view__.sections
py_sections = []
for section in sections:
py_section = TableViewSection()
py_section.__py_section__ = section
py_sections.append(py_section)
return py_sections
@sections.setter
def sections(self, new_value: List[TableViewSection]):
sections = []
for section in new_value:
section.__py_section__.tableView = self.__py_view__
sections.append(section.__py_section__)
self.__py_view__.sections = sections
def deselect_row(self):
"""
Deselects the current selected row.
"""
self.__py_view__.deselectRowAnimated(True)
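# Illustrative usage sketch (not part of the original module): wiring cells
# into a TableView. TableViewSection is defined earlier in this file; the
# ``TableViewSection(title, cells)`` signature used here is an assumption.
def _example_table_view() -> TableView:
    cells = []
    for name in ("First", "Second", "Third"):
        cell = TableViewCell()
        cell.text_label.text = name
        cells.append(cell)
    section = TableViewSection("Items", cells)  # assumed constructor signature
    return TableView(sections=[section])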
class TextView(View):
"""
An editable, multiline and scrollable view containing text.
"""
def __init__(self, text=""):
self.__py_view__ = __PyTextView__.newView()
self.__py_view__.managedValue = _values.value(self)
self.text = text
@property
def did_begin_editing(self) -> Callable[[TextView], None]:
"""
A function called when the Text View begins editing. Takes the sender Text View as parameter.
:rtype: Callable[[TextView], None]
"""
action = self.__py_view__.didBeginEditing
if action is None:
return None
else:
return _values.globals()[action.identifier]
@did_begin_editing.setter
def did_begin_editing(self, new_value: Callable[[TextView], None]):
if new_value is None:
self.__py_view__.didBeginEditing = None
else:
self.__py_view__.didBeginEditing = _values.value(new_value)
@property
def did_end_editing(self) -> Callable[[TextView], None]:
"""
A function called when the Text View ends editing. Takes the sender Text View as parameter.
:rtype: Callable[[TextView], None]
"""
action = self.__py_view__.didEndEditing
if action is None:
return None
else:
return _values.globals()[action.identifier]
@did_end_editing.setter
def did_end_editing(self, new_value: Callable[[TextView], None]):
if new_value is None:
self.__py_view__.didEndEditing = None
else:
self.__py_view__.didEndEditing = _values.value(new_value)
@property
def did_change(self) -> Callable[[TextView], None]:
"""
A function called when the Text View's text changes. Takes the sender Text View as parameter.
:rtype: Callable[[TextView], None]
"""
action = self.__py_view__.didChangeText
if action is None:
return None
else:
return _values.globals()[action.identifier]
@did_change.setter
def did_change(self, new_value: Callable[[TextView], None]):
if new_value is None:
self.__py_view__.didChangeText = None
else:
self.__py_view__.didChangeText = _values.value(new_value)
def load_html(self, html):
"""
Loads HTML in the Text View.
:param html: The HTML code to load.
"""
self.__py_view__.loadHTML(html)
@property
def text(self) -> str:
"""
The text contained in the view.
:rtype: str
"""
return str(self.__py_view__.text)
@text.setter
def text(self, new_value: str):
self.__py_view__.text = new_value
@property
def editable(self) -> bool:
"""
A boolean indicating whether the text is editable.
:rtype: bool
"""
return self.__py_view__.editable
@editable.setter
def editable(self, new_value: bool):
self.__py_view__.editable = new_value
@property
def selectable(self) -> bool:
"""
A boolean indicating whether the text is selectable.
:rtype: bool
"""
return self.__py_view__.selectable
@selectable.setter
def selectable(self, new_value: bool):
self.__py_view__.selectable = new_value
@property
def text_color(self) -> Color:
"""
The color of the text displayed on screen.
:rtype: Color
"""
c = self.__py_view__.textColor
if c is None:
return None
else:
return Color(c)
@text_color.setter
def text_color(self, new_value: Color):
if new_value is None:
self.__py_view__.textColor = None
else:
self.__py_view__.textColor = new_value.__py_color__
@property
def font(self) -> Font:
"""
The font of the text displayed on screen.
:rtype: Font
"""
py_font = self.__py_view__.font
if py_font is None:
return None
font = Font(None, None)
font.__ui_font__ = py_font
return font
@font.setter
def font(self, new_value: Font):
if new_value is None:
self.__py_view__.font = None
else:
self.__py_view__.font = new_value.__ui_font__
@property
def text_alignment(self) -> TEXT_ALIGNMENT:
"""
The alignment of the text displayed on screen. See `Text Alignment <constants.html#text-alignment>`_ constants for possible values.
:rtype: `Text Alignment <constants.html#text-alignment>`_
"""
return self.__py_view__.textAlignment
@text_alignment.setter
def text_alignment(self, new_value: TEXT_ALIGNMENT):
self.__py_view__.textAlignment = new_value
@property
def smart_dashes(self) -> bool:
"""
A boolean indicating whether smart dashes are enabled.
:rtype: bool
"""
return self.__py_view__.smartDashes
@smart_dashes.setter
def smart_dashes(self, new_value: bool):
self.__py_view__.smartDashes = new_value
@property
def smart_quotes(self) -> bool:
"""
A boolean indicating whether smart quotes are enabled.
:rtype: bool
"""
return self.__py_view__.smartQuotes
@smart_quotes.setter
def smart_quotes(self, new_value: bool):
self.__py_view__.smartQuotes = new_value
@property
def keyboard_type(self) -> KEYBOARD_TYPE:
"""
The type of keyboard to use while editing the text. See `Keyboard Type <constants.html#keyboard-type>`_ constants for possible values.
:rtype: `Keyboard Type <constants.html#keyboard-type>`_
"""
return self.__py_view__.keyboardType
@keyboard_type.setter
def keyboard_type(self, new_value: KEYBOARD_TYPE):
self.__py_view__.keyboardType = new_value
@property
def autocapitalization_type(self) -> AUTO_CAPITALIZE:
"""
        The type of autocapitalization to use while editing the text. See `Auto Capitalization <constants.html#auto-capitalization>`_ constants for possible values.
:rtype: `Auto Capitalization <constants.html#auto-capitalization>`_
"""
return self.__py_view__.autocapitalizationType
@autocapitalization_type.setter
def autocapitalization_type(self, new_value: AUTO_CAPITALIZE):
self.__py_view__.autocapitalizationType = new_value
@property
def autocorrection(self) -> bool:
"""
A boolean indicating whether autocorrection is enabled.
:rtype: bool
"""
return self.__py_view__.autocorrection
@autocorrection.setter
def autocorrection(self, new_value: bool):
self.__py_view__.autocorrection = new_value
@property
def keyboard_appearance(self) -> KEYBOARD_APPEARANCE:
"""
The appearance of the keyboard used while editing the text. See `Keyboard Appearance <constants.html#keyboard-appearance>`_ constants for possible values.
:rtype: `Keyboard Appearance <constants.html#keyboard-appearance>`_
"""
return self.__py_view__.keyboardAppearance
@keyboard_appearance.setter
def keyboard_appearance(self, new_value: KEYBOARD_APPEARANCE):
self.__py_view__.keyboardAppearance = new_value
@property
def return_key_type(self) -> RETURN_KEY_TYPE:
"""
The type of return key to show on the keyboard used to edit the text. See `Return Key Type <constants.html#return-key-type>`_ constants for possible values.
:rtype: `Return Key Type <constants.html#return-key-type>`_
"""
return self.__py_view__.returnKeyType
@return_key_type.setter
def return_key_type(self, new_value: RETURN_KEY_TYPE):
self.__py_view__.returnKeyType = new_value
@property
def secure(self) -> bool:
"""
A boolean indicating whether the keyboard should be configured to enter sensitive data.
:rtype: bool
"""
return self.__py_view__.isSecureTextEntry
@secure.setter
def secure(self, new_value: bool):
self.__py_view__.isSecureTextEntry = new_value
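# Illustrative usage sketch (not part of the original module): observing edits
# in a TextView through the callbacks defined above. The callback is defined
# at module level as a plain function.
def _example_text_changed(sender: TextView):
    # Called every time the text changes; ``sender`` is the TextView itself.
    print(len(sender.text), "characters")
def _example_text_view() -> TextView:
    text_view = TextView("Type here...")
    text_view.did_change = _example_text_changed
    return text_view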
class WebView(View):
"""
A View that displays web content.
"""
class JavaScriptException(Exception):
"""
        An exception thrown while running JavaScript code. Raised by :meth:`~pyto_ui.WebView.evaluate_js`.
"""
pass
def __init__(self, url: str = None):
self.__py_view__ = __PyWebView__.newView()
self.__py_view__.managedValue = _values.value(self)
if url is not None:
self.load_url(url)
def evaluate_js(self, code) -> str:
"""
Runs JavaScript code and returns a String representation of the evaluation result. Raises a :class:`~pyto_ui.WebView.JavaScriptException`.
:param code: JavaScript code to run.
:rtype: str
"""
result = self.__py_view__.evaluateJavaScript(code)
if result is None:
return None
else:
result = str(result)
if result.startswith("_VALULE_:"):
return result.replace("_VALULE_:", "", 1)
            elif result.startswith("_ERROR_:"):
raise self.__class__.JavaScriptException(
result.replace("_ERROR_:", "", 1)
)
def load_url(self, url: str):
"""
Loads an URL.
        :param url: The URL to load. Can be 'http://', 'https://' or 'file://'.
"""
self.__py_view__.loadURL(url)
def load_html(self, html: str, base_url: str = None):
"""
Loads an HTML string.
:param html: The HTML code to load.
:param base_url: An optional URL used to resolve relative paths.
"""
baseURL = base_url
if baseURL is not None:
baseURL = str(base_url)
self.__py_view__.loadHTML(html, baseURL=baseURL)
def reload(self):
"""
Reloads the Web View.
"""
self.__py_view__.reload()
def stop(self):
"""
Stops loading content.
"""
self.__py_view__.stop()
def go_back(self):
"""
Goes back.
"""
self.__py_view__.goBack()
def go_forward(self):
"""
Goes forward.
"""
self.__py_view__.goForward()
@property
def can_go_back(self) -> bool:
"""
(Read Only) A boolean indicating whether :meth:`~pyto_ui.WebView.go_back` can be performed.
:rtype: bool
"""
return self.__py_view__.canGoBack
@property
def can_go_forward(self) -> bool:
"""
(Read Only) A boolean indicating whether :meth:`~pyto_ui.WebView.go_forward` can be performed.
:rtype: bool
"""
return self.__py_view__.canGoForward
@property
def is_loading(self) -> bool:
"""
(Read Only) A boolean indicating whether the Web View is loading content.
:rtype: bool
"""
return self.__py_view__.isLoading
@property
def url(self) -> str:
"""
(Read Only) The current URL loaded into the Web View.
:rtype: str
"""
url = self.__py_view__.url
if url is None:
return None
else:
return str(url)
@property
def did_start_loading(self) -> Callable[[WebView], None]:
"""
A function called when the Web View starts loading contents. Takes the sender Web View as parameter.
:rtype: Callable[[WebView], None]
"""
action = self.__py_view__.didStartLoading
if action is None:
return None
else:
return _values.globals()[action.identifier]
@did_start_loading.setter
def did_start_loading(self, new_value: Callable[[WebView], None]):
if new_value is None:
self.__py_view__.didStartLoading = None
else:
self.__py_view__.didStartLoading = _values.value(new_value)
@property
def did_finish_loading(self) -> Callable[[WebView], None]:
"""
A function called when the Web View finished loading contents. Takes the sender Web View as parameter.
:rtype: Callable[[WebView], None]
"""
action = self.__py_view__.didFinishLoading
if action is None:
return None
else:
return _values.globals()[action.identifier]
@did_finish_loading.setter
def did_finish_loading(self, new_value: Callable[[WebView], None]):
if new_value is None:
self.__py_view__.didFinishLoading = None
else:
self.__py_view__.didFinishLoading = _values.value(new_value)
@property
def did_fail_loading(self) -> Callable[[WebView, str], None]:
"""
A function called when the Web View failed to load contents. Takes the sender Web View and a string describing the error as parameters.
:rtype: Callable[[WebView, str], None]
"""
action = self.__py_view__.didFailLoading
if action is None:
return None
else:
return _values.globals()[action.identifier]
@did_fail_loading.setter
def did_fail_loading(self, new_value: Callable[[WebView, str], None]):
if new_value is None:
self.__py_view__.didFailLoading = None
else:
self.__py_view__.didFailLoading = _values.value(new_value)
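# Illustrative usage sketch (not part of the original module): loading a page
# and reacting when it finishes, using the WebView API above.
def _example_page_loaded(sender: WebView):
    print("Loaded:", sender.url)
def _example_web_view() -> WebView:
    web_view = WebView("https://www.example.com")
    web_view.did_finish_loading = _example_page_loaded
    return web_view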
##################
# MARK: - Control Classes
##################
class Control(View):
"""
The base class for controls, which are visual elements that convey a specific action or intention in response to user interactions.
    Inherited by :class:`Button`, :class:`SegmentedControl`, :class:`Slider`, :class:`Switch` and :class:`TextField`.
"""
def __init__(self):
self.__py_view__ = __PyControl__.newView()
self.__py_view__.managedValue = _values.value(self)
@property
def enabled(self) -> bool:
"""
A boolean indicating whether the control is enabled.
:rtype: bool
"""
return self.__py_view__.enabled
@enabled.setter
def enabled(self, new_value: bool):
self.__py_view__.enabled = new_value
@property
def horizontal_alignment(self) -> HORZONTAL_ALIGNMENT:
"""
The horizontal alignment of the view's contents. See `Horizontal Alignment <constants.html#horizontal-alignment>`_ constants for possible values.
:rtype: `Horizontal Alignment <constants.html#horizontal-alignment>`_
"""
return self.__py_view__.contentHorizontalAlignment
@horizontal_alignment.setter
def horizontal_alignment(self, new_value: HORZONTAL_ALIGNMENT):
self.__py_view__.contentHorizontalAlignment = new_value
@property
def vertical_alignment(self) -> VERTICAL_ALIGNMENT:
"""
        The vertical alignment of the view's contents. See `Vertical Alignment <constants.html#vertical-alignment>`_ constants for possible values.
:rtype: `Vertical Alignment <constants.html#vertical-alignment>`_
"""
return self.__py_view__.contentVerticalAlignment
@vertical_alignment.setter
def vertical_alignment(self, new_value: VERTICAL_ALIGNMENT):
self.__py_view__.contentVerticalAlignment = new_value
@property
def action(self) -> Callable[[Control], None]:
"""
A function called when the control triggers its action.
For example, a :class:`Button` object calls this function when it's pressed.
Takes the :class:`Control` object as parameter.
:rtype: Callable[[Control], None]
"""
action = self.__py_view__.action
if action is None:
return None
else:
return _values.globals()[action.identifier]
@action.setter
def action(self, new_value: Callable[[Control], None]):
if new_value is None:
self.__py_view__.action = None
else:
self.__py_view__.action = _values.value(new_value)
class SegmentedControl(Control):
"""
A horizontal control made of multiple segments, each segment functioning as a discrete button.
The function passed to :data:`~pyto_ui.Control.action` will be called when the segmented control changes its selection.
"""
def __init__(self, segments: List[str] = []):
self.__py_view__ = __PySegmentedControl__.newView()
self.__py_view__.managedValue = _values.value(self)
self.segments = segments
@property
def segments(self) -> List[str]:
"""
A list of strings representing segments titles.
:rtype: List[str]
"""
return list(map(str, self.__py_view__.segments))
@segments.setter
def segments(self, new_value: List[str]):
self.__py_view__.segments = new_value
@property
def selected_segment(self) -> int:
"""
        The index of the selected segment.
:rtype: int
"""
return self.__py_view__.selectedSegmentIndex
@selected_segment.setter
def selected_segment(self, new_value: int):
self.__py_view__.selectedSegmentIndex = new_value
class Slider(Control):
"""
A control used to select a single value from a continuous range of values. The default range is located between ``0`` and ``1``.
The function passed to :data:`~pyto_ui.Control.action` will be called when the slider changes its value.
"""
def __init__(self, value: float = 0.5):
self.__py_view__ = __PySlider__.newView()
self.__py_view__.managedValue = _values.value(self)
self.value = value
def set_value_with_animation(self, value: float):
"""
Sets the value of the slider with an animation.
:param value: The value of the slider.
"""
self.__py_view__.setValue(value, animated=True)
@property
def value(self) -> float:
"""
The value of the slider between its range.
:rtype: float
"""
return self.__py_view__.value
@value.setter
def value(self, new_value: float):
self.__py_view__.value = new_value
@property
def minimum_value(self) -> float:
"""
The minimum value of the slider.
:rtype: float
"""
return self.__py_view__.minimumValue
@minimum_value.setter
def minimum_value(self, new_value: float):
self.__py_view__.minimumValue = new_value
@property
def maximum_value(self) -> float:
"""
The maximum value of the slider.
:rtype: float
"""
return self.__py_view__.maximumValue
@maximum_value.setter
def maximum_value(self, new_value: float):
self.__py_view__.maximumValue = new_value
@property
def minimum_track_color(self) -> Color:
"""
The color used to tint the default minimum track.
:rtype: Color
"""
c = self.__py_view__.minimumTrackColor
if c is None:
return None
else:
return Color(c)
@minimum_track_color.setter
def minimum_track_color(self, new_value: Color):
if new_value is None:
self.__py_view__.minimumTrackColor = None
else:
self.__py_view__.minimumTrackColor = new_value.__py_color__
@property
def maximum_track_color(self) -> Color:
"""
The color used to tint the default maximum track.
:rtype: Color
"""
c = self.__py_view__.maximumTrackColor
if c is None:
return None
else:
return Color(c)
@maximum_track_color.setter
def maximum_track_color(self, new_value: Color):
if new_value is None:
self.__py_view__.maximumTrackColor = None
else:
self.__py_view__.maximumTrackColor = new_value.__py_color__
@property
def thumb_color(self) -> Color:
"""
The color used to tint the default thumb.
:rtype: Color
"""
c = self.__py_view__.thumbColor
if c is None:
return None
else:
return Color(c)
@thumb_color.setter
def thumb_color(self, new_value: Color):
if new_value is None:
self.__py_view__.thumbColor = None
else:
self.__py_view__.thumbColor = new_value.__py_color__
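# Illustrative usage sketch (not part of the original module): a Slider over a
# custom range whose action reads the current value.
def _example_slider_moved(sender: Slider):
    print("Slider value:", sender.value)
def _example_slider() -> Slider:
    slider = Slider()
    slider.minimum_value = 0
    slider.maximum_value = 100
    slider.value = 25
    slider.action = _example_slider_moved
    return slider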
class Switch(Control):
"""
A control that offers a binary choice, such as On/Off.
The function passed to :data:`~pyto_ui.Control.action` will be called when the switch changes its value.
"""
def __init__(self, on=False):
self.__py_view__ = __PySwitch__.newView()
self.__py_view__.managedValue = _values.value(self)
self.on = on
def set_on_with_animation(self, on: bool):
"""
Sets the state of the switch to On or Off with an animation.
:param on: A boolean indicating whether the switch should be On.
"""
self.__py_view__.setOn(on, animated=True)
@property
def on(self) -> bool:
"""
A boolean indicating whether the switch is On.
:rtype: bool
"""
return self.__py_view__.isOn
@on.setter
def on(self, new_value: bool):
self.__py_view__.isOn = new_value
@property
def on_color(self) -> Color:
"""
The color used to tint the appearance of the switch when it is turned on.
:rtype: Color
"""
c = self.__py_view__.onColor
if c is None:
return None
else:
return Color(c)
@on_color.setter
def on_color(self, new_value: Color):
if new_value is None:
self.__py_view__.onColor = None
else:
self.__py_view__.onColor = new_value.__py_color__
@property
def thumb_color(self) -> Color:
"""
The color used to tint the appearance of the thumb.
:rtype: Color
"""
c = self.__py_view__.thumbColor
if c is None:
return None
else:
return Color(c)
@thumb_color.setter
def thumb_color(self, new_value: Color):
if new_value is None:
self.__py_view__.thumbColor = None
else:
self.__py_view__.thumbColor = new_value.__py_color__
class Button(Control):
"""
A control that executes your custom code in response to user interactions.
To add an action, set :data:`~pyto_ui.Control.action`.
For types of buttons, see `Button Type <constants.html#button-type>`_ constants.
"""
def __init__(
self,
type: BUTTON_TYPE = __v__("BUTTON_TYPE_SYSTEM"),
title: str = "",
image: Image.Image = None,
):
if type == "BUTTON_TYPE_SYSTEM":
self.__py_view__ = __PyButton__.newButtonWithType(BUTTON_TYPE_SYSTEM)
else:
self.__py_view__ = __PyButton__.newButtonWithType(type)
self.__py_view__.managedValue = _values.value(self)
self.title = title
self.image = image
@property
def title(self) -> str:
"""
        The title of the button.
:rtype: str
"""
title = self.__py_view__.title
if title is not None:
return str(title)
else:
return None
@title.setter
def title(self, new_value: str):
self.__py_view__.title = new_value
@property
def title_color(self) -> Color:
"""
The color of the title.
:rtype: Color
"""
c = self.__py_view__.titleColor
if c is None:
return None
else:
return Color(c)
@title_color.setter
def title_color(self, new_value: Color):
if new_value is None:
self.__py_view__.titleColor = None
else:
self.__py_view__.titleColor = new_value.__py_color__
@property
def image(self) -> Image.Image:
"""
The image displayed on the button. Can be a ``PIL`` image or an ``UIKit`` symbol image. For more information about symbols, see :func:`~pyto_ui.image_with_system_name`.
:rtype: PIL.Image.Image
"""
ui_image = self.__py_view__.image
if ui_image is None:
return None
elif ui_image.symbolImage:
return ui_image
else:
return __pil_image_from_ui_image__(ui_image)
@image.setter
def image(self, new_value: Image.Image):
if new_value is None:
self.__py_view__.image = None
elif "objc_class" in dir(new_value) and new_value.objc_class == UIImage:
self.__py_view__.image = new_value
else:
self.__py_view__.image = __ui_image_from_pil_image__(new_value)
@property
def font(self) -> Font:
"""
The font to be applied to the text.
:rtype: Font
"""
py_font = self.__py_view__.font
if py_font is None:
return None
font = Font(None, None)
font.__ui_font__ = py_font
return font
@font.setter
def font(self, new_value: Font):
if new_value is None:
self.__py_view__.font = None
else:
self.__py_view__.font = new_value.__ui_font__
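# Illustrative usage sketch (not part of the original module): a system Button
# with a symbol image and an action callback. "gear" is just an example SF
# Symbol name.
def _example_button_pressed(sender: Button):
    print("Pressed:", sender.title)
def _example_button() -> Button:
    button = Button(title="Tap me")
    button.image = image_with_system_name("gear")
    button.action = _example_button_pressed
    return button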
class TextField(Control):
"""
A field to type single line text.
The function passed to :data:`~pyto_ui.Control.action` will be called when the text field changes its text.
"""
def __init__(self, text: str = "", placeholder: str = None):
self.__py_view__ = __PyTextField__.newView()
self.__py_view__.managedValue = _values.value(self)
self.text = text
self.placeholder = placeholder
@property
def border_style(self) -> TEXT_FIELD_BORDER_STYLE:
return self.__py_view__.borderStyle
@border_style.setter
def border_style(self, new_value: TEXT_FIELD_BORDER_STYLE):
self.__py_view__.borderStyle = new_value
@property
def did_begin_editing(self) -> Callable[[TextField], None]:
"""
A function called when the Text Field begins editing. Takes the sender Text Field as parameter.
:rtype: Callable[[TextField], None]
"""
action = self.__py_view__.didBeginEditing
if action is None:
return None
else:
return _values.globals()[action.identifier]
@did_begin_editing.setter
def did_begin_editing(self, new_value: Callable[[TextField], None]):
if new_value is None:
self.__py_view__.didBeginEditing = None
else:
self.__py_view__.didBeginEditing = _values.value(new_value)
@property
def did_end_editing(self) -> Callable[[TextField], None]:
"""
A function called when the Text Field ends editing. Takes the sender Text Field as parameter.
:rtype: Callable[[TextField], None]
"""
action = self.__py_view__.didEndEditing
if action is None:
return None
else:
return _values.globals()[action.identifier]
@did_end_editing.setter
def did_end_editing(self, new_value: Callable[[TextField], None]):
if new_value is None:
self.__py_view__.didEndEditing = None
else:
self.__py_view__.didEndEditing = _values.value(new_value)
@property
def text(self) -> str:
"""
The text contained in the Text Field.
        :rtype: str
"""
return str(self.__py_view__.text)
@text.setter
def text(self, new_value: str):
self.__py_view__.text = new_value
@property
def placeholder(self) -> str:
"""
        A gray placeholder text shown when the Text Field is empty.
:rtype: str
"""
return str(self.__py_view__.placeholder)
@placeholder.setter
def placeholder(self, new_value: str):
self.__py_view__.placeholder = new_value
@property
def text_color(self) -> Color:
"""
The color of the text displayed on screen.
:rtype: Color
"""
c = self.__py_view__.textColor
if c is None:
return None
else:
return Color(c)
@text_color.setter
def text_color(self, new_value: Color):
if new_value is None:
self.__py_view__.textColor = None
else:
self.__py_view__.textColor = new_value.__py_color__
@property
def font(self) -> Font:
"""
The font of the text displayed on screen.
:rtype: Font
"""
py_font = self.__py_view__.font
if py_font is None:
return None
font = Font(None, None)
font.__ui_font__ = py_font
return font
@font.setter
def font(self, new_value: Font):
if new_value is None:
self.__py_view__.font = None
else:
self.__py_view__.font = new_value.__ui_font__
@property
def text_alignment(self) -> TEXT_ALIGNMENT:
"""
The alignment of the text displayed on screen. See `Text Alignment <constants.html#text-alignment>`_ constants for possible values.
:rtype: `Text Alignment <constants.html#text-alignment>`_
"""
return self.__py_view__.textAlignment
@text_alignment.setter
def text_alignment(self, new_value: TEXT_ALIGNMENT):
self.__py_view__.textAlignment = new_value
@property
def smart_dashes(self) -> bool:
"""
A boolean indicating whether smart dashes are enabled.
:rtype: bool
"""
return self.__py_view__.smartDashes
@smart_dashes.setter
def smart_dashes(self, new_value: bool):
self.__py_view__.smartDashes = new_value
@property
def smart_quotes(self) -> bool:
"""
A boolean indicating whether smart quotes are enabled.
:rtype: bool
"""
return self.__py_view__.smartQuotes
@smart_quotes.setter
def smart_quotes(self, new_value: bool):
self.__py_view__.smartQuotes = new_value
@property
def keyboard_type(self) -> KEYBOARD_TYPE:
"""
The type of keyboard to use while editing the text. See `Keyboard Type <constants.html#keyboard-type>`_ constants for possible values.
:rtype: `Keyboard Type <constants.html#keyboard-type>`_
"""
return self.__py_view__.keyboardType
@keyboard_type.setter
def keyboard_type(self, new_value: KEYBOARD_TYPE):
self.__py_view__.keyboardType = new_value
@property
def autocapitalization_type(self) -> AUTO_CAPITALIZE:
"""
        The type of autocapitalization to use while editing the text. See `Auto Capitalization <constants.html#auto-capitalization>`_ constants for possible values.
:rtype: `Auto Capitalization <constants.html#auto-capitalization>`_
"""
return self.__py_view__.autocapitalizationType
@autocapitalization_type.setter
def autocapitalization_type(self, new_value: AUTO_CAPITALIZE):
self.__py_view__.autocapitalizationType = new_value
@property
def autocorrection(self) -> bool:
"""
A boolean indicating whether autocorrection is enabled.
:rtype: bool
"""
return self.__py_view__.autocorrection
@autocorrection.setter
def autocorrection(self, new_value: bool):
self.__py_view__.autocorrection = new_value
@property
def keyboard_appearance(self) -> KEYBOARD_APPEARANCE:
"""
The appearance of the keyboard used while editing the text. See `Keyboard Appearance <constants.html#keyboard-appearance>`_ constants for possible values.
:rtype: `Keyboard Appearance <constants.html#keyboard-appearance>`_
"""
return self.__py_view__.keyboardAppearance
@keyboard_appearance.setter
def keyboard_appearance(self, new_value: KEYBOARD_APPEARANCE):
self.__py_view__.keyboardAppearance = new_value
@property
def return_key_type(self) -> RETURN_KEY_TYPE:
"""
The type of return key to show on the keyboard used to edit the text. See `Return Key Type <constants.html#return-key-type>`_ constants for possible values.
:rtype: `Return Key Type <constants.html#return-key-type>`_
"""
return self.__py_view__.returnKeyType
@return_key_type.setter
def return_key_type(self, new_value: RETURN_KEY_TYPE):
self.__py_view__.returnKeyType = new_value
@property
def secure(self) -> bool:
"""
A boolean indicating whether the keyboard should be configured to enter sensitive data. The text entered by the user will be hidden.
:rtype: bool
"""
return self.__py_view__.isSecureTextEntry
@secure.setter
def secure(self, new_value: bool):
self.__py_view__.isSecureTextEntry = new_value
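# Illustrative usage sketch (not part of the original module): a secure
# TextField that reports how much text was entered when editing ends.
def _example_editing_ended(sender: TextField):
    print("Entered", len(sender.text), "characters")
def _example_password_field() -> TextField:
    field = TextField(placeholder="Password")
    field.secure = True
    field.did_end_editing = _example_editing_ended
    return field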
###################
# MARK: - Functions
###################
def __ui_image_from_pil_image__(image):
if image is None:
return None
with BytesIO() as buffered:
image.save(buffered, format='PNG')
img_str = base64.b64encode(buffered.getvalue())
data = __NSData__.alloc().initWithBase64EncodedString(img_str, options=0)
return UIImage.alloc().initWithData(data)
def __pil_image_from_ui_image__(image):
if image is None:
return None
img_str = str(image.data.base64EncodedStringWithOptions(0))
msg = base64.b64decode(img_str)
with io.BytesIO(msg) as buf:
return Image.open(buf)
def font_family_names() -> List[str]:
"""
Returns all font family names that can be used to initialize a font.
:rtype: List[str]
"""
names = __UIFont__.familyNames
py_names = []
for name in names:
py_names.append(str(name))
return py_names
def image_with_system_name(name: str) -> UIImage:
"""
    Returns a system symbol image from the given name. The return value is a UIKit ``UIImage`` object, so it can only be used with the ``pyto_ui`` library.
More info about symbols on `Apple's Web Site <https://developer.apple.com/design/resources/>`_ .
:param name: The name of the SF Symbol.
:rtype: UIImage
"""
image = UIImage.systemImageNamed(name, withConfiguration=None)
if image is None:
raise ValueError("The given symbol name is not valid.")
return image
def show_view(view: View, mode: PRESENTATION_MODE):
"""
Presents the given view.
This function doesn't return until the view is closed. You can use another thread to perform background tasks and modify the UI after it's presented.
:param view: The :class:`~pyto_ui.View` object to present.
:param mode: The presentation mode to use. The value will be ignored on a widget. See `Presentation Mode <constants.html#presentation-mode>`_ constants for possible values.
"""
view.__py_view__.presentationMode = mode
try:
ConsoleViewController.showView(
view.__py_view__, onConsoleForPath=threading.current_thread().script_path
)
except AttributeError:
ConsoleViewController.showView(view.__py_view__, onConsoleForPath=None)
while view.__py_view__.isPresented:
sleep(0.2)
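# Illustrative usage sketch (not part of the original module): presenting a
# root view with show_view(). PRESENTATION_MODE_SHEET is one of the
# Presentation Mode constants referenced above and is assumed to be defined
# earlier in this file. show_view() blocks until the view is closed.
def _example_show():
    root = View()
    root.title = "Demo"
    label = Label("Hello from pyto_ui")
    label.frame = (20, 40, 280, 40)
    root.add_subview(label)
    show_view(root, PRESENTATION_MODE_SHEET)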
|
app.py
|
#! /usr/bin/env python3
from threading import Thread
from flask import Flask, render_template
from selenium import webdriver
from driver import getPath, profile
from rss import pickup
from reddit import pickup_reddit
def banda_server():
app = Flask(__name__)
@app.route("/")
def index():
return render_template("index.html", articles=pickup())
@app.route("/blogs")
def blog():
return render_template("blog.html")
@app.route("/events")
def events():
return render_template("event.html")
@app.route("/reddit")
def reddit():
return render_template("reddit.html", posts=pickup_reddit())
app.run(threaded=True)
def banda_ui():
ui = webdriver.Firefox(options=profile()[0],
capabilities=profile()[1],
firefox_profile=profile()[2],
executable_path=getPath())
#ui.refresh()
ui.get("http://localhost:5000")
def main():
server_thread = Thread(target=banda_server)
ui_thread = Thread(target=banda_ui)
server_thread.start()
ui_thread.start()
if __name__ == "__main__":
main()
|
service.py
|
'''
Created on Nov 8, 2012
@author: mdickson
'''
### Run Python scripts as a service example (ryrobes.com)
### Usage : python aservice.py install (then start, stop, or remove)
import os
import sys
from threading import Thread
import win32service
import win32serviceutil
import win32event
from inworldz.util.filesystem import getCurrentUsersAppDataPath
from inworldz.maestro.version import product_name
from inworldz.maestro.MaestroServer import MaestroServer
from inworldz.maestro.MaestroAuth import AUTH_WINDOWS
import inworldz.util.properties as DefaultProperties
class MaestroService(win32serviceutil.ServiceFramework):
_svc_name_ = "InWorldz.Maestro"
_svc_display_name_ = _svc_name_
_svc_description_ = "InWorldz Maestro Management Agent"
_svc_deps = [ "EventLog" ]
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
# Create an event which we will use to wait on.
# The "service stop" request will set this event.
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
        # Register servicemanager.pyd with our service so eventlog messages
        # from our service are logged and rendered in the event viewer.
def SvcStop(self):
# Before we do anything, tell the SCM we are starting the stop process.
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
# And set my event.
win32event.SetEvent(self.hWaitStop)
def SvcDoRun(self):
# Starting up
self.ReportServiceStatus(win32service.SERVICE_START_PENDING)
        if self.starting():
self.ReportServiceStatus(win32service.SERVICE_RUNNING)
win32event.WaitForSingleObject(self.hWaitStop, win32event.INFINITE)
# Shutting Down
self.stopping()
self.ReportServiceStatus(win32service.SERVICE_STOPPED)
def starting(self):
try:
self.appdata = getCurrentUsersAppDataPath()
self.propfile = os.path.join(self.appdata, product_name(), "maestro.config")
self.address = "0.0.0.0"
self.port = 12089
self.props = DefaultProperties.instance()
self.props.loadConfiguration(self.propfile)
self.server = MaestroServer(AUTH_WINDOWS, self.address, self.port, self.propfile)
self.thread = Thread(target = self.server.run, args = ())
self.thread.start()
return True
        except Exception as e:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
return False
def stopping(self):
self.server.shutdown()
return True
if __name__ == '__main__':
win32serviceutil.HandleCommandLine(MaestroService)
|
dcom.py
|
"""
Copyright 2021-2021 The jdh99 Authors. All rights reserved.
Device Communication Protocol (DCOM): a protocol for communication between devices. DCOM can be used for RPC communication between IoT devices.
Authors: jdh99 <jdh821@163.com>
"""
from dcompy.rx import *
from dcompy.common import *
import threading
import asyncio
def load(param: LoadParam):
set_load_param(param)
rx_load()
threading.Thread(target=_main_thread).start()
def _main_thread():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.create_task(block_rx_run())
loop.create_task(block_tx_run())
loop.create_task(waitlist_run())
try:
loop.run_forever()
except KeyboardInterrupt:
pass
loop.close()
|
service_spending_logger.py
|
#!/usr/bin/env python3
# Imports
from telegram.ext import Updater, CommandHandler, CallbackContext, Filters
from telegram import ChatAction
import os, sys, threading, logging
import pytz
from functools import wraps
from datetime import datetime
import sql_adapter_spending_logger as sql_adapter
import config
# Logging features
logging.basicConfig(
# for debugging in server, uncomment this line to write log in file
#filename=str(config.logging_file),
format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='(%d-%b-%y %H:%M:%S)',
level=logging.INFO)
logger = logging.getLogger(__name__)
# Decorators
def restricted(func):
@wraps(func)
def decorator(update, context, *args, **kwargs):
user_id = update.effective_user.id
if user_id not in config.ALLOWED_USER_ID:
update.message.reply_text(
                'This is a private bot.\n'
                "However, if you're interested, enter /source to get the source code.")
logger.error(f'Unauthorized access. Access denied for {user_id}')
return
return func(update, context, *args, **kwargs)
return decorator
def send_action(action):
def decorator(func):
def command_func(update, context, *args, **kwargs):
context.bot.send_chat_action(chat_id=update.effective_message.chat_id, action=action)
return func(update, context, *args, **kwargs)
return command_func
return decorator
send_typing_action = send_action(ChatAction.TYPING)
# Telegram function
@send_typing_action
def start(update, context):
update.message.reply_text(
'Hi, I am a telegram bot.\n'
'I can help you to track and log your money spending.\n\n'
'/help - show help and use instructions\n'
'/source - get the source from git')
logger.info(f'{update.message.from_user.first_name} used command: {update.message.text}')
@send_typing_action
def source_code(update, context):
update.message.reply_text(
'Developer: akirasy\n\n'
'Code (MIT License):\n'
'https://github.com/akirasy/random-apps.git')
logger.info(f'{update.message.from_user.first_name} used command: {update.message.text}')
@send_typing_action
@restricted
def show_help(update, context):
update.message.reply_text(
'Command summary:\n\n'
'/help - Show this message\n'
'/source - show source code in git\n'
'/admin_reload - reload telegram service\n\n'
'/add {price} {item_name} - add entry to database\n\n'
'/check - current month database entry\n'
'/check {month} - desired month database entry\n\n'
'/sql {sql_command} - execute sql command')
logger.info(f'{update.message.from_user.first_name} used command: {update.message.text}')
@send_typing_action
@restricted
def add_entry(update, context):
if len(update.message.text.split()) < 3:
        update.message.reply_text('Wrong syntax. Please refer to /help for more info.')
else:
command, price, item = update.message.text.split(maxsplit=2)
month = datetime.now().strftime('%b%y')
sql_adapter.add_data(month, item, price)
        update.message.reply_text(
            'Entry added:\n'
            f'{item}: RM{price}')
logger.info(f'{update.message.from_user.first_name} used command: {update.message.text}')
@send_typing_action
@restricted
def check_entry(update, context):
if len(update.message.text.split()) == 2:
command, month = update.message.text.split()
else:
month = datetime.now().strftime('%b%y')
total_spending, detailed_spending = sql_adapter.get_data(month)
update.message.reply_text(
f'Spending summary for {month}\n'
f' Total: RM{total_spending}\n'
' Detailed spending items:\n'
f'{detailed_spending}')
logger.info(f'{update.message.from_user.first_name} used command: {update.message.text}')
@send_typing_action
@restricted
def sql_command(update, context):
command, sql_input = update.message.text.split(maxsplit=1)
output = sql_adapter.sql_command(sql_input)
update.message.reply_text(output)
logger.info(f'{update.message.from_user.first_name} used command: {update.message.text}')
def init_database(update, context):
month = datetime.now().strftime('%b%y')
sql_adapter.create_table(month)
def new_month(context: CallbackContext):
month = datetime.now().strftime('%b%y')
sql_adapter.create_table(month)
# Main method
def main():
updater = Updater(token=config.BOT_TOKEN, use_context=True)
# Code refresher
def stop_and_restart():
updater.stop()
os.execl(sys.executable, sys.executable, *sys.argv)
def restart_telegram(update, context):
update.message.reply_text('Bot is restarting...')
threading.Thread(target=stop_and_restart).start()
logger.info('Reloading telegram service...')
updater.dispatcher.add_handler(CommandHandler('admin_reload', restart_telegram, filters=Filters.user(config.DEVELOPER_ID)))
updater.dispatcher.add_handler(CommandHandler('start' , start))
updater.dispatcher.add_handler(CommandHandler('source' , source_code))
updater.dispatcher.add_handler(CommandHandler('help' , show_help))
updater.dispatcher.add_handler(CommandHandler('sql' , sql_command))
updater.dispatcher.add_handler(CommandHandler('add' , add_entry))
updater.dispatcher.add_handler(CommandHandler('check' , check_entry))
updater.dispatcher.add_handler(CommandHandler('init_database' , init_database))
tz_kul = pytz.timezone('Asia/Kuala_Lumpur')
job_time = tz_kul.localize(datetime.strptime('00:05','%H:%M'))
updater.job_queue.run_monthly(callback=new_month, day=1, when=job_time)
updater.start_polling()
logger.info('Telegram service started.')
updater.idle()
if __name__ == '__main__':
main()
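# The script expects a local `config.py` providing roughly the following names
# (a sketch based on the references above; all values are placeholders):
#   BOT_TOKEN = '123456:ABC-DEF...'   # Telegram bot token passed to Updater
#   ALLOWED_USER_ID = [11111111]      # users allowed past the @restricted check
#   DEVELOPER_ID = 11111111           # user allowed to run /admin_reload
#   logging_file = 'bot.log'          # optional file target for logging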
|
segment_all.py
|
import argparse
import json
import logging
import os
import threading
from os.path import exists, join, split, dirname
import time
import numpy as np
import shutil
import sys
from PIL import Image
import torch
import torch.utils.data
from torch import nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import dla_up
import data_transforms as transforms
import dataset
from torch.utils.tensorboard import SummaryWriter
# from ptc_dataset import BasicDataset
from dataset_trans import BasicDataset #TODO
from torch.utils.data import DataLoader, random_split
from miou import RunningConfusionMatrix
from Synchronized_BatchNorm_PyTorch.sync_batchnorm import convert_model
try:
from modules import batchnormsync
HAS_BN_SYNC = True
except ImportError:
HAS_BN_SYNC = False
FORMAT = "[%(asctime)-15s %(filename)s:%(lineno)d %(funcName)s] %(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
global_step = 0 #TODO
CITYSCAPE_PALLETE = np.asarray([
[128, 64, 128],
[244, 35, 232],
[70, 70, 70],
[102, 102, 156],
[190, 153, 153],
[153, 153, 153],
[250, 170, 30],
[220, 220, 0],
[107, 142, 35],
[152, 251, 152],
[70, 130, 180],
[220, 20, 60],
[255, 0, 0],
[0, 0, 142],
[0, 0, 70],
[0, 60, 100],
[0, 80, 100],
[0, 0, 230],
[119, 11, 32],
[0, 0, 0]], dtype=np.uint8)
class SegList(torch.utils.data.Dataset):
def __init__(self, data_dir, phase, transforms, list_dir=None,
out_name=False, out_size=False, binary=False):
self.list_dir = data_dir if list_dir is None else list_dir
self.data_dir = data_dir
self.out_name = out_name
self.phase = phase
self.transforms = transforms
self.image_list = None
self.label_list = None
self.bbox_list = None
self.out_size = out_size
self.binary = binary
self.read_lists()
def __getitem__(self, index):
image = Image.open(join(self.data_dir, self.image_list[index]))
data = [image]
if self.label_list is not None:
label_map = Image.open(join(self.data_dir, self.label_list[index]))
if self.binary:
label_map = Image.fromarray(
(np.array(label_map) > 0).astype(np.uint8))
data.append(label_map)
if self.bbox_list is not None:
data.append(Image.open(join(self.data_dir, self.bbox_list[index])))
data = list(self.transforms(*data))
if self.out_name:
if self.label_list is None:
data.append(data[0][0, :, :])
data.append(self.image_list[index])
if self.out_size:
data.append(torch.from_numpy(np.array(image.size, dtype=int)))
return tuple(data)
def __len__(self):
return len(self.image_list)
def read_lists(self):
image_path = join(self.list_dir, self.phase + '_images.txt')
label_path = join(self.list_dir, self.phase + '_labels.txt')
bbox_path = join(self.list_dir, self.phase + '_bboxes.txt')
assert exists(image_path)
self.image_list = [line.strip() for line in open(image_path, 'r')]
if exists(label_path):
self.label_list = [line.strip() for line in open(label_path, 'r')]
assert len(self.image_list) == len(self.label_list)
if exists(bbox_path):
self.bbox_list = [line.strip() for line in open(bbox_path, 'r')]
assert len(self.image_list) == len(self.bbox_list)
class SegListMS(torch.utils.data.Dataset):
def __init__(self, data_dir, phase, transforms, scales, list_dir=None):
self.list_dir = data_dir if list_dir is None else list_dir
self.data_dir = data_dir
self.phase = phase
self.transforms = transforms
self.image_list = None
self.label_list = None
self.bbox_list = None
self.read_lists()
self.scales = scales
def __getitem__(self, index):
data = [Image.open(join(self.data_dir, self.image_list[index]))]
w, h = data[0].size
if self.label_list is not None:
data.append(Image.open(join(self.data_dir,
self.label_list[index])))
# data = list(self.transforms(*data))
if len(data) > 1:
out_data = list(self.transforms(*data))
else:
out_data = [self.transforms(*data)]
ms_images = [self.transforms(data[0].resize((int(w * s), int(h * s)),
Image.BICUBIC))
for s in self.scales]
out_data.append(self.image_list[index])
out_data.extend(ms_images)
return tuple(out_data)
def __len__(self):
return len(self.image_list)
def read_lists(self):
image_path = join(self.list_dir, self.phase + '_images.txt')
label_path = join(self.list_dir, self.phase + '_labels.txt')
assert exists(image_path)
self.image_list = [line.strip() for line in open(image_path, 'r')]
if exists(label_path):
self.label_list = [line.strip() for line in open(label_path, 'r')]
assert len(self.image_list) == len(self.label_list)
def validate(val_loader, model, criterion, confusion_matrix, print_freq=10, cityscape=False):
batch_time = AverageMeter()
losses = AverageMeter()
score = AverageMeter()
score_1 = AverageMeter()
score_5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
with torch.no_grad():#TODO
for i, (input, target) in enumerate(val_loader):
if type(criterion) in [torch.nn.modules.loss.L1Loss,
torch.nn.modules.loss.MSELoss]:
target = target.float()
input = input.cuda().float()
# target = target.cuda(async=True)
target = target.cuda().long()
output = model(input)[0]
loss = criterion(output, target)
losses.update(loss.item(), input.size(0))
# print("model done")
confusion_matrix.update_matrix(target, output)
top_all, top_1, top_5 = confusion_matrix.compute_current_mean_intersection_over_union()
score.update(top_all, input.size(0))
score_1.update(top_1, input.size(0))
score_5.update(top_5, input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# mIoU is updated for all samples, no need to update again
if i % print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Score {score.val:.3f} ({score.val:.3f})\t'
'Score_1 {score_1.val:.3f} ({score_1.val:.3f})\t'
'Score_5 {score_5.val:.3f} ({score_5.val:.3f})\t'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
score=score, score_1=score_1, score_5=score_5)) #TODO , flush=True
confusion_matrix.show_classes()
print(' * Score {top1.val:.3f}\tScore {score_1.val:.3f}\tScore {score_5.val:.3f}'.format(top1=score, score_1=score_1, score_5=score_5))
return score.val, losses.avg
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target):
"""Computes the precision@k for the specified values of k"""
# batch_size = target.size(0) * target.size(1) * target.size(2)
_, pred = output.max(1)
pred = pred.view(1, -1)
target = target.view(1, -1)
correct = pred.eq(target)
# correct = correct[target != 255]
correct = correct[target != -1]
correct = correct.view(-1)
score = correct.float().sum(0).mul(100.0 / correct.size(0))
# return score.data[0]
return score.item()
def recall(output, target):
"""Computes the precision@k for the specified values of k"""
# batch_size = target.size(0) * target.size(1) * target.size(2)
_, pred = output.max(1)
pred = pred.view(1, -1)
target = target.view(1, -1)
positive_target = target[target == 1]
positive_pred = pred[target == 1]
correct = positive_pred.eq(positive_target)
# correct = correct[target != 255]
# correct = correct[target != -1]
correct = correct.view(-1)
    if correct.size(0):
        score = correct.float().sum(0).mul(100.0 / correct.size(0))
        return score.item()
    return 100.0
def train(train_loader, model, criterion, optimizer, epoch, lr_scheduler, writer,
confusion_matrix, print_freq=10):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
scores = AverageMeter()
scores_1 = AverageMeter()
scores_5 = AverageMeter()
global global_step
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# pdb.set_trace()
if type(criterion) in [torch.nn.modules.loss.L1Loss,
torch.nn.modules.loss.MSELoss]:
target = target.float()
input = input.cuda().float()
# target = target.cuda(async=True)
target = target.cuda().long()
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
output = model(input_var)[0]
loss = criterion(output, target_var)
writer.add_scalar('Loss/train', loss.item(), global_step)
losses.update(loss.item(), input.size(0))
        confusion_matrix.update_matrix(target_var, output)
top_all, top_1, top_5 = confusion_matrix.compute_current_mean_intersection_over_union()
scores.update(top_all, input.size(0))
scores_1.update(top_1, input.size(0))
scores_5.update(top_5, input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
global_step += 1
if i % print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Score {top1.val:.3f} ({top1.avg:.3f})\t'
'Score_1 {scores_1.val:.3f} ({scores_1.avg:.3f})\t'
'Score_5 {scores_5.val:.3f} ({scores_5.avg:.3f})\t'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=scores, scores_1=scores_1, scores_5=scores_5))
confusion_matrix.show_classes()
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
args = parse_args()
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, join(args.checkpoint_dir, 'model_best.pth.tar'))
def train_seg(args):
batch_size = args.batch_size
num_workers = args.workers
crop_size = args.crop_size
checkpoint_dir = args.checkpoint_dir
print(' '.join(sys.argv))
for k, v in args.__dict__.items():
print(k, ':', v)
pretrained_base = args.pretrained_base
# print(dla_up.__dict__.get(args.arch))
single_model = dla_up.__dict__.get(args.arch)(
classes=args.classes, down_ratio=args.down)
single_model = convert_model(single_model)
model = torch.nn.DataParallel(single_model).cuda()
print('model_created')
if args.edge_weight > 0:
weight = torch.from_numpy(
np.array([1, args.edge_weight], dtype=np.float32))
# criterion = nn.NLLLoss2d(ignore_index=255, weight=weight)
criterion = nn.NLLLoss2d(ignore_index=-1, weight=weight)
else:
# criterion = nn.NLLLoss2d(ignore_index=255)
criterion = nn.NLLLoss2d(ignore_index=-1)
criterion.cuda()
t = []
if args.random_rotate > 0:
t.append(transforms.RandomRotate(args.random_rotate))
if args.random_scale > 0:
t.append(transforms.RandomScale(args.random_scale))
t.append(transforms.RandomCrop(crop_size)) #TODO
if args.random_color:
t.append(transforms.RandomJitter(0.4, 0.4, 0.4))
t.extend([transforms.RandomHorizontalFlip()]) #TODO
t_val = []
t_val.append(transforms.RandomCrop(crop_size))
dir_img = '/shared/xudongliu/data/argoverse-tracking/argo_track_all/train/image_02/'
dir_mask = '/shared/xudongliu/data/argoverse-tracking/argo_track_all/train/' + args.target + '/'
my_train = BasicDataset(dir_img, dir_mask, transforms.Compose(t), is_train=True)
val_dir_img = '/shared/xudongliu/data/argoverse-tracking/argo_track_all/val/image_02/'
val_dir_mask = '/shared/xudongliu/data/argoverse-tracking/argo_track_all/val/' + args.target + '/'
my_val = BasicDataset(val_dir_img, val_dir_mask, transforms.Compose(t_val), is_train=True)
train_loader = torch.utils.data.DataLoader(
my_train,
batch_size=batch_size, shuffle=True, num_workers=num_workers,
pin_memory=True
)
val_loader = torch.utils.data.DataLoader(
my_val, batch_size=batch_size, shuffle=False, num_workers=num_workers,pin_memory=True) #TODO batch_size
print("loader created")
optimizer = torch.optim.SGD(single_model.optim_parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
lr_scheduler = None #TODO
cudnn.benchmark = True
best_prec1 = 0
start_epoch = 0
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
confusion_labels = np.arange(0, 5)
val_confusion_matrix = RunningConfusionMatrix(confusion_labels, ignore_label=-1)
if args.evaluate:
confusion_labels = np.arange(0, 2)
val_confusion_matrix = RunningConfusionMatrix(confusion_labels, ignore_label=-1, reduce=True)
validate(val_loader, model, criterion, confusion_matrix=val_confusion_matrix)
return
writer = SummaryWriter(comment = args.log)
# TODO test val
# print("test val")
# prec1 = validate(val_loader, model, criterion, confusion_matrix=val_confusion_matrix)
for epoch in range(start_epoch, args.epochs):
train_confusion_matrix = RunningConfusionMatrix(confusion_labels, ignore_label=-1)
lr = adjust_learning_rate(args, optimizer, epoch)
print('Epoch: [{0}]\tlr {1:.06f}'.format(epoch, lr))
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, lr_scheduler,
confusion_matrix=train_confusion_matrix, writer=writer)
checkpoint_path = os.path.join(checkpoint_dir,'checkpoint_{}.pth.tar'.format(epoch))
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict()
}, is_best=False, filename=checkpoint_path)
# evaluate on validation set
val_confusion_matrix = RunningConfusionMatrix(confusion_labels, ignore_label=-1)
prec1, loss_val = validate(val_loader, model, criterion, confusion_matrix=val_confusion_matrix)
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
writer.add_scalar('mIoU/epoch', prec1, epoch+1)
writer.add_scalar('loss/epoch', loss_val, epoch+1)
checkpoint_path = os.path.join(checkpoint_dir,'checkpoint_{}.pth.tar'.format(epoch))
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=checkpoint_path)
if (epoch + 1) % args.save_freq == 0:
history_path = 'checkpoint_{:03d}.pth.tar'.format(epoch + 1)
shutil.copyfile(checkpoint_path, history_path)
writer.close()
def adjust_learning_rate(args, optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10
every 30 epochs"""
if args.lr_mode == 'step':
lr = args.lr * (0.1 ** (epoch // args.step))
elif args.lr_mode == 'poly':
lr = args.lr * (1 - epoch / args.epochs) ** 0.9
else:
raise ValueError('Unknown lr mode {}'.format(args.lr_mode))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
def fast_hist(pred, label, n):
k = (label >= 0) & (label < n)
return np.bincount(
n * label[k].astype(int) + pred[k], minlength=n ** 2).reshape(n, n)
def per_class_iu(hist):
return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
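# Worked example: with two classes and hist = [[3, 1], [2, 4]], the diagonal is
# [3, 4], the row sums are [4, 6] and the column sums are [5, 5], so
# per_class_iu returns [3 / (4 + 5 - 3), 4 / (6 + 5 - 4)] = [0.50, 0.571...].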
def crop_image(image, size):
left = (image.size[0] - size[0]) // 2
upper = (image.size[1] - size[1]) // 2
right = left + size[0]
lower = upper + size[1]
return image.crop((left, upper, right, lower))
def save_output_images(predictions, filenames, output_dir, sizes=None):
"""
Saves a given (B x C x H x W) into an image file.
If given a mini-batch tensor, will save the tensor as a grid of images.
"""
# pdb.set_trace()
for ind in range(len(filenames)):
im = Image.fromarray(predictions[ind].astype(np.uint8))
if sizes is not None:
im = crop_image(im, sizes[ind])
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def save_prob_images(prob, filenames, output_dir, sizes=None):
for ind in range(len(filenames)):
im = Image.fromarray(
(prob[ind][1].squeeze().data.cpu().numpy() * 255).astype(np.uint8))
if sizes is not None:
im = crop_image(im, sizes[ind])
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def save_colorful_images(predictions, filenames, output_dir, palettes):
"""
Saves a given (B x C x H x W) into an image file.
If given a mini-batch tensor, will save the tensor as a grid of images.
"""
for ind in range(len(filenames)):
im = Image.fromarray(palettes[predictions[ind].squeeze()])
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def test(eval_data_loader, model, num_classes,
output_dir='pred', has_gt=True, save_vis=False):
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
hist = np.zeros((num_classes, num_classes))
for iter, (image, label, name, size) in enumerate(eval_data_loader):
data_time.update(time.time() - end)
image_var = Variable(image, requires_grad=False, volatile=True)
final = model(image_var)[0]
_, pred = torch.max(final, 1)
pred = pred.cpu().data.numpy()
batch_time.update(time.time() - end)
prob = torch.exp(final)
if save_vis:
save_output_images(pred, name, output_dir, size)
if prob.size(1) == 2:
save_prob_images(prob, name, output_dir + '_prob', size)
else:
save_colorful_images(pred, name, output_dir + '_color',
CITYSCAPE_PALLETE)
if has_gt:
label = label.numpy()
hist += fast_hist(pred.flatten(), label.flatten(), num_classes)
print('===> mAP {mAP:.3f}'.format(
mAP=round(np.nanmean(per_class_iu(hist)) * 100, 2)))
end = time.time()
print('Eval: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
.format(iter, len(eval_data_loader), batch_time=batch_time,
data_time=data_time))
ious = per_class_iu(hist) * 100
print(' '.join('{:.03f}'.format(i) for i in ious))
if has_gt: # val
return round(np.nanmean(ious), 2)
def resize_4d_tensor(tensor, width, height):
tensor_cpu = tensor.cpu().numpy()
if tensor.size(2) == height and tensor.size(3) == width:
return tensor_cpu
out_size = (tensor.size(0), tensor.size(1), height, width)
out = np.empty(out_size, dtype=np.float32)
def resize_one(i, j):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
def resize_channel(j):
for i in range(tensor.size(0)):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
workers = [threading.Thread(target=resize_channel, args=(j,))
for j in range(tensor.size(1))]
for w in workers:
w.start()
for w in workers:
w.join()
return out
def test_ms(eval_data_loader, model, num_classes, scales,
output_dir='pred', has_gt=True, save_vis=False):
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
hist = np.zeros((num_classes, num_classes))
num_scales = len(scales)
for iter, input_data in enumerate(eval_data_loader):
data_time.update(time.time() - end)
if has_gt:
name = input_data[2]
label = input_data[1]
else:
name = input_data[1]
h, w = input_data[0].size()[2:4]
images = [input_data[0]]
images.extend(input_data[-num_scales:])
outputs = []
for image in images:
image_var = Variable(image, requires_grad=False, volatile=True)
final = model(image_var)[0]
outputs.append(final.data)
final = sum([resize_4d_tensor(out, w, h) for out in outputs])
pred = final.argmax(axis=1)
batch_time.update(time.time() - end)
if save_vis:
save_output_images(pred, name, output_dir)
save_colorful_images(pred, name, output_dir + '_color',
CITYSCAPE_PALLETE)
if has_gt:
label = label.numpy()
hist += fast_hist(pred.flatten(), label.flatten(), num_classes)
logger.info('===> mAP {mAP:.3f}'.format(
mAP=round(np.nanmean(per_class_iu(hist)) * 100, 2)))
end = time.time()
logger.info('Eval: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
.format(iter, len(eval_data_loader), batch_time=batch_time,
data_time=data_time))
if has_gt: # val
ious = per_class_iu(hist) * 100
logger.info(' '.join('{:.03f}'.format(i) for i in ious))
return round(np.nanmean(ious), 2)
def test_seg(args):
batch_size = args.batch_size
num_workers = args.workers
phase = args.phase
for k, v in args.__dict__.items():
print(k, ':', v)
single_model = dla_up.__dict__.get(args.arch)(
args.classes, down_ratio=args.down)
model = torch.nn.DataParallel(single_model).cuda()
# data_dir = args.data_dir
# info = dataset.load_dataset_info(data_dir)
# normalize = transforms.Normalize(mean=info.mean, std=info.std)
# scales = [0.5, 0.75, 1.25, 1.5, 1.75]
# scales = [0.5, 0.75, 1.25, 1.5]
# t = []
# if args.crop_size > 0:
# t.append(transforms.PadToSize(args.crop_size))
# t.extend([transforms.ToTensor(), normalize])
# if args.ms:
# data = SegListMS(data_dir, phase, transforms.Compose(t), scales)
# else:
# data = SegList(data_dir, phase, transforms.Compose(t),
# out_name=True, out_size=True,
# binary=args.classes == 2)
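    # NOTE: the SegList/SegListMS construction above is commented out, so `data`
    # (and `scales` when --ms is set) must be defined before building this loader.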
test_loader = torch.utils.data.DataLoader(
data,
batch_size=batch_size, shuffle=False, num_workers=num_workers,
pin_memory=False
)
cudnn.benchmark = True
# optionally resume from a checkpoint
start_epoch = 0
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
# start_epoch = checkpoint['epoch']
# best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
# print("=> loaded checkpoint '{}' (epoch {})"
# .format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
out_dir = '{}_{:03d}_{}'.format(args.arch, start_epoch, phase)
if len(args.test_suffix) > 0:
out_dir += '_' + args.test_suffix
if args.ms:
out_dir += '_ms'
if args.ms:
mAP = test_ms(test_loader, model, args.classes, save_vis=True,
has_gt=phase != 'test' or args.with_gt,
output_dir=out_dir,
scales=scales)
else:
mAP = test(test_loader, model, args.classes, save_vis=True,
has_gt=phase != 'test' or args.with_gt, output_dir=out_dir)
print('mAP: ', mAP)
def parse_args():
# Training settings
parser = argparse.ArgumentParser(
description='DLA Segmentation and Boundary Prediction')
parser.add_argument('cmd', choices=['train', 'test'])
# parser.add_argument('-d', '--data-dir', default=None)
parser.add_argument('-c', '--classes', default=0, type=int)
parser.add_argument('-s', '--crop-size', default=0, type=int)
parser.add_argument('--step', type=int, default=200)
parser.add_argument('--arch')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--train-samples', default=16000, type=int)
parser.add_argument('--loss', default='l1', type=str)
parser.add_argument('--test-batch-size', type=int, default=1000,
metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('-e', '--evaluate', dest='evaluate',
action='store_true',
help='evaluate model on validation set')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='enables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='- seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=1, metavar='N',
help='how many batches to wait before logging '
'training status')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained-base', default=None,
help='use pre-trained model')
parser.add_argument('-j', '--workers', type=int, default=8)
parser.add_argument('--down', default=2, type=int, choices=[2, 4, 8, 16],
help='Downsampling ratio of IDA network output, which '
'is then upsampled to the original resolution '
'with bilinear interpolation.')
parser.add_argument('--load-release', dest='load_rel', default=None)
parser.add_argument('--phase', default='val')
parser.add_argument('--lr-mode', default='step')
parser.add_argument('--bn-sync', action='store_true', default=False)
parser.add_argument('--random-scale', default=0, type=float)
parser.add_argument('--random-rotate', default=0, type=int)
parser.add_argument('--random-color', action='store_true', default=False)
parser.add_argument('--save-freq', default=10, type=int)
parser.add_argument('--ms', action='store_true', default=False)
parser.add_argument('--edge-weight', type=int, default=-1)
parser.add_argument('--test-suffix', default='')
parser.add_argument('--with-gt', action='store_true')
parser.add_argument('-v', '--validation', dest='val', type=float, default=10.0,
help='Percent of the data that is used as validation (0-100)')
parser.add_argument('-i', '--checkpoint-dir')
parser.add_argument('--log')
parser.add_argument('--target')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
# assert args.data_dir is not None
assert args.classes > 0
print(' '.join(sys.argv))
print(args)
return args
def main():
args = parse_args()
if not exists(args.checkpoint_dir):
os.makedirs(args.checkpoint_dir)
if args.bn_sync:
if HAS_BN_SYNC:
dla_up.set_bn(batchnormsync.BatchNormSync)
else:
print('batch normalization synchronization across GPUs '
'is not imported.')
if args.cmd == 'train':
train_seg(args)
elif args.cmd == 'test':
test_seg(args)
# validate(val_loader, model, criterion, eval_score=None, print_freq=10)
def my_val():
args = parse_args()
val_dir_img = '/shared/xudongliu/data/argoverse-tracking/argo_track/val/image_02/'
val_dir_mask = '/shared/xudongliu/data/argoverse-tracking/argo_track/val/npy_mask/'
my_val = BasicDataset(val_dir_img, val_dir_mask, None, is_train=False)
val_loader = torch.utils.data.DataLoader(
# SegList(data_dir, 'val', transforms.Compose([
# transforms.RandomCrop(crop_size),
# # transforms.RandomHorizontalFlip(),
# transforms.ToTensor(),
# normalize,
# ]),
# binary=(args.classes == 2)),
my_val, batch_size=args.batch_size, shuffle=False, num_workers=args.workers,pin_memory=True)
single_model = dla_up.__dict__.get(args.arch)(
args.classes, down_ratio=args.down)
checkpoint = torch.load(args.resume)
# print(checkpoint['epoch'])
# model.load_state_dict(checkpoint['state_dict'])
# single_model.load_state_dict(checkpoint)
model = torch.nn.DataParallel(single_model).cuda()
model.load_state_dict(checkpoint['state_dict']) #TODO
criterion = nn.NLLLoss2d(ignore_index=-1)
    confusion_labels = np.arange(0, args.classes)
    val_confusion_matrix = RunningConfusionMatrix(confusion_labels, ignore_label=-1)
    score, _ = validate(val_loader, model, criterion, confusion_matrix=val_confusion_matrix, print_freq=10, cityscape=True)
print(score)
if __name__ == '__main__':
main()
# my_val()
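# Example invocation (illustrative only; the architecture name, class count and
# paths are assumptions, not values taken from this repository):
#   python segment_all.py train --arch dla34up -c 5 -s 512 --batch-size 16 \
#       --lr 0.01 --checkpoint-dir ./checkpoints --log run1 --target npy_mask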
|
translate.py
|
import argparse
import hashlib
import itertools
import multiprocessing
import os
import subprocess
import sys
import time
import traceback
from collections import OrderedDict, deque
def count_lines(f):
i = 0
if not os.path.exists(f):
return i
with open(f) as r:
for _ in r:
i += 1
return i
def worker(queue, rqueue, entry, model, N, device, extra, quiet):
env = dict(os.environ) # Make a copy of the current environment
if device != "cpu" and not device.startswith("gpu"):
device = "gpu%s" % device
if isinstance(extra, (list, tuple)):
extra = " ".join(extra)
env['THEANO_FLAGS'] = 'device={}'.format(device)
# import theano.sandbox.cuda
# theano.sandbox.cuda.use(gpu)
cmd = 'python -u {entry} translate --model {model} {extra}'.format(
entry=entry,
model=model,
extra=extra)
if not quiet:
sys.stderr.write("translate cmd: {}\n".format(cmd))
p = subprocess.Popen(cmd.split(),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=open(os.devnull, 'w'),
env=env)
while True:
i, line = queue.get()
if i < 0: # end of queue
break
p.stdin.write("%s\n" % line.strip())
out = []
for _ in xrange(N):
out.append(p.stdout.readline())
rqueue.put((i, out))
def translate(model, signature, pending, done, src2out, N, devices, entry, extra, quiet, write_stdout):
n_pending = map(count_lines, pending)
n_done = sum(map(count_lines, done)) if done else 0
n_total = sum(n_pending) + n_done
if sum(n_pending) == 0:
return
out_dir = os.path.dirname(src2out.values()[0])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
if pending:
tic = time.time()
fds = [open(f) for f in pending]
reader = itertools.chain(*fds)
queue = multiprocessing.Queue()
rqueue = multiprocessing.Queue()
ps = [multiprocessing.Process(target=worker, args=(queue, rqueue, entry, model, N, device, extra, quiet))
for
device in
devices]
try:
for p in ps:
p.daemon = True
p.start()
# feed
for i, line in enumerate(reader):
queue.put((i, line))
for _ in ps:
queue.put((-1, None)) # end of queue
# printing
hist = deque(maxlen=5) # average over 5 past records
# consume to prevent holding all translations in memory
buffer = []
i = 0
j = 0
writer = None
while i < len(pending):
time.sleep(1)
                # check exit codes, and only get() when qsize() > 0, to avoid a
                # deadlock if an error is raised inside a worker subprocess
if any([p.exitcode > 0 for p in ps]): # error in subprocess
sys.stderr.write("Error occurs in worker")
raise RuntimeError()
added = False
while rqueue.qsize() > 0:
buffer.append(rqueue.get())
added = True
if added:
buffer = sorted(buffer, key=lambda x: x[0])
while buffer and buffer[0][0] == sum(n_pending[:i]) + j:
idx, out_lines = buffer[0]
if writer is None:
if write_stdout:
writer = sys.stdout
else:
writer = open(src2out[pending[i]], 'w')
writer.write(''.join(out_lines))
j += 1
if not j < n_pending[i]:
if not write_stdout and writer is not None:
writer.close()
writer = None
i += 1
j = 0
buffer = buffer[1:] # remove processed output
if not quiet:
n1 = n_done + sum(n_pending[:i]) + j + rqueue.qsize() + len(buffer)
hist.append(n1)
rate = 0.0
if len(hist) > 1:
rate = (hist[-1] - hist[0] + 0.0) / len(hist)
toc = time.time()
sys.stderr.write(
'\r{}/{}, {:.2f} s/sec, {}'.format(n1, n_total, rate,
time.strftime('%H:%M:%S', time.gmtime(toc - tic))))
if not quiet:
sys.stderr.write('\n')
except KeyboardInterrupt:
traceback.print_exc()
for p in ps:
p.terminate()
p.join()
sys.exit(1)
except Exception as e:
traceback.print_exc()
for p in ps:
p.terminate()
p.join()
sys.exit(1)
def main(args):
model = args.model
entry = args.entry
inputs = args.inputs
N = args.N
tag = args.tag
gpu = args.gpu
quiet = args.quiet
remains = args.remains
write_stdout = args.stdout
force = args.force
extra = " ".join(remains)
if N is None:
N = 1
if gpu:
sp = [item.split(':') for item in gpu]
sp = [item if len(item) == 2 else [item[0], '1'] for item in sp]
devices = list(itertools.chain(*[itertools.repeat(id, int(n_task)) for id, n_task in sp]))
else:
devices = ['cpu']
signature = hashlib.md5(open(model, 'rb').read()).hexdigest()
if tag:
signature = '%s-%s' % (signature, tag)
if args.signature:
print signature
sys.exit(0)
src_signature = [hashlib.md5(open(f, 'rb').read()).hexdigest() for f in inputs]
# skip translated
src2out = OrderedDict()
for s, s_sign in itertools.izip(inputs, src_signature):
output = os.path.join('out', 'translations',
'{}-{}-{}-{}'.format(os.path.basename(model), os.path.basename(s), signature, s_sign))
src2out[s] = output
if args.list_outputs:
for output in src2out.itervalues():
print output
sys.exit(0)
pending = []
done = []
if force:
pending = inputs
else:
for s, o in src2out.iteritems():
if os.path.exists(o) and count_lines(s) == count_lines(o):
# skip translated
done.append(s)
else:
pending.append(s)
if not quiet:
for f in done:
sys.stderr.write('skip {}\n'.format(f))
translate(model, signature, pending, done, src2out, N, devices, entry, extra, quiet, write_stdout)
def valid_file(parser, arg):
if arg and not os.path.exists(arg):
parser.error('The file doesn\'t exist: {}'.format(arg))
else:
return arg
def parse_args():
parser = argparse.ArgumentParser()
file_type = lambda arg: valid_file(parser, arg)
parser.add_argument('model')
parser.add_argument('--entry', '-e', default='rnnsearch.py')
parser.add_argument('--inputs', '-i', type=file_type, nargs='+')
parser.add_argument('--N', type=int, help='the invoked translator returns N lines for each input')
parser.add_argument('--sign', action='store_true', dest='signature', help='print signature and exit')
parser.add_argument('--list-outputs', action='store_true',
help='list output names in correspondence with given model and input files, then exit')
parser.add_argument('--tag', '-t', type=str)
parser.add_argument('--gpu', '-g', nargs='+', type=str, help='e.g. --gpu 0:3 1:2 or --gpu 0 1')
parser.add_argument('--quiet', '-q', action="store_true", help='suppress procedural printings')
parser.add_argument('--stdout', action="store_true",
help='write to stdout instead of files, if True, suppress all irrelevant stdout')
parser.add_argument('--force', '-f', action="store_true", help='force to translate all input files')
args, remains = parser.parse_known_args()
args.remains = remains
if not args.signature:
valid_file(parser, args.entry)
    if not args.inputs:
        parser.error('--inputs/-i is required')
    if args.N and args.N < 1:
        parser.error('--N should be a positive integer')
return args
if __name__ == '__main__':
args = parse_args()
main(args)
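# Example invocation (illustrative; the model and input file names are
# placeholders).  Flags that parse_known_args does not recognise, such as
# --beam-size below, end up in `remains` and are forwarded verbatim to the
# --entry script (rnnsearch.py by default):
#   python translate.py model.npz -i test.src --gpu 0:2 1:1 --beam-size 10
# '--gpu 0:2 1:1' launches two worker processes on gpu0 and one on gpu1.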
|
server.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import asyncio
import functools
import inspect
import logging
import os
import threading
import time
from concurrent import futures
import grpc
from grpc import _common, _server
from grpc._cython.cygrpc import StatusCode
from grpc._server import _serialize_response, _status, _abort, _Context, _unary_request, \
_select_thread_pool_for_behavior, _unary_response_in_pool
from notification_service.event_storage import DbEventStorage
from notification_service.high_availability import DbHighAvailabilityStorage, SimpleNotificationServerHaManager
from notification_service.proto import notification_service_pb2_grpc
from notification_service.service import NotificationService, HighAvailableNotificationService
from notification_service.server_config import NotificationServerConfig
from notification_service.util.utils import get_ip_addr
_PORT = 50051
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
class NotificationServer(object):
"""
Block/Async server of Notification function service.
"""
def __init__(self, service, port=_PORT):
self.executor = Executor(futures.ThreadPoolExecutor(max_workers=10))
self.grpc_server = grpc.server(self.executor)
self.service = service
notification_service_pb2_grpc.add_NotificationServiceServicer_to_server(service,
self.grpc_server)
self.grpc_server.add_insecure_port('[::]:' + str(port))
def run(self, is_block=False):
"""
start the notification service
:param is_block: is block mode
:return:
"""
self.service.start()
self.grpc_server.start()
logging.info('Notification server started.')
if is_block:
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
self.stop()
else:
pass
def stop(self):
"""
stop the notification service
:return:
"""
self.executor.shutdown()
self.service.stop()
self.grpc_server.stop(0)
logging.info('Notification server stopped.')
class NotificationServerRunner(object):
def __init__(self, config_file):
if not os.path.exists(config_file):
raise IOError('Config file {} not exist!'.format(config_file))
self.config = NotificationServerConfig(config_file=config_file)
def _init_server(self):
if self.config.db_uri:
self.storage = DbEventStorage(self.config.db_uri)
else:
raise Exception('Failed to start notification service without database connection info.')
if self.config.enable_ha:
server_uri = self.config.advertised_uri \
if self.config.advertised_uri is not None else get_ip_addr() + ':' + str(self.config.port)
ha_storage = DbHighAvailabilityStorage(db_conn=self.config.db_uri)
ha_manager = SimpleNotificationServerHaManager()
service = HighAvailableNotificationService(
self.storage,
ha_manager,
server_uri,
ha_storage,
5000)
self.server = NotificationServer(service=service,
port=int(self.config.port))
else:
self.server = NotificationServer(service=NotificationService(self.storage),
port=int(self.config.port))
def start(self, is_block=False):
self._init_server()
self.server.run(is_block)
def stop(self):
self.server.stop()
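# Minimal usage sketch (the config path is a placeholder; its keys are parsed
# by NotificationServerConfig and are not reproduced here):
#
#   runner = NotificationServerRunner('/path/to/notification_server.yaml')
#   runner.start(is_block=True)   # serves until KeyboardInterrupt, then stops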
def _loop(loop: asyncio.AbstractEventLoop):
asyncio.set_event_loop(loop)
if not loop.is_running() or loop.is_closed():
loop.run_forever()
pending = asyncio.all_tasks(loop=loop)
if pending:
loop.run_until_complete(asyncio.gather(*pending))
class Executor(futures.Executor):
def __init__(self, thread_pool, loop=None):
super().__init__()
self._shutdown = False
self._thread_pool = thread_pool
self._loop = loop or asyncio.get_event_loop()
if not self._loop.is_running() or self._loop.is_closed():
self._thread = threading.Thread(target=_loop, args=(self._loop,), daemon=True)
self._thread.start()
def submit(self, fn, *args, **kwargs):
if self._shutdown:
raise RuntimeError('Cannot schedule new futures after shutdown.')
if not self._loop.is_running():
raise RuntimeError('Loop must be started before any function could be submitted.')
if inspect.iscoroutinefunction(fn):
coroutine = fn(*args, **kwargs)
return asyncio.run_coroutine_threadsafe(coroutine, self._loop)
else:
func = functools.partial(fn, *args, **kwargs)
return self._loop.run_in_executor(self._thread_pool, func)
def shutdown(self, wait=True):
self._shutdown = True
if wait:
self._thread_pool.shutdown()
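# Behaviour note: Executor.submit() dispatches coroutine functions onto the
# asyncio loop owned by this executor (via run_coroutine_threadsafe) and plain
# callables onto the wrapped thread pool, which lets gRPC handlers be written
# either as synchronous functions or as coroutines.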
async def _call_behavior_async(rpc_event, state, behavior, argument, request_deserializer):
context = _Context(rpc_event, state, request_deserializer)
try:
return await behavior(argument, context), True
except Exception as e:
with state.condition:
if e not in state.rpc_errors:
logging.exception(e)
_abort(state, rpc_event.operation_call, StatusCode.unknown, _common.encode(e))
return None, False
async def _unary_response_in_pool_async(rpc_event, state, behavior, argument_thunk, request_deserializer,
response_serializer):
argument = argument_thunk()
if argument is not None:
response, proceed = await _call_behavior_async(rpc_event, state, behavior, argument, request_deserializer)
if proceed:
serialized_response = _serialize_response(rpc_event, state, response, response_serializer)
if serialized_response is not None:
_status(rpc_event, state, serialized_response)
def _handle_unary_unary(rpc_event, state, method_handler, default_thread_pool):
unary_request = _unary_request(rpc_event, state, method_handler.request_deserializer)
thread_pool = _select_thread_pool_for_behavior(method_handler.unary_unary, default_thread_pool)
if asyncio.iscoroutinefunction(method_handler.unary_unary):
return thread_pool.submit(_unary_response_in_pool_async, rpc_event, state, method_handler.unary_unary,
unary_request, method_handler.request_deserializer,
method_handler.response_serializer)
else:
return thread_pool.submit(_unary_response_in_pool, rpc_event, state, method_handler.unary_unary, unary_request,
method_handler.request_deserializer, method_handler.response_serializer)
_server._handle_unary_unary = _handle_unary_unary
|
progress.py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, ParaTools, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# (1) Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# (2) Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# (3) Neither the name of ParaTools, Inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Draw progress indicators on the console.
Show bars or spinners, possibly with instantaneous CPU load average.
"""
import os
import sys
import threading
import itertools
from datetime import datetime, timedelta
from taucmdr import logger
from taucmdr.error import ConfigurationError
LOGGER = logger.get_logger(__name__)
def _read_proc_stat_cpu():
with open('/proc/stat') as fin:
cpu_line = fin.readline()
values = (float(x) for x in cpu_line.split()[1:])
fields = 'user', 'nice', 'sys', 'idle', 'iowait', 'irq', 'sirq'
return dict(zip(fields, values))
def _proc_stat_cpu_load_average():
if not hasattr(_proc_stat_cpu_load_average, 'prev'):
_proc_stat_cpu_load_average.prev = _read_proc_stat_cpu()
prev = _proc_stat_cpu_load_average.prev
cur = _read_proc_stat_cpu()
if prev and cur:
prev_idle = prev['idle'] + prev['iowait']
cur_idle = cur['idle'] + cur['iowait']
prev_total = sum(prev.itervalues())
cur_total = sum(cur.itervalues())
diff_total = cur_total - prev_total
diff_idle = cur_idle - prev_idle
_proc_stat_cpu_load_average.prev = cur
if diff_total:
return (diff_total - diff_idle) / diff_total
return 0.0
def load_average():
"""Calculate the CPU load average.
Returns:
float: Load average since last time this routine was called
or None if couldn't calculate load average.
"""
try:
cpu_load_avg = _proc_stat_cpu_load_average()
except IOError:
cpu_load_avg = None
return cpu_load_avg
class ProgressIndicator(object):
"""A fancy progress indicator to entertain antsy users."""
_spinner = itertools.cycle(['-', '\\', '|', '/'])
_indent = ' '
def __init__(self, label, total_size=0, block_size=1, show_cpu=True, auto_refresh=0.25):
mode = os.environ.get('__TAUCMDR_PROGRESS_BARS__', 'full').lower()
if mode not in ('full', 'disabled'):
raise ConfigurationError('Invalid value for __TAUCMDR_PROGRESS_BARS__ environment variable: %s' % mode)
self.label = label
self.count = 0
self.total_size = total_size
self.block_size = block_size
self.show_cpu = show_cpu if load_average() is not None else False
self.auto_refresh = auto_refresh if mode != 'disabled' else 0
self._mode = mode
self._line_remaining = 0
self._phases = []
self._phase_count = 0
self._phase_depth = 0
self._phase_base = 0
self._thread = None
self._exiting = None
self._updating = None
def _thread_progress(self):
while not self._exiting.wait(self.auto_refresh):
self._updating.acquire()
self.update()
self._updating.notify()
self._updating.release()
def __enter__(self):
self.push_phase(self.label)
return self
def __exit__(self, unused_exc_type, unused_exc_value, unused_traceback):
self.complete()
return False
def _line_reset(self):
sys.stdout.write('\r')
sys.stdout.write(logger.COLORED_LINE_MARKER)
self._line_remaining = logger.LINE_WIDTH
def _line_append(self, text):
from taucmdr import util
sys.stdout.write(text)
self._line_remaining -= len(util.uncolor_text(text))
def _line_flush(self, newline=False):
self._line_append(' '*self._line_remaining)
if newline:
sys.stdout.write('\n')
sys.stdout.flush()
assert self._line_remaining == 0, str(self._line_remaining)
def _draw_bar(self, percent, width, char, *args, **kwargs):
from taucmdr import util
bar_on = max(int(percent*width), 1)
bar_off = width - bar_on
self._line_append(util.color_text(char*bar_on, *args, **kwargs))
self._line_append(' '*bar_off)
def _draw_phase_labels(self):
start = self._phase_base
printed_phases = self._phases[:start]
for i, (label, timestamp, implicit) in enumerate(self._phases[start:-1], start):
if label is not None:
if self._phases[i+1][0] is not None:
self._line_reset()
self._line_append("%s:" % label)
self._line_flush(newline=True)
printed_phases.append((label, timestamp, implicit))
else:
label, tstart, _ = printed_phases.pop()
tdelta = (timestamp - tstart).total_seconds()
self._line_reset()
self._line_append("%s [%0.3f seconds]" % (label, tdelta))
self._line_flush(newline=True)
label, timestamp, implicit = self._phases[-1]
if label is not None:
printed_phases.append((label, timestamp, implicit))
else:
label, tstart, _ = printed_phases.pop()
tdelta = (timestamp - tstart).total_seconds()
self._line_reset()
self._line_append("%s [%0.3f seconds]" % (label, tdelta))
self._line_flush(newline=True)
self._phases = printed_phases
self._phase_depth = len(printed_phases)
self._phase_base = max(self._phase_base, self._phase_depth-1)
def push_phase(self, label, implicit=False):
if self.auto_refresh:
if self._thread is None:
self._thread = threading.Thread(target=self._thread_progress)
self._exiting = threading.Event()
self._updating = threading.Condition()
self._thread.daemon = True
self._thread.start()
self._updating.acquire()
try:
top_phase = self._phases[-1]
except IndexError:
new_phase = True
else:
new_phase = top_phase[0] is not None and top_phase[0].strip() != label
if top_phase[2]:
self.pop_phase()
if new_phase:
label = (self._phase_depth*self._indent) + label
self._phases.append((label, datetime.now(), implicit))
if self.auto_refresh:
self._updating.wait()
self._updating.release()
else:
self.update()
def pop_phase(self):
if self.auto_refresh:
self._updating.acquire()
if self._phases:
self._phases.append((None, datetime.now(), None))
if self.auto_refresh:
self._updating.wait()
self._updating.release()
else:
self.update()
def phase(self, label):
self.push_phase(label, True)
def increment(self, count=1):
self.count += count
def update(self, count=None, block_size=None, total_size=None):
"""Show progress.
Updates `block_size` or `total_size` if given for compatibility with :any:`urllib.urlretrieve`.
Args:
count (int): Number of blocks of `block_size` that have been completed.
block_size (int): Size of a work block.
total_size (int): Total amount of work to be completed.
"""
if count is not None:
self.count = count
if block_size is not None:
self.block_size = block_size
if total_size is not None:
self.total_size = total_size
if self.auto_refresh:
if threading.current_thread() is not self._thread:
if not self._phases:
self.push_phase(self.label)
return
else:
if not self._phases:
self.push_phase(self.label)
return
if self._phase_depth != len(self._phases):
self._draw_phase_labels()
if not self._phases:
return
label, tstart, _ = self._phases[-1]
tdelta = (datetime.now() - tstart).total_seconds()
self._line_reset()
if label == "":
self._line_append("%0.1f seconds %s" % (tdelta, self._spinner.next()))
else:
self._line_append("%s: %0.1f seconds %s" % (label, tdelta, self._spinner.next()))
show_bar = self.total_size > 0
if self.show_cpu and self._line_remaining > 40:
cpu_load = min(load_average(), 1.0)
self._line_append("[CPU: %0.1f " % (100*cpu_load))
width = (self._line_remaining/4) if show_bar else (self._line_remaining-2)
self._draw_bar(cpu_load, width, '|', 'white', 'on_white')
self._line_append("]")
if show_bar and self._line_remaining > 20:
self._line_append(" ")
completed = float(self.count*self.block_size)
percent = max(min(completed / self.total_size, 1.0), 0.0)
self._line_append("[%0.1f%% " % (100*percent))
if completed == 0:
eta = '(unknown)'
else:
time_remaining = (tdelta / completed) * (self.total_size - completed)
eta = datetime.now() + timedelta(seconds=time_remaining)
eta = '%s-%s-%s %02d:%02d' % (eta.year, eta.month, eta.day, eta.hour, eta.minute)
width = self._line_remaining - 4 - len(eta)
self._draw_bar(percent, width, '>', 'green', 'on_green')
self._line_append("] %s" % eta)
self._line_flush()
def complete(self):
active = len(self._phases)
for _ in xrange(active):
self.pop_phase()
if self.auto_refresh:
self._exiting.set()
self._thread.join()
else:
self.update()
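# Minimal usage sketch (label and sizes are placeholders; `iter_blocks` is a
# hypothetical producer of fixed-size work blocks):
#
#   with ProgressIndicator('Downloading', total_size=n_bytes, block_size=8192) as bar:
#       for _ in iter_blocks():
#           bar.increment()      # or bar.update(count, block_size, total_size)
#   # leaving the with-block calls complete(), which pops all phases and stops
#   # the auto-refresh thread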
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from typing import TYPE_CHECKING, Optional, Union, Callable, Sequence
from electrum.storage import WalletStorage, StorageReadWriteError
from electrum.wallet_db import WalletDB
from electrum.wallet import Wallet, InternalAddressCorruption, Abstract_Wallet
from electrum.plugin import run_hook
from electrum.util import (profiler, InvalidPassword, send_exception_to_crash_reporter,
format_satoshis, format_satoshis_plain, format_fee_satoshis,
PR_PAID, PR_FAILED, maybe_extract_bolt11_invoice)
from electrum import blockchain
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
from .uix.dialogs.password_dialog import PasswordDialog
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
from .uix.dialogs.question import Question
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# Register a widget cache to keep memory usage down; a timeout of 0 caches
# the data forever.
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import (base_units, NoDynamicFeeEstimates, decimal_point_to_base_unit_name,
base_unit_name_to_decimal_point, NotEnoughFunds, UnknownBaseUnit,
DECIMAL_POINT_DEFAULT)
from .uix.dialogs.lightning_open_channel import LightningOpenChannelDialog
from .uix.dialogs.lightning_channels import LightningChannelsDialog
if TYPE_CHECKING:
from . import ElectrumGui
from electrum.simple_config import SimpleConfig
from electrum.plugin import Plugins
from electrum.paymentrequest import PaymentRequest
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
lightning_gossip_num_peers = NumericProperty(0)
lightning_gossip_num_nodes = NumericProperty(0)
lightning_gossip_num_channels = NumericProperty(0)
lightning_gossip_num_queries = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
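# Let the user pick which chain to follow when the network reports more than one (e.g. after a chain split).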
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
android_backups = BooleanProperty(False)
def on_android_backups(self, instance, x):
self.electrum_config.set_key('android_backups', self.android_backups, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
if self.wallet:
self.wallet.use_change = self.use_change
self.wallet.db.put('use_change', self.use_change)
self.wallet.save_db()
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def set_ln_invoice(self, invoice):
self.switch_to('send')
self.send_screen.set_ln_invoice(invoice)
def on_new_intent(self, intent):
data = intent.getDataString()
if intent.getScheme() == 'bitcoin':
self.set_URI(data)
elif intent.getScheme() == 'lightning':
self.set_ln_invoice(data)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def on_request_status(self, event, key, status):
if key not in self.wallet.receive_requests:
return
self.update_tab('receive')
if self.request_popup and self.request_popup.key == key:
self.request_popup.set_status(status)
if status == PR_PAID:
self.show_info(_('Payment Received') + '\n' + key)
self._trigger_update_history()
def on_invoice_status(self, event, key):
req = self.wallet.get_invoice(key)
if req is None:
return
status = req['status']
# todo: update single item
self.update_tab('send')
if self.invoice_popup and self.invoice_popup.key == key:
self.invoice_popup.set_status(status)
if status == PR_PAID:
self.show_info(_('Payment was sent'))
self._trigger_update_history()
elif status == PR_FAILED:
self.show_info(_('Payment failed'))
def _get_bu(self):
decimal_point = self.electrum_config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
return decimal_point_to_base_unit_name(decimal_point)
except UnknownBaseUnit:
return decimal_point_to_base_unit_name(DECIMAL_POINT_DEFAULT)
def _set_bu(self, value):
assert value in base_units.keys()
decimal_point = base_unit_name_to_decimal_point(value)
self.electrum_config.set_key('decimal_point', decimal_point, True)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
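# The base unit (e.g. BTC vs mBTC) is stored in the config as a decimal point and exposed here as an AliasProperty.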
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''Current screen orientation, derived from the window size.
Can be one of `landscape` or `portrait`.
:data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None # type: Optional[Abstract_Wallet]
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
self.password = None
App.__init__(self)#, **kwargs)
self.electrum_config = config = kwargs.get('config', None) # type: SimpleConfig
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.host
self.server_port = net_params.port
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', None) # type: Plugins
self.gui_object = kwargs.get('gui_object', None) # type: ElectrumGui
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so that the UI updates at most twice a second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
self._channels_dialog = None
self._addresses_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
self.invoice_popup = None
self.request_popup = None
def on_pr(self, pr: 'PaymentRequest'):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = pr.get_id()
invoice = self.wallet.get_invoice(key) # FIXME wrong key...
if invoice and invoice['status'] == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
elif pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
return
# try to decode transaction
from electrum.transaction import tx_from_any
try:
tx = tx_from_any(data)
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, is_lightning, key):
from .uix.dialogs.request_dialog import RequestDialog
request = self.wallet.get_request(key)
data = request['invoice'] if is_lightning else request['URI']
self.request_popup = RequestDialog('Request', data, key, is_lightning=is_lightning)
self.request_popup.set_status(request['status'])
self.request_popup.open()
def show_invoice(self, is_lightning, key):
from .uix.dialogs.invoice_dialog import InvoiceDialog
invoice = self.wallet.get_invoice(key)
if not invoice:
return
status = invoice['status']
data = invoice['invoice'] if is_lightning else key
self.invoice_popup = InvoiceDialog('Invoice', data, key)
self.invoice_popup.set_status(status)
self.invoice_popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(title, data, show_text, failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('electrum/gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
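# Decorator: if the wrapped startup method raises, show the crash reporter and stop the app once it is dismissed.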
def handle_crash_on_startup(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
from .uix.dialogs.crash_reporter import CrashReporter
# show the crash reporter, and when it's closed, shutdown the app
cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__)
cr.on_dismiss = lambda: self.stop()
Clock.schedule_once(lambda _, cr=cr: cr.open(), 0)
return wrapper
@handle_crash_on_startup
def on_start(self):
'''Entry point of the Kivy UI.
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
Window.bind(on_key_down=self.on_key_down)
#Window.softinput_mode = 'below_target'
self.on_size(Window, Window.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_fee_histogram, ['fee_histogram'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.network.register_callback(self.on_channels, ['channels_updated'])
self.network.register_callback(self.on_channel, ['channel'])
self.network.register_callback(self.on_invoice_status, ['invoice_status'])
self.network.register_callback(self.on_request_status, ['request_status'])
self.network.register_callback(self.on_channel_db, ['channel_db'])
self.network.register_callback(self.set_num_peers, ['gossip_peers'])
self.network.register_callback(self.set_unknown_channels, ['unknown_channels'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True))
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def on_channel_db(self, event, num_nodes, num_channels, num_policies):
self.lightning_gossip_num_nodes = num_nodes
self.lightning_gossip_num_channels = num_channels
def set_num_peers(self, event, num_peers):
self.lightning_gossip_num_peers = num_peers
def set_unknown_channels(self, event, unknown):
self.lightning_gossip_num_queries = unknown
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, storage, db):
if storage:
wallet = Wallet(db, storage, config=self.electrum_config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True),
ask_if_wizard=True)
def _on_decrypted_storage(self, storage: WalletStorage):
assert storage.is_past_initial_decryption()
db = WalletDB(storage.read(), manual_upgrades=False)
if db.requires_upgrade():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
wizard.upgrade_storage(storage, db)
else:
self.on_wizard_complete(None, storage, db)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet.has_password():
def on_success(x):
# save pin_code so that we can create backups
self.password = x
self.load_wallet(wallet)
self.password_dialog(
check_password=wallet.check_password,
on_success=on_success,
on_failure=self.stop)
else:
self.load_wallet(wallet)
else:
def launch_wizard():
storage = WalletStorage(path)
if not storage.file_exists():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
wizard.run('new')
else:
if storage.is_encrypted():
if not storage.is_encrypted_with_user_pw():
raise Exception("Kivy GUI does not support this type of encrypted wallet files.")
def on_password(pw):
self.password = pw
storage.decrypt(pw)
self._on_decrypted_storage(storage)
self.password_dialog(
check_password=storage.check_password,
on_success=on_password,
on_failure=self.stop)
return
self._on_decrypted_storage(storage)
if not ask_if_wizard:
launch_wizard()
else:
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_stop(self):
Logger.info('on_stop')
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def lightning_open_channel_dialog(self):
d = LightningOpenChannelDialog(self)
d.open()
def lightning_channels_dialog(self):
if not self.wallet.has_lightning():
self.show_error('Lightning not enabled on this wallet')
return
if self._channels_dialog is None:
self._channels_dialog = LightningChannelsDialog(self)
self._channels_dialog.open()
def on_channel(self, evt, chan):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def on_channels(self, evt, wallet):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
elif name == 'status':
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
elif name.endswith("_dialog"):
getattr(self, name)()
else:
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
'''Initialize the UX part of Electrum. This function performs the basic
tasks of setting up the UI.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "electrum/gui/icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet: 'Abstract_Wallet'):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once the GUI has been initialized, check whether we need to announce something,
# since the callback may have been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return
self.use_change = self.wallet.use_change
self.electrum_config.save_last_wallet(wallet)
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
def get_max_amount(self):
from electrum.transaction import PartialTxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None)
if not inputs:
return ''
addr = None
if self.send_screen:
addr = str(self.send_screen.screen.address)
if not addr:
addr = self.wallet.dummy_address()
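# The special value '!' asks the wallet to send the maximum spendable amount to this output.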
outputs = [PartialTxOutput.from_address_and_value(addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(coins=inputs, outputs=outputs)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000) + ' sat/byte'
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
Logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
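# If the app was paused for more than 5 minutes, require the PIN again before resuming.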
if self.wallet and self.wallet.has_password() and now - self.pause_time > 5*60:
self.password_dialog(check_password=self.check_pin_code, on_success=None, on_failure=self.stop, is_password=False)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label):
if not label.data:
return
self.qr_dialog(label.name, label.data, True)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://electrum/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
icon: icon (or texture, when `text` is 'texture') shown inside the bubble
modal, exit: passed through to :meth:`InfoBubble.show`
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://electrum/gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def lightning_tx_dialog(self, tx):
from .uix.dialogs.lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
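# Broadcasting runs on a background thread; the result is handed back to the UI thread via Clock.schedule_once.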
def _broadcast_thread(self, tx, on_complete):
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def addresses_dialog(self):
from .uix.dialogs.addresses import AddressesDialog
if self._addresses_dialog is None:
self._addresses_dialog = AddressesDialog(self)
self._addresses_dialog.update()
self._addresses_dialog.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
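# Ask for the PIN (if one is configured) and then call f with the stored wallet password appended to args.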
def protected(self, msg, f, args):
if self.electrum_config.get('pin_code'):
on_success = lambda pw: f(*(args + (self.password,)))
self.password_dialog(
message = msg,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=lambda: None,
is_password=False)
else:
f(*(args + (self.password,)))
def toggle_lightning(self):
if self.wallet.has_lightning():
if not bool(self.wallet.lnworker.channels):
warning = _('This will delete your lightning private keys')
d = Question(_('Disable Lightning?') + '\n\n' + warning, self._disable_lightning)
d.open()
else:
self.show_info('This wallet has channels')
else:
warning1 = _("Lightning support in Electrum is experimental. Do not put large amounts in lightning channels.")
warning2 = _("Funds stored in lightning channels are not recoverable from your seed. You must back up your wallet file every time you create a new channel.")
d = Question(_('Enable Lightning?') + '\n\n' + warning1 + '\n\n' + warning2, self._enable_lightning)
d.open()
def _enable_lightning(self, b):
if not b:
return
wallet_path = self.get_wallet_path()
self.wallet.init_lightning()
self.show_info(_('Lightning keys have been initialized.'))
self.stop_wallet()
self.load_wallet_by_name(wallet_path)
def _disable_lightning(self, b):
if not b:
return
wallet_path = self.get_wallet_path()
self.wallet.remove_lightning()
self.show_info(_('Lightning keys have been removed.'))
self.stop_wallet()
self.load_wallet_by_name(wallet_path)
def delete_wallet(self):
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter PIN code to display your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
label.data = seed
if passphrase:
label.data += '\n\n' + _('Passphrase') + ': ' + passphrase
def has_pin_code(self):
return bool(self.electrum_config.get('pin_code'))
def check_pin_code(self, pin):
if pin != self.electrum_config.get('pin_code'):
raise InvalidPassword
def password_dialog(self, **kwargs):
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, **kwargs)
self._password_dialog.open()
def change_password(self, cb):
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.password = new_password
self.show_info(_("Your password was updated"))
on_failure = lambda: self.show_error(_("Password not updated"))
self.password_dialog(
check_password = self.wallet.check_password,
on_success=on_success, on_failure=on_failure,
is_change=True, is_password=True,
has_password=self.wallet.has_password())
def change_pin_code(self, cb):
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
def on_success(old_password, new_password):
self.electrum_config.set_key('pin_code', new_password)
cb()
self.show_info(_("PIN updated") if new_password else _('PIN disabled'))
on_failure = lambda: self.show_error(_("PIN not updated"))
self._password_dialog.init(
self, check_password=self.check_pin_code,
on_success=on_success, on_failure=on_failure,
is_change=True, is_password=False,
has_password = self.has_pin_code())
self._password_dialog.open()
def save_backup(self):
if platform != 'android':
self._save_backup()
return
from android.permissions import request_permissions, Permission
def cb(permissions, grant_results: Sequence[bool]):
if not grant_results or not grant_results[0]:
self.show_error(_("Cannot save backup without STORAGE permission"))
return
# note: Clock.schedule_once is a hack so that we get called on a non-daemon thread
# (needed for WalletDB.write)
Clock.schedule_once(lambda dt: self._save_backup())
request_permissions([Permission.WRITE_EXTERNAL_STORAGE], cb)
def _save_backup(self):
new_path = self.wallet.save_backup()
if new_path:
self.show_info(_("Backup saved:") + f"\n{new_path}")
else:
self.show_error(_("Backup NOT saved. Backup directory not configured."))
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password))
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
pjit_test.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from functools import partial
import logging
import threading
import unittest
from collections import OrderedDict, namedtuple
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import jax
import jax.numpy as jnp
from jax._src import test_util as jtu
from jax.errors import JAXTypeError
from jax import lax
# TODO(skye): do we still wanna call this PartitionSpec?
from jax.experimental import PartitionSpec as P
from jax.experimental.maps import xmap, mesh
import jax.experimental.pjit as pjit_lib
from jax.experimental.pjit import pjit, pjit_p, with_sharding_constraint, SpecSync
from jax.interpreters import pxla
from jax.interpreters import xla
from jax._src.lib import xla_client
from jax._src.util import prod, curry, unzip2
from jax.config import config
config.parse_flags_with_absl()
def setUpModule():
if jax.default_backend() not in {'gpu', 'tpu'}:
raise unittest.SkipTest("pjit only supports GPU and TPU backends")
jtu.set_spmd_lowering_flag(True)
def tearDownModule():
jtu.restore_spmd_lowering_flag()
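# Test decorator: parameterizes a test over a few 1-D and 2-D mesh shapes, optionally creating the mesh via jtu.with_mesh_from_kwargs.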
@curry
def check_1d_2d_mesh(f, set_mesh):
return parameterized.named_parameters(
{"testcase_name": "_" + name, "mesh": mesh, "resources": resources}
for name, mesh, resources in (
("2", (("x", 2),), "x"),
("2x1", (("x", 2), ("y", 1)), ("x", "y")),
("2x2", (("x", 2), ("y", 2)), ("x", "y")),
))(jtu.with_mesh_from_kwargs(f) if set_mesh else f)
# TODO(skye): make the buffer donation utils part of JaxTestCase
class PJitTest(jtu.BufferDonationTestCase):
@jtu.with_mesh([('x', 1)])
def testDeviceBufferAval(self):
@partial(pjit, in_axis_resources=None, out_axis_resources=P('x'))
def f(x):
return x
shape = (2, 2)
x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
actual = f(x)
expected = x
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 1)
self.assertAllClose(
actual.device_buffers[0].to_py(), expected, check_dtypes=False)
# Repro for a bug on device_buffer aval
_ = repr(actual.device_buffers)
@jtu.with_mesh([('x', 2)])
def testBasic1D(self):
@partial(pjit,
in_axis_resources=(P('x'), P('x')),
out_axis_resources=None)
def f(x, y):
return x + y
shape = (8, 8)
x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
actual = f(x, x + 1)
expected = x + (x + 1)
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 2)
self.assertAllClose(actual.device_buffers[0].to_py(), expected,
check_dtypes=False)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testBasic2D(self):
@partial(pjit,
in_axis_resources=(P(None, 'x', 'y'), P('y')),
out_axis_resources=P('x'))
def f(x, y):
return x @ y
x_shape = (8, 6, 4)
y_shape = (4, 2)
x = jnp.arange(np.prod(x_shape)).reshape(x_shape)
y = jnp.arange(np.prod(y_shape)).reshape(y_shape)
actual = f(x, y)
expected = x @ y
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 4)
split0, split1 = np.split(expected, 2)
self.assertAllClose(actual.device_buffers[0].to_py(), split0,
check_dtypes=False)
self.assertAllClose(actual.device_buffers[1].to_py(), split0,
check_dtypes=False)
self.assertAllClose(actual.device_buffers[2].to_py(), split1,
check_dtypes=False)
self.assertAllClose(actual.device_buffers[3].to_py(), split1,
check_dtypes=False)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testTwoMeshAxisSharding(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
actual = f(x, x + 1)
expected = x @ (x + 1)
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 4)
splits = np.split(expected, 4)
self.assertAllClose(actual.device_buffers[0].to_py(), splits[0],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[1].to_py(), splits[1],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[2].to_py(), splits[2],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[3].to_py(), splits[3],
check_dtypes=False)
@jtu.with_mesh([('x', 2)])
def testBufferDonation(self):
@partial(pjit,
in_axis_resources=P('x'),
out_axis_resources=P('x'),
donate_argnums=0)
def f(x, y):
return x + y
shard = pjit(lambda x: x, in_axis_resources=P('x'),
out_axis_resources=P('x'))
x = shard(jnp.ones((2, 5)) * 4)
y = shard(jnp.ones((2, 5)) * 2)
expected = x + y
self.assertAllClose(f(x, y), expected)
self.assertNotDeleted(y)
self.assertDeleted(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testShardingConstraint(self):
@partial(pjit, in_axis_resources=None, out_axis_resources=None)
def f(x):
y = x + 1
y = with_sharding_constraint(y, P('x', 'y'))
return y * 2
shape = (8, 8)
x = np.arange(prod(shape)).reshape(shape)
expected = (x + 1) * 2
actual = f(x)
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 2)
self.assertAllClose(actual.device_buffers[0].to_py(), expected,
check_dtypes=False)
hlo = jax.xla_computation(f)(np.ones(shape))
# Annotation from with_sharding_constraint
self.assertIn("sharding={devices=[2,1]0,1}", hlo.as_hlo_text())
# Annotation from pjit
self.assertIn("sharding={replicated}", hlo.as_hlo_text())
@jtu.with_mesh([('x', 2), ('y', 1)])
def testShardingConstraintPyTree(self):
@partial(pjit, in_axis_resources=None, out_axis_resources=None)
def f(x):
x = with_sharding_constraint(x, [P('x', 'y'), P('y', 'x')])
x = x.copy()
x[0]["a"] *= 2
return x
shape = (8, 8)
v = np.arange(prod(shape)).reshape(shape)
x = [{"a": v, "b": v * 2}, v * 3]
actual = f(x)
expected = x.copy()
expected[0]["a"] *= 2
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertLen(actual[0]["a"].device_buffers, 2)
hlo = jax.xla_computation(f)(x)
# Annotations from with_sharding_constraint
self.assertIn("sharding={devices=[2,1]0,1}", hlo.as_hlo_text())
self.assertIn("sharding={devices=[1,2]0,1}", hlo.as_hlo_text())
# Annotation from pjit
self.assertIn("sharding={replicated}", hlo.as_hlo_text())
def testCaching(self):
def f(x):
assert should_be_tracing
return jnp.sin(x) * 2
x = np.arange(16).reshape(4, 4)
devices = np.array(list(jax.local_devices())[:4])
if devices.size < 4:
raise unittest.SkipTest("Test requires 4 devices")
devices = devices.reshape((2, 2))
with mesh(devices, ('x', 'y')):
should_be_tracing = True
pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
should_be_tracing = False
pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
# Re-create the mesh to make sure that it has no influence on caching
with mesh(devices, ('x', 'y')):
should_be_tracing = False
pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testNested(self):
# Add a constant captured by the nested pjit to make things more complicated
h = jnp.arange(4)
f = pjit(lambda x: x.sum() + h.sum(), in_axis_resources=P('x', 'y'), out_axis_resources=None)
g = pjit(lambda x: f(jnp.sin(x)), in_axis_resources=P('x', None), out_axis_resources=None)
x = jnp.arange(16).reshape((4, 4))
y = g(x)
self.assertAllClose(y, jnp.sin(x).sum() + h.sum())
self.assertTrue(hasattr(y, "sharding_spec"))
@check_1d_2d_mesh(set_mesh=True)
@unittest.skipIf(jax._src.lib.version < (0, 1, 72), "Needs jaxlib 0.1.72+")
def testAutodiff(self, mesh, resources):
if len(mesh) != 2: return
assert resources == ('x', 'y')
# Add a constant captured by the nested pjit to make things more complicated
h = jnp.arange(4)
f = pjit(lambda x: x.sum(1) * h.sum(),
in_axis_resources=P('x', 'y'), out_axis_resources=P(('x', 'y')))
g = pjit(lambda x: f(jnp.sin(x * 4 + 2)),
in_axis_resources=P('x', None), out_axis_resources=P(('x', 'y')))
jtu.check_grads(g, (jnp.arange(16, dtype=jnp.float32).reshape((4, 4)) / 100,),
order=2)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testEvalJaxpr(self):
x, y = jnp.arange(4), jnp.arange(5)
f = pjit(lambda x, y: x.sum() + jnp.sin(y),
in_axis_resources=(P('x'), P('y')),
out_axis_resources=P('y'))
f_jaxpr = jax.make_jaxpr(f)(x, y)
f_eval = jax.core.jaxpr_as_fun(f_jaxpr)
r, = f_eval(x, y)
self.assertAllClose(r, x.sum() + jnp.sin(y))
@jtu.with_mesh([('x', 2)])
def testNonArrayArg(self):
self.assertEqual(pjit(lambda x: x + 2,
in_axis_resources=None,
out_axis_resources=None)(1), 3)
@jtu.with_mesh([('x', 2)])
def testNonHashableAxisResources(self):
x = jnp.arange(4)
y = pjit(lambda x: {'b': x['a'] + 2},
in_axis_resources=({'a': P('x')},),
out_axis_resources={'b': P('x')})({'a': x})
self.assertAllClose(y, {'b': x + 2})
@jtu.with_mesh([('x', 2)])
def testGradOfConstraint(self):
# Make sure that we can compute grads through sharding constraints
h = lambda x: jnp.sin(with_sharding_constraint(x, P('x'))).sum()
f = pjit(lambda x: jax.grad(h)(x),
in_axis_resources=None, out_axis_resources=None)
x = jnp.arange(8, dtype=jnp.float32)
self.assertAllClose(f(x), jnp.cos(x))
@jtu.with_mesh([('x', 2)])
def testNoopPartitionSpecs(self):
noops = [P(), P(None), P(()), P((), None), P(None, None, ())]
x = jnp.arange(8).reshape((2, 2, 2))
for spec in noops:
y = pjit(lambda x: x * 2, in_axis_resources=spec, out_axis_resources=spec)(x)
self.assertAllClose(y, x * 2)
@jtu.with_mesh([('x', 2)])
def testVmapModifiesAxisResources(self):
h = pjit(lambda x, y: (x + y, x, y), in_axis_resources=P('x'), out_axis_resources=None)
x = jnp.arange(4)
y = jnp.arange(5*4).reshape((5, 4))
jaxpr = jax.make_jaxpr(jax.vmap(h, in_axes=(None, 0)))(x, y).jaxpr
eqn = jaxpr.eqns[0]
self.assertIs(eqn.primitive, pjit_p)
x_sync, y_sync = (spec.sync for spec in eqn.params['in_axis_resources'])
self.assertEqual(x_sync, SpecSync.IN_SYNC)
self.assertEqual(y_sync, SpecSync.DIM_PERMUTE)
x_sync, y_sync, z_sync = (spec.sync for spec in eqn.params['out_axis_resources'])
self.assertEqual(x_sync, SpecSync.DIM_PERMUTE)
self.assertEqual(y_sync, SpecSync.IN_SYNC)
self.assertEqual(z_sync, SpecSync.DIM_PERMUTE)
@jtu.with_mesh([('x', 2)])
def testVMap(self):
f = pjit(lambda x, y: (x + y, x), in_axis_resources=P('x'), out_axis_resources=P('x'))
x = jnp.arange(4)
y = jnp.arange(5*4).reshape((5, 4))
z, w = jax.vmap(f, in_axes=(None, 0), out_axes=(0, None))(x, y)
self.assertAllClose(z, x + y)
self.assertAllClose(w, x)
self.assertEqual(z.sharding_spec.sharding, (pxla.NoSharding(), pxla.Chunked([2])))
self.assertEqual(w.sharding_spec.sharding, (pxla.Chunked([2]),))
@jtu.with_mesh([('x', 2)])
def testVMapShardingConstraint(self):
f = pjit(lambda x: with_sharding_constraint(x, P('x')),
in_axis_resources=P(), out_axis_resources=P('x'))
x = jnp.arange(5*4).reshape((5, 4))
jaxpr = jax.make_jaxpr(jax.vmap(f))(x)
pjit_eqn, = jaxpr.eqns
constraint_eqn, = pjit_eqn.params['jaxpr'].eqns
self.assertEqual(constraint_eqn.params['axis_resources'].partitions, ((), ('x',)))
self.assertEqual(constraint_eqn.params['axis_resources'].sync, SpecSync.DIM_PERMUTE)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testShardingInXMap(self):
h = pjit(lambda x: x, in_axis_resources=P('x'), out_axis_resources=None)
f = xmap(lambda x: h(x * 2), in_axes=['i', ...], out_axes=['i', ...],
axis_resources={'i': 'y'})
x = jnp.arange(16).reshape((4, 4))
self.assertIn(pjit_p, xla.call_translations)
rule = xla.call_translations[pjit_p]
test_rule_called = False
def _test_rule(*args, **kwargs):
nonlocal test_rule_called
test_rule_called = True
in_axis_resources = kwargs['in_axis_resources']
self.assertEqual(len(in_axis_resources), 1)
self.assertIn(('y',), in_axis_resources[0].partitions)
return rule(*args, **kwargs)
try:
xla.call_translations[pjit_p] = _test_rule
f(x)
self.assertTrue(test_rule_called)
finally:
xla.call_translations[pjit_p] = rule
@jtu.with_mesh([('x', 2)])
def testLowerWithAbstractArgs(self):
x = jax.ShapeDtypeStruct((2, 2), jnp.float32)
# Make sure this doesn't crash
pjit(lambda x: x + 4, in_axis_resources=P('x'), out_axis_resources=P('x')).lower(x)
def testInfeed(self):
devices = np.array(jax.local_devices())
nr_devices = len(devices)
shape = (nr_devices * 3, nr_devices * 5)
def f_for_jit(x):
token = lax.create_token(x)
(y,), token = lax.infeed(
token, shape=(jax.ShapedArray(x.shape, np.float32),))
(z,), token = lax.infeed(
token, shape=(jax.ShapedArray(x.shape, np.float32),))
(w,), token = lax.infeed(
token, shape=(jax.ShapedArray(x.shape, np.float32),))
return x + y + z + w
x = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
y = x * 2.
z = x * 3.
w = x * 4.
# Transfer data to infeed before executing the function. For GPUs, the
# execution of the compiled function is blocking, so transferring data
# to infeed before executing ensures that the execution does not deadlock
# waiting for the infeed data.
logging.info('Transferring to infeed for the jit call')
d = devices[0]
d.transfer_to_infeed((y,))
d.transfer_to_infeed((z,))
d.transfer_to_infeed((w,))
# JIT
logging.info('Making jit call')
res0 = jax.jit(f_for_jit)(x)
self.assertAllClose(res0, x + y + z + w, check_dtypes=True)
# PJIT
def f_for_pjit(x):
token = lax.create_token(x)
# A replicated infeed
(y,), token = lax.infeed(
token,
shape=(jax.ShapedArray(x.shape, np.float32),),
partitions=(None,))
# An infeed sharded on first axis
(z,), token = lax.infeed(
token,
shape=(jax.ShapedArray(x.shape, np.float32),),
partitions=(P(nr_devices, 1),))
# An infeed sharded on second axis
(w,), token = lax.infeed(
token,
shape=(jax.ShapedArray(x.shape, np.float32),),
partitions=(P(1, nr_devices),))
return x + y + z + w
logging.info('Transferring to infeed for the pjit call')
for didx, d in enumerate(devices):
# Transfer the whole array to all devices for replicated.
d.transfer_to_infeed((y,))
# For sharded infeed, transfer only the needed slices to each device.
d.transfer_to_infeed((z[3 * didx:3 * didx + 3, :]))
d.transfer_to_infeed((w[:, 5 * didx:5 * didx + 5],))
with mesh(devices, ['d']):
logging.info('Making pjit call')
res = pjit(
f_for_pjit, in_axis_resources=(P('d'),), out_axis_resources=P('d'))(
x)
self.assertAllClose(res0, res, check_dtypes=True)
def testOutfeed(self):
devices = np.array(jax.local_devices())
nr_devices = len(devices)
shape = (nr_devices * 3, nr_devices * 5)
def f(x):
token = lax.create_token(x)
token = lax.outfeed(token, x, partitions=(None,))
token = lax.outfeed(token, x, partitions=(P(nr_devices, 1),))
token = lax.outfeed(token, x, partitions=(P(1, nr_devices),))
return x
x = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
def dispatch():
with mesh(devices, ['d']):
logging.info('Making pjit call')
pjit(f, in_axis_resources=(P('d'),), out_axis_resources=P('d'))(x)
execution = threading.Thread(target=dispatch)
execution.start()
def check_outfeed(d, x):
y, = d.transfer_from_outfeed(
xla_client.shape_from_pyval((x,)).with_major_to_minor_layout_if_absent())
self.assertAllClose(x, y, check_dtypes=True)
logging.info('Transferring from outfeed for the pjit call')
for didx, d in enumerate(devices):
# Transfer the whole array from all devices for replicated.
check_outfeed(d, x)
# For sharded outfeed, the results are sliced.
check_outfeed(d, x[3 * didx:3 * didx + 3, :])
check_outfeed(d, x[:, 5 * didx:5 * didx + 5])
execution.join()
@jtu.with_mesh([('x', 2)])
def testWithCustomPRNGKey(self):
if not config.jax_enable_custom_prng:
raise unittest.SkipTest("test requires jax_enable_custom_prng")
key = jax.prng.seed_with_impl(jax.prng.rbg_prng_impl, 87)
# Make sure this doesn't crash
pjit(lambda x: x, in_axis_resources=(None), out_axis_resources=(None))(key)
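# Escape parentheses so a PartitionSpec repr can be used verbatim inside assertRaisesRegex patterns.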
def spec_regex(s):
return str(s).replace(r"(", r"\(").replace(r")", r"\)")
class PJitErrorTest(jtu.JaxTestCase):
@check_1d_2d_mesh(set_mesh=True)
def testNonDivisibleArgs(self, mesh, resources):
x = jnp.ones((3, 2))
spec = P(resources, None)
mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
with self.assertRaisesRegex(ValueError,
r"One of pjit arguments.*" + spec_regex(spec) + r".*"
r"implies that the size of its dimension 0 should be "
r"divisible by " + mesh_size + r", but it is equal to 3"):
pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)
@check_1d_2d_mesh(set_mesh=True)
def testNonDivisibleOuts(self, mesh, resources):
x = jnp.ones((3, 2))
spec = P(resources, None)
mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
with self.assertRaisesRegex(ValueError,
r"One of pjit outputs.*" + spec_regex(spec) + r".*"
r"implies that the size of its dimension 0 should be "
r"divisible by " + mesh_size + r", but it is equal to 3"):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=P(resources, None))(x)
@check_1d_2d_mesh(set_mesh=True)
def testNonDivisibleConstraint(self, mesh, resources):
x = jnp.ones((3, 2))
spec = P(resources,)
mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
with self.assertRaisesRegex(ValueError,
r"One of with_sharding_constraint arguments"
r".*" + spec_regex(spec) + r".*implies that the size of "
r"its dimension 0 should be divisible by " + mesh_size +
r", but it is equal to 3"):
pjit(lambda x: with_sharding_constraint(x, spec),
in_axis_resources=None, out_axis_resources=None)(x)
@check_1d_2d_mesh(set_mesh=False)
@jtu.with_mesh([('z', 1)])
def testUndefinedResourcesArgs(self, mesh, resources):
x = jnp.ones((2, 2))
spec = P(resources,)
with self.assertRaisesRegex(ValueError,
r"One of pjit arguments.*" + spec_regex(spec) + r", "
r"but resource axis x is undefined."):
pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)
@check_1d_2d_mesh(set_mesh=False)
@jtu.with_mesh([('z', 1)])
def testUndefinedResourcesOuts(self, mesh, resources):
x = jnp.ones((2, 2))
spec = P(resources,)
with self.assertRaisesRegex(ValueError,
r"One of pjit outputs.*" + spec_regex(spec) + r", "
r"but resource axis x is undefined."):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=spec)(x)
@check_1d_2d_mesh(set_mesh=False)
@jtu.with_mesh([('z', 1)])
def testUndefinedResourcesConstraint(self, mesh, resources):
x = jnp.ones((2, 2))
spec = P(resources,)
with self.assertRaisesRegex(ValueError,
r"One of with_sharding_constraint arguments"
r".*" + spec_regex(spec) + r", but resource axis "
r"x is undefined."):
pjit(lambda x: with_sharding_constraint(x, spec),
in_axis_resources=None, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRankTooLowArgs(self):
x = jnp.arange(2)
spec = P('x', 'y')
error = (r"One of pjit arguments.*" + spec_regex(spec) + r", which implies "
r"that it has a rank of at least 2, but it is 1")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x.sum(), in_axis_resources=spec, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRankTooLowOuts(self):
x = jnp.arange(2)
spec = P('x', 'y')
error = (r"One of pjit outputs.*" + spec_regex(spec) + r", which implies "
r"that it has a rank of at least 2, but it is 0")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x.sum(), in_axis_resources=None, out_axis_resources=spec)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRankTooLowConstraint(self):
x = jnp.arange(2)
spec = P('x', 'y')
error = (r"One of with_sharding_constraint arguments " +
r"was given.*" + spec_regex(spec) + r", which implies "
r"that it has a rank of at least 2, but it is 1")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: with_sharding_constraint(x, spec),
in_axis_resources=None, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRepeatedInResources(self):
x = jnp.arange(2)
for spec in [P('x', 'x'), P('x', ('y', 'x'))]:
error = (r"A single in_axis_resources specification can map every mesh "
r"axis to at most one positional dimension, but " +
spec_regex(spec) + " has duplicate entries for `x`")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRepeatedOutResources(self):
x = jnp.arange(2)
for spec in [P('x', 'x'), P('x', ('y', 'x'))]:
error = (r"A single out_axis_resources specification can map every mesh "
r"axis to at most one positional dimension, but " +
spec_regex(spec) + " has duplicate entries for `x`")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=spec)(x)
@jtu.with_mesh([('x', 2)])
def testInputShardsXMapAxis(self):
spec = P('x')
f = xmap(pjit(lambda x: x + 2, in_axis_resources=spec, out_axis_resources=None),
in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
x = jnp.arange(4).reshape((2, 2))
error = (r"pjit input has an axis resources specification of " +
spec_regex(spec) + r" that uses one or more mesh axes already used by "
r"xmap to partition a named axis appearing in its named_shape \(both "
r"use mesh axes `x`\)")
with self.assertRaisesRegex(JAXTypeError, error):
f(x)
@jtu.with_mesh([('x', 2)])
def testOutputShardsXMapAxis(self):
spec = P('x')
f = xmap(pjit(lambda x: x + 2, in_axis_resources=None, out_axis_resources=spec),
in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
x = jnp.arange(4).reshape((2, 2))
error = (r"pjit output has an axis resources specification of " +
spec_regex(spec) + r" that uses one or more mesh axes already used by "
r"xmap to partition a named axis appearing in its named_shape \(both "
r"use mesh axes `x`\)")
with self.assertRaisesRegex(JAXTypeError, error):
f(x)
@jtu.with_mesh([('x', 2)])
def testConstraintShardsXMapAxis(self):
spec = P('x')
f = xmap(lambda x: with_sharding_constraint(x, axis_resources=spec),
in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
x = jnp.arange(4).reshape((2, 2))
error = (r"with_sharding_constraint input has an axis resources specification of " +
spec_regex(spec) + r" that uses one or more mesh axes already used by "
r"xmap to partition a named axis appearing in its named_shape \(both "
r"use mesh axes `x`\)")
with self.assertRaisesRegex(JAXTypeError, error):
f(x)
@jtu.with_mesh([('x', 2)])
def testCatchesInnerXMapErrors(self):
f = pjit(xmap(lambda x, y: x, in_axes=(['i'], ['j']), out_axes=['i', 'j'],
axis_resources={'i': 'x', 'j': 'x'}),
in_axis_resources=None, out_axis_resources=None)
x = jnp.arange(4)
with self.assertRaises(JAXTypeError):
f(x, x)
def testEmptyMesh(self):
error = (r"pjit requires a non-empty mesh! Are you sure that it's defined "
r"at the call site?")
with self.assertRaisesRegex(RuntimeError, error):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=None)(jnp.arange(4))
@jtu.with_mesh([('x', 2)])
def testAxisResourcesMismatch(self):
x = jnp.ones([])
p = [None, None, None]
pjit(lambda x: x, (p,), p)([x, x, x]) # OK
error = re.escape(
r"pjit in_axis_resources specification must be a tree prefix of the "
r"corresponding value, got specification (None, None, None) for value "
r"tree PyTreeDef((*, *)). Note that pjit in_axis_resources that are "
r"non-trivial pytrees should always be wrapped in a tuple representing "
r"the argument list.")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x, y: x, p, p)(x, x) # Error, but make sure we hint at tupling
# TODO(apaszke): Disable implicit list casts and enable this
# error = re.escape(
# r"pjit in_axis_resources specification must be a tree prefix of the "
# r"corresponding value, got specification (None, None, None) for value "
# r"tree PyTreeDef(([*, *, *],)). Note that pjit in_axis_resources that "
# r"are non-trivial pytrees should always be wrapped in a tuple representing "
# r"the argument list. In particular, you're passing in a single argument "
# r"which means that pjit in_axis_resources might need to be wrapped in a "
# r"singleton tuple.")
# with self.assertRaisesRegex(ValueError, error):
# pjit(lambda x: x, p, p)([x, x, x]) # Error, but make sure we hint at singleton tuple
error = re.escape(
r"pjit out_axis_resources specification must be a tree prefix of the "
r"corresponding value, got specification [[None, None, None], None] for "
r"value tree PyTreeDef([*, *, *]).")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x, (p,), [p, None])([x, x, x]) # Error, we raise a generic tree mismatch message
@jtu.with_mesh([('x', 2)])
def testNestedDifferentResources(self):
@partial(pjit, in_axis_resources=P('x'), out_axis_resources=None)
def f(x):
with mesh(np.array([jax.local_devices()[0]]), ('x')):
@partial(pjit, in_axis_resources=P('x'), out_axis_resources=None)
def h(x):
return x
return h(x)
xshape = (2, 5, 6)
x = jnp.arange(np.prod(xshape)).reshape(xshape)
with self.assertRaisesRegex(RuntimeError,
"Changing the physical mesh is not allowed.*"):
f(x)
class UtilTest(jtu.JaxTestCase):
def testOpShardingRoundTrip(self):
FakeDevice = namedtuple('FakeDevice', ['id'])
mesh_named_shape = OrderedDict([('a', 2), ('b', 3), ('c', 4), ('d', 7), ('e', 4)])
mesh_axes, mesh_shape = unzip2(mesh_named_shape.items())
devices = [FakeDevice(i) for i in range(np.prod(list(mesh_shape)))]
mesh = pxla.Mesh(np.array(devices).reshape(*mesh_shape), tuple(mesh_axes))
dims = 5
aval = jax.core.ShapedArray((len(devices),) * dims, jnp.float32)
def roundtrip(spec):
op_sharding = pjit_lib.get_aval_sharding_proto(aval, spec, mesh)
parsed_spec = pjit_lib.parse_op_sharding(op_sharding, mesh).partitions
self.assertEqual(parsed_spec[:len(spec)], spec)
self.assertEqual(parsed_spec[len(spec):], ((),) * (len(parsed_spec) - len(spec)))
special_specs = [P()]
for spec in special_specs:
roundtrip(spec)
rng = np.random.default_rng(1)
for i in range(100):
spec = [()] * dims
for axis in rng.permutation(mesh_axes)[:rng.integers(low=1, high=len(mesh_axes) + 1)]:
spec[rng.choice(dims)] += (axis,)
roundtrip(P(*spec))
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
|
run_all.py
|
import logging.config
import multiprocessing
import runpy
# create these targets so we can get structural and functional data in parallel
def get_content_store_data():
runpy.run_module('src.data_preprocessing.get_content_store_data', run_name='__main__')
def make_functional_edges_and_weights():
runpy.run_module('src.data_preprocessing.make_functional_edges_and_weights', run_name='__main__')
if __name__ == '__main__':
logging.config.fileConfig('src/logging.conf')
module_logger = logging.getLogger('run_all')
functional_edges_and_weights = multiprocessing.Process(
name='make_functional_edges_and_weights', target=make_functional_edges_and_weights)
content_store_data = multiprocessing.Process(name='get_content_store_data', target=get_content_store_data)
functional_edges_and_weights.start()
module_logger.info('kicked off make_functional_edges_and_weights (in parallel)')
content_store_data.start()
module_logger.info('kicked off get_content_store_data (in parallel)')
functional_edges_and_weights.join()
module_logger.info('make_functional_edges_and_weights is finished')
content_store_data.join()
module_logger.info('get_content_store_data is finished')
module_logger.info('running make_network')
runpy.run_module('src.features.make_network', run_name='__main__')
module_logger.info('make_network is finished')
module_logger.info('running train_node2vec_model')
runpy.run_module('src.models.train_node2vec_model', run_name='__main__')
module_logger.info('train_node2vec_model is finished')
module_logger.info('running predict_related_links')
runpy.run_module('src.models.predict_related_links', run_name='__main__')
module_logger.info('predict_related_links is finished')
module_logger.info('everything has run')
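# The pattern above (parallel preprocessing, then a sequential tail) can be
# generalised. A minimal sketch using the same runpy/multiprocessing approach;
# run_modules_in_parallel and run_modules_sequentially are illustrative helpers,
# not part of the original pipeline.
def run_modules_in_parallel(module_names):
    """Run each module as '__main__' in its own process and wait for all."""
    procs = [multiprocessing.Process(name=name, target=runpy.run_module,
                                     args=(name,), kwargs={'run_name': '__main__'})
             for name in module_names]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
def run_modules_sequentially(module_names):
    """Run modules one after another in the current process."""
    for name in module_names:
        runpy.run_module(name, run_name='__main__')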
|
infunction.py
|
import random
import time
import boto3
from multiprocessing import Process, Pipe
def parallel_handler(event, context):
startTime = GetTime()
if 'n' in event:
times = event['n']
parallelIndex = event['parallelIndex']
temp = alu(times,parallelIndex)
        return {
            'result': temp,
            'times': times,
            'execTime': GetTime() - startTime
        }
    else:
        return {
            'error': "No n in event"
        }
def GetTime():
return int(round(time.time() * 1000))
def alu(times, parallelIndex):
per_times = int(times / parallelIndex)
threads = []
childConns = []
parentConns = []
for i in range(parallelIndex):
parentConn, childConn = Pipe()
parentConns.append(parentConn)
childConns.append(childConn)
t = Process(target=singleAlu, args=(per_times, childConn, i))
threads.append(t)
for i in range(parallelIndex):
threads[i].start()
for i in range(parallelIndex):
threads[i].join()
results = []
for i in range(parallelIndex):
results.append(parentConns[i].recv())
return str(results)
def singleAlu(times, childConn, clientId):
a = random.randint(10, 100)
b = random.randint(10, 100)
temp = 0
for i in range(times):
if i % 4 == 0:
temp = a + b
elif i % 4 == 1:
temp = a - b
elif i % 4 == 2:
temp = a * b
else:
temp = a / b
print(times)
childConn.send(temp)
childConn.close()
return temp
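# Minimal local invocation sketch (outside Lambda), assuming an event of the
# shape {'n': ..., 'parallelIndex': ...}; the values below are illustrative.
if __name__ == '__main__':
    sample_event = {'n': 100000, 'parallelIndex': 4}
    # context is not used by parallel_handler, so None is fine for a local run
    print(parallel_handler(sample_event, None))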
|
test_interrupt.py
|
import os
import signal
import time
from threading import Thread
import pytest
from dagster import (
DagsterEventType,
Field,
ModeDefinition,
String,
execute_pipeline_iterator,
pipeline,
reconstructable,
resource,
seven,
solid,
)
from dagster.core.errors import DagsterExecutionInterruptedError
from dagster.core.test_utils import instance_for_test_tempdir
from dagster.utils import (
check_received_delayed_interrupt,
delay_interrupts,
raise_delayed_interrupts,
raise_interrupts_immediately,
safe_tempfile_path,
send_interrupt,
)
def _send_kbd_int(temp_files):
while not all([os.path.exists(temp_file) for temp_file in temp_files]):
time.sleep(0.1)
send_interrupt()
@solid(config_schema={"tempfile": Field(String)})
def write_a_file(context):
with open(context.solid_config["tempfile"], "w") as ff:
ff.write("yup")
start_time = time.time()
while (time.time() - start_time) < 30:
time.sleep(0.1)
raise Exception("Timed out")
@solid
def should_not_start(_context):
assert False
@pipeline
def write_files_pipeline():
write_a_file.alias("write_1")()
write_a_file.alias("write_2")()
write_a_file.alias("write_3")()
write_a_file.alias("write_4")()
should_not_start.alias("x_should_not_start")()
should_not_start.alias("y_should_not_start")()
should_not_start.alias("z_should_not_start")()
def test_single_proc_interrupt():
@pipeline
def write_a_file_pipeline():
write_a_file()
with safe_tempfile_path() as success_tempfile:
        # launch a thread that waits until the file is written to launch an interrupt
Thread(target=_send_kbd_int, args=([success_tempfile],)).start()
results = []
try:
# launch a pipeline that writes a file and loops infinitely
# next time the launched thread wakes up it will send a keyboard
# interrupt
for result in execute_pipeline_iterator(
write_a_file_pipeline,
run_config={"solids": {"write_a_file": {"config": {"tempfile": success_tempfile}}}},
):
results.append(result.event_type)
assert False # should never reach
except KeyboardInterrupt:
pass
assert DagsterEventType.STEP_FAILURE in results
assert DagsterEventType.PIPELINE_FAILURE in results
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_interrupt_multiproc():
with seven.TemporaryDirectory() as tempdir:
with instance_for_test_tempdir(tempdir) as instance:
file_1 = os.path.join(tempdir, "file_1")
file_2 = os.path.join(tempdir, "file_2")
file_3 = os.path.join(tempdir, "file_3")
file_4 = os.path.join(tempdir, "file_4")
# launch a thread that waits until the file is written to launch an interrupt
Thread(target=_send_kbd_int, args=([file_1, file_2, file_3, file_4],)).start()
results = []
try:
# launch a pipeline that writes a file and loops infinitely
# next time the launched thread wakes up it will send a keyboard
# interrupt
for result in execute_pipeline_iterator(
reconstructable(write_files_pipeline),
run_config={
"solids": {
"write_1": {"config": {"tempfile": file_1}},
"write_2": {"config": {"tempfile": file_2}},
"write_3": {"config": {"tempfile": file_3}},
"write_4": {"config": {"tempfile": file_4}},
},
"execution": {"multiprocess": {"config": {"max_concurrent": 4}}},
"intermediate_storage": {"filesystem": {}},
},
instance=instance,
):
results.append(result)
assert False # should never reach
except (DagsterExecutionInterruptedError, KeyboardInterrupt):
pass
assert [result.event_type for result in results].count(
DagsterEventType.STEP_FAILURE
) == 4
assert DagsterEventType.PIPELINE_FAILURE in [result.event_type for result in results]
def test_interrupt_resource_teardown():
called = []
cleaned = []
@resource
def resource_a(_):
try:
called.append("A")
yield "A"
finally:
cleaned.append("A")
@solid(config_schema={"tempfile": Field(String)}, required_resource_keys={"a"})
def write_a_file_resource_solid(context):
with open(context.solid_config["tempfile"], "w") as ff:
ff.write("yup")
while True:
time.sleep(0.1)
@pipeline(mode_defs=[ModeDefinition(resource_defs={"a": resource_a})])
def write_a_file_pipeline():
write_a_file_resource_solid()
with safe_tempfile_path() as success_tempfile:
        # launch a thread that waits until the file is written to launch an interrupt
Thread(target=_send_kbd_int, args=([success_tempfile],)).start()
results = []
try:
# launch a pipeline that writes a file and loops infinitely
# next time the launched thread wakes up it will send a keyboard
# interrupt
for result in execute_pipeline_iterator(
write_a_file_pipeline,
run_config={
"solids": {
"write_a_file_resource_solid": {"config": {"tempfile": success_tempfile}}
}
},
):
results.append(result.event_type)
assert False # should never reach
except KeyboardInterrupt:
pass
assert DagsterEventType.STEP_FAILURE in results
assert DagsterEventType.PIPELINE_FAILURE in results
assert "A" in cleaned
def _send_interrupt_to_self():
os.kill(os.getpid(), signal.SIGINT)
start_time = time.time()
while not check_received_delayed_interrupt():
time.sleep(1)
if time.time() - start_time > 15:
raise Exception("Timed out waiting for interrupt to be received")
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_delay_interrupt():
outer_interrupt = False
inner_interrupt = False
try:
with delay_interrupts():
try:
_send_interrupt_to_self()
except KeyboardInterrupt:
inner_interrupt = True
except KeyboardInterrupt:
outer_interrupt = True
assert outer_interrupt
assert not inner_interrupt
# Verify standard interrupt handler is restored
standard_interrupt = False
try:
_send_interrupt_to_self()
except KeyboardInterrupt:
standard_interrupt = True
assert standard_interrupt
outer_interrupt = False
inner_interrupt = False
# No exception if no signal thrown
try:
with delay_interrupts():
try:
time.sleep(5)
except KeyboardInterrupt:
inner_interrupt = True
except KeyboardInterrupt:
outer_interrupt = True
assert not outer_interrupt
assert not inner_interrupt
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_raise_interrupts_immediately_no_op():
with raise_interrupts_immediately():
try:
_send_interrupt_to_self()
except KeyboardInterrupt:
standard_interrupt = True
assert standard_interrupt
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_interrupt_inside_nested_delay_and_raise():
interrupt_inside_nested_raise = False
interrupt_after_delay = False
try:
with delay_interrupts():
with raise_interrupts_immediately():
try:
_send_interrupt_to_self()
except KeyboardInterrupt:
interrupt_inside_nested_raise = True
except KeyboardInterrupt:
interrupt_after_delay = True
assert interrupt_inside_nested_raise
assert not interrupt_after_delay
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_interrupt_after_nested_delay_and_raise():
interrupt_inside_nested_raise = False
interrupt_after_delay = False
try:
with delay_interrupts():
with raise_interrupts_immediately():
try:
time.sleep(5)
except KeyboardInterrupt:
interrupt_inside_nested_raise = True
_send_interrupt_to_self()
except KeyboardInterrupt:
interrupt_after_delay = True
assert not interrupt_inside_nested_raise
assert interrupt_after_delay
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_raise_delayed_interrupts():
interrupt_from_check = False
interrupt_after_delay = False
try:
with delay_interrupts():
_send_interrupt_to_self()
try:
raise_delayed_interrupts()
except KeyboardInterrupt:
interrupt_from_check = True
except KeyboardInterrupt:
interrupt_after_delay = True
assert interrupt_from_check
assert not interrupt_after_delay
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_calling_raise_interrupts_immediately_also_raises_any_delayed_interrupts():
interrupt_from_raise_interrupts_immediately = False
interrupt_after_delay = False
try:
with delay_interrupts():
_send_interrupt_to_self()
try:
with raise_interrupts_immediately():
pass
except KeyboardInterrupt:
interrupt_from_raise_interrupts_immediately = True
except KeyboardInterrupt:
interrupt_after_delay = True
assert interrupt_from_raise_interrupts_immediately
assert not interrupt_after_delay
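# Sketch of the usage pattern the tests above exercise; _run_protected is an
# illustrative helper, not a dagster API.
def _run_protected(work_fn):
    """Run work_fn with KeyboardInterrupt delayed until it finishes.
    A SIGINT received while work_fn runs is recorded but only re-raised as
    KeyboardInterrupt once the with-block exits, so work_fn always completes.
    """
    with delay_interrupts():
        return work_fn()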
|
streaming.py
|
"""Sender--Receiver pairs for socket communication."""
from __future__ import absolute_import, division, print_function
from builtins import str
from builtins import object
import sys
import queue
import threading
import time
import socket
from socketserver import TCPServer, BaseRequestHandler
from .tools import QuitWithResources
class Sender(object):
"""Generic sender class.
    The sender is a server that runs asynchronously and sends any data that is
    passed to its send() function. It never closes on its own; the program is
    expected to terminate with a Ctrl-C, as often happens in long trainings.
"""
QUEUE_SIZE = 20
def __init__(self, msg_length, port, wait=False):
"""Initialize.
:param msg_length: the fixed length of messages (bytes).
:param port: (int) a port to use for incoming requests.
:param wait: if False, a send() returns immediately; if True,
send() waits if there are too many messages still to be sent.
"""
# Store
self.MSG_LENGTH = msg_length
self._port = port
# Create connection
self.server = Sender.OneRequestTCPServer(
("0.0.0.0", port), Sender.RequestHandler)
# Data to send
self._data_queue = queue.Queue(self.QUEUE_SIZE if wait else 0)
self.server._data_queue = self._data_queue
def start(self):
"""Start sending messages on queue."""
# Finally close
def close():
self.server.server_close()
print("\nSender closed")
QuitWithResources.add("Sender:" + str(self._port), close)
thread = threading.Thread(target=self.server.serve_forever)
thread.daemon = True
thread.start()
while not self.server.is_serving:
time.sleep(0.1)
def send(self, data):
"""Send data asynchronously.
:param data: binary data
:return: True if the data was correctly pushed to the sending queue
"""
# Checks
if not isinstance(data, bytes):
raise TypeError("Can only send bytes")
if len(data) != self.MSG_LENGTH:
raise ValueError("Message with the wrong length")
if not self.server.is_serving:
return False
# Send
self._data_queue.put(data, block=True)
return True
class OneRequestTCPServer(TCPServer):
"""Restrict to only one connection."""
request_queue_size = 1
allow_reuse_address = True
is_serving = False
timeout = None
def handle_error(self, request, client_address):
"""Stop the server on broken connection."""
print("Broken connection", file=sys.stderr)
self.server_close()
self.is_serving = False
def serve_forever(self):
"""Forward."""
self.is_serving = True
TCPServer.serve_forever(self)
class RequestHandler(BaseRequestHandler):
"""This actually sends data to the client who requested."""
def handle(self):
"""Send."""
while True:
data = self.server._data_queue.get(block=True, timeout=None)
try:
self.request.sendall(data)
except OSError:
break
class Receiver(object):
"""Generic receiver class."""
QUEUE_SIZE = 20
def __init__(self, msg_length, ip, port, wait=False):
"""Initialize.
:param msg_length: the fixed length of messages (bytes)
:param ip: ip address of the sender (str)
:param port: port of the sender (int)
        :param wait: if True and receive() is not called often enough, the
        receiver stops accepting new messages. This is only useful with a
        Sender that also waits.
"""
self.MSG_LENGTH = msg_length
self.ip = ip
self.port = port
# Create connection
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Received data
self._data_queue = queue.Queue(self.QUEUE_SIZE if wait else 0)
def start(self):
"""Start receiving messages to a queue."""
# Connect
self.sock.connect((self.ip, self.port))
self.sock.settimeout(None)
# Start receiving
thread = threading.Thread(target=self._always_receive)
thread.daemon = True
thread.start()
self.receiving_thread = thread
def _always_receive(self):
"""Continuously receive data; internal use."""
while True:
# Receive a complete message
chunks = []
remaining_bytes = self.MSG_LENGTH
while remaining_bytes > 0:
# Read
chunk = self.sock.recv(min(remaining_bytes, 2048))
if chunk == b"":
print("Closed", file=sys.stderr)
self._data_queue.put(None, block=True) # Signal EOT
return
chunks.append(chunk)
remaining_bytes -= len(chunk)
# Return
msg = b"".join(chunks)
self._data_queue.put(msg, block=True)
def receive(self, wait=False):
"""Return a message received.
:param wait: if true, waits until a complete message is received
:return: a bytes object containing the message, or None if there are
no new messages
:raises: IOError at the end of transmission
"""
if not wait and self._data_queue.empty():
return None
        # note: this is safe, because I must be the only consumer
msg = self._data_queue.get(block=wait, timeout=None)
if msg is None:
raise IOError("End Of Transmission")
return msg
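# Minimal loopback sketch (not part of the module's API): one Sender and one
# Receiver exchanging a single fixed-length message over localhost. The port
# and payload below are illustrative.
def _loopback_example(port=9999):
    msg = b"x" * 16
    sender = Sender(msg_length=len(msg), port=port)
    sender.start()
    receiver = Receiver(msg_length=len(msg), ip="127.0.0.1", port=port)
    receiver.start()
    sender.send(msg)
    # Block until the complete 16-byte message has been reassembled
    return receiver.receive(wait=True)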
|
test_app.py
|
import unittest
import os
import flask
from flask_mab import BanditMiddleware,add_bandit,choose_arm,reward_endpt
import flask_mab.storage
from flask_mab.bandits import EpsilonGreedyBandit
from werkzeug.http import parse_cookie
import json
from utils import makeBandit
from threading import Thread
from multiprocessing import Pool
from copy import copy
from queue import Queue
class MABTestCase(unittest.TestCase):
def setUp(self):
banditStorage = flask_mab.storage.JSONBanditStorage('./bandits.json')
app = flask.Flask('test_app')
flask_mab.BanditMiddleware(app)
app.add_bandit('color_button',makeBandit("EpsilonGreedyBandit",epsilon=0.1))
app.debug = True
@app.route("/")
def root():
return "Hello"
@app.route("/show_btn_decorated")
@choose_arm("color_button")
def assign_arm_decorated(color_button):
return flask.make_response("assigned an arm")
@app.route("/reward_decorated")
@reward_endpt("color_button",1.0)
def reward_decorated():
return flask.make_response("awarded the arm")
self.app = app
self.app.config["MAB_DEBUG_HEADERS"] = True
self.app_client = app.test_client()
def test_routing(self):
rv = self.app_client.get("/")
assert "Hello".encode() in rv.data
def test_suggest_decorated(self):
rv = self.app_client.get("/show_btn_decorated")
assert parse_cookie(rv.headers["Set-Cookie"])["MAB"]
assert "X-MAB-Debug" in rv.headers.keys()
chosen_arm = self.get_arm(rv.headers)["color_button"]
assert self.app.extensions['mab'].bandits["color_button"][chosen_arm]["pulls"] > 0
assert json.loads(parse_cookie(rv.headers["Set-Cookie"])["MAB"])["color_button"] == chosen_arm
def test_from_cookie_reward_decorated(self):
first_req = self.app_client.get("/show_btn_decorated")
assert "X-MAB-Debug" in first_req.headers.keys()
chosen_arm = json.loads(parse_cookie(first_req.headers["Set-Cookie"])["MAB"])["color_button"]
self.app_client.get("/reward_decorated")
assert self.app.extensions['mab'].bandits["color_button"][chosen_arm]["reward"] > 0
def get_arm(self,headers):
key_vals = [h.strip() for h in headers["X-MAB-Debug"].split(';')[1:]]
return dict([tuple(tup.split(":")) for tup in key_vals])
def test_new_session(self):
first_req = self.app_client.get("/show_btn_decorated")
assert first_req.headers['X-MAB-Debug'].split(';')[0].strip() == 'STORE'
self.app_client.cookie_jar.clear()
second_req = self.app_client.get("/show_btn_decorated")
assert second_req.headers['X-MAB-Debug'].split(';')[0].strip() == 'STORE'
def test_repeating_session(self):
first_req = self.app_client.get("/show_btn_decorated")
for i in range(30):
req = self.app_client.get("/show_btn_decorated")
assert req.headers['X-MAB-Debug'].split(';')[0].strip() == 'SAVED'
def test_concurrency(self):
"""Test that concurrent clients do not get confused
bandit arms
"""
self.app.extensions['mab'].bandit_storage.flush()
def request_worker(test,iden,q):
try:
client = test.app.test_client()
first_req = client.get("/show_btn_decorated")
chosen_arm = json.loads(parse_cookie(first_req.headers["Set-Cookie"])["MAB"])["color_button"]
assert first_req.headers['X-MAB-Debug'].split(';')[0].strip() == 'STORE'
for i in range(400):
req = client.get("/show_btn_decorated")
#TODO: refactor this to regex
assert req.headers['X-MAB-Debug'].split(';')[1].split(':')[1] == chosen_arm
assert req.headers['X-MAB-Debug'].split(';')[0].strip() == 'SAVED'
client.cookie_jar.clear()
final_req = client.get("/show_btn_decorated")
assert final_req.headers['X-MAB-Debug'].split(';')[0].strip() == 'STORE'
q.put(True)
            except AssertionError as e:
q.put(e)
jobs = []
q = Queue()
for i in range(4):
jobs.append(Thread(target=request_worker, args=(self,i,q)))
        for job in jobs:
            job.start()
        for job in jobs:
            job.join()
while not q.empty():
val = q.get()
if isinstance(val,AssertionError):
raise val
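# The X-MAB-Debug header parsed above has the shape "STORE; color_button:<arm>"
# or "SAVED; color_button:<arm>". A standalone sketch of the same parsing done
# by get_arm, usable outside the TestCase; the arm names are whatever the
# bandit assigns.
def parse_mab_debug_header(value):
    status, *pairs = [part.strip() for part in value.split(';')]
    return status, dict(pair.split(':', 1) for pair in pairs if pair)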
if __name__ == '__main__':
unittest.main()
|
rbacCollectionTest.py
|
from membase.api.rest_client import RestConnection
import urllib.request, urllib.parse, urllib.error
import json
from remote.remote_util import RemoteMachineShellConnection
import subprocess
import socket
import fileinput
import sys
from subprocess import Popen, PIPE
from basetestcase import BaseTestCase
from couchbase.cluster import Cluster, ClusterOptions
from couchbase_core.cluster import PasswordAuthenticator
from sdk_client3 import SDKClient
from collection.collections_cli_client import CollectionsCLI
from collection.collections_rest_client import CollectionsRest
from collection.collections_stats import CollectionsStats
from security.rbacmain import rbacmain
from lib.membase.api.rest_client import RestHelper
from security.x509main import x509main
from copy import deepcopy
from threading import Thread, Event
from security.ldap_user import LdapUser
from security.ldap_group import LdapGroup
from security.internal_user import InternalUser
from security.external_user import ExternalUser
from security.ldapGroupBase import ldapGroupBase
class ServerInfo():
def __init__(self,
ip,
port,
ssh_username,
ssh_password,
ssh_key=''):
self.ip = ip
self.rest_username = ssh_username
self.rest_password = ssh_password
self.port = port
self.ssh_key = ssh_key
class rbacCollectionTest(BaseTestCase):
def suite_setUp(self):
pass
def suite_tearDown(self):
pass
def setUp(self):
super(rbacCollectionTest, self).setUp()
self.rest = CollectionsRest(self.master)
self.cli = CollectionsCLI(self.master)
self.rest_client = RestConnection(self.master)
self.test_scope = self.input.param("test_scope",'collection')
self.update_role = self.input.param("update_role",False)
self.auth_type = self.input.param("auth_type",'users')
self.sasl_mech = None
self.certpath = None
if self.auth_type == 'InternalGroup':
ldapGroupBase().create_int_group("group1", ['user1'], '', [''], self.master, user_creation=True)
ldapGroupBase().create_int_group("group2", ['user2'], '', [''], self.master, user_creation=True)
elif self.auth_type == "externalUser":
ldapGroupBase().create_ldap_config(self.master)
LdapUser('user1','password',self.master).user_setup()
LdapUser('user2','password',self.master).user_setup()
self.sasl_mech = 'PLAIN'
temp_cert = self.rest_client.get_cluster_ceritificate()
test_file = open("/tmp/server.crt",'w')
test_file.write(temp_cert)
test_file.close()
self.certpath = '/tmp/server.crt'
def tearDown(self):
super(rbacCollectionTest, self).tearDown()
self.user_cleanup()
    #Remove all users and groups after every test case run
def user_cleanup(self):
rest_client = RestConnection(self.master)
self.user_delete(rest_client)
def download_cert(self):
tmp_path = "/tmp/server.pem"
for servers in self.servers:
cli_command = "ssl-manage"
remote_client = RemoteMachineShellConnection(servers)
options = "--regenerate-cert={0}".format(tmp_path)
output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options,
cluster_host=servers.ip, user="Administrator",
password="password")
return tmp_path
#Remove users - based on domain
def user_delete_username(self,rest_client,domain=None,username=None):
try:
if domain == 'local':
rest_client.delete_builtin_user(username)
else:
rest_client.delete_external_user(username)
except:
self.log.info ("User deletion failed, probably first time run")
#Remove groups
def group_delete_username(self,rest_client,username=None):
try:
rest_client.delete_group(username)
except:
self.log.info ("Group deletion failed, probably first time run")
#Delete all groups and users in the system
def user_delete(self,rest_client):
status, content, header = rbacmain(self.master)._retrieve_user_roles()
content = json.loads(content)
for item in content:
self.user_delete_username(rest_client,domain=item['domain'],username=item['id'])
status, content = rest_client.get_group_list()
for item in content:
if 'ldap_group_ref' in item:
self.log.info("ldap group")
else:
self.group_delete_username(rest_client,username=item['id'])
#Create a single user
def create_collection_read_write_user(self, user, bucket, scope, collection, role=None):
if self.auth_type == 'users':
self.user_delete_username(RestConnection(self.master),'local',user)
else:
self.user_delete_username(RestConnection(self.master),'external',user)
        if collection is None and scope is None:
            payload = "name=" + user + "&roles=" + role + "[" + bucket
        elif collection is None:
            payload = "name=" + user + "&roles=" + role + "[" + bucket + ":" + scope
        else:
            payload = "name=" + user + "&roles=" + role + "[" + bucket + ":" + scope + ":" + collection
        if self.auth_type == 'externalUser':
payload = payload + "]"
ExternalUser(user, payload, self.master).user_setup()
else:
payload = payload + "]&password=password"
status, content, header = rbacmain(self.master, "builtin")._set_user_roles(user_name=user, payload=payload)
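    # For illustration, the payloads built above look like (names are examples):
    #   collection level: name=user1&roles=data_writer[default:scope1:collection1]&password=password
    #   scope level:      name=user1&roles=data_writer[default:scope1]&password=password
    #   bucket level:     name=user1&roles=data_writer[default]&password=password
    # For externalUser auth the trailing &password=password part is omitted.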
#Create user using a list, update user roles
def create_collection_read_write_user_list(self, user_details, scope_scope, scope_collection,update=False):
self.log.info ("User details are -= {0}".format(user_details))
user_list = []
user_list_role = []
for item in user_details:
if scope_collection is None and scope_scope is None:
payload = item['role'] + "[" + item['bucket'] + "]"
elif scope_collection is None:
payload = item['role'] + "[" + item['bucket'] + ":" + item['scope'] + "]"
else:
payload = item['role'] + "[" + item['bucket'] + ":" + item['scope'] + ":" + item['collection'] + "]"
if item['user'] in user_list:
user_index = user_list.index(item['user'])
final_role = (user_list_role[user_index])['role'] + ',' + payload
user_list_role[user_index]['role'] = final_role
else:
user_list.append(item['user'])
user_list_role.append({"user":item['user'],"role":payload})
for item in user_list_role:
if update:
user, final_role = self.get_current_roles(item['user'])
final_payload = "name=" + item['user'] + "&roles=" + item['role'] + "," + final_role
else:
final_payload = "name=" + item['user'] + "&roles=" + item['role']
self.log.info (final_payload)
if self.auth_type == 'users':
                final_payload = final_payload + "&password=password"
self.user_delete_username(RestConnection(self.master), 'local', item['user'])
status, content, header = rbacmain(self.master, "builtin")._set_user_roles(user_name=item['user'], payload=final_payload)
elif self.auth_type == 'InternalGroup':
self.group_delete_username(RestConnection(self.master), item['user'])
ldapGroupBase().add_role_group(item['user'], item['role'], None, self.master)
if item['user'] == 'group1':
ldapGroupBase().create_grp_usr_internal(['user1'],self.master,roles=[''], groups=item['user'])
elif item['user'] == 'group2':
ldapGroupBase().create_grp_usr_internal(['user2'],self.master,roles=[''], groups=item['user'])
elif self.auth_type == 'externalUser':
self.user_delete_username(RestConnection(self.master), 'external', item['user'])
ExternalUser(item['user'], final_payload, self.master).user_setup()
#Update roles for users
def update_roles(self,user_details, scope_scope, scope_collection,update=False):
user_list = []
user_list_role = []
for item in user_details:
            if scope_collection is None and scope_scope is None:
                payload = item['role'] + "[" + item['bucket'] + "]"
            elif scope_collection is None:
                payload = item['role'] + "[" + item['bucket'] + ":" + item['scope'] + "]"
            else:
                payload = item['role'] + "[" + item['bucket'] + ":" + item['scope'] + ":" + item['collection'] + "]"
if item['user'] in user_list:
user_index = user_list.index(item['user'])
final_role = (user_list_role[user_index])['role'] + ',' + payload
user_list_role[user_index]['role'] = final_role
else:
user_list.append(item['user'])
user_list_role.append({"user":item['user'],"role":payload})
for item in user_list_role:
role_list = item['role'].split(',')
for role_item in role_list:
user, final_role = self.get_current_roles(item['user'])
if ":*" in final_role:
final_role = final_role.replace(":*","")
final_role = final_role + role_item
final_payload = "name=" + item['user'] + "&roles=" + final_role + "&password=password"
status, content, header = rbacmain(self.master, "builtin")._set_user_roles(user_name=item['user'], payload=final_payload)
user, final_role = self.get_current_roles(item['user'])
#Get current roles
def get_current_roles(self, user):
status, content, header = rbacmain(self.master)._retrieve_user_roles()
content = json.loads(content)
final_role = ''
for item in content:
if item['id'] == user:
for item_list in item['roles']:
final_role = item_list['role'] + "[" + item_list['bucket_name'] + ":" + item_list['scope_name'] + ":" + item_list['collection_name'] + "]" + "," + final_role
return user, final_role
#Create SDK connection
def collectionConnection(self, scope, collection, bucket='default', username=None):
self.log.info("Scope {0} Collection {1} username {2}".format(scope,collection,username))
if self.certpath is None:
scheme = "couchbase"
else:
scheme = "couchbases"
host=self.master.ip
if self.master.ip == "127.0.0.1":
scheme = "http"
host="{0}:{1}".format(self.master.ip, self.master.port)
try:
if username is None:
client = SDKClient(scheme=scheme, hosts = [host], bucket = bucket, username=collection, password = 'password', certpath=self.certpath, sasl_mech=self.sasl_mech)
else:
client = SDKClient(scheme=scheme, hosts = [host], bucket = bucket, username=username, password = 'password', certpath= self.certpath, sasl_mech=self.sasl_mech)
return client
except Exception as e:
self.log.info (e.result.err)
#Input role details. Creates bucket, scope and collection.
    #Creates roles scoped as [bucket:*], [bucket:scope:*] or [bucket:scope:collection], based on access
    #Creates users and, based on role (data_reader or data_writer), validates the operation
def check_permission_multiple_roles(self, role_details, access, update=False):
self.log.info ("Roles details are --- {0}".format(role_details))
for details in role_details:
scope = details['scope']
collection = details['collection']
bucket = details['bucket']
self.rest_client.delete_bucket(bucket)
self.rest_client.create_bucket(bucket=bucket, ramQuotaMB=100)
self.sleep(10)
self.rest.create_scope_collection(bucket=bucket, scope=scope, collection=collection)
self.sleep(10)
count = 1
if (access['collection'] is True) and (access['scope'] is True):
if update:
self.update_roles(role_details, True,True,True)
else:
self.create_collection_read_write_user_list(role_details, True,True)
elif (access['collection'] is False) and (access['scope'] is True):
if update:
self.update_roles(role_details, True,None,True)
else:
self.create_collection_read_write_user_list(role_details, True,None)
elif (access['collection'] is False) and (access['scope'] is False):
if update:
self.update_roles(role_details, None,None,True)
else:
self.create_collection_read_write_user_list(role_details, None,None)
for details in role_details:
count = count + 1
scope = details['scope']
collection = details['collection']
bucket = details['bucket']
role = details['role']
user = details['user']
if user == 'group1':
user = 'user1'
elif user == 'group2':
user = 'user2'
try:
if details['role'] == 'data_writer':
client = self.collectionConnection(scope, collection, bucket, user)
result = client.insert(str(count), "{1:2}",scope=scope,collection=collection)
errorResult = True if result.error == 0 else False
self.assertTrue(errorResult,'Issue with insertion')
if details['role'] == 'data_reader':
client = self.collectionConnection(scope, collection, bucket, 'Administrator')
result = client.insert(str(count), "{2:2}",scope=scope,collection=collection)
client = self.collectionConnection(scope, collection, bucket, user)
result = client.get(str(count), scope=scope, collection=collection)
self.log.info (result[2])
except Exception as e:
self.log.info (e.result.errstr)
self.assertFalse(True,"Error message is -{0}".format(e.result.errstr))
    #Negative test case. Check with mismatched scope and collection names
#Validate error messages for incorrect scope and collections
def check_permission_multiple_roles_negative(self, role_details, access, updateScope=True, updateCollection=True):
self.log.info(" Scope value is -{0} and Collection value is {1}".format(updateScope,updateCollection))
self.log.info ("Original Role details are -{0}".format(role_details))
scopes = ['scope1','scope2']
collections = ['collection1','collection2']
roles = ['data_writer','data_reader']
original_role_details = deepcopy(role_details)
for details in role_details:
scope = details['scope']
collection = details['collection']
bucket = details['bucket']
self.rest_client.delete_bucket(bucket)
self.rest_client.create_bucket(bucket=bucket, ramQuotaMB=100)
self.sleep(10)
self.rest.create_scope_collection(bucket=bucket, scope=scope, collection=collection)
self.sleep(10)
count = 1
for details in role_details:
if updateScope == True:
for scope_item in scopes:
if details['scope'] != scope_item:
details['scope'] = scope_item
break
if updateCollection == True:
for collections_items in collections:
if details['collection'] != collections_items:
details['collection'] = collections_items
break
if updateScope == False and updateCollection == False:
for roles_items in roles:
if details['role'] != roles_items:
details['role'] = roles_items
break
self.log.info ("Updated Role details are -{0}".format(role_details))
if (access['collection'] is True) and (access['scope'] is True):
self.create_collection_read_write_user_list(original_role_details, True,True,True)
elif (access['collection'] is False) and (access['scope'] is True):
self.create_collection_read_write_user_list(original_role_details, True,True,None)
elif (access['collection'] is False) and (access['scope'] is False):
self.create_collection_read_write_user_list(original_role_details, True,None,None)
for details in role_details:
count = count + 1
scope = details['scope']
collection = details['collection']
bucket = details['bucket']
role = details['role']
user = details['user']
try:
if details['role'] == 'data_writer':
client = self.collectionConnection(scope, collection, bucket, user)
result = client.insert(str(count), "{1:2}",scope=scope,collection=collection)
if details['role'] == 'data_reader':
client = self.collectionConnection(scope, collection, bucket, 'Administrator')
result = client.insert(str(count), "{2:2}",scope=scope,collection=collection)
client = self.collectionConnection(scope, collection, bucket, user)
result = client.get(str(count), scope=scope, collection=collection)
except Exception as e:
self.log.info (e.result.errstr)
if (updateScope is True ):
errorResult = True if (e.result.errstr == 'LCB_ERR_SCOPE_NOT_FOUND (217)') else False
                if (updateCollection is True and updateScope is False) or (updateCollection is False and updateScope is False):
errorResult = True if (e.result.errstr == 'LCB_ERR_COLLECTION_NOT_FOUND (211)') else False
self.assertTrue(errorResult,"Error")
def check_incorrect_scope_collection(self):
'''
This test is for insertion to incorrect scope and collection.
Validate error messages for scope and collection
'''
if not RestHelper(self.rest_client).bucket_exists("testbucket"):
self.rest_client.create_bucket(bucket="testbucket", ramQuotaMB=100)
self.rest.delete_collection("testbucket", "testscope", "testcollection")
self.rest.delete_scope("testbucket", "testscope")
self.sleep(10)
self.rest.create_scope_collection(bucket="testbucket", scope="testscope", collection="testcollection")
self.sleep(10)
self.create_collection_read_write_user("testuser", "testbucket", "testscope", "testcollection", role='data_writer')
try:
client = self.collectionConnection("testscope1", "testcollection1", "testbucket", 'testuser')
result = client.insert("testdoc", "{1:2}",scope="testscope1",collection="testcollection1")
except Exception as e:
self.log.info (e.result.errstr)
errorResult = True if (e.result.errstr == 'LCB_ERR_SCOPE_NOT_FOUND (217)') else False
self.assertTrue(errorResult,"Error")
try:
client = self.collectionConnection("testscope", "testcollection1", "testbucket", 'testuser')
result = client.insert("testdoc", "{1:2}",scope="testscope",collection="testcollection1")
except Exception as e:
errorResult = True if (e.result.errstr == 'LCB_ERR_COLLECTION_NOT_FOUND (211)') else False
self.assertTrue(errorResult,"Error")
try:
client = self.collectionConnection("testscope", "testcollection", "testbucket", 'testuser')
result = client.insert("testdoc", "{1:2}",scope="testscope",collection="testcollection")
errorResult = True if result.error == 0 else False
self.assertTrue(errorResult,'Issue with insertion')
except Exception as e:
self.assertFalse(True, 'Issue with insertion with correct - {0}'.format(e.result.errstr))
    #Create a list of permissions with just one role attached
def create_roles_collections_single(self):
scopes = ['scope1','scope2']
collections = ['collection1','collection2']
roles = ['data_writer','data_reader']
if self.auth_type == 'users' or self.auth_type == 'externalUser':
users = ['user1','user2']
elif self.auth_type == 'InternalGroup':
users = ['group1','group2']
buckets = ['default']
final_roles = []
access = {}
final_return_roles=[]
final_roles=[]
for scope in scopes:
for collection in collections:
for user in users:
for role in roles:
for bucket in buckets:
final_role = {"scope":scope,"collection":collection,'user':user,'role':role,'bucket':bucket}
final_roles.append([final_role])
final_return_roles.extend(final_roles)
return final_return_roles
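    # Each entry returned above is a single-role list, e.g.
    # [{'scope': 'scope1', 'collection': 'collection1', 'user': 'user1', 'role': 'data_writer', 'bucket': 'default'}]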
    #Create a list of role sets covering combinations of users, roles, scopes and collections
def create_roles_collections_multiple(self):
scopes = ['scope1','scope2']
collections = ['collection1','collection2']
roles = ['data_writer','data_reader']
if self.auth_type == 'users' or self.auth_type == 'externalUser':
users = ['user1','user2']
elif self.auth_type == 'InternalGroup':
users = ['group1','group2']
buckets = ['default']
final_roles = []
access = {}
final_return_roles=[]
final_roles=[]
for scope in scopes:
for collection in collections:
for user in users:
for role in roles:
for bucket in buckets:
final_role = {"scope":scope,"collection":collection,'user':user,'role':role,'bucket':bucket}
final_roles.append(final_role)
# [{'scope': 'scope1', 'collection': 'collection1', 'user': 'user1', 'role': 'data_writer', 'bucket': 'default'},
# {'scope': 'scope1', 'collection': 'collection1', 'user': 'user2', 'role': 'data_writer', 'bucket': 'default'}]
final_list1=[]
for collection in collections:
for role in roles:
for scope in scopes:
final_list = []
for user in users:
for final in final_roles:
if (role in final['role'] and collection in final['collection'] and scope in final['scope'] and user in final['user']):
final_list.append(final)
final_list1.append(final_list)
final_return_roles.extend(final_list1)
# [{'scope': 'scope1', 'collection': 'collection1', 'user': 'user1', 'role': 'data_writer', 'bucket': 'default'},
# {'scope': 'scope1', 'collection': 'collection1', 'user': 'user1', 'role': 'data_reader', 'bucket': 'default'}]
final_list1=[]
for collection in collections:
for user in users:
for scope in scopes:
final_list = []
for role in roles:
for final in final_roles:
if (role in final['role'] and collection in final['collection'] and scope in final['scope'] and user in final['user']):
final_list.append(final)
final_list1.append(final_list)
final_return_roles.extend(final_list1)
# [{'scope': 'scope1', 'collection': 'collection2', 'user': 'user1', 'role': 'data_reader', 'bucket': 'default'},
# {'scope': 'scope1', 'collection': 'collection2', 'user': 'user2', 'role': 'data_writer', 'bucket': 'default'}]
final_list1=[]
for user in users:
final_list = []
for role in roles:
for final in final_roles:
if (role in final['role'] and user in final['user']):
final_list.append((final))
if (role not in final['role'] and user not in final['user']):
final_list.append((final))
final_list1.append(final_list)
final_list_return1 = []
final_list_return2 = []
final_return_list1 = []
for list1 in final_list1:
for i in range(0,len(list1),2):
if (i % 2) == 0:
final_list_return1.append(list1[i])
for i in range(1,len(list1),2):
if (i % 2) != 0:
final_list_return2.append(list1[i])
for i in range(0,len(final_list_return1)):
final_return_list1.append([final_list_return1[i],final_list_return2[i]])
final_return_roles.extend(final_return_list1[0:8])
return(final_return_roles)
#Defines the scope of the role - bucket, bucket:scope, bucket:scope:collection
def getRoles(self,target):
access = {}
if target == 'collection':
access = {'bucket':True, 'scope':True, 'collection':True}
if target == 'scope':
access = {'bucket':True, 'scope':True, 'collection':False}
if target == 'bucket':
access = {'bucket':True, 'scope':False, 'collection':False}
return access
#Function to create roles for multiple buckets
def testDiffBucket(self):
scopes = ['scope1','scope2']
collections = ['collection1','collection2']
roles = ['data_writer','data_reader']
if self.auth_type == 'users' or self.auth_type == 'externalUser':
users = ['user1','user2']
else:
users = ['group1','group2']
buckets = ['first','second']
final_roles = []
access = {}
final_return_roles=[]
for scope in scopes:
for collection in collections:
for user in users:
for role in roles:
for bucket in buckets:
final_role = {"scope":scope,"collection":collection,'user':user,'role':role,'bucket':bucket}
final_roles.append(final_role)
final_list1=[]
for role in roles:
for collection in collections:
for role in roles:
for scope in scopes:
final_list = []
for bucket in buckets:
for final in final_roles:
if (role in final['role'] and bucket in final['bucket'] and scope in final['scope'] and user in final['user'] and collection in final['collection']):
final_list.append(final)
final_list1.append(final_list)
final_return_roles.extend(final_list1)
return final_return_roles
#Helper function to run doc ops in parallel and capture exceptions during writes
def createBulkDocuments(self, client, scope, collection,start_num=0, end_num=10000, key1='demo_key'):
value1 = {
"name":"demo_value",
"lastname":'lastname',
"areapin":'',
"preference":'veg',
"type":''
}
for x in range (start_num, end_num):
value = value1.copy()
key = 'demo_key'
key = key + str(x)
for key1 in value:
if value[key1] == 'type' and x % 2 == 0:
value['type'] = 'odd'
else:
value['type'] = 'even'
value[key1] = value[key1] + str(x)
value['id'] = str(x)
try:
result = client.insert(key, value,scope=scope,collection=collection)
except Exception as e:
errorResult = True if (e.result.errstr == 'LCB_ERR_AUTHENTICATION_FAILURE (206)') else False
self.assertTrue(errorResult,"Incorrect error message for connection")
raise
#Helper function for creating bucket, collection, scope and users
def create_users_collections(self, bucket, number_collections, prefix='test'):
final_list = []
self.rest_client.create_bucket(bucket=bucket, ramQuotaMB=100)
try:
for i in range(0, number_collections):
self.rest.create_scope_collection(bucket=bucket, scope=prefix + "scope" + str(i),
collection=prefix + "collection" + str(i))
self.sleep(10)
self.create_collection_read_write_user(prefix + "user" + str(i), bucket, prefix + "scope" + str(i),
prefix + "collection" + str(i), role="data_writer")
self.sleep(10)
final_list.append(
{'bucket': bucket, 'scope': prefix + 'scope' + str(i), 'collection': prefix + 'collection' + str(i),
'user': prefix + 'user' + str(i)})
return final_list
except Exception as e:
self.assertTrue(False, "Issue with creation of 1000 collection and users")
#Test for testing single role assignment per user
def test_check_single_role_collection_scope(self):
roleSet = self.create_roles_collections_single()
getScope = self.getRoles(self.test_scope)
for role in roleSet:
self.check_permission_multiple_roles(role,getScope)
#Test for testing multiple role assignment per user
def test_check_multiple_role_collection_scope(self):
roleSet = self.create_roles_collections_multiple()
getScope = self.getRoles(self.test_scope)
for role in roleSet:
self.check_permission_multiple_roles(role,getScope)
    #Test for multiple roles per user and update the roles one by one
def test_check_multiple_role_collection_scope_update_roles(self):
roleSet = self.create_roles_collections_multiple()
getScope = self.getRoles(self.test_scope)
for role in roleSet:
self.check_permission_multiple_roles(role,getScope,True)
    #Test for multiple roles for multiple buckets
def test_check_multiple_buckets(self):
roleSet = self.testDiffBucket()
getScope = self.getRoles(self.test_scope)
for role in roleSet:
self.check_permission_multiple_roles(role,getScope)
#Test for error messages when user tries to access scope and collections
#the user does not have access to
def test_incorrect_scope_collection(self):
self.check_incorrect_scope_collection()
#Defect - PYCBC-1014
#Test for error message when user tries to access scope and collections
    #the user does not have roles on. This test is for multiple roles on the same user
def test_negative_scope_collection(self):
roleSet = self.testDiffBucket()
getScope = self.getRoles(self.test_scope)
updateScope = self.input.param('updateScope',False)
updateCollection = self.input.param('updateCollection',False)
for role in roleSet:
self.check_permission_multiple_roles_negative(role, getScope, updateScope, updateCollection)
    #Test that roles for users are updated when collection and scope are deleted
#User is not deleted after scope and collections are deleted
def test_delete_collection_check_roles(self):
self.rest_client.create_bucket(bucket="testbucket", ramQuotaMB=100)
self.rest.create_scope_collection(bucket="testbucket", scope="testscope", collection="testcollection")
self.create_collection_read_write_user("testuser", "testbucket", "testscope", "testcollection", role="data_writer")
self.rest.delete_collection("testbucket", "testscope", "testcollection")
self.rest.delete_scope("testbucket", "testscope")
user, final_role = self.get_current_roles('testuser')
result = True if (final_role == '') else False
self.assertTrue(result,'Role is not empty after deletion of collection ')
result = True if (user == 'testuser') else False
self.assertTrue(result,'user is deleted after deletion of collection')
    #Test user roles are cleaned up after collection is deleted
def test_delete_recreated_collection_check_roles(self):
self.rest_client.create_bucket(bucket="testbucket", ramQuotaMB=100)
self.rest.create_scope_collection(bucket="testbucket", scope="testscope", collection="testcollection")
self.create_collection_read_write_user("testuser", "testbucket", "testscope", "testcollection", role="data_writer")
self.rest.delete_collection("testbucket", "testscope", "testcollection")
self.rest.delete_scope("testbucket", "testscope")
self.rest.create_scope_collection(bucket="testbucket", scope="testscope", collection="testcollection")
self.create_collection_read_write_user("testuser", "testbucket", "testscope", "testcollection", role="data_writer")
user, final_role = self.get_current_roles('testuser')
result = True if (final_role == '') else False
self.assertTrue(result,'Role is not empty after deletion of collection ')
result = True if (user == 'testuser') else False
self.assertTrue(result,'user is deleted after deletion of collection')
    #Check for errors when a collection is deleted while ops are running in parallel
def test_collection_deletion_while_ops(self):
self.rest_client.create_bucket(bucket="testbucket", ramQuotaMB=100)
self.rest.create_scope_collection(bucket="testbucket", scope="testscope", collection="testcollection")
self.sleep(10)
self.create_collection_read_write_user("testuser", "testbucket", "testscope", "testcollection", role="data_writer")
self.sleep(10)
client = self.collectionConnection("testscope", "testcollection", "testbucket", "testuser")
self.sleep(10)
try:
create_docs = Thread(name='create_docs', target=self.createBulkDocuments, args=(client, "testscope", "testcollection",))
create_docs.start()
except Exception as e:
self.log.info (e)
self.rest.delete_collection("testbucket", "testscope", "testcollection")
self.rest.delete_scope("testbucket", "testscope")
create_docs.join()
#Create 1000 collections and users
def create_hunderd_users_collections(self):
self.rest_client.create_bucket(bucket="testbucket", ramQuotaMB=self.bucket_size)
try:
for i in range(0,1000):
self.rest.create_scope_collection(bucket="testbucket", scope="testscope" + str(i), collection="testcollection" + str(i))
self.sleep(10)
self.create_collection_read_write_user("testuser" + str(i), "testbucket" , "testscope" + str(i), "testcollection" + str(i), role="data_writer")
self.sleep(10)
client = self.collectionConnection("testscope" + str(i), "testcollection" + str(i), "testbucket", "testuser" + str(i))
result = client.insert("key", "{2:2}",scope="testscope" + str(i),collection="testcollection" + str(i))
except Exception as e:
self.assertTrue(False,"Issue with creation of 1000 collection and users")
    #Create collections and users during rebalance in
def rebalance_in(self):
try:
final_items = self.create_users_collections('testbucket',10)
for item in final_items:
client = self.collectionConnection(item['scope'], item['collection'], item['bucket'],
item['user'])
result = client.insert("key", "{2:2}", scope=item['scope'], collection=item['collection'])
servs_inout = self.servers[self.nodes_init:]
self.log.info("{0}".format(servs_inout))
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], servs_inout, [])
final_items = self.create_users_collections('testbucket1', 10,'test2')
rebalance.result()
for item in final_items:
client = self.collectionConnection(item['scope'], item['collection'], item['bucket'],
item['user'])
result = client.insert("key1", "{2:2}", scope=item['scope'], collection=item['collection'])
except Exception as e:
self.assertTrue(False,"Issue with creation of 1000 collection and users")
#Create collections and users during rebalance out
def rebalance_out(self):
try:
final_items = self.create_users_collections('testbucket',10)
for item in final_items:
client = self.collectionConnection(item['scope'], item['collection'], item['bucket'],
item['user'])
result = client.insert("key", "{2:2}", scope=item['scope'], collection=item['collection'])
servs_inout = self.servers[-1]
self.log.info ("Value of server_in_out is {0}".format(servs_inout))
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [],servs_inout)
final_items = self.create_users_collections('testbucket1', 10,'test2')
rebalance.result()
for item in final_items:
client = self.collectionConnection(item['scope'], item['collection'], item['bucket'],
item['user'])
result = client.insert("key1", "{2:2}", scope=item['scope'], collection=item['collection'])
except Exception as e:
self.assertTrue(False,"Issue with creation of 1000 collection and users")
#Create collections and users during failover
def failover_out(self):
try:
final_items = self.create_users_collections('testbucket',1)
for item in final_items:
client = self.collectionConnection(item['scope'], item['collection'], item['bucket'],
item['user'])
result = client.insert("key", "{2:2}", scope=item['scope'], collection=item['collection'])
servs_inout = self.servers[-1]
self.log.info ("Value fo server_in_out is {0}".format(servs_inout))
fail_over_task = self.cluster.async_failover([self.master], failover_nodes=[servs_inout], graceful=False)
final_items = self.create_users_collections('testbucket1', 1,'test2')
fail_over_task.result()
for item in final_items:
client = self.collectionConnection(item['scope'], item['collection'], item['bucket'],
item['user'])
result = client.insert("key1", "{2:2}", scope=item['scope'], collection=item['collection'])
self.rest_client.set_recovery_type('ns_1@' + servs_inout.ip, "full")
self.rest_client.add_back_node('ns_1@' + servs_inout.ip)
final_items = self.create_users_collections('testbucket3', 1, 'test3')
for item in final_items:
client = self.collectionConnection(item['scope'], item['collection'], item['bucket'],
item['user'])
result = client.insert("key3", "{2:2}", scope=item['scope'], collection=item['collection'])
except Exception as e:
self.assertTrue(False,"Issue with creation of 1000 collection and users")
#Delete users while data loading in parallel
def test_user_deletion_while_ops(self):
self.rest_client.create_bucket(bucket="testbucket", ramQuotaMB=100)
self.rest.create_scope_collection(bucket="testbucket", scope="testscope", collection="testcollection")
self.sleep(10)
self.create_collection_read_write_user("testuser", "testbucket", "testscope", "testcollection",
role="data_writer")
self.sleep(30)
client = self.collectionConnection("testscope", "testcollection", "testbucket", "testuser")
self.sleep(10)
try:
create_docs = Thread(name='create_docs', target=self.createBulkDocuments,
args=(client, "testscope", "testcollection",))
create_docs.start()
except Exception as e:
self.log.info(e)
self.rest_client.delete_user_roles("testuser")
create_docs.join()
#Delete user and check for error when insert happens
def test_user_deletion_recreation(self):
self.rest_client.create_bucket(bucket="testbucket", ramQuotaMB=100)
self.rest.create_scope_collection(bucket="testbucket", scope="testscope", collection="testcollection")
self.sleep(10)
self.create_collection_read_write_user("testuser", "testbucket", "testscope", "testcollection",
role="data_writer")
client = self.collectionConnection("testscope", "testcollection", "testbucket", "testuser")
self.sleep(30)
result = client.insert("key1", "{2:2}", scope="testscope", collection="testcollection")
self.rest_client.delete_user_roles("testuser")
self.rest_client.add_set_builtin_user("testuser","name=testuser&roles=cluster_admin&password=password")
try:
result = client.insert("key1", "{2:2}", scope="testscope", collection="testcollection")
except Exception as e:
errorResult = (e.result.errstr == 'LCB_ERR_AUTHENTICATION_FAILURE (206)')
self.assertTrue(errorResult, "Incorrect error message for connection")
#Create users during rebalance while other data loading is happening
def rebalance_in_create_users(self):
num_threads = []
i = 1
try:
final_items = self.create_users_collections('testbucket',10)
for item in final_items:
client = self.collectionConnection(item['scope'], item['collection'], item['bucket'],
item['user'])
result = client.insert("key", "{2:2}", scope=item['scope'], collection=item['collection'])
servs_inout = self.servers[self.nodes_init:]
print ("Value fo server_in_out is {0}".format(servs_inout))
self.log.info("{0}".format(servs_inout))
for item in final_items:
client = self.collectionConnection(item['scope'], item['collection'], item['bucket'],
item['user'])
create_docs = Thread(name='create_docs', target=self.createBulkDocuments,
args=(client, item['scope'], item['collection'],i, i+ 1000,'demo_key'+item['collection'],))
num_threads.append(create_docs)
create_docs.start()
i = (i * 2) + 1000
for item in num_threads:
item.join()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], servs_inout, [])
for i in range(1, 10):
for j in range(1,20):
self.create_collection_read_write_user("testusernew" + str(j), "testbucket",
scope="testscope" + str(i), collection= "testcollection" + str(i),
role="data_writer")
rebalance.result()
except Exception as e:
self.assertTrue(False, "Issue with creating users during rebalance - Error message {0}".format(e))
#Delete users while rebalance in and data loading in parallel
def rebalance_in_delete_users(self):
num_threads = []
i = 1
try:
final_items = self.create_users_collections('testbucket',10)
for item in final_items:
client = self.collectionConnection(item['scope'], item['collection'], item['bucket'],
item['user'])
result = client.insert("key", "{2:2}", scope=item['scope'], collection=item['collection'])
for i in range(1, 10):
for j in range(1,20):
self.create_collection_read_write_user("testusernew" + str(j), "testbucket",
scope="testscope" + str(i), collection= "testcollection" + str(i),
role="data_writer")
servs_inout = self.servers[self.nodes_init:]
print ("Value fo server_in_out is {0}".format(servs_inout))
self.log.info("{0}".format(servs_inout))
for item in final_items:
client = self.collectionConnection(item['scope'], item['collection'], item['bucket'],
item['user'])
create_docs = Thread(name='create_docs', target=self.createBulkDocuments,
args=(client, item['scope'], item['collection'],i, i+ 1000,'demo_key'+item['collection'],))
num_threads.append(create_docs)
create_docs.start()
i = (i * 2) + 1000
for item in num_threads:
item.join()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], servs_inout, [])
for j in range(1,10):
self.rest_client.delete_user_roles("testusernew" + str(j))
rebalance.result()
except Exception as e:
self.assertTrue(False, "Issue with deleting users during rebalance - Error message {0}".format(e))
#Remove users from group and check
def add_remove_users_groups(self):
self.rest_client.delete_bucket("testbucket")
self.rest_client.create_bucket(bucket="testbucket", ramQuotaMB=100)
self.sleep(10)
self.rest.create_scope_collection(bucket="testbucket", scope="testscope", collection="testcollection")
self.sleep(10)
ldapGroupBase().add_role_group("testgroup", 'data_writer[testbucket:testscope:testcollection]', None, self.master)
ldapGroupBase().create_grp_usr_internal(["user1"], self.master, roles=[''], groups="testgroup")
ldapGroupBase().create_grp_usr_internal(["user2"], self.master, roles=[''], groups="testgroup")
client = self.collectionConnection("testscope", "testcollection", "testbucket","user1")
result = client.insert("user1key", "{2:2}", scope="testscope", collection="testcollection")
client = self.collectionConnection("testscope", "testcollection", "testbucket", "user2")
result = client.insert("user2key", "{2:2}", scope="testscope", collection="testcollection")
self.rest_client.delete_user_roles("user2")
self.sleep(10)
try:
result = client.insert("user2key_error", "{2:2}", scope="testscope", collection="testcollection")
except Exception as e:
errorResult = (e.result.errstr == 'LCB_ERR_AUTHENTICATION_FAILURE (206)')
self.assertTrue(errorResult, "User deleted but still able to insert data")
# Remove groups from CB and check if user has access
def add_remove_groups(self):
self.rest_client.delete_bucket("testbucket")
self.rest_client.create_bucket(bucket="testbucket", ramQuotaMB=100)
self.sleep(10)
self.rest.create_scope_collection(bucket="testbucket", scope="testscope", collection="testcollection")
self.sleep(10)
ldapGroupBase().add_role_group("testgroup", 'data_writer[testbucket:testscope:testcollection]', None, self.master)
ldapGroupBase().add_role_group("testgroup1", 'data_writer[testbucket:testscope:testcollection]', None,
self.master)
ldapGroupBase().create_grp_usr_internal(["user1"], self.master, roles=[''], groups="testgroup")
ldapGroupBase().create_grp_usr_internal(["user2"], self.master, roles=[''], groups="testgroup1")
client = self.collectionConnection("testscope", "testcollection", "testbucket","user1")
result = client.insert("user1key", "{2:2}", scope="testscope", collection="testcollection")
client = self.collectionConnection("testscope", "testcollection", "testbucket", "user2")
result = client.insert("user2key", "{2:2}", scope="testscope", collection="testcollection")
self.rest_client.delete_group('testgroup1')
self.sleep(10)
try:
result = client.insert("user2key_error", "{2:2}", scope="testscope", collection="testcollection")
except Exception as e:
errorResult = (e.result.errstr == 'LCB_ERR_AUTHENTICATION_FAILURE (206)')
self.assertTrue(errorResult, "Group deleted but still able to insert data")
#Tests for scope admin
def rest_execute(self, bucket, scope, collection, username, password, method='POST'):
rest = RestConnection(self.master)
api = "http://10.112.191.101:8091" + '/pools/default/buckets/%s/collections/%s' % (bucket, scope)
body = {'name': collection}
params = urllib.parse.urlencode(body)
import base64
authorization = base64.encodebytes(('%s:%s' % (username, password)).encode()).decode()
header = {'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': 'Basic %s' % authorization,
'Accept': '*/*'}
self.sleep(10)
status, content, header = rest._http_request(api, method, params=params, headers=header)
return status, content, header
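# rest_execute() issues a collection-create request over the REST API as the
# given user (basic auth) and returns the raw status/content/headers; the
# scope admin test below uses the returned status to check that a scope_admin
# user can create collections only inside the scope it was granted on.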
def test_scope_admin_add_collection(self):
self.rest_client.delete_bucket("testbucket")
self.rest_client.create_bucket(bucket="testbucket", ramQuotaMB=100)
self.rest.create_scope_collection(bucket="testbucket", scope="testscope", collection="testcollection")
self.rest.create_scope_collection(bucket="testbucket", scope="testscope1", collection="testcollection")
self.create_collection_read_write_user('user_scope_admin', 'testbucket', 'testscope', None, role='scope_admin')
self.sleep(10)
status, content, header = self.rest_execute('testbucket', 'testscope1', 'collection1','user_scope_admin','password')
if status == True:
self.assertTrue(False,'Scope admin can create collection for scope it does not have access to')
status, content, header = self.rest_execute('testbucket', 'testscope', 'collection1','user_scope_admin','password')
if status == False:
self.assertTrue(False,'Scope admin cannot create collection in scope it has access to')
self.create_collection_read_write_user('user_scope_admin_full', 'testbucket', None, None, role='scope_admin')
self.sleep(30)
status, content, header = self.rest_execute('testbucket', 'testscope', 'collection2','user_scope_admin_full','password')
if status == False:
self.assertTrue(False,'Scope admin can create collection for scope it does not have access to')
#Defect - PYCBC-1005
import copy
def test_x509(self):
x509main(self.master)._generate_cert(self.servers, type='openssl', encryption="", key_length=1024, client_ip='172.16.1.174',wildcard_dns=None)
x509main(self.master).setup_master("enable", ["subject.cn","san.dnsname","san.uri"], ['www.cb-','us.','www.'], [".",".","."], "rest")
x509main().setup_cluster_nodes_ssl(self.servers)
self.connection_sdk(self.master.ip)
'''
roleSet = self.checkCollections_x509()
getScope = self.getRoles('scope')
print (type(getScope))
for role in roleSet:
self.check_permission_multiple_roles(role,getScope)
'''
|
process.py
|
from __future__ import print_function
import signal
import subprocess
import sys
import logging
from datetime import datetime
from threading import Thread
try:  # Python 3
    from queue import Queue, Empty
except ImportError:  # Python 2
    from Queue import Queue, Empty
#
# This code comes from Honcho. Didn't need the whole Honcho
# setup, so I just swiped this part which is what the build
# pack utils library needs.
#
# https://github.com/nickstenning/honcho
#
# I've modified parts to fit better with this module.
#
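#
# In outline: each Process gets a reader thread running _enqueue_output(),
# which pushes (process, line) tuples onto a shared queue; ProcessManager.loop()
# drains that queue and writes every line through a per-process Printer, which
# prefixes output with a timestamp and the process name.
#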
def _enqueue_output(proc, queue):
if not proc.quiet:
for line in iter(proc.stdout.readline, b''):
try:
line = line.decode('utf-8')
except UnicodeDecodeError as e:
queue.put((proc, e))
continue
if not line.endswith('\n'):
line += '\n'
queue.put((proc, line))
proc.stdout.close()
class Process(subprocess.Popen):
def __init__(self, cmd, name=None, quiet=False, *args, **kwargs):
self.name = name
self.quiet = quiet
self.reader = None
self.printer = None
self.dead = False
if self.quiet:
self.name = "{0} (quiet)".format(self.name)
defaults = {
'stdout': subprocess.PIPE,
'stderr': subprocess.STDOUT,
'shell': True,
'bufsize': 1,
'close_fds': True
}
defaults.update(kwargs)
super(Process, self).__init__(cmd, *args, **defaults)
class ProcessManager(object):
"""
Here's where the business happens. The ProcessManager multiplexes and
pretty-prints the output from a number of Process objects, typically added
using the add_process() method.
Example:
pm = ProcessManager()
pm.add_process('name', 'ruby server.rb')
pm.add_process('name', 'python worker.py')
pm.loop()
"""
def __init__(self):
self.processes = []
self.queue = Queue()
self.returncode = None
self._terminating = False
self._log = logging.getLogger('process')
def add_process(self, name, cmd, quiet=False):
"""
Add a process to this manager instance:
Arguments:
name - a human-readable identifier for the process
(e.g. 'worker'/'server')
cmd - the command-line used to run the process
(e.g. 'python run.py')
"""
self._log.debug("Adding process [%s] with cmd [%s]", name, cmd)
self.processes.append(Process(cmd, name=name, quiet=quiet))
def loop(self):
"""
Enter the main loop of the program. This will print the multiplexed
output of all the processes in this ProcessManager to sys.stdout, and
will block until all the processes have completed.
If one process terminates, all the others will be terminated
and loop() will return.
Returns: the returncode of the first process to exit, or 130 if
interrupted with Ctrl-C (SIGINT)
"""
self._init_readers()
self._init_printers()
for proc in self.processes:
self._log.info("Started [%s] with pid [%s]", proc.name, proc.pid)
while True:
try:
proc, line = self.queue.get(timeout=0.1)
except Empty:
pass
except KeyboardInterrupt:
self._log.exception("SIGINT received")
self.returncode = 130
self.terminate()
else:
self._print_line(proc, line)
for proc in self.processes:
if not proc.dead and proc.poll() is not None:
self._log.info('process [%s] with pid [%s] terminated',
proc.name, proc.pid)
proc.dead = True
# Set the returncode of the ProcessManager instance if not
# already set.
if self.returncode is None:
self.returncode = proc.returncode
self.terminate()
if not self._process_count() > 0:
break
while True:
try:
proc, line = self.queue.get(timeout=0.1)
except Empty:
break
else:
self._print_line(proc, line)
return self.returncode
def terminate(self):
"""
Terminate all the child processes of this ProcessManager, bringing the
loop() to an end.
"""
if self._terminating:
return False
self._terminating = True
self._log.info("sending SIGTERM to all processes")
for proc in self.processes:
if proc.poll() is None:
self._log.info("sending SIGTERM to pid [%d]", proc.pid)
proc.terminate()
def kill(signum, frame):
# If anything is still alive, SIGKILL it
for proc in self.processes:
if proc.poll() is None:
self._log.info("sending SIGKILL to pid [%d]", proc.pid)
proc.kill()
signal.signal(signal.SIGALRM, kill) # @UndefinedVariable
signal.alarm(5) # @UndefinedVariable
def _process_count(self):
return [p.poll() for p in self.processes].count(None)
def _init_readers(self):
for proc in self.processes:
self._log.debug("Starting [%s]", proc.name)
t = Thread(target=_enqueue_output, args=(proc, self.queue))
t.daemon = True # thread dies with the program
t.start()
def _init_printers(self):
width = max(len(p.name) for p in
filter(lambda x: not x.quiet, self.processes))
for proc in self.processes:
proc.printer = Printer(sys.stdout,
name=proc.name,
width=width)
def _print_line(self, proc, line):
if isinstance(line, UnicodeDecodeError):
self._log.error(
"UnicodeDecodeError while decoding line from process [%s]",
proc.name)
else:
print(line, end='', file=proc.printer)
class Printer(object):
def __init__(self, output=sys.stdout, name='unknown', width=0):
self.output = output
self.name = name
self.width = width
self._write_prefix = True
def write(self, *args, **kwargs):
new_args = []
for arg in args:
lines = arg.split('\n')
lines = [self._prefix() + l if l else l for l in lines]
new_args.append('\n'.join(lines))
self.output.write(*new_args, **kwargs)
def _prefix(self):
time = datetime.now().strftime('%H:%M:%S')
name = self.name.ljust(self.width)
prefix = '{time} {name} | '.format(time=time, name=name)
return prefix
|
test_all.py
|
#!/usr/bin/python
from functions import *
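# Rough flow of this script (helpers such as restore_clean_snapshots, update_os,
# runTest and the path/colour globals come from functions.py): restore clean VM
# snapshots, run OS updates and the FOG install tests per OS in parallel
# threads, read the per-OS result files, assemble an HTML dashboard row by row,
# and finally sync the generated web directory to the S3 bucket.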
now = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%p")
# Get header file.
with open(os.path.join(scriptDir,headerHtml), 'r') as content_file:
dashboard = content_file.read()
dashboard = dashboard + "\n<p>Last updated: " + now + " UTC</p><br>"
# Table opening and columns.
dashboard = dashboard + "\n<table>"
dashboard = dashboard + "\n<tr>"
dashboard = dashboard + "\n<th>OS</th>"
dashboard = dashboard + "\n<th>Branch</th>"
dashboard = dashboard + "\n<th>Install Status</th>"
dashboard = dashboard + "\n<th>Install Outcome</th>"
dashboard = dashboard + "\n<th>Install Duration</th>"
dashboard = dashboard + "\n<th>Install Output</th>"
dashboard = dashboard + "\n<th>Install Error</th>"
dashboard = dashboard + "\n<th>Apache</th>"
dashboard = dashboard + "\n<th>php-fpm</th>"
dashboard = dashboard + "\n<th>Patch Status</th>"
dashboard = dashboard + "\n<th>Patch Duration</th>"
dashboard = dashboard + "\n<th>Patch Output</th>"
dashboard = dashboard + "\n</tr>"
delete_dir(statusDir)
delete_dir(webdir)
make_dir(statusDir)
make_dir(webdir)
if len(sys.argv) > 1:
branches = [sys.argv[1]]
for branch in branches:
# Need a unique timestamp for each branch run.
now = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%p")
# Restore snapshots
restore_clean_snapshots()
# Wait for instances to get ready a bit.
time.sleep(bootTime)
# Add identities
add_ssh_identities()
# Run updates here.
threads = []
for OS in OSs:
instance = get_instance("Name","fogtesting-" + OS)
threads.append(Thread(target=update_os,args=(branch,OS,now,instance)))
complete_threads(threads)
# Run fog installation tests.
threads = []
for OS in OSs:
instance = get_instance("Name","fogtesting-" + OS)
threads.append(Thread(target=runTest,args=(branch,OS,now,instance)))
complete_threads(threads)
# Here, need to gather the results and write an html file.
for OS in OSs:
resultFile = os.path.join(statusDir, OS + "." + branch + ".result")
with open(resultFile, 'r') as content_file:
exitCode = content_file.read()
dashboard = dashboard + "\n<tr>"
dashboard = dashboard + "\n<td>" + OS + "</td>"
dashboard = dashboard + "\n<td>" + branch + "</td>"
# Fog install status.
if exitCode in codes.keys():
dashboard = dashboard + "\n<td><img src=\"" + codes[exitCode]["status"] + "\" alt=\"" + codes[exitCode]["status"] + "\"></td>"
dashboard = dashboard + "\n<td>" + codes[exitCode]["reason"] + "</td>"
else:
dashboard = dashboard + "\n<td><img src=\"" + red + "\" alt=\"" + red + "\"></td>"
dashboard = dashboard + "\n<td>Unknown installation failure, exit code '" + exitCode + "'</td>"
# Fog install duration.
if os.path.isfile(os.path.join(statusDir,OS + "." + branch + ".duration")):
fog_duration = read_file(os.path.join(statusDir,OS + "." + branch + ".duration"))
dashboard = dashboard + "\n<td>" + fog_duration + "</td>"
else:
dashboard = dashboard + "\n<td>NA</td>"
# fog output log.
if os.path.isfile(os.path.join(webdir,OS,now + "_output.log")):
dashboard = dashboard + "\n<td><a href=\"" + http + s3bucket + port + netdir + "/" + OS + "/" + now + "_output.log\">output</td>"
else:
dashboard = dashboard + "\n<td>NA</td>"
# fog error log.
if os.path.isfile(os.path.join(webdir,OS,now + "_fog_error.log")):
dashboard = dashboard + "\n<td><a href=\"" + http + s3bucket + port + netdir + "/" + OS + "/" + now + "_fog_error.log\">error</td>"
else:
dashboard = dashboard + "\n<td>NA</td>"
# apache log.
if os.path.isfile(os.path.join(webdir,OS,now + "_apache.log")):
dashboard = dashboard + "\n<td><a href=\"" + http + s3bucket + port + netdir + "/" + OS + "/" + now + "_apache.log\">apache</a></td>"
else:
dashboard = dashboard + "\n<td>NA</td>"
# php-fpm log.
if os.path.isfile(os.path.join(webdir,OS,now + "_php-fpm.log")):
dashboard = dashboard + "\n<td><a href=\"" + http + s3bucket + port + netdir + "/" + OS + "/" + now + "_php-fpm.log\">php-fpm</a></td>"
else:
dashboard = dashboard + "\n<td>NA</td>"
# Patch results.
resultFile = os.path.join(statusDir,OS + "." + branch + ".patch_result")
with open(resultFile, 'r') as content_file:
exitCode = content_file.read()
if exitCode == "0":
dashboard = dashboard + "\n<td><img src=\"" + green + "\" alt=\"green\"></td>"
elif exitCode == "-1":
dashboard = dashboard + "\n<td><img src=\"" + orange + "\" alt=\"orange\"></td>"
else:
dashboard = dashboard + "\n<td><img src=\"" + red + "\" alt=\"red\"></td>"
# Patch duration.
if os.path.isfile(os.path.join(statusDir,OS + "." + branch + ".patch_duration")):
patch_duration = read_file(os.path.join(statusDir,OS + "." + branch + ".patch_duration"))
dashboard = dashboard + "\n<td>" + patch_duration + "</td>"
else:
dashboard = dashboard + "\n<td>NA</td>"
# Patch output.
if os.path.isfile(os.path.join(webdir,OS,now + "_patch_output.log")):
dashboard = dashboard + "\n<td><a href=\"" + http + s3bucket + port + netdir + "/" + OS + "/" + now + "_patch_output.log\">patch</a></td>"
else:
dashboard = dashboard + "\n<td>NA</td>"
dashboard = dashboard + "\n</tr>"
# Close table.
dashboard = dashboard + "\n</table>"
# Get the footer html.
with open(os.path.join(scriptDir,footerHtml), 'r') as content_file:
dashboard = dashboard + "\n" + content_file.read()
# Write out the dashboard.
newDashboard = os.path.join(webdir,"index.html")
with open(newDashboard, 'w') as content_file:
content_file.write(dashboard)
# Ensure the little color dots are in the web dir.
if not os.path.isfile(os.path.join(webdir,green)):
subprocess.call("cp " + os.path.join(scriptDir,green) + " " + os.path.join(webdir,green), shell=True)
if not os.path.isfile(os.path.join(webdir,orange)):
subprocess.call("cp " + os.path.join(scriptDir,orange) + " " + os.path.join(webdir,orange), shell=True)
if not os.path.isfile(os.path.join(webdir,red)):
subprocess.call("cp " + os.path.join(scriptDir,red) + " " + os.path.join(webdir,red), shell=True)
# Empty the bucket:
subprocess.call(aws + " s3 rm s3://" + s3bucket + " --recursive > /dev/null 2>&1", shell=True)
# Copy contents to the s3 dashboard
subprocess.call(aws + " s3 cp " + webdir + "/ s3://" + s3bucket + " --recursive > /dev/null 2>&1", shell=True)
|
session.py
|
import os
import random
import threading
import time
from keras.models import load_model
from sklearn.cluster import KMeans
import librosa
import numpy as np
import tensorflow as tf
from tomomibot.audio import (AudioIO, slice_audio, detect_onsets,
is_silent, mfcc_features, get_db)
from tomomibot.const import MODELS_FOLDER, SILENCE_CLASS
from tomomibot.train import reweight_distribution
from tomomibot.utils import (get_num_classes,
encode_duration_class,
encode_dynamic_class,
encode_feature_vector,
decode_classes)
CHECK_WAV_INTERVAL = 0.1 # Check .wav queue interval (in seconds)
MAX_DENSITY_ONSETS = 10 # How many onsets count as maximum density
PLAY_DELAY_EXP = 5 # Exponent for maximum density delay
RESET_PROPABILITY = 0.1 # Probability of resetting the sequence on a tick
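# Roughly how these constants interact during a session: tick() derives a
# density in [0, 1] as min(len(onsets), MAX_DENSITY_ONSETS) / MAX_DENSITY_ONSETS,
# play() delays each queued sample by random.expovariate(PLAY_DELAY_EXP) * density
# seconds, and at the end of a tick the event sequence is cleared with
# probability RESET_PROPABILITY.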
class Session():
def __init__(self, ctx, voice, model, reference_voice=None, **kwargs):
self.ctx = ctx
self.num_sound_classes = kwargs.get('num_classes')
self.use_dynamics = kwargs.get('dynamics')
self.use_durations = kwargs.get('durations')
self.penalty = kwargs.get('penalty')
self.samplerate = kwargs.get('samplerate')
self.seq_len = kwargs.get('seq_len')
self.threshold_db = kwargs.get('threshold')
# These parameters can be changed during performance
self._interval = kwargs.get('interval')
self._temperature = kwargs.get('temperature')
# Prepare audio I/O
try:
self._audio = AudioIO(ctx,
samplerate=self.samplerate,
device_in=kwargs.get('input_device'),
device_out=kwargs.get('output_device'),
channel_in=kwargs.get('input_channel'),
channel_out=kwargs.get('output_channel'),
volume=kwargs.get('volume'))
except RuntimeError as err:
self.ctx.elog(err)
self.ctx.log('Loading ..')
# Prepare concurrent threads
self._thread = threading.Thread(target=self.run, args=())
self._thread.daemon = True
self._play_thread = threading.Thread(target=self.play, args=())
self._play_thread.daemon = True
self._lock = threading.Lock()
# Prepare playing logic
self._sequence = []
self._wavs = []
self._density = 0.0
self.is_running = False
# Load model & make it ready for being used in another thread
model_name = '{}.h5'.format(model)
model_path = os.path.join(os.getcwd(), MODELS_FOLDER, model_name)
self._model = load_model(model_path)
self._model._make_predict_function()
self._graph = tf.get_default_graph()
# Calculate number of total classes
num_classes = get_num_classes(self.num_sound_classes,
self.use_dynamics,
self.use_durations)
num_model_classes = self._model.layers[-1].output_shape[1]
if num_model_classes != num_classes:
self.ctx.elog('The given model was trained with a different '
'amount of classes: given {}, but '
'should be {}.'.format(num_classes,
num_model_classes))
# Prepare voice and k-means clustering
if reference_voice is None:
reference_voice = voice
else:
voice.fit(reference_voice)
self._voice = voice
self._kmeans = KMeans(n_clusters=self.num_sound_classes)
self._kmeans.fit(reference_voice.points)
# Get the classes of the voice sound material / points
point_classes = self._kmeans.predict(self._voice.points)
self._point_classes = []
for idx in range(num_classes):
indices = np.where(point_classes == idx)
self._point_classes.append(indices[0])
self.ctx.log('Voice "{}" with {} samples'
.format(voice.name, len(voice.points)))
@property
def master_volume(self):
return self._audio.volume
@master_volume.setter
def master_volume(self, value):
self._audio.volume = value
@property
def interval(self):
return self._interval
@interval.setter
def interval(self, value):
with self._lock:
self._interval = value
@property
def temperature(self):
return self._temperature
@temperature.setter
def temperature(self, value):
with self._lock:
self._temperature = value
def reset_sequence(self):
with self._lock:
self._sequence = []
def start(self):
self.is_running = True
# Start reading audio signal _input
self._audio.start()
# Start threads
self._thread.start()
self._play_thread.start()
self.ctx.log('Ready!\n')
def stop(self):
self._audio.stop()
self.is_running = False
def run(self):
while self.is_running:
time.sleep(self._interval)
if self.is_running:
with self._lock:
self.tick()
def play(self):
while self.is_running:
time.sleep(CHECK_WAV_INTERVAL)
if not self.is_running:
return
if len(self._wavs) > 1:
# Get next wav file to play from queue
wav = self._wavs[0]
self.ctx.vlog(
'▶ play .wav sample "{}" (queue={}, density={})'.format(
os.path.basename(wav),
len(self._wavs),
self._density))
# Delay playing the sample a little bit
rdm = random.expovariate(PLAY_DELAY_EXP) * self._density
time.sleep(rdm)
# Play it!
self._audio.play(wav)
# Remove the played sample from our queue
self._wavs = self._wavs[1:]
def tick(self):
"""Main routine for live sessions"""
# Read current frame buffer from input signal
frames = np.array(self._audio.read_frames()).flatten()
if len(frames) == 0:
return
self.ctx.vlog('Read {0} frames (volume={1:.2f}dB)'.format(
len(frames), np.max(get_db(frames))))
# Detect onsets in available data
onsets, _ = detect_onsets(frames,
self.samplerate,
self.threshold_db)
# Set a density based on amount of onsets
self._density = min(
MAX_DENSITY_ONSETS, len(onsets)) / MAX_DENSITY_ONSETS
# Slice audio into parts when possible
slices = []
if len(onsets) == 0 and not is_silent(frames, self.threshold_db):
slices = [[frames, 0, 0]]
else:
slices = slice_audio(frames, onsets, trim=False)
self.ctx.vlog('{} onsets detected & {} slices generated'.format(
len(onsets), len(slices)))
# Analyze and categorize slices
for y in slices:
y_slice = y[0]
# Calculate MFCCs
try:
mfcc = mfcc_features(y_slice, self.samplerate)
except RuntimeError:
self.ctx.vlog(
'Not enough sample data for MFCC analysis')
else:
# Calculate RMS
rms_data = librosa.feature.rms(y=y_slice) / self._voice.rms_max
rms = np.float32(np.max(rms_data)).item()
# Project point into given voice PCA space
point = self._voice.project([mfcc])[0].flatten()
# Predict k-means class from point
class_sound = self._kmeans.predict([point])[0]
# Get dynamic class
class_dynamic = encode_dynamic_class(class_sound, rms)
# Get duration class
duration = len(y_slice) / self.samplerate * 1000
class_duration = encode_duration_class(duration)
# Encode it!
feature_vector = encode_feature_vector(self.num_sound_classes,
class_sound,
class_dynamic,
class_duration,
self.use_dynamics,
self.use_durations)
# Add it to our sequence queue
self._sequence.append(feature_vector)
# Check for too long sequences, cut it if necessary
penalty = self.seq_len * self.penalty
if len(self._sequence) > penalty:
self._sequence = self._sequence[penalty:]
# Check if we already have enough data to do something
if len(self._sequence) < self.seq_len:
self.ctx.vlog('')
return
with self._graph.as_default():
max_index = len(self._sequence)
while True:
# Play all possible subsequences
min_index = max_index - self.seq_len
if min_index < 0:
break
sequence_slice = self._sequence[min_index:max_index]
# Predict next action via model
result = self._model.predict(np.array([sequence_slice]))
if np.sum(result) == 0:
break
# Reweight the softmax distribution
result_reweighted = reweight_distribution(result,
self._temperature)
result_class = np.argmax(result_reweighted)
# Decode class back into sub classes
class_sound, class_dynamic, class_duration = decode_classes(
result_class,
self.num_sound_classes,
self.use_dynamics,
self.use_durations)
# Version >1: Do not do anything when this is silence
if self._voice.version == 1 or class_sound != SILENCE_CLASS:
# Find closest sound to this point
wav = self._voice.find_wav(self._point_classes,
class_sound,
class_dynamic,
class_duration)
# Only show this when able to work with dynamics etc.
if self._voice.version == 2:
smiley = '☺' if wav else '☹'
self.ctx.vlog('{} find sound (class={}, '
'dynamic={}, duration={})'.format(
smiley, class_sound, class_dynamic,
class_duration))
if wav:
self._wavs.append(wav)
max_index -= 1
# Remove oldest event from sequence queue
self._sequence = self._sequence[1:]
if random.random() < RESET_PROPABILITY:
self._sequence = []
self.ctx.vlog('')
|
producer_consumer.py
|
# Simple producer and consumer problem
# demonstrates inter-thread communication with queues
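# The hand-off uses two queues: the producer pushes work items onto `work` and
# signals completion through `finished` (False up front, True after the last
# item); the consumer drains `work` whenever it has items and stops once it
# reads True from `finished` while `work` is empty.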
import random
import threading
from threading import Thread
import multiprocessing
from queue import Queue
import time
import logging
logging.basicConfig(format='%(asctime)s.%(msecs)03d - %(levelname)s - %(message)s', datefmt='%H:%M:%S', level=logging.DEBUG)
#Functions
def display(msg):
threadname = threading.current_thread().name
procname = multiprocessing.current_process().name
logging.info(f'{procname}-{threadname} : {msg}')
#Producer
def create_work(queue, finished, max):
finished.put(False)
for x in range(max):
v = random.randint(1, 100)
queue.put(v)
display(f'Producing {x} : {v}')
finished.put(True)
display(f'Finished P')
#Consumer
def perform_work(work, finished):
counter = 0
while True:
if not work.empty():
v = work.get()
display(f'Consuming {counter} : {v}')
counter += 1
else:
q = finished.get()
if q == True:
break
display(f'Finished C')
#Main
def main():
max = 50
work = Queue()
finished = Queue()
producer = Thread(target=create_work, args=[work, finished, max], daemon=True)
consumer = Thread(target=perform_work, args=[work, finished], daemon=True)
producer.start()
consumer.start()
producer.join()
display(f'Producer has Finished')
consumer.join()
display(f'Consumer has Finished')
display(f'Main finished')
if __name__ == '__main__':
main()
|
session_test.py
|
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
import tensorflow.python.platform
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import config_pb2
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import types
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
# NOTE(mrry): Dummy shape registration for op used in the tests.
ops.RegisterShape('ConstructionFails')(None)
class SessionTest(test_util.TensorFlowTestCase):
def testUseExistingGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session(graph=g):
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session():
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testCreate(self):
with session.Session():
inp = constant_op.constant(10.0, name='W1')
copy = array_ops.identity(inp)
# Test with feed.
# TODO(mrry): Investigate why order='F' didn't work.
arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
copy_val = copy.eval({'W1:0': arr})
self.assertAllEqual(arr, copy_val)
# Test without feed.
copy_val = copy.eval()
self.assertAllEqual(np.asarray(10.0, dtype=np.float32), copy_val)
def testManyCPUs(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testErrorsReported(self):
with session.Session() as s:
constant_op.constant(10.0, name='W1')
with self.assertRaises(ValueError):
s.run('foo:0')
def testErrorPayload(self):
with session.Session():
a = array_ops.placeholder(types.float32)
with self.assertRaisesOpError(lambda e: e.op == a.op):
a.eval()
def testOpConstructionErrorPayload(self):
with session.Session():
failing_op = ops.get_default_graph().create_op(
'ConstructionFails', [], [], name='f')
def exc_predicate(e):
return (e.op == failing_op
and e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
failing_op.run()
def testErrorBasedOn(self):
with session.Session() as sess:
a = constant_op.constant(0.0, shape=[2, 3])
# NOTE(mrry): The original_op is nonsense, but used here to test that the
# errors are reported correctly.
# pylint: disable=protected-access
with sess.graph._original_op(a.op):
b = array_ops.identity(a, name='id')
with sess.graph._original_op(b.op):
c = array_ops.placeholder(types.float32)
# pylint: enable=protected-access
def exc_predicate(e):
return (e.op == c.op
and e.op._original_op == b.op
and e.op._original_op._original_op == a.op)
with self.assertRaisesOpError(exc_predicate):
c.eval()
def testFetchTensorObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
results_with_list = s.run([c])
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
results_with_single = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
results_with_get = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
a_val, b_val = s.run([a, b]) # Test multiple fetches.
self.assertAllEqual([[1.0, 1.0]], a_val)
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
def testFetchScalar(self):
with session.Session() as s:
for scalar in np.int32, np.int64, np.float32, np.float64:
x = scalar(7)
y = scalar(8)
tf_x = constant_op.constant(x, shape=[])
tf_y = constant_op.constant(y)
tf_xy = math_ops.add(tf_x, tf_y)
# Single fetch
xy = s.run(tf_xy)
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# List fetch
xy, = s.run([tf_xy])
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
def testFetchOperationObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
v = variables.Variable(a, name='testFetchOperationObject_v')
s.run(v.initializer)
v_val = s.run(v)
self.assertAllEqual([[1.0, 1.0]], v_val)
def testFetchSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = ops.SparseTensor(
constant_op.constant(indices),
constant_op.constant(values),
constant_op.constant(shape))
# Single fetch, use as tuple
sp_out = s.run(sp)
indices_out, values_out, shape_out = sp_out
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Single fetch, use as SparseTensorValue
sp_out = s.run(sp)
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
# Tuple fetch, use as tuple
indices_out, values_out, shape_out = s.run(sp)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as tuple
(indices_out, values_out, shape_out), = s.run([sp])
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as SparseTensorValue
sp_out, = s.run([sp])
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
def testFeedSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = ops.SparseTensor(
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.shape)
sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.shape, shape)
def testExtendWithStatelessOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
# Extend will happen here.
e_val = s.run(e)
self.assertAllEqual([[24.0]], e_val)
def testExtendWithStatefulOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
# Extend will happen here.
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
def testExtendWithGroupBy(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
p = variables.Variable(a, name='testExtendWithGroupBy_p')
a_val = a.eval() # Force an Extend after this op.
self.assertAllEqual([[1.0, 1.0]], a_val)
b = constant_op.constant(2.0, shape=[1, 2])
q = variables.Variable(b, name='testExtendWithGroupBy_q')
# Extend will happen here.
init = control_flow_ops.group(p.initializer, q.initializer)
s.run(init)
p_val, q_val = s.run([p, q])
self.assertAllEqual([[1.0, 1.0]], p_val)
self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
def testOperationRunMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 2], name='b')
v = variables.Variable(a, a.dtype)
assign_a_to_v = state_ops.assign(v, a)
assign_a_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[1.0, 1.0]], v_val)
assign_b_to_v = state_ops.assign(v, b)
assign_b_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[2.0, 2.0]], v_val)
assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
v_val = v.eval()
self.assertAllEqual([[3.0, 3.0]], v_val)
def testDefaultGraph(self):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
self.assertEqual(ops.get_default_graph(), a.graph)
self.assertEqual(ops.get_default_graph(), b.graph)
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testDefaultGraph_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='var_%d' % i)
# Block here until all threads have constructed their graph.
constructed_event.set()
continue_event.wait()
assign_c_to_v = state_ops.assign(v, c)
v.initializer.run()
assign_c_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
# Fork ten threads that use their thread-local default graph.
threads = []
constructed_events = [threading.Event() for _ in range(10)]
continue_event = threading.Event()
for i, constructed_event in enumerate(constructed_events):
t = self.checkedThread(target=self._testDefaultGraphInThread,
args=(constructed_event, continue_event, i))
threads.append(t)
for t in threads:
t.start()
for constructed_event in constructed_events:
constructed_event.wait()
continue_event.set()
for t in threads:
t.join()
def testParallelRun(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
ev = threading.Event()
def run_step():
ev.wait()
val = c.eval(session=sess)
self.assertEqual(val, 5.0)
threads = [self.checkedThread(target=run_step) for _ in range(100)]
for t in threads:
t.start()
ev.set()
for t in threads:
t.join()
def testRunFeedDict(self):
with session.Session() as s:
x = array_ops.zeros([2])
y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x: [1, 1]})
assert (y == 2 * np.ones(2)).all()
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals('', sess.graph_def)
c = constant_op.constant(5.0, name='c')
self.assertEquals(len(sess.graph_def.node), 1)
d = constant_op.constant(6.0, name='d')
self.assertEquals(len(sess.graph_def.node), 2)
self.assertAllEqual(c.eval(), 5.0)
self.assertAllEqual(d.eval(), 6.0)
e = constant_op.constant(7.0, name='e')
self.assertEquals(len(sess.graph_def.node), 3)
self.assertAllEqual(e.eval(), 7.0)
def testUseAfterClose(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
sess.run(c)
def testUseAfterCloseConcurrent(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
def update_thread():
with self.assertRaisesWithPredicateMatch(
RuntimeError,
lambda e: 'Attempted to use a closed Session.' in str(e)):
while True:
sess.run(c)
t = threading.Thread(target=update_thread)
t.start()
time.sleep(0.1)
sess.close()
t.join()
def testNotEntered(self):
# pylint: disable=protected-access
self.assertEqual(ops._default_session_stack.get_default(), None)
# pylint: enable=protected-access
with ops.device('/cpu:0'):
sess = session.Session()
c_1 = constant_op.constant(5.0)
with sess.graph.as_default():
c_2 = constant_op.constant(5.0)
self.assertEqual(c_1.graph, c_2.graph)
self.assertEqual(sess.run(c_2), 5.0)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: 'No default session is registered.' in str(e)):
c_2.eval()
def testInteractive(self):
with ops.device('/cpu:0'):
sess = session.InteractiveSession()
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
self.assertAllEqual([[24.0]], e.eval())
sess.close()
def testSharedGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
self.assertAllEqual(sess1.run(c), sess2.run(c))
def testDuplicatedInputs(self):
with session.Session() as sess:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 3])
a_val, b_val, a2_val = sess.run([a, b, a])
self.assertAllEqual(a_val, [[1.0, 1.0]])
self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
with session.Session():
for dtype in [types.float32,
types.float64,
types.int32,
types.uint8,
types.int16,
types.int8,
types.int64,
types.bool,
types.complex64]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == types.bool:
np_array = np_array > 0
elif dtype == types.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
out_t.eval(feed_dict={feed_t: np_array}))
def testStringFetch(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([str(i) for i in xrange(size)],
dtype=np.object).reshape(shape) if size > 0 else []
c = constant_op.constant(c_list)
self.assertAllEqual(c.eval(), c_list)
def testStringFeed(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([str(i) for i in xrange(size)],
dtype=np.object).reshape(shape)
feed_t = array_ops.placeholder(dtype=types.string, shape=shape)
c = array_ops.identity(feed_t)
self.assertAllEqual(c.eval(feed_dict={feed_t: c_list}), c_list)
def testStringFeedWithNullCharacters(self):
with session.Session():
c_list = ['\n\x01\x00', '\n\x00\x01']
feed_t = array_ops.placeholder(dtype=types.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0])
self.assertEqual(c_list[1], out[1])
def testInvalidTargetFails(self):
with self.assertRaises(RuntimeError):
session.Session("INVALID_TARGET")
if __name__ == '__main__':
googletest.main()
|
code_rater.py
|
import os
import re
from threading import Thread
from threading import BoundedSemaphore
from threading import Lock
from pylint import epylint as linter
import file_handling
files_to_lint = list(file_handling.find_files(".", file_extensions=["py"]))
score_regex = r"(?<=Your code has been rated at ).+(?=\s\(previous)"
try:
print("Preparing...")
linter.py_run(files_to_lint[0], return_std=True)
# We just want to run one file, so we have the "(previous)" line, which the regex depends on
except:
pass
threads = []
semaphore = BoundedSemaphore(os.cpu_count())
lock = Lock()
ratings_list = []
def lint_file(file_path):
    # Limit concurrent pylint runs to the CPU count and make sure the slot is
    # released even when we return early.
    with semaphore:
        print("Linting:", file_path)
        # Run pylint on the file handed to this worker and capture its stdout.
        (pylint_stdout, pylint_stderr) = linter.py_run(f"{file_path} --disable=all", return_std=True)
        output = pylint_stdout.read()
        rating_line = [x for x in output.split("\n") if "Your code has been rated at" in x]
        if not rating_line:
            return
        rating_line = rating_line[0].strip()
        extracted_rating = re.findall(score_regex, rating_line)
        if extracted_rating:
            with lock:
                ratings_list.append(extracted_rating[0])
for file in files_to_lint:
threads.append(Thread(target=lint_file, args=(file,)))
print(f"Starting with {os.cpu_count()} threads...")
for thread in threads:
thread.start()
for thread in threads:
try:
thread.join()
except RuntimeError:
pass
ratings_list = [float(x.split("/")[0]) for x in ratings_list]
final_rating = round(sum(ratings_list) / len(ratings_list), 2)
print(f"\n\033[1;46mCode Rating: {final_rating}/10\033[0m")
|